7.3 In-Depth Analysis of the GPT2 Model
The GPT2 Model
Analysis of the Core GPT2 Source Code
- GPT2 attention mechanism code (excerpted from the Hugging Face transformers implementation in modeling_gpt2.py; a standalone usage sketch follows the excerpt):
import torch
from torch import nn
# Conv1D lives in transformers.pytorch_utils in recent releases (transformers.modeling_utils in older ones).
from transformers.pytorch_utils import Conv1D


class GPT2Attention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()

        max_positions = config.max_position_embeddings
        # Lower-triangular causal mask, registered as a non-trainable buffer.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        # Projection layers and dropouts (restored from the library source; forward() depends on them).
        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            attn_weights = attn_weights / (value.size(-1) ** 0.5)

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # Only the "normal" (self-)attention layer applies the causal mask.
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
            attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
                )
            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        # Reuse cached key/value states from previous decoding steps, if provided.
        if layer_past is not None:
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        if self.reorder_and_upcast_attn:
            # _upcast_and_reordered_attn is defined in the full library source; it is omitted from this
            # excerpt and only used when config.reorder_and_upcast_attn is True.
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
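The attention block above can be exercised on its own. Below is a minimal sketch (not from the original text) that instantiates GPT2Attention with a default GPT2Config and pushes a dummy batch through it; the shapes noted in the comments follow from hidden_size=768 and 12 heads, and the second call illustrates how layer_past implements key/value caching during incremental decoding.

# Minimal usage sketch (assumption: GPT2Attention as defined above plus transformers' GPT2Config).
import torch
from transformers import GPT2Config

config = GPT2Config()                     # defaults: hidden_size (n_embd) = 768, 12 heads, 64 dims per head
attn = GPT2Attention(config, layer_idx=0)
attn.eval()                               # disable attention/residual dropout for a deterministic pass

hidden_states = torch.randn(2, 10, config.hidden_size)   # (batch, seq_len, hidden_size)
with torch.no_grad():
    attn_output, present = attn(hidden_states, use_cache=True)

print(attn_output.shape)   # torch.Size([2, 10, 768])  -- same shape as the input hidden states
print(present[0].shape)    # torch.Size([2, 12, 10, 64]) -- cached keys: (batch, head, seq_len, head_dim)

# Incremental decoding: feed one new position and reuse the cached key/value pair via layer_past.
next_token_states = torch.randn(2, 1, config.hidden_size)
with torch.no_grad():
    next_output, next_present = attn(next_token_states, layer_past=present, use_cache=True)
print(next_present[0].shape)  # torch.Size([2, 12, 11, 64]) -- the cache grew by one position

Inside the full GPT2 model, this cache is what use_cache=True (past_key_values) exercises at generation time, so each new token only attends over work already done.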
GPT2 Model Application 1
- First, we demonstrate GPT2's ability to write classical Chinese prose.
- Pretrained model: download it and place it in the folder gpt2_chinese_ancient:
-rw-r--r-- 1 ec2-user ec2-user 577 Jan 6 12:30 config.json
-rw-r--r-- 1 ec2-user ec2-user 433952719 Jan 6 12:33 pytorch_model.bin
-rw-r--r-- 1 ec2-user ec2-user 4035 Jan 6 12:30 README.md
-rw-r--r-- 1 ec2-user ec2-user 112 Jan 6 12:30 special_tokens_map.json
-rw-r--r-- 1 ec2-user ec2-user 216 Jan 6 12:30 tokenizer_config.json
-rw-r--r-- 1 ec2-user ec2-user 126508 Jan 6 12:30 vocab.txt
- Invocation code (see the note on sampling parameters after the output below):
import torch
from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline
MODEL_PATH = './gpt2_chinese_ancient'
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
model = GPT2LMHeadModel.from_pretrained(MODEL_PATH)
text_generator = TextGenerationPipeline(model, tokenizer)
print(text_generator("当天下大义", max_length=100, do_sample=True))
- Output:
[{'generated_text': '当天下大义 。 当 事 势 之 定 断 , 惟 中 国 事 事 为 之 , 事 与 心 俱 成 , 无 往 非 中 国 之 利 害 , 有 往 非 中 国 之 利 害 。 今 中 国 不 利 于 中 国 , 而 强 其 中 国 , 吾 以 为 有 用 之 中 国 也 , 安 用 是 为 中 国 惜 法 而 不 急 于 法 哉 ! 况 中 国 所 以 不 能 与 外 国 联 和 者 ,'}]
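Because do_sample=True is used, each run produces different text. The text-generation pipeline forwards extra keyword arguments on to model.generate, so (assuming a reasonably recent transformers release) the sampling behaviour can be tuned directly in the call. The sketch below continues from the snippet above; top_k, top_p, temperature and repetition_penalty are illustrative values, not settings from the original text.

# Sketch: tuning the sampling behaviour of the same pipeline (illustrative values).
result = text_generator(
    "当天下大义",
    max_length=100,
    do_sample=True,
    top_k=50,               # sample only from the 50 most likely tokens at each step
    top_p=0.95,             # nucleus sampling: keep the smallest token set with cumulative prob >= 0.95
    temperature=0.9,        # < 1.0 sharpens the distribution, > 1.0 flattens it
    repetition_penalty=1.2, # discourage tokens that have already appeared
)
print(result[0]["generated_text"])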
GPT2 Model Application 2
- Next, we demonstrate GPT2's ability to write song lyrics.
- Pretrained model: download it and place it in the folder gpt2_chinese_lyric:
-rw-r--r-- 1 ec2-user ec2-user 577 Jan 6 12:22 config.json
-rw-r--r-- 1 ec2-user ec2-user 420921295 Jan 6 12:26 pytorch_model.bin
-rw-r--r-- 1 ec2-user ec2-user 3846 Jan 6 12:22 README.md
-rw-r--r-- 1 ec2-user ec2-user 253 Jan 6 12:22 tokenizer_config.json
-rw-r--r-- 1 ec2-user ec2-user 109540 Jan 6 12:22 vocab.txt
- Invocation code (see the note on generating multiple candidates after the output below):
import torch
from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline
MODEL_PATH = './gpt2_chinese_lyric'
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
model = GPT2LMHeadModel.from_pretrained(MODEL_PATH)
text_generator = TextGenerationPipeline(model, tokenizer)
print(text_generator("最美的不是下雨天,而是一起陪你躲过雨的屋檐", max_length=100, do_sample=True))
- Output:
[{'generated_text': '最美的不是下雨天,而是一起陪你躲过雨的屋檐 , 有 了 你 , 世 界 变 的 好 甜 , 看 幸 福 , 流 过 的 泪 痕 , 有 个 人 能 够 体 会 , 爱 需 要 勇 敢 些 我 最 爱 听 的 电 台 情 歌 , 想 让 你 知 道 , 只 要 你 肯 出 现 , 我 的 世 界 就 会 变 得 晴 天 , 你 让 我 知 道 我 的 那'}]
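To compare several sampled lyric continuations from a single prompt, num_return_sequences can also be passed through to model.generate. The sketch below continues from the snippet above and uses illustrative values; if CUDA is available, the pipeline additionally accepts a device index at construction time.

# Sketch: several sampled candidates from one prompt (continuing from the snippet above).
candidates = text_generator(
    "最美的不是下雨天,而是一起陪你躲过雨的屋檐",
    max_length=100,
    do_sample=True,
    num_return_sequences=3,   # sample three independent continuations
)
for i, candidate in enumerate(candidates):
    print(f"--- candidate {i} ---")
    print(candidate["generated_text"])

# Optional: build the pipeline on GPU 0 instead (if CUDA is available).
# text_generator = TextGenerationPipeline(model, tokenizer, device=0)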
Section Summary
- In this section we walked through the code for applying pretrained GPT2 models to classical Chinese prose writing and lyric writing, demonstrating GPT2's strong capability on generation tasks.