---
license: apache-2.0
---

**Transformer_translation**
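
Inference script for a German-to-English translation model: a `Seq2SeqTransformer` built on `torch.nn.Transformer`, tokenized with the `Helsinki-NLP/opus-mt-de-en` MarianTokenizer, loading weights from `./transformer_translation.pth` and translating with greedy decoding.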
|
```python
import math
import warnings

warnings.filterwarnings("ignore")

import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import Transformer
from transformers import MarianTokenizer

|
tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-de-en')

# Indices of the special tokens
PAD_IDX = tokenizer.pad_token_id
# MarianTokenizer usually defines no BOS token, so fall back to index 1
BOS_IDX = tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1
EOS_IDX = tokenizer.eos_token_id
UNK_IDX = tokenizer.unk_token_id

|
class PositionalEncoding(nn.Module):
    def __init__(self, emb_size: int, dropout: float, maxlen: int = 5000):
        super(PositionalEncoding, self).__init__()
        den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, maxlen).reshape(maxlen, 1)
        pos_embedding = torch.zeros((maxlen, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)
        pos_embedding[:, 1::2] = torch.cos(pos * den)
        pos_embedding = pos_embedding.unsqueeze(-2)
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('pos_embedding', pos_embedding)

    def forward(self, token_embedding: Tensor):
        return self.dropout(token_embedding + self.pos_embedding[:token_embedding.size(0), :])

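# The buffer above stores the fixed sinusoidal encodings from "Attention Is All You Need":
#   PE[pos, 2i]   = sin(pos / 10000^(2i / emb_size))
#   PE[pos, 2i+1] = cos(pos / 10000^(2i / emb_size))
# den equals 10000^(-2i / emb_size), and unsqueeze(-2) gives the buffer shape
# (maxlen, 1, emb_size) so it broadcasts over the batch dimension of (seq_len, batch, emb_size) inputs.
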
|
class TokenEmbedding(nn.Module):
    def __init__(self, vocab_size: int, emb_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size

    def forward(self, tokens: Tensor):
        return self.embedding(tokens.long()) * math.sqrt(self.emb_size)

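# Token embeddings are scaled by sqrt(emb_size), as in the original Transformer paper,
# so their magnitude stays comparable to the sinusoidal positional encodings added on top.
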
|
class Seq2SeqTransformer(nn.Module):
    def __init__(self, num_encoder_layers: int, num_decoder_layers: int,
                 emb_size: int, nhead: int, src_vocab_size: int,
                 tgt_vocab_size: int, dim_feedforward: int = 512, dropout: float = 0.1):
        super(Seq2SeqTransformer, self).__init__()
        self.transformer = Transformer(d_model=emb_size,
                                       nhead=nhead,
                                       num_encoder_layers=num_encoder_layers,
                                       num_decoder_layers=num_decoder_layers,
                                       dim_feedforward=dim_feedforward,
                                       dropout=dropout)
        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
        self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)

    def forward(self, src: Tensor, trg: Tensor, src_mask: Tensor,
                tgt_mask: Tensor, src_padding_mask: Tensor,
                tgt_padding_mask: Tensor, memory_key_padding_mask: Tensor):
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))
        outs = self.transformer(src_emb, tgt_emb, src_mask, tgt_mask, None,
                                src_padding_mask, tgt_padding_mask, memory_key_padding_mask)
        return self.generator(outs)

    def encode(self, src: Tensor, src_mask: Tensor):
        return self.transformer.encoder(self.positional_encoding(self.src_tok_emb(src)), src_mask)

    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        return self.transformer.decoder(self.positional_encoding(self.tgt_tok_emb(tgt)), memory, tgt_mask)

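# nn.Transformer defaults to batch_first=False, so the model expects sequence-first tensors:
# src is (src_len, batch), trg is (tgt_len, batch), and the generator output is
# (tgt_len, batch, tgt_vocab_size). The extra None passed to self.transformer above is the
# (unused) memory_mask argument.
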
|
def generate_square_subsequent_mask(sz):
    mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
    mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
    return mask

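# Example: generate_square_subsequent_mask(3) returns the causal (look-ahead) mask
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so position i may only attend to positions <= i.
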
|
def create_mask(src, tgt):
    src_seq_len = src.shape[0]
    tgt_seq_len = tgt.shape[0]

    tgt_mask = generate_square_subsequent_mask(tgt_seq_len)
    src_mask = torch.zeros((src_seq_len, src_seq_len), device=DEVICE).type(torch.bool)

    src_padding_mask = (src == PAD_IDX).transpose(0, 1)
    tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)
    return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask

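# For sequence-first batches of shape (src_len, batch) and (tgt_len, batch), create_mask returns:
#   src_mask: (src_len, src_len) all-False bool mask (the source is fully visible),
#   tgt_mask: (tgt_len, tgt_len) causal float mask,
#   src_padding_mask / tgt_padding_mask: (batch, src_len) / (batch, tgt_len) bools marking PAD positions.
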
|
def greedy_decode(model, src, src_mask, max_len, start_symbol):
    src = src.to(DEVICE)
    src_mask = src_mask.to(DEVICE)

    memory = model.encode(src, src_mask)
    # Start the target sequence with the start symbol
    ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)

    for i in range(max_len - 1):
        memory = memory.to(DEVICE)
        tgt_mask = (generate_square_subsequent_mask(ys.size(0))
                    .type(torch.bool)).to(DEVICE)
        out = model.decode(ys, memory, tgt_mask)
        out = out.transpose(0, 1)
        # Project the last decoder state to vocabulary logits and pick the most likely token
        prob = model.generator(out[:, -1])
        _, next_word = torch.max(prob, dim=1)
        next_word = next_word.item()

        ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)
        if next_word == EOS_IDX:
            break
    return ys

|
def translate(model: torch.nn.Module, src_sentence: str):
    model.eval()
    tokens = tokenizer(src_sentence, return_tensors='pt', padding=True)
    # Switch to sequence-first layout: (src_len, batch)
    src = tokens['input_ids'].transpose(0, 1).to(DEVICE)
    src_mask = (torch.zeros(src.shape[0], src.shape[0])).type(torch.bool).to(DEVICE)

    tgt_tokens = greedy_decode(model, src, src_mask, max_len=src.shape[0] + 5, start_symbol=BOS_IDX).flatten()
    return tokenizer.decode(tgt_tokens.tolist(), skip_special_tokens=True)

|
BATCH_SIZE = 32            # reduced batch size; originally 64
EMB_SIZE = 512             # unchanged
NHEAD = 8                  # unchanged
FFN_HID_DIM = 512          # reverted to 512; had been changed to 1024
NUM_ENCODER_LAYERS = 3     # reverted to 3; had been changed to 4
NUM_DECODER_LAYERS = 3     # reverted to 3; had been changed to 4
NUM_EPOCHS = 18            # unchanged
SRC_VOCAB_SIZE = tokenizer.vocab_size
TGT_VOCAB_SIZE = tokenizer.vocab_size
|
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # fall back to CPU when no GPU is available

|
transformer = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
                                 NHEAD, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, FFN_HID_DIM)
# The hyperparameters above must match the ones used to train the checkpoint
transformer.load_state_dict(torch.load('./transformer_translation.pth', map_location=DEVICE))
transformer = transformer.to(DEVICE)
print('Starting translation...')
print(translate(transformer, "Eine Gruppe von Freunden spielt Billiade."))
print('Translation finished!')
|
```
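
The script expects a state dict at `./transformer_translation.pth` but does not include the training run that produces it. A minimal sketch of the matching save step, assuming a `transformer` instance with the same hyperparameters has just been trained:

```python
# Hypothetical save step at the end of a matching training run (not shown in this card)
torch.save(transformer.state_dict(), './transformer_translation.pth')
```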
|