import torch
import spaces
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
import os
from huggingface_hub import login

# Maximum number of tokens the translation pipeline may generate.
max_length = 512

# Hub token used to download the (private) finetuned checkpoints below.
# Only call login() when the token is actually set: login(token=None)
# would fail (or try to prompt) at import time in environments where
# HF_SPACE_TOKEN is not configured.
auth_token = os.getenv('HF_SPACE_TOKEN')
if auth_token:
    login(token=auth_token)


# Direction-specific finetuned NLLB checkpoints, keyed by (src, tgt)
# FLORES-200 language codes.
_MODEL_IDS = {
    ("fra_Latn", "mos_Latn"): "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4",
    ("mos_Latn", "fra_Latn"): "ArissBandoss/nllb-200-distilled-600M-finetuned-mos-to-fr-V5",
}
# Fallback for any other direction (matches the original else-branch).
_DEFAULT_MODEL_ID = "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4"

# Cache model_id -> (tokenizer, model) so repeated calls do not
# re-download and re-instantiate the same checkpoint on every request.
_MODEL_CACHE = {}


@spaces.GPU
def goai_traduction(text, src_lang, tgt_lang):
    """Translate ``text`` between French and Mooré with a finetuned NLLB model.

    Parameters
    ----------
    text : str
        Source text to translate.
    src_lang, tgt_lang : str
        NLLB (FLORES-200) language codes, e.g. ``"fra_Latn"`` / ``"mos_Latn"``.
        Unknown direction pairs fall back to the fr->mos checkpoint,
        preserving the original behavior.

    Returns
    -------
    str
        The translated text.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_id = _MODEL_IDS.get((src_lang, tgt_lang), _DEFAULT_MODEL_ID)

    # Load once per checkpoint; subsequent calls reuse the cached pair.
    if model_id not in _MODEL_CACHE:
        tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id, token=auth_token)
        _MODEL_CACHE[model_id] = (tokenizer, model)
    tokenizer, model = _MODEL_CACHE[model_id]

    # The pipeline itself is cheap to build; the expensive parts
    # (tokenizer/model) come from the cache above.
    trans_pipe = pipeline(
        "translation",
        model=model,
        tokenizer=tokenizer,
        src_lang=src_lang,
        tgt_lang=tgt_lang,
        max_length=max_length,
        device=device,
    )

    return trans_pipe(text)[0]["translation_text"]