ikeno-ada committed
Commit 615bea5 · verified · 1 Parent(s): 9fb9a13

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -7,7 +7,7 @@ from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer,QuantoC
 # app = FastAPI()
 
 quantization_config = QuantoConfig(weights="int4")
-model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M",quantization_config=quantization_config)
+model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M",quantization_config=quantization_config,use_cache=True)
 model.to_bettertransformer()
 tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
 
@@ -120,7 +120,7 @@ lang_list = list(lang_ids.keys())
 
 def translate(lang, text):
     encoded = tokenizer(text, return_tensors="pt")
-    generated_tokens = model(use_cache=True).generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(lang_ids[lang]))
+    generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(lang_ids[lang]))
     return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
 
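For reference, a minimal sketch of the corrected pattern in isolation: use_cache=True is passed to from_pretrained and generation goes through model.generate(...), instead of the old line's model(use_cache=True).generate(...), which runs a forward pass and then tries to call .generate() on its output. The Japanese sample text, the explicit src_lang assignment, and the omission of model.to_bettertransformer() are illustrative choices for this sketch, not part of app.py, which resolves languages through its own lang_ids dictionary.

# Minimal sketch of the corrected usage, for reference only.
# Assumptions: optimum-quanto is installed so QuantoConfig(weights="int4") works;
# the Japanese source text and explicit src_lang are illustrative and not taken
# from app.py, which maps display names to language codes via lang_ids.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, QuantoConfig

quantization_config = QuantoConfig(weights="int4")
# use_cache is a model/config argument, so it belongs in from_pretrained
# (or in generate), not in a forward call like model(use_cache=True).
model = M2M100ForConditionalGeneration.from_pretrained(
    "facebook/m2m100_418M",
    quantization_config=quantization_config,
    use_cache=True,
)
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

tokenizer.src_lang = "ja"  # source language code (illustrative)
encoded = tokenizer("こんにちは、世界", return_tensors="pt")
generated_tokens = model.generate(
    **encoded,
    forced_bos_token_id=tokenizer.get_lang_id("en"),  # force English output
)
print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0])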