Update README.md
Browse files
README.md
CHANGED
@@ -65,15 +65,15 @@ model= AutoModelForCausalLM.from_pretrained("tuanle/VN-News-GPT2").to(device)
|
|
 65      input_ids = tokenizer.encode(text, return_tensors='pt').to(device)
 66      sample_outputs = model.generate(input_ids,
 67                                      do_sample=True,
 68 -                                    max_length=
 69 -                                    min_length=
 70                                      # temperature = .8,
 71 -                                    top_k=
 72 -                                    top_p =
 73 -                                    num_beams=
 74                                      early_stopping= True,
 75                                      no_repeat_ngram_size= 2 ,
 76 -                                    num_return_sequences=
 77
 78      for i, sample_output in enumerate(sample_outputs):
 79          temp = tokenizer.decode(sample_output.tolist())
 65      input_ids = tokenizer.encode(text, return_tensors='pt').to(device)
 66      sample_outputs = model.generate(input_ids,
 67                                      do_sample=True,
 68 +                                    max_length= 768,
 69 +                                    min_length= 60,
 70                                      # temperature = .8,
 71 +                                    top_k= 100,
 72 +                                    top_p = 0.7,
 73 +                                    num_beams= 5,
 74                                      early_stopping= True,
 75                                      no_repeat_ngram_size= 2 ,
 76 +                                    num_return_sequences= 3)
 77
 78      for i, sample_output in enumerate(sample_outputs):
 79          temp = tokenizer.decode(sample_output.tolist())