Update app.py
app.py
CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
 modelname="gpt2"
 config = AutoConfig.from_pretrained(modelname)
 tokenizer = AutoTokenizer.from_pretrained(modelname)
-model = AutoModelForCausalLM.from_pretrained(modelname,config=config)
+model = AutoModelForCausalLM.from_pretrained(modelname,config=config)


 def botsay(user_input):
@@ -29,7 +29,7 @@ def botsay(user_input):
     if length >limit:
         gen_tokens="⚠️sorry length limit. please reload the browser."
         return gen_tokens
-    outs=model(input_ids=input_ids
+    outs=model(input_ids=input_ids)
     topk = torch.topk(outs.logits.squeeze()[-1,:],k=j+1).indices
     if new_token =="that":
         that_id = 326
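The substantive fix in this commit is the missing closing parenthesis on the forward pass at line 32. Below is a minimal sketch of how that call and the top-k lookup fit together, assuming the surrounding `botsay` state (`input_ids`, `j`) roughly as it appears in the hunk; the prompt text and the greedy `j = 0` choice are illustrative assumptions, not taken from the app.

```python
# Sketch under stated assumptions: GPT-2 via transformers, torch installed.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig

modelname = "gpt2"
config = AutoConfig.from_pretrained(modelname)
tokenizer = AutoTokenizer.from_pretrained(modelname)
model = AutoModelForCausalLM.from_pretrained(modelname, config=config)

# Hypothetical prompt and candidate rank, standing in for botsay's loop state.
input_ids = tokenizer("Hello, how are", return_tensors="pt").input_ids
j = 0  # which ranked candidate to pick (0 = most likely token)

with torch.no_grad():
    outs = model(input_ids=input_ids)  # the fixed call: closing parenthesis restored

# logits has shape (batch, seq_len, vocab); squeeze the batch dim and take the
# last position's scores, i.e. the distribution over the next token.
topk = torch.topk(outs.logits.squeeze()[-1, :], k=j + 1).indices
next_id = topk[j].item()
print(tokenizer.decode([next_id]))
```

Without the closing parenthesis, line 32 is a syntax error and the Space fails to start, so the one-character change is what makes `botsay` runnable again.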