ahmed792002 committed on
Commit
4559878
·
verified ·
1 Parent(s): 73f0168

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -14,20 +14,21 @@ def chatbot(query, history, system_message, max_length, temperature, top_k, top_
14
 
15
  # Tokenize input query
16
  input_ids = tokenizer.encode(query, return_tensors="pt")
17
-
18
- # Generate text using the model
19
- final_outputs = model.generate(
20
- input_ids,
21
- do_sample=True,
22
- max_length=int(max_length), # Convert max_length to integer
23
- temperature=float(temperature), # Convert temperature to float
24
- top_k=int(top_k), # Convert top_k to integer
25
- top_p=float(top_p), # Convert top_p to float
26
- pad_token_id=tokenizer.pad_token_id,
27
- )
28
-
29
- # Decode generated text
30
- response = tokenizer.decode(final_outputs[0], skip_special_tokens=True)
 
31
  return response.split('"')[1]
32
 
33
  # Gradio ChatInterface
 
14
 
15
  # Tokenize input query
16
  input_ids = tokenizer.encode(query, return_tensors="pt")
17
+ response = '.'
18
+ while response=='.':
19
+ # Generate text using the model
20
+ final_outputs = model.generate(
21
+ input_ids,
22
+ do_sample=True,
23
+ max_length=int(max_length), # Convert max_length to integer
24
+ temperature=float(temperature), # Convert temperature to float
25
+ top_k=int(top_k), # Convert top_k to integer
26
+ top_p=float(top_p), # Convert top_p to float
27
+ pad_token_id=tokenizer.pad_token_id,
28
+ )
29
+ # Decode generated text
30
+ response = tokenizer.decode(final_outputs[0], skip_special_tokens=True)
31
+
32
  return response.split('"')[1]
33
 
34
  # Gradio ChatInterface