Update app.py
Browse files
app.py
CHANGED
@@ -133,8 +133,6 @@ class RAG:
 
         input_text = "answer: " + " ".join(context) + " " + question
 
-        print(input_text)
-
         inputs = self.generator_tokenizer.encode(input_text, return_tensors='pt', max_length=1024, truncation=True).to(device)
         outputs = self.generator_model.generate(inputs, max_length=150, min_length=2, length_penalty=2.0, num_beams=4, early_stopping=True)
 
@@ -159,8 +157,6 @@ class RAG:
         if answer_end_index < answer_start_index:
             answer_start_index, answer_end_index = answer_end_index, answer_start_index
 
-        print(answer_start_index, answer_end_index)
-
         predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
         answer = self.generator_tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
         answer = answer.replace('\n', ' ').strip()
|
|
Resulting file after the change:

133
134         input_text = "answer: " + " ".join(context) + " " + question
135
136         inputs = self.generator_tokenizer.encode(input_text, return_tensors='pt', max_length=1024, truncation=True).to(device)
137         outputs = self.generator_model.generate(inputs, max_length=150, min_length=2, length_penalty=2.0, num_beams=4, early_stopping=True)
138

157         if answer_end_index < answer_start_index:
158             answer_start_index, answer_end_index = answer_end_index, answer_start_index
159
160         predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
161         answer = self.generator_tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
162         answer = answer.replace('\n', ' ').strip()