Update app.py
app.py
CHANGED
@@ -30,8 +30,8 @@ class DocumentRetrievalAndGeneration:
         all_splits = text_splitter.split_documents(documents)
         print('Length of documents:', len(documents))
         print("LEN of all_splits", len(all_splits))
-        for i in range(5):
-            print(all_splits[i].page_content)
+        # for i in range(5):
+        #     print(all_splits[i].page_content)
         return all_splits
 
     def create_faiss_index(self):
@@ -67,7 +67,7 @@ class DocumentRetrievalAndGeneration:
         try:
             with concurrent.futures.ThreadPoolExecutor() as executor:
                 future = executor.submit(self.llm.model.generate, model_inputs, max_new_tokens=1000, do_sample=True)
-                generated_ids = future.result(timeout=60)
+                generated_ids = future.result(timeout=80)  # Timeout set to 80 seconds
             return generated_ids
         except concurrent.futures.TimeoutError:
             return "Text generation process timed out"
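For context on the first hunk: it only comments out a debug loop that printed the first five chunks returned by text_splitter.split_documents. Below is a minimal, self-contained sketch of that splitting step; the RecursiveCharacterTextSplitter class, the chunk sizes, and the sample document are assumptions, since the diff only shows the split_documents call.

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document

# Hypothetical input; app.py builds `documents` elsewhere (not shown in this diff).
documents = [Document(page_content="word " * 2000, metadata={"source": "example.txt"})]

# Assumed splitter configuration -- only text_splitter.split_documents(documents) appears in the diff.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

all_splits = text_splitter.split_documents(documents)
print('Length of documents:', len(documents))
print("LEN of all_splits", len(all_splits))

# The debug loop that this commit comments out:
# for i in range(5):
#     print(all_splits[i].page_content)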
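The second hunk raises the generation timeout from 60 to 80 seconds. The sketch below shows the same concurrent.futures pattern in isolation, using a stand-in slow_generate function instead of the real self.llm.model.generate call, which is not part of this diff.

import concurrent.futures
import time

def slow_generate(prompt, max_new_tokens=1000):
    # Stand-in for self.llm.model.generate; pretend generation takes a while.
    time.sleep(2)
    return f"generated up to {max_new_tokens} tokens for: {prompt!r}"

def generate_with_timeout(prompt, timeout=80):
    try:
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(slow_generate, prompt, max_new_tokens=1000)
            # Wait at most `timeout` seconds for a result, as in app.py's future.result(timeout=80).
            return future.result(timeout=timeout)
    except concurrent.futures.TimeoutError:
        return "Text generation process timed out"

print(generate_with_timeout("hello", timeout=80))

Note that ThreadPoolExecutor's context manager calls shutdown(wait=True) on exit, so hitting the timeout returns the error string but still waits for, and does not cancel, the worker thread running the generate call.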