Spaces: fschwartzer (Running)
fschwartzer committed: Update app.py
app.py CHANGED
@@ -22,21 +22,23 @@ def response(user_question, table_data):
     # The query should be passed as a list
     encoding = tokenizer(table=table_data, queries=[user_question], padding=True, return_tensors="pt", truncation=True)
 
-    #
-    outputs = model
-
-
-
-
-
-
-
+    # Instead of using generate, we pass the encoding through the model to get the logits
+    outputs = model(**encoding)
+
+    # Extract the answer coordinates
+    predicted_answer_coordinates = outputs.logits.argmax(-1)
+
+    # Decode the answer from the table using the coordinates
+    answer = tokenizer.convert_logits_to_predictions(
+        encoding.data,
+        predicted_answer_coordinates
     )
 
-
+    # Process the answer into a readable format
+    answer_text = answer[0][0][0] if len(answer[0]) > 0 else "Não foi possível encontrar uma resposta"
 
     query_result = {
-        "Resposta":
+        "Resposta": answer_text
     }
 
     b = datetime.datetime.now()
@@ -44,7 +46,6 @@ def response(user_question, table_data):
 
     return query_result
 
-
 # Streamlit interface
 st.markdown("""
 <div style='display: flex; align-items: center;'>