import streamlit as st
import pandas as pd
import torch
from transformers import pipeline
import datetime
# from datasets import load_dataset
# dataset = load_dataset("wikitablequestions", trust_remote_code=True)
# item = dataset["test"][10]  # pick one test example
# def to_pandas(item):
#     return pd.DataFrame(item['table']["rows"], columns=item['table']["header"])
# df = to_pandas(item)
# print(df.head())
# Load the table, keeping every cell as a string (the TAPAS tokenizer expects text cells)
df = pd.read_csv("anomalies.csv", quotechar='"', dtype=str)
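# Note: if the CSV were instead loaded with inferred dtypes, df = df.astype(str)
# would produce the same all-string table the pipeline needs.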
# Load the TAPAS table-question-answering pipeline once and reuse it across questions
@st.cache_resource
def load_tqa_pipeline():
    return pipeline(task="table-question-answering", model="google/tapas-large-finetuned-wtq")

# Function to generate a response using the TAPAS model
def response(user_question, df):
    a = datetime.datetime.now()
    tqa = load_tqa_pipeline()
    answer = tqa(table=df, query=user_question)['answer']
    query_result = {
        "Resposta": answer
    }
    b = datetime.datetime.now()
    print(b - a)  # log how long inference took
    return query_result
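# Example (hypothetical question, for illustration only) of calling the function
# outside Streamlit once `df` has been loaded above:
#   response("How many rows are in the table?", df)
#   # -> {"Resposta": "<the TAPAS pipeline's top answer as a string>"}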
# Streamlit interface
st.markdown("""
""", unsafe_allow_html=True)
# Chat history
if 'history' not in st.session_state:
    st.session_state['history'] = []
# Input box for user question
user_question = st.text_input("Escreva sua questão aqui:", "")
if user_question:
    # Add a human emoji when the user asks a question
    st.session_state['history'].append(('👤', user_question))
    st.markdown(f"**👤 {user_question}**")
    # Generate the response
    bot_response = response(user_question, df)["Resposta"]
    # Add a robot emoji when showing the generated response
    st.session_state['history'].append(('🤖', bot_response))
    st.markdown(f"**🤖 {bot_response}**", unsafe_allow_html=True)
# Clear history button
if st.button("Limpar"):
    st.session_state['history'] = []
# Display chat history
for sender, message in st.session_state['history']:
    if sender == '👤':
        st.markdown(f"**👤 {message}**")
    elif sender == '🤖':
        st.markdown(f"**🤖 {message}**", unsafe_allow_html=True)