leandroaraujodev committed
Commit · 0411307
1 Parent(s): 7acc0b8
sync github code

Files changed:
- app.py +79 -5
- requirements.txt +8 -1
app.py
CHANGED
@@ -16,9 +16,25 @@ from llama_index.vector_stores.chroma import ChromaVectorStore
 import chromadb
 import nest_asyncio
 
+import os
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+from llama_index.llms.huggingface import HuggingFaceLLM
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_index.core import Settings
+from typing import List, Optional
+from llama_index.core import PromptTemplate
+import torch
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+import huggingface_hub
 
+import logging
+import sys
+from PIL import Image
 
-
+# Browser-tab icon configuration
+
+im = Image.open("./pngegg.png")
+st.set_page_config(page_title="Chatbot Carômetro", page_icon=im, layout="wide")
 
 # List of folders that need to be created
 pastas = ["bm25_retriever", "chat_store", "chroma_db", "documentos"]
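Note: the hunk context references a `for pasta in pastas:` loop whose body falls outside the diff. A minimal sketch, assuming the elided body simply creates each folder (the `os.makedirs(..., exist_ok=True)` call is an assumption, not shown in the commit):

import os

# Folder names taken from the diff; the loop body itself is not shown
# in the commit, so this is an assumed reconstruction of the setup step.
pastas = ["bm25_retriever", "chat_store", "chroma_db", "documentos"]
for pasta in pastas:
    os.makedirs(pasta, exist_ok=True)  # create each folder, skip if it already exists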
@@ -35,7 +51,24 @@ for pasta in pastas:
 
 # Streamlit configuration
 st.sidebar.title("Configuração de LLM")
-sidebar_option = st.sidebar.radio("Selecione o LLM", ["Ollama", "OpenAI"])
+sidebar_option = st.sidebar.radio("Selecione o LLM", ["Ollama", "OpenAI", "HF Local"])
+# logo_url = 'app\logos\logo-sicoob.jpg'
+# st.sidebar.image(logo_url)
+import base64
+
+# Sidebar image configuration
+with open("sicoob-logo.png", "rb") as f:
+    data = base64.b64encode(f.read()).decode("utf-8")
+
+st.sidebar.markdown(
+    f"""
+    <div style="display:table;margin-top:-80%;margin-left:0%;">
+        <img src="data:image/png;base64,{data}" width="250" height="70">
+    </div>
+    """,
+    unsafe_allow_html=True,
+)
+
 
 if sidebar_option == "Ollama":
     Settings.llm = Ollama(model="llama3.2:latest", request_timeout=500.0, num_gpu=1)
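Note: the logo is embedded as a base64 data URI because HTML injected through st.sidebar.markdown cannot reference local files directly. The same technique as a reusable sketch (the helper name is illustrative, not from the commit):

import base64

def image_to_data_uri(path: str, mime: str = "image/png") -> str:
    """Encode a local image as a data URI embeddable in raw HTML."""
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return f"data:{mime};base64,{encoded}"

# e.g.: st.sidebar.markdown(f'<img src="{image_to_data_uri("sicoob-logo.png")}">',
#                           unsafe_allow_html=True)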
@@ -43,9 +76,50 @@ if sidebar_option == "Ollama":
 elif sidebar_option == "OpenAI":
     from llama_index.llms.openai import OpenAI
     from llama_index.embeddings.openai import OpenAIEmbedding
-    os.environ["OPENAI_API_KEY"] = "sk-proj-..."
     Settings.llm = OpenAI(model="gpt-3.5-turbo")
     Settings.embed_model = OpenAIEmbedding(model_name="text-embedding-ada-002")
+elif sidebar_option == 'HF Local':
+
+    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
+
+    query_wrapper_prompt = PromptTemplate(
+        "Below are several documents about a company. "
+        "Write a response that appropriately completes the request.\n\n"
+        "### Instruction:\n{query_str}\n\n### Response:"
+    )
+    # Hugging Face embedding model
+    Settings.embed_model = HuggingFaceEmbedding(
+        model_name="BAAI/bge-small-en-v1.5"
+    )
+    # Hugging Face token
+    HF_TOKEN: Optional[str] = os.getenv("HF_TOKEN")
+    huggingface_hub.login(HF_TOKEN)
+    # Local model loading; uncomment the desired model
+    llm = HuggingFaceLLM(
+        context_window=2048,
+        max_new_tokens=256,
+        generate_kwargs={"do_sample": False},
+        query_wrapper_prompt=query_wrapper_prompt,
+        # model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+        # model_name="Qwen/Qwen2.5-14B-Instruct",
+        # model_name="meta-llama/Llama-3.2-3B",
+        # model_name="HuggingFaceH4/zephyr-7b-beta",
+        # model_name="meta-llama/Meta-Llama-3-8B",
+        # model_name="numind/NuExtract-1.5",
+        model_name="meta-llama/Llama-3.1-8B",
+
+        tokenizer_name="meta-llama/Llama-3.1-8B",
+        device_map="auto",
+        tokenizer_kwargs={"max_length": 2048},
+        # uncomment this if using CUDA to reduce memory usage
+        model_kwargs={"torch_dtype": torch.float16},
+    )
+
+
+    Settings.chunk_size = 512
+    Settings.llm = llm
+
 else:
     raise Exception("Opção de LLM inválida!")
 
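Note: this hunk deletes a hardcoded OpenAI key (redacted above) and reads the Hugging Face token from the environment. A hedged sketch of the safer pattern; the fail-fast guard is an assumption, since huggingface_hub.login(None) falls back to an interactive prompt, which a headless Space cannot answer:

import os
import huggingface_hub

# Assumed hardening of the pattern in the diff: fail fast when the
# credential is missing instead of passing None to login().
hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    raise RuntimeError("HF_TOKEN is not set; gated models such as meta-llama/Llama-3.1-8B require it")
huggingface_hub.login(token=hf_token)

The OpenAI client likewise picks up OPENAI_API_KEY from the environment on its own, so once the secret is configured for the Space no os.environ assignment is needed in code.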
@@ -139,8 +213,8 @@ else:
     chat_store.persist(persist_path=chat_store_path)
 
 # Chatbot interface
-st.title("Chatbot
-st.write("Este chatbot
+st.title("Chatbot Carômetro")
+st.write("Este chatbot pode te ajudar a conseguir informações relevantes sobre os carômetros da Sicoob.")
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = []
 
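Note: st.session_state is what keeps chat_history alive across reruns, since Streamlit re-executes the script from the top on every interaction. A self-contained sketch of the pattern, assuming a Streamlit version with st.chat_message; the sample turn is hypothetical:

import streamlit as st

# Initialize once; the list survives reruns within the same browser session.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Hypothetical turn appended for illustration.
st.session_state.chat_history.append(("user", "Olá!"))

# Replay the stored conversation on every rerun.
for role, text in st.session_state.chat_history:
    st.chat_message(role).write(text)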
requirements.txt
CHANGED
@@ -7,4 +7,11 @@ llama-index-llms-ollama==0.3.6
 llama-index-embeddings-ollama==0.3.1
 llama-index-retrievers-bm25==0.4.0
 llama-index-vector-stores-chroma==0.3.0
-openpyxl
+openpyxl
+
+llama-index-llms-huggingface
+llama-index-llms-huggingface-api
+transformers[torch]
+huggingface_hub[inference]
+llama_index.embeddings.huggingface
+llama-index-readers-file
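Note: llama_index.embeddings.huggingface is an odd spelling for a requirements entry, but pip normalizes runs of ".", "_", and "-" to "-" (PEP 503), so it resolves to the llama-index-embeddings-huggingface package. The dotted form only matters at import time, which is how app.py already uses it:

# The pip package llama-index-embeddings-huggingface provides this import path:
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")  # same model app.py configures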