Ilyas KHIAT committed · Commit 42117bb
1 Parent(s): c089258

chatbot
Browse files:
- agents_page/recommended_agent.py +130 -18
- app.py +2 -2
- audit_page/audit.py +6 -1
- requirements.txt +1 -0
- utils/audit/rag.py +52 -0
agents_page/recommended_agent.py CHANGED
@@ -1,6 +1,13 @@
 import streamlit as st
 from utils.audit.response_llm import generate_response_via_langchain
 from textwrap import dedent
+import streamlit as st
+from langchain_openai import ChatOpenAI
+from langchain_mistralai import ChatMistralAI
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.messages import AIMessage, HumanMessage
+
 #st.set_page_config(page_title="Agents recommandés", page_icon="", layout="wide")
 def remove_images_from_content(content):
     filtered_content = {}
@@ -12,8 +19,78 @@ def remove_images_from_content(content):
 
     return filtered_content
 
+def get_response(user_query, chat_history, db, llm=None, history_limit=5, stream=True):
+    retriever = db.as_retriever()
+    context = retriever.invoke(user_query)
+    template = """
+    Étant donné l'historique de la conversation : {chat_history}, le contexte qui est le document : {context}, et la question de l'utilisateur : {user_question}, repond comme un expert en agent IA.
+    Assurez-vous que la réponse soit adaptée au niveau d'expertise de l'utilisateur et aux spécificités du contexte fourni.
+    """
+
+    prompt = ChatPromptTemplate.from_template(template)
+
+    #llm = ChatOpenAI(model="gpt-4o")
+    if not llm:
+        llm = ChatOpenAI(model="gpt-4o-mini")
+    elif llm == "GPT-4o":
+        llm = ChatOpenAI(model="gpt-4o")
+    elif llm == "Mistral Large 2 (FR)":
+        llm = ChatMistralAI(model_name="mistral-large-2407")
+    elif llm == "GPT-4o-mini":
+        llm = ChatOpenAI(model="gpt-4o-mini")
+    elif llm == "Mistral Nemo (FR)":
+        llm = ChatMistralAI(model_name="open-mistral-nemo-2407")
+
+    chain = prompt | llm
+
+    if not stream:
+        return chain.invoke({
+            "context": context,
+            "chat_history": chat_history[-history_limit:],
+            "user_question": user_query,
+        })
+
+    chain = chain | StrOutputParser()
+
+    if history_limit:
+        return chain.stream({
+            "context": context,
+            "chat_history": chat_history[-history_limit:],
+            "user_question": user_query,
+        })
+
+    return chain.stream({
+        "context": context,
+        "chat_history": chat_history,
+        "user_question": user_query,
+    })
+
+def handle_display_models(index, models_names):
+    model = st.radio("Choisir un modèle", models_names, index=index)
+    return model
+
 def recommended_agent_main():
     st.title("Agents recommandés")
+    models_names = ["GPT-4o", "GPT-4o-mini"]
+
+    if "chat_history" not in st.session_state:
+        st.session_state.chat_history = []
+
+    if "model" not in st.session_state:
+        st.session_state.model = "GPT-4o-mini"
+
+    header = st.container()
+    col1, col2 = header.columns([1, 2])
+
+    with col1.popover("Modèles disponibles"):
+        new_model = handle_display_models(models_names.index(st.session_state.model), models_names)
+
+    st.session_state.model = new_model
+
+    st.markdown(f"- **{st.session_state.model}**")
 
     if "audit" not in st.session_state or st.session_state.audit == {}:
         st.error("Veuillez d'abord effectuer un audit pour obtenir des recommandations d'agents.")
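Note on the new get_response: it builds a prompt | llm chain, returns chain.invoke(...) when stream=False, and otherwise returns the generator produced by chain.stream(...) once StrOutputParser is attached. A minimal sketch of consuming it outside Streamlit — assuming a FAISS store `db` built by utils/audit/rag.setup_rag and OPENAI_API_KEY set in the environment; the queries are illustrative, not part of the commit:

from langchain_core.messages import AIMessage, HumanMessage

chat_history = [HumanMessage(content="Résume le document audité.")]
pieces = []
# Each chunk is a str once StrOutputParser is in the chain;
# st.write_stream performs this same accumulation while rendering.
for chunk in get_response("Quels agents recommandes-tu ?", chat_history, db=db, llm="GPT-4o-mini"):
    pieces.append(chunk)
chat_history.append(AIMessage(content="".join(pieces)))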
@@ -35,26 +112,33 @@ def recommended_agent_main():
 
     ressources = content
 
-    prompt = '''
-    * Nom
-    * Rôle
-    * Objectifs
-    * Outils utilisés par l'agent
-    * Tâches réalisées par l'agents
-    * Compétences de l'agent (backstory)
-    Une fois ce travail réalisé, tu proposes une série de 3 missions avec objectifs SMART pour chacun des agents Sol. A et Sol. B en présentation les résultats dans un tableau contenant :
-    Nom de l’agent
-    Objectifs à atteindre
+    prompt = '''
+    Tu es designer en intelligence artificielle (IA) spécialisé dans la création d'agents IA autonomes et performants.
+
+    A partir de ressources fournies par l'utilisateur (texte, documents, images, audio), tu es chargé de réaliser les tâches suivantes :
+    A/ Faire un résumé des ressources fournies en 500 caractères maximum
+    B/ Suggérer la création d'agents autonomes pour mettre en pratique les informations contenues dans les ressources fournies.
+
+    Tu proposes deux solutions :
+
+    Sol. A : 1 seul agent IA dont tu suggéreras :
+    * Nom
+    * Rôle
+    * Objectifs
+    * Outils utilisés par l'agent
+    * Tâches réalisées par l'agents
+    * Compétences de l'agent (backstory)
+
+    Sol. B : 1 équipe d'agents tu suggéreras :
+    * Le nombre d'agents
+    * Pour chacune d'eux [Nom, Rôle, Objectifs, Outils utilisés par l'agent, Tâches réalisées par l'agents, Compétences de l'agent (backstory)]
+
+    Une fois ce travail réalisé, tu proposes une série de 3 missions avec objectifs SMART pour chacun des agents Sol. A et Sol. B en présentation les résultats dans un tableau contenant :
+    * Nom de l’agent
+    * Objectifs à atteindre
     '''
 
     #display prompt and modify it
@@ -63,9 +147,37 @@ def recommended_agent_main():
     if st.button("Générer les recommandations"):
         resource_prompt = f'''Ressources fournies par l'utilisateur :{ressources}'''
         prompt_modified = f"{prompt_modified}\n{resource_prompt}"
+        with st.chat_message("AI"):
+            st.session_state.response_llm = st.write_stream(generate_response_via_langchain(query=prompt_modified, stream=True))
+        st.session_state.chat_history.append(AIMessage(content=st.session_state.response_llm))
     elif st.session_state.response_llm:
         st.info("la dernière réponse générée est affichée ci-dessous")
-        st.
+        with st.chat_message("AI"):
+            st.write(st.session_state.response_llm)
+
+    for message in st.session_state.chat_history[1:]:
+        if isinstance(message, AIMessage):
+            with st.chat_message("AI"):
+                st.markdown(message.content)
+        elif isinstance(message, HumanMessage):
+            with st.chat_message("Moi"):
+                st.write(message.content)
+
+    user_query = st.chat_input("Par ici ...")
+    if user_query is not None and user_query != "":
+        st.session_state.chat_history.append(HumanMessage(content=user_query))
+
+        with st.chat_message("Moi"):
+            st.markdown(user_query)
+
+        with st.chat_message("AI"):
+            st.markdown(f"**{st.session_state.model}**")
+
+            response = st.write_stream(get_response(user_query, st.session_state.chat_history, db=st.session_state.vectorstore, llm=st.session_state.model, stream=True))
+        st.session_state.chat_history.append(AIMessage(content=response))
 
 recommended_agent_main()
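One detail shared by get_response and the chat loop above: history is windowed with chat_history[-history_limit:], so with the default history_limit=5 the model only ever sees the five most recent messages. A tiny runnable illustration of that slicing:

# How the history window behaves (history_limit = 5).
history = [f"message {i}" for i in range(1, 9)]
print(history[-5:])  # ['message 4', 'message 5', 'message 6', 'message 7', 'message 8']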
app.py CHANGED
@@ -17,8 +17,8 @@ def main():
 
     pg = st.navigation(
         {
-            "Audit de contenus": [audit_page
-            "Equipe d'agents IA": [recommended_agents
+            "Audit de contenus": [audit_page],
+            "Equipe d'agents IA": [recommended_agents],
             "Chatbot": [chatbot],
             "Documentation": [documentation]
         }
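For context on this fix: st.navigation takes a dict mapping section labels to lists of st.Page objects and returns a page to run. A minimal standalone sketch of the pattern — page paths and titles here are illustrative, not taken from the repo:

import streamlit as st

# Hypothetical page declarations, for illustration only.
audit_page = st.Page("audit_page/audit.py", title="Audit de contenus")
recommended_agents = st.Page("agents_page/recommended_agent.py", title="Agents recommandés")

pg = st.navigation({
    "Audit de contenus": [audit_page],
    "Equipe d'agents IA": [recommended_agents],
})
pg.run()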
audit_page/audit.py CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
 import pymupdf as fitz
 import pyperclip
 from utils.audit.audit_doc import audit_descriptif_pdf,audit_text
+from utils.audit.rag import setup_rag
 import dotenv
 from utils.audit.audit_audio import evaluate_audio_quality
 from PIL import Image
@@ -56,7 +57,7 @@ def display_content_doc(content:dict,col:st):
     else:
         text = page["texte"]
 
-    col.
+    col.code(text, language="text")
 
     elif option == "liens":
         if number == 0:
@@ -208,6 +209,8 @@ def audit_main():
         st.session_state.name_file = ""
     if "audit_simplified" not in st.session_state:
         st.session_state.audit_simplified = {}
+    if "vectorstore" not in st.session_state:
+        st.session_state.vectorstore = None
 
     # File uploader
     uploaded_file = col1.file_uploader("Télécharger un ou plusieurs documents")
@@ -223,5 +226,7 @@ def audit_main():
     if "audit" in st.session_state and st.session_state.audit != {}:
         display_audit(col1)
         handle_display_content(col2)
+        vectorstore = setup_rag(st.session_state.audit_simplified["type de fichier"], st.session_state.audit["content"])
+        st.session_state.vectorstore = vectorstore
 
 audit_main()
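Since Streamlit re-executes the script on every interaction, the setup_rag call above re-chunks and re-embeds the audited document on each rerun. A hedged variant that reuses the index across reruns — the guard is hypothetical, not part of this commit:

# Hypothetical guard: build the index once, then reuse it from session state.
if st.session_state.vectorstore is None:
    st.session_state.vectorstore = setup_rag(
        st.session_state.audit_simplified["type de fichier"],
        st.session_state.audit["content"],
    )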
requirements.txt CHANGED
@@ -17,3 +17,4 @@ langchain-core
 langchainhub
 langchain-openai
 langchain-mistralai
+faiss-cpu
utils/audit/rag.py ADDED
@@ -0,0 +1,52 @@
+import streamlit as st
+from dotenv import load_dotenv
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain_community.chat_models import ChatOpenAI
+from langchain.llms import HuggingFaceHub
+from langchain import hub
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnablePassthrough
+from langchain_community.document_loaders import WebBaseLoader
+
+
+def get_text_from_content_for_doc(content):
+    text = ""
+    for page in content:
+        text += content[page]["texte"]
+    return text
+
+def get_text_from_content_for_audio(content):
+    return content["transcription"]
+
+
+def get_text_chunks(text):
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=500,      # character length of each chunk
+        chunk_overlap=100,   # character overlap between consecutive chunks
+        length_function=len  # measure length in characters (the python len() fn.)
+    )
+    chunks = text_splitter.split_text(text)
+    return chunks
+
+def get_vectorstore(text_chunks):
+    embedding = OpenAIEmbeddings(model="text-embedding-3-small")
+    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embedding)
+    return vectorstore
+
+def setup_rag(file_type, content):
+    if file_type == "pdf":
+        text = get_text_from_content_for_doc(content)
+    elif file_type == "audio":
+        text = get_text_from_content_for_audio(content)
+
+    chunks = get_text_chunks(text)
+
+    vectorstore = get_vectorstore(chunks)
+
+    return vectorstore
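End to end, the new module turns audited content into a retriever for the chatbot. A minimal usage sketch, assuming OPENAI_API_KEY is set and a content dict shaped like the audit output (page keys mapping to {"texte": ...}); the sample text is illustrative:

from utils.audit.rag import setup_rag

content = {
    1: {"texte": "Première page du document audité."},
    2: {"texte": "Seconde page, avec les détails du processus."},
}

vectorstore = setup_rag("pdf", content)   # split -> embed -> FAISS index
retriever = vectorstore.as_retriever()
docs = retriever.invoke("De quoi parle le document ?")
print([d.page_content for d in docs])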