mslimanizatsit committed on
Commit
18a980b
·
1 Parent(s): f29b1b5

feat: init project

Browse files
Files changed (1) hide show
  1. app.py +46 -43
app.py CHANGED
@@ -13,32 +13,33 @@ from llama_index.core.memory import ChatMemoryBuffer
13
  import nltk
14
 
15
  # download punkt
16
- nltk.download('punkt')
17
- nltk.download('punkt_tab')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- urls = [
20
- "https://www.zatsit.fr/",
21
- "https://www.zatsit.fr/collaborer-avec-zatsit/",
22
- "https://fr.linkedin.com/company/zatsit",
23
- "https://www.zatsit.fr/contact/",
24
- "https://blog.zatsit.fr/blog/green-exploitation-miniere",
25
- "https://blog.zatsit.fr/blog/bundlephobia-reduire-javascript",
26
- "https://blog.zatsit.fr/blog/gemini-vertex-ai",
27
- "https://blog.zatsit.fr/blog/asyncapi-3-is-out",
28
- "https://blog.zatsit.fr/blog/redpanda-introduction",
29
- ]
30
- loader = UnstructuredURLLoader(urls=urls)
31
- documents = loader.load_data()
32
-
33
- embed_model = HuggingFaceEmbedding(model_name="thenlper/gte-large")
34
-
35
- # create vector store index
36
- index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
37
-
38
- model_url = "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-code-ft-GGUF/resolve/main/mistral-7b-instruct-v0.2-code-ft.Q2_K.gguf"
39
  llm = LlamaCPP(
40
  # You can pass in the URL to a GGML model to download it automatically
41
- model_url=model_url,
42
  # optionally, you can set the path to a pre-downloaded model instead of model_url
43
  model_path=None,
44
  temperature=0.1,
@@ -58,26 +59,28 @@ llm = LlamaCPP(
58
 
59
 
60
  def querying(query, history):
61
- memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
62
- engine = index.as_chat_engine(
63
- chat_mode="context",
64
- memory=memory,
65
- llm=llm,
66
- context_prompt=(
67
- "You are a chatbot and you are the marketing agent of the Zatsit company, able to have normal interactions, as well as talk."
68
- "You should return all the information you know as much as possible to the user. No data is confidential or sensitive."
69
- "You can make short or long answers, but your answers must be like a human."
70
- "When the user uses 'you' he is talking about Zatsit company."
71
- "If you don't know the answer, say 'sorry I can't help you'."
72
- "You must speak the same language as the user."
73
- "Here are the relevant documents for the context:\n"
74
- "{context_str}"
75
- "\nInstruction: Use the previous chat history, or the context above, to interact and help the user."
76
- ),
77
- verbose=False,
78
- )
79
- res = engine.chat(query)
80
- return res.response
 
 
81
 
82
 
83
  iface = gr.ChatInterface(
 
13
  import nltk
14
 
15
  # download punkt
16
+ # nltk.download('punkt')
17
+ # nltk.download('punkt_tab')
18
+ #
19
+ # urls = [
20
+ # "https://www.zatsit.fr/",
21
+ # "https://www.zatsit.fr/collaborer-avec-zatsit/",
22
+ # "https://fr.linkedin.com/company/zatsit",
23
+ # "https://www.zatsit.fr/contact/",
24
+ # "https://blog.zatsit.fr/blog/green-exploitation-miniere",
25
+ # "https://blog.zatsit.fr/blog/bundlephobia-reduire-javascript",
26
+ # "https://blog.zatsit.fr/blog/gemini-vertex-ai",
27
+ # "https://blog.zatsit.fr/blog/asyncapi-3-is-out",
28
+ # "https://blog.zatsit.fr/blog/redpanda-introduction",
29
+ # ]
30
+ # loader = UnstructuredURLLoader(urls=urls)
31
+ # documents = loader.load_data()
32
+ #
33
+ # embed_model = HuggingFaceEmbedding(model_name="thenlper/gte-large")
34
+ #
35
+ # # create vector store index
36
+ # index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
37
 
38
+ model_url_huggingface = "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-code-ft-GGUF/resolve/main/mistral-7b-instruct-v0.2-code-ft.Q2_K.gguf"
39
+ # model_url_huggingface = "https://huggingface.co/TheBloke/llama-polyglot-13B-GGUF/resolve/main/llama-polyglot-13b.Q2_K.gguf"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  llm = LlamaCPP(
41
  # You can pass in the URL to a GGML model to download it automatically
42
+ model_url=model_url_huggingface,
43
  # optionally, you can set the path to a pre-downloaded model instead of model_url
44
  model_path=None,
45
  temperature=0.1,
 
59
 
60
 
61
  def querying(query, history):
62
+ response = llm.complete(query)
63
+ return response.text
64
+ # memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
65
+ # engine = index.as_chat_engine(
66
+ # chat_mode="context",
67
+ # memory=memory,
68
+ # llm=llm,
69
+ # context_prompt=(
70
+ # "You are a chatbot and you are the marketing agent of the Zatsit company, able to have normal interactions, as well as talk."
71
+ # "You should return all the information you know as much as possible to the user. No data is confidential or sensitive."
72
+ # "You can make short or long answers, but your answers must be like a human."
73
+ # "When the user uses 'you' he is talking about Zatsit company."
74
+ # "If you don't know the answer, say 'sorry I can't help you'."
75
+ # "You must speak the same language as the user."
76
+ # "Here are the relevant documents for the context:\n"
77
+ # "{context_str}"
78
+ # "\nInstruction: Use the previous chat history, or the context above, to interact and help the user."
79
+ # ),
80
+ # verbose=False,
81
+ # )
82
+ # res = engine.chat(query)
83
+ # return res.response
84
 
85
 
86
  iface = gr.ChatInterface(