momenaca committed on
Commit
f07b5e8
·
1 Parent(s): d3fe3f2

add features to ease hackathon

Browse files
app.py CHANGED
@@ -47,7 +47,10 @@ synthesis_prompt_template = get_synthesis_prompt(config)
47
 
48
  ## Building LLM
49
  print("Building LLM")
50
- llm = get_llm_api()
 
 
 
51
 
52
  ## Loading BDDs
53
  print("Loading Databases")
@@ -58,7 +61,7 @@ if os.getenv("EKI_OPENAI_LLM_DEPLOYMENT_NAME"):
58
  bdd_afp = get_vectorstore_api("afp")
59
 
60
  else:
61
- qdrants_public = get_qdrants_public(config, "your_database_hf")
62
  qdrants = {**qdrants, **qdrants_public}
63
  bdd_presse = None
64
  bdd_afp = None
 
47
 
48
  ## Building LLM
49
  print("Building LLM")
50
+ groq_model_name = (
51
+ config["groq_model_name"] if not os.getenv("EKI_OPENAI_LLM_DEPLOYMENT_NAME") else ""
52
+ )
53
+ llm = get_llm_api(groq_model_name)
54
 
55
  ## Loading BDDs
56
  print("Loading Databases")
 
61
  bdd_afp = get_vectorstore_api("afp")
62
 
63
  else:
64
+ qdrants_public = get_qdrants_public(config)
65
  qdrants = {**qdrants, **qdrants_public}
66
  bdd_presse = None
67
  bdd_afp = None
spinoza_project/config_public.yaml CHANGED
@@ -24,6 +24,9 @@ prompt_naming:
24
  ADEME: "ADEME"
25
  Presse: "Presse"
26
 
 
 
 
27
  query_preprompt: "query: "
28
  passage_preprompt: "passage: "
29
  embedding_model: "intfloat/multilingual-e5-base"
 
24
  ADEME: "ADEME"
25
  Presse: "Presse"
26
 
27
+ groq_model_name: "llama3-groq-70b-8192-tool-use-preview" # llama-3.1-8b-instant / llama3-groq-70b-8192-tool-use-preview / llama-3.2-90b-text-preview / llama-3.2-3b-preview
28
+ database_hf: "your_database_hf"
29
+
30
  query_preprompt: "query: "
31
  passage_preprompt: "passage: "
32
  embedding_model: "intfloat/multilingual-e5-base"
spinoza_project/source/backend/llm_utils.py CHANGED
@@ -42,7 +42,7 @@ class LLM:
42
  return predictions
43
 
44
 
45
- def get_llm_api():
46
  if os.getenv("EKI_OPENAI_LLM_DEPLOYMENT_NAME"):
47
  print("Using Azure OpenAI API")
48
  return LLM(
@@ -62,7 +62,7 @@ def get_llm_api():
62
  print("Using GROQ API")
63
  return LLM(
64
  ChatGroq(
65
- model="llama3-groq-70b-8192-tool-use-preview", # llama-3.1-8b-instant / llama3-groq-70b-8192-tool-use-preview / llama-3.2-90b-text-preview / llama-3.2-3b-preview
66
  temperature=0,
67
  max_tokens=2048,
68
  )
 
42
  return predictions
43
 
44
 
45
+ def get_llm_api(groq_model_name):
46
  if os.getenv("EKI_OPENAI_LLM_DEPLOYMENT_NAME"):
47
  print("Using Azure OpenAI API")
48
  return LLM(
 
62
  print("Using GROQ API")
63
  return LLM(
64
  ChatGroq(
65
+ model=groq_model_name,
66
  temperature=0,
67
  max_tokens=2048,
68
  )
spinoza_project/source/frontend/gradio_utils.py CHANGED
@@ -64,11 +64,11 @@ def get_qdrants(config):
64
  return qdrants
65
 
66
 
67
- def get_qdrants_public(config, repo_id):
68
  qdrants = {
69
  tab: pickle_to_document_store(
70
  hf_hub_download(
71
- repo_id=repo_id,
72
  filename=f"database_{tab}.pickle",
73
  repo_type="dataset",
74
  )
 
64
  return qdrants
65
 
66
 
67
+ def get_qdrants_public(config):
68
  qdrants = {
69
  tab: pickle_to_document_store(
70
  hf_hub_download(
71
+ repo_id=config["database_hf"],
72
  filename=f"database_{tab}.pickle",
73
  repo_type="dataset",
74
  )