JosueElias commited on
Commit
b50e558
·
1 Parent(s): f6e7168

Working on frontend.

Browse files
mi_clase.py → RAG.py RENAMED
@@ -1,6 +1,6 @@
1
  # os.environ['CUDA_VISIBLE_DEVICES'] ='0'
2
 
3
- from my_dataset import datasetx
4
  from transformers import AutoModelForMultipleChoice
5
  from transformers import AutoTokenizer
6
  from nltk.corpus import stopwords
 
1
  # os.environ['CUDA_VISIBLE_DEVICES'] ='0'
2
 
3
+ from dataset_with_embeddings import datasetx
4
  from transformers import AutoModelForMultipleChoice
5
  from transformers import AutoTokenizer
6
  from nltk.corpus import stopwords
app.py CHANGED
@@ -1,7 +1,10 @@
 
 
1
  try:
2
- import streamlit as st
3
- from mi_clase import pipeline
4
- st.title("Ask your scientific question!")
 
5
  expected_format = "What is color?\nA)Is a name.\nB)Is something horrible.\nC)I don't know.\nD)You should ask someone else.\nE)Ask in a physics book."
6
  txt = st.text_area(
7
  "follow this format while making your question:",
@@ -28,12 +31,34 @@ try:
28
  "E":e
29
  }
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  try:
32
- answer = pipeline.give_the_best_answer(mi_dict)
33
- st.write(mi_dict)
34
- st.write(answer)
 
 
 
 
 
35
  except Exception as e:
36
- st.error("something bad happend to the model")
37
  st.error(e)
38
 
39
  except Exception as e:
@@ -41,5 +66,5 @@ try:
41
  st.error(e)
42
 
43
  except Exception as e:
44
- st.error("error at the beggining")
45
  st.error(e)
 
1
+ import streamlit as st
2
+
3
  try:
4
+
5
+ from RAG import pipeline
6
+
7
+ st.title("Ask your scientific question! 👨‍⚕")
8
  expected_format = "What is color?\nA)Is a name.\nB)Is something horrible.\nC)I don't know.\nD)You should ask someone else.\nE)Ask in a physics book."
9
  txt = st.text_area(
10
  "follow this format while making your question:",
 
31
  "E":e
32
  }
33
 
34
+ multi = f'''
35
+ ---
36
+ :green[**Question**] 🔎
37
+
38
+ {mi_dict["prompt"]}
39
+
40
+ :green[**Options**] 📗
41
+
42
+ * :blue[**A**] {mi_dict["A"]}
43
+ * :blue[**B**] {mi_dict["B"]}
44
+ * :blue[**C**] {mi_dict["C"]}
45
+ * :blue[**D**] {mi_dict["D"]}
46
+ * :blue[**E**] {mi_dict["E"]}
47
+ ---
48
+ '''
49
+ st.markdown(multi)
50
+
51
  try:
52
+
53
+
54
+
55
+ if st.button('Ask for answer'):
56
+ st.write('The answer is:')
57
+ answer = pipeline.give_the_best_answer(mi_dict)
58
+ st.write(mi_dict)
59
+ #st.write(answer)
60
  except Exception as e:
61
+ st.error("Something bad happened while trying to infer the answer.")
62
  st.error(e)
63
 
64
  except Exception as e:
 
66
  st.error(e)
67
 
68
  except Exception as e:
69
+ st.error("Error most likely related to the import of the object 'pipeline'")
70
  st.error(e)
my_dataset.py → dataset_with_embeddings.py RENAMED
@@ -3,15 +3,21 @@ from datasets import load_from_disk, Dataset
3
  from huggingface_hub import hf_hub_download
4
  from datasets import load_dataset
5
  import faiss
6
- # load wikipedia dataset https://huggingface.co/docs/datasets/loading#hugging-face-hub
7
- datasetx = load_dataset("JosueElias/pipeline_dataset2")
8
  # load faiss file and get route of file https://huggingface.co/docs/huggingface_hub/guides/download#from-latest-version
9
  path2 = hf_hub_download(repo_id="JosueElias/pipeline_faiss", filename="faiss.index", repo_type="dataset")
 
 
 
 
10
  # save wikipedia dataset locally https://huggingface.co/docs/datasets/process#save
11
  datasetx.save_to_disk("./directory")
 
12
  # delete variable to have more memory space
13
  del datasetx
 
14
  # load dataset again in arrow format
15
  datasetx = load_from_disk("./directory/train")
 
16
  # load faiss to dataset
17
  datasetx.load_faiss_index('embeddings', path2)
 
3
  from huggingface_hub import hf_hub_download
4
  from datasets import load_dataset
5
  import faiss
6
+
 
7
  # load faiss file and get route of file https://huggingface.co/docs/huggingface_hub/guides/download#from-latest-version
8
  path2 = hf_hub_download(repo_id="JosueElias/pipeline_faiss", filename="faiss.index", repo_type="dataset")
9
+
10
+ # load wikipedia dataset https://huggingface.co/docs/datasets/loading#hugging-face-hub
11
+ datasetx = load_dataset("JosueElias/pipeline_dataset2")
12
+
13
  # save wikipedia dataset locally https://huggingface.co/docs/datasets/process#save
14
  datasetx.save_to_disk("./directory")
15
+
16
  # delete variable to have more memory space
17
  del datasetx
18
+
19
  # load dataset again in arrow format
20
  datasetx = load_from_disk("./directory/train")
21
+
22
  # load faiss to dataset
23
  datasetx.load_faiss_index('embeddings', path2)