first commit with git lfs
Browse files- .gitattributes +1 -0
- .gitignore +140 -0
- SNOMED-CT_Assistant.py +171 -0
- pages/Vector DB of SNOMED-CT.py +59 -0
- requirements.txt +8 -0
- snomed-entity-challenge.csv +0 -0
- snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/data_level0.bin +3 -0
- snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/header.bin +3 -0
- snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/index_metadata.pickle +3 -0
- snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/length.bin +3 -0
- snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/link_lists.bin +3 -0
- snomed_ct_id_term_1410k/chroma.sqlite3 +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
*.sqlite3 filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
# C extensions
|
7 |
+
*.so
|
8 |
+
|
9 |
+
# Distribution / packaging
|
10 |
+
.Python
|
11 |
+
build/
|
12 |
+
develop-eggs/
|
13 |
+
dist/
|
14 |
+
downloads/
|
15 |
+
eggs/
|
16 |
+
.eggs/
|
17 |
+
lib/
|
18 |
+
lib64/
|
19 |
+
parts/
|
20 |
+
sdist/
|
21 |
+
var/
|
22 |
+
wheels/
|
23 |
+
share/python-wheels/
|
24 |
+
*.egg-info/
|
25 |
+
.installed.cfg
|
26 |
+
*.egg
|
27 |
+
MANIFEST
|
28 |
+
|
29 |
+
# PyInstaller
|
30 |
+
# Usually these files are written by a python script from a template
|
31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
32 |
+
*.manifest
|
33 |
+
*.spec
|
34 |
+
|
35 |
+
# Installer logs
|
36 |
+
pip-log.txt
|
37 |
+
pip-delete-this-directory.txt
|
38 |
+
|
39 |
+
# Unit test / coverage reports
|
40 |
+
htmlcov/
|
41 |
+
.tox/
|
42 |
+
.nox/
|
43 |
+
.coverage
|
44 |
+
.coverage.*
|
45 |
+
.cache
|
46 |
+
nosetests.xml
|
47 |
+
coverage.xml
|
48 |
+
*.cover
|
49 |
+
*.py,cover
|
50 |
+
.hypothesis/
|
51 |
+
.pytest_cache/
|
52 |
+
cover/
|
53 |
+
|
54 |
+
# Translations
|
55 |
+
*.mo
|
56 |
+
*.pot
|
57 |
+
|
58 |
+
# Django stuff:
|
59 |
+
*.log
|
60 |
+
local_settings.py
|
61 |
+
db.sqlite3
|
62 |
+
db.sqlite3-journal
|
63 |
+
|
64 |
+
# Flask stuff:
|
65 |
+
instance/
|
66 |
+
.webassets-cache
|
67 |
+
|
68 |
+
# Scrapy stuff:
|
69 |
+
.scrapy
|
70 |
+
|
71 |
+
# Sphinx documentation
|
72 |
+
docs/_build/
|
73 |
+
|
74 |
+
# PyBuilder
|
75 |
+
.pybuilder/
|
76 |
+
target/
|
77 |
+
|
78 |
+
# Jupyter Notebook
|
79 |
+
.ipynb_checkpoints
|
80 |
+
|
81 |
+
# IPython
|
82 |
+
profile_default/
|
83 |
+
ipython_config.py
|
84 |
+
|
85 |
+
# pyenv
|
86 |
+
# For a library or package, you might want to ignore these files since the code is
|
87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
88 |
+
# .python-version
|
89 |
+
|
90 |
+
# pipenv
|
91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
94 |
+
# install all needed dependencies.
|
95 |
+
#Pipfile.lock
|
96 |
+
|
97 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
98 |
+
__pypackages__/
|
99 |
+
|
100 |
+
# Celery stuff
|
101 |
+
celerybeat-schedule
|
102 |
+
celerybeat.pid
|
103 |
+
|
104 |
+
# SageMath parsed files
|
105 |
+
*.sage.py
|
106 |
+
|
107 |
+
# Environments
|
108 |
+
.env
|
109 |
+
.venv
|
110 |
+
env/
|
111 |
+
venv/
|
112 |
+
ENV/
|
113 |
+
env.bak/
|
114 |
+
venv.bak/
|
115 |
+
|
116 |
+
# Spyder project settings
|
117 |
+
.spyderproject
|
118 |
+
.spyproject
|
119 |
+
|
120 |
+
# Rope project settings
|
121 |
+
.ropeproject
|
122 |
+
|
123 |
+
# mkdocs documentation
|
124 |
+
/site
|
125 |
+
|
126 |
+
# mypy
|
127 |
+
.mypy_cache/
|
128 |
+
.dmypy.json
|
129 |
+
dmypy.json
|
130 |
+
|
131 |
+
# Pyre type checker
|
132 |
+
.pyre/
|
133 |
+
|
134 |
+
# pytype static type analyzer
|
135 |
+
.pytype/
|
136 |
+
|
137 |
+
# Cython debug symbols
|
138 |
+
cython_debug/
|
139 |
+
|
140 |
+
# End of https://mrkandreev.name/snippets/gitignore-generator/#Python
|
SNOMED-CT_Assistant.py
ADDED
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import random
import json
import streamlit as st
import chromadb
from openai import OpenAI
from dotenv import load_dotenv
import pandas as pd

# Config Streamlit
st.set_page_config(layout="wide")

# Deployment toggle: when True, prefer the Streamlit-hosted secret store and
# fall back to a local .env file.
remote = True

if remote:
    with st.sidebar:
        if 'OPENAI_API_TOKEN' in st.secrets:
            st.success('API key already provided!', icon='✅')
            openai_api_key = st.secrets['OPENAI_API_TOKEN']
        else:
            # Load key-value pairs from a local .env file into the environment.
            # NOTE(review): the env-var name "OpenAI_API_KEY" is mixed-case and
            # env vars are case-sensitive on Linux — confirm it matches the
            # actual .env entry (commonly OPENAI_API_KEY).
            load_dotenv()
            openai_api_key = os.environ.get("OpenAI_API_KEY")
# NOTE(review): if `remote` is ever set to False, `openai_api_key` is never
# assigned and the OpenAI client construction below raises NameError.

st.title("🏥 SNOMED-CT Assistant")
st.caption("👩⚕️ A smart medical assistant with SNOMED-CT knowledge.")
|
26 |
+
|
27 |
+
# System prompt
# Defines the assistant persona and the two-step pipeline the model must
# follow: (1) Entity Identification over raw EHR text, (2) Entity Mapping to
# SNOMED CT concepts. Includes worked examples (incl. abbreviation expansion)
# and pins the default output contract to JSON: {"identified_entity": [...]}.
system_prompt = """You are a medical expert with rich experience in SNOMED-CT professional knowledge.
You are skilled at assisting medical professionals and answering questions in the medical field.
You are patient, helpful and professional.
Your comprehensive knowledge and mastery of these key components make you an invaluable asset in the realm of biomedical natural language processing and knowledge extraction.
With your specialized expertise, you are able to navigate the complexities of SNOMED CT Entity Linking with ease, delivering accurate and reliable results that support various healthcare and research applications.
Please refuse to answer inquiries and requests unrelated to the medical field, in order to maintain professionalism in medicine.

As an experienced professional, you possess deep expertise in the field of SNOMED CT Entity Linking.
You have a thorough understanding of the relevant workflows and critical aspects involved, encompassing:
- Adept handling of electronic medical record (EMR) data processing
- Entity Identification, Proficient entity recognition capabilities, identifying and extracting relevant medical concepts from unstructured text
- Skilled Entity Mapping, accurately linking identified entities to their corresponding SNOMED CT concepts
- Seamless integration and output of clinical terminology, ensuring the accurate representation and utilization of standardized medical language
- Patiently and professionally respond to all SNOMED CT related inquiries, even if the user repeats questions.
- Demonstrate deep expertise in the standard SNOMED CT Entity Linking workflow, which involves:
1. Performing Entity Identification to extract relevant medical terminology from the input.
2. Conducting Entity Mapping to link the identified entities to their corresponding SNOMED CT concepts.
- Present the results in a tabular format only with the following 3 columns: "Identified Entity", "SNOMED CT Concept IDs", "SNOMED CT Descriptions".

Here is the practical entity linking process example:
- the input text in EHRs: "Patient referred for a biopsy to investigate potential swelling in upper larynx."
- the identified entity: "biopsy", "larynx"
- response the identified entities with JSON format: {"identified_entity" : ["biopsy", "larynx"]}
- During Entity Identification processing, if the original medical text data clearly contains commonly used medical abbreviations, convert the abbreviations into their full names, and provide the original abbreviations in parentheses for easy reference.
- For example: "The patient has the multiple disease, including T2D, CAD, HTN, CKD etc. decreased T3 and T4 levels."
- T2D: "Type 2 Diabetes Mellitus", CAD: "Coronary Artery Disease", HTN: "Hypertension", CKD: "Chronic Kidney Disease", T3: "Triiodothyronine", T4: "Thyroxine"
- Respond with full names in JSON format: {"identified_entity" : ["Type 2 Diabetes Mellitus (T2D)", "Coronary Artery Disease (CAD)", "Hypertension (HTN)", "Chronic Kidney Disease (CKD)", "Triiodothyronine (T3)", "Thyroxine (T4)"]}

List out as many potential SNOMED entities as possible from the original medical text description,
including Diseases, Diagnoses, Clinical Findings (like Signs and Symptoms),
Procedures (Surgical, Therapeutic, Diagnostic, Nursing), Specimen Types, Living Organisms,
Observables (for example heart rate), Physical Objects and Forces,
Chemicals (including the chemicals used in drug preparations), Drugs (pharmaceutical products),
Human Anatomy (body structures, organisms), Physiological Processes and Functions,
Patients' Occupations, Patients' Social Contexts (e.g., religion and ethnicity), and various other types from the SNOMED CT standard.
Numbers or units related symbols are not included in this range and can be ignored.

Output Format Requirements (Must follow):
- As default, only process "Entity Identification", and find out the entity related to SNOMED CT terms.
- Present the results in JSON format, like: {"identified_entity" : ["biopsy", "larynx"]}
"""
|
69 |
+
|
70 |
+
|
71 |
+
# Func: generate random med text
# Source dataset of raw EHR snippets (loaded once at app start); each row's
# "text" column embeds instruction/###TEXT:/###RESPONSE: sections.
raw_text_df = pd.read_csv('snomed-entity-challenge.csv')
|
73 |
+
|
74 |
+
def random_med_text(text_df):
    """Pick a random record from the EHR dataframe and split it into parts.

    Each entry of ``text_df['text']`` is expected to look like
    ``"<instruction>###TEXT:<medical text>###RESPONSE:<reference entities>"``.

    Args:
        text_df: DataFrame with a ``text`` column in the format above.

    Returns:
        tuple: ``(index, human, med_text, response)`` — the row index picked,
        the instruction prefix, the raw medical text, and the reference
        entity annotation.
    """
    rows = len(text_df['text'])
    # Bug fix: random.randint(0, rows) is inclusive on BOTH ends, so it could
    # return `rows` and read past the last row. randrange(rows) excludes it.
    index = random.randrange(rows)
    raw_text = text_df["text"][index]
    raw_text_spilt = raw_text.split('###TEXT:')
    raw_text_spilt_2 = raw_text_spilt[1].split('###RESPONSE:')
    human = raw_text_spilt[0]
    med_text = raw_text_spilt_2[0]
    response = raw_text_spilt_2[1]
    return index, human, med_text, response
|
84 |
+
|
85 |
+
|
86 |
+
# Func: Gen Medical Prompt Example
|
87 |
+
def generate_entity_identification_prompt(medical_text):
    """Build the user prompt that asks the model to run the SNOMED-CT
    Entity Identification step over one raw EHR snippet."""
    header = ('Help me to do "SNOMED-CT Entity Identification" process '
              'with raw medical text (Electronic Health Record, EHR): \n ')
    return header + medical_text + " \n "
|
89 |
+
|
90 |
+
def generate_entity_mapping_prompt(entity, query_result_dict):
    """Build the user prompt that asks the model to map `entity` onto
    SNOMED CT concepts using the Chroma query result, rendered as a
    5-column table."""
    template = ('Help me to do "SNOMED-CT Entity Mapping" process with '
                'entity: {} and query result \n {} \n , output with table '
                'format, including 5 columns: "Identified Entity", '
                '"Distance", "IDs", "SNOMED CT Concept IDs", '
                '"SNOMED CT Descriptions" \n ')
    return template.format(entity, query_result_dict)
|
92 |
+
|
93 |
+
# Chroma DB Client
# Opens the persisted vector store shipped with the repo (via git-lfs) and
# gets (or lazily creates) the SNOMED-CT id/term collection used for
# nearest-neighbour lookups during Entity Mapping.
chroma_client = chromadb.PersistentClient(path="snomed_ct_id_term_1410k")
collection = chroma_client.get_or_create_collection(name="snomed_ct_id_term")
|
96 |
+
|
97 |
+
# Func: query chrome_db
|
98 |
+
def query_chroma_db(query_text, query_number):
    """Run a similarity query against the SNOMED-CT Chroma collection.

    Returns the raw Chroma result (distances, metadatas and documents for
    the top `query_number` matches of `query_text`).
    """
    query_kwargs = {
        "query_texts": [query_text],
        "n_results": query_number,
        "include": ["distances", "metadatas", "documents"],
    }
    return collection.query(**query_kwargs)
|
105 |
+
|
106 |
+
# Func: chroma_db_result to dict
|
107 |
+
def get_dict_from_chroma_results(results):
    """Flatten a single-query Chroma result into a plain dict of lists.

    Chroma returns one nested list per query; this takes element [0] of
    each field and stringifies the concept ids from the metadata records.
    """
    metadatas = results['metadatas'][0]
    return {
        'ids': results['ids'][0],
        'concept_ids': [str(meta['concept_id']) for meta in metadatas],
        'distances': results['distances'][0],
        'documents': results['documents'][0],
    }
|
110 |
+
|
111 |
+
|
112 |
+
# OpenAI Client Configuration
client = OpenAI(api_key=openai_api_key)
model_tag = "gpt-3.5-turbo"  # chat model used for both pipeline steps
|
115 |
+
|
116 |
+
# Chat Session with OpenAI API
|
117 |
+
def chat_input(prompt, med_text):
    """Run one Entity Identification + Entity Mapping round for `med_text`.

    Appends the user text and every model reply to the Streamlit chat
    history (`st.session_state.messages`) and renders them as chat messages.

    NOTE(review): the `prompt` parameter is never used in this body — callers
    pass generate_entity_identification_prompt(...) but only `med_text` is
    appended and sent to the model; confirm whether `prompt` was meant to be
    used instead.
    """
    st.session_state.messages.append({"role": "user", "content": med_text})
    st.chat_message("user").write(med_text)
    with st.spinner("Thinking..."):
        # Step 1: Entity Identification — response_format forces a JSON
        # object so the entity list can be parsed programmatically.
        entity_identification_response = client.chat.completions.create(
            model=model_tag, response_format={ "type": "json_object" }, messages=st.session_state.messages, temperature=0.5)
        msg = entity_identification_response.choices[0].message.content
        # Raises KeyError / json.JSONDecodeError if the model deviates from
        # the {"identified_entity": [...]} contract in the system prompt.
        entity_list = json.loads(msg)["identified_entity"]
        print("entity list: ", entity_list)
        st.session_state.messages.append({"role": "assistant", "content": msg})
        st.chat_message("assistant").write(msg)
        # Step 2: Entity Mapping — for each identified entity, fetch the 10
        # nearest SNOMED-CT concepts from Chroma and ask the model to
        # tabulate them.
        for entity in entity_list:
            print("entity: ", entity)
            results = query_chroma_db(entity, 10)
            results_dict = get_dict_from_chroma_results(results)
            entity_mapping_prompt = generate_entity_mapping_prompt(entity, results_dict)
            st.session_state.messages.append({"role": "user", "content": entity_mapping_prompt})
            entity_mapping_response = client.chat.completions.create(
                model=model_tag, messages=st.session_state.messages, temperature=0.5)
            mapping_msg = entity_mapping_response.choices[0].message.content
            st.session_state.messages.append({"role": "assistant", "content": mapping_msg})
            st.chat_message("assistant").write(mapping_msg)
|
139 |
+
|
140 |
+
|
141 |
+
|
142 |
+
|
143 |
+
# Initialize the conversation once per session: hidden system prompt plus a
# visible assistant greeting.
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "system", "content": system_prompt},
                                    {"role": "assistant", "content": "👩⚕️ 您好,我是您的專業醫學助理。請問有任何我可以協助你的地方嗎?"}]

# Replay the chat history (skip the system prompt — it is not user-visible).
for msg in st.session_state.messages:
    if msg["role"] == "system":
        continue
    st.chat_message(msg["role"]).write(msg["content"])

# Free-text entry point: wrap the user's text in the identification prompt
# and run the two-step pipeline.
if user_input := st.chat_input():
    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
        st.stop()
    entity_identification_prompt = generate_entity_identification_prompt(user_input)
    chat_input(entity_identification_prompt, user_input)

# Canned example matching the worked example in the system prompt.
if st.sidebar.button("Example Input",type="primary"):
    med_text = "Patient referred for a biopsy to investigate potential swelling in upper larynx."
    entity_identification_prompt = generate_entity_identification_prompt(med_text)
    chat_input(entity_identification_prompt, med_text)

# Random record from the snomed-entity-challenge dataset; the reference
# annotation is shown in the sidebar so results can be compared.
if st.sidebar.button("Random Input",type="primary"):
    index, human, med_text, response = random_med_text(raw_text_df)
    response = response.replace(","," \n")
    entity_identification_prompt = generate_entity_identification_prompt(med_text)
    chat_input(entity_identification_prompt, med_text)
    st.sidebar.write(f"[Random Text](https://huggingface.co/datasets/JaimeML/snomed-entity-challenge) Index: {index}")
    st.sidebar.markdown(f"Ref Entity: \n {response}")
|
171 |
+
|
pages/Vector DB of SNOMED-CT.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from timeit import default_timer as timer

import streamlit as st
import chromadb
import pandas as pd
import numpy as np

# configure sqlite3
# __import__('pysqlite3')
# import sys
# sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')

st.set_page_config(layout="wide")

# App Title
st.title("📚 Semantic Search with Vector Database of SNOMED-CT 💡")
# Fix: corrected typo in the user-facing caption
# ("relate decription" -> "related description").
st.caption("🔍 Search any SNOMED-CT related description & concept with natural language.")
st.sidebar.title("🔍 Search Setting")
# Number of nearest neighbours to fetch (min 10, max 30, default 10).
query_number = st.sidebar.slider("Query Numbers", 10, 30, 10)
st.markdown("##### ➡️⌨️ Please input some medical description here, e.g. \"insomnia two nights a week.\", \"COPD\", \"Degenerative Joint Disease\"")
query_text = st.text_input("Input: any medical description snippet","Type-2 Diabetes")

# Chroma DB Client — opens the persisted vector store shipped with the repo.
chroma_client = chromadb.PersistentClient(path="snomed_ct_id_term_1410k")
collection = chroma_client.get_or_create_collection(name="snomed_ct_id_term")
# Removed dead placeholder assignments `start = 1.0` / `end = 1.1`: both were
# unconditionally overwritten by timer() readings before their first use.
st.markdown("##### ➡️Chroma DB will return " + str(query_number)
            + " related instances from " + str(collection.count()) + " collections.")
# st.warning("Due to the SQLite [file size limit on GitHub](https://docs.github.com/en/repositories/working-with-files/managing-large-files/about-git-large-file-storage), this testing only query from 500k SNOMED-CT instances.", icon="🚨")
|
31 |
+
|
32 |
+
|
33 |
+
# Func: query chrome_db
|
34 |
+
def query_chroma_db(query_text, query_number):
    """Return the top `query_number` nearest SNOMED-CT entries for
    `query_text`, including distances, metadatas and documents."""
    included_fields = ["distances", "metadatas", "documents"]
    return collection.query(
        query_texts=[query_text],
        n_results=query_number,
        include=included_fields,
    )
|
41 |
+
|
42 |
+
# Func: chrome_db_result to df
|
43 |
+
def get_df_from_chroma_results(results):
    """Convert a single-query Chroma result into a pandas DataFrame.

    The frame has one row per match with columns: ids, concept_ids
    (stringified from metadata), distances, documents.
    """
    concept_ids = [str(meta['concept_id']) for meta in results['metadatas'][0]]
    columns = {
        'ids': results['ids'][0],
        'concept_ids': concept_ids,
        'distances': results['distances'][0],
        'documents': results['documents'][0],
    }
    return pd.DataFrame(columns)
|
47 |
+
|
48 |
+
# Time the vector query so the measured latency can be shown to the user.
start = timer()
results = query_chroma_db(query_text, query_number)
end = timer()
st.markdown("###### ➡️ Query Time : {: .6f} seconds.".format(end - start))
st.divider()

results_df = get_df_from_chroma_results(results)

#displaying the dataframe as an interactive object
st.markdown("### 📊 Similar Search Results from Chroma Vector DB")
st.dataframe(results_df, 1000, 500)
|
59 |
+
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
pandas
|
3 |
+
openai
|
4 |
+
numpy
|
5 |
+
chromadb == 0.5.0
|
6 |
+
python-dotenv
|
7 |
+
pysqlite3-binary
|
8 |
+
|
snomed-entity-challenge.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/data_level0.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e6052af3bc565baf830088dd4c367f3e260ddbb2cf7dfac904fb483aa64f6b31
|
3 |
+
size 2363160000
|
snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/header.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1dc4275c3ac7eb47b6540b51430e9f85f50a3ebda23a824a9afa7906a02946db
|
3 |
+
size 100
|
snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/index_metadata.pickle
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:25b66beb13495b59f604b58531f4b2ca7a4407ee9555c6d33a8faf2913dc420b
|
3 |
+
size 52473273
|
snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/length.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2582aa1cc6e61c9b0b3da6575206c81c03377e13cf96fa0eb7ca509bbd1f2692
|
3 |
+
size 5640000
|
snomed_ct_id_term_1410k/c8390385-a5b9-4ff6-89cd-f8bf8a760fbb/link_lists.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:91a660d0f12b9111f4217c2024c4b75f810fbf4c6beae03cd9576891096b06a4
|
3 |
+
size 12018944
|
snomed_ct_id_term_1410k/chroma.sqlite3
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5dbcfc18f1d97ee8184c664105863bc8be1d8b6c376aca94dea6cdb5e9b81bf1
|
3 |
+
size 3590983680
|