Create make_chain_model.py
make_chain_model.py
ADDED
@@ -0,0 +1,32 @@
+from langchain_core.runnables import RunnablePassthrough
+from langchain_core.output_parsers import StrOutputParser
+from langchain_community.chat_models import ChatOllama
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_pinecone import PineconeVectorStore
+from langchain_community.embeddings import SentenceTransformerEmbeddings
+
+def make_chain_llm(retriever, llm):
+    def format_docs(docs):
+        # Join the retrieved documents into a single passage.
+        return "\n\n".join(doc.page_content for doc in docs)
+
+    # Any chat model supported by LangChain can be used; here Ollama is used.
+    # llm = ChatOllama(model="zephyr:latest")
+
+    template = "\"```\" Below is an instruction that describes a task. Write a response that appropriately completes the request. "\
+        "Answer only from the provided context and do not generate content that is not in the context. "\
+        "make answer in korean. Answer in Korean."\
+        "\n\nContext:\n{context}\n;"\
+        "Question: {question}"\
+        "\n\nAnswer:"
+
+    prompt = ChatPromptTemplate.from_template(template)
+
+    rag_chain = (
+        {"context": retriever | format_docs, "question": RunnablePassthrough()}
+        | prompt
+        | llm
+        | StrOutputParser()
+    )
+
+    return rag_chain
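
make_chain_llm only assembles the RAG pipeline; the retriever and llm objects are expected from the caller. Below is a minimal usage sketch, assuming a Pinecone index with a sentence-transformers embedding model (consistent with the PineconeVectorStore and SentenceTransformerEmbeddings imports above) and an Ollama chat model as in the commented-out line. The index name, embedding model, and question are placeholders, not taken from this commit, and a PINECONE_API_KEY environment variable is assumed.

from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_pinecone import PineconeVectorStore

from make_chain_model import make_chain_llm

# Placeholder names: the index, embedding model, and question are illustrative only.
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = PineconeVectorStore(index_name="my-index", embedding=embeddings)  # needs PINECONE_API_KEY
retriever = vectorstore.as_retriever()

llm = ChatOllama(model="zephyr:latest")  # model name taken from the commented-out line above

rag_chain = make_chain_llm(retriever, llm)
print(rag_chain.invoke("Summarize the indexed documents."))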