Divyansh12 committed on
Commit
23da66a
·
verified ·
1 Parent(s): f725219

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -3
app.py CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
2
  from langchain.chains import ConversationChain
3
  from langchain.memory import ConversationBufferMemory
4
  from langchain.schema import HumanMessage, AIMessage
 
5
  from llama_cpp import Llama
6
 
7
  # Initialize the Llama model
@@ -14,11 +15,28 @@ llm = Llama.from_pretrained(
14
  chat_format="chatml"
15
  )
16
 
17
- # Initialize memory
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  memory = ConversationBufferMemory(return_messages=True)
 
19
 
20
- # Create the conversation chain directly using the Llama model
21
- conversation = ConversationChain(memory=memory, llm=llm)
22
 
23
  # Streamlit UI
24
  st.title("Chatbot with LangChain and Llama")
 
from langchain.chains import ConversationChain
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.schema import AIMessage, ChatGeneration, ChatResult, HumanMessage
from llama_cpp import Llama
7
 
8
  # Initialize the Llama model
 
15
  chat_format="chatml"
16
  )
17
 
18
# Define the LangChain model for Llama
class LlamaChatModel(BaseChatModel):
    """LangChain chat-model wrapper around the module-level llama_cpp `llm`."""

    @property
    def _llm_type(self) -> str:
        # BaseChatModel declares _llm_type as a property; a plain method
        # here breaks LangChain's internal serialization/callback logging.
        return "llama"

    def _generate(self, messages, stop=None, run_manager=None, **kwargs):
        """Produce one assistant reply from the conversation history.

        Parameters:
            messages: LangChain message objects (HumanMessage / AIMessage).
            stop: optional stop sequences, forwarded to the model (the
                previous version accepted this and silently dropped it).
            run_manager: LangChain callback manager (accepted for
                interface compatibility; unused here).

        Returns:
            ChatResult wrapping a single ChatGeneration, as the
            BaseChatModel contract requires — returning a bare list of
            AIMessage (as before) crashes inside LangChain.
        """
        # Map LangChain messages to OpenAI-style role/content dicts.
        # Anything that is not a HumanMessage is treated as assistant
        # output, matching the original prompt-building logic.
        chat_messages = [
            {
                "role": "user" if isinstance(msg, HumanMessage) else "assistant",
                "content": msg.content,
            }
            for msg in messages
        ]

        # llama_cpp.Llama has no `.chat()` method; create_chat_completion
        # is the chat API and matches the chat_format="chatml" the model
        # was loaded with.
        response = llm.create_chat_completion(messages=chat_messages, stop=stop)
        text = response["choices"][0]["message"]["content"]

        generation = ChatGeneration(message=AIMessage(content=text))
        return ChatResult(generations=[generation])
33
# Wire up the pieces: conversation memory, the Llama-backed chat model,
# and the ConversationChain that ties them together.
memory = ConversationBufferMemory(return_messages=True)
llama_chat_model = LlamaChatModel()

conversation = ConversationChain(memory=memory, llm=llama_chat_model)
40
 
41
  # Streamlit UI
42
  st.title("Chatbot with LangChain and Llama")