Commit e6e2d3b
Parent(s): 8f50610
added fromtf=true
app.py
CHANGED
@@ -25,7 +25,7 @@ tokenizer = AutoTokenizer.from_pretrained(base_model_id)
 
 # Load models with explicit LLaMA architecture
 base_model = LlamaForCausalLM.from_pretrained(base_model_id)
-instruct_model = LlamaForCausalLM.from_pretrained(instruct_model_id)
+instruct_model = LlamaForCausalLM.from_pretrained(instruct_model_id, from_tf = True)
 
 def generate_response(model, tokenizer, message, temperature=0.5, max_length=200, system_prompt="", is_instruct=False):
     # Prepare input based on model type
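Note: from_tf=True asks Transformers' from_pretrained to load and convert TensorFlow-format weights (e.g. a tf_model.h5 checkpoint) instead of the default PyTorch/safetensors weights; it only succeeds if the repository actually ships TF weights and TensorFlow is installed. Below is a minimal standalone sketch of the loading pattern this commit touches; the repo IDs are placeholders, not the values defined in app.py.

from transformers import AutoTokenizer, LlamaForCausalLM

# Placeholder repo IDs for illustration only; app.py defines its own
# base_model_id and instruct_model_id.
base_model_id = "your-org/llama-base"
instruct_model_id = "your-org/llama-instruct"

tokenizer = AutoTokenizer.from_pretrained(base_model_id)

# Default path: load PyTorch (or safetensors) weights.
base_model = LlamaForCausalLM.from_pretrained(base_model_id)

# from_tf=True converts a TensorFlow checkpoint in the repo to a PyTorch model;
# this call fails if no TF weights are published for instruct_model_id.
instruct_model = LlamaForCausalLM.from_pretrained(instruct_model_id, from_tf=True)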