John6666 committed
Commit a1abf89 · verified · 1 Parent(s): 04f30d4

Upload app.py

Files changed (1)
  1. app.py +5 -1
app.py CHANGED
@@ -32,7 +32,11 @@ def generate_lean4_code(prompt):
  #response, history = model.chat(tokenizer, prompt, history=[], meta_instruction="")
  #print(response, history)
  #return response
- input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(device)
+ chat = [
+     {"role": "system", "content": "You are a Lean4 expert who can write good Lean4 code based on natural language mathematical theorem and proof"},
+     {"role": "user", "content": prompt},
+ ]
+ input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
  results = model.generate(input_ids, max_new_tokens=1024, eos_token_id=terminators, do_sample=True, temperature=0.85, top_p=0.9)
  result_str = tokenizer.decode(results[0], skip_special_tokens=True)
  return result_str
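
For context, here is a minimal sketch of what generate_lean4_code looks like after this commit. Only the diff above is taken from the commit; the checkpoint name, model/tokenizer loading, and the terminators and device definitions below are placeholders standing in for whatever app.py sets up elsewhere.

# Minimal sketch of generate_lean4_code after this commit.
# Assumptions: MODEL_ID is a placeholder (the actual checkpoint is not shown in the diff),
# and the setup lines stand in for app.py's real initialization.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "your-org/your-lean4-model"  # placeholder, not specified in the diff
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)
terminators = [tokenizer.eos_token_id]  # assumption: stop on EOS only

def generate_lean4_code(prompt):
    # Wrap the raw prompt in the model's chat template (system + user turns)
    # instead of encoding the bare string, as introduced by this commit.
    chat = [
        {"role": "system", "content": "You are a Lean4 expert who can write good Lean4 code based on natural language mathematical theorem and proof"},
        {"role": "user", "content": prompt},
    ]
    input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
    results = model.generate(input_ids, max_new_tokens=1024, eos_token_id=terminators,
                             do_sample=True, temperature=0.85, top_p=0.9)
    result_str = tokenizer.decode(results[0], skip_special_tokens=True)
    return result_str

Note that apply_chat_template is called with its defaults, matching the diff; depending on the model's chat template, passing add_generation_prompt=True may also be needed so the assistant turn marker is appended before generation.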