yuchenlin committed
Commit 56548ad · Parent(s): 643a2a0

fix typos and add 8B scripts

Files changed (2):
  1. app.py +2 -2
  2. app_8B.py +101 -0
app.py CHANGED
@@ -79,7 +79,7 @@ def respond(
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are Magpie, a helpful AI assistant. For simple qeuries, try to answer them directly; for complex questions, try to think step-by-step before providing an answer.", label="System message"),
+        gr.Textbox(value="You are Magpie, a helpful AI assistant. For simple queries, try to answer them directly; for complex questions, try to think step-by-step before providing an answer.", label="System message"),
         gr.Slider(minimum=128, maximum=2048, value=512, step=64, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
@@ -89,7 +89,7 @@ demo = gr.ChatInterface(
             step=0.1,
             label="Top-p (nucleus sampling)",
         ),
-        gr.Slider(minimum=0.5, maximum=1.5, value=1.0, step=0.1, label="Repetation Penalty"),
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.0, step=0.1, label="Repetition Penalty"),
     ],
     description=header,  # Add the header as the description
     title="MagpieLM-4B Chat (v0.1)",
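
Note on wiring (applies to both apps): `gr.ChatInterface` passes the `additional_inputs` values positionally, in list order, after `(message, history)`, so the textbox and four sliders bind to the chat function's optional parameters one by one. A minimal sketch of the equivalent direct call — the prompt and values are illustrative only, and `respond` is the function defined in app_8B.py below:

```python
# Equivalent direct call: additional_inputs bind positionally after (message, history).
response = ""
for partial in respond(
    "What is the Magpie method?",               # message
    [],                                         # chat_history (empty on the first turn)
    "You are Magpie, a helpful AI assistant.",  # "System message" textbox
    512,                                        # "Max new tokens" slider
    0.7,                                        # "Temperature" slider
    0.9,                                        # "Top-p (nucleus sampling)" slider
    1.0,                                        # "Repetition Penalty" slider
):
    response = partial  # each yield is the full response so far
print(response)
```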
app_8B.py ADDED
@@ -0,0 +1,101 @@
+import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+import spaces
+from threading import Thread
+from typing import Iterator
+
+# Markdown header shown above the chat window
+header = """
+# 🐦‍⬛ MagpieLMs: Open LLMs with Fully Transparent Alignment Recipes
+
+💬 We've aligned Llama-3.1-8B and a 4B version (distilled by NVIDIA) using purely synthetic data generated by our [Magpie](https://arxiv.org/abs/2406.08464) method. Our open-source post-training recipe includes the SFT and DPO data plus all training configs and logs, so everyone can reproduce the alignment process for their own research. Note that our data does not contain any GPT-generated data, and it comes with a much friendlier license for both commercial and academic use.
+
+- **Magpie Collection**: [Magpie on Hugging Face](https://lnkd.in/g_pgX5Y2)
+- **Magpie Paper**: [Read the research paper](https://arxiv.org/abs/2406.08464)
+
+Contact: [Zhangchen Xu](https://zhangchenxu.com) and [Bill Yuchen Lin](https://yuchenlin.xyz).
+
+---
+"""
+
+# Load model and tokenizer
+model_name = "Magpie-Align/MagpieLM-8B-Chat-v0.1"
+
+device = "cuda"  # the device to load the model onto
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    torch_dtype="auto",
+    ignore_mismatched_sizes=True
+)
+model.to(device)
+
+MAX_INPUT_TOKEN_LENGTH = 4096  # Adjust this to the context budget you want to allow
+
+@spaces.GPU
+def respond(
+    message: str,
+    chat_history: list[tuple[str, str]],
+    system_prompt: str,
+    max_new_tokens: int = 1024,
+    temperature: float = 0.6,
+    top_p: float = 0.9,
+    repetition_penalty: float = 1.2,  # listed before top_k so the UI sliders below bind positionally
+    top_k: int = 50,  # no slider for top_k, so it stays last and keeps its default
+) -> Iterator[str]:
+    conversation = []
+    if system_prompt:
+        conversation.append({"role": "system", "content": system_prompt})
+    for user, assistant in chat_history:
+        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+    conversation.append({"role": "user", "content": message})
+
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")  # append the assistant header so the model starts a fresh reply
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+    input_ids = input_ids.to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        input_ids=input_ids,
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        num_beams=1,
+        repetition_penalty=repetition_penalty,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)  # generate in a background thread
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs)
+
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="You are Magpie, a helpful AI assistant. For simple queries, try to answer them directly; for complex questions, try to think step-by-step before providing an answer.", label="System message"),
+        gr.Slider(minimum=128, maximum=2048, value=512, step=64, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.9,
+            step=0.1,
+            label="Top-p (nucleus sampling)",
+        ),
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.0, step=0.1, label="Repetition Penalty"),
+    ],
+    description=header,  # Show the markdown header as the description
+    title="MagpieLM-8B Chat (v0.1)",
+    theme=gr.themes.Soft()
+)
+
+if __name__ == "__main__":
+    demo.queue()
+    demo.launch(share=True)
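
The same streaming pattern works outside Gradio. A minimal standalone sketch, reusing the model name and chat-template flow from app_8B.py above — the example prompt and sampling values are illustrative, and a CUDA GPU with the transformers library installed is assumed:

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "Magpie-Align/MagpieLM-8B-Chat-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto").to("cuda")

conversation = [
    {"role": "system", "content": "You are Magpie, a helpful AI assistant."},
    {"role": "user", "content": "Explain top-p (nucleus) sampling in one paragraph."},
]
input_ids = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# generate() runs in a background thread; the streamer yields decoded text
# incrementally on the main thread as tokens are produced.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate, kwargs=dict(
    input_ids=input_ids,
    streamer=streamer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
)).start()

for text in streamer:
    print(text, end="", flush=True)
```

Running `python app_8B.py` instead launches the full Gradio demo; `demo.launch(share=True)` additionally creates a temporary public link.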