This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)

---
base_model: llm-jp/llm-jp-3-13b
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- trl
license: apache-2.0
language:
- en
---
|
37 |
+
|
38 |
+
|
39 |
+
東京大学 松尾・岩澤研究室 大規模言語モデル2024 最終課題
|
40 |
+
|
41 |
+
(作成日:2024年11月24日 作成者:出水 利樹 #SoftBank #MONET Technologies)
|
42 |
+
|
43 |
+
https://weblab.t.u-tokyo.ac.jp/lecture/course-list/large-language-model/
|
44 |
+
|
45 |
+
# Sample Use
|
46 |
+
以下は、elyza-tasks-100-TV_0.jsonlの回答用モデルコードです!
|
47 |
+
|
48 |
+
# -*- coding: utf-8 -*-
"""llm-jp-3-13b-finetune2.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1TLF_HtVz6ng9ZAWe7lHy59kiHBg3_3y0
"""

# Reinstall Unsloth from GitHub and upgrade the main dependencies
!pip uninstall unsloth -y
!pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"

!pip install --upgrade torch
!pip install --upgrade xformers

!pip install ipywidgets --upgrade

# Install Flash Attention 2 on GPUs with compute capability 8.0 or higher (Ampere or newer)
import torch
if torch.cuda.get_device_capability()[0] >= 8:
    !pip install --no-deps packaging ninja einops "flash-attn>=2.6.3"

# Install PyTorch built against the CUDA 11.8 wheel index
!pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from unsloth import FastLanguageModel
import torch

max_seq_length = 888  # Unsloth supports RoPE scaling, so the context length can be chosen freely; set slightly larger for the second attempt
dtype = None          # None selects the dtype automatically
load_in_4bit = True   # True because we are handling a 13B-class model

model_id = "llm-jp/llm-jp-3-13b"
new_model_id = "llm-jp-3-13b-finetune-ex"  # name to give the fine-tuned model

# Create the FastLanguageModel instance
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,
)

# Attach LoRA adapters to the base model
model = FastLanguageModel.get_peft_model(
    model,
    r=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
    max_seq_length=max_seq_length,
)
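
# Optional sanity check (added; assumes the returned model is a standard PEFT model):
# print_trainable_parameters() should report that only the LoRA adapter weights,
# a small fraction of the 13B base parameters, are trainable.
# model.print_trainable_parameters()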

HF_TOKEN = "*****"  #@param {type:"string"}  # My token is secret!

from datasets import load_dataset

dataset = load_dataset("elyza/ELYZA-tasks-100")

dataset = dataset.remove_columns('eval_aspect')

dataset = dataset.rename_columns({'input': 'text'})

prompt = """### 指示
{}
### 回答
{}"""

"""
formatting_prompts_func: format each example to match the prompt template
"""
EOS_TOKEN = tokenizer.eos_token  # the tokenizer's end-of-sequence token
def formatting_prompts_func(examples):
    input = examples["text"]     # input field
    output = examples["output"]  # output field
    text = prompt.format(input, output) + EOS_TOKEN  # build the training prompt
    return {"formatted_text": text}  # return a new field "formatted_text"

# Apply the format to every example
dataset = dataset.map(
    formatting_prompts_func,
    num_proc=4,  # number of parallel workers
)

dataset

from google.colab import output
output.enable_custom_widget_manager()

"""Support for third party widgets will remain active for the duration of the session. To disable support:"""

from google.colab import output
output.disable_custom_widget_manager()

print(dataset["test"]["formatted_text"][3])

"""
training_arguments: training configuration

- output_dir: directory where the trained model is saved
- per_device_train_batch_size: training batch size per device
- per_device_eval_batch_size: evaluation batch size per device
- gradient_accumulation_steps: number of steps to accumulate gradients before an update
- optim: optimizer settings
- num_train_epochs: number of training epochs
- eval_strategy: evaluation strategy ("no"/"steps"/"epoch")
- eval_steps: interval in steps between evaluations when eval_strategy is "steps"
- logging_strategy: logging strategy
- logging_steps: interval in steps between log outputs
- warmup_steps: number of learning-rate warmup steps
- save_steps: interval in steps between checkpoint saves
- save_total_limit: number of checkpoints to keep
- max_steps: maximum number of training steps
- learning_rate: learning rate
- fp16: whether to train in 16-bit floating point (see exercise 8 of the course)
- bf16: whether to train in BFloat16
- group_by_length: group batches by input sequence length (more efficient training)
- report_to: where logs are sent ("wandb"/"tensorboard", etc.)
"""
from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset["test"],  # ELYZA-tasks-100 ships only a "test" split
    max_seq_length=max_seq_length,
    dataset_text_field="formatted_text",
    packing=False,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        num_train_epochs=1,
        logging_steps=10,
        warmup_steps=10,
        save_steps=100,
        save_total_limit=2,
        max_steps=-1,
        learning_rate=2e-4,
        fp16=not is_bfloat16_supported(),
        bf16=is_bfloat16_supported(),
        group_by_length=True,
        seed=3407,
        output_dir="outputs",
        report_to="none",
    ),
)
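
# Note: with per_device_train_batch_size=2 and gradient_accumulation_steps=4,
# the effective batch size is 2 x 4 = 8 examples per optimizer step.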

gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

# Run fine-tuning
trainer_stats = trainer.train()

# Load the evaluation tasks from elyza-tasks-100-TV_0.jsonl
import json
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""

from tqdm import tqdm

# Switch the model into inference mode (enables Unsloth's fast generation path)
FastLanguageModel.for_inference(model)

results = []
for dt in tqdm(datasets):
    input = dt["input"]

    prompt = f"""### 指示\n{input}\n### 回答\n"""

    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=512, use_cache=True, do_sample=False, repetition_penalty=1.2)
    # Keep only the text after the final "### 回答" marker, i.e. the model's answer
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]

    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
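
# Note: do_sample=False gives deterministic greedy decoding, and
# repetition_penalty=1.2 discourages the loops greedy decoding can fall into.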

# Save the predictions in JSONL format (one record per line)
with open(f"{new_model_id}_output.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write('\n')

# Upload only the LoRA adapters (save_method="lora") to the Hub as a private repo
model.push_to_hub_merged(
    new_model_id,
    tokenizer=tokenizer,
    save_method="lora",
    token=HF_TOKEN,
    private=True,
)
```
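
Once pushed, the adapter can be loaded back for inference. The sketch below is a minimal example and not part of the original notebook; the repository id `<your-account>/llm-jp-3-13b-finetune-ex` and the token are placeholders you must replace:

```python
from unsloth import FastLanguageModel

# Placeholder repository id: replace <your-account> with the account the adapter was pushed to
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="<your-account>/llm-jp-3-13b-finetune-ex",
    max_seq_length=888,
    dtype=None,
    load_in_4bit=True,
    token="*****",  # your Hugging Face token (the repo was pushed as private)
)
FastLanguageModel.for_inference(model)

# Query with the same prompt format used during fine-tuning
prompt = "### 指示\n日本で一番高い山は?\n### 回答\n"
inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=False, repetition_penalty=1.2)
print(tokenizer.decode(outputs[0], skip_special_tokens=True).split("\n### 回答")[-1])
```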