takaganasu committed · verified
Commit fc9a77d · 1 Parent(s): 5ae2ee7

Upload inference_code.py

Files changed (1)
  1. inference_code.py +91 -0
inference_code.py ADDED
@@ -0,0 +1,91 @@
+ # -*- coding: utf-8 -*-
+ # Inference
+
+ """### Package installation"""
+
+ # Install the required libraries (version pinning here is just one example).
+ # The leading "!" is notebook syntax; drop it when running in a plain shell.
+ !pip install -U transformers peft bitsandbytes accelerate
+
+ # Inference
+ import json
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import PeftModel
+
+ # Hugging Face token, base model, and LoRA adapter ID
+ HF_TOKEN = ""
+ base_model_id = "llm-jp/llm-jp-3-13b"  # base model ID
+ adapter_repo_id = ""  # ID of the uploaded LoRA adapter
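+ # A Hugging Face read token is only needed here if the adapter repo is private or gated.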
+
+ # 4-bit quantization settings via BitsAndBytesConfig
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
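+ # NF4 stores weights as 4-bit NormalFloat while matmuls run in bfloat16,
+ # cutting weight memory for the 13B base model to roughly a quarter of fp16.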
+
+ # Load the tokenizer and model from the Hugging Face Hub
+ tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, token=HF_TOKEN)
+ model = AutoModelForCausalLM.from_pretrained(
+     base_model_id,
+     quantization_config=bnb_config,
+     device_map="auto",
+     trust_remote_code=True,
+     token=HF_TOKEN
+ )
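+ # device_map="auto" lets accelerate place layers across the available GPU(s) and CPU.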
+
+ # Apply the LoRA adapter
+ model = PeftModel.from_pretrained(model, adapter_repo_id, token=HF_TOKEN)
+ model.eval()
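+ # eval() disables dropout; generation below also runs under torch.no_grad().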
+
+ # Generation parameters
+ max_new_tokens = 200
+ temperature = 0.7
+ top_p = 0.9
+ do_sample = True
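+ # top_p/temperature trade diversity for determinism; set do_sample=False for greedy decoding.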
+
+ # Load the task data (upload elyza-tasks-100-TV_0.jsonl to the same folder)
+ datasets = []
+ with open("./elyza-tasks-100-TV_0.jsonl", "r", encoding="utf-8") as f:
+     for line in f:
+         line = line.strip()
+         if not line:
+             continue
+         data = json.loads(line)
+         datasets.append(data)
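+ # Each JSONL line is expected to carry at least "task_id" and "input" (used below).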
+
+ def generate_output(input_text):
+     # Prompt format ("### 指示" = instruction, "### 回答" = answer)
+     prompt = f"### 指示\n{input_text}\n### 回答\n"
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     # Some tokenizers return token_type_ids, which generate() does not accept
+     if "token_type_ids" in inputs:
+         del inputs["token_type_ids"]
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=max_new_tokens,
+             do_sample=do_sample,
+             top_p=top_p,
+             temperature=temperature,
+             pad_token_id=tokenizer.eos_token_id
+         )
+     # Decode only the newly generated tokens, skipping the prompt
+     output_text = tokenizer.decode(outputs[0][inputs.input_ids.size(1):], skip_special_tokens=True)
+     return output_text.strip()
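+ # Quick smoke test with a hypothetical prompt (uncomment to try):
+ # print(generate_output("日本で一番高い山は何ですか?"))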
+
+ results = []
+ for data in datasets:
+     task_id = data["task_id"]
+     input_text = data["input"]
+     output_text = generate_output(input_text)
+     results.append({"task_id": task_id, "output": output_text})
+
+ # Save in JSONL format (ensure_ascii=False keeps Japanese text as UTF-8, not \uXXXX escapes)
+ with open("submission_attempt.jsonl", "w", encoding="utf-8") as f:
+     for r in results:
+         json.dump(r, f, ensure_ascii=False)
+         f.write("\n")
+
+ print("Inference finished. Wrote 'submission_attempt.jsonl'.")
+