SkyOrbis committed on
Commit
3241e4e
·
verified ·
1 Parent(s): 926c4f1

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +46 -0
README.md ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - ko
4
+ base_model:
5
+ - Qwen/Qwen2.5-3B-Instruct
6
+ ---
7
+
8
+ ## Training
9
+ GPU: H100
10
+
11
+ ## Test
12
+ ```
13
+ from transformers import AutoModelForCausalLM, AutoTokenizer
14
+
15
+ model_name = "SkyOrbis/SKY-Ko-Qwen2.5-3B-Instruct"
16
+
17
+ model = AutoModelForCausalLM.from_pretrained(
18
+ model_name,
19
+ torch_dtype="auto",
20
+ device_map="auto"
21
+ )
22
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
23
+
24
+ prompt = "์„œ์šธ์˜ ์ˆ˜๋„๋Š”?"
25
+ messages = [
26
+ {"role": "system", "content": "์ฃผ์–ด์ง„ ์งˆ๋ฌธ์— ๋Œ€๋‹ต์„ ํ•˜์„ธ์š”."},
27
+ {"role": "user", "content": prompt}
28
+ ]
29
+ text = tokenizer.apply_chat_template(
30
+ messages,
31
+ tokenize=False,
32
+ add_generation_prompt=True
33
+ )
34
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
35
+
36
+ generated_ids = model.generate(
37
+ **model_inputs,
38
+ max_new_tokens=512
39
+ )
40
+ generated_ids = [
41
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
42
+ ]
43
+
44
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
45
+ print(response)
46
+ ```