NickyNicky commited on
Commit
6304815
·
verified ·
1 Parent(s): c298520

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +69 -1
README.md CHANGED
@@ -7,7 +7,7 @@ base_model:
7
  tags:
8
  - mergekit
9
  - merge
10
-
11
  ---
12
  # merged
13
 
@@ -70,3 +70,71 @@ slices:
70
  density: 0.55
71
  weight: 0.56
72
  ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  tags:
8
  - mergekit
9
  - merge
10
+ license: apache-2.0
11
  ---
12
  # merged
13
 
 
70
  density: 0.55
71
  weight: 0.56
72
  ```
73
+
74
+
75
+ ```python
76
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
    GenerationConfig,
    TextIteratorStreamer,
)
import torch

# Hub repository id of the merged model this README documents.
new_model = "NickyNicky/TinyDolphin-2.8-1.1b_oasst2_chatML_all_Cluster_merge_v1"

# Load the causal LM; device_map="auto" lets accelerate place the weights
# (GPU if available, otherwise CPU), so no device is hard-coded here.
model = AutoModelForCausalLM.from_pretrained(
    new_model,
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    # use_flash_attention_2=False,
)

tokenizer = AutoTokenizer.from_pretrained(
    new_model,
    max_length=2048,
    trust_remote_code=True,
    use_fast=True,
)

# Reuse EOS as PAD and pad on the right, as the model was trained without
# a dedicated pad token.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"

# ChatML-formatted prompt (system / user / assistant turns).
prompt = """<|im_start|>system
You are a helpful AI assistant.<|im_end|>
<|im_start|>user
escribe una historia de amor.<|im_end|>
<|im_start|>assistant
"""

# FIX: move inputs to the device the model actually landed on instead of
# hard-coding .cuda() — the original crashed on CPU-only hosts even though
# device_map="auto" happily loads there.
inputs = tokenizer.encode(
    prompt,
    return_tensors="pt",
    add_special_tokens=False,  # the ChatML markers are already in the prompt
).to(model.device)

generation_config = GenerationConfig(
    max_new_tokens=700,
    temperature=0.5,
    top_p=0.9,
    top_k=40,
    repetition_penalty=1.1,  # 1.0 means no penalty, > 1.0 means penalty, 1.2 from CTRL paper
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,
    eos_token_id=tokenizer.eos_token_id,
)

outputs = model.generate(
    generation_config=generation_config,
    input_ids=inputs,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
140
+ ```