Crystalcareai committed
Update generate.py
generate.py CHANGED (+110 -84)
@@ -8,41 +8,84 @@ from transformers.generation.utils import (
 
 logger = logging.get_logger(__name__)
 
-[35 deleted lines (old 11-45): the previous custom_generate definition; its text is not preserved in this extract]
+def custom_generate(
+    self,
+    input_ids,
+    attention_mask=None,
+    max_length=None,
+    min_length=None,
+    do_sample=None,
+    early_stopping=None,
+    num_beams=None,
+    temperature=1.0,
+    top_k=None,
+    top_p=None,
+    repetition_penalty=None,
+    bad_words_ids=None,
+    bos_token_id=None,
+    pad_token_id=None,
+    eos_token_id=None,
+    length_penalty=None,
+    no_repeat_ngram_size=None,
+    num_return_sequences=None,
+    decoder_start_token_id=None,
+    use_cache=None,
+    num_beam_groups=None,
+    diversity_penalty=None,
+    prefix_allowed_tokens_fn=None,
+    output_attentions=None,
+    output_hidden_states=None,
+    output_scores=None,
+    return_dict_in_generate=None,
+    forced_bos_token_id=None,
+    forced_eos_token_id=None,
+    remove_invalid_values=None,
+    synced_gpus=None,
+    **kwargs,
+):
+    with torch.no_grad():
+        finished_generating = torch.zeros(len(input_ids), dtype=torch.bool, device=input_ids.device)
+
+        while not finished_generating.all() and input_ids.shape[1] < max_length:
+            # Sample the next token
+            new_ids = self(
+                input_ids[~finished_generating],
+                attention_mask=attention_mask[~finished_generating] if attention_mask is not None else None,
+                **kwargs
+            )['logits']
+
+            # Mask out the start and end thought tokens so we don't accidentally sample them
+            new_ids[:, :, self.tokenizer.vocab_size:] = -float("inf")
+
+            for list_idx, answer_idx in enumerate((~finished_generating).nonzero(as_tuple=True)[0]):
+                # Find the index of the last token that is not padding
+                base_answer_ids = input_ids[answer_idx]
+                new_answer_ids = new_ids[list_idx]
+                last_token_idx = (base_answer_ids != self.tokenizer.pad_token_id).nonzero(as_tuple=True)[0].max()
+
+                new_ids_sampled = torch.multinomial(
+                    torch.nn.functional.softmax(new_answer_ids[last_token_idx] / temperature, dim=-1), 1)
+
+                # Assign the new id to the last token
+                if last_token_idx + 1 >= len(base_answer_ids):
+                    # Add padding everywhere
+                    new_padding = torch.full((len(input_ids), 1), self.tokenizer.pad_token_id, dtype=torch.long,
+                                             device=input_ids.device)
+                    input_ids = torch.cat([input_ids, new_padding], dim=-1)
+                    if attention_mask is not None:
+                        attention_mask = torch.cat([attention_mask, torch.zeros_like(new_padding)], dim=-1)
+
+                if attention_mask is not None:
+                    attention_mask[answer_idx, last_token_idx + 1] = 1
+                input_ids[answer_idx, last_token_idx + 1] = new_ids_sampled
+
+                if new_ids_sampled == self.tokenizer.eos_token_id or new_ids_sampled == self.tokenizer.bos_token_id or new_ids_sampled == self.tokenizer.pad_token_id:
+                    finished_generating[answer_idx] = 1
+
+                # Check if the end token is generated
+                if new_ids_sampled == self.tokenizer.convert_tokens_to_ids("<|/assistant|>"):
+                    finished_generating[answer_idx] = 1
+
     return input_ids, attention_mask
 
 def generate(
@@ -78,57 +121,40 @@ def generate(
     forced_eos_token_id=None,
     remove_invalid_values=None,
     synced_gpus=None,
-    n_ahead=4,
-    n_ahead_talk=4,
-    merged_talk_heads=True,
-    merged_lm_and_talk_heads=False,
-    merged_lm_and_think_heads=True,
-    use_concat_talk_head=True,
-    use_shallow_think=True,
-    use_shallow_talk=False,
-    use_complex_think_head=False,
-    use_complex_talk_head=True,
-    use_weighted_talk_head=True,
-    trust_remote_code=True,
-    torch_dtype=torch.bfloat16,
     **model_kwargs,
 ):
-
-    self.max_thoughts = n_ahead + n_ahead_talk + 1
-    self.merged_talk_heads = merged_talk_heads
-    self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
-    self.merged_lm_and_think_heads = merged_lm_and_think_heads
-    self.use_concat_talk_head = use_concat_talk_head
-    self.use_shallow_think = use_shallow_think
-    self.use_shallow_talk = use_shallow_talk
-    self.use_complex_think_head = use_complex_think_head
-    self.use_complex_talk_head = use_complex_talk_head
-    self.use_weighted_talk_head = use_weighted_talk_head
-
-    # Set model properties
-    self.use_end_thought_token = True
-    self.use_start_thought_token = True
-    self.wandb_enabled = True
-    self.n_ahead = n_ahead
-    self.n_passes = 1
-    self.eval_mode = True
-    self.first_run = False
-    self.kill_after = 100
-    self.rm_initialized = True
-    self.original_mode = False
-
-    # Initialize a TextStreamer for streaming the generated text
-    streamer = TextStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
-
-    # Generate using the custom generate function
-    input_ids, attention_mask = custom_generate(
+    return custom_generate(
         self,
-        input_ids,
-        attention_mask,
-        max_length,
-
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        max_length=max_length,
+        min_length=min_length,
+        do_sample=do_sample,
+        early_stopping=early_stopping,
+        num_beams=num_beams,
         temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        bad_words_ids=bad_words_ids,
+        bos_token_id=bos_token_id,
+        pad_token_id=pad_token_id,
+        eos_token_id=eos_token_id,
+        length_penalty=length_penalty,
+        no_repeat_ngram_size=no_repeat_ngram_size,
+        num_return_sequences=num_return_sequences,
+        decoder_start_token_id=decoder_start_token_id,
+        use_cache=use_cache,
+        num_beam_groups=num_beam_groups,
+        diversity_penalty=diversity_penalty,
+        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        output_scores=output_scores,
+        return_dict_in_generate=return_dict_in_generate,
+        forced_bos_token_id=forced_bos_token_id,
+        forced_eos_token_id=forced_eos_token_id,
+        remove_invalid_values=remove_invalid_values,
+        synced_gpus=synced_gpus,
        **model_kwargs,
-    )
-
-    return input_ids, attention_mask
+    )
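
For orientation (not part of the commit): after this change, generate() is a thin wrapper that forwards every decoding argument to custom_generate(), which samples one token at a time per sequence until max_length is reached or an end token appears. The sketch below shows how the patched entry point might be driven. It is an assumption-laden example, not taken from this diff: it assumes the repo's remote-code model class wires this generate() in as model.generate and that a tokenizer is attached at model.tokenizer (custom_generate reads self.tokenizer for the pad/eos ids and vocab_size), and the checkpoint id is a placeholder.

# Minimal usage sketch, assuming the remote-code model class exposes the patched
# generate() as model.generate and expects a tokenizer at model.tokenizer.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "Crystalcareai/<your-quiet-star-checkpoint>"  # placeholder repo id, not from this diff

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,       # so the repo's own generate.py / modeling code is used
    torch_dtype=torch.bfloat16,
)
model.tokenizer = tokenizer       # assumption: custom_generate reads self.tokenizer

inputs = tokenizer("What is 2 + 2?", return_tensors="pt")

# The patched generate() returns the (input_ids, attention_mask) tuple produced by
# custom_generate(), with the prompt followed by the sampled continuation.
output_ids, _ = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=64,
    temperature=0.7,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Note that custom_generate compares input_ids.shape[1] directly against max_length, so max_length must be supplied; in the body shown in this diff, temperature is the only sampling knob the loop actually applies, while the other accepted arguments are passed through unused.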