Crystalcareai
committed on
Update modeling_quiet.py
Browse files- modeling_quiet.py +6 -6
modeling_quiet.py
CHANGED
@@ -1203,14 +1203,14 @@ class QuietForCausalLM(QuietPreTrainedModel, GenerationMixin):
|
|
1203 |
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
|
1204 |
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
|
1205 |
|
1206 |
-
if input_ids is None:
|
1207 |
-
|
1208 |
|
1209 |
-
batch_size = input_ids.shape[0]
|
1210 |
-
cur_len = input_ids.shape[-1]
|
1211 |
|
1212 |
-
if attention_mask is None:
|
1213 |
-
|
1214 |
|
1215 |
return self._generate_no_beam_search(
|
1216 |
input_ids,
|
|
|
1203 |
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
|
1204 |
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
|
1205 |
|
1206 |
+
# if input_ids is None:
|
1207 |
+
# raise ValueError("You have to specify either input_ids")
|
1208 |
|
1209 |
+
# batch_size = input_ids.shape[0]
|
1210 |
+
# cur_len = input_ids.shape[-1]
|
1211 |
|
1212 |
+
# if attention_mask is None:
|
1213 |
+
# attention_mask = torch.ones(batch_size, cur_len, device=input_ids.device)
|
1214 |
|
1215 |
return self._generate_no_beam_search(
|
1216 |
input_ids,
|