Crystalcareai committed (verified)
Commit accf604 · Parent: 7523550

Update modeling_quiet.py

Files changed (1): modeling_quiet.py (+12 −2)
modeling_quiet.py CHANGED
@@ -1119,7 +1119,7 @@ class QuietForCausalLM(QuietPreTrainedModel, GenerationMixin):
         max_new_tokens: Optional[int] = None,
         temperature: float = 1.1,
         **kwargs,
-    ):
+    ):
         if isinstance(input_ids, str):
             input_ids = self.tokenizer(input_ids, return_tensors="pt").input_ids
 
@@ -1128,7 +1128,17 @@ class QuietForCausalLM(QuietPreTrainedModel, GenerationMixin):
         attention_mask = torch.ones_like(input_ids)
 
         from .generate import generate
-        return generate(self, input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, temperature=temperature, **kwargs)
+
+        output = generate(
+            self,
+            input_ids,
+            attention_mask=attention_mask,
+            max_new_tokens=max_new_tokens,
+            temperature=temperature,
+            **kwargs,
+        )
+
+        return output.sequences
 
     @add_start_docstrings_to_model_forward(QUIET_INPUTS_DOCSTRING)
     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
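For context, a minimal usage sketch of the patched method (not part of the commit): it assumes the custom generate imported from .generate returns an object with a .sequences tensor of token IDs, that the model instance exposes the tokenizer it uses for string prompts (self.tokenizer, as in the diff), and a hypothetical repository id used purely for illustration.

# Minimal usage sketch under the assumptions noted above; the repo id
# below is a placeholder, not taken from this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "user/quiet-model"  # hypothetical repo id for illustration
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

# The overridden generate accepts a raw string and tokenizes it internally
# via the isinstance(input_ids, str) branch shown in the diff.
sequences = model.generate("Why is the sky blue?", max_new_tokens=64, temperature=1.1)

# Before this commit the method returned the raw result of generate(...);
# after it, only output.sequences (a tensor of token IDs) is returned,
# so the result can be decoded directly.
print(tokenizer.batch_decode(sequences, skip_special_tokens=True)[0])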