Nechba committed on
Commit
e0c600f
·
verified ·
1 Parent(s): 6a88742

Update modeling_chatglm.py

Browse files
Files changed (1) hide show
  1. modeling_chatglm.py +2 -3
modeling_chatglm.py CHANGED
@@ -762,9 +762,8 @@ class GLMTransformer(torch.nn.Module):
762
  ):
763
  if not kv_caches:
764
  kv_caches = [None for _ in range(self.num_layers)]
765
- else:
766
- print("kv_caches",kv_caches)
767
- kv_caches=kv_caches[1]
768
  presents = () if use_cache else None
769
  if self.gradient_checkpointing and self.training:
770
  if use_cache:
 
762
  ):
763
  if not kv_caches:
764
  kv_caches = [None for _ in range(self.num_layers)]
765
+
766
+ print("kv_caches",kv_caches)
 
767
  presents = () if use_cache else None
768
  if self.gradient_checkpointing and self.training:
769
  if use_cache: