suvadityamuk committed
Commit a5edd99 · 1 Parent(s): d9181cb

Update pipeline.py

Files changed (1)
  1. pipeline.py +28 -1
pipeline.py CHANGED
@@ -55,7 +55,7 @@ class StableDiffusionComparisonPipeline(DiffusionPipeline):
         feature_extractor: CLIPFeatureExtractor,
         requires_safety_checker: bool = True,
     ):
-        super().__init__()
+        super()._init_()

         self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
         self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
@@ -103,6 +103,33 @@ class StableDiffusionComparisonPipeline(DiffusionPipeline):
     def layers(self) -> Dict[str, Any]:
         return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

+    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+        r"""
+        Enable sliced attention computation.
+
+        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
+        in several steps. This is useful to save some memory in exchange for a small speed decrease.
+
+        Args:
+            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
+                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+                a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
+                `attention_head_dim` must be a multiple of `slice_size`.
+        """
+        if slice_size == "auto":
+            # half the attention head size is usually a good trade-off between
+            # speed and memory
+            slice_size = self.unet.config.attention_head_dim // 2
+        self.unet.set_attention_slice(slice_size)
+
+    def disable_attention_slicing(self):
+        r"""
+        Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
+        back to computing attention in one step.
+        """
+        # set slice_size = `None` to disable `attention slicing`
+        self.enable_attention_slicing(None)
+
     @torch.no_grad()
     def text2img_sd1_1(
         self,
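For reference, the two methods added in this commit mirror the attention-slicing helpers that ship with the stock diffusers pipelines, so they are exercised the same way. Below is a minimal usage sketch against a plain StableDiffusionPipeline; the checkpoint id, dtype, and prompt are illustrative assumptions, and an instance of StableDiffusionComparisonPipeline would be substituted once the custom pipeline is loaded:

import torch
from diffusers import StableDiffusionPipeline

# Assumed checkpoint; any Stable Diffusion 1.x repo exposes the same API.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16,
).to("cuda")

# "auto" halves attention_head_dim, so attention runs in two steps.
pipe.enable_attention_slicing("auto")
image = pipe("a photograph of an astronaut riding a horse").images[0]

# An explicit slice size must divide attention_head_dim evenly:
# with attention_head_dim == 8, slice_size == 2 gives 8 // 2 == 4 slices.
pipe.enable_attention_slicing(2)

# Revert to single-step attention (fastest when memory allows).
pipe.disable_attention_slicing()

Note that disable_attention_slicing simply forwards slice_size=None to enable_attention_slicing, so a single code path handles both states.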