ford442 committed
Commit 5f668d1 · verified · 1 Parent(s): 53d4df7

Update app.py

Files changed (1): app.py (+6, -4)
app.py CHANGED
```diff
@@ -123,7 +123,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
     return {"latents": callback_kwargs["latents"]}
 
 def load_and_prepare_model():
-    #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(device=device, dtype=torch.bfloat16)
+    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
     #vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=False).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
     sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
@@ -132,10 +132,11 @@ def load_and_prepare_model():
         #torch_dtype=torch.bfloat16,
         add_watermarker=False,
         # low_cpu_mem_usage = False,
-        token=HF_TOKEN,
+        token = HF_TOKEN,
+        scheduler = sched,
     )
-    #pipe.vae = vaeRV #.to(torch.bfloat16)
-    pipe.scheduler = sched
+    pipe.vae = vaeXL #.to(torch.bfloat16)
+    #pipe.scheduler = sched,
     #pipe.vae.do_resize=False
     #pipe.vae.vae_scale_factor=8
     # pipe.to(device=device, dtype=torch.bfloat16)
@@ -222,6 +223,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
     torch.set_float32_matmul_precision("highest")
     #callback_kwargs["latents"] = callback_kwargs["latents"].to(torch.bfloat16)
     #pipe.unet.to(torch.float64)
+    pipe.unet.set_default_attn_processor() ## custom ##
     # pipe.vae = vae_a
     # pipe.unet = unet_a
     torch.backends.cudnn.deterministic = False
```
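
For context, a minimal sketch of how `load_and_prepare_model()` reads after this commit. Only the lines visible in the diff are taken from the file; the imports, the `HF_TOKEN` value, the base checkpoint passed to `from_pretrained`, and the choice of `StableDiffusionXLPipeline` are assumptions filled in from typical diffusers usage:

```python
# Hedged sketch of load_and_prepare_model() after this commit.
# Assumptions (not visible in the diff): the imports, the HF_TOKEN value,
# the base checkpoint repo, and the StableDiffusionXLPipeline class.
import torch
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

HF_TOKEN = "hf_..."  # placeholder; presumably read from a Space secret

def load_and_prepare_model():
    # New in this commit: the standalone SDXL VAE is loaded and cast to
    # bfloat16 instead of staying commented out.
    vaeXL = AutoencoderKL.from_pretrained(
        "stabilityai/sdxl-vae", use_safetensors=False
    ).to(torch.bfloat16)
    sched = EulerAncestralDiscreteScheduler.from_pretrained(
        "ford442/RealVisXL_V5.0_BF16", subfolder="scheduler"
    )
    # Also new: the scheduler is passed as a component override at load
    # time rather than assigned to pipe.scheduler afterwards.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "ford442/RealVisXL_V5.0_BF16",  # assumed base checkpoint
        add_watermarker=False,
        token=HF_TOKEN,
        scheduler=sched,
    )
    # The external SDXL VAE replaces the checkpoint's bundled VAE.
    pipe.vae = vaeXL
    return pipe
```

Swapping in the standalone `stabilityai/sdxl-vae` and keeping it in bfloat16 is a common way to sidestep the fp16 overflow issues of SDXL's bundled VAE while still halving memory versus fp32.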
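
The third hunk adds `pipe.unet.set_default_attn_processor()` inside `scheduler_swap_callback`, which diffusers invokes at the end of every denoising step when passed as `callback_on_step_end`. A hedged sketch of that wiring follows; the prompt and step count are placeholders, and the real file calls the method on the module-level `pipe` rather than the `pipeline` argument:

```python
# Hedged sketch of how scheduler_swap_callback is hooked into a pipeline
# call. Prompt and step count are placeholders; `pipe` comes from the
# load_and_prepare_model() sketch above.
import torch

def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
    torch.set_float32_matmul_precision("highest")
    # Added in this commit: reset every attention block to the default
    # processor on each step (the file calls this on the global `pipe`).
    pipeline.unet.set_default_attn_processor()
    torch.backends.cudnn.deterministic = False
    # The callback must return the tensors named in
    # callback_on_step_end_tensor_inputs, possibly modified.
    return {"latents": callback_kwargs["latents"]}

pipe = load_and_prepare_model()
image = pipe(
    "a photo of an astronaut riding a horse",  # placeholder prompt
    num_inference_steps=40,                    # placeholder step count
    callback_on_step_end=scheduler_swap_callback,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```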