Update app.py
app.py
CHANGED
@@ -113,14 +113,14 @@ def load_and_prepare_model(model_id):
 #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
 #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
 #vaeX = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",use_safetensors=True)
-vaeX = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
+#vaeX = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
 #vaeX = AutoencoderKL.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
 #unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16',subfolder='unet').to(torch.bfloat16) # ,use_safetensors=True FAILS
 # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
 #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear", steps_offset=1,timestep_spacing="trailing"))
 #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler', steps_offset=1,timestep_spacing="trailing")
 #sched = EulerAncestralDiscreteScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
-sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
+#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
 #pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0").to(torch.bfloat16)
 #pipeX = StableDiffusionXLPipeline.from_pretrained("ford442/Juggernaut-XI-v11-fp32",use_safetensors=True)
 
@@ -142,17 +142,19 @@ def load_and_prepare_model(model_id):
 # scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
 #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset =1)
 )
-
+#pipe.vae = AsymmetricAutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2').to(torch.bfloat16) # ,use_safetensors=True FAILS
+pipe.vae = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
 #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear",use_karras_sigmas=True, algorithm_type="dpmsolver++")
 #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
 #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
-pipe.vae = vaeX
+#pipe.vae = vaeX
 #pipe.unet = unetX
 #pipe.vae.do_resize=False
-pipe.scheduler = sched
+#pipe.scheduler = sched
 #pipe.vae=vae.to(torch.bfloat16)
 #pipe.unet=pipeX.unet
 #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
+pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
 
 pipe.to(device)
 pipe.to(torch.bfloat16)
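Net effect of the two hunks above: the standalone vaeX / sched temporaries are commented out, and the replacement VAE and scheduler are now assigned directly onto the pipeline. Below is a minimal sketch of that pattern, not the full load_and_prepare_model body; the StableDiffusionXLPipeline load and the device definition are assumptions (the real call spans several lines, hence the stray ')' context line above), and only the pipe.vae / pipe.scheduler assignments mirror the '+' lines of the diff.

import torch
from diffusers import AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

device = 'cuda' if torch.cuda.is_available() else 'cpu'  # assumed; app.py defines its own device

def load_and_prepare_model_sketch(model_id):
    # Assumed base load; model_id would come from MODEL_OPTIONS in app.py.
    pipe = StableDiffusionXLPipeline.from_pretrained(model_id)
    # Swap in the Juggernaut-XI fp32 VAE, cast to bfloat16 before the CUDA move (mirrors the diff).
    pipe.vae = AutoencoderKL.from_pretrained(
        'ford442/Juggernaut-XI-v11-fp32', subfolder='vae'
    ).to(torch.bfloat16)
    # Replace the scheduler with Euler Ancestral using a scaled_linear beta schedule (mirrors the diff).
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16', subfolder='scheduler', beta_schedule="scaled_linear"
    )
    pipe.to(device)
    pipe.to(torch.bfloat16)
    return pipe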
@@ -160,7 +162,7 @@ def load_and_prepare_model(model_id):
 #apply_hidiffusion(pipe)
 
 #pipe.unet.set_default_attn_processor()
-
+pipe.vae.set_default_attn_processor()
 
 print(f'Pipeline: ')
 #print(f'_optional_components: {pipe._optional_components}')
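The functional change in this hunk is the new pipe.vae.set_default_attn_processor() call; the matching UNet call stays commented out. Per the diff ordering it runs after the device move and bfloat16 cast. A hedged illustration of that ordering, assuming the pipe and device names from the sketch above:

pipe.to(device)                          # move the pipeline to the GPU first
pipe.to(torch.bfloat16)                  # then cast to bfloat16
pipe.vae.set_default_attn_processor()    # then reset the VAE's attention processors to the diffusers defaults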
@@ -179,7 +181,7 @@ def load_and_prepare_model(model_id):
 #sched = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++")
 #sched = DDIMScheduler.from_config(pipe.scheduler.config)
 return pipe
-
+
 # Preload and compile both models
 models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
 
@@ -223,9 +225,10 @@ def uploadNote():
 f.write(f"SPACE SETUP: \n")
 f.write(f"Use Model Dtype: no \n")
 f.write(f"Model Scheduler: Euler_a custom before cuda \n")
-f.write(f"Model VAE: juggernaut to bfloat before cuda \n")
+f.write(f"Model VAE: juggernaut to bfloat before cuda then attn_proc \n")
 f.write(f"Model UNET: default ford442/RealVisXL_V5.0_BF16 \n")
 f.write(f"Model HiDiffusion OFF \n")
+f.write(f"Model do_resize ON \n")
 upload_to_ftp(filename)
 
 @spaces.GPU(duration=30)