Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -107,14 +107,16 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
 def load_and_prepare_model(model_id):
     model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
     dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to float32 if not found
-    vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
-
+    #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
+    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",safety_checker=None)
     # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
     # vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None).to('cuda')
     #sched = EulerAncestralDiscreteScheduler.from_config('ford442/Juggernaut-XI-v11-fp32', subfolder='scheduler',beta_schedule="scaled_linear",use_karras_sigmas=True)
-    sched = EulerAncestralDiscreteScheduler.from_config(
-
-
+    sched = EulerAncestralDiscreteScheduler.from_config("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear")
+    #sched = EulerAncestralDiscreteScheduler.from_config('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
+    # sched = EulerAncestralDiscreteScheduler.from_config('ford442/RealVisXL_V5.0_BF16', beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
+    pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0")
+
     #pipeX = StableDiffusionXLPipeline.from_pretrained("ford442/Juggernaut-XI-v11-fp32",torch_dtype=torch.float32)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
@@ -127,7 +129,7 @@ def load_and_prepare_model(model_id):
         # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
         # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
         # vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",repo_type='model',safety_checker=None),
-        vae=vae,
+        #vae=vae,
         # unet=pipeX.unet,
         scheduler = sched
         # scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
@@ -137,8 +139,7 @@ def load_and_prepare_model(model_id):
     #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.to('cuda')
     #pipe.scheduler = sched
-
-    #pipe.vae=pipeX.vae
+    pipe.unet=pipeX.unet.to(torch.bfloat16)
     # pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
     #pipe.to(dtype=torch.bfloat16)
     #pipe.unet = pipeX.unet
@@ -146,7 +147,9 @@ def load_and_prepare_model(model_id):
     #pipe.unet.to(torch.bfloat16)
 
     pipe.to(device)
-    pipe.to(torch.bfloat16)
+    pipe.vae=vae.to(torch.bfloat16)
+
+    #pipe.to(torch.bfloat16)
 
     #pipe.to(torch.device("cuda:0"))
     #pipe.vae.to(torch.bfloat16)
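Taken together, the commit makes three changes to load_and_prepare_model: it swaps the ford442/sdxl-vae-bf16 VAE for the full-precision stabilityai/sdxl-vae and attaches it to the pipeline only after pipe.to(device), casting it to bfloat16 at that point; it loads the Euler Ancestral scheduler configuration from SG161222/RealVisXL_V5.0; and it replaces the pipeline's UNet with the one from the full-precision SG161222/RealVisXL_V5.0 checkpoint cast to bfloat16, instead of casting the whole pipeline with pipe.to(torch.bfloat16). The sketch below is only an illustration of that resulting load path, not the full app.py: the device fallback, the torch_dtype argument on the bf16 pipeline (those diff lines are not shown), the scheduler's from_pretrained call (used here in place of the commit's string form of from_config), and the explicit device move of the VAE are assumptions rather than parts of the commit.

# Minimal sketch of the post-commit load path; assumptions are noted inline.
import torch
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

# Assumption: app.py defines `device` elsewhere; a CPU fallback is added here.
device = "cuda" if torch.cuda.is_available() else "cpu"

def load_and_prepare_model(model_id: str = "ford442/RealVisXL_V5.0_BF16"):
    # Full-precision SDXL VAE, replacing the ford442/sdxl-vae-bf16 checkpoint.
    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae")

    # Scheduler config taken from the SG161222/RealVisXL_V5.0 repo.
    # Assumption: from_pretrained is used here; the commit passes the repo id
    # to EulerAncestralDiscreteScheduler.from_config directly.
    sched = EulerAncestralDiscreteScheduler.from_pretrained(
        "SG161222/RealVisXL_V5.0",
        subfolder="scheduler",
        beta_schedule="scaled_linear",
    )

    # bf16 checkpoint with the scheduler installed at load time.
    # Assumption: torch_dtype=torch.bfloat16; those lines are hidden in the diff.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        scheduler=sched,
        torch_dtype=torch.bfloat16,
    )

    # The commit loads the full-precision RealVisXL pipeline only to take its
    # UNet, which is cast to bfloat16 and swapped into `pipe`.
    pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0")
    pipe.unet = pipeX.unet.to(torch.bfloat16)

    pipe.to(device)
    # The commit casts the VAE to bfloat16 after the device move; the explicit
    # device argument below is an assumption so the sketch runs standalone.
    pipe.vae = vae.to(device=device, dtype=torch.bfloat16)
    return pipe

If only the UNet weights are wanted, loading them with UNet2DConditionModel.from_pretrained("SG161222/RealVisXL_V5.0", subfolder="unet", torch_dtype=torch.bfloat16) would avoid materialising the second full pipeline; the commit keeps the whole pipeX object instead.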