Spaces: Running on Zero
Update app.py
app.py CHANGED
```diff
@@ -113,21 +113,23 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
 def load_and_prepare_model(model_id):
     model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
     dtype = model_dtypes.get(model_id, torch.bfloat16)  # Default to bfloat16 if not found
-    vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None)
+    # vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None)
+    vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
+    sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
-        torch_dtype=torch.bfloat16,
+        # torch_dtype=torch.bfloat16,
         add_watermarker=False,
         use_safetensors=True,
         vae=vae,
+        scheduler=sched
     ).to('cuda')
     #pipe.to(device=device, dtype=torch.bfloat16)

     #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")
-    sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
     #sched = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++")
     #sched = DDIMScheduler.from_config(pipe.scheduler.config)
-    pipe.scheduler=sched
+    #pipe.scheduler=sched
     return pipe

 # Preload and compile both models
```
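As committed, the new `sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)` line runs before `pipe` is assigned, so calling `load_and_prepare_model` would raise a `NameError`. A minimal sketch of one working ordering, assuming the checkpoint follows the standard diffusers repo layout with a `scheduler/` subfolder (a suggestion, not the committed code):

```python
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

def load_and_prepare_model(model_id: str):
    # Load the bf16 VAE as in the commit (the safety_checker kwarg is not an
    # AutoencoderKL parameter, so it is dropped here).
    vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
    # Build the scheduler from the checkpoint's own scheduler config instead
    # of from a pipeline object that does not exist yet.
    sched = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        add_watermarker=False,
        use_safetensors=True,
        vae=vae,
        scheduler=sched,
    ).to("cuda")
    return pipe
```

The alternative is to keep the commit's structure and assign `pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)` after the pipeline is built, which is what the now-commented `#pipe.scheduler=sched` line used to do.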
```diff
@@ -223,7 +225,7 @@ def generate_60(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
+    num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     global models
```
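Both sides of this hunk show identical text for `num_images`, so the recorded change is presumably indentation only. If `num_images` is what the UI feeds into the pipeline call, the corresponding diffusers parameter is `num_images_per_prompt`; a minimal sketch under that assumption (not code from this Space):

```python
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16", use_safetensors=True
).to("cuda")
# num_images_per_prompt is the pipeline's own knob for a num_images-style
# UI parameter; it returns that many images per prompt in .images.
images = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=30,
    num_images_per_prompt=2,
).images
```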
```diff
@@ -240,6 +242,7 @@ def generate_60(
         "num_inference_steps": num_inference_steps,
         "generator": generator,
         "output_type": "pil",
+        "target_size": (width,height),
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
```
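One caution on the new option: diffusers' `StableDiffusionXLPipeline` treats `target_size` (like `original_size`) as a `(height, width)` tuple for SDXL's size micro-conditioning, and it already defaults to the requested output size. Passing `(width, height)` therefore swaps the axes whenever the output is not square. A sketch of the ordering this suggests (`build_options` is a hypothetical helper, not from this Space):

```python
from typing import Any, Dict

def build_options(num_inference_steps: int, generator, width: int, height: int) -> Dict[str, Any]:
    # Assumption: (height, width) ordering, matching the diffusers SDXL
    # micro-conditioning convention (target_size defaults to (height, width)).
    return {
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
        "target_size": (height, width),
    }
```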