Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -33,7 +33,7 @@ torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-torch.backends.cudnn.benchmark = False
+#torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 torch.backends.cuda.preferred_linalg_library="cusolver"
 
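The only change in this block is commenting out the cudnn.benchmark assignment. As a minimal sketch of what that flag controls (not part of the Space's code; behavior as documented by PyTorch):

import torch

# With benchmark = True, cuDNN times several convolution algorithms for each
# new input shape on first use and caches the fastest one; with False it picks
# an algorithm heuristically. PyTorch's default is False, so commenting out
# the assignment keeps the same behavior unless other code in the process
# sets the flag first.
print(torch.backends.cudnn.benchmark)  # False on a fresh interpreter
torch.backends.cudnn.benchmark = True  # opt in to autotuning
print(torch.backends.cudnn.benchmark)  # True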
@@ -192,8 +192,8 @@ def generate_60(
 options["use_resolution_binning"] = True
 images = []
 pipe.scheduler.set_timesteps(num_inference_steps,device)
-with torch.no_grad():
-    for i in range(0, num_images, BATCH_SIZE):
+#with torch.no_grad():
+for i in range(0, num_images, BATCH_SIZE):
 batch_options = options.copy()
 batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
 if "negative_prompt" in batch_options:
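The edit drops the explicit torch.no_grad() context and runs the batching loop at the top level of the function. A sketch of how the loop plausibly reads after the change; pipe, options, num_images, and BATCH_SIZE come from the surrounding app.py, and the negative_prompt slicing is an assumption extrapolated from the visible if statement:

BATCH_SIZE = 1  # assumption: defined elsewhere in app.py

def generate_images(pipe, options, num_images):
    # Hypothetical reconstruction of the loop around lines 195-199.
    images = []
    for i in range(0, num_images, BATCH_SIZE):
        batch_options = options.copy()
        # Each pipeline call sees at most BATCH_SIZE prompts.
        batch_options["prompt"] = options["prompt"][i:i + BATCH_SIZE]
        if "negative_prompt" in batch_options:
            batch_options["negative_prompt"] = options["negative_prompt"][i:i + BATCH_SIZE]
        # No outer torch.no_grad() anymore: this relies on the diffusers
        # pipeline's __call__, which is itself decorated with @torch.no_grad(),
        # to keep autograd disabled during denoising.
        images.extend(pipe(**batch_options).images)
    return images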
@@ -243,8 +243,8 @@ def generate_90(
 options["use_resolution_binning"] = True
 images = []
 pipe.scheduler.set_timesteps(num_inference_steps,device)
-with torch.no_grad():
-    for i in range(0, num_images, BATCH_SIZE):
+#with torch.no_grad():
+for i in range(0, num_images, BATCH_SIZE):
 batch_options = options.copy()
 batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
 if "negative_prompt" in batch_options:
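generate_90 receives the identical change. For what removing the outer context means for gradient tracking, a standalone check (plain PyTorch, no Space-specific names):

import torch

x = torch.ones(1, requires_grad=True)

with torch.no_grad():
    y = x * 2
print(y.requires_grad)  # False: ops under no_grad are not recorded

z = x * 2
print(z.requires_grad)  # True: without the context, autograd tracks the op
                        # and keeps activations alive, costing memory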
|