Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -27,7 +27,7 @@ torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-
+torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 # torch.backends.cuda.preferred_linalg_library="cusolver"
 
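Note (a plausible rationale, not stated in the commit): torch.backends.cudnn.benchmark is process-global state, so the old per-function assignments below (False in generate_30, True in generate_60 and generate_90) meant each request flipped the autotuner mode for the whole process. Pinning it to False once at import, next to the other backend flags, keeps behavior independent of which endpoint ran last. A minimal sketch of the per-call toggling problem (function names are illustrative, not from app.py):

import torch

def endpoint_a():
    torch.backends.cudnn.benchmark = False   # mutates process-global state...

def endpoint_b():
    torch.backends.cudnn.benchmark = True    # ...so whichever ran last wins for all callers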
@@ -267,9 +267,9 @@ def generate_30(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = False
-    torch.cuda.empty_cache()
-    gc.collect()
+    #torch.backends.cudnn.benchmark = False
+    #torch.cuda.empty_cache()
+    #gc.collect()
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
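The per-call torch.cuda.empty_cache() and gc.collect() are commented out rather than deleted. Both are relatively expensive (a full garbage-collection pass plus returning cached CUDA blocks to the driver, forcing later allocations back through cudaMalloc), so running them on every generation mostly adds latency. If cache pressure ever does become a problem, one alternative is to free only on an actual OOM; a hedged sketch with a hypothetical helper that is not part of app.py:

import gc
import torch

def run_with_oom_fallback(fn):
    # Hypothetical helper: try once; only on CUDA OOM free caches and retry.
    try:
        return fn()
    except torch.cuda.OutOfMemoryError:
        gc.collect()               # drop dead Python references first
        torch.cuda.empty_cache()   # then release cached CUDA blocks to the driver
        return fn()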
@@ -329,9 +329,9 @@ def generate_60(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = True
-    torch.cuda.empty_cache()
-    gc.collect()
+    #torch.backends.cudnn.benchmark = True
+    #torch.cuda.empty_cache()
+    #gc.collect()
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
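Context on the unchanged progress=gr.Progress(track_tqdm=True) parameter: when a Gradio event handler declares it, Gradio injects a progress tracker, and with track_tqdm=True any tqdm bars raised inside the call (such as a diffusers denoising loop) are mirrored into the UI. A standalone sketch, independent of this Space:

import time
import gradio as gr
from tqdm import tqdm

def slow_task(steps, progress=gr.Progress(track_tqdm=True)):
    # tqdm progress inside the handler is forwarded to the Gradio progress bar
    for _ in tqdm(range(int(steps)), desc="denoising"):
        time.sleep(0.05)
    return "done"

gr.Interface(slow_task, gr.Number(value=30), "text").launch()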
@@ -391,9 +391,9 @@ def generate_90(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = True
-    torch.cuda.empty_cache()
-    gc.collect()
+    #torch.backends.cudnn.benchmark = True
+    #torch.cuda.empty_cache()
+    #gc.collect()
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))