ford442 committed
Commit e208bb7 · verified · 1 Parent(s): 6a81bc9

Update app.py

Files changed (1):
  1. app.py +9 -13
app.py CHANGED
@@ -20,10 +20,6 @@ import cyper
 from image_gen_aux import UpscaleWithModel
 import torch
 
-import preallocate_cuda_memory as pc
-mc = pc.MemoryController(0) # 0 is the GPU index
-mc.occupy_all_available_memory()
-
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
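The deleted block above grabbed all free VRAM on GPU 0 at startup via the preallocate_cuda_memory package; dropping it hands memory management back to PyTorch's caching allocator. If a ceiling on usage is still wanted, a minimal sketch using only stock torch.cuda calls (the 0.9 fraction is an illustrative value, not from the app):

```python
import torch

if torch.cuda.is_available():
    # Cap this process at ~90% of GPU 0's VRAM (illustrative fraction).
    torch.cuda.set_per_process_memory_fraction(0.9, device=0)
    # Inspect what the caching allocator currently holds.
    print(f"allocated: {torch.cuda.memory_allocated(0):,} bytes")
    print(f"reserved:  {torch.cuda.memory_reserved(0):,} bytes")
```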
@@ -118,7 +114,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
         # pipe.vae = vae_a
         # pipe.unet = unet_a
         torch.backends.cudnn.deterministic = False
-        pipe.unet.set_default_attn_processor()
+        #pipe.unet.set_default_attn_processor()
         print("-- swapping scheduler --")
         # pipeline.scheduler = heun_scheduler
         #pipe.scheduler.set_timesteps(num_inference_steps*.70)
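The attention reset is commented out rather than deleted, so it stays easy to re-enable. For context, in diffusers set_default_attn_processor() discards any custom attention processors on the UNet and restores the stock implementation; a sketch of toggling and inspecting it (assuming pipe.unet is a diffusers UNet2DConditionModel):

```python
def reset_attention(pipe):
    # Restore the default attention processors, dropping any custom ones.
    pipe.unet.set_default_attn_processor()
    # attn_processors maps each attention module name to its processor.
    print(f"{len(pipe.unet.attn_processors)} attention processors active")
```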
@@ -213,7 +209,7 @@ FTP_PASS = "GoogleBez12!"
 def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
     # adjust the batch_size of prompt_embeds according to guidance_scale
     if step_index == int(pipeline.num_timesteps * 0.1):
-        print("-- swapping scheduler --")
+        print("-- swapping torch modes --")
         # pipeline.scheduler = euler_scheduler
         torch.set_float32_matmul_precision("high")
         # pipe.vae = vae_b
@@ -240,7 +236,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
         # pipe.vae = vae_a
         # pipe.unet = unet_a
         torch.backends.cudnn.deterministic = False
-        print("-- swapping scheduler --")
+        print("-- swapping torch modes --")
         # pipeline.scheduler = heun_scheduler
         #pipe.scheduler.set_timesteps(num_inference_steps*.70)
         # print(f"-- setting step {pipeline.num_timesteps * 0.9} --")
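Both renamed prints now describe what the callback actually does: the scheduler swap stays commented out, and what runs is a flip of torch precision modes at fixed fractions of the denoising loop. In diffusers, a callback_on_step_end hook receives the pipeline, step index, timestep, and a kwargs dict, and must return that dict. A stripped-down sketch of the pattern (the 0.9 threshold mirrors the commented-out print above and is illustrative, as are the precision values):

```python
import torch

def swap_torch_modes(pipeline, step_index, timestep, callback_kwargs):
    # Early in sampling: trade matmul precision for speed.
    if step_index == int(pipeline.num_timesteps * 0.1):
        print("-- swapping torch modes --")
        torch.set_float32_matmul_precision("high")
    # Near the end: back to full-precision matmuls (illustrative threshold).
    if step_index == int(pipeline.num_timesteps * 0.9):
        print("-- swapping torch modes --")
        torch.set_float32_matmul_precision("highest")
    # diffusers expects the (possibly modified) kwargs dict back.
    return callback_kwargs
```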
@@ -292,7 +288,7 @@ def generate_30(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-    # generator = torch.Generator(device='cuda').manual_seed(seed)
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -301,7 +297,7 @@ def generate_30(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-        # "generator": generator,
+        "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": pyx.scheduler_swap_callback
     }
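This pair of hunks is the functional heart of the commit: generate_30 previously drew a random seed and discarded it, leaving the pipeline's sampling noise nondeterministic; now the seed drives a CUDA-device torch.Generator that is passed into the call, so logging the seed makes any image reproducible. The idiom in isolation (MAX_SEED and pipe stand in for the app's objects):

```python
import random
import torch

MAX_SEED = 2**32 - 1  # stand-in for the app's constant

def seeded_generator():
    seed = random.randint(0, MAX_SEED)
    # Seeding a CUDA-device Generator pins the pipeline's sampling noise.
    generator = torch.Generator(device="cuda").manual_seed(seed)
    return seed, generator

# usage sketch: keep the seed so the image can be regenerated later
# seed, gen = seeded_generator()
# image = pipe(**{**options, "generator": gen}).images[0]
```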
@@ -343,7 +339,7 @@ def generate_60(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-    # generator = torch.Generator(device='cuda').manual_seed(seed)
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -352,7 +348,7 @@ def generate_60(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-        # "generator": generator,
+        "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": scheduler_swap_callback
     }
@@ -384,7 +380,7 @@ def generate_90(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-    # generator = torch.Generator(device='cuda').manual_seed(seed)
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -393,7 +389,7 @@ def generate_90(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-        # "generator": generator,
+        "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": scheduler_swap_callback
     }
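generate_60 and generate_90 receive the identical two-line change, leaving the three entry points differing only in their callback reference and step budget. If this duplication keeps growing, the shared dict could be pulled into one place; a hypothetical sketch (build_options does not exist in app.py, and the "width" key is inferred from the surrounding context lines):

```python
def build_options(prompt, negative_prompt, width, height,
                  guidance_scale, num_inference_steps, generator, callback):
    # Hypothetical helper: one place to assemble the kwargs shared by
    # generate_30 / generate_60 / generate_90.
    return {
        "prompt": [prompt],
        "negative_prompt": [negative_prompt],
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
        "callback_on_step_end": callback,
    }
```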
 