ford442 committed · Commit 3338fd0 · verified · Parent(s): 3739ae6

Update app.py

Files changed (1):
  1. app.py +9 -19
app.py CHANGED
@@ -25,7 +25,6 @@ import datetime
  from gradio import themes
  from hidiffusion import apply_hidiffusion, remove_hidiffusion
  import gc
- from diffusers.utils.torch_utils import randn_tensor

  torch.backends.cuda.matmul.allow_tf32 = False
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
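
With the manual latent preparation removed below, nothing references `randn_tensor` anymore, so the import goes too. When no `latents` are supplied, the pipeline samples its own starting noise; a minimal sketch of that default behaviour, assuming an SDXL-style pipeline whose VAE downscales by 8 (names are illustrative, not the library's exact code):

```python
import torch

# Illustrative sketch of the noise a diffusers pipeline creates internally
# when `latents` is None; the function name and fixed VAE scale factor of 8
# are assumptions for illustration.
def default_latents(generator, height, width, device="cuda", dtype=torch.bfloat16):
    shape = (1, 4, height // 8, width // 8)  # (batch, latent channels, H/8, W/8)
    return torch.randn(shape, generator=generator, device=device, dtype=dtype)
```
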
@@ -149,7 +148,7 @@ def load_and_prepare_model(model_id):
  #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
  #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")

- pipe.to(device=torch.device('cuda'), dtype=torch.bfloat16)
+ pipe.to(device=device, dtype=torch.bfloat16)
  #pipe.to(torch.bfloat16)

  #apply_hidiffusion(pipe)
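
The hard-coded `torch.device('cuda')` gives way to a shared `device` variable, which this diff assumes is defined at module scope elsewhere in app.py. A plausible definition (the variable name matches the diff; the CPU fallback is an assumption, not shown here):

```python
import torch

# Assumed module-level device reused by pipe.to(...) and set_timesteps(...);
# the CPU fallback is an assumption for portability.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```
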
@@ -252,9 +251,7 @@ def generate_30(
  #if juggernaut == True:
  # pipe.vae=vaeX
  seed = int(randomize_seed_fn(seed, randomize_seed))
- generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
- latent_size = int(height / 8)
- latent_input = randn_tensor(shape=(1,4,latent_size,latent_size),generator=generator, device=torch.device('cuda'), dtype=torch.bfloat16)
+ generator = torch.Generator(device='cuda').manual_seed(seed)
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
  options = {
  "prompt": [prompt],
@@ -268,12 +265,11 @@ def generate_30(
  "generator": generator,
  # "timesteps": sampling_schedule,
  "output_type": "pil",
- "latents": latent_input
  }
  if use_resolution_binning:
  options["use_resolution_binning"] = True
  images = []
- pipe.scheduler.set_timesteps(num_inference_steps,torch.device('cuda'))
+ pipe.scheduler.set_timesteps(num_inference_steps,device)
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
  uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
  batch_options = options.copy()
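
With `"latents"` dropped from `options`, the pipeline draws its initial noise from `generator` instead, so a fixed seed should still give a fixed image. A usage sketch (`pipe` and the prompt are stand-ins, and the `cudnn.benchmark = True` change below can cost exact run-to-run determinism):

```python
gen_a = torch.Generator(device='cuda').manual_seed(1234)
gen_b = torch.Generator(device='cuda').manual_seed(1234)

# Same seed, no explicit latents: both calls should yield the same image,
# kernel determinism permitting.
img_a = pipe(prompt="a lighthouse at dusk", generator=gen_a, num_inference_steps=30).images[0]
img_b = pipe(prompt="a lighthouse at dusk", generator=gen_b, num_inference_steps=30).images[0]
```
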
@@ -306,7 +302,7 @@ def generate_60(
  denoise: float = 0.3,
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
  ):
- torch.backends.cudnn.benchmark = False
+ torch.backends.cudnn.benchmark = True
  torch.cuda.empty_cache()
  gc.collect()
  global models
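
Flipping `torch.backends.cudnn.benchmark` to True lets cuDNN autotune convolution algorithms per input shape: repeated runs at a fixed resolution get faster, while the first call for each new shape pays a search cost. A small self-contained sketch of that warm-up effect:

```python
import time
import torch

torch.backends.cudnn.benchmark = True  # autotune conv algorithms per shape

conv = torch.nn.Conv2d(4, 4, kernel_size=3, padding=1).cuda()
x = torch.randn(1, 4, 128, 128, device='cuda')

for label in ("first call (includes algorithm search)", "steady state"):
    torch.cuda.synchronize()
    t0 = time.time()
    conv(x)
    torch.cuda.synchronize()
    print(f"{label}: {time.time() - t0:.4f}s")
```
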
@@ -314,9 +310,7 @@ def generate_60(
  #if juggernaut == True:
  # pipe.vae=vaeX
  seed = int(randomize_seed_fn(seed, randomize_seed))
- generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
- latent_size = int(height / 8)
- latent_input = randn_tensor(shape=(1,4,latent_size,latent_size),generator=generator, device=torch.device('cuda'), dtype=torch.bfloat16)
+ generator = torch.Generator(device='cuda').manual_seed(seed)
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
  options = {
  "prompt": [prompt],
@@ -330,12 +324,11 @@ def generate_60(
  "generator": generator,
  # "timesteps": sampling_schedule,
  "output_type": "pil",
- "latents": latent_input
  }
  if use_resolution_binning:
  options["use_resolution_binning"] = True
  images = []
- pipe.scheduler.set_timesteps(num_inference_steps,torch.device('cuda'))
+ pipe.scheduler.set_timesteps(num_inference_steps,device)
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
  uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
  batch_options = options.copy()
@@ -368,7 +361,7 @@ def generate_90(
  denoise: float = 0.3,
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
  ):
- torch.backends.cudnn.benchmark = False
+ torch.backends.cudnn.benchmark = True
  torch.cuda.empty_cache()
  gc.collect()
  global models
@@ -376,9 +369,7 @@ def generate_90(
  #if juggernaut == True:
  # pipe.vae=vaeX
  seed = int(randomize_seed_fn(seed, randomize_seed))
- generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
- latent_size = int(height / 8)
- latent_input = randn_tensor(shape=(1,4,latent_size,latent_size),generator=generator, device=torch.device('cuda'), dtype=torch.bfloat16)
+ generator = torch.Generator(device='cuda').manual_seed(seed)
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
  options = {
  "prompt": [prompt],
@@ -392,12 +383,11 @@ def generate_90(
  "generator": generator,
  # "timesteps": sampling_schedule,
  "output_type": "pil",
- "latents": latent_input
  }
  if use_resolution_binning:
  options["use_resolution_binning"] = True
  images = []
- pipe.scheduler.set_timesteps(num_inference_steps,torch.device('cuda'))
+ pipe.scheduler.set_timesteps(num_inference_steps,device)
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
  uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
  batch_options = options.copy()
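
Across all three generate_* functions the scheduler call now passes the shared `device` instead of a fresh `torch.device('cuda')`. diffusers schedulers such as EulerAncestralDiscreteScheduler accept the target device as the second argument of `set_timesteps(num_inference_steps, device=None)`, so the positional form used here is equivalent to the keyword form:

```python
# `pipe` and `device` as assumed above.
pipe.scheduler.set_timesteps(30, device)          # positional, as in this commit
pipe.scheduler.set_timesteps(30, device=device)   # equivalent keyword spelling
```
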
 