John6666 committed
Commit d34f363 · verified · 1 Parent(s): 053fb4e

Upload 3 files

Files changed (3):
  1. dc.py +11 -7
  2. env.py +6 -0
  3. requirements.txt +1 -1
dc.py CHANGED
@@ -326,12 +326,12 @@ class GuiSD:
 
         print("Loading model...")
         self.model = Model_Diffusers(
-            base_model_id="cagliostrolab/animagine-xl-3.1",
+            base_model_id="Lykon/dreamshaper-8",
             task_name="txt2img",
             vae_model=None,
             type_model_precision=torch.float16,
             retain_task_model_in_cache=False,
-            #device="cpu",
+            device="cpu",
         )
         self.model.device = torch.device("cpu") #
 
@@ -367,12 +367,13 @@ class GuiSD:
             model_name,
             task_name=task_stablepy[task],
             vae_model=vae_model if vae_model != "None" else None,
-            type_model_precision=torch.float16,
+            type_model_precision=torch.float16 if "flux" not in model_name.lower() else torch.bfloat16,
             retain_task_model_in_cache=False,
         )
         yield f"Model loaded: {model_name}"
 
     @spaces.GPU
+    @torch.inference_mode()
     def generate_pipeline(
         self,
         prompt,
@@ -509,7 +510,7 @@ class GuiSD:
         vae_model = None
 
         for la in loras_list:
-            if la is not None and la != "None" and la in lora_model_list:
+            if la is not None and la != "None" and la != "" and la in lora_model_list:
                 print(la)
                 lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
                 if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
@@ -539,8 +540,9 @@ class GuiSD:
            params_ip_mode.append(modeip)
            params_ip_scale.append(scaleip)
 
+        model_precision = torch.float16 if "flux" not in model_name.lower() else torch.bfloat16
+
        # First load
-        self.model.device = torch.device("cuda:0")
        model_precision = torch.float16
        if not self.model:
            print("Loading model...")
@@ -695,8 +697,10 @@ class GuiSD:
            "ip_adapter_scale": params_ip_scale,
        }
 
-        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
-        #self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
+        self.model.device = torch.device("cuda:0")
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+            self.model.pipe.transformer.to(self.model.device)
+            print("transformer to cuda")
 
        progress(1, desc="Inference preparation completed. Starting inference...")
 
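The dc.py hunks follow one pattern: choose the load dtype from the model name (bfloat16 for FLUX checkpoints, float16 for everything else) and, for FLUX-style pipelines that expose a transformer instead of a UNet, push that transformer onto the GPU just before inference whenever any of the five LoRA slots is set. A minimal sketch of that pattern under those assumptions, with hypothetical helper names (pick_precision, prepare_for_inference) that do not appear in the commit:

import torch

def pick_precision(model_name: str) -> torch.dtype:
    # FLUX checkpoints are loaded in bfloat16; every other model stays in float16.
    return torch.bfloat16 if "flux" in model_name.lower() else torch.float16

def prepare_for_inference(model, loras_list):
    # Hypothetical mirror of the pre-inference hunk: switch the wrapper's target
    # device to the GPU, then move the FLUX transformer there explicitly when
    # at least one of the five LoRA slots is not "None".
    model.device = torch.device("cuda:0")
    if hasattr(model.pipe, "transformer") and loras_list != ["None"] * 5:
        model.pipe.transformer.to(model.device)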
env.py CHANGED
@@ -94,6 +94,12 @@ load_diffusers_format_model = [
     'Eugeoter/artiwaifu-diffusion-2.0',
     'Raelina/Rae-Diffusion-XL-V2',
     'Raelina/Raemu-XL-V4',
+    "camenduru/FLUX.1-dev-diffusers",
+    "black-forest-labs/FLUX.1-schnell",
+    "sayakpaul/FLUX.1-merged",
+    "ostris/OpenFLUX.1",
+    "multimodalart/FLUX.1-dev2pro-full",
+    "Raelina/Raemu-Flux",
 ]
 
 # List all Models for specified user
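Every repo id added here contains the substring "flux", so, assuming the selected list entry is forwarded unchanged as model_name, each of them hits the bfloat16 branch of the precision check introduced in dc.py:

# Assumption: the chosen entry is passed straight through as model_name.
new_entries = [
    "camenduru/FLUX.1-dev-diffusers",
    "black-forest-labs/FLUX.1-schnell",
    "sayakpaul/FLUX.1-merged",
    "ostris/OpenFLUX.1",
    "multimodalart/FLUX.1-dev2pro-full",
    "Raelina/Raemu-Flux",
]
assert all("flux" in name.lower() for name in new_entries)  # all would load in bfloat16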
requirements.txt CHANGED
@@ -4,7 +4,7 @@ diffusers
 invisible_watermark
 transformers
 xformers
-git+https://github.com/R3gm/stablepy.git
+git+https://github.com/R3gm/stablepy.git@flux_beta
 torch==2.2.0
 gdown
 opencv-python
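The only requirements change is the stablepy pin, which now tracks the flux_beta branch instead of the default branch. Assuming a standard pip setup, the equivalent manual install is:

pip install "git+https://github.com/R3gm/stablepy.git@flux_beta"

which is effectively what the Space runs when it processes requirements.txt.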