zhangyang-0123 committed on
Commit
d2d3f28
·
1 Parent(s): fa31c55

change layout to host only one model

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -11,8 +11,8 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
11
  model_repo_id = "black-forest-labs/FLUX.1-schnell" # Replace to the model you would like to use
12
  torch_dtype = torch.bfloat16
13
 
14
- pipe = FluxPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
15
- pipe = pipe.to(device)
16
 
17
  # load pruned model
18
  pruned_pipe = FluxPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
@@ -29,14 +29,12 @@ MAX_IMAGE_SIZE = 1024
29
 
30
  @spaces.GPU
31
  def generate_images(prompt, seed, steps):
32
- pipe.to("cuda")
33
- pruned_pipe.to("cuda")
34
  # Run the model and return images directly
35
- g_cpu = torch.Generator("cuda").manual_seed(seed)
36
- original_image = pipe(prompt=prompt, generator=g_cpu, num_inference_steps=steps).images[0]
37
  g_cpu = torch.Generator("cuda").manual_seed(seed)
38
  ecodiff_image = pruned_pipe(prompt=prompt, generator=g_cpu, num_inference_steps=steps).images[0]
39
- return original_image, ecodiff_image
40
 
41
 
42
  examples = [
@@ -56,6 +54,8 @@ css = """
56
 
57
  header = """
58
  # 🌱 EcoDiff Pruned FLUX-Schnell (20% Pruning Ratio)
 
 
59
  """
60
 
61
  header_2 = """
@@ -95,7 +95,7 @@ with gr.Blocks(css=css) as demo:
95
  inputs=[prompt],
96
  )
97
  with gr.Row():
98
- original_output = gr.Image(label="Original Output")
99
  ecodiff_output = gr.Image(label="EcoDiff Output")
100
  gr.on(
101
  triggers=[generate_btn.click, prompt.submit],
@@ -105,7 +105,7 @@ with gr.Blocks(css=css) as demo:
105
  seed,
106
  steps,
107
  ],
108
- outputs=[original_output, ecodiff_output],
109
  )
110
 
111
  if __name__ == "__main__":
 
11
  model_repo_id = "black-forest-labs/FLUX.1-schnell" # Replace to the model you would like to use
12
  torch_dtype = torch.bfloat16
13
 
14
+ # pipe = FluxPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
15
+ # pipe = pipe.to(device)
16
 
17
  # load pruned model
18
  pruned_pipe = FluxPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 
29
 
30
  @spaces.GPU
31
  def generate_images(prompt, seed, steps):
 
 
32
  # Run the model and return images directly
33
+ # g_cpu = torch.Generator("cuda").manual_seed(seed)
34
+ # original_image = pipe(prompt=prompt, generator=g_cpu, num_inference_steps=steps).images[0]
35
  g_cpu = torch.Generator("cuda").manual_seed(seed)
36
  ecodiff_image = pruned_pipe(prompt=prompt, generator=g_cpu, num_inference_steps=steps).images[0]
37
+ return ecodiff_image
38
 
39
 
40
  examples = [
 
54
 
55
  header = """
56
  # 🌱 EcoDiff Pruned FLUX-Schnell (20% Pruning Ratio)
57
+
58
+ We are not able to host two FLUX models in the same space, so we only show the pruned model here.
59
  """
60
 
61
  header_2 = """
 
95
  inputs=[prompt],
96
  )
97
  with gr.Row():
98
+ # original_output = gr.Image(label="Original Output")
99
  ecodiff_output = gr.Image(label="EcoDiff Output")
100
  gr.on(
101
  triggers=[generate_btn.click, prompt.submit],
 
105
  seed,
106
  steps,
107
  ],
108
+ outputs=[ecodiff_output],
109
  )
110
 
111
  if __name__ == "__main__":