mokady committed on
Commit
3b994bf
·
verified ·
1 Parent(s): 73aa562

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -23
app.py CHANGED
@@ -25,29 +25,29 @@ del vae
25
  pipe.force_zeros_for_empty_prompt = False
26
  negative_prompt= "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
27
 
28
- print("Optimizing BRIA 2.2 HD - this could take a while")
29
- t=time.time()
30
- pipe.unet = torch.compile(
31
- pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
32
- )
33
- with torch.no_grad():
34
- outputs = pipe(
35
- prompt="an apple",
36
- num_inference_steps=30,
37
- width=1536,
38
- height=1536,
39
- negative_prompt=negative_prompt
40
- )
41
-
42
- # This will avoid future compilations on different shapes
43
- unet_compiled = torch._dynamo.run(pipe.unet)
44
- unet_compiled.config=pipe.unet.config
45
- unet_compiled.add_embedding = Dummy()
46
- unet_compiled.add_embedding.linear_1 = Dummy()
47
- unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
48
- pipe.unet = unet_compiled
49
-
50
- print(f"Optimizing finished successfully after {time.time()-t} secs")
51
 
52
  @spaces.GPU(enable_queue=True)
53
  def infer(prompt,seed,resolution):
 
25
  pipe.force_zeros_for_empty_prompt = False
26
  negative_prompt= "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
27
 
28
+ # print("Optimizing BRIA 2.2 HD - this could take a while")
29
+ # t=time.time()
30
+ # pipe.unet = torch.compile(
31
+ # pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
32
+ # )
33
+ # with torch.no_grad():
34
+ # outputs = pipe(
35
+ # prompt="an apple",
36
+ # num_inference_steps=30,
37
+ # width=1536,
38
+ # height=1536,
39
+ # negative_prompt=negative_prompt
40
+ # )
41
+
42
+ # # This will avoid future compilations on different shapes
43
+ # unet_compiled = torch._dynamo.run(pipe.unet)
44
+ # unet_compiled.config=pipe.unet.config
45
+ # unet_compiled.add_embedding = Dummy()
46
+ # unet_compiled.add_embedding.linear_1 = Dummy()
47
+ # unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
48
+ # pipe.unet = unet_compiled
49
+
50
+ # print(f"Optimizing finished successfully after {time.time()-t} secs")
51
 
52
  @spaces.GPU(enable_queue=True)
53
  def infer(prompt,seed,resolution):