NightRaven109 committed on
Commit
77f9404
·
verified ·
1 Parent(s): b000100

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -36
app.py CHANGED
@@ -18,7 +18,6 @@ class Args:
18
  def __init__(self, **kwargs):
19
  self.__dict__.update(kwargs)
20
 
21
- # Initialize models at startup
22
  @spaces.GPU
23
  def initialize_models():
24
  global pipeline, generator, accelerator
@@ -50,27 +49,24 @@ def initialize_models():
50
  # Load pipeline
51
  pipeline = load_pipeline(args, accelerator, enable_xformers_memory_efficient_attention=False)
52
 
53
- # Ensure all models are in eval mode and on CUDA
54
- pipeline = pipeline.to("cuda")
55
  pipeline.unet.eval()
56
  pipeline.controlnet.eval()
57
  pipeline.vae.eval()
58
  pipeline.text_encoder.eval()
59
 
 
 
 
60
  # Initialize generator
61
  generator = torch.Generator("cuda")
62
 
63
- print("Models initialized and ready!")
64
  return True
65
 
66
  except Exception as e:
67
  print(f"Error initializing models: {str(e)}")
68
  return False
69
 
70
- # Load models at module level
71
- print("Initializing models...")
72
- initialize_models()
73
-
74
  @spaces.GPU(processing_timeout=180)
75
  def process_image(
76
  input_image,
@@ -83,16 +79,13 @@ def process_image(
83
  upscale_factor=4,
84
  color_fix_method="adain"
85
  ):
86
- global pipeline, generator
87
 
88
  try:
89
- # Handle seed
90
- if seed is not None and seed != 0: # Only set seed if it's provided and not 0
91
- if generator is None:
92
- generator = torch.Generator("cuda")
93
- generator.manual_seed(seed)
94
- elif generator is None:
95
- generator = torch.Generator("cuda")
96
 
97
  # Create args object with all necessary parameters
98
  args = Args(
@@ -112,10 +105,14 @@ def process_image(
112
  tile_diffusion_stride=None,
113
  start_steps=999,
114
  start_point='lr',
115
- use_vae_encode_condition=True,
116
  sample_times=1
117
  )
118
 
 
 
 
 
119
  # Process input image
120
  validation_image = Image.fromarray(input_image)
121
  ori_width, ori_height = validation_image.size
@@ -131,27 +128,42 @@ def process_image(
131
  validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8))
132
  width, height = validation_image.size
133
 
 
 
 
 
 
 
 
134
  # Generate image
135
  with torch.no_grad():
136
- inference_time, output = pipeline(
137
- args.t_max,
138
- args.t_min,
139
- args.tile_diffusion,
140
- args.tile_diffusion_size,
141
- args.tile_diffusion_stride,
142
- args.added_prompt,
143
- validation_image,
144
- num_inference_steps=args.num_inference_steps,
145
- generator=generator,
146
- height=height,
147
- width=width,
148
- guidance_scale=args.guidance_scale,
149
- negative_prompt=args.negative_prompt,
150
- conditioning_scale=args.conditioning_scale,
151
- start_steps=args.start_steps,
152
- start_point=args.start_point,
153
- use_vae_encode_condition=True,
154
- )
 
 
 
 
 
 
 
 
155
 
156
  image = output.images[0]
157
 
@@ -172,6 +184,7 @@ def process_image(
172
  traceback.print_exc()
173
  return None
174
 
 
175
  # Define default values
176
  DEFAULT_VALUES = {
177
  "prompt": "clean, texture, high-resolution, 8k",
 
18
  def __init__(self, **kwargs):
19
  self.__dict__.update(kwargs)
20
 
 
21
  @spaces.GPU
22
  def initialize_models():
23
  global pipeline, generator, accelerator
 
49
  # Load pipeline
50
  pipeline = load_pipeline(args, accelerator, enable_xformers_memory_efficient_attention=False)
51
 
52
+ # Ensure all models are in eval mode
 
53
  pipeline.unet.eval()
54
  pipeline.controlnet.eval()
55
  pipeline.vae.eval()
56
  pipeline.text_encoder.eval()
57
 
58
+ # Move pipeline to CUDA
59
+ pipeline = pipeline.to("cuda")
60
+
61
  # Initialize generator
62
  generator = torch.Generator("cuda")
63
 
 
64
  return True
65
 
66
  except Exception as e:
67
  print(f"Error initializing models: {str(e)}")
68
  return False
69
 
 
 
 
 
70
  @spaces.GPU(processing_timeout=180)
71
  def process_image(
72
  input_image,
 
79
  upscale_factor=4,
80
  color_fix_method="adain"
81
  ):
82
+ global pipeline, generator, accelerator
83
 
84
  try:
85
+ # Initialize models if not already done
86
+ if pipeline is None:
87
+ if not initialize_models():
88
+ return None
 
 
 
89
 
90
  # Create args object with all necessary parameters
91
  args = Args(
 
105
  tile_diffusion_stride=None,
106
  start_steps=999,
107
  start_point='lr',
108
+ use_vae_encode_condition=True, # Changed to True
109
  sample_times=1
110
  )
111
 
112
+ # Set seed if provided
113
+ if seed is not None:
114
+ generator.manual_seed(seed)
115
+
116
  # Process input image
117
  validation_image = Image.fromarray(input_image)
118
  ori_width, ori_height = validation_image.size
 
128
  validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8))
129
  width, height = validation_image.size
130
 
131
+ # Ensure pipeline is on CUDA and in eval mode
132
+ pipeline = pipeline.to("cuda")
133
+ pipeline.unet.eval()
134
+ pipeline.controlnet.eval()
135
+ pipeline.vae.eval()
136
+ pipeline.text_encoder.eval()
137
+
138
  # Generate image
139
  with torch.no_grad():
140
+ try:
141
+ # First encode the image with VAE
142
+ image_tensor = pipeline.image_processor.preprocess(validation_image)
143
+ image_tensor = image_tensor.unsqueeze(0).to(device="cuda", dtype=torch.float32)
144
+
145
+ inference_time, output = pipeline(
146
+ args.t_max,
147
+ args.t_min,
148
+ args.tile_diffusion,
149
+ args.tile_diffusion_size,
150
+ args.tile_diffusion_stride,
151
+ args.added_prompt,
152
+ validation_image,
153
+ num_inference_steps=args.num_inference_steps,
154
+ generator=generator,
155
+ height=height,
156
+ width=width,
157
+ guidance_scale=args.guidance_scale,
158
+ negative_prompt=args.negative_prompt,
159
+ conditioning_scale=args.conditioning_scale,
160
+ start_steps=args.start_steps,
161
+ start_point=args.start_point,
162
+ use_vae_encode_condition=True, # Set to True
163
+ )
164
+ except Exception as e:
165
+ print(f"Pipeline execution error: {str(e)}")
166
+ raise
167
 
168
  image = output.images[0]
169
 
 
184
  traceback.print_exc()
185
  return None
186
 
187
+
188
  # Define default values
189
  DEFAULT_VALUES = {
190
  "prompt": "clean, texture, high-resolution, 8k",