wren93 committed on
Commit
db932ed
·
1 Parent(s): 92813e4
Files changed (1) hide show
  1. app.py +53 -48
app.py CHANGED
@@ -42,7 +42,7 @@ os.makedirs(savedir, exist_ok=True)
42
  # config models
43
  pipeline = ConditionalAnimationPipeline.from_pretrained("TIGER-Lab/ConsistI2V")
44
  pipeline.to("cuda")
45
- # pipeline.to("cuda")
46
 
47
  def update_textbox_and_save_image(input_image, height_slider, width_slider, center_crop):
48
  pil_image = Image.fromarray(input_image.astype(np.uint8)).convert("RGB")
@@ -72,6 +72,7 @@ def update_textbox_and_save_image(input_image, height_slider, width_slider, cent
72
  return gr.Textbox(value=img_path), gr.Image(value=np.array(pil_image))
73
 
74
 
 
75
  def animate(
76
  prompt_textbox,
77
  negative_prompt_textbox,
@@ -136,6 +137,7 @@ def animate(
136
  ])
137
 
138
  first_frame = img_transform(first_frame).unsqueeze(0)
 
139
 
140
  if use_frameinit:
141
  pipeline.init_filter(
@@ -145,20 +147,23 @@ def animate(
145
  filter_params = OmegaConf.create({'method': 'gaussian', 'd_s': 0.25, 'd_t': 0.25,})
146
  )
147
 
148
- sample = run_pipeline(
149
- pipeline,
150
  prompt_textbox,
151
- negative_prompt_textbox,
152
- first_frame,
153
- sample_step_slider,
154
- width_slider,
155
- height_slider,
156
- txt_cfg_scale_slider,
157
- img_cfg_scale_slider,
158
- frame_stride,
159
- use_frameinit,
160
- frame_init_noise_level,
161
- )
 
 
 
 
162
 
163
  global sample_idx
164
  sample_idx += 1
@@ -186,41 +191,41 @@ def animate(
186
  return gr.Video(value=save_sample_path)
187
 
188
 
189
- @spaces.GPU
190
- def run_pipeline(
191
- pipeline,
192
- prompt_textbox,
193
- negative_prompt_textbox,
194
- first_frame,
195
- sample_step_slider,
196
- width_slider,
197
- height_slider,
198
- txt_cfg_scale_slider,
199
- img_cfg_scale_slider,
200
- frame_stride,
201
- use_frameinit,
202
- frame_init_noise_level,
203
 
204
- ):
205
- first_frame = first_frame.to("cuda")
206
- sample = pipeline(
207
- prompt_textbox,
208
- negative_prompt = negative_prompt_textbox,
209
- first_frames = first_frame,
210
- num_inference_steps = sample_step_slider,
211
- guidance_scale_txt = txt_cfg_scale_slider,
212
- guidance_scale_img = img_cfg_scale_slider,
213
- width = width_slider,
214
- height = height_slider,
215
- video_length = 16,
216
- noise_sampling_method = "pyoco_mixed",
217
- noise_alpha = 1.0,
218
- frame_stride = frame_stride,
219
- use_frameinit = use_frameinit,
220
- frameinit_noise_level = frame_init_noise_level,
221
- camera_motion = None,
222
- ).videos
223
- return sample
224
 
225
 
226
  def ui():
 
42
  # config models
43
  pipeline = ConditionalAnimationPipeline.from_pretrained("TIGER-Lab/ConsistI2V")
44
  pipeline.to("cuda")
45
+
46
 
47
  def update_textbox_and_save_image(input_image, height_slider, width_slider, center_crop):
48
  pil_image = Image.fromarray(input_image.astype(np.uint8)).convert("RGB")
 
72
  return gr.Textbox(value=img_path), gr.Image(value=np.array(pil_image))
73
 
74
 
75
+ @spaces.GPU(duration=30)
76
  def animate(
77
  prompt_textbox,
78
  negative_prompt_textbox,
 
137
  ])
138
 
139
  first_frame = img_transform(first_frame).unsqueeze(0)
140
+ first_frame = first_frame.to("cuda")
141
 
142
  if use_frameinit:
143
  pipeline.init_filter(
 
147
  filter_params = OmegaConf.create({'method': 'gaussian', 'd_s': 0.25, 'd_t': 0.25,})
148
  )
149
 
150
+ sample = pipeline(
 
151
  prompt_textbox,
152
+ negative_prompt = negative_prompt_textbox,
153
+ first_frames = first_frame,
154
+ num_inference_steps = sample_step_slider,
155
+ guidance_scale_txt = txt_cfg_scale_slider,
156
+ guidance_scale_img = img_cfg_scale_slider,
157
+ width = width_slider,
158
+ height = height_slider,
159
+ video_length = 16,
160
+ noise_sampling_method = "pyoco_mixed",
161
+ noise_alpha = 1.0,
162
+ frame_stride = frame_stride,
163
+ use_frameinit = use_frameinit,
164
+ frameinit_noise_level = frame_init_noise_level,
165
+ camera_motion = None,
166
+ ).videos
167
 
168
  global sample_idx
169
  sample_idx += 1
 
191
  return gr.Video(value=save_sample_path)
192
 
193
 
194
+ # @spaces.GPU
195
+ # def run_pipeline(
196
+ # pipeline,
197
+ # prompt_textbox,
198
+ # negative_prompt_textbox,
199
+ # first_frame,
200
+ # sample_step_slider,
201
+ # width_slider,
202
+ # height_slider,
203
+ # txt_cfg_scale_slider,
204
+ # img_cfg_scale_slider,
205
+ # frame_stride,
206
+ # use_frameinit,
207
+ # frame_init_noise_level,
208
 
209
+ # ):
210
+ # first_frame = first_frame.to("cuda")
211
+ # sample = pipeline(
212
+ # prompt_textbox,
213
+ # negative_prompt = negative_prompt_textbox,
214
+ # first_frames = first_frame,
215
+ # num_inference_steps = sample_step_slider,
216
+ # guidance_scale_txt = txt_cfg_scale_slider,
217
+ # guidance_scale_img = img_cfg_scale_slider,
218
+ # width = width_slider,
219
+ # height = height_slider,
220
+ # video_length = 16,
221
+ # noise_sampling_method = "pyoco_mixed",
222
+ # noise_alpha = 1.0,
223
+ # frame_stride = frame_stride,
224
+ # use_frameinit = use_frameinit,
225
+ # frameinit_noise_level = frame_init_noise_level,
226
+ # camera_motion = None,
227
+ # ).videos
228
+ # return sample
229
 
230
 
231
  def ui():