wangfuyun committed on
Commit 7b06b6a · verified · 1 Parent(s): fd6090f

Update app.py

Files changed (1)
1. app.py +16 -56
app.py CHANGED
@@ -152,9 +152,6 @@ class AnimateController:
 
     def animate(
         self,
-        stable_diffusion_dropdown,
-        motion_module_dropdown,
-        base_model_dropdown,
         lora_alpha_slider,
         spatial_lora_slider,
         prompt_textbox,
@@ -167,12 +164,6 @@ class AnimateController:
         cfg_scale_slider,
         seed_textbox
     ):
-        if self.unet is None:
-            raise gr.Error(f"Please select a pretrained model path.")
-        if motion_module_dropdown == "":
-            raise gr.Error(f"Please select a motion module.")
-        if base_model_dropdown == "":
-            raise gr.Error(f"Please select a base DreamBooth model.")
 
         if is_xformers_available():
             self.unet.enable_xformers_memory_efficient_attention()
@@ -232,6 +223,10 @@ class AnimateController:
 
 controller = AnimateController()
 
+controller.update_stable_diffusion("stable-diffusion-v1-5")
+controller.update_motion_module("sd15_t2v_beta_motion.ckpt")
+controller.update_base_model("realistic2.safetensors")
+
 
 def ui():
     with gr.Blocks(css=css) as demo:
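The hunk above replaces interactive model selection with a fixed startup preload: the three checkpoints are loaded once at import time, which is why the "please select a model" checks and the corresponding dropdowns disappear in the other hunks. A minimal sketch of that pattern with a stand-in controller (the class and method bodies below are illustrative assumptions, not the actual AnimateLCM implementation):

# Sketch of the startup-preload pattern applied by the hunk above (stand-in
# class; method bodies are placeholders, not the real AnimateLCM code):
# checkpoints are loaded once at module import, so animate() no longer needs
# per-request "please select a model" checks.
class DemoController:
    def __init__(self):
        self.pipeline = None
        self.motion_module = None
        self.base_model = None

    def update_stable_diffusion(self, name):
        self.pipeline = f"loaded:{name}"       # real code would build the SD pipeline

    def update_motion_module(self, name):
        self.motion_module = f"loaded:{name}"  # real code would load the motion checkpoint

    def update_base_model(self, name):
        self.base_model = f"loaded:{name}"     # real code would merge the DreamBooth weights

    def animate(self, prompt):
        # Everything was preloaded at import time, so no validation is needed here.
        assert self.pipeline and self.motion_module and self.base_model
        return f"video for {prompt!r} using {self.base_model}"

controller = DemoController()
controller.update_stable_diffusion("stable-diffusion-v1-5")
controller.update_motion_module("sd15_t2v_beta_motion.ckpt")
controller.update_base_model("realistic2.safetensors")

if __name__ == "__main__":
    print(controller.animate("a boy holding a rabbit"))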
@@ -241,54 +236,21 @@ def ui():
             Fu-Yun Wang, Zhaoyang Huang (*Corresponding Author), Xiaoyu Shi, Weikang Bian, Guanglu Song, Yu Liu, Hongsheng Li (*Corresponding Author)<br>
             [arXiv Report](https://arxiv.org/abs/2402.00769) | [Project Page](https://animatelcm.github.io/) | [Github](https://github.com/G-U-N/AnimateLCM) | [Civitai](https://civitai.com/models/290375/animatelcm-fast-video-generation) | [Replicate](https://replicate.com/camenduru/animate-lcm)
             """
+
+            '''
+            Important Notes:
+            1. The generation speed is around 1~2 seconds. There is delay in the space.
+            2. Increase the sampling step and cfg if you want more fancy videos.
+            '''
         )
         with gr.Column(variant="panel"):
-            gr.Markdown(
-                """
-                ### 1. Model checkpoints (select pretrained model path first).
-                """
-            )
             with gr.Row():
-                stable_diffusion_dropdown = gr.Dropdown(
-                    label="Pretrained Model Path",
-                    choices=controller.stable_diffusion_list,
-                    # value="models/StableDiffusion/stable-diffusion-v1-5",
-                    interactive=True,
-                )
-                stable_diffusion_dropdown.change(fn=controller.update_stable_diffusion, inputs=[
-                    stable_diffusion_dropdown], outputs=[stable_diffusion_dropdown])
-
-                stable_diffusion_refresh_button = gr.Button(
-                    value="\U0001F503", elem_classes="toolbutton")
-
-                def update_stable_diffusion():
-                    controller.refresh_stable_diffusion()
-                    return gr.Dropdown.update(choices=controller.stable_diffusion_list)
-                stable_diffusion_refresh_button.click(
-                    fn=update_stable_diffusion, inputs=[], outputs=[stable_diffusion_dropdown])
-
-            with gr.Row():
-                motion_module_dropdown = gr.Dropdown(
-                    label="Select motion module",
-                    choices=controller.motion_module_list,
-                    interactive=True,
-                )
-                motion_module_dropdown.change(fn=controller.update_motion_module, inputs=[
-                    motion_module_dropdown], outputs=[motion_module_dropdown])
-
-                motion_module_refresh_button = gr.Button(
-                    value="\U0001F503", elem_classes="toolbutton")
-
-                def update_motion_module():
-                    controller.refresh_motion_module()
-                    return gr.Dropdown.update(choices=controller.motion_module_list)
-                motion_module_refresh_button.click(
-                    fn=update_motion_module, inputs=[], outputs=[motion_module_dropdown])
 
                 base_model_dropdown = gr.Dropdown(
                     label="Select base Dreambooth model (required)",
                     choices=controller.personalized_model_list,
                     interactive=True,
+                    value="realistic2.safetensors"
                 )
                 base_model_dropdown.change(fn=controller.update_base_model, inputs=[
                     base_model_dropdown], outputs=[base_model_dropdown])
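One subtlety in the hunk above: the added '''…''' block is not a comment. It sits between the closing """ of the header text and the closing parenthesis of gr.Markdown(...), and Python joins adjacent string literals into a single string, so the notes are appended to the rendered header. A standalone illustration of that mechanism (the header text here is a placeholder):

# Adjacent string literals are concatenated at compile time, so both parts
# below end up in one string, the same mechanism the diff relies on inside
# the gr.Markdown(...) call. The header text is a placeholder.
header = (
    """
    AnimateLCM demo header (placeholder)
    """

    '''
    Important Notes:
    1. The generation speed is around 1~2 seconds. There is delay in the space.
    2. Increase the sampling step and cfg if you want more fancy videos.
    '''
)
print(header)  # prints one combined string containing both parts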
@@ -328,9 +290,9 @@ def ui():
                 """
             )
 
-            prompt_textbox = gr.Textbox(label="Prompt", lines=2)
+            prompt_textbox = gr.Textbox(label="Prompt", lines=2, value="a boy holding a rabbit")
             negative_prompt_textbox = gr.Textbox(
-                label="Negative prompt", lines=2)
+                label="Negative prompt", lines=2, value="bad quality")
 
             with gr.Row().style(equal_height=False):
                 with gr.Column():
@@ -338,7 +300,7 @@ def ui():
                     sampler_dropdown = gr.Dropdown(label="Sampling method", choices=list(
                         scheduler_dict.keys()), value=list(scheduler_dict.keys())[0])
                     sample_step_slider = gr.Slider(
-                        label="Sampling steps", value=4, minimum=1, maximum=25, step=1)
+                        label="Sampling steps", value=6, minimum=1, maximum=25, step=1)
 
                     width_slider = gr.Slider(
                         label="Width", value=512, minimum=256, maximum=1024, step=64)
@@ -347,7 +309,7 @@ def ui():
                     length_slider = gr.Slider(
                         label="Animation length", value=16, minimum=12, maximum=20, step=1)
                     cfg_scale_slider = gr.Slider(
-                        label="CFG Scale", value=1, minimum=1, maximum=2)
+                        label="CFG Scale", value=1.5, minimum=1, maximum=2)
 
                     with gr.Row():
                         seed_textbox = gr.Textbox(label="Seed", value=-1)
@@ -365,9 +327,6 @@ def ui():
         generate_button.click(
             fn=controller.animate,
             inputs=[
-                stable_diffusion_dropdown,
-                motion_module_dropdown,
-                base_model_dropdown,
                 lora_alpha_slider,
                 spatial_lora_slider,
                 prompt_textbox,
@@ -386,6 +345,7 @@ def ui():
     return demo
 
 
+
 if __name__ == "__main__":
     demo = ui()
     # gr.close_all()