OmPrakashSingh1704 committed
Commit af4a9f2 · 1 Parent(s): 25ef180
options/Video_model/Model.py CHANGED
@@ -53,9 +53,21 @@ def Video(
     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
 
-    # Use float32 for image processing to avoid BFloat16 errors
-    image = image.convert("RGB")  # Ensure image is in RGB format
-    with torch.autocast(device, dtype=torch.float32):
+    # Only use autocast if on CUDA, otherwise run without it on CPU
+    if device == "cuda":
+        with torch.autocast(device_type='cuda', dtype=torch.float16):
+            frames = pipeline(
+                image, height=height, width=width,
+                num_inference_steps=num_inference_steps,
+                min_guidance_scale=min_guidance_scale,
+                max_guidance_scale=max_guidance_scale,
+                num_frames=num_frames, fps=fps, motion_bucket_id=motion_bucket_id,
+                decode_chunk_size=8,
+                noise_aug_strength=0.02,
+                generator=generator,
+            ).frames[0]
+    else:
+        # No autocast for CPU since it doesn't support float32 in autocast
         frames = pipeline(
             image, height=height, width=width,
             num_inference_steps=num_inference_steps,
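The new code runs the same pipeline call in both branches, differing only in whether it is wrapped in autocast. A minimal sketch of an equivalent pattern, assuming the same pipeline, image, device, and generator objects as in the diff (the run_pipeline helper name and its keyword arguments are illustrative, not part of the repo), selects the context manager up front so the call is written only once:

import contextlib
import torch

def run_pipeline(pipeline, image, device, generator, **kwargs):
    # Hypothetical helper: autocast to float16 only on CUDA;
    # on CPU fall back to a no-op context manager.
    ctx = (
        torch.autocast(device_type="cuda", dtype=torch.float16)
        if device == "cuda"
        else contextlib.nullcontext()
    )
    with ctx:
        return pipeline(
            image,
            decode_chunk_size=8,
            noise_aug_strength=0.02,
            generator=generator,
            **kwargs,
        ).frames[0]

Behavior should match the committed version; the conditional context manager simply avoids maintaining two copies of the pipeline call.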
options/Video_model/__pycache__/Model.cpython-310.pyc CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ