pablovela5620 committed
Commit ced891c · verified · 1 Parent(s): 870c029

Upload gradio_app.py with huggingface_hub

Files changed (1)
  1. gradio_app.py +51 -48
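
The commit message refers to the standard huggingface_hub upload path. A minimal sketch of how such a single-file push is typically done (the repo_id below is a placeholder, not this Space's actual id):

from huggingface_hub import HfApi

# Push one file to a Space repo; requires a token with write access.
api = HfApi()
api.upload_file(
    path_or_fileobj="gradio_app.py",
    path_in_repo="gradio_app.py",
    repo_id="user/some-space",  # placeholder repo id
    repo_type="space",
    commit_message="Upload gradio_app.py with huggingface_hub",
)
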
gradio_app.py CHANGED
@@ -207,55 +207,58 @@ def gradio_warped_image(
     lambda_ts: Float64[torch.Tensor, "n b"] = load_lambda_ts(num_denoise_iters)
     progress(0.15, desc="Starting diffusion")
 
-    # frames: list[PIL.Image.Image] = svd_render(
-    #     image_o=rgb_resized,
-    #     masks=masks,
-    #     cond_image=cond_image,
-    #     lambda_ts=lambda_ts,
-    #     num_denoise_iters=num_denoise_iters,
-    #     weight_clamp=0.2,
-    #     svd_pipe=SVD_PIPE,
-    # )
-
-    # to allow logging from a separate thread
-    log_queue: Queue = Queue()
-    handle = threading.Thread(
-        target=svd_render_threaded,
-        kwargs={
-            "image_o": rgb_resized,
-            "masks": masks,
-            "cond_image": cond_image,
-            "lambda_ts": lambda_ts,
-            "num_denoise_iters": num_denoise_iters,
-            "weight_clamp": 0.2,
-            "svd_pipe": SVD_PIPE,
-            "log_queue": log_queue,
-        },
-    )
+    if IN_SPACES:
+        frames: list[PIL.Image.Image] = svd_render(
+            image_o=rgb_resized,
+            masks=masks,
+            cond_image=cond_image,
+            lambda_ts=lambda_ts,
+            num_denoise_iters=num_denoise_iters,
+            weight_clamp=0.2,
+            svd_pipe=SVD_PIPE,
+        )
+    else:
+        # to allow logging from a separate thread
+        log_queue: Queue = Queue()
+        handle = threading.Thread(
+            target=svd_render_threaded,
+            kwargs={
+                "image_o": rgb_resized,
+                "masks": masks,
+                "cond_image": cond_image,
+                "lambda_ts": lambda_ts,
+                "num_denoise_iters": num_denoise_iters,
+                "weight_clamp": 0.2,
+                "svd_pipe": SVD_PIPE,
+                "log_queue": log_queue,
+            },
+        )
 
-    handle.start()
-    i = 0
-    while True:
-        msg = log_queue.get()
-        match msg:
-            case frames if all(isinstance(frame, PIL.Image.Image) for frame in frames):
-                break
-            case entity_path, entity, times:
-                i += 1
-                rr.reset_time()
-                for timeline, time in times:
-                    if isinstance(time, int):
-                        rr.set_time_sequence(timeline, time)
-                    else:
-                        rr.set_time_seconds(timeline, time)
-                static = False
-                if entity_path == "latents":
-                    static = True
-                rr.log(entity_path, entity, static=static)
-                yield stream.read(), None, [], f"{i} out of {num_denoise_iters}"
-            case _:
-                assert False
-    handle.join()
+        handle.start()
+        i = 0
+        while True:
+            msg = log_queue.get()
+            match msg:
+                case frames if all(
+                    isinstance(frame, PIL.Image.Image) for frame in frames
+                ):
+                    break
+                case entity_path, entity, times:
+                    i += 1
+                    rr.reset_time()
+                    for timeline, time in times:
+                        if isinstance(time, int):
+                            rr.set_time_sequence(timeline, time)
+                        else:
+                            rr.set_time_seconds(timeline, time)
+                    static = False
+                    if entity_path == "latents":
+                        static = True
+                    rr.log(entity_path, entity, static=static)
+                    yield stream.read(), None, [], f"{i} out of {num_denoise_iters}"
+                case _:
+                    assert False
+        handle.join()
 
     # all frames but the first one
     frame: np.ndarray
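
The new branch keys off an IN_SPACES flag that is defined elsewhere in gradio_app.py and not visible in this hunk. A minimal sketch of one common way such a flag is derived, assuming it is based on the SPACE_ID environment variable that Hugging Face Spaces sets (an assumption, not this file's actual definition):

import os

# Assumed definition: Spaces set SPACE_ID in the runtime environment,
# so its presence is a simple way to detect "running inside a Space".
IN_SPACES: bool = os.environ.get("SPACE_ID") is not None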
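
The else branch consumes messages from log_queue, so svd_render_threaded (not shown in this hunk) is expected to put (entity_path, entity, times) tuples while denoising and, when finished, a list of PIL images. A runnable toy of that inferred contract, with a fake worker standing in for the real renderer:

import threading
from queue import Queue

import PIL.Image


def fake_worker(log_queue: Queue, num_iters: int) -> None:
    # Stand-in for svd_render_threaded: one progress message per step...
    for step in range(num_iters):
        log_queue.put(("latents", f"step-{step}", [("iteration", step)]))
    # ...then a list of frames as the "done" sentinel.
    log_queue.put([PIL.Image.new("RGB", (8, 8))])


log_queue: Queue = Queue()
handle = threading.Thread(target=fake_worker, args=(log_queue, 3))
handle.start()
while True:
    match log_queue.get():
        case frames if all(isinstance(f, PIL.Image.Image) for f in frames):
            break  # final frames received
        case entity_path, entity, times:
            print(entity_path, entity, times)  # would be rr.log(...) in the app
handle.join()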