from typing import Any, Dict, List

import torch
from transformers import (
    LlavaNextVideoForConditionalGeneration,
    LlavaNextVideoProcessor,
)


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the model and processor from the checkpoint directory.
        self.model = LlavaNextVideoForConditionalGeneration.from_pretrained(path)
        self.processor = LlavaNextVideoProcessor.from_pretrained(path)

        # Move the model to the GPU if one is available, then switch to
        # evaluation mode for inference.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (Dict): Input payload containing "clip" (the video frames)
                and "prompt" (the text prompt).

        Returns:
            List[Dict[str, Any]]: The generated text from the model.
        """
        # Extract and validate the inputs from the request payload.
        clip = data.get("clip")
        prompt = data.get("prompt")
        if clip is None or prompt is None:
            return [{"error": "Missing 'clip' or 'prompt' in input data"}]

        # Tokenize the prompt and preprocess the video frames.
        inputs_video = self.processor(
            text=prompt, videos=clip, padding=True, return_tensors="pt"
        ).to(self.model.device)

        # Generate a response; inference_mode disables gradient tracking.
        generate_kwargs = {"max_new_tokens": 512, "do_sample": True, "top_p": 0.9}
        with torch.inference_mode():
            output = self.model.generate(**inputs_video, **generate_kwargs)
        generated_text = self.processor.batch_decode(output, skip_special_tokens=True)

        # Keep only the assistant's answer. Fall back to the full decoded
        # text if the "ASSISTANT:" marker is absent, since str.find() would
        # otherwise return -1 and silently mangle the slice.
        marker = "ASSISTANT:"
        marker_pos = generated_text[0].find(marker)
        if marker_pos == -1:
            assistant_answer = generated_text[0].strip()
        else:
            assistant_answer = generated_text[0][marker_pos + len(marker):].strip()

        return [{"generated_text": assistant_answer}]
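

# --- Local smoke test (illustrative sketch, not part of the handler contract) ---
# Assumes the checkpoint id below and a clip passed as a (frames, H, W, 3)
# uint8 array; both are assumptions for demonstration, and the prompt follows
# the "USER: <video> ... ASSISTANT:" template used by LLaVA-NeXT-Video chat
# checkpoints.
if __name__ == "__main__":
    import numpy as np

    handler = EndpointHandler(path="llava-hf/LLaVA-NeXT-Video-7B-hf")  # assumed checkpoint

    # Dummy clip: 8 random RGB frames, just to exercise the pipeline end to end.
    dummy_clip = np.random.randint(0, 255, (8, 336, 336, 3), dtype=np.uint8)

    result = handler(
        {
            "clip": dummy_clip,
            "prompt": "USER: <video>\nWhat is happening in this video? ASSISTANT:",
        }
    )
    print(result)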