morthens committed
Commit 8c0009d · 1 Parent(s): 736504f

handler and requirements file creation

Files changed (2)
  1. handler.py +62 -0
  2. requirements.txt +1 -0
handler.py ADDED
@@ -0,0 +1,62 @@
+ from typing import Dict, Any
+ from io import BytesIO
+
+ import requests
+ from PIL import Image
+ from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
+
+
+ class EndpointHandler:
+     def __init__(self, path: str = "morthens/qwen2-vl-7b-infer"):
+         # Load the processor and model; device_map="auto" already places the
+         # weights on the available GPU (or CPU), so no manual .to() call is needed.
+         self.processor = AutoProcessor.from_pretrained(path)
+         self.model = Qwen2VLForConditionalGeneration.from_pretrained(
+             path,
+             torch_dtype="auto",
+             device_map="auto"
+         )
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         # Extract the input data
+         image_url = data.get("image_url", "")
+         text = data.get("text", "")
+
+         # Load the image from the URL
+         try:
+             response = requests.get(image_url, timeout=30)
+             response.raise_for_status()
+             image = Image.open(BytesIO(response.content)).convert("RGB")
+         except Exception as e:
+             return {"error": f"Failed to fetch or process image: {str(e)}"}
+
+         # Wrap the request in a chat message so the processor inserts the
+         # image placeholder tokens that Qwen2-VL expects in the prompt.
+         messages = [
+             {
+                 "role": "user",
+                 "content": [
+                     {"type": "image"},
+                     {"type": "text", "text": text},
+                 ],
+             }
+         ]
+         prompt = self.processor.apply_chat_template(
+             messages,
+             tokenize=False,
+             add_generation_prompt=True
+         )
+
+         # Preprocess the input
+         inputs = self.processor(
+             text=[prompt],
+             images=[image],
+             padding=True,
+             return_tensors="pt"
+         )
+
+         # Move the inputs to the same device as the model
+         inputs = inputs.to(self.model.device)
+
+         # Perform inference
+         output_ids = self.model.generate(
+             **inputs,
+             max_new_tokens=128
+         )
+
+         # Drop the prompt tokens and decode only the newly generated ones
+         generated_ids = [
+             output[len(input_ids):]
+             for input_ids, output in zip(inputs.input_ids, output_ids)
+         ]
+         output_text = self.processor.batch_decode(
+             generated_ids,
+             skip_special_tokens=True,
+             clean_up_tokenization_spaces=True
+         )[0]
+
+         # Return the prediction
+         return {"prediction": output_text}
+
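For context (not part of this commit): a minimal sketch of how the new handler could be smoke-tested locally. It assumes handler.py is importable and that torch, Pillow, requests, and accelerate are available alongside the pinned transformers version; the payload keys mirror what __call__ reads, and the image URL is only a placeholder.

from handler import EndpointHandler

# Instantiate the handler; this loads the 7B model, so it expects a GPU.
handler = EndpointHandler("morthens/qwen2-vl-7b-infer")

# Keys mirror what EndpointHandler.__call__ reads; the URL is a placeholder.
payload = {
    "image_url": "https://example.com/sample.jpg",
    "text": "Describe the image in one sentence.",
}

result = handler(payload)
print(result)  # {"prediction": "..."} or {"error": "..."} if the image fetch fails
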
requirements.txt ADDED
@@ -0,0 +1 @@
+ transformers==4.45.0