jykim310 committed · verified
Commit 39c1bad · 1 Parent(s): 616ddd5

Create README.md

Files changed (1):
  1. README.md (+29 -0)

README.md ADDED:
## Usage
```python
import requests
from PIL import Image

import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "nota-ai/phiva-4b-hf"

prompt = "USER: <image>\nWhat are these?\nASSISTANT:"
image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"

# Load the model in half precision and move it to GPU 0.
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    attn_implementation="eager"
).to(0)

processor = AutoProcessor.from_pretrained(model_id)

# Fetch the example image and build the model inputs. Keyword arguments
# avoid the positional text/image order change across transformers versions.
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(text=prompt, images=raw_image, return_tensors="pt").to(0, torch.float16)

# Greedy decoding; slice off the prompt tokens so only the answer is printed.
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
print(processor.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```
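
If you prefer the high-level `pipeline` API, a minimal sketch along these lines should behave similarly. It is an unverified example: it assumes this checkpoint is registered for the `image-to-text` pipeline task and that its processor is resolved automatically.

```python
# Minimal alternative sketch using the transformers pipeline API.
# Assumption: this checkpoint works with the "image-to-text" task;
# not verified against this specific model.
import torch
from transformers import pipeline

pipe = pipeline(
    "image-to-text",
    model="nota-ai/phiva-4b-hf",
    torch_dtype=torch.float16,
    device=0,
)

prompt = "USER: <image>\nWhat are these?\nASSISTANT:"
image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"

# The pipeline downloads the image, runs generation, and returns the decoded text.
result = pipe(image_url, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
print(result[0]["generated_text"])
```

Depending on the transformers version, `generated_text` may include the prompt, so you might need to strip the `USER:`/`ASSISTANT:` scaffolding yourself.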