test1
- DockerFile +25 -0
- app.py +85 -0
- requirements.txt +12 -0
DockerFile
ADDED
@@ -0,0 +1,25 @@
+FROM pytorch/pytorch:latest
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    libgl1-mesa-glx
+
+# Copy the dependency list into the image so pip can find it
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Install additional required libraries
+RUN pip install byaldi qwen-vl-utils
+
+# Copy your application code
+COPY app.py .
+COPY .env .
+
+# Expose the port the app runs on
+EXPOSE 8000
+
+# Command to run the application
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
app.py
ADDED
@@ -0,0 +1,85 @@
+import io
+
+import torch
+from fastapi import FastAPI, File, Form, UploadFile
+from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+from qwen_vl_utils import process_vision_info
+from byaldi import RAGMultiModalModel
+from PIL import Image
+
+# Initialize FastAPI app
+app = FastAPI()
+
+# Define model checkpoints
+RAG_MODEL = "vidore/colpali"
+QWN_MODEL = "Qwen/Qwen2-VL-7B-Instruct"
+
+# Load the retrieval model (available for document indexing/search)
+RAG = RAGMultiModalModel.from_pretrained(RAG_MODEL)
+
+# Load the vision-language model; device_map="auto" already places the
+# weights on the GPU, so no explicit .cuda() call is needed (moving a
+# dispatched model raises an error)
+model = Qwen2VLForConditionalGeneration.from_pretrained(
+    QWN_MODEL,
+    torch_dtype=torch.bfloat16,
+    attn_implementation="flash_attention_2",
+    device_map="auto",
+    trust_remote_code=True,
+).eval()
+
+# The processor must come from the same checkpoint as the model
+processor = AutoProcessor.from_pretrained(QWN_MODEL, trust_remote_code=True)
+
+# Define processing function
+def document_rag(text_query, image):
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+                    "image": image,
+                },
+                {"type": "text", "text": text_query},
+            ],
+        }
+    ]
+    text = processor.apply_chat_template(
+        messages, tokenize=False, add_generation_prompt=True
+    )
+    image_inputs, video_inputs = process_vision_info(messages)
+    inputs = processor(
+        text=[text],
+        images=image_inputs,
+        videos=video_inputs,
+        padding=True,
+        return_tensors="pt",
+    )
+    inputs = inputs.to("cuda")
+    generated_ids = model.generate(**inputs, max_new_tokens=50)
+    generated_ids_trimmed = [
+        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+    ]
+    output_text = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+    return output_text[0]
+
+# Define API endpoint; the upload arrives as multipart/form-data, so the
+# text query is declared as a Form field (FastAPI cannot mix a JSON body
+# with a file upload in the same request)
+@app.post("/process_document")
+async def process_document(text_query: str = Form(...), file: UploadFile = File(...)):
+    # Read and decode the uploaded image
+    contents = await file.read()
+    image = Image.open(io.BytesIO(contents))
+
+    # Run the query against the document page
+    result = document_rag(text_query, image)
+
+    return {"result": result}
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
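Note: the ColPali retriever (RAG) is loaded in app.py but never used by the endpoint, so no retrieval actually happens. If page-level search over indexed documents is the intent, a minimal sketch using byaldi's index/search interface could look like the following; the docs/ folder, index name, and query are placeholder assumptions, not part of this commit.

    # Sketch only: build an index over a hypothetical docs/ folder of PDFs,
    # then retrieve the best-matching page for a query.
    RAG.index(
        input_path="docs/",
        index_name="document_index",   # placeholder name
        store_collection_with_index=False,
        overwrite=True,
    )
    results = RAG.search("What is the total revenue?", k=1)
    # Each result identifies a document and page; that page, rendered to a
    # PIL image, could then be passed to document_rag() above.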
requirements.txt
ADDED
@@ -0,0 +1,12 @@
+torch
+torchvision
+torchaudio
+torchao
+git+https://github.com/huggingface/transformers.git
+diffusers
+Pillow
+byaldi
+qwen_vl_utils
+flash-attn
+fastapi
+uvicorn[standard]
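Once the image is built and the container is running, the endpoint can be exercised with a small client. A minimal sketch, assuming the service is reachable on localhost:8000 (per the EXPOSE/CMD lines above) and an invoice.png test image exists locally:

    import requests

    # The endpoint takes multipart/form-data: a text_query form field
    # plus the uploaded image file.
    with open("invoice.png", "rb") as f:
        response = requests.post(
            "http://localhost:8000/process_document",
            data={"text_query": "What is the invoice total?"},
            files={"file": ("invoice.png", f, "image/png")},
        )
    print(response.json())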