prithivMLmods committed on
Commit d375bde · verified · 1 parent: 64c516e

Upload omni-r.ipynb

Files changed (1)
  1. Omni-R/omni-r.ipynb +569 -0
Omni-R/omni-r.ipynb ADDED
@@ -0,0 +1,569 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "-b4-SW1aGOcF"
7
+ },
8
+ "source": [
9
+ "# **Omni-Reasoner**\n",
10
+ "\n",
11
+ "Qwen2VLForConditionalGeneration"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": 1,
17
+ "metadata": {
18
+ "colab": {
19
+ "base_uri": "https://localhost:8080/"
20
+ },
21
+ "id": "oDmd1ZObGSel",
22
+ "outputId": "83835185-bd5e-4251-a5bd-9dea119b62bc"
23
+ },
24
+ "outputs": [
25
+ {
26
+ "name": "stdout",
27
+ "output_type": "stream",
28
+ "text": [
29
+ "Collecting gradio\n",
30
+ " Downloading gradio-5.12.0-py3-none-any.whl.metadata (16 kB)\n",
31
+ "Collecting spaces\n",
32
+ " Downloading spaces-0.32.0-py3-none-any.whl.metadata (1.0 kB)\n",
33
+ "Requirement already satisfied: transformers in /usr/local/lib/python3.11/dist-packages (4.47.1)\n",
34
+ "Requirement already satisfied: accelerate in /usr/local/lib/python3.11/dist-packages (1.2.1)\n",
35
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (1.26.4)\n",
36
+ "Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (2.32.3)\n",
37
+ "Requirement already satisfied: torch in /usr/local/lib/python3.11/dist-packages (2.5.1+cu121)\n",
38
+ "Requirement already satisfied: torchvision in /usr/local/lib/python3.11/dist-packages (0.20.1+cu121)\n",
39
+ "Collecting qwen-vl-utils\n",
40
+ " Downloading qwen_vl_utils-0.0.8-py3-none-any.whl.metadata (3.6 kB)\n",
41
+ "Collecting av\n",
42
+ " Downloading av-14.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.5 kB)\n",
43
+ "Requirement already satisfied: ipython in /usr/local/lib/python3.11/dist-packages (7.34.0)\n",
44
+ "Collecting reportlab\n",
45
+ " Downloading reportlab-4.2.5-py3-none-any.whl.metadata (1.5 kB)\n",
46
+ "Collecting fpdf\n",
47
+ " Downloading fpdf-1.7.2.tar.gz (39 kB)\n",
48
+ " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
49
+ "Collecting python-docx\n",
50
+ " Downloading python_docx-1.1.2-py3-none-any.whl.metadata (2.0 kB)\n",
51
+ "Requirement already satisfied: pillow in /usr/local/lib/python3.11/dist-packages (11.1.0)\n",
52
+ "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.11/dist-packages (0.27.1)\n",
53
+ "Collecting aiofiles<24.0,>=22.0 (from gradio)\n",
54
+ " Downloading aiofiles-23.2.1-py3-none-any.whl.metadata (9.7 kB)\n",
55
+ "Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (3.7.1)\n",
56
+ "Collecting fastapi<1.0,>=0.115.2 (from gradio)\n",
57
+ " Downloading fastapi-0.115.6-py3-none-any.whl.metadata (27 kB)\n",
58
+ "Collecting ffmpy (from gradio)\n",
59
+ " Downloading ffmpy-0.5.0-py3-none-any.whl.metadata (3.0 kB)\n",
60
+ "Collecting gradio-client==1.5.4 (from gradio)\n",
61
+ " Downloading gradio_client-1.5.4-py3-none-any.whl.metadata (7.1 kB)\n",
62
+ "Requirement already satisfied: httpx>=0.24.1 in /usr/local/lib/python3.11/dist-packages (from gradio) (0.28.1)\n",
63
+ "Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (3.1.5)\n",
64
+ "Collecting markupsafe~=2.0 (from gradio)\n",
65
+ " Downloading MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.0 kB)\n",
66
+ "Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (3.10.14)\n",
67
+ "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from gradio) (24.2)\n",
68
+ "Requirement already satisfied: pandas<3.0,>=1.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (2.2.2)\n",
69
+ "Requirement already satisfied: pydantic>=2.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (2.10.5)\n",
70
+ "Collecting pydub (from gradio)\n",
71
+ " Downloading pydub-0.25.1-py2.py3-none-any.whl.metadata (1.4 kB)\n",
72
+ "Collecting python-multipart>=0.0.18 (from gradio)\n",
73
+ " Downloading python_multipart-0.0.20-py3-none-any.whl.metadata (1.8 kB)\n",
74
+ "Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (6.0.2)\n",
75
+ "Collecting ruff>=0.2.2 (from gradio)\n",
76
+ " Downloading ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (25 kB)\n",
77
+ "Collecting safehttpx<0.2.0,>=0.1.6 (from gradio)\n",
78
+ " Downloading safehttpx-0.1.6-py3-none-any.whl.metadata (4.2 kB)\n",
79
+ "Collecting semantic-version~=2.0 (from gradio)\n",
80
+ " Downloading semantic_version-2.10.0-py2.py3-none-any.whl.metadata (9.7 kB)\n",
81
+ "Collecting starlette<1.0,>=0.40.0 (from gradio)\n",
82
+ " Downloading starlette-0.45.2-py3-none-any.whl.metadata (6.3 kB)\n",
83
+ "Collecting tomlkit<0.14.0,>=0.12.0 (from gradio)\n",
84
+ " Downloading tomlkit-0.13.2-py3-none-any.whl.metadata (2.7 kB)\n",
85
+ "Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.11/dist-packages (from gradio) (0.15.1)\n",
86
+ "Requirement already satisfied: typing-extensions~=4.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (4.12.2)\n",
87
+ "Collecting uvicorn>=0.14.0 (from gradio)\n",
88
+ " Downloading uvicorn-0.34.0-py3-none-any.whl.metadata (6.5 kB)\n",
89
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.11/dist-packages (from gradio-client==1.5.4->gradio) (2024.10.0)\n",
90
+ "Requirement already satisfied: websockets<15.0,>=10.0 in /usr/local/lib/python3.11/dist-packages (from gradio-client==1.5.4->gradio) (14.1)\n",
91
+ "Requirement already satisfied: psutil<6,>=2 in /usr/local/lib/python3.11/dist-packages (from spaces) (5.9.5)\n",
92
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from transformers) (3.16.1)\n",
93
+ "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/dist-packages (from transformers) (2024.11.6)\n",
94
+ "Requirement already satisfied: tokenizers<0.22,>=0.21 in /usr/local/lib/python3.11/dist-packages (from transformers) (0.21.0)\n",
95
+ "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.11/dist-packages (from transformers) (0.5.2)\n",
96
+ "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.11/dist-packages (from transformers) (4.67.1)\n",
97
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests) (3.4.1)\n",
98
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests) (3.10)\n",
99
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests) (2.3.0)\n",
100
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests) (2024.12.14)\n",
101
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.11/dist-packages (from torch) (3.4.2)\n",
102
+ "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /usr/local/lib/python3.11/dist-packages (from torch) (12.1.105)\n",
103
+ "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /usr/local/lib/python3.11/dist-packages (from torch) (12.1.105)\n",
104
+ "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /usr/local/lib/python3.11/dist-packages (from torch) (12.1.105)\n",
105
+ "Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /usr/local/lib/python3.11/dist-packages (from torch) (9.1.0.70)\n",
106
+ "Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /usr/local/lib/python3.11/dist-packages (from torch) (12.1.3.1)\n",
107
+ "Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /usr/local/lib/python3.11/dist-packages (from torch) (11.0.2.54)\n",
108
+ "Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /usr/local/lib/python3.11/dist-packages (from torch) (10.3.2.106)\n",
109
+ "Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /usr/local/lib/python3.11/dist-packages (from torch) (11.4.5.107)\n",
110
+ "Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /usr/local/lib/python3.11/dist-packages (from torch) (12.1.0.106)\n",
111
+ "Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /usr/local/lib/python3.11/dist-packages (from torch) (2.21.5)\n",
112
+ "Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /usr/local/lib/python3.11/dist-packages (from torch) (12.1.105)\n",
113
+ "Requirement already satisfied: triton==3.1.0 in /usr/local/lib/python3.11/dist-packages (from torch) (3.1.0)\n",
114
+ "Requirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.11/dist-packages (from torch) (1.13.1)\n",
115
+ "Requirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.11/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch) (12.6.85)\n",
116
+ "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from sympy==1.13.1->torch) (1.3.0)\n",
117
+ "Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.11/dist-packages (from ipython) (75.1.0)\n",
118
+ "Collecting jedi>=0.16 (from ipython)\n",
119
+ " Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n",
120
+ "Requirement already satisfied: decorator in /usr/local/lib/python3.11/dist-packages (from ipython) (4.4.2)\n",
121
+ "Requirement already satisfied: pickleshare in /usr/local/lib/python3.11/dist-packages (from ipython) (0.7.5)\n",
122
+ "Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.11/dist-packages (from ipython) (5.7.1)\n",
123
+ "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from ipython) (3.0.48)\n",
124
+ "Requirement already satisfied: pygments in /usr/local/lib/python3.11/dist-packages (from ipython) (2.18.0)\n",
125
+ "Requirement already satisfied: backcall in /usr/local/lib/python3.11/dist-packages (from ipython) (0.2.0)\n",
126
+ "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.11/dist-packages (from ipython) (0.1.7)\n",
127
+ "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.11/dist-packages (from ipython) (4.9.0)\n",
128
+ "Requirement already satisfied: chardet in /usr/local/lib/python3.11/dist-packages (from reportlab) (5.2.0)\n",
129
+ "Requirement already satisfied: lxml>=3.1.0 in /usr/local/lib/python3.11/dist-packages (from python-docx) (5.3.0)\n",
130
+ "Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.11/dist-packages (from anyio<5.0,>=3.0->gradio) (1.3.1)\n",
131
+ "Collecting starlette<1.0,>=0.40.0 (from gradio)\n",
132
+ " Downloading starlette-0.41.3-py3-none-any.whl.metadata (6.0 kB)\n",
133
+ "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/dist-packages (from httpx>=0.24.1->gradio) (1.0.7)\n",
134
+ "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.11/dist-packages (from httpcore==1.*->httpx>=0.24.1->gradio) (0.14.0)\n",
135
+ "Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.11/dist-packages (from jedi>=0.16->ipython) (0.8.4)\n",
136
+ "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas<3.0,>=1.0->gradio) (2.8.2)\n",
137
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas<3.0,>=1.0->gradio) (2024.2)\n",
138
+ "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas<3.0,>=1.0->gradio) (2024.2)\n",
139
+ "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.11/dist-packages (from pexpect>4.3->ipython) (0.7.0)\n",
140
+ "Requirement already satisfied: wcwidth in /usr/local/lib/python3.11/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython) (0.2.13)\n",
141
+ "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.11/dist-packages (from pydantic>=2.0->gradio) (0.7.0)\n",
142
+ "Requirement already satisfied: pydantic-core==2.27.2 in /usr/local/lib/python3.11/dist-packages (from pydantic>=2.0->gradio) (2.27.2)\n",
143
+ "Requirement already satisfied: click>=8.0.0 in /usr/local/lib/python3.11/dist-packages (from typer<1.0,>=0.12->gradio) (8.1.8)\n",
144
+ "Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.11/dist-packages (from typer<1.0,>=0.12->gradio) (1.5.4)\n",
145
+ "Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.11/dist-packages (from typer<1.0,>=0.12->gradio) (13.9.4)\n",
146
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas<3.0,>=1.0->gradio) (1.17.0)\n",
147
+ "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.11/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (3.0.0)\n",
148
+ "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.11/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio) (0.1.2)\n",
149
+ "Downloading gradio-5.12.0-py3-none-any.whl (57.6 MB)\n",
150
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.6/57.6 MB\u001b[0m \u001b[31m10.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
151
+ "\u001b[?25hDownloading gradio_client-1.5.4-py3-none-any.whl (321 kB)\n",
152
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m321.4/321.4 kB\u001b[0m \u001b[31m25.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
153
+ "\u001b[?25hDownloading spaces-0.32.0-py3-none-any.whl (29 kB)\n",
154
+ "Downloading qwen_vl_utils-0.0.8-py3-none-any.whl (5.9 kB)\n",
155
+ "Downloading av-14.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (33.8 MB)\n",
156
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m33.8/33.8 MB\u001b[0m \u001b[31m27.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
157
+ "\u001b[?25hDownloading reportlab-4.2.5-py3-none-any.whl (1.9 MB)\n",
158
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m33.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
159
+ "\u001b[?25hDownloading python_docx-1.1.2-py3-none-any.whl (244 kB)\n",
160
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m244.3/244.3 kB\u001b[0m \u001b[31m10.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
161
+ "\u001b[?25hDownloading aiofiles-23.2.1-py3-none-any.whl (15 kB)\n",
162
+ "Downloading fastapi-0.115.6-py3-none-any.whl (94 kB)\n",
163
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m94.8/94.8 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
164
+ "\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n",
165
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m29.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
166
+ "\u001b[?25hDownloading MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (28 kB)\n",
167
+ "Downloading python_multipart-0.0.20-py3-none-any.whl (24 kB)\n",
168
+ "Downloading ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (12.4 MB)\n",
169
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.4/12.4 MB\u001b[0m \u001b[31m40.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
170
+ "\u001b[?25hDownloading safehttpx-0.1.6-py3-none-any.whl (8.7 kB)\n",
171
+ "Downloading semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n",
172
+ "Downloading starlette-0.41.3-py3-none-any.whl (73 kB)\n",
173
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m73.2/73.2 kB\u001b[0m \u001b[31m4.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
174
+ "\u001b[?25hDownloading tomlkit-0.13.2-py3-none-any.whl (37 kB)\n",
175
+ "Downloading uvicorn-0.34.0-py3-none-any.whl (62 kB)\n",
176
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.3/62.3 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
177
+ "\u001b[?25hDownloading ffmpy-0.5.0-py3-none-any.whl (6.0 kB)\n",
178
+ "Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n",
179
+ "Building wheels for collected packages: fpdf\n",
180
+ " Building wheel for fpdf (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
181
+ " Created wheel for fpdf: filename=fpdf-1.7.2-py2.py3-none-any.whl size=40704 sha256=30ea77e9b92cd500a9949be10d62f59d531839024746553304a480325794ec70\n",
182
+ " Stored in directory: /root/.cache/pip/wheels/65/4f/66/bbda9866da446a72e206d6484cd97381cbc7859a7068541c36\n",
183
+ "Successfully built fpdf\n",
184
+ "Installing collected packages: pydub, fpdf, uvicorn, tomlkit, semantic-version, ruff, reportlab, python-multipart, python-docx, markupsafe, jedi, ffmpy, av, aiofiles, starlette, qwen-vl-utils, safehttpx, gradio-client, fastapi, gradio, spaces\n",
185
+ " Attempting uninstall: markupsafe\n",
186
+ " Found existing installation: MarkupSafe 3.0.2\n",
187
+ " Uninstalling MarkupSafe-3.0.2:\n",
188
+ " Successfully uninstalled MarkupSafe-3.0.2\n",
189
+ "Successfully installed aiofiles-23.2.1 av-14.0.1 fastapi-0.115.6 ffmpy-0.5.0 fpdf-1.7.2 gradio-5.12.0 gradio-client-1.5.4 jedi-0.19.2 markupsafe-2.1.5 pydub-0.25.1 python-docx-1.1.2 python-multipart-0.0.20 qwen-vl-utils-0.0.8 reportlab-4.2.5 ruff-0.9.2 safehttpx-0.1.6 semantic-version-2.10.0 spaces-0.32.0 starlette-0.41.3 tomlkit-0.13.2 uvicorn-0.34.0\n"
190
+ ]
191
+ }
192
+ ],
193
+ "source": [
194
+ "!pip install gradio spaces transformers accelerate numpy requests torch torchvision qwen-vl-utils av ipython reportlab fpdf python-docx pillow huggingface_hub"
195
+ ]
196
+ },
197
+ {
198
+ "cell_type": "code",
199
+ "execution_count": 3,
200
+ "metadata": {
201
+ "colab": {
202
+ "base_uri": "https://localhost:8080/",
203
+ "height": 715
204
+ },
205
+ "id": "ovBSsRFhGbs2",
206
+ "outputId": "dac679b7-0850-4cb4-b67e-538b4b49cc11"
207
+ },
208
+ "outputs": [
209
+ {
210
+ "name": "stderr",
211
+ "output_type": "stream",
212
+ "text": [
213
+ "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.\n"
214
+ ]
215
+ },
216
+ {
217
+ "name": "stdout",
218
+ "output_type": "stream",
219
+ "text": [
220
+ "Loading Omni-Reasoner...\n",
221
+ "Running Gradio in a Colab notebook requires sharing enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
222
+ "\n",
223
+ "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n",
224
+ "* Running on public URL: https://f0e8ea5657a5c96892.gradio.live\n",
225
+ "\n",
226
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
227
+ ]
228
+ },
229
+ {
230
+ "data": {
231
+ "text/html": [
232
+ "<div><iframe src=\"https://f0e8ea5657a5c96892.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
233
+ ],
234
+ "text/plain": [
235
+ "<IPython.core.display.HTML object>"
236
+ ]
237
+ },
238
+ "metadata": {},
239
+ "output_type": "display_data"
240
+ },
241
+ {
242
+ "name": "stdout",
243
+ "output_type": "stream",
244
+ "text": [
245
+ "Keyboard interruption in main thread... closing server.\n",
246
+ "Killing tunnel 127.0.0.1:7860 <> https://f0e8ea5657a5c96892.gradio.live\n"
247
+ ]
248
+ },
249
+ {
250
+ "data": {
251
+ "text/plain": []
252
+ },
253
+ "execution_count": 3,
254
+ "metadata": {},
255
+ "output_type": "execute_result"
256
+ }
257
+ ],
258
+ "source": [
259
+ "# Authenticate with Hugging Face\n",
260
+ "from huggingface_hub import login\n",
261
+ "\n",
262
+ "# Log in to Hugging Face using the provided token\n",
263
+ "hf_token = '---xxxx---'\n",
264
+ "login(hf_token)\n",
265
+ "\n",
266
+ "#Demo\n",
267
+ "import gradio as gr\n",
268
+ "import spaces\n",
269
+ "from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer\n",
270
+ "from qwen_vl_utils import process_vision_info\n",
271
+ "import torch\n",
272
+ "from PIL import Image\n",
273
+ "import os\n",
274
+ "import uuid\n",
275
+ "import io\n",
276
+ "from threading import Thread\n",
277
+ "from reportlab.lib.pagesizes import A4\n",
278
+ "from reportlab.lib.styles import getSampleStyleSheet\n",
279
+ "from reportlab.lib import colors\n",
280
+ "from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer\n",
281
+ "from reportlab.lib.units import inch\n",
282
+ "from reportlab.pdfbase import pdfmetrics\n",
283
+ "from reportlab.pdfbase.ttfonts import TTFont\n",
284
+ "import docx\n",
285
+ "from docx.enum.text import WD_ALIGN_PARAGRAPH\n",
286
+ "\n",
287
+ "# Define model options\n",
288
+ "MODEL_OPTIONS = {\n",
289
+ " \"Omni-Reasoner\": \"prithivMLmods/Omni-Reasoner-2B\",\n",
290
+ "}\n",
291
+ "\n",
292
+ "# Preload models and processors into CUDA\n",
293
+ "models = {}\n",
294
+ "processors = {}\n",
295
+ "for name, model_id in MODEL_OPTIONS.items():\n",
296
+ " print(f\"Loading {name}...\")\n",
297
+ " models[name] = Qwen2VLForConditionalGeneration.from_pretrained(\n",
298
+ " model_id,\n",
299
+ " trust_remote_code=True,\n",
300
+ " torch_dtype=torch.float16\n",
301
+ " ).to(\"cuda\").eval()\n",
302
+ " processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)\n",
303
+ "\n",
304
+ "image_extensions = Image.registered_extensions()\n",
305
+ "\n",
306
+ "def identify_and_save_blob(blob_path):\n",
307
+ " \"\"\"Identifies if the blob is an image and saves it.\"\"\"\n",
308
+ " try:\n",
309
+ " with open(blob_path, 'rb') as file:\n",
310
+ " blob_content = file.read()\n",
311
+ " try:\n",
312
+ " Image.open(io.BytesIO(blob_content)).verify() # Check if it's a valid image\n",
313
+ " extension = \".png\" # Default to PNG for saving\n",
314
+ " media_type = \"image\"\n",
315
+ " except (IOError, SyntaxError):\n",
316
+ " raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
317
+ "\n",
318
+ " filename = f\"temp_{uuid.uuid4()}_media{extension}\"\n",
319
+ " with open(filename, \"wb\") as f:\n",
320
+ " f.write(blob_content)\n",
321
+ "\n",
322
+ " return filename, media_type\n",
323
+ "\n",
324
+ " except FileNotFoundError:\n",
325
+ " raise ValueError(f\"The file {blob_path} was not found.\")\n",
326
+ " except Exception as e:\n",
327
+ " raise ValueError(f\"An error occurred while processing the file: {e}\")\n",
328
+ "\n",
329
+ "@spaces.GPU\n",
330
+ "def qwen_inference(model_name, media_input, text_input=None):\n",
331
+ " \"\"\"Handles inference for the selected model.\"\"\"\n",
332
+ " model = models[model_name]\n",
333
+ " processor = processors[model_name]\n",
334
+ "\n",
335
+ " if isinstance(media_input, str):\n",
336
+ " media_path = media_input\n",
337
+ " if media_path.endswith(tuple([i for i in image_extensions.keys()])):\n",
338
+ " media_type = \"image\"\n",
339
+ " else:\n",
340
+ " try:\n",
341
+ " media_path, media_type = identify_and_save_blob(media_input)\n",
342
+ " except Exception as e:\n",
343
+ " raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
344
+ "\n",
345
+ " messages = [\n",
346
+ " {\n",
347
+ " \"role\": \"user\",\n",
348
+ " \"content\": [\n",
349
+ " {\n",
350
+ " \"type\": media_type,\n",
351
+ " media_type: media_path\n",
352
+ " },\n",
353
+ " {\"type\": \"text\", \"text\": text_input},\n",
354
+ " ],\n",
355
+ " }\n",
356
+ " ]\n",
357
+ "\n",
358
+ " text = processor.apply_chat_template(\n",
359
+ " messages, tokenize=False, add_generation_prompt=True\n",
360
+ " )\n",
361
+ " image_inputs, _ = process_vision_info(messages)\n",
362
+ " inputs = processor(\n",
363
+ " text=[text],\n",
364
+ " images=image_inputs,\n",
365
+ " padding=True,\n",
366
+ " return_tensors=\"pt\",\n",
367
+ " ).to(\"cuda\")\n",
368
+ "\n",
369
+ " streamer = TextIteratorStreamer(\n",
370
+ " processor.tokenizer, skip_prompt=True, skip_special_tokens=True\n",
371
+ " )\n",
372
+ " generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)\n",
373
+ "\n",
374
+ " thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
375
+ " thread.start()\n",
376
+ "\n",
377
+ " buffer = \"\"\n",
378
+ " for new_text in streamer:\n",
379
+ " buffer += new_text\n",
380
+ " # Remove <|im_end|> or similar tokens from the output\n",
381
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
382
+ " yield buffer\n",
383
+ "\n",
384
+ "def format_plain_text(output_text):\n",
385
+ " \"\"\"Formats the output text as plain text without LaTeX delimiters.\"\"\"\n",
386
+ " # Remove LaTeX delimiters and convert to plain text\n",
387
+ " plain_text = output_text.replace(\"\\\\(\", \"\").replace(\"\\\\)\", \"\").replace(\"\\\\[\", \"\").replace(\"\\\\]\", \"\")\n",
388
+ " return plain_text\n",
389
+ "\n",
390
+ "def generate_document(media_path, output_text, file_format, font_size, line_spacing, alignment, image_size):\n",
391
+ " \"\"\"Generates a document with the input image and plain text output.\"\"\"\n",
392
+ " plain_text = format_plain_text(output_text)\n",
393
+ " if file_format == \"pdf\":\n",
394
+ " return generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
395
+ " elif file_format == \"docx\":\n",
396
+ " return generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
397
+ "\n",
398
+ "def generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
399
+ " \"\"\"Generates a PDF document.\"\"\"\n",
400
+ " filename = f\"output_{uuid.uuid4()}.pdf\"\n",
401
+ " doc = SimpleDocTemplate(\n",
402
+ " filename,\n",
403
+ " pagesize=A4,\n",
404
+ " rightMargin=inch,\n",
405
+ " leftMargin=inch,\n",
406
+ " topMargin=inch,\n",
407
+ " bottomMargin=inch\n",
408
+ " )\n",
409
+ " styles = getSampleStyleSheet()\n",
410
+ " styles[\"Normal\"].fontSize = int(font_size)\n",
411
+ " styles[\"Normal\"].leading = int(font_size) * line_spacing\n",
412
+ " styles[\"Normal\"].alignment = {\n",
413
+ " \"Left\": 0,\n",
414
+ " \"Center\": 1,\n",
415
+ " \"Right\": 2,\n",
416
+ " \"Justified\": 4\n",
417
+ " }[alignment]\n",
418
+ "\n",
419
+ " story = []\n",
420
+ "\n",
421
+ " # Add image with size adjustment\n",
422
+ " image_sizes = {\n",
423
+ " \"Small\": (200, 200),\n",
424
+ " \"Medium\": (400, 400),\n",
425
+ " \"Large\": (600, 600)\n",
426
+ " }\n",
427
+ " img = RLImage(media_path, width=image_sizes[image_size][0], height=image_sizes[image_size][1])\n",
428
+ " story.append(img)\n",
429
+ " story.append(Spacer(1, 12))\n",
430
+ "\n",
431
+ " # Add plain text output\n",
432
+ " text = Paragraph(plain_text, styles[\"Normal\"])\n",
433
+ " story.append(text)\n",
434
+ "\n",
435
+ " doc.build(story)\n",
436
+ " return filename\n",
437
+ "\n",
438
+ "def generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
439
+ " \"\"\"Generates a DOCX document.\"\"\"\n",
440
+ " filename = f\"output_{uuid.uuid4()}.docx\"\n",
441
+ " doc = docx.Document()\n",
442
+ "\n",
443
+ " # Add image with size adjustment\n",
444
+ " image_sizes = {\n",
445
+ " \"Small\": docx.shared.Inches(2),\n",
446
+ " \"Medium\": docx.shared.Inches(4),\n",
447
+ " \"Large\": docx.shared.Inches(6)\n",
448
+ " }\n",
449
+ " doc.add_picture(media_path, width=image_sizes[image_size])\n",
450
+ " doc.add_paragraph()\n",
451
+ "\n",
452
+ " # Add plain text output\n",
453
+ " paragraph = doc.add_paragraph()\n",
454
+ " paragraph.paragraph_format.line_spacing = line_spacing\n",
455
+ " paragraph.paragraph_format.alignment = {\n",
456
+ " \"Left\": WD_ALIGN_PARAGRAPH.LEFT,\n",
457
+ " \"Center\": WD_ALIGN_PARAGRAPH.CENTER,\n",
458
+ " \"Right\": WD_ALIGN_PARAGRAPH.RIGHT,\n",
459
+ " \"Justified\": WD_ALIGN_PARAGRAPH.JUSTIFY\n",
460
+ " }[alignment]\n",
461
+ " run = paragraph.add_run(plain_text)\n",
462
+ " run.font.size = docx.shared.Pt(int(font_size))\n",
463
+ "\n",
464
+ " doc.save(filename)\n",
465
+ " return filename\n",
466
+ "\n",
467
+ "# CSS for output styling\n",
468
+ "css = \"\"\"\n",
469
+ " #output {\n",
470
+ " height: 500px;\n",
471
+ " overflow: auto;\n",
472
+ " border: 1px solid #ccc;\n",
473
+ " }\n",
474
+ ".submit-btn {\n",
475
+ " background-color: #cf3434 !important;\n",
476
+ " color: white !important;\n",
477
+ "}\n",
478
+ ".submit-btn:hover {\n",
479
+ " background-color: #ff2323 !important;\n",
480
+ "}\n",
481
+ ".download-btn {\n",
482
+ " background-color: #35a6d6 !important;\n",
483
+ " color: white !important;\n",
484
+ "}\n",
485
+ ".download-btn:hover {\n",
486
+ " background-color: #22bcff !important;\n",
487
+ "}\n",
488
+ "\"\"\"\n",
489
+ "\n",
490
+ "# Gradio app setup\n",
491
+ "with gr.Blocks(css=css) as demo:\n",
492
+ " gr.Markdown(\"# Omni-Reasoner\")\n",
493
+ "\n",
494
+ " with gr.Tab(label=\"Image Input\"):\n",
495
+ "\n",
496
+ " with gr.Row():\n",
497
+ " with gr.Column():\n",
498
+ " model_choice = gr.Dropdown(\n",
499
+ " label=\"Model Selection\",\n",
500
+ " choices=list(MODEL_OPTIONS.keys()),\n",
501
+ " value=\"Omni-Reasoner\"\n",
502
+ " )\n",
503
+ " input_media = gr.File(\n",
504
+ " label=\"Upload Image\", type=\"filepath\"\n",
505
+ " )\n",
506
+ " text_input = gr.Textbox(label=\"Question\", placeholder=\"Ask a question about the image...\")\n",
507
+ " submit_btn = gr.Button(value=\"Submit\", elem_classes=\"submit-btn\")\n",
508
+ "\n",
509
+ " with gr.Column():\n",
510
+ " output_text = gr.Textbox(label=\"Output Text\", lines=10)\n",
511
+ " plain_text_output = gr.Textbox(label=\"Standardized Plain Text\", lines=10)\n",
512
+ "\n",
513
+ " submit_btn.click(\n",
514
+ " qwen_inference, [model_choice, input_media, text_input], [output_text]\n",
515
+ " ).then(\n",
516
+ " lambda output_text: format_plain_text(output_text), [output_text], [plain_text_output]\n",
517
+ " )\n",
518
+ "\n",
519
+ " # Add examples directly usable by clicking\n",
520
+ " with gr.Row():\n",
521
+ " with gr.Column():\n",
522
+ " line_spacing = gr.Dropdown(\n",
523
+ " choices=[0.5, 1.0, 1.15, 1.5, 2.0, 2.5, 3.0],\n",
524
+ " value=1.5,\n",
525
+ " label=\"Line Spacing\"\n",
526
+ " )\n",
527
+ " font_size = gr.Dropdown(\n",
528
+ " choices=[\"8\", \"10\", \"12\", \"14\", \"16\", \"18\", \"20\", \"22\", \"24\"],\n",
529
+ " value=\"18\",\n",
530
+ " label=\"Font Size\"\n",
531
+ " )\n",
532
+ " alignment = gr.Dropdown(\n",
533
+ " choices=[\"Left\", \"Center\", \"Right\", \"Justified\"],\n",
534
+ " value=\"Justified\",\n",
535
+ " label=\"Text Alignment\"\n",
536
+ " )\n",
537
+ " image_size = gr.Dropdown(\n",
538
+ " choices=[\"Small\", \"Medium\", \"Large\"],\n",
539
+ " value=\"Small\",\n",
540
+ " label=\"Image Size\"\n",
541
+ " )\n",
542
+ " file_format = gr.Radio([\"pdf\", \"docx\"], label=\"File Format\", value=\"pdf\")\n",
543
+ " get_document_btn = gr.Button(value=\"Get Document\", elem_classes=\"download-btn\")\n",
544
+ "\n",
545
+ " get_document_btn.click(\n",
546
+ " generate_document, [input_media, output_text, file_format, font_size, line_spacing, alignment, image_size], gr.File(label=\"Download Document\")\n",
547
+ " )\n",
548
+ "\n",
549
+ "demo.launch(debug=True)"
550
+ ]
551
+ }
552
+ ],
553
+ "metadata": {
554
+ "accelerator": "GPU",
555
+ "colab": {
556
+ "gpuType": "T4",
557
+ "provenance": []
558
+ },
559
+ "kernelspec": {
560
+ "display_name": "Python 3",
561
+ "name": "python3"
562
+ },
563
+ "language_info": {
564
+ "name": "python"
565
+ }
566
+ },
567
+ "nbformat": 4,
568
+ "nbformat_minor": 0
569
+ }
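
For reference, a condensed, non-streaming sketch of the same inference path the notebook wires into its Gradio app, assuming the environment from the first cell (transformers, qwen-vl-utils, torch, a CUDA GPU); the image path "example.png" and the question are placeholder inputs, not part of the original commit.

# Minimal Omni-Reasoner-2B inference sketch (assumptions: CUDA GPU, packages from the notebook's first cell).
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

model_id = "prithivMLmods/Omni-Reasoner-2B"

# Load the checkpoint and its processor, mirroring the notebook's preload step.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16
).to("cuda").eval()
processor = AutoProcessor.from_pretrained(model_id)

# Placeholder image and question; swap in your own inputs.
messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": "example.png"},
        {"type": "text", "text": "Describe the image and reason about what it shows."},
    ],
}]

# Build the chat prompt and pixel inputs, then generate without a streamer.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, _ = process_vision_info(messages)
inputs = processor(text=[text], images=image_inputs, padding=True, return_tensors="pt").to("cuda")

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=512)

# Drop the prompt tokens before decoding, so only the model's answer is printed.
trimmed = [out[len(inp):] for inp, out in zip(inputs["input_ids"], output_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])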