Commit · db9e582
JustinLin610 committed
1 Parent(s): 653ab88
update

Files changed:
- app.py: +12 -10
- requirements.txt: +4 -1
app.py CHANGED

@@ -2,33 +2,35 @@ import gradio as gr
 import os
 import torch
 import numpy as np
+
+os.system('git clone https://github.com/pytorch/fairseq.git; cd fairseq;'
+          'pip install --use-feature=in-tree-build ./; cd ..')
+
 from fairseq import utils, tasks
-from
+from utils import checkpoint_utils
 from utils.eval_utils import eval_step
 from tasks.mm_tasks.caption import CaptionTask
 from models.ofa import OFAModel
 from PIL import Image
 from torchvision import transforms
 
-
 # Register caption task
-tasks.register_task('caption',CaptionTask)
+tasks.register_task('caption', CaptionTask)
 # turn on cuda if GPU is available
 use_cuda = torch.cuda.is_available()
 # use fp16 only when GPU is available
 use_fp16 = False
 
-os.system('wget https://ofa-silicon.oss-us-west-1.aliyuncs.com/checkpoints/caption_large_best_clean.pt'
-
-os.system('mv caption_large_best_clean.pt checkpoints/caption.pt')
+os.system('wget https://ofa-silicon.oss-us-west-1.aliyuncs.com/checkpoints/caption_large_best_clean.pt; '
+          'mkdir -p checkpoints; mv caption_large_best_clean.pt checkpoints/caption.pt')
 
 # Load pretrained ckpt & config
 overrides = {"bpe_dir": "utils/BPE", "eval_cider": False, "beam": 5,
              "max_len_b": 16, "no_repeat_ngram_size": 3, "seed": 7}
 models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-
-
-
+    utils.split_paths('checkpoints/caption.pt'),
+    arg_overrides=overrides
+)
 
 # Move models to GPU
 for model in models:
@@ -109,4 +111,4 @@ def image_caption(inp):
 
 
 io = gr.Interface(fn=image_caption, inputs=gr.inputs.Image(type='pil'), outputs='text')
-io.launch(enable_queue=True)
+io.launch(enable_queue=True)
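The heart of this commit is the startup bootstrap: build fairseq from source, fetch the OFA caption checkpoint into checkpoints/caption.pt, then load the model ensemble with the overrides shown above. The following is a minimal sketch of that same sequence, assuming it runs from the Space's working directory (where OFA's local utils/ and tasks/ packages live). The subprocess/pathlib variant and the helper names bootstrap_fairseq and fetch_checkpoint are illustrative and not part of the commit; the checkpoint URL, the checkpoints/caption.pt path, and the load_model_ensemble_and_task call are copied from the diff.

import subprocess
import sys
from pathlib import Path

CKPT_URL = "https://ofa-silicon.oss-us-west-1.aliyuncs.com/checkpoints/caption_large_best_clean.pt"
CKPT_PATH = Path("checkpoints/caption.pt")

def bootstrap_fairseq():
    # Same effect as the os.system() clone-and-install added in this commit,
    # but each step is checked so a failed install stops the app immediately.
    # The --use-feature=in-tree-build flag from the diff is omitted; recent
    # pip versions build in-tree by default.
    if not Path("fairseq").exists():
        subprocess.run(["git", "clone", "https://github.com/pytorch/fairseq.git"], check=True)
    subprocess.run([sys.executable, "-m", "pip", "install", "./fairseq"], check=True)

def fetch_checkpoint():
    # Same effect as the wget + mkdir + mv chain: download once into checkpoints/.
    CKPT_PATH.parent.mkdir(parents=True, exist_ok=True)
    if not CKPT_PATH.exists():
        subprocess.run(["wget", "-O", str(CKPT_PATH), CKPT_URL], check=True)

bootstrap_fairseq()
fetch_checkpoint()

# Load call as in the diff above; it assumes OFA's own utils/ package
# (which provides checkpoint_utils) is importable from the working directory.
from fairseq import utils
from utils import checkpoint_utils

overrides = {"bpe_dir": "utils/BPE", "eval_cider": False, "beam": 5,
             "max_len_b": 16, "no_repeat_ngram_size": 3, "seed": 7}
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    utils.split_paths(str(CKPT_PATH)),
    arg_overrides=overrides,
)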
requirements.txt CHANGED

@@ -1 +1,4 @@
-
+ftfy==6.0.3
+tensorboardX==2.4.1
+pycocotools==2.0.4
+pycocoevalcap==1.2
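The four new pins are not imported directly in the visible part of app.py; they are presumably pulled in by the OFA caption task and its COCO-style evaluation utilities. On Spaces, requirements.txt is installed before app.py starts, so these packages are in place ahead of the runtime fairseq build above. A small optional sanity check, not part of this commit, could fail fast if any pin is missing:

import importlib

# Hypothetical startup check: verify the packages pinned in requirements.txt
# are importable before the model is loaded, so a broken environment surfaces
# as one clear error rather than a late ImportError inside the caption task.
for module in ("ftfy", "tensorboardX", "pycocotools", "pycocoevalcap"):
    try:
        importlib.import_module(module)
    except ImportError as exc:
        raise SystemExit(f"Missing pinned dependency {module!r}: {exc}")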