use pretrained_weights
- models/__pycache__/blip2_model.cpython-38.pyc +0 -0
- models/blip2_model.py +2 -2
- models/controlnet_model.py +1 -1
- models/segment_models/semantic_segment_anything_model.py +5 -5
- pretrained_models/blip-image-captioning-large +1 -0
- pretrained_models/blip2-opt-2.7b +1 -0
- pretrained_models/clip-vit-large-patch14 +1 -0
- pretrained_models/clipseg-rd64-refined +1 -0
- pretrained_models/download_pretrain.sh +5 -0
- pretrained_models/oneformer_ade20k_swin_large +1 -0
- pretrained_models/oneformer_coco_swin_large +1 -0
- pretrained_models/stable-diffusion-v1-5 +1 -0
models/__pycache__/blip2_model.cpython-38.pyc
CHANGED
Binary files a/models/__pycache__/blip2_model.cpython-38.pyc and b/models/__pycache__/blip2_model.cpython-38.pyc differ
models/blip2_model.py
CHANGED
@@ -14,9 +14,9 @@ class ImageCaptioning:
             self.data_type = torch.float32
         else:
             self.data_type = torch.float16
-        processor = Blip2Processor.from_pretrained("
+        processor = Blip2Processor.from_pretrained("pretrained_models/blip2-opt-2.7b")
         model = Blip2ForConditionalGeneration.from_pretrained(
-            "
+            "pretrained_models/blip2-opt-2.7b", torch_dtype=self.data_type
         )
         model.to(self.device)
         return processor, model
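For context, this is roughly how the updated loading code behaves when run standalone. A minimal sketch, assuming the repository root contains the pretrained_models/blip2-opt-2.7b checkout and that the fp16/fp32 split follows CUDA availability (the surrounding class logic is not shown in this hunk):

import torch
from transformers import Blip2Processor, Blip2ForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
data_type = torch.float16 if device == "cuda" else torch.float32  # mirrors the if/else above

# Load the processor and model from the local checkout instead of the Hugging Face Hub.
processor = Blip2Processor.from_pretrained("pretrained_models/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "pretrained_models/blip2-opt-2.7b", torch_dtype=data_type
)
model.to(device)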
models/controlnet_model.py
CHANGED
@@ -25,7 +25,7 @@ class TextToImage:
             map_location=self.device,  # Add this line
         ).to(self.device)
         pipeline = StableDiffusionControlNetPipeline.from_pretrained(
-            "
+            "pretrained_models/stable-diffusion-v1-5",
             controlnet=controlnet,
             safety_checker=None,
             torch_dtype=self.data_type,
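The same local-path pattern applied to the diffusers pipeline, as a hedged sketch. The ControlNet checkpoint id below ("lllyasviel/sd-controlnet-canny") is an illustrative assumption; this hunk only shows that a controlnet object is passed in, not where it comes from:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

data_type = torch.float16 if torch.cuda.is_available() else torch.float32

# Hypothetical ControlNet checkpoint; the one actually used by the repo is not shown in this diff.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=data_type
)
# Stable Diffusion weights are read from the local subproject added by this commit.
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    "pretrained_models/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=data_type,
)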
models/segment_models/semantic_segment_anything_model.py
CHANGED
@@ -27,27 +27,27 @@ class SemanticSegment():
         self.init_clipseg()
 
     def init_clip(self):
-        model_name = "
+        model_name = "pretrained_models/clip-vit-large-patch14"
         self.clip_processor = CLIPProcessor.from_pretrained(model_name)
         self.clip_model = CLIPModel.from_pretrained(model_name).to(self.device)
 
     def init_oneformer_ade20k(self):
-        model_name = "
+        model_name = "pretrained_models/oneformer_ade20k_swin_large"
         self.oneformer_ade20k_processor = OneFormerProcessor.from_pretrained(model_name)
         self.oneformer_ade20k_model = OneFormerForUniversalSegmentation.from_pretrained(model_name).to(self.device)
 
     def init_oneformer_coco(self):
-        model_name = "
+        model_name = "pretrained_models/oneformer_coco_swin_large"
         self.oneformer_coco_processor = OneFormerProcessor.from_pretrained(model_name)
         self.oneformer_coco_model = OneFormerForUniversalSegmentation.from_pretrained(model_name).to(self.device)
 
     def init_blip(self):
-        model_name = "
+        model_name = "pretrained_models/blip-image-captioning-large"
         self.blip_processor = BlipProcessor.from_pretrained(model_name)
         self.blip_model = BlipForConditionalGeneration.from_pretrained(model_name).to(self.device)
 
     def init_clipseg(self):
-        model_name = "
+        model_name = "pretrained_models/clipseg-rd64-refined"
         self.clipseg_processor = AutoProcessor.from_pretrained(model_name)
         self.clipseg_model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(self.device)
         self.clipseg_processor.image_processor.do_resize = False
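All five init_* methods now follow the same pattern: point transformers at a directory under pretrained_models/ instead of a Hub id. A minimal standalone sketch for one of them, assuming pretrained_models/oneformer_ade20k_swin_large has been cloned as in the download script below:

from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

# Load the ADE20K OneFormer checkpoint from the local directory rather than the Hub.
model_name = "pretrained_models/oneformer_ade20k_swin_large"
oneformer_ade20k_processor = OneFormerProcessor.from_pretrained(model_name)
oneformer_ade20k_model = OneFormerForUniversalSegmentation.from_pretrained(model_name)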
pretrained_models/blip-image-captioning-large
ADDED
@@ -0,0 +1 @@
+Subproject commit 293ab01f2dc41c1c214299314f11de635d0937dc
pretrained_models/blip2-opt-2.7b
ADDED
@@ -0,0 +1 @@
+Subproject commit 56e1fe81e7e7c346e95e196ace7b442b3f8ff483
pretrained_models/clip-vit-large-patch14
ADDED
@@ -0,0 +1 @@
+Subproject commit 8d052a0f05efbaefbc9e8786ba291cfdf93e5bff
pretrained_models/clipseg-rd64-refined
ADDED
@@ -0,0 +1 @@
+Subproject commit 583b388deb98a04feb3e1f816dcdb8f3062ee205
pretrained_models/download_pretrain.sh
ADDED
@@ -0,0 +1,5 @@
+git clone https://huggingface.co/openai/clip-vit-large-patch14
+git clone https://huggingface.co/shi-labs/oneformer_ade20k_swin_large
+git clone https://huggingface.co/shi-labs/oneformer_coco_swin_large
+git clone https://huggingface.co/Salesforce/blip-image-captioning-large
+git clone https://huggingface.co/CIDAS/clipseg-rd64-refined
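The script clones five of the checkpoints, while the commit also pins pretrained_models/blip2-opt-2.7b and pretrained_models/stable-diffusion-v1-5 as subprojects. A hedged sketch of fetching those two with huggingface_hub instead of git, assuming the upstream repos are Salesforce/blip2-opt-2.7b and runwayml/stable-diffusion-v1-5 (the commit itself does not name them):

from huggingface_hub import snapshot_download

# Download the remaining checkpoints into the local layout the code above expects.
# The repo ids are assumptions for illustration, not confirmed by this commit.
snapshot_download("Salesforce/blip2-opt-2.7b", local_dir="pretrained_models/blip2-opt-2.7b")
snapshot_download("runwayml/stable-diffusion-v1-5", local_dir="pretrained_models/stable-diffusion-v1-5")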
pretrained_models/oneformer_ade20k_swin_large
ADDED
@@ -0,0 +1 @@
+Subproject commit 4a5bac8e64f82681a12db2e151a4c2f4ce6092b2
pretrained_models/oneformer_coco_swin_large
ADDED
@@ -0,0 +1 @@
+Subproject commit 3a263017ca5c75adbea145f25f81b118243d4394
pretrained_models/stable-diffusion-v1-5
ADDED
@@ -0,0 +1 @@
+Subproject commit 39593d5650112b4cc580433f6b0435385882d819