zyhe0211 committed on
Commit
c2dd497
1 Parent(s): 17f1a81
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ LLM2CLIP-Llama3.2-1B-EVA02-L-14-336.pt filter=lfs diff=lfs merge=lfs -text
LLM2CLIP-Llama3.2-1B-EVA02-L-14-336.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a99ca8c922f7e1cf414c4e3bfd318f33befbfbe68ce8d01ba627bd6a2cac864f
+ size 749702998
README.md CHANGED
@@ -1,3 +1,113 @@
- ---
- license: mit
- ---
+ ---
+ license: apache-2.0
+ pipeline_tag: zero-shot-image-classification
+ ---
+ <div align="center">
+
+ <h2><a href="">LLM2CLIP: Extending the Capability Boundaries of CLIP through Large Language Models</a></h2>
+ Weiquan Huang<sup>1*</sup>, Aoqi Wu<sup>1*</sup>, Yifan Yang<sup>2†</sup>, Xufang Luo<sup>2</sup>, Yuqing Yang<sup>2</sup>, Liang Hu<sup>1</sup>, Qi Dai<sup>2</sup>, Xiyang Dai<sup>2</sup>, Dongdong Chen<sup>2</sup>, Chong Luo<sup>2</sup>, Lili Qiu<sup>2</sup>
+
+ <sup>1</sup>Tongji University, <sup>2</sup>Microsoft Corporation <br><sup>*</sup>Equal contribution <br><sup>†</sup> Corresponding to: [email protected]
+
+ <p><a rel="nofollow" href="https://github.com/microsoft/LLM2CLIP">[📂 GitHub]</a> <a rel="nofollow" href="https://microsoft.github.io/LLM2CLIP/">[🆕 Blog]</a> <a rel="nofollow" href="">[📜 LLM2CLIP]</a></p>
+ </div>
+
+
+ In this paper, we propose LLM2CLIP, a novel approach that embraces the power of LLMs to unlock CLIP’s potential. By fine-tuning the LLM in the caption space with contrastive learning, we extract its textual capabilities into the output embeddings, significantly improving the output layer’s textual discriminability. We then design an efficient training process in which the fine-tuned LLM acts as a powerful teacher for CLIP’s visual encoder. Thanks to the LLM’s presence, we can now incorporate longer and more complex captions without being restricted by the context window and capability limitations of the vanilla CLIP text encoder. Our experiments demonstrate that this approach brings substantial improvements in cross-modal tasks. Our method directly boosted the performance of the previous SOTA EVA02 model by 16.5% on both long-text and short-text retrieval tasks, transforming a CLIP model trained solely on English data into a state-of-the-art cross-lingual model. Moreover, when integrated into multimodal training with models like LLaVA 1.5, it consistently outperformed CLIP across nearly all benchmarks, demonstrating comprehensive performance improvements.
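+
+ As a rough illustration of the training recipe described above (this is not the authors' released code; the loss form and all names below are assumptions), the caption embeddings from the frozen, fine-tuned LLM act as fixed targets in a standard CLIP-style symmetric contrastive objective, with gradients flowing into the vision side and the projection layers:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def clip_style_loss(image_feats, text_feats, logit_scale=100.0):
+     # Both feature sets are L2-normalized, so dot products are cosine similarities.
+     image_feats = F.normalize(image_feats, dim=-1)
+     text_feats = F.normalize(text_feats, dim=-1)           # targets from the frozen LLM
+     logits = logit_scale * image_feats @ text_feats.T      # (B, B) similarity matrix
+     targets = torch.arange(image_feats.size(0), device=logits.device)
+     # Symmetric InfoNCE: match each image to its caption and each caption to its image.
+     return (F.cross_entropy(logits, targets) + F.cross_entropy(logits.T, targets)) / 2
+ ```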
+
+ ## LLM2CLIP performance
+
+ <div align="center">
+ <img src="teaser.png" alt="summary_tab" width="85%">
+ </div>
+
+ **Note that all results reported in the paper were evaluated using the PyTorch weights; performance may differ when using the Hugging Face (hf) models.**
+
+ ## Model Details
+ - **Model Type:** vision foundation model, feature backbone
+ - **Pretrain Dataset:** CC3M, CC12M, YFCC15M, and Recap-DataComp-1B (30M subset)
+
+
+ ## Usage
+
+ ### PyTorch version
+
+ Install the `llm2vec` dependency first: `pip install llm2vec`
+
+ See [GitHub](https://github.com/microsoft/LLM2CLIP/tree/main/llm2clip) for more details.
+ ```python
+ import os
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+
+ import torch
+ from torch import nn
+ from PIL import Image
+ from transformers import AutoModel, AutoConfig, AutoTokenizer
+ from eva_clip import create_model_and_transforms
+ from llm2vec import LLM2Vec
+
+ class LLM2VecWrapper(LLM2Vec):
+     def prepare_for_tokenization(self, text):
+         # Wrap each caption in the Llama-3 chat template expected by the fine-tuned LLM.
+         text = (
+             "<|start_header_id|>user<|end_header_id|>\n\n"
+             + text.strip()
+             + "<|eot_id|>"
+         )
+         return text
+
+ class LlamaVec_1B_FeatureExtractor(nn.Module):
+     def __init__(self):
+         super().__init__()
+
+         model_path = 'microsoft/LLM2CLIP-Llama-3.2-1B-Instruct-CC-Finetuned'
+         config = AutoConfig.from_pretrained(model_path)
+
+         model = AutoModel.from_pretrained(model_path, config=config, trust_remote_code=True, torch_dtype=torch.bfloat16)
+         tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+         tokenizer.pad_token = tokenizer.eos_token
+         tokenizer.padding_side = "left"
+
+         self.l2v = LLM2VecWrapper(model, tokenizer, pooling_mode="mean", max_length=512, skip_instruction=True)
+
+     def extract_features(self, text):
+         # Encode captions with the LLM and L2-normalize the pooled embeddings.
+         with torch.amp.autocast('cuda'):
+             reps_norm = self.l2v.encode(text, convert_to_tensor=True)
+             reps_norm = torch.nn.functional.normalize(reps_norm, p=2, dim=1)
+         return reps_norm
+
+ model, _, preprocess_val = create_model_and_transforms('Llama3.2-1B-EVA02-L-14-336', force_custom_clip=True)
+ ckpt = torch.load('LLM2CLIP-Llama3.2-1B-EVA02-L-14-336.pt')
+ model.load_state_dict(ckpt)
+ model = model.cuda().eval()
+
+ text_model = LlamaVec_1B_FeatureExtractor()
+ image_path = "CLIP.png"
+ captions = ["a diagram", "a dog", "a cat"]
+
+ image = preprocess_val(Image.open(image_path)).cuda().unsqueeze(dim=0)
+ text_features = text_model.extract_features(captions).to('cuda')
+
+ with torch.no_grad(), torch.cuda.amp.autocast():
+     image_features = model.encode_image(image)
+     text_features = model.encode_text(text_features)
+
+     image_features /= image_features.norm(dim=-1, keepdim=True)
+     text_features /= text_features.norm(dim=-1, keepdim=True)
+
+     text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
+
+ print("Label probs:", text_probs)
+ ```
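+
+ As a follow-up, here is a minimal sketch of text-to-image retrieval over a small gallery, reusing the `model`, `preprocess_val`, and `text_model` objects created above (the gallery paths and query caption are placeholders, not files shipped with this repo):
+
+ ```python
+ import torch
+ from PIL import Image
+
+ # Hypothetical gallery; replace with your own image paths.
+ gallery_paths = ["CLIP.png", "dog.jpg", "cat.jpg"]
+ query = ["a diagram explaining contrastive language-image pretraining"]
+
+ images = torch.stack([preprocess_val(Image.open(p)) for p in gallery_paths]).cuda()
+ query_features = text_model.extract_features(query).to('cuda')
+
+ with torch.no_grad(), torch.cuda.amp.autocast():
+     gallery_features = model.encode_image(images)
+     query_features = model.encode_text(query_features)
+
+     gallery_features /= gallery_features.norm(dim=-1, keepdim=True)
+     query_features /= query_features.norm(dim=-1, keepdim=True)
+
+     # Rank gallery images by cosine similarity to the query caption.
+     sims = (query_features @ gallery_features.T).squeeze(0)
+     best = sims.argmax().item()
+
+ print("Best match:", gallery_paths[best], "score:", sims[best].item())
+ ```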
+
+ ## BibTeX & Citation
+
+ ```
+ @misc{huang2024llm2clippowerfullanguagemodel,
+       title={LLM2CLIP: Powerful Language Model Unlock Richer Visual Representation},
+       author={Weiquan Huang and Aoqi Wu and Yifan Yang and Xufang Luo and Yuqing Yang and Liang Hu and Qi Dai and Xiyang Dai and Dongdong Chen and Chong Luo and Lili Qiu},
+       year={2024},
+       eprint={2411.04997},
+       archivePrefix={arXiv},
+       primaryClass={cs.CV},
+       url={https://arxiv.org/abs/2411.04997},
+ }
+ ```