# MiniCPM-Embedding-Light/scripts/transformers_demo.py
from transformers import AutoModel
import torch

model_name = "openbmb/UltraRAG-Embedding"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float16).to("cuda")
# You can use flash_attention_2 for faster inference:
# model = AutoModel.from_pretrained(model_name, trust_remote_code=True, attn_implementation="flash_attention_2", torch_dtype=torch.float16).to("cuda")
model.eval()

queries = ["MiniCPM-o 2.6 A GPT-4o Level MLLM for Vision, Speech and Multimodal Live Streaming on Your Phone"]
passages = ["MiniCPM-o 2.6 is the latest and most capable model in the MiniCPM-o series. The model is built in an end-to-end fashion based on SigLip-400M, Whisper-medium-300M, ChatTTS-200M, and Qwen2.5-7B with a total of 8B parameters. It exhibits a significant performance improvement over MiniCPM-V 2.6, and introduces new features for real-time speech conversation and multimodal live streaming."]
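
# Encode queries and documents separately; with return_sparse_vectors=True each
# call returns a (dense, sparse) pair. max_length and dense_dim are options of
# this model's remote code; dense_dim=1024 presumably selects the dense output
# dimension (an assumption from the parameter name, not documented here).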
embeddings_query_dense, embeddings_query_sparse = model.encode_query(queries, return_sparse_vectors=True, max_length=8192, dense_dim=1024)
embeddings_doc_dense, embeddings_doc_sparse = model.encode_corpus(passages, return_sparse_vectors=True)
dense_scores = (embeddings_query_dense @ embeddings_doc_dense.T)
print(dense_scores.tolist()) # [[0.6512398719787598]]
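# The score above looks like a cosine similarity, which suggests (but does not
# guarantee) that the dense embeddings come back L2-normalized. Optional check:
# print(embeddings_query_dense.norm(dim=-1))  # expected ~1.0 if normalized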
print(model.compute_sparse_score_dicts(embeddings_query_sparse, embeddings_doc_sparse)) # [[0.27202296]]
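# Judging by the name compute_sparse_score_dicts, each sparse vector is
# presumably a token-to-weight dict; this is an assumption about the remote
# code, not a documented API. Optional peek at the first few entries:
# print(list(embeddings_query_sparse[0].items())[:5])
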
dense_scores, sparse_scores, mixed_scores = model.compute_score(queries, passages)
print(dense_scores) # [[0.65123993]]
print(sparse_scores) # [[0.27202296]]
print(mixed_scores) # [[0.73284686]]
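
# The printed values are consistent with a fixed-weight fusion:
# 0.65123993 + 0.3 * 0.27202296 = 0.73284682 ~= 0.73284686,
# i.e. mixed ~= dense + 0.3 * sparse. The 0.3 weight is inferred from the
# numbers above, not from documented behavior.

# A minimal end-to-end retrieval sketch using only the calls demonstrated
# above: encode one query against several passages and rank them by dense
# score. The passage texts here are placeholders.
more_passages = [
    "MiniCPM-o 2.6 supports real-time speech conversation.",  # placeholder
    "An unrelated sentence about cooking pasta.",  # placeholder
]
q_dense, _ = model.encode_query(queries, return_sparse_vectors=True)
p_dense, _ = model.encode_corpus(more_passages, return_sparse_vectors=True)
scores = (q_dense @ p_dense.T)[0]
ranked = sorted(zip(more_passages, scores.tolist()), key=lambda p: p[1], reverse=True)
for passage, score in ranked:
    print(f"{score:.4f}  {passage}")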