from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch

def load_and_answer(question, context, model_name):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    # Tokenize the input question-context pair
    inputs = tokenizer(question, context, max_length=512, truncation=True, padding=True, return_tensors='pt')
    # Send inputs to the same device as the model
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    with torch.no_grad():
        # Forward pass, get model outputs
        outputs = model(**inputs)
    # Extract the start and end logits of the answer span
    answer_start_scores, answer_end_scores = outputs.start_logits, outputs.end_logits
    answer_start_index = torch.argmax(answer_start_scores)  # Most likely start of answer
    answer_end_index = torch.argmax(answer_end_scores) + 1  # Most likely end of answer; +1 for inclusive slicing
    # Convert token indices back to the actual answer text
    answer_tokens = inputs['input_ids'][0, answer_start_index:answer_end_index]
    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
    return {"answer": answer, "start": answer_start_index.item(), "end": answer_end_index.item()}
def squeezebert(context, question):
    # Specific model for SqueezeBERT fine-tuned on SQuAD v2
    model_name = "ALOQAS/squeezebert-uncased-finetuned-squad-v2"
    return load_and_answer(question, context, model_name)

def bert(context, question):
    # Specific model for BERT fine-tuned on SQuAD v2
    model_name = "ALOQAS/bert-large-uncased-finetuned-squad-v2"
    return load_and_answer(question, context, model_name)

def deberta(context, question):
    # Specific model for DeBERTa fine-tuned on SQuAD v2
    model_name = "ALOQAS/deberta-large-finetuned-squad-v2"
    return load_and_answer(question, context, model_name)
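A quick usage sketch of the wrappers above; the context and question strings are illustrative only, and note that "start" and "end" in the result are token indices within the tokenized input, not character offsets:

if __name__ == "__main__":
    context = "The Eiffel Tower was completed in 1889 and stands in Paris, France."
    question = "When was the Eiffel Tower completed?"
    # Any of the three wrappers works here; squeezebert is the lightest
    result = squeezebert(context, question)
    print(result)  # e.g. {"answer": "1889", "start": ..., "end": ...}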