from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the fine-tuned tokenizer and model from the local checkpoint directory
model_name = "fine-tuned-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, use_safetensors=True)

# Run inference on CPU and switch the model to evaluation mode
device = torch.device("cpu")
model.to(device)
model.eval()

def predict_sentiment(review_text):
    # Tokenize the review and move the input tensors to the target device
    inputs = tokenizer(review_text, padding=True, truncation=True, return_tensors="pt").to(device)

    # Disable gradient tracking for inference
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert raw logits into class probabilities
    logits = outputs.logits
    predictions = torch.softmax(logits, dim=-1)

    # Label 1 is treated as positive sentiment, label 0 as negative
    predicted_label = torch.argmax(predictions, dim=-1).item()
    sentiment = "Positive" if predicted_label == 1 else "Negative"

    return sentiment, predictions[0].cpu().numpy()
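
# Example usage (illustrative sketch: assumes "fine-tuned-model" already exists
# locally; the sample review below is a hypothetical input, not from the original).
if __name__ == "__main__":
    sample_review = "The battery lasts all day and the screen is gorgeous."
    sentiment, probabilities = predict_sentiment(sample_review)
    print(f"Sentiment: {sentiment}")
    print(f"Class probabilities: {probabilities}")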