import gradio as gr
from model import DepressionClassifier
import torch
from huggingface_hub import hf_hub_download
from transformers import BertTokenizer
from PIL import Image
import requests
import os
import tweepy
import pandas as pd
from dotenv import load_dotenv

# Twitter API credentials are read from the environment (e.g. a .env file)
# rather than hard-coded into the source; set these variables before launch.
load_dotenv()
consumer_key = os.getenv('TWITTER_CONSUMER_KEY')
consumer_secret = os.getenv('TWITTER_CONSUMER_SECRET')
access_token = os.getenv('TWITTER_ACCESS_TOKEN')
access_token_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')

auth = tweepy.OAuth1UserHandler(
    consumer_key, consumer_secret, access_token, access_token_secret
)
api = tweepy.API(auth)

# Download the fine-tuned weights from the Hugging Face Hub and load them
# into the BERT-based classifier on CPU.
class_names = ['Not Depressed', 'Depressed']
pt_file = hf_hub_download(repo_id="liangc40/sentimental_analysis", filename="model.pt")
model = DepressionClassifier(len(class_names), 'bert-base-cased')
model.load_state_dict(torch.load(pt_file, map_location=torch.device('cpu')))
model.eval()

# Load the tokenizer once at start-up rather than on every request.
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')


def classify(texts):
    """Return the predicted class index (0 or 1) for each input text."""
    encoding = tokenizer(
        texts,
        max_length=32,
        add_special_tokens=True,   # add '[CLS]' and '[SEP]'
        return_token_type_ids=False,
        padding='max_length',      # replaces the deprecated pad_to_max_length=True
        truncation=True,
        return_attention_mask=True,
        return_tensors='pt',
    )
    with torch.no_grad():
        outputs = model(input_ids=encoding['input_ids'],
                        attention_mask=encoding['attention_mask'])
    _, preds = torch.max(outputs, dim=1)
    return preds.cpu().numpy().tolist()


def analyse(text):
    """Classify a single text and return the matching face image (0.png or 1.png)."""
    pred = classify([text])[0]
    face_url = ("https://raw.githubusercontent.com/liangc40/"
                "ID2223_Sentimental_Analysis_Project/main/Image/"
                + str(pred) + ".png")
    img = Image.open(requests.get(face_url, stream=True).raw)
    return img


def tweets(search_words):
    """Fetch ten English tweets matching the query and label each one."""
    label_list = ['Non-depressed', 'Depressed']
    results = tweepy.Cursor(api.search_tweets, q=search_words, lang="en").items(10)
    tweet_content = [tweet.text for tweet in results]
    # Classify all fetched tweets in one batch and map each prediction to a label.
    labels = [label_list[pred] for pred in classify(tweet_content)]
    df = pd.DataFrame(
        list(zip(tweet_content, labels)),
        columns=['Ten tweets on ' + search_words, 'sentiment'],
    )
    return df


with gr.Blocks() as demo:
    gr.Markdown("