Spaces:
Runtime error
Upload app.py
app.py CHANGED
@@ -6,16 +6,14 @@ import hopsworks
 import joblib
 import torch
 from huggingface_hub import hf_hub_download
+import transformers
+from transformers import BertModel, BertTokenizer
 
 class_names = ['Not Depressed', 'Depressed']
 pt_file = hf_hub_download(repo_id="liangc40/sentimental_analysis", filename="model.pt")
 
 model = DepressionClassifier(len(class_names), 'bert-base-cased')
 model.load_state_dict(torch.load(pt_file, map_location=torch.device('cpu')))
-
-
-
-
 model.eval()
 #pipe = pipeline(model="liangc40/sentimental_analysis")
 
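Note that `DepressionClassifier` is instantiated with `(len(class_names), 'bert-base-cased')` but is never imported or defined in the lines shown; unless it appears in the first five lines of app.py (not part of this hunk), that alone would explain the Space's runtime error status. The class is not part of this commit, so the following is only a sketch of the kind of wrapper it presumably is (a BERT encoder plus dropout and a linear head; all layer names and sizes here are assumptions):

# Hypothetical reconstruction of DepressionClassifier; the real class used to
# train model.pt is not included in this commit, so the exact layers are a guess.
import torch
import torch.nn as nn
from transformers import BertModel

class DepressionClassifier(nn.Module):
    def __init__(self, n_classes, pretrained_name):
        super().__init__()
        # Pretrained encoder, e.g. 'bert-base-cased'
        self.bert = BertModel.from_pretrained(pretrained_name)
        self.drop = nn.Dropout(p=0.3)
        # Classification head over the pooled [CLS] representation
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        return self.out(self.drop(outputs.pooler_output))

If the saved state dict was produced by a differently named or shaped module, `load_state_dict` would fail, which is why the layer layout above is explicitly an assumption.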
@@ -28,9 +26,21 @@ model.eval()
 #model_dir = model.download()
 #model = joblib.load(model_dir + "/sentimental_analysis_model.pkl")
 
+
 def analyse(text):
-
-
+    #text = "I'm depressed"
+    model = model.to('cpu')
+    tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
+    encoding = tokenizer.encode_plus(text, max_length=32, add_special_tokens=True, # Add '[CLS]' and '[SEP]'
+                                     return_token_type_ids=False,
+                                     pad_to_max_length=True,
+                                     return_attention_mask=True,
+                                     return_tensors='pt')
+
+    outputs = model(input_ids = encoding['input_ids'], attention_mask = encoding['attention_mask'])
+    _, preds = torch.max(outputs, dim=1)
+    #print(preds)
+    return preds
 
 with gr.Blocks() as demo:
     gr.Markdown("<h1><center>Sentiment Analysis with Fine-tuned BERT Model")
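As committed, `analyse` has a few problems: assigning `model = model.to('cpu')` inside the function makes `model` a local name, so Python raises UnboundLocalError before the global model is ever touched (it would need a `global model` declaration or no assignment at all); the tokenizer is reloaded on every call; `pad_to_max_length=True` is deprecated in current transformers; and the function returns a tensor of class indices rather than a label Gradio can display. A sketch of a tightened-up version, assuming the model's forward pass returns logits (consistent with the `torch.max(outputs, dim=1)` call above); the mapping back to `class_names` is an assumption about the intended output:

# Sketch of a revised analyse(); names mirror the committed app.py, but this is
# not the code in the repository.
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')  # load once, not per call

def analyse(text):
    encoding = tokenizer(
        text,
        max_length=32,
        padding='max_length',   # replaces the deprecated pad_to_max_length=True
        truncation=True,
        return_token_type_ids=False,
        return_attention_mask=True,
        return_tensors='pt',
    )
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model(input_ids=encoding['input_ids'],
                        attention_mask=encoding['attention_mask'])
    pred = torch.argmax(outputs, dim=1).item()
    return class_names[pred]  # 'Not Depressed' or 'Depressed'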
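The second hunk ends just as the Gradio UI begins, so the wiring between the interface and `analyse` is not visible in this commit. A plausible sketch of how the Blocks UI could connect a textbox to the classifier; only the `gr.Blocks()` and `gr.Markdown()` lines are confirmed by the diff, the component names and labels below are invented for illustration:

# Hypothetical UI wiring around the confirmed Blocks/Markdown lines.
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Sentiment Analysis with Fine-tuned BERT Model")
    text_input = gr.Textbox(label="Enter a sentence")
    label_output = gr.Textbox(label="Prediction")
    submit = gr.Button("Analyse")
    # Run the classifier when the button is clicked
    submit.click(fn=analyse, inputs=text_input, outputs=label_output)

demo.launch()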