Update app.py
app.py (changed)
@@ -1,65 +1,10 @@
-import subprocess
-subprocess.run(["pip", "install", "-q", "transformers", "datasets", "gradio", "scipy"])
-
-from transformers import AutoModelForSequenceClassification
-from transformers import TFAutoModelForSequenceClassification
-from transformers import AutoTokenizer, AutoConfig
-import numpy as np
-from scipy.special import softmax
-
-
-tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
-
-model_path = f"avichr/heBERT_sentiment_analysis"
-config = AutoConfig.from_pretrained(model_path)
-model = AutoModelForSequenceClassification.from_pretrained(model_path)
-
-# Preprocess text (username and link placeholders)
-def preprocess(text):
-    new_text = []
-    for t in text.split(" "):
-        t = '@user' if t.startswith('@') and len(t) > 1 else t
-        t = 'http' if t.startswith('http') else t
-        new_text.append(t)
-    return " ".join(new_text)
-
-# Input preprocessing
-text = "Covid cases are increasing fast!"
-text = preprocess(text)
-
-# PyTorch-based models
-encoded_input = tokenizer(text, return_tensors='pt')
-output = model(**encoded_input)
-scores = output[0][0].detach().numpy()
-scores = softmax(scores)
-
-# TensorFlow-based models
-# model = TFAutoModelForSequenceClassification.from_pretrained(model_path)
-# model.save_pretrained(model_path)
-# text = "Covid cases are increasing fast!"
-# encoded_input = tokenizer(text, return_tensors='tf')
-# output = model(encoded_input)
-# scores = output[0][0].numpy()
-# scores = softmax(scores)
-
-config.id2label = {0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'}
-
-# Print labels and scores
-ranking = np.argsort(scores)
-ranking = ranking[::-1]
-print(f"Classified text: {text}")
-for i in range(scores.shape[0]):
-    l = config.id2label[ranking[i]]
-    s = scores[ranking[i]]
-    print(f"{i+1}) {l} {np.round(float(s), 4)}")
-
 from transformers import AutoModelForSequenceClassification
 from transformers import TFAutoModelForSequenceClassification
 from transformers import AutoTokenizer, AutoConfig
 from scipy.special import softmax
 import gradio as gr
-
-
+import numpy as np
+from scipy.special import softmax
 
 # Requirements
 model_path = f"avichr/heBERT_sentiment_analysis"
@@ -95,8 +40,9 @@ def sentiment_analysis(text):
 demo = gr.Interface(
     fn=sentiment_analysis,
     inputs=gr.Textbox(placeholder="Write how you feel about Covid here..."),
-    outputs="
-    examples=[["
+    outputs="text",
+    examples=[["What's up with the Vaccine"]],
+    title="Tutorial: Sentiment Analysis App",
+    description="This App assesses whether a sentiment about Covid is positive or negative")
 
 demo.launch()
-
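
The second hunk wires a sentiment_analysis function into gr.Interface, but the function body sits outside the changed lines and so does not appear in this diff. For context, here is a minimal sketch of such a function, assuming it reuses the heBERT checkpoint, tokenizer, and softmax post-processing implied by the imports; the name sentiment_analysis and the model path come from the diff itself, while everything else is an illustrative assumption rather than the file's actual implementation.

# Sketch only -- the real sentiment_analysis body is not shown in this diff.
import numpy as np
from scipy.special import softmax
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

model_path = "avichr/heBERT_sentiment_analysis"
tokenizer = AutoTokenizer.from_pretrained(model_path)  # assumed; the removed code loaded 'bert-base-cased'
config = AutoConfig.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)

def sentiment_analysis(text):
    # Tokenize the input and run the PyTorch model.
    encoded_input = tokenizer(text, return_tensors="pt")
    output = model(**encoded_input)
    # Convert logits to probabilities.
    scores = softmax(output[0][0].detach().numpy())
    # Rank labels by score and return a plain-text summary.
    ranking = np.argsort(scores)[::-1]
    return "\n".join(
        f"{config.id2label[int(i)]}: {round(float(scores[i]), 4)}" for i in ranking
    )

With a function shaped like this, the gr.Interface call in the hunk above can use outputs="text", since the ranked labels come back as a single string.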