Kingston Yip committed
Commit f3c89dc · Parent: 743c8db

removed cache

Files changed (1):
  1. app.py +10 -5
app.py CHANGED
@@ -10,8 +10,13 @@ def predict_cyberbullying_probability(sentence, tokenizer, model):
     # Preprocess the input sentence
     inputs = tokenizer(sentence, padding='max_length', return_token_type_ids=False, return_attention_mask=True, truncation=True, max_length=512, return_tensors='pt')
 
-
+    print("==========")
+    print(inputs)
+    print("==========")
+
     attention_mask = inputs['attention_mask'].flatten()
+    print("==========")
+    print(attention_mask)
     inputs = inputs['input_ids'].flatten()
     # print("\n\ninputs\n\n", inputs)
     # Disable gradient computation
@@ -19,7 +24,8 @@ def predict_cyberbullying_probability(sentence, tokenizer, model):
     # Forward pass
     outputs = model(inputs, attention_mask=attention_mask)
 
-    probs = torch.sigmoid(outputs.logits.flatten())
+    probs = torch.sigmoid(outputs.logits.unsqueeze(1).flatten())
+
 
 
     res = probs.numpy().tolist()
@@ -73,8 +79,6 @@ st.image(image, use_column_width=True)
 
 labels = ['comment', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
 
-# toxic_list = st.cache(comments.comments)
-
 
 with st.form("my_form"):
     #select model
@@ -85,9 +89,10 @@ with st.form("my_form"):
     tweet = st.text_area(label="Enter Text:",value=default)
     submitted = st.form_submit_button("Analyze textbox")
     random = st.form_submit_button("Analyze a random 😈😈😈 tweet")
-
+
    if random:
        tweet = comments.comments[randint(0, 354)]
+        st.write(tweet)
 
 
    df = perform_cyberbullying_analysis(tweet)
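
The new debug prints in the first hunk dump the tokenizer output. As a rough sketch of what they show, assuming a BERT-style checkpoint (the actual model name is not visible in this diff), the call with these exact kwargs returns a dict-like object with input_ids and attention_mask, each padded to 512 tokens:

from transformers import AutoTokenizer

# Hypothetical checkpoint, stands in for whatever the app actually loads.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
inputs = tokenizer("example tweet", padding='max_length', return_token_type_ids=False,
                   return_attention_mask=True, truncation=True, max_length=512,
                   return_tensors='pt')

print(list(inputs.keys()))                       # ['input_ids', 'attention_mask']
print(inputs['input_ids'].shape)                 # torch.Size([1, 512])
print(inputs['attention_mask'].flatten().shape)  # torch.Size([512]) after the app's flatten()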
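In the second hunk, flatten() becomes unsqueeze(1).flatten(). Since flatten() collapses every axis, inserting an extra axis first does not change the resulting values; a quick check, assuming the logits come out as a [1, 7] tensor for the seven labels above:

import torch

# Hypothetical logits shaped [batch=1, num_labels=7], matching the labels list.
logits = torch.randn(1, 7)

# Pre-commit: sigmoid over the flattened logits.
old = torch.sigmoid(logits.flatten())

# Post-commit: unsqueeze(1) inserts an axis that flatten() collapses again,
# so both expressions yield the same 1-D tensor of per-label probabilities.
new = torch.sigmoid(logits.unsqueeze(1).flatten())

assert torch.equal(old, new)
print(new.numpy().tolist())  # seven independent probabilities in [0, 1]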
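The third hunk, matching the commit message, drops the commented-out st.cache(comments.comments) line. st.cache wraps functions rather than plain lists, which is presumably why the line never worked; a minimal sketch of the cached-loader pattern it was reaching for, using the st.cache_data decorator from current Streamlit (names here are illustrative, not from this repo):

import streamlit as st

@st.cache_data
def load_comments():
    # comments is the repo's own module of sample tweets, per the diff above.
    import comments
    return comments.comments

toxic_list = load_comments()  # computed once, then served from Streamlit's cache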