hungnm committed · Commit 1adb5db (verified) · 1 parent: cd3f868

End of training

README.md CHANGED
@@ -9,21 +9,21 @@ metrics:
  - precision
  - recall
  model-index:
- - name: roberta-large-multilingual-sentiment
+ - name: clapAI/roberta-large-multilingual-sentiment
    results: []
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # roberta-large-multilingual-sentiment
+ # clapAI/roberta-large-multilingual-sentiment

  This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.4302
- - F1: 0.8242
- - Precision: 0.8247
- - Recall: 0.8238
+ - Loss: 0.4136
+ - F1: 0.8249
+ - Precision: 0.8256
+ - Recall: 0.8243

  ## Model description

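The card above only names the base checkpoint and the evaluation scores. For orientation, here is a minimal, hypothetical inference sketch against the repo id introduced in this commit (`clapAI/roberta-large-multilingual-sentiment`); the label names it returns depend on the model's config, which this diff does not show.

```python
# Hypothetical usage sketch for the checkpoint renamed in this commit.
# The repo id comes from the updated README; the returned label names
# depend on the model's config and are not specified in this diff.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="clapAI/roberta-large-multilingual-sentiment",
)

print(classifier("Le film était vraiment excellent !"))
# e.g. [{'label': '...', 'score': 0.97}] -- labels depend on the model config
```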
all_results.json CHANGED
@@ -1,5 +1,19 @@
  {
      "epoch": 5.0,
+     "eval_f1": 0.8249050225524125,
+     "eval_loss": 0.41357421875,
+     "eval_precision": 0.825624581488519,
+     "eval_recall": 0.8243496742948847,
+     "eval_runtime": 1162.682,
+     "eval_samples_per_second": 338.386,
+     "eval_steps_per_second": 0.661,
+     "test_f1": 0.8262945499829749,
+     "test_loss": 0.411376953125,
+     "test_precision": 0.8271664012247752,
+     "test_recall": 0.8256432203503072,
+     "test_runtime": 1038.4953,
+     "test_samples_per_second": 378.852,
+     "test_steps_per_second": 0.74,
      "train_loss": 0.7460857761162166,
      "train_runtime": 176390.3873,
      "train_samples_per_second": 89.219,
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+     "epoch": 5.0,
+     "eval_f1": 0.8249050225524125,
+     "eval_loss": 0.41357421875,
+     "eval_precision": 0.825624581488519,
+     "eval_recall": 0.8243496742948847,
+     "eval_runtime": 1162.682,
+     "eval_samples_per_second": 338.386,
+     "eval_steps_per_second": 0.661
+ }
runs/Jan03_07-02-47_hn-fornix-testing-gpu-platform-2/events.out.tfevents.1736065945.hn-fornix-testing-gpu-platform-2.4140080.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab6a40bd58d6201333a502f960bdc21a410bcc9434acabe985e4a88bad5ca12c
+ size 508
test_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "test_f1": 0.8262945499829749,
+     "test_loss": 0.411376953125,
+     "test_precision": 0.8271664012247752,
+     "test_recall": 0.8256432203503072,
+     "test_runtime": 1038.4953,
+     "test_samples_per_second": 378.852,
+     "test_steps_per_second": 0.74
+ }
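A note on where the `eval_*` / `test_*` keys in these result files come from: with the Hugging Face `Trainer`, metric names returned by a `compute_metrics` callback are prefixed with the split name (`eval_` by default for `evaluate`, `test_` by default for `predict`). Below is a minimal sketch of such a callback; the weighted averaging is an assumption, since the commit does not state how the F1/precision/recall scores were averaged.

```python
# Sketch of a compute_metrics callback that would yield f1/precision/recall
# values like those recorded in eval_results.json and test_results.json.
# The "weighted" average is an assumption; the commit does not say which
# averaging mode was used.
import numpy as np
from sklearn.metrics import precision_recall_fscore_support


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="weighted", zero_division=0
    )
    return {"f1": f1, "precision": precision, "recall": recall}


# Standalone check with dummy 3-class logits.
dummy_logits = np.array([[2.0, 0.1, 0.3], [0.2, 1.5, 0.1], [0.3, 0.2, 1.1]])
dummy_labels = np.array([0, 1, 2])
print(compute_metrics((dummy_logits, dummy_labels)))
```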