deryauysal committed
Commit 641e3fe
1 Parent(s): 9843fb2

End of training

README.md CHANGED
@@ -5,24 +5,9 @@ tags:
 - generated_from_trainer
 datasets:
 - common_voice_6_1
-metrics:
-- wer
 model-index:
 - name: wav2vec2-large-mms-1b-turkish-colab
-  results:
-  - task:
-      name: Automatic Speech Recognition
-      type: automatic-speech-recognition
-    dataset:
-      name: common_voice_6_1
-      type: common_voice_6_1
-      config: tr
-      split: test
-      args: tr
-    metrics:
-    - name: Wer
-      type: wer
-      value: 0.9835563272393014
+  results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -31,9 +16,6 @@ should probably proofread and complete it, then remove this comment. -->
 # wav2vec2-large-mms-1b-turkish-colab
 
 This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on the common_voice_6_1 dataset.
-It achieves the following results on the evaluation set:
-- Loss: 2.4395
-- Wer: 0.9836
 
 ## Model description
 
@@ -53,7 +35,7 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.001
-- train_batch_size: 16
+- train_batch_size: 32
 - eval_batch_size: 8
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
@@ -61,23 +43,9 @@ The following hyperparameters were used during training:
 - lr_scheduler_warmup_steps: 100
 - num_epochs: 4
 
-### Training results
-
-| Training Loss | Epoch | Step | Validation Loss | Wer    |
-|:-------------:|:-----:|:----:|:---------------:|:------:|
-| 6.2025        | 0.46  | 100  | 5.8610          | 1.0478 |
-| 3.2809        | 0.92  | 200  | 3.5010          | 1.0029 |
-| 3.0173        | 1.38  | 300  | 3.2271          | 0.9990 |
-| 2.8897        | 1.83  | 400  | 3.4469          | 0.9981 |
-| 2.8081        | 2.29  | 500  | 3.1375          | 1.0044 |
-| 2.7393        | 2.75  | 600  | 2.6899          | 0.9948 |
-| 2.5172        | 3.21  | 700  | 2.7153          | 0.9949 |
-| 2.4154        | 3.67  | 800  | 2.4395          | 0.9836 |
-
-
 ### Framework versions
 
-- Transformers 4.33.0.dev0
+- Transformers 4.33.0
 - Pytorch 2.0.1+cu117
 - Datasets 2.14.4
 - Tokenizers 0.13.3
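
For reference, the hyperparameters listed in the updated card map onto `transformers.TrainingArguments` roughly as sketched below. This is an illustration, not the actual training script: only the values named in the README are taken from the card, and everything else (output directory, scheduler type, precision, gradient accumulation) is an assumption.

```python
from transformers import TrainingArguments

# Minimal sketch of the card's hyperparameters; unlisted settings are assumed.
training_args = TrainingArguments(
    output_dir="wav2vec2-large-mms-1b-turkish-colab",  # assumed output directory
    learning_rate=1e-3,
    per_device_train_batch_size=32,  # 16 before this commit, 32 after
    per_device_eval_batch_size=8,
    seed=42,
    warmup_steps=100,
    num_train_epochs=4,
)
```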
adapter.tur.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed8fe2f53c5d0fde0f80ab677ff86415aaf26efae3d6c4182db8f89ea69c1c3c
-size 8839532
+oid sha256:a49adfac4de6cf5b45e2655e80419d4bb6d3d32f6ab057457df5504620d421a1
+size 9054748
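
`adapter.tur.safetensors` holds the Turkish language-adapter weights updated in this commit. A minimal inference sketch follows, assuming the repo id `deryauysal/wav2vec2-large-mms-1b-turkish-colab` (derived from the commit author and model name) and 16 kHz mono input; a silent dummy waveform stands in for real speech, and the feature-extractor settings mirror the values from the previous preprocessor_config.json.

```python
import numpy as np
import torch
from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC

repo_id = "deryauysal/wav2vec2-large-mms-1b-turkish-colab"  # assumed repo id

# Feature-extractor values copied from the previous preprocessor_config.json.
feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=True
)
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(repo_id)
model = Wav2Vec2ForCTC.from_pretrained(repo_id)

audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence as a stand-in for real audio
inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred_ids = torch.argmax(logits, dim=-1)
print(tokenizer.batch_decode(pred_ids))
```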
config.json CHANGED
@@ -77,7 +77,7 @@
   "num_hidden_layers": 48,
   "num_negatives": 100,
   "output_hidden_size": 1280,
-  "pad_token_id": 36,
+  "pad_token_id": 1,
   "proj_codevector_dim": 1024,
   "tdnn_dilation": [
     1,
@@ -101,8 +101,8 @@
     1
   ],
   "torch_dtype": "float32",
-  "transformers_version": "4.33.0.dev0",
+  "transformers_version": "4.33.0",
   "use_weighted_layer_sum": false,
-  "vocab_size": 39,
+  "vocab_size": 81,
   "xvector_output_dim": 512
 }
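
The `pad_token_id` and `vocab_size` fields track the CTC output vocabulary defined by the tokenizer, so after this commit they should agree with the tokenizer shipped in the repo. A quick consistency check, sketched under the same assumed repo id as above:

```python
from transformers import Wav2Vec2Config, Wav2Vec2CTCTokenizer

repo_id = "deryauysal/wav2vec2-large-mms-1b-turkish-colab"  # assumed repo id
config = Wav2Vec2Config.from_pretrained(repo_id)
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(repo_id)

# With this commit the config reports vocab_size 81 and pad_token_id 1;
# both should line up with the tokenizer's vocabulary.
print(config.vocab_size, tokenizer.vocab_size)
print(config.pad_token_id, tokenizer.pad_token_id)
```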
preprocessor_config.json CHANGED
@@ -1,9 +1,19 @@
 {
-  "do_normalize": true,
-  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "do_normalize": false,
+  "feature_extractor_type": "SpeechT5FeatureExtractor",
   "feature_size": 1,
+  "fmax": 7600,
+  "fmin": 80,
+  "frame_signal_scale": 1.0,
+  "hop_length": 16,
+  "mel_floor": 1e-10,
+  "num_mel_bins": 80,
   "padding_side": "right",
   "padding_value": 0.0,
+  "processor_class": "SpeechT5Processor",
+  "reduction_factor": 2,
   "return_attention_mask": true,
-  "sampling_rate": 16000
+  "sampling_rate": 16000,
+  "win_function": "hann_window",
+  "win_length": 64
 }
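
`AutoFeatureExtractor` instantiates whichever class `feature_extractor_type` names, so with this commit the repo's preprocessor resolves to `SpeechT5FeatureExtractor` rather than the `Wav2Vec2FeatureExtractor` named in the previous config. A small check, again under the assumed repo id:

```python
from transformers import AutoFeatureExtractor

repo_id = "deryauysal/wav2vec2-large-mms-1b-turkish-colab"  # assumed repo id
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
print(type(feature_extractor).__name__)  # SpeechT5FeatureExtractor, given this preprocessor_config.json
```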
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:418bde28a3118f67f989b961834588c31b54e42ca8b5ad9145e1aafed71873db
-size 3859175565
+oid sha256:81b70144e8d7d8e190d4c93638e620240fc471b846cf31375d90355a3e4560f2
+size 3859390797
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:972d6512832df904da66bfd304b32c108f74762499b024eee235dcf47022f660
+oid sha256:26fca2f44571a70125f2ca7e5b4adfa4707bebf6ae2fe41b1963fc2b6a968433
 size 4091
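
`training_args.bin` is the `TrainingArguments` object that `Trainer` serializes with `torch.save`, so the run's settings can be inspected directly once the file is downloaded. A sketch, assuming the file sits in the working directory:

```python
import torch

# training_args.bin is a pickled TrainingArguments object saved by Trainer.
args = torch.load("training_args.bin")  # assumes the file has been downloaded locally
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```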