Teerawach12 committed
Commit 303c17b · 1 Parent(s): ac6f7d9

Upload 12 files
Files changed:
- .gitattributes (+8, -25)
- README.md (+135, -1)
- config.json (+76, -0)
- optimizer.pt (+3, -0)
- preprocessor_config.json (+8, -0)
- pytorch_model.bin (+3, -0)
- scheduler.pt (+3, -0)
- special_tokens_map.json (+1, -0)
- tokenizer_config.json (+1, -0)
- trainer_state.json (+142, -0)
- training_args.bin (+3, -0)
- vocab.json (+1, -0)
.gitattributes
CHANGED
```diff
@@ -1,33 +1,16 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
```
README.md
CHANGED
````diff
@@ -1,3 +1,137 @@
 ---
-
+language: th
+datasets:
+- common_voice
+tags:
+- audio
+- automatic-speech-recognition
+- speech
+- xlsr-fine-tuning-week
+license: apache-2.0
+model-index:
+- name: XLSR Wav2Vec2 Large Thai by Sakares
+  results:
+  - task:
+      name: Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: Common Voice th
+      type: common_voice
+      args: th
+    metrics:
+    - name: Test WER
+      type: wer
+      value: 44.46
 ---
+
+# Wav2Vec2-Large-XLSR-53-Thai
+
+Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Thai using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
+When using this model, make sure that your speech input is sampled at 16kHz.
+
+## Usage
+
+The model can be used directly (without a language model) as follows:
+
+```python
+import torch
+import torchaudio
+from datasets import load_dataset
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+from pythainlp.tokenize import word_tokenize
+
+test_dataset = load_dataset("common_voice", "th", split="test[:2%]")
+
+processor = Wav2Vec2Processor.from_pretrained("sakares/wav2vec2-large-xlsr-thai-demo")
+model = Wav2Vec2ForCTC.from_pretrained("sakares/wav2vec2-large-xlsr-thai-demo")
+
+resampler = torchaudio.transforms.Resample(48_000, 16_000)
+
+## For the Thai NLP library, see https://pythainlp.github.io/docs/2.2/api/tokenize.html
+def th_tokenize(batch):
+    batch["sentence"] = " ".join(word_tokenize(batch["sentence"], engine="newmm"))
+    return batch
+
+# Preprocessing the datasets.
+# We need to read the audio files as arrays
+def speech_file_to_array_fn(batch):
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
+
+test_dataset = test_dataset.map(speech_file_to_array_fn).map(th_tokenize)
+inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
+
+with torch.no_grad():
+    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+
+predicted_ids = torch.argmax(logits, dim=-1)
+
+print("Prediction:", processor.batch_decode(predicted_ids))
+print("Reference:", test_dataset["sentence"][:2])
+```
+Usage script [here](https://colab.research.google.com/drive/1w0VywsBtjrO2pHHPmiPugYI9yeF8nUKg?usp=sharing)
+
+## Evaluation
+
+The model can be evaluated as follows on the Thai test data of Common Voice.
+
+```python
+import torch
+import torchaudio
+from datasets import load_dataset, load_metric
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+from pythainlp.tokenize import word_tokenize
+import re
+
+test_dataset = load_dataset("common_voice", "th", split="test")
+wer = load_metric("wer")
+
+processor = Wav2Vec2Processor.from_pretrained("sakares/wav2vec2-large-xlsr-thai-demo")
+model = Wav2Vec2ForCTC.from_pretrained("sakares/wav2vec2-large-xlsr-thai-demo")
+model.to("cuda")
+
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
+resampler = torchaudio.transforms.Resample(48_000, 16_000)
+
+## For the Thai NLP library, see https://pythainlp.github.io/docs/2.2/api/tokenize.html
+def th_tokenize(batch):
+    batch["sentence"] = " ".join(word_tokenize(batch["sentence"], engine="newmm"))
+    return batch
+
+# Preprocessing the datasets.
+# We need to read the audio files as arrays
+def speech_file_to_array_fn(batch):
+    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
+
+test_dataset = test_dataset.map(speech_file_to_array_fn).map(th_tokenize)
+
+# Run the model over the test set in batches and decode the predictions.
+def evaluate(batch):
+    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+
+    with torch.no_grad():
+        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
+
+    pred_ids = torch.argmax(logits, dim=-1)
+    batch["pred_strings"] = processor.batch_decode(pred_ids)
+    return batch
+
+result = test_dataset.map(evaluate, batched=True, batch_size=8)
+
+print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
+```
+
+**Test Result**: 44.46 %
+Evaluation script [here](https://colab.research.google.com/drive/1WZGtHKWXBztRsuXHIdebf6uoAsp7rTnK?usp=sharing)
+
+## Training
+
+The Common Voice `train` and `validation` datasets were used for training.
+
+The script used for training can be found [here](https://colab.research.google.com/drive/18oUbeZgBGSkz16zC_WOa154QZOdmvjyt?usp=sharing)
````
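As a minimal sketch of what that training setup implies (this is not the linked Colab script; only the dataset and split names come from the README above), the two splits can be loaded and concatenated with the `datasets` library:

```python
# Minimal sketch, assuming the Common Voice Thai splits named in the README;
# not the linked training script.
from datasets import load_dataset, concatenate_datasets

train_split = load_dataset("common_voice", "th", split="train")
valid_split = load_dataset("common_voice", "th", split="validation")
train_data = concatenate_datasets([train_split, valid_split])
print(len(train_data), "training examples")
```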
config.json
ADDED
```json
{
  "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
  "activation_dropout": 0.0,
  "apply_spec_augment": true,
  "architectures": ["Wav2Vec2ForCTC"],
  "attention_dropout": 0.1,
  "bos_token_id": 1,
  "conv_bias": true,
  "conv_dim": [512, 512, 512, 512, 512, 512, 512],
  "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
  "conv_stride": [5, 2, 2, 2, 2, 2, 2],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.0,
  "final_dropout": 0.0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_dropout": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.1,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.05,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "pad_token_id": 70,
  "transformers_version": "4.4.0",
  "vocab_size": 71
}
```
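The config pins the XLSR-53 architecture (24 transformer layers, hidden size 1024) to a 71-token CTC head, with `[PAD]` (id 70) doubling as the CTC blank. A minimal sketch of inspecting it with `transformers`, assuming the repo id referenced in the README:

```python
# Minimal sketch: load and inspect the uploaded config.
# The repo id is assumed from the README code above.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained("sakares/wav2vec2-large-xlsr-thai-demo")
print(config.num_hidden_layers)  # 24
print(config.hidden_size)        # 1024
print(config.vocab_size)         # 71, matching vocab.json below
print(config.pad_token_id)       # 70, the [PAD] / CTC blank id
```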
optimizer.pt
ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:9cb1f03e6996f45067b37846665462c9469495843102ce187588b897e28a7f4e
size 2490659335
```
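optimizer.pt and the other binary files below are stored through Git LFS, so the repository itself only tracks a three-line pointer: the spec version, the SHA-256 of the blob, and its size in bytes (here about 2.5 GB). A minimal sketch of reading such a pointer, using a hypothetical helper:

```python
# Hypothetical helper: parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# parse_lfs_pointer("optimizer.pt") would yield:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:9cb1f03e...", "size": "2490659335"}
```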
preprocessor_config.json
ADDED
```json
{
  "do_normalize": true,
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": true,
  "sampling_rate": 16000
}
```
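These values configure the feature-extractor half of the processor: raw 16 kHz mono waveforms (feature_size 1) are normalized and right-padded with zeros, and an attention mask is returned for the padded positions. A minimal sketch, again assuming the README's repo id:

```python
# Minimal sketch: run the feature extractor described by this config
# on one second of silence. Repo id is assumed from the README.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("sakares/wav2vec2-large-xlsr-thai-demo")
dummy = np.zeros(16000, dtype=np.float32)  # 1 s at sampling_rate 16000
features = extractor(dummy, sampling_rate=16000, return_tensors="np", padding=True)
print(features.input_values.shape)  # (1, 16000): feature_size 1, no downsampling here
```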
pytorch_model.bin
ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:8cf9e30486947e5ed90fbee04baf77edce0daadf0effb971a783e7eee53f6215
size 1262224919
```
scheduler.pt
ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:4eadfc8ad40d25eb205d452eb44ca33a8d304d9199be7da405ad3378ff843c81
size 623
```
special_tokens_map.json
ADDED
```json
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
```
tokenizer_config.json
ADDED
```json
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}
```
trainer_state.json
ADDED
```json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 23.83828382838284,
  "global_step": 3600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 2.65, "learning_rate": 0.00023999999999999998, "loss": 6.5741, "step": 400},
    {"epoch": 2.65, "eval_loss": 3.4423439502716064, "eval_runtime": 231.306, "eval_samples_per_second": 9.459, "eval_wer": 1.0, "step": 400},
    {"epoch": 5.3, "learning_rate": 0.0002711907810499359, "loss": 1.6231, "step": 800},
    {"epoch": 5.3, "eval_loss": 0.6918022036552429, "eval_runtime": 234.8428, "eval_samples_per_second": 9.317, "eval_wer": 0.7177260916407884, "step": 800},
    {"epoch": 7.94, "learning_rate": 0.00023277848911651725, "loss": 0.5103, "step": 1200},
    {"epoch": 7.94, "eval_loss": 0.5981740951538086, "eval_runtime": 233.6413, "eval_samples_per_second": 9.365, "eval_wer": 0.6444007858546169, "step": 1200},
    {"epoch": 10.59, "learning_rate": 0.0001943661971830986, "loss": 0.316, "step": 1600},
    {"epoch": 10.59, "eval_loss": 0.6127611398696899, "eval_runtime": 239.3303, "eval_samples_per_second": 9.142, "eval_wer": 0.6053615564991445, "step": 1600},
    {"epoch": 13.24, "learning_rate": 0.0001559539052496799, "loss": 0.227, "step": 2000},
    {"epoch": 13.24, "eval_loss": 0.6392495036125183, "eval_runtime": 236.7294, "eval_samples_per_second": 9.243, "eval_wer": 0.5767158882058432, "step": 2000},
    {"epoch": 15.89, "learning_rate": 0.00011754161331626119, "loss": 0.1783, "step": 2400},
    {"epoch": 15.89, "eval_loss": 0.6448690891265869, "eval_runtime": 240.8912, "eval_samples_per_second": 9.083, "eval_wer": 0.5625831801761836, "step": 2400},
    {"epoch": 18.54, "learning_rate": 7.91293213828425e-05, "loss": 0.1346, "step": 2800},
    {"epoch": 18.54, "eval_loss": 0.6509573459625244, "eval_runtime": 238.9168, "eval_samples_per_second": 9.158, "eval_wer": 0.5524431206033336, "step": 2800},
    {"epoch": 21.19, "learning_rate": 4.071702944942381e-05, "loss": 0.1149, "step": 3200},
    {"epoch": 21.19, "eval_loss": 0.7118895053863525, "eval_runtime": 239.1827, "eval_samples_per_second": 9.148, "eval_wer": 0.5582736548577223, "step": 3200},
    {"epoch": 23.84, "learning_rate": 2.3047375160051214e-06, "loss": 0.1024, "step": 3600},
    {"epoch": 23.84, "eval_loss": 0.6984374523162842, "eval_runtime": 239.7197, "eval_samples_per_second": 9.127, "eval_wer": 0.5488307243805057, "step": 3600}
  ],
  "max_steps": 3624,
  "num_train_epochs": 24,
  "total_flos": 1.4828294022260212e+19,
  "trial_name": null,
  "trial_params": null
}
```
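The log shows steady convergence: training loss drops from 6.57 to 0.10 over 3600 steps while eval WER falls from 1.00 to 0.55. A minimal sketch of recovering that curve from the file, assuming it sits in the working directory:

```python
# Minimal sketch: extract the (step, eval_wer) curve from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f"step {entry['step']:4d}  WER {entry['eval_wer']:.4f}")
# step  400  WER 1.0000
# ...
# step 3600  WER 0.5488
```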
training_args.bin
ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:172c59484ed932416417e6a198f03373837732beb2793acc906d873aa5e63514
size 2351
```
vocab.json
ADDED
```json
{"ู": 0, "ั": 1, "ะ": 2, "ฆ": 3, "ำ": 4, "ึ": 5, "๋": 6, "ส": 7, "์": 8, "ฮ": 9, "ค": 10, "่": 11, "ผ": 12, "ศ": 13, "จ": 14, "ล": 15, "ฒ": 16, "ป": 17, "ม": 18, "็": 19, "’": 20, "ง": 21, "ํ": 22, "ฝ": 23, "ื": 24, "โ": 25, "ห": 26, "้": 27, "ษ": 29, "ๆ": 30, "า": 31, "ฟ": 32, "แ": 33, "ด": 34, "ท": 35, "ใ": 36, "ณ": 37, "ฬ": 38, "ไ": 39, "ๅ": 40, "อ": 41, "ี": 42, "๊": 43, "บ": 44, "ย": 45, "ิ": 46, "ฉ": 47, "ภ": 48, "ฏ": 49, "ข": 50, "ก": 51, "'": 52, "เ": 53, "พ": 54, "ฐ": 55, "ญ": 56, "น": 57, "ธ": 58, "ถ": 59, "ซ": 60, "ร": 61, "ฤ": 62, "ช": 63, "ุ": 64, "ต": 65, "ฑ": 66, "ฎ": 67, "ว": 68, "|": 28, "[UNK]": 69, "[PAD]": 70}
```
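The vocabulary is character-level: 71 entries covering the Thai character set plus `'` and `’`, with `|` as the word delimiter (id 28), `[UNK]` at 69, and `[PAD]` at 70. A minimal sketch of rebuilding the CTC tokenizer from these JSON files, assuming they have been downloaded to the working directory:

```python
# Minimal sketch: rebuild the tokenizer from vocab.json with the special
# tokens given in special_tokens_map.json and tokenizer_config.json.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    bos_token="<s>",
    eos_token="</s>",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
    do_lower_case=False,
)
print(tokenizer.vocab_size)  # 71
```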