Training in progress, step 16000
- .ipynb_checkpoints/added_tokens-checkpoint.json +1 -0
- .ipynb_checkpoints/config-checkpoint.json +107 -0
- .ipynb_checkpoints/eval-checkpoint.py +1 -1
- .ipynb_checkpoints/preprocessor_config-checkpoint.json +9 -0
- .ipynb_checkpoints/special_tokens_map-checkpoint.json +1 -0
- .ipynb_checkpoints/todo-checkpoint.txt +4 -0
- .ipynb_checkpoints/tokenizer_config-checkpoint.json +1 -0
- eval.py +1 -1
- log_mozilla-foundation_common_voice_7_0_fr_test_predictions.txt +10 -10
- mozilla-foundation_common_voice_7_0_fr_test_eval_results.txt +2 -2
- pytorch_model.bin +1 -1
- todo.txt +4 -0
.ipynb_checkpoints/added_tokens-checkpoint.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 317, "</s>": 318}
.ipynb_checkpoints/config-checkpoint.json
ADDED
@@ -0,0 +1,107 @@
+{
+  "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+  "activation_dropout": 0.1,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForCTC"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": true,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "mean",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": true,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "layer",
+  "feat_proj_dropout": 0.0,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.0,
+  "mask_feature_length": 64,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.25,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.75,
+  "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 317,
+  "proj_codevector_dim": 768,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.17.0.dev0",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 319,
+  "xvector_output_dim": 512
+}
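
Note: this config keeps the facebook/wav2vec2-xls-r-300m backbone and sizes the CTC head for a 319-entry vocabulary with pad_token_id 317 (the added <s> entry). A minimal sketch of loading the committed config and weights with transformers, assuming a local clone of this repo at "./":

# Minimal sketch, assuming the repo is cloned locally at "./".
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_pretrained("./")
assert config.vocab_size == 319 and config.pad_token_id == 317

model = Wav2Vec2ForCTC.from_pretrained("./")  # picks up pytorch_model.bin from the same directory
model.eval()
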
.ipynb_checkpoints/eval-checkpoint.py
CHANGED
@@ -86,7 +86,7 @@ def main(args):
             batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
         )
 
-        batch["prediction"] = prediction["text"]
+        batch["prediction"] = prediction["text"].replace("<s>", "")
         batch["target"] = normalize_text(batch["sentence"])
         return batch
 
.ipynb_checkpoints/preprocessor_config-checkpoint.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0,
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
.ipynb_checkpoints/special_tokens_map-checkpoint.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
.ipynb_checkpoints/todo-checkpoint.txt
ADDED
@@ -0,0 +1,4 @@
+TODO:
+- enlever apostrophe des caractères à enlever
+- ajouter une liste de caractères qui ne sont pas de la langue française
+- change epoch pour éviter des entrainements trop long
.ipynb_checkpoints/tokenizer_config-checkpoint.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
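
Note: the preprocessor and tokenizer configs above describe a 16 kHz Wav2Vec2FeatureExtractor paired with a Wav2Vec2CTCTokenizer that uses "|" as the word delimiter. A minimal sketch of loading both as a single processor, assuming the files (including the vocab) sit in the repo root:

# Minimal sketch, assuming a local clone at "./".
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./")
print(processor.feature_extractor.sampling_rate)   # 16000
print(processor.tokenizer.word_delimiter_token)    # "|"
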
eval.py
CHANGED
@@ -86,7 +86,7 @@ def main(args):
             batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
         )
 
-        batch["prediction"] = prediction["text"]
+        batch["prediction"] = prediction["text"].replace("<s>", "")
         batch["target"] = normalize_text(batch["sentence"])
         return batch
 
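
Note: this one-line change strips the literal "<s>" string (the added BOS token) from the pipeline output before predictions are scored. A minimal sketch of the surrounding usage, with the model path and the audio file as placeholders:

# Minimal sketch; "sample.wav" and the local model path are placeholders.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="./")
prediction = asr("sample.wav", chunk_length_s=5.0, stride_length_s=1.0)
text = prediction["text"].replace("<s>", "")  # same cleanup as in eval.py
print(text)
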
log_mozilla-foundation_common_voice_7_0_fr_test_predictions.txt
CHANGED
@@ -1,20 +1,20 @@
 0
-
+un vrai travail intéressant va enfin être menéer sur ce sujet
 1
-
+une réforme profonde est nécessairetre
 2
-
+passi nombreuses que ça
 3
-
+un commité interministérial du handicap sest tenu il yy a quelques semaines
 4
-
+la parole est à monsieurlalanramandière pour soutenir lamendement numéro cint vingthuit
 5
-
+cesten tout cas jupiterien
 6
-
+o voix
 7
-
+jai donc lexpérience des années passés jen dirais un mot tout à lheur
 8
-
+douze minutes trente
 9
-
+cest une évidence
mozilla-foundation_common_voice_7_0_fr_test_eval_results.txt
CHANGED
@@ -1,2 +1,2 @@
-WER: 0.
-CER:
+WER: 0.25
+CER: 0.05714285714285714
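
Note: these figures correspond to the ten-utterance log above. As a cross-check, corpus-level WER/CER can be recomputed from the prediction/target pairs; a minimal sketch using the jiwer package (eval.py itself may use a different metric implementation, and the lists below are placeholders for the logged predictions and targets):

# Minimal sketch; `targets` and `predictions` stand in for the normalized
# references and the cleaned model outputs written to the log files above.
import jiwer

targets = ["cest une évidence"]
predictions = ["cest une évidence"]

print("WER:", jiwer.wer(targets, predictions))
print("CER:", jiwer.cer(targets, predictions))
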
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:225a50bb21443d9a7d1334e463a7ba97349b1b0eaa025b85174c20035155fdbd
 size 1263231601
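
Note: pytorch_model.bin is tracked as a Git LFS pointer; the oid is the SHA-256 of the resolved weights blob. A minimal sketch for verifying a downloaded copy against the pointer (the file path is a placeholder for the resolved binary, not the pointer text file):

# Minimal sketch; "pytorch_model.bin" here is the downloaded weights blob.
import hashlib

EXPECTED = "225a50bb21443d9a7d1334e463a7ba97349b1b0eaa025b85174c20035155fdbd"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == EXPECTED)
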
todo.txt
ADDED
@@ -0,0 +1,4 @@
+TODO:
+- enlever apostrophe des caractères à enlever
+- ajouter une liste de caractères qui ne sont pas de la langue française
+- change epoch pour éviter des entrainements trop long