joselobenitezg committed on
Commit f5c493a · verified · 1 Parent(s): d332094

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +14 -9
  2. config.json +79 -86
  3. model.safetensors +2 -2
  4. pytorch_model.bin +2 -2
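
The commit message above says the files were pushed with `huggingface_hub`. A minimal sketch of such an upload, assuming a local export directory (the folder path and login handling are assumptions; the repo id is the one used in the model card below):

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes you are already authenticated, e.g. via `huggingface-cli login`

# Push every file in the local export directory as a single commit on the model repo.
api.upload_folder(
    folder_path="./mms-grn-tts",           # hypothetical local directory holding the exported model
    repo_id="joselobenitezg/mms-grn-tts",  # repo referenced in the README below
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```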
README.md CHANGED
@@ -1,24 +1,25 @@
  ---
- language: gn
+ language:
+ - grn
  tags:
  - guarani
- - tts
+ - text-to-speech
  - speech
+ - audio
  - vits
  license: mit
- datasets:
- - mozilla-foundation/common_voice_11_0
  ---

- # Guarani VITS TTS Model
+ # MMS-TTS Guarani Model

- This is a Text-to-Speech model for the Guarani language, based on the VITS architecture.
+ This is a VITS-based text-to-speech model for the Guarani language, based on the MMS-TTS architecture.

  ## Model Description

- This model is designed for Guarani text-to-speech synthesis, utilizing the VITS architecture. It can generate natural-sounding speech from Guarani text input.
+ This model can generate speech from Guarani text input using the VITS architecture.

  ## Usage
+
  ```python
  from transformers import VitsModel, AutoTokenizer
  import torch
@@ -26,9 +27,13 @@ import torch
  model = VitsModel.from_pretrained("joselobenitezg/mms-grn-tts")
  tokenizer = AutoTokenizer.from_pretrained("joselobenitezg/mms-grn-tts")

- text = "mba'eichapa"
+ text = "some example text in the Guarani language"
  inputs = tokenizer(text, return_tensors="pt")

  with torch.no_grad():
-     output = model.generate(**inputs)
+     output = model(**inputs).waveform
+
+ # Save the output as a wav file
+ import scipy
+ scipy.io.wavfile.write("output.wav", rate=model.config.sampling_rate, data=output)
  ```
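
The substantive fix in the card is the usage snippet: the old version called `model.generate(**inputs)`, which VITS does not support for waveform synthesis, while the new version runs the forward pass and reads the `waveform` field of the output. A sketch of the updated usage end to end; the squeeze-and-convert-to-NumPy step before writing the WAV is an added detail the card's snippet leaves implicit, and the example text is taken from the earlier version of the card:

```python
import scipy.io.wavfile
import torch
from transformers import VitsModel, AutoTokenizer

model = VitsModel.from_pretrained("joselobenitezg/mms-grn-tts")
tokenizer = AutoTokenizer.from_pretrained("joselobenitezg/mms-grn-tts")

inputs = tokenizer("mba'eichapa", return_tensors="pt")

with torch.no_grad():
    # The forward pass returns an output object whose .waveform has shape (batch, num_samples).
    waveform = model(**inputs).waveform

# Drop the batch dimension and convert to NumPy before writing a 16 kHz mono WAV.
audio = waveform.squeeze(0).cpu().numpy()
scipy.io.wavfile.write("output.wav", rate=model.config.sampling_rate, data=audio)
```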
config.json CHANGED
@@ -1,90 +1,83 @@
  {
- "activation_dropout": 0.1,
- "architectures": [
- "VitsModel"
- ],
- "attention_dropout": 0.1,
- "depth_separable_channels": 2,
- "depth_separable_num_layers": 3,
- "duration_predictor_dropout": 0.5,
- "duration_predictor_filter_channels": 256,
- "duration_predictor_flow_bins": 10,
- "duration_predictor_kernel_size": 3,
- "duration_predictor_num_flows": 4,
- "duration_predictor_tail_bound": 5.0,
- "ffn_dim": 768,
- "ffn_kernel_size": 3,
- "flow_size": 192,
- "hidden_act": "relu",
- "hidden_dropout": 0.1,
- "hidden_size": 192,
- "initializer_range": 0.02,
- "layer_norm_eps": 1e-05,
- "layerdrop": 0.1,
- "leaky_relu_slope": 0.1,
- "model_type": "vits",
- "noise_scale": 0.667,
- "noise_scale_duration": 0.8,
- "num_attention_heads": 2,
- "num_hidden_layers": 6,
- "num_speakers": 1,
- "posterior_encoder_num_wavenet_layers": 16,
- "prior_encoder_num_flows": 4,
- "prior_encoder_num_wavenet_layers": 4,
- "resblock_dilation_sizes": [
- [
- 1,
- 3,
- 5
  ],
- [
- 1,
- 3,
- 5
  ],
- [
- 1,
  3,
- 5
- ]
- ],
- "resblock_kernel_sizes": [
- 3,
- 7,
- 11
- ],
- "sampling_rate": 16000,
- "speaker_embedding_size": 0,
- "speaking_rate": 1.0,
- "spectrogram_bins": 513,
- "torch_dtype": "float32",
- "transformers_version": "4.28.0",
- "upsample_initial_channel": 512,
- "upsample_kernel_sizes": [
- 16,
- 16,
- 4,
- 4
- ],
- "upsample_rates": [
- 8,
- 8,
- 2,
- 2
- ],
- "use_bias": true,
- "use_stochastic_duration_prediction": true,
- "vocab_size": 53,
- "wavenet_dilation_rate": 1,
- "wavenet_dropout": 0.0,
- "wavenet_kernel_size": 5,
- "window_size": 4,
- "is_encoder_decoder": true,
- "intermediate_size": 768,
- "max_position_embeddings": 2048,
- "pad_token_id": 0,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "model_id": "mms-grn-tts",
- "task": "text-to-speech"
- }
  {
+ "activation_dropout": 0.1,
+ "architectures": [
+ "VitsModel"
  ],
+ "attention_dropout": 0.1,
+ "depth_separable_channels": 2,
+ "depth_separable_num_layers": 3,
+ "duration_predictor_dropout": 0.5,
+ "duration_predictor_filter_channels": 256,
+ "duration_predictor_flow_bins": 10,
+ "duration_predictor_kernel_size": 3,
+ "duration_predictor_num_flows": 4,
+ "duration_predictor_tail_bound": 5.0,
+ "ffn_dim": 768,
+ "ffn_kernel_size": 3,
+ "flow_size": 192,
+ "hidden_act": "relu",
+ "hidden_dropout": 0.1,
+ "hidden_size": 192,
+ "initializer_range": 0.02,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "leaky_relu_slope": 0.1,
+ "model_type": "vits",
+ "noise_scale": 0.667,
+ "noise_scale_duration": 0.8,
+ "num_attention_heads": 2,
+ "num_hidden_layers": 6,
+ "num_speakers": 1,
+ "posterior_encoder_num_wavenet_layers": 16,
+ "prior_encoder_num_flows": 4,
+ "prior_encoder_num_wavenet_layers": 4,
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
  ],
+ "resblock_kernel_sizes": [
  3,
+ 7,
+ 11
+ ],
+ "sampling_rate": 16000,
+ "speaker_embedding_size": 0,
+ "speaking_rate": 1.0,
+ "spectrogram_bins": 513,
+ "torch_dtype": "float32",
+ "transformers_version": "4.33.0.dev0",
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2
+ ],
+ "use_bias": true,
+ "use_stochastic_duration_prediction": true,
+ "vocab_size": 53,
+ "wavenet_dilation_rate": 1,
+ "wavenet_dropout": 0.0,
+ "wavenet_kernel_size": 5,
+ "window_size": 4
+ }
+
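
The config.json rewrite keeps every VITS hyperparameter value unchanged; it drops the extra keys the old file carried (`is_encoder_decoder`, `intermediate_size`, `max_position_embeddings`, the pad/bos/eos token ids, `model_id`, `task`) and updates `transformers_version` from 4.28.0 to 4.33.0.dev0. A quick sanity check that the cleaned config loads with transformers (a sketch; the printed fields are arbitrary choices):

```python
from transformers import VitsConfig

config = VitsConfig.from_pretrained("joselobenitezg/mms-grn-tts")

# Spot-check a few hyperparameters carried over from the previous config.
print(config.model_type)     # "vits"
print(config.sampling_rate)  # 16000
print(config.vocab_size)     # 53
print(config.num_speakers)   # 1
```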
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:98571fe9308634639b03be782aa1a52d1064ea2b361f78f149474a2482541fdc
- size 145280528
+ oid sha256:df93b9c09021ca6ca0c523dd592c6dda97ab201f1396ff4f0ab8beeee86702d2
+ size 145242200
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:22c37f591dc560adb1ff8267b95cbd8a1285e1ba35fc522540a6591a83e83b81
- size 145458210
+ oid sha256:2e965dedf44e12b1cd1f0d8cd0c6e3e3e483dce2b83886a503f539711491e850
+ size 145452018
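
Both weight files are tracked with Git LFS, so the diff only shows the updated pointer files: `oid sha256:` is the hash of the new binary and `size` is its byte count. A sketch of verifying a downloaded checkpoint against the pointer above, pinning the download to this commit (the chunked-hashing helper is an assumption, not something the repo ships):

```python
import hashlib

from huggingface_hub import hf_hub_download

# Fetch the binary this LFS pointer refers to, pinned to commit f5c493a.
path = hf_hub_download(
    repo_id="joselobenitezg/mms-grn-tts",
    filename="model.safetensors",
    revision="f5c493a",
)

# Recompute the SHA-256 and compare it with the oid recorded in the new pointer file.
expected = "df93b9c09021ca6ca0c523dd592c6dda97ab201f1396ff4f0ab8beeee86702d2"
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

print(digest.hexdigest() == expected)  # True if the file matches this commit
```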