Datasets:
TenzinGayche
committed on
Commit
·
58cb7b6
1
Parent(s):
779b358
wylie subsets added
Browse files- tibetan_voice.py +15 -1
- transcripts /test-wylie.tsv +3 -0
- transcripts /train-wylie.tsv +3 -0
- transcripts /valid--wylie.tsv +3 -0
tibetan_voice.py
CHANGED
@@ -49,6 +49,9 @@ _URLS = {
|
|
49 |
"train": _URL + "train-uni.tsv",
|
50 |
"valid": _URL + "valid-uni.tsv",
|
51 |
"test": _URL + "test-uni.tsv",
|
|
|
|
|
|
|
52 |
}
|
53 |
|
54 |
|
@@ -73,6 +76,11 @@ class TibetanVoice(datasets.GeneratorBasedBuilder):
|
|
73 |
version=datasets.Version("1.0.0", ""),
|
74 |
description="The dataset comprises 6.5 hours of validated transcribed speech data from 9 audio book in lhasa dialect ",
|
75 |
),
|
|
|
|
|
|
|
|
|
|
|
76 |
]
|
77 |
|
78 |
def _info(self):
|
@@ -88,7 +96,7 @@ class TibetanVoice(datasets.GeneratorBasedBuilder):
|
|
88 |
# No default supervised_keys (as we have to pass both question
|
89 |
# and context as input).
|
90 |
supervised_keys=None,
|
91 |
-
homepage="https://huggingface.co/datasets/
|
92 |
citation=_CITATION,
|
93 |
)
|
94 |
|
@@ -97,6 +105,12 @@ class TibetanVoice(datasets.GeneratorBasedBuilder):
|
|
97 |
downloaded_wav = dl_manager.download(_DataUrl)
|
98 |
wavs= dl_manager.iter_archive(downloaded_wav)
|
99 |
downloaded_wav = dl_manager.download_and_extract(_DataUrl)
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
return [
|
101 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"],"wavs":wavs,'wavfilepath':downloaded_wav}),
|
102 |
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"],"wavs":wavs,'wavfilepath':downloaded_wav}),
|
|
|
49 |
"train": _URL + "train-uni.tsv",
|
50 |
"valid": _URL + "valid-uni.tsv",
|
51 |
"test": _URL + "test-uni.tsv",
|
52 |
+
"train-wylie": _URL + "train-wylie.tsv",
|
53 |
+
"valid-wylie": _URL + "valid-wylie.tsv",
|
54 |
+
"test-wylie": _URL + "test-wylie.tsv",
|
55 |
}
|
56 |
|
57 |
|
|
|
76 |
version=datasets.Version("1.0.0", ""),
|
77 |
description="The dataset comprises 6.5 hours of validated transcribed speech data from 9 audio book in lhasa dialect ",
|
78 |
),
|
79 |
+
TibetanVoiceConfig(
|
80 |
+
name="lhasa-wylie",
|
81 |
+
version=datasets.Version("1.0.0", ""),
|
82 |
+
description="The dataset comprises 6.5 hours of validated transcribed speech data (wylie) from 9 audio book in lhasa dialect ",
|
83 |
+
),
|
84 |
]
|
85 |
|
86 |
def _info(self):
|
|
|
96 |
# No default supervised_keys (as we have to pass both question
|
97 |
# and context as input).
|
98 |
supervised_keys=None,
|
99 |
+
homepage="https://huggingface.co/datasets/openpecha/tibetan_voice/",
|
100 |
citation=_CITATION,
|
101 |
)
|
102 |
|
|
|
105 |
downloaded_wav = dl_manager.download(_DataUrl)
|
106 |
wavs= dl_manager.iter_archive(downloaded_wav)
|
107 |
downloaded_wav = dl_manager.download_and_extract(_DataUrl)
|
108 |
+
if(self.config.name!='lhasa'):
|
109 |
+
downloaded_files['train']= downloaded_files['train-wylie']
|
110 |
+
downloaded_files['test']= downloaded_files['test-wylie']
|
111 |
+
downloaded_files['valid']= downloaded_files['valid-wylie']
|
112 |
+
|
113 |
+
|
114 |
return [
|
115 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"],"wavs":wavs,'wavfilepath':downloaded_wav}),
|
116 |
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"],"wavs":wavs,'wavfilepath':downloaded_wav}),
|
transcripts /test-wylie.tsv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e0fe3d07bb772783e6351c0d64360453a9ee796611f22c327e953e97f5b04216
|
3 |
+
size 54417
|
transcripts /train-wylie.tsv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:305bab453e6b50d660b9df598bbbc3afc8572324dc3b2a08c6d6f48bef24ce49
|
3 |
+
size 439648
|
transcripts /valid--wylie.tsv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2fdc0462979d4484a45f4735e01dce5961f4057f77234a256344c24ee97c1a02
|
3 |
+
size 54940
|