Datasets:
Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: multiple-choice-qa
Languages: Chinese
Size: 10K - 100K
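For quick reference, a minimal loading sketch with the datasets library. The configuration name "mixed" is an assumption (the script's C3Config takes a type_ argument); check the available configurations before relying on it.

from datasets import load_dataset, get_dataset_config_names

# Minimal sketch: load the C3 Chinese multiple-choice QA dataset.
# The configuration name "mixed" is an assumption, not confirmed by this page;
# list the real configs first if unsure.
print(get_dataset_config_names("c3"))
ds = load_dataset("c3", "mixed")
print(ds["train"][0])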
Update files from the datasets library (from 1.6.1)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.1
c3.py
CHANGED
@@ -39,7 +39,7 @@ _URL = "https://raw.githubusercontent.com/nlpdata/c3/master/data/"
 
 
 class C3Config(datasets.BuilderConfig):
-    """
+    """BuilderConfig for NewDataset"""
 
     def __init__(self, type_, **kwargs):
         """
@@ -138,7 +138,7 @@ class C3(datasets.GeneratorBasedBuilder):
     ]
 
     def _generate_examples(self, filename, split):
-        """
+        """Yields examples."""
         with open(filename, "r", encoding="utf-8") as sf:
             data = json.load(sf)
             for id_, (documents, questions, document_id) in enumerate(data):
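The generator shown in the diff reads the whole JSON file and then walks (documents, questions, document_id) triples. A self-contained sketch of that loop follows; the per-question field names ("question", "choice", "answer") are assumptions based on the public C3 data files and are not shown in the diff itself.

import json

def iter_c3_examples(filename):
    # Sketch of the pattern in _generate_examples: load the full JSON file,
    # then iterate over each (documents, questions, document_id) triple.
    with open(filename, "r", encoding="utf-8") as sf:
        data = json.load(sf)
    for id_, (documents, questions, document_id) in enumerate(data):
        for question in questions:
            # Field names below are assumed from the public C3 data format.
            yield id_, {
                "documents": documents,
                "document_id": document_id,
                "question": question.get("question"),
                "choices": question.get("choice"),
                "answer": question.get("answer"),
            }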