Convert dataset to Parquet

#3
by rishabbala - opened
README.md CHANGED
@@ -1,16 +1,15 @@
1
  ---
2
  annotations_creators:
3
  - crowdsourced
4
- language:
5
- - en
6
  language_creators:
7
  - crowdsourced
8
  - expert-generated
 
 
9
  license:
10
  - apache-2.0
11
  multilinguality:
12
  - monolingual
13
- pretty_name: MathQA
14
  size_categories:
15
  - 10K<n<100K
16
  source_datasets:
@@ -20,6 +19,7 @@ task_categories:
20
  task_ids:
21
  - multiple-choice-qa
22
  paperswithcode_id: mathqa
 
23
  dataset_info:
24
  features:
25
  - name: Problem
@@ -37,17 +37,26 @@ dataset_info:
37
  - name: category
38
  dtype: string
39
  splits:
40
- - name: test
41
- num_bytes: 1844184
42
- num_examples: 2985
43
  - name: train
44
- num_bytes: 18368826
45
  num_examples: 29837
 
 
 
46
  - name: validation
47
- num_bytes: 2752969
48
  num_examples: 4475
49
- download_size: 7302821
50
- dataset_size: 22965979
 
 
 
 
 
 
 
 
 
51
  ---
52
 
53
  # Dataset Card for MathQA
 
1
  ---
2
  annotations_creators:
3
  - crowdsourced
 
 
4
  language_creators:
5
  - crowdsourced
6
  - expert-generated
7
+ language:
8
+ - en
9
  license:
10
  - apache-2.0
11
  multilinguality:
12
  - monolingual
 
13
  size_categories:
14
  - 10K<n<100K
15
  source_datasets:
 
19
  task_ids:
20
  - multiple-choice-qa
21
  paperswithcode_id: mathqa
22
+ pretty_name: MathQA
23
  dataset_info:
24
  features:
25
  - name: Problem
 
37
  - name: category
38
  dtype: string
39
  splits:
 
 
 
40
  - name: train
41
+ num_bytes: 18338902
42
  num_examples: 29837
43
+ - name: test
44
+ num_bytes: 1841164
45
+ num_examples: 2985
46
  - name: validation
47
+ num_bytes: 2748461
48
  num_examples: 4475
49
+ download_size: 11267301
50
+ dataset_size: 22928527
51
+ configs:
52
+ - config_name: default
53
+ data_files:
54
+ - split: train
55
+ path: data/train-*
56
+ - split: test
57
+ path: data/test-*
58
+ - split: validation
59
+ path: data/validation-*
60
  ---
61
 
62
  # Dataset Card for MathQA
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9726a151a46cccac5136a86c1223fe09943780c0fe8a23a0a38a9366d0519539
3
+ size 903427
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ba231ef278b1f9974520f5b7ce685c86dbe1e782578d4b4ba66fbc0e47d52ff
3
+ size 9013733
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12610e6772c866a9e9ef81ae58663f195b04e64bb4e9cff7fdc6120d4fda02f5
3
+ size 1350141
math_qa.py DELETED
@@ -1,84 +0,0 @@
1
"""Loader for the MathQA dataset (math word problems annotated over AQuA-RAT)."""


import json
import os

import datasets


# TODO(math_qa): BibTeX citation
_CITATION = """
"""

# TODO(math_qa):
# Human-readable description shown on the dataset page.
_DESCRIPTION = """
Our dataset is gathered by using a new representation language to annotate over the AQuA-RAT dataset. AQuA-RAT has provided the questions, options, rationale, and the correct options.
"""
# Zip archive containing train.json / test.json / dev.json.
_URL = "https://math-qa.github.io/math-QA/data/MathQA.zip"
21
class MathQa(datasets.GeneratorBasedBuilder):
    """Dataset builder for MathQA: downloads the official zip and yields examples."""

    # TODO(math_qa): Set up version.
    VERSION = datasets.Version("0.1.0")

    def _info(self):
        """Return the dataset metadata: every feature is a plain string column."""
        feature_names = (
            "Problem",
            "Rationale",
            "options",
            "correct",
            "annotated_formula",
            "linear_formula",
            "category",
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {name: datasets.Value("string") for name in feature_names}
            ),
            # No canonical (input, target) pairing is declared for this dataset.
            supervised_keys=None,
            homepage="https://math-qa.github.io/math-QA/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map each split to its JSON file."""
        extracted = dl_manager.download_and_extract(_URL)
        split_to_file = (
            (datasets.Split.TRAIN, "train.json"),
            (datasets.Split.TEST, "test.json"),
            # The validation split ships as dev.json in the archive.
            (datasets.Split.VALIDATION, "dev.json"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": os.path.join(extracted, filename)},
            )
            for split_name, filename in split_to_file
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from one split's JSON array."""
        with open(filepath, encoding="utf-8") as handle:
            rows = json.load(handle)
        # Each row is already a dict matching the declared features.
        yield from enumerate(rows)