Datasets:
Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License: unspecified
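
Since the dataset ships in Parquet and pandas is listed as a supported library, here is a minimal sketch of reading one split directly with pandas over the hf:// filesystem (requires pandas, fsspec, and huggingface_hub). The exact Parquet file path below is an assumption, not taken from this page; check the repository's file listing for the real name.

import pandas as pd

# Hypothetical path to the train split's Parquet file inside the dataset repo.
df = pd.read_parquet("hf://datasets/web_questions/default/train/0000.parquet")

print(df.columns.tolist())  # expected: ['url', 'question', 'answers']
print(len(df))              # expected: 3778 rows in the train split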
{
    "default": {
        "description": "This dataset consists of 6,642 question/answer pairs.\nThe questions are supposed to be answerable by Freebase, a large knowledge graph.\nThe questions are mostly centered around a single named entity.\nThe questions are popular ones asked on the web (at least in 2013).\n",
        "citation": "\n@inproceedings{berant-etal-2013-semantic,\n    title = \"Semantic Parsing on {F}reebase from Question-Answer Pairs\",\n    author = \"Berant, Jonathan  and\n      Chou, Andrew  and\n      Frostig, Roy  and\n      Liang, Percy\",\n    booktitle = \"Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing\",\n    month = oct,\n    year = \"2013\",\n    address = \"Seattle, Washington, USA\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/D13-1160\",\n    pages = \"1533--1544\",\n}\n",
        "homepage": "https://worksheets.codalab.org/worksheets/0xba659fe363cb46e7a505c5b6a774dc8a",
        "license": "",
        "features": {
            "url": {
                "dtype": "string",
                "_type": "Value"
            },
            "question": {
                "dtype": "string",
                "_type": "Value"
            },
            "answers": {
                "feature": {
                    "dtype": "string",
                    "_type": "Value"
                },
                "_type": "Sequence"
            }
        },
        "builder_name": "web_questions",
        "dataset_name": "web_questions",
        "config_name": "default",
        "version": {
            "version_str": "1.0.0",
            "major": 1,
            "minor": 0,
            "patch": 0
        },
        "splits": {
            "train": {
                "name": "train",
                "num_bytes": 530711,
                "num_examples": 3778,
                "dataset_name": null
            },
            "test": {
                "name": "test",
                "num_bytes": 288184,
                "num_examples": 2032,
                "dataset_name": null
            }
        },
        "download_size": 402395,
        "dataset_size": 818895,
        "size_in_bytes": 1221290
    }
}
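
As a cross-check of the metadata above, a minimal sketch of loading the dataset with the datasets library and verifying the split sizes and features; it assumes the dataset is reachable on the Hugging Face Hub under the builder name "web_questions" (older library versions may load it from the original loading script rather than Parquet).

from datasets import load_dataset

ds = load_dataset("web_questions")

# Split sizes should match the metadata above: 3,778 train and 2,032 test examples.
print(ds["train"].num_rows, ds["test"].num_rows)

# Each example exposes the three declared features:
# a source `url`, a natural-language `question`, and a list of `answers`.
example = ds["train"][0]
print(example["url"])
print(example["question"])
print(example["answers"])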