Tasks: Text Generation
Sub-tasks: language-modeling
Languages: Danish
Formats: parquet
Size: 1M - 10M
License:
KennethEnevoldsen committed:
Add test to ensure yaml is up to date
Files changed:
- CONTRIBUTING.md +4 -4
- README.md +1 -1
- tests/conftest.py +21 -0
- tests/test_load.py +15 -2
- tests/test_unique_ids.py +3 -3
CONTRIBUTING.md
CHANGED

@@ -3,8 +3,8 @@
 A huggingface datasets repository is a GitHub repository like any other. You can simply download it like so:
 
 ```bash
-git clone https://huggingface.co/datasets/danish-foundation-models/danish-
-cd danish-
+git clone https://huggingface.co/datasets/danish-foundation-models/danish-dynaword
+cd danish-dynaword
 ```
 
 You can the work with the dataset locally like so:

@@ -12,7 +12,7 @@ You can the work with the dataset locally like so:
 ```py
 from datasets import load_dataset
 
-name = "../." # instead of "danish-foundation-models/danish-
+name = "../." # instead of "danish-foundation-models/danish-dynaword"
 dataset = load_dataset("../.", split="train")
 # make transformations here
 ```

@@ -53,4 +53,4 @@ Before you make the PR do be sure to make sure that the tests have been run.
 
 To see example PR you can see the following:
 
-- [Restructuring columns in the dataset](https://huggingface.co/datasets/danish-foundation-models/danish-
+- [Restructuring columns in the dataset](https://huggingface.co/datasets/danish-foundation-models/danish-dynaword/discussions/11)
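The snippet above loads from a local clone; for reference, the same dataset can also be streamed straight from the Hub. A minimal sketch using the repo id introduced in this diff, with `streaming=True` as in `tests/test_load.py` below:

```py
from datasets import load_dataset

# Stream from the Hub rather than a local clone, so inspecting a sample
# does not require downloading the full parquet files first.
ds = load_dataset(
    "danish-foundation-models/danish-dynaword", split="train", streaming=True
)
print(next(iter(ds)))  # first sample as a plain dict
```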
README.md
CHANGED

@@ -117,7 +117,7 @@ https://github.com/huggingface/datasets/blob/main/templates/README_guide.md -->
 | **Language** | dan, dansk, Danish |
 | **License** | Permissible, See the respective dataset |
 | **Models** | For model trained used this data see [danish-foundation-models](https://huggingface.co/danish-foundation-models) |
-| **Contact** | If you have question about this project please create an issue [here](https://huggingface.co/datasets/danish-foundation-models/danish-
+| **Contact** | If you have question about this project please create an issue [here](https://huggingface.co/datasets/danish-foundation-models/danish-dynaword/discussions) |
 
 
 ## Table of Contents
tests/conftest.py
ADDED

@@ -0,0 +1,21 @@
+from pathlib import Path
+
+import pytest
+
+# from typing import Any
+# import yaml
+
+
+@pytest.fixture()
+def repo_path() -> Path:
+    return Path(__file__).parent.parent
+
+
+# def readme_yaml_header(repo_path: Path) -> dict[str, Any]:
+#     readme_path = repo_path / "README.md"
+
+#     with readme_path.open("r") as f:
+#         readme = f.read()
+
+#     frontmatter = readme.split("---")[1]
+#     return yaml.load()
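The commented-out `readme_yaml_header` fixture stops short: `yaml.load()` is called with no arguments, and the function lacks a `@pytest.fixture()` decorator. A possible completion, assuming PyYAML is the intended parser:

```py
from pathlib import Path
from typing import Any

import pytest
import yaml  # PyYAML, assumed to be the intended parser


@pytest.fixture()
def readme_yaml_header(repo_path: Path) -> dict[str, Any]:
    """Parse the YAML front matter between the first two '---' markers of README.md."""
    readme = (repo_path / "README.md").read_text()
    frontmatter = readme.split("---")[1]
    return yaml.safe_load(frontmatter)
```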
tests/test_load.py
CHANGED

@@ -1,9 +1,22 @@
-from datasets import load_dataset
 from pathlib import Path
 
+from datasets import load_dataset
+
+
 def test_dataset_loads():
+    """Ensures that the dataset can load as intended"""
     repo = Path(__file__).parent.parent
-    name = str(repo.resolve())
+    name = str(repo.resolve())
     ds = load_dataset(name, split="train", streaming=True)
     sample = next(iter(ds))
     assert isinstance(sample, dict)
+
+
+# def test_all_datasets_in_yaml(repo_path: Path, readme_yaml_header: dict[str, Any]):
+#     configs = readme_yaml_header["configs"]
+
+#     data_folder = repo_path / "data"
+#     datasets = data_folder.glob("*")
+
+#     for dataset in datasets:
+#         assert dataset in configs
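Note that the commented-out `test_all_datasets_in_yaml` compares `Path` objects against the `configs` entries directly, which would never match. A hypothetical working version, assuming each `configs` entry in the README front matter carries a `config_name` key (the usual Hub dataset-card layout) and the fixtures sketched above:

```py
from pathlib import Path
from typing import Any


def test_all_datasets_in_yaml(repo_path: Path, readme_yaml_header: dict[str, Any]):
    """Every dataset folder under data/ should be declared in the README YAML header."""
    config_names = {cfg["config_name"] for cfg in readme_yaml_header["configs"]}

    data_folder = repo_path / "data"
    for dataset in data_folder.glob("*"):
        if dataset.is_dir():
            assert dataset.name in config_names, f"{dataset.name} missing from configs"
```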
tests/test_unique_ids.py
CHANGED

@@ -4,9 +4,9 @@ from typing import cast
 from datasets import Dataset, load_dataset
 
 
-def test_ensure_ids_are_unique():
-    repo = Path(__file__).parent.parent
-    name = str(repo.resolve())
+def test_ensure_ids_are_unique(repo_path: Path):
+    # repo = Path(__file__).parent.parent
+    name = str(repo_path.resolve())
     ds = load_dataset(name, split="train")
     ds = cast(Dataset, ds)
     assert len(set(ds["id"])) == len(ds)
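One possible refinement, not part of this commit: the bare length comparison only reports that the counts differ. A `Counter`-based variant would name the offending ids in the failure message:

```py
from collections import Counter
from pathlib import Path
from typing import cast

from datasets import Dataset, load_dataset


def test_ensure_ids_are_unique(repo_path: Path):
    ds = load_dataset(str(repo_path.resolve()), split="train")
    ds = cast(Dataset, ds)
    # Collect ids that occur more than once so the assertion message is actionable.
    duplicates = {id_: n for id_, n in Counter(ds["id"]).items() if n > 1}
    assert not duplicates, f"duplicate ids found: {duplicates}"
```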