datasetId (string, 5-121 chars) | author (string, 2-42 chars) | last_modified (unknown) | downloads (int64, 0-4.83M) | likes (int64, 0-7.6k) | tags (sequence, 1-7.92k items) | task_categories (sequence, 0-47 items, nullable) | createdAt (unknown) | card (string, 15-1.02M chars)
---|---|---|---|---|---|---|---|---
Warlock700/Plahov | Warlock700 | "2024-11-30T15:02:48Z" | 30 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-30T15:02:28Z" | ---
license: apache-2.0
---
|
jons024/sabrinaluana | jons024 | "2024-11-30T18:28:20Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-30T15:45:12Z" | ---
license: openrail
---
|
MikeLud/ipcam-combined | MikeLud | "2024-11-30T16:23:45Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-30T16:23:45Z" | ---
license: apache-2.0
---
|
Ibaahjnr/Twi_Train_Dataset | Ibaahjnr | "2024-11-30T18:43:33Z" | 30 | 0 | [
"license:unknown",
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-30T17:50:03Z" | ---
license: unknown
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 2580378594.011
num_examples: 28063
download_size: 1967678284
dataset_size: 2580378594.011
---
|
rahul77/pubtables-1m-batch5 | rahul77 | "2024-11-30T19:01:37Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-30T19:01:34Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: latex
dtype: string
- name: filename
dtype: string
splits:
- name: train
num_bytes: 17019038.0
num_examples: 500
download_size: 16366785
dataset_size: 17019038.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Unstudied/swainvoz | Unstudied | "2024-11-30T23:33:54Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-30T23:33:15Z" | ---
license: openrail
---
|
mpanda27/voxpopuli_sl_pseudo_labelled | mpanda27 | "2024-12-01T01:30:27Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T01:16:21Z" | ---
dataset_info:
config_name: sl
features:
- name: audio_id
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: normalized_text
dtype: string
- name: condition_on_prev
sequence: int64
- name: whisper_transcript
dtype: string
splits:
- name: train
num_bytes: 700481584.0
num_examples: 938
- name: validation
num_bytes: 319469640.0
num_examples: 432
- name: test
num_bytes: 94388877.0
num_examples: 127
download_size: 1109387674
dataset_size: 1114340101.0
configs:
- config_name: sl
data_files:
- split: train
path: sl/train-*
- split: validation
path: sl/validation-*
- split: test
path: sl/test-*
---
|
khursani8/text | khursani8 | "2024-12-01T02:03:48Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T02:03:44Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 60941
num_examples: 447
download_size: 26747
dataset_size: 60941
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Zack157/zecarlos | Zack157 | "2024-12-01T04:06:45Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-01T04:03:11Z" | ---
license: openrail
---
|
HCBAI/PublicFiles | HCBAI | "2024-12-15T05:03:56Z" | 30 | 0 | [
"license:unknown",
"region:us"
] | null | "2024-12-01T05:05:10Z" | ---
license: unknown
---
|
ADHIZ/image_germ | ADHIZ | "2024-12-01T05:39:08Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T05:39:07Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 1678972.0
num_examples: 2
download_size: 1680896
dataset_size: 1678972.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ADHIZ/image_sacdkdkldaz | ADHIZ | "2024-12-01T06:16:07Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T06:16:06Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 1678972.0
num_examples: 2
download_size: 1680896
dataset_size: 1678972.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ADHIZ/image_devi | ADHIZ | "2024-12-01T08:45:13Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T08:45:06Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 1678972.0
num_examples: 2
download_size: 1680896
dataset_size: 1678972.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
SheriffReed/YandereSimi202X | SheriffReed | "2024-12-01T09:36:46Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-01T09:36:45Z" | ---
license: apache-2.0
---
|
rowlandzhang/aligned_ais_video_datset | rowlandzhang | "2024-12-01T13:44:39Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-01T13:44:39Z" | ---
license: apache-2.0
---
|
taufiqsyed/salami-processed-enriched | taufiqsyed | "2024-12-01T16:02:24Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T16:01:50Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: song_id
dtype: string
- name: structure
dtype: string
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 605969118.0
num_examples: 229
- name: eval
num_bytes: 21169212.0
num_examples: 8
download_size: 613896719
dataset_size: 627138330.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: eval
path: data/eval-*
---
|
richmondsin/arc_it_results | richmondsin | "2024-12-01T17:07:44Z" | 30 | 0 | [
"region:us"
] | null | "2024-12-01T17:07:35Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/arc_it_results\"\
,\n\tname=\"google__gemma-2-2b__arc_it\",\n\tsplit=\"latest\"\n)\n```\n\n## Latest\
\ results\n\nThese are the [latest results from run 2024-12-01T12-07-35.117919](https://huggingface.co/datasets/richmondsin/arc_it_results/blob/main/google/gemma-2-2b/results_2024-12-01T12-07-35.117919.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"arc_it\": {\n \
\ \"alias\": \"arc_it\",\n \"acc,none\": 0.3888888888888889,\n\
\ \"acc_stderr,none\": 0.014599413987491596,\n \"acc_norm,none\"\
: 0.4390681003584229,\n \"acc_norm_stderr,none\": 0.014862216324833933\n\
\ }\n },\n \"arc_it\": {\n \"alias\": \"arc_it\",\n \"\
acc,none\": 0.3888888888888889,\n \"acc_stderr,none\": 0.014599413987491596,\n\
\ \"acc_norm,none\": 0.4390681003584229,\n \"acc_norm_stderr,none\"\
: 0.014862216324833933\n }\n}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__arc_it
data_files:
- split: 2024_12_01T12_07_35.117919
path:
- '**/samples_arc_it_2024-12-01T12-07-35.117919.jsonl'
- split: latest
path:
- '**/samples_arc_it_2024-12-01T12-07-35.117919.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can, for instance, do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/arc_it_results",
name="google__gemma-2-2b__arc_it",
split="latest"
)
```
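Each run is also exposed as its own timestamped split (see the `configs` section of this card), so a specific run can be loaded directly instead of `latest`. A minimal sketch, using the timestamped split name listed above:
```python
from datasets import load_dataset

# Load the per-sample records of one specific run; the split name is the
# run timestamp listed in this card's "configs" section.
run = load_dataset(
    "richmondsin/arc_it_results",
    name="google__gemma-2-2b__arc_it",
    split="2024_12_01T12_07_35.117919"
)
```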
## Latest results
These are the [latest results from run 2024-12-01T12-07-35.117919](https://huggingface.co/datasets/richmondsin/arc_it_results/blob/main/google/gemma-2-2b/results_2024-12-01T12-07-35.117919.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in its "results" and "latest" splits):
```python
{
"all": {
"arc_it": {
"alias": "arc_it",
"acc,none": 0.3888888888888889,
"acc_stderr,none": 0.014599413987491596,
"acc_norm,none": 0.4390681003584229,
"acc_norm_stderr,none": 0.014862216324833933
}
},
"arc_it": {
"alias": "arc_it",
"acc,none": 0.3888888888888889,
"acc_stderr,none": 0.014599413987491596,
"acc_norm,none": 0.4390681003584229,
"acc_norm_stderr,none": 0.014862216324833933
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
richmondsin/arc_id_results | richmondsin | "2024-12-01T17:48:57Z" | 30 | 0 | [
"region:us"
] | null | "2024-12-01T17:48:48Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/arc_id_results\"\
,\n\tname=\"google__gemma-2-2b__arc_id\",\n\tsplit=\"latest\"\n)\n```\n\n## Latest\
\ results\n\nThese are the [latest results from run 2024-12-01T12-48-48.275872](https://huggingface.co/datasets/richmondsin/arc_id_results/blob/main/google/gemma-2-2b/results_2024-12-01T12-48-48.275872.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"arc_id\": {\n \
\ \"alias\": \"arc_id\",\n \"acc,none\": 0.36379928315412186,\n\
\ \"acc_stderr,none\": 0.014407564179556647,\n \"acc_norm,none\"\
: 0.4014336917562724,\n \"acc_norm_stderr,none\": 0.014679984936613356\n\
\ }\n },\n \"arc_id\": {\n \"alias\": \"arc_id\",\n \"\
acc,none\": 0.36379928315412186,\n \"acc_stderr,none\": 0.014407564179556647,\n\
\ \"acc_norm,none\": 0.4014336917562724,\n \"acc_norm_stderr,none\"\
: 0.014679984936613356\n }\n}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__arc_id
data_files:
- split: 2024_12_01T12_48_48.275872
path:
- '**/samples_arc_id_2024-12-01T12-48-48.275872.jsonl'
- split: latest
path:
- '**/samples_arc_id_2024-12-01T12-48-48.275872.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can, for instance, do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/arc_id_results",
name="google__gemma-2-2b__arc_id",
split="latest"
)
```
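Since new runs are appended as extra timestamped splits, the available runs for this configuration can be enumerated without downloading the data. A minimal sketch, assuming the standard `datasets` helper:
```python
from datasets import get_dataset_split_names

# Each returned split name (other than "latest") is the timestamp of one run.
splits = get_dataset_split_names(
    "richmondsin/arc_id_results",
    config_name="google__gemma-2-2b__arc_id"
)
print(splits)  # e.g. ["2024_12_01T12_48_48.275872", "latest"]
```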
## Latest results
These are the [latest results from run 2024-12-01T12-48-48.275872](https://huggingface.co/datasets/richmondsin/arc_id_results/blob/main/google/gemma-2-2b/results_2024-12-01T12-48-48.275872.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in its "results" and "latest" splits):
```python
{
"all": {
"arc_id": {
"alias": "arc_id",
"acc,none": 0.36379928315412186,
"acc_stderr,none": 0.014407564179556647,
"acc_norm,none": 0.4014336917562724,
"acc_norm_stderr,none": 0.014679984936613356
}
},
"arc_id": {
"alias": "arc_id",
"acc,none": 0.36379928315412186,
"acc_stderr,none": 0.014407564179556647,
"acc_norm,none": 0.4014336917562724,
"acc_norm_stderr,none": 0.014679984936613356
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
alisonmt/wasteclf | alisonmt | "2024-12-01T17:55:12Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-01T17:53:43Z" | ---
license: apache-2.0
---
|
amuvarma/60k-fac-with-audio-1dups | amuvarma | "2024-12-01T19:49:38Z" | 30 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T18:25:24Z" | ---
dataset_info:
features:
- name: transcript
dtype: string
- name: audio
dtype: audio
- name: facodec_0
sequence: int64
- name: facodec_1
sequence: int64
- name: facodec_2
sequence: int64
- name: facodec_3
sequence: int64
- name: facodec_4
sequence: int64
- name: facodec_5
sequence: int64
- name: spk_embs
sequence: float64
splits:
- name: train
num_bytes: 7325882445.0
num_examples: 60000
download_size: 4560587252
dataset_size: 7325882445.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
richmondsin/arc_ca_results | richmondsin | "2024-12-01T18:49:52Z" | 30 | 0 | [
"region:us"
] | null | "2024-12-01T18:49:44Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/arc_ca_results\"\
,\n\tname=\"google__gemma-2-2b__arc_ca\",\n\tsplit=\"latest\"\n)\n```\n\n## Latest\
\ results\n\nThese are the [latest results from run 2024-12-01T13-49-44.047418](https://huggingface.co/datasets/richmondsin/arc_ca_results/blob/main/google/gemma-2-2b/results_2024-12-01T13-49-44.047418.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"arc_ca\": {\n \
\ \"alias\": \"arc_ca\",\n \"acc,none\": 0.3557347670250896,\n\
\ \"acc_stderr,none\": 0.014336992903670756,\n \"acc_norm,none\"\
: 0.3870967741935484,\n \"acc_norm_stderr,none\": 0.014587077689446054\n\
\ }\n },\n \"arc_ca\": {\n \"alias\": \"arc_ca\",\n \"\
acc,none\": 0.3557347670250896,\n \"acc_stderr,none\": 0.014336992903670756,\n\
\ \"acc_norm,none\": 0.3870967741935484,\n \"acc_norm_stderr,none\"\
: 0.014587077689446054\n }\n}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__arc_ca
data_files:
- split: 2024_12_01T13_49_44.047418
path:
- '**/samples_arc_ca_2024-12-01T13-49-44.047418.jsonl'
- split: latest
path:
- '**/samples_arc_ca_2024-12-01T13-49-44.047418.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can, for instance, do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/arc_ca_results",
name="google__gemma-2-2b__arc_ca",
split="latest"
)
```
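Once loaded, the split holds one record per evaluated sample, so the latest run can be inspected directly. A minimal sketch, reusing the load call above:
```python
from datasets import load_dataset

data = load_dataset(
    "richmondsin/arc_ca_results",
    name="google__gemma-2-2b__arc_ca",
    split="latest"
)
# Print the number of evaluated samples and peek at the first record.
print(len(data))
print(data[0])
```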
## Latest results
These are the [latest results from run 2024-12-01T13-49-44.047418](https://huggingface.co/datasets/richmondsin/arc_ca_results/blob/main/google/gemma-2-2b/results_2024-12-01T13-49-44.047418.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in its "results" and "latest" splits):
```python
{
"all": {
"arc_ca": {
"alias": "arc_ca",
"acc,none": 0.3557347670250896,
"acc_stderr,none": 0.014336992903670756,
"acc_norm,none": 0.3870967741935484,
"acc_norm_stderr,none": 0.014587077689446054
}
},
"arc_ca": {
"alias": "arc_ca",
"acc,none": 0.3557347670250896,
"acc_stderr,none": 0.014336992903670756,
"acc_norm,none": 0.3870967741935484,
"acc_norm_stderr,none": 0.014587077689446054
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
Nachiket-S/stereo_set_dataset_train | Nachiket-S | "2024-12-01T19:18:14Z" | 30 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:13Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 1458381
num_examples: 12687
download_size: 473120
dataset_size: 1458381
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nachiket-S/stereo_set_dataset_test | Nachiket-S | "2024-12-01T19:32:51Z" | 30 | 0 | [
"size_categories:10K<n<100K",
"region:us"
] | null | "2024-12-01T19:18:15Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: test
num_bytes: 4389544
num_examples: 38298
download_size: 1462757
dataset_size: 4389544
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
Nachiket-S/mbias_dataset_train | Nachiket-S | "2024-12-01T19:56:03Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:17Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 19429488
num_examples: 7566
download_size: 2139629
dataset_size: 19429488
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nachiket-S/mbias_dataset_test | Nachiket-S | "2024-12-01T19:56:05Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:19Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: test
num_bytes: 2157120
num_examples: 840
download_size: 238073
dataset_size: 2157120
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
Nachiket-S/train_crows_dataset | Nachiket-S | "2024-12-01T19:56:07Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:21Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 625076
num_examples: 1357
download_size: 89745
dataset_size: 625076
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nachiket-S/test_crows_dataset | Nachiket-S | "2024-12-01T19:56:08Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:23Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: test
num_bytes: 58588
num_examples: 151
download_size: 13117
dataset_size: 58588
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
Nachiket-S/train_nouns_dataset | Nachiket-S | "2024-12-01T19:56:10Z" | 30 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:24Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 1603499
num_examples: 16543
download_size: 343708
dataset_size: 1603499
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nachiket-S/test_nouns_dataset | Nachiket-S | "2024-12-01T19:56:12Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T19:18:26Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: test
num_bytes: 186027
num_examples: 1839
download_size: 37668
dataset_size: 186027
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
taufiqsyed/salami-processed-enriched-clean | taufiqsyed | "2024-12-01T21:04:19Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-01T21:03:46Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: song_id
dtype: string
- name: structure
dtype: string
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 484246151.0
num_examples: 183
- name: eval
num_bytes: 21169204.0
num_examples: 8
download_size: 497422966
dataset_size: 505415355.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: eval
path: data/eval-*
---
|
richmondsin/arc_hi_results | richmondsin | "2024-12-01T21:17:41Z" | 30 | 0 | [
"region:us"
] | null | "2024-12-01T21:17:29Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/arc_hi_results\"\
,\n\tname=\"google__gemma-2-2b__arc_hi\",\n\tsplit=\"latest\"\n)\n```\n\n## Latest\
\ results\n\nThese are the [latest results from run 2024-12-01T16-17-29.326907](https://huggingface.co/datasets/richmondsin/arc_hi_results/blob/main/google/gemma-2-2b/results_2024-12-01T16-17-29.326907.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"arc_hi\": {\n \
\ \"alias\": \"arc_hi\",\n \"acc,none\": 0.27419354838709675,\n\
\ \"acc_stderr,none\": 0.013359850379455064,\n \"acc_norm,none\"\
: 0.3046594982078853,\n \"acc_norm_stderr,none\": 0.013783791363713757\n\
\ }\n },\n \"arc_hi\": {\n \"alias\": \"arc_hi\",\n \"\
acc,none\": 0.27419354838709675,\n \"acc_stderr,none\": 0.013359850379455064,\n\
\ \"acc_norm,none\": 0.3046594982078853,\n \"acc_norm_stderr,none\"\
: 0.013783791363713757\n }\n}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__arc_hi
data_files:
- split: 2024_12_01T16_17_29.326907
path:
- '**/samples_arc_hi_2024-12-01T16-17-29.326907.jsonl'
- split: latest
path:
- '**/samples_arc_hi_2024-12-01T16-17-29.326907.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can, for instance, do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/arc_hi_results",
name="google__gemma-2-2b__arc_hi",
split="latest"
)
```
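For ad-hoc analysis, the loaded split can be converted to a pandas DataFrame. A minimal sketch, assuming pandas is installed alongside `datasets`:
```python
from datasets import load_dataset

data = load_dataset(
    "richmondsin/arc_hi_results",
    name="google__gemma-2-2b__arc_hi",
    split="latest"
)
# Convert the per-sample records of the latest run to a DataFrame.
df = data.to_pandas()
print(df.head())
```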
## Latest results
These are the [latest results from run 2024-12-01T16-17-29.326907](https://huggingface.co/datasets/richmondsin/arc_hi_results/blob/main/google/gemma-2-2b/results_2024-12-01T16-17-29.326907.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in its "results" and "latest" splits):
```python
{
"all": {
"arc_hi": {
"alias": "arc_hi",
"acc,none": 0.27419354838709675,
"acc_stderr,none": 0.013359850379455064,
"acc_norm,none": 0.3046594982078853,
"acc_norm_stderr,none": 0.013783791363713757
}
},
"arc_hi": {
"alias": "arc_hi",
"acc,none": 0.27419354838709675,
"acc_stderr,none": 0.013359850379455064,
"acc_norm,none": 0.3046594982078853,
"acc_norm_stderr,none": 0.013783791363713757
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
Honi086/balancear | Honi086 | "2024-12-02T01:26:16Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-02T01:22:49Z" | ---
license: openrail
---
|
mtruong9/gt_smt_grandstaff_random_10percent_max_700_length | mtruong9 | "2024-12-02T01:48:58Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T01:48:52Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 32040144.430020433
num_examples: 2857
- name: val
num_bytes: 3620965.0439108806
num_examples: 323
- name: test
num_bytes: 5979717.989296436
num_examples: 533
download_size: 30972996
dataset_size: 41640827.46322775
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: val
path: data/val-*
- split: test
path: data/test-*
---
|
indra87g/kamen_rider | indra87g | "2024-12-02T04:24:57Z" | 30 | 0 | [
"language:en",
"license:cc-by-3.0",
"region:us"
] | null | "2024-12-02T04:23:22Z" | ---
license: cc-by-3.0
language:
- en
--- |
Ai1terror/knowledge_base_1 | Ai1terror | "2024-12-02T06:13:34Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-02T06:13:34Z" | ---
license: apache-2.0
---
|
teeses/holiday | teeses | "2024-12-02T08:27:43Z" | 30 | 0 | [
"license:openrail",
"region:us"
] | null | "2024-12-02T08:27:43Z" | ---
license: openrail
---
|
KastanEr/ai_software_image_to_text | KastanEr | "2024-12-02T12:18:59Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-02T12:18:59Z" | ---
license: mit
---
|
nicolynnvila/pre-train_variability_af250 | nicolynnvila | "2024-12-02T12:31:04Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-02T12:22:37Z" | ---
license: mit
---
|
amuvarma/1m-fac-raw-1dups-proc-train-col-clean | amuvarma | "2024-12-02T16:33:14Z" | 30 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T15:32:11Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
splits:
- name: train
num_bytes: 49851548900
num_examples: 2282634
download_size: 21790993194
dataset_size: 49851548900
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
nanosanek/testo | nanosanek | "2024-12-02T23:34:42Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-02T18:06:21Z" | ---
license: apache-2.0
---
|
Carlosgg14/Yuta | Carlosgg14 | "2024-12-02T18:22:57Z" | 30 | 0 | [
"license:openrail",
"region:us"
] | null | "2024-12-02T18:21:52Z" | ---
license: openrail
---
|
marco-schouten/exp1 | marco-schouten | "2024-12-02T18:35:58Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:35:56Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 116903.0
num_examples: 28
download_size: 56123
dataset_size: 116903.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp2 | marco-schouten | "2024-12-02T18:36:02Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:35:59Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 532978.0
num_examples: 111
download_size: 282927
dataset_size: 532978.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
bellomuiz78/knowledgebase | bellomuiz78 | "2024-12-04T00:54:45Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-02T23:46:46Z" | ---
license: mit
---
|
kevinnejad/clevr_val | kevinnejad | "2024-12-03T00:44:17Z" | 30 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:41:51Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: question
dtype: string
- name: answer
dtype: string
- name: filename
dtype: string
- name: question_type
dtype: string
--- |
quandao92/ad-clip-dataset | quandao92 | "2024-12-03T06:03:04Z" | 30 | 0 | [
"license:other",
"region:us"
] | null | "2024-12-03T00:51:53Z" | ---
license: other
license_name: 4inlab
license_link: LICENSE
---
|
inasse23/france | inasse23 | "2024-12-03T02:15:01Z" | 30 | 0 | [
"license:openrail",
"region:us"
] | null | "2024-12-03T02:15:01Z" | ---
license: openrail
---
|
sseilene/bic_logistic_output | sseilene | "2024-12-03T02:55:04Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T02:55:04Z" | ---
license: apache-2.0
---
|
sseilene/sic-logistic-output | sseilene | "2024-12-03T02:56:47Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T02:56:47Z" | ---
license: apache-2.0
---
|
yessu/njdg | yessu | "2024-12-03T06:14:11Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-03T05:54:53Z" | ---
license: mit
---
|
whllvlzc/ContinuePretrain | whllvlzc | "2024-12-03T08:40:31Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T08:40:31Z" | ---
license: apache-2.0
---
|
MaiolS/AML | MaiolS | "2024-12-03T09:03:50Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T08:52:15Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': Picasso
'1': Catherine_Abel
'2': Manet
'3': Andy_Warhol
'4': Van_Gogh
'5': Velazquez
- name: id
dtype: int32
- name: loss_bw
dtype: float32
- name: loss_colored
dtype: float32
- name: canny_edges
dtype: float32
- name: similarity
dtype: float32
splits:
- name: train
num_bytes: 10472671.75
num_examples: 163
- name: test
num_bytes: 4376809.25
num_examples: 41
download_size: 14874637
dataset_size: 14849481.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Krasavchik221/Pavlo | Krasavchik221 | "2024-12-03T08:53:17Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T08:53:17Z" | ---
license: apache-2.0
---
|
interhin/fora-documentation | interhin | "2024-12-03T11:19:34Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-03T11:09:12Z" | ---
license: mit
---
|
xenn-11/LLMa-122 | xenn-11 | "2024-12-03T13:31:06Z" | 30 | 0 | [
"license:epl-1.0",
"region:us"
] | null | "2024-12-03T13:31:06Z" | ---
license: epl-1.0
---
|
Forcewithme/dassartdadas | Forcewithme | "2024-12-04T15:25:57Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T14:06:58Z" | ---
license: apache-2.0
---
|
jmgauzan/persoJMG | jmgauzan | "2024-12-03T14:48:09Z" | 30 | 0 | [
"license:bigscience-bloom-rail-1.0",
"region:us"
] | null | "2024-12-03T14:48:09Z" | ---
license: bigscience-bloom-rail-1.0
---
|
kelbrown20/barbite | kelbrown20 | "2024-12-03T15:00:56Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T15:00:56Z" | ---
license: apache-2.0
---
|
ttaylor99/fastmri-sample | ttaylor99 | "2024-12-04T06:12:25Z" | 30 | 0 | [
"license:unknown",
"region:us"
] | null | "2024-12-03T16:23:25Z" | ---
license: unknown
---
|
Shalini731/tmp | Shalini731 | "2024-12-03T19:21:31Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T16:26:33Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 15543218.0
num_examples: 34
download_size: 15503794
dataset_size: 15543218.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
annalogiacomo67/ANNA75 | annalogiacomo67 | "2024-12-03T17:30:31Z" | 30 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-03T17:23:15Z" | ---
license: apache-2.0
---
|
rufaelfekadu/common_voice_16_1_hi_pseudo_labelled | rufaelfekadu | "2024-12-03T18:50:09Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T18:14:07Z" | ---
dataset_info:
config_name: ar
features:
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: condition_on_prev
sequence: int64
- name: whisper_transcript
dtype: string
splits:
- name: train
num_bytes: 3736265607.264
num_examples: 4236
- name: validation
num_bytes: 1469594231.57
num_examples: 1670
- name: test
num_bytes: 1460576354.125
num_examples: 1663
download_size: 5762526833
dataset_size: 6666436192.959
configs:
- config_name: ar
data_files:
- split: train
path: ar/train-*
- split: validation
path: ar/validation-*
- split: test
path: ar/test-*
---
|
MLRockswow/Where_In_Europe_Am_I | MLRockswow | "2024-12-09T22:51:57Z" | 30 | 0 | [
"language:en",
"size_categories:1K<n<10K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-03T19:47:37Z" | ---
language:
- en
size_categories:
- 1K<n<10K
--- |
Yoooooo887/sdfsdfdsf | Yoooooo887 | "2024-12-03T21:03:03Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-03T21:02:58Z" | ---
license: mit
---
|
cavalolindo/Ricardo | cavalolindo | "2024-12-04T00:33:25Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-04T00:32:26Z" | ---
license: openrail
---
|
ramdaZ/HoshinoAI | ramdaZ | "2024-12-17T19:44:23Z" | 30 | 0 | [
"license:llama3.2",
"region:us"
] | null | "2024-12-04T01:03:53Z" | ---
license: llama3.2
---
|
br3nii/br3nii | br3nii | "2024-12-04T20:23:15Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-04T01:44:46Z" | ---
license: openrail
---
|
SCBrazil/JoTTa | SCBrazil | "2024-12-04T01:51:46Z" | 30 | 0 | [
"license:creativeml-openrail-m",
"region:us"
] | null | "2024-12-04T01:51:46Z" | ---
license: creativeml-openrail-m
---
|
suriya324/ecg_arryhthmia | suriya324 | "2024-12-04T04:26:29Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-04T04:26:28Z" | ---
license: apache-2.0
---
|
hhhFuture/class_stand | hhhFuture | "2024-12-04T06:50:17Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-04T06:50:17Z" | ---
license: apache-2.0
---
|
BrownEnergy/secchi_depth | BrownEnergy | "2024-12-05T12:55:40Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T07:59:13Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: sd_depth
dtype: float64
splits:
- name: train
num_bytes: 70836585.956
num_examples: 1428
download_size: 70006816
dataset_size: 70836585.956
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RuishiCh0314/ASCEND-mixed-to-chinese-translation | RuishiCh0314 | "2024-12-04T08:26:27Z" | 30 | 1 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:26:10Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
- name: duration
dtype: float32
- name: language
dtype: string
- name: original_speaker_id
dtype: int64
- name: session_id
dtype: int64
- name: topic
dtype: string
- name: chinese_translation
dtype: string
splits:
- name: train
num_bytes: 418006789.846
num_examples: 2739
download_size: 416812818
dataset_size: 418006789.846
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nachiket-S/LLaMa_3B_IsCoT_DebiasingInstruction | Nachiket-S | "2024-12-04T08:32:45Z" | 30 | 0 | [
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:32:44Z" | ---
dataset_info:
features:
- name: file_name
dtype: 'null'
- name: paragraph
dtype: 'null'
splits:
- name: inference
num_bytes: 0
num_examples: 0
download_size: 756
dataset_size: 0
configs:
- config_name: default
data_files:
- split: inference
path: data/inference-*
---
|
Nachiket-S/LLaMa_3B_NoCoT_DebiasingInstruction | Nachiket-S | "2024-12-04T08:33:45Z" | 30 | 0 | [
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:33:43Z" | ---
dataset_info:
features:
- name: file_name
dtype: 'null'
- name: paragraph
dtype: 'null'
splits:
- name: inference
num_bytes: 0
num_examples: 0
download_size: 756
dataset_size: 0
configs:
- config_name: default
data_files:
- split: inference
path: data/inference-*
---
|
walentines/car_dataset_stable_diffusion | walentines | "2024-12-04T09:05:18Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-04T09:05:14Z" | ---
license: apache-2.0
---
|
Telkwevr/Multishot-AD-Bench | Telkwevr | "2024-12-04T12:57:44Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-04T09:48:59Z" | ---
license: mit
---
|
TKayWortmann/shoe-sales-ds-train | TKayWortmann | "2024-12-04T10:21:18Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T10:21:05Z" | ---
dataset_info:
features:
- name: image
sequence:
sequence:
sequence: float32
- name: sales
dtype: float64
splits:
- name: train
num_bytes: 536478888
num_examples: 887
download_size: 45090710
dataset_size: 536478888
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
VargheseP/test_dataset_area_dsc | VargheseP | "2024-12-04T11:20:34Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T11:19:42Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: bbx
dtype: image
- name: dist
dtype: image
- name: ellipse
dtype: image
- name: basic
dtype: string
- name: artsy
dtype: string
- name: caption
dtype: string
- name: mask
dtype: image
splits:
- name: train
num_bytes: 84967003.0
num_examples: 931
download_size: 82073779
dataset_size: 84967003.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AdityasArsenal/Yoga_poses | AdityasArsenal | "2024-12-04T12:08:55Z" | 30 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T11:59:51Z" | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 148154.0
num_examples: 1
download_size: 149558
dataset_size: 148154.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
FacuNdito/MPRAM | FacuNdito | "2024-12-04T20:56:20Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-04T12:21:20Z" | ---
license: apache-2.0
---
configs:
- config_name: default
data_files:
- split: train
path: "data.csv"
---
|
AdityasArsenal/YogaDataSet | AdityasArsenal | "2024-12-07T14:56:05Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T12:42:09Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': downdog
'1': goddess
'2': plank
'3': tree
'4': warrior2
splits:
- name: train
num_bytes: 417267797.20536083
num_examples: 1813
- name: test
num_bytes: 69214487.65463917
num_examples: 321
download_size: 531037157
dataset_size: 486482284.86
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Newvel/utkface_filtered_processed | Newvel | "2024-12-04T12:58:11Z" | 30 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T12:50:25Z" | ---
dataset_info:
features:
- name: pixel_values
sequence:
sequence:
sequence: float32
- name: label
dtype: int64
splits:
- name: train
num_bytes: 14337352920
num_examples: 23705
download_size: 3114082212
dataset_size: 14337352920
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Sebastianycx/alpaca_train_cleaned | Sebastianycx | "2024-12-04T13:18:53Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-04T13:18:53Z" | ---
license: mit
---
|
Forcewithme/mkldsajsers | Forcewithme | "2024-12-04T15:53:47Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-04T15:28:06Z" | ---
license: apache-2.0
---
|
geodevwalid23/palm_tree_dataset | geodevwalid23 | "2024-12-04T17:49:09Z" | 30 | 0 | [
"license:unknown",
"region:us"
] | null | "2024-12-04T17:09:03Z" | ---
license: unknown
---
|
KienNgyuen/DMS_dataset | KienNgyuen | "2024-12-04T19:44:39Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-04T19:39:11Z" | ---
license: mit
---
|
Honi086/Billie_Ellish | Honi086 | "2024-12-04T19:53:43Z" | 30 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-04T19:52:25Z" | ---
license: openrail
---
|
amuvarma/smoltalk-audio-speech-raw-1dups-6rows-proc | amuvarma | "2024-12-05T05:09:04Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T21:25:03Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 223738725
num_examples: 2260
download_size: 60200697
dataset_size: 223738725
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LuanaMARD/qadataset | LuanaMARD | "2024-12-04T21:31:08Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-04T21:30:33Z" | ---
license: mit
---
|
andreaparker/wiki-ss-corpus-subset | andreaparker | "2024-12-04T23:50:09Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T23:49:53Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: docid
dtype: string
- name: text
dtype: string
- name: title
dtype: string
splits:
- name: train
num_bytes: 318879929.0
num_examples: 1000
download_size: 317919193
dataset_size: 318879929.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mikan5916/MyBench | mikan5916 | "2024-12-05T00:43:57Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-05T00:43:57Z" | ---
license: mit
---
|
JasmineQiuqiu/diagrams_with_captions | JasmineQiuqiu | "2024-12-05T01:08:04Z" | 30 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T01:07:45Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 323383371.012
num_examples: 3524
download_size: 346179878
dataset_size: 323383371.012
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
1398listener/tmp_4 | 1398listener | "2024-12-05T15:13:27Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-05T06:22:06Z" | ---
license: apache-2.0
---
|
NewBridge/CVdataset | NewBridge | "2024-12-05T06:50:18Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-05T06:50:18Z" | ---
license: mit
---
|
suraj164/samplepdf | suraj164 | "2024-12-05T07:38:56Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-05T07:37:41Z" | ---
license: apache-2.0
---
|
WDD115/test | WDD115 | "2024-12-05T08:05:49Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-05T08:05:49Z" | ---
license: apache-2.0
---
|
KienNgyuen/seashipHaze_VOC | KienNgyuen | "2024-12-08T11:00:17Z" | 30 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-05T10:28:11Z" | ---
license: mit
---
|
docinquire/test | docinquire | "2024-12-05T10:39:12Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-05T10:39:12Z" | ---
license: apache-2.0
---
|
arshiasoori/test | arshiasoori | "2024-12-05T12:11:28Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-05T12:11:28Z" | ---
license: apache-2.0
---
|
alu0101512297/calidad_del_aire_y_climatologia | alu0101512297 | "2024-12-05T13:12:34Z" | 30 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-05T13:12:34Z" | ---
license: apache-2.0
---
|