---
dataset_info:
- config_name: en
  features:
  - name: id
    dtype: string
  - name: type
    dtype: string
  - name: body
    dtype: string
  - name: ideal_answer
    sequence: string
  - name: exact_answer
    sequence: string
  - name: snippets
    sequence: string
  - name: documents
    sequence: string
  - name: triples
    list:
    - name: p
      dtype: string
    - name: s
      dtype: string
    - name: o
      dtype: string
  - name: concepts
    sequence: string
  splits:
  - name: train
    num_bytes: 10827410
    num_examples: 2251
  - name: test
    num_bytes: 1709411
    num_examples: 500
  download_size: 5185124
  dataset_size: 12536821
- config_name: es
  features:
  - name: id
    dtype: string
  - name: type
    dtype: string
  - name: body
    dtype: string
  - name: ideal_answer
    sequence: string
  - name: exact_answer
    sequence: string
  - name: snippets
    sequence: string
  - name: documents
    sequence: string
  - name: triples
    list:
    - name: p
      dtype: string
    - name: s
      dtype: string
    - name: o
      dtype: string
  - name: concepts
    sequence: string
  splits:
  - name: train
    num_bytes: 11694723
    num_examples: 2251
  - name: test
    num_bytes: 1808733
    num_examples: 500
  download_size: 5417329
  dataset_size: 13503456
- config_name: fr
  features:
  - name: id
    dtype: string
  - name: type
    dtype: string
  - name: body
    dtype: string
  - name: ideal_answer
    sequence: string
  - name: exact_answer
    sequence: string
  - name: snippets
    sequence: string
  - name: documents
    sequence: string
  - name: triples
    list:
    - name: p
      dtype: string
    - name: s
      dtype: string
    - name: o
      dtype: string
  - name: concepts
    sequence: string
  splits:
  - name: train
    num_bytes: 11760491
    num_examples: 2251
  - name: test
    num_bytes: 1799313
    num_examples: 500
  download_size: 5402467
  dataset_size: 13559804
- config_name: it
  features:
  - name: id
    dtype: string
  - name: type
    dtype: string
  - name: body
    dtype: string
  - name: ideal_answer
    sequence: string
  - name: exact_answer
    sequence: string
  - name: snippets
    sequence: string
  - name: documents
    sequence: string
  - name: triples
    list:
    - name: p
      dtype: string
    - name: s
      dtype: string
    - name: o
      dtype: string
  - name: concepts
    sequence: string
  splits:
  - name: train
    num_bytes: 11241823
    num_examples: 2251
  - name: test
    num_bytes: 1737683
    num_examples: 500
  download_size: 5320580
  dataset_size: 12979506
configs:
- config_name: en
  data_files:
  - split: train
    path: en/train-*
  - split: test
    path: en/test-*
- config_name: es
  data_files:
  - split: train
    path: es/train-*
  - split: test
    path: es/test-*
- config_name: fr
  data_files:
  - split: train
    path: fr/train-*
  - split: test
    path: fr/test-*
- config_name: it
  data_files:
  - split: train
    path: it/train-*
  - split: test
    path: it/test-*
license: apache-2.0
task_categories:
- question-answering
- summarization
language:
- en
- es
- fr
- it
tags:
- biology
- medical
pretty_name: Multilingual BioASQ-6B
---
|
|
|
|
|
<p align="center">
<br>
<img src="http://www.ixa.eus/sites/default/files/anitdote.png" style="width: 30%;">
<h2 align="center">Multilingual BioASQ-6B</h2>
<br>
</p>
|
|
|
<p align="justify">
We translate the BioASQ-6B English Question Answering dataset to generate parallel French, Italian and Spanish versions using the NLLB200 3B-parameter model. For more information, see the original task description: [http://bioasq.org/participate/challenges_year_6](http://bioasq.org/participate/challenges_year_6)

We translate the `body`, `snippets`, `ideal_answer` and `exact_answer` fields. We have validated the quality of the `ideal_answer` field; however, the `exact_answer` field can contain translation artifacts, as NLLB200 often produces low-quality translations of single-word sentences.
</p>
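
Below is a minimal loading sketch with the 🤗 `datasets` library. The repository identifier `HiTZ/Multilingual-BioASQ-6B` is a placeholder assumption, not confirmed by this card; substitute the actual Hub ID of this dataset. Each language (`en`, `es`, `fr`, `it`) is a separate configuration with `train` and `test` splits.

```python
# Minimal sketch based on the configs and splits declared in this card.
# "HiTZ/Multilingual-BioASQ-6B" is a placeholder repo ID: replace with the real Hub ID.
from datasets import load_dataset

dataset = load_dataset("HiTZ/Multilingual-BioASQ-6B", "es")  # configs: en, es, fr, it

train = dataset["train"]  # 2251 examples per language
test = dataset["test"]    # 500 examples per language

example = train[0]
print(example["type"])          # BioASQ question type (yesno, factoid, list, summary)
print(example["body"])          # translated question
print(example["ideal_answer"])  # translated long-form answer(s)
print(example["exact_answer"])  # translated short answer(s); may contain NLLB200 artifacts
```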
|
|
|
- 📖 Paper: [Medical mT5: An Open-Source Multilingual Text-to-Text LLM for The Medical Domain. In LREC-COLING 2024](https://arxiv.org/abs/2404.07613)
- 🌐 Project Website: [https://univ-cotedazur.eu/antidote](https://univ-cotedazur.eu/antidote)
- Original Dataset: [http://bioasq.org/participate/challenges_year_6](http://bioasq.org/participate/challenges_year_6)
- Funding: CHIST-ERA XAI 2019 call. Antidote (PCI2020-120717-2) funded by MCIN/AEI/10.13039/501100011033 and by European Union NextGenerationEU/PRTR
|
|
|
## Citation

```bibtex
@inproceedings{garciaferrero2024medical,
  title={Medical mT5: An Open-Source Multilingual Text-to-Text LLM for The Medical Domain},
  author={Iker García-Ferrero and Rodrigo Agerri and Aitziber Atutxa Salazar and Elena Cabrio and Iker de la Iglesia and Alberto Lavelli and Bernardo Magnini and Benjamin Molinet and Johana Ramirez-Romero and German Rigau and Jose Maria Villa-Gonzalez and Serena Villata and Andrea Zaninello},
  year={2024},
  booktitle={Proceedings of LREC-COLING}
}
```