aes_enem_dataset / aes_enem_dataset.py
abarbosa's picture
finish dataset parsing
059e5ac
raw
history blame
25 kB
# Copyright 2023 Andre Barbosa, Igor Caetano Silveira & The HuggingFace Datasets Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import math
import os
import re
import datasets
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from tqdm.auto import tqdm
np.random.seed(42)  # Fixed seed so the prompt shuffling in _generate_splits is reproducible
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
TODO
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# Archive per config. Note both sourceA configs share the same archive; only
# sourceB is fetched from the remote Hub URL.
_URLS = {
    "sourceAOnly": "sourceAWithGraders.tar.gz",
    "sourceAWithGraders": "sourceAWithGraders.tar.gz",
    "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
}
# Prompt folders skipped entirely during HTML parsing (see HTMLParser.parse).
PROMPTS_TO_IGNORE = [
    "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
    "carta-convite-discutir-discriminacao-na-escola",
    "informacao-no-rotulo-de-produtos-transgenicos",
]
# Essays to Ignore
# Individual essays ("id_prompt/id.html") removed during post-processing.
ESSAY_TO_IGNORE = [
    "direitos-em-conflito-liberdade-de-expressao-e-intimidade/2.html",
    "terceirizacao-avanco-ou-retrocesso/2.html",
    "artes-e-educacao-fisica-opcionais-ou-obrigatorias/2.html",
    "violencia-e-drogas-o-papel-do-usuario/0.html",
    "internacao-compulsoria-de-dependentes-de-crack/0.html",
]
# Column order written by HTMLParser.parse and read back by _generate_examples.
CSV_HEADER = [
    "id",
    "id_prompt",
    "title",
    "essay",
    "grades",
    "general",
    "specific",
    "essay_year",
]
class AesEnemDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for ENEM-style essays used for Automated Essay Scoring.

    Configurations:
      * ``sourceAOnly`` / ``sourceAWithGraders`` -- essays parsed from source A,
        split into train/validation/test at the *prompt* level (splits never
        share a prompt). The ``WithGraders`` variant additionally appends rows
        with the grades of two extra graders (GraderA.csv / GraderB.csv).
      * ``sourceB`` -- essays parsed from source B, exposed as a single
        ``full`` split.
    """
    VERSION = datasets.Version("0.0.1")
    # You will be able to load one or the other configurations in the following list with
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sourceAOnly", version=VERSION, description="TODO"),
        datasets.BuilderConfig(
            name="sourceAWithGraders", version=VERSION, description="TODO"
        ),
        datasets.BuilderConfig(
            name="sourceB",
            version=VERSION,
            description="TODO",
        ),
    ]
    def _info(self):
        """Return the dataset metadata; the feature schema is shared by all configs."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "id_prompt": datasets.Value("string"),
                "essay_title": datasets.Value("string"),
                "essay_text": datasets.Value("string"),
                # Concept grades plus their total (source A: 5 concepts + sum).
                "grades": datasets.Sequence(datasets.Value("int16")),
                "essay_year": datasets.Value("int16"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
    def _post_process_dataframe(self, filepath):
        """Normalize and filter the parsed essay CSV, rewriting it in place.

        Adds a helper ``mapped_year`` column (used later for splitting),
        normalizes the serialized ``grades`` string into a list of ints
        (dropping rows with off-scale concept grades), and removes the
        known-bad essays listed in ``ESSAY_TO_IGNORE``.
        """
        def map_year(year):
            # Pool all older essays into a single "<=2017" bucket.
            if year <= 2017:
                return "<=2017"
            return str(year)
        def normalize_grades(grades):
            # ``grades`` arrives serialized as a string like "[120, 120, ..., 600]".
            grades = grades.strip("[]").split(", ")
            grade_mapping = {"0.0": 0, "20": 40}
            # We will remove the rows that match the criteria below
            if any(
                single_grade
                in grades[:-1]  # we ignore the sum, and only check the concepts
                for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
            ):
                return None
            # Use the mapping to transform grades, ignoring the last grade
            mapped_grades = [
                int(grade_mapping.get(grade_concept, grade_concept))
                for grade_concept in grades[:-1]
            ]
            # Calculate and append the sum of the mapped grades as the last element
            mapped_grades.append(sum(mapped_grades))
            return mapped_grades
        df = pd.read_csv(filepath)
        df["general"] = df["general"].fillna("")
        df["essay_year"] = df["essay_year"].astype("int")
        df["mapped_year"] = df["essay_year"].apply(map_year)
        # Rows whose grades normalize to None are dropped just below.
        df["grades"] = df["grades"].apply(normalize_grades)
        df = df.dropna(subset=["grades"])
        df = df[
            ~(df["id_prompt"] + "/" + df["id"]).isin(ESSAY_TO_IGNORE)
        ]  # arbitrary removal of zero graded essays
        df.to_csv(filepath, index=False)
    def _split_generators(self, dl_manager):
        """Download/extract the archive, post-process it, and declare the splits."""
        urls = _URLS[self.config.name]
        extracted_files = dl_manager.download_and_extract({self.config.name: urls})
        html_parser = self._process_html_files(extracted_files)
        if "sourceA" in self.config.name:
            self._post_process_dataframe(html_parser.sourceA)
            # Writes train.csv / validation.csv / test.csv next to sourceA.csv.
            self._generate_splits(html_parser.sourceA)
            folder_sourceA = "/".join((html_parser.sourceA).split("/")[:-1])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": os.path.join(folder_sourceA, "train.csv"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": os.path.join(folder_sourceA, "validation.csv"),
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(folder_sourceA, "test.csv"),
                        "split": "test",
                    },
                ),
            ]
        elif self.config.name == "sourceB":
            # Source B is not split; the whole file is exposed as one "full" split.
            self._post_process_dataframe(html_parser.sourceB)
            return [
                datasets.SplitGenerator(
                    name="full",
                    gen_kwargs={
                        "filepath": html_parser.sourceB,
                        "split": "full",
                    },
                ),
            ]
    def _process_html_files(self, paths_dict):
        """Parse the extracted HTML tree into a CSV via :class:`HTMLParser`."""
        html_parser = HTMLParser(paths_dict)
        html_parser.parse(self.config.name)
        return html_parser
    def _parse_graders_data(self, dirname):
        """Load the two extra graders' CSVs, mapping 0-5 concept scores to the 0-200 scale.

        NOTE(review): a score outside the 0-5 mapping becomes ``None`` and the
        subsequent ``sum`` would raise TypeError -- this assumes the grader
        files are always well formed; confirm against the shipped CSVs.
        """
        map_grades = {"0": 0, "1": 40, "2": 80, "3": 120, "4": 160, "5": 200}
        def map_list(grades_list):
            # Map each concept score and append the total as the last element.
            result = [map_grades.get(item, None) for item in grades_list]
            sum_grades = sum(result)
            result.append(sum_grades)
            return result
        grader_a = pd.read_csv(f"{dirname}/GraderA.csv")
        grader_b = pd.read_csv(f"{dirname}/GraderB.csv")
        for grader in [grader_a, grader_b]:
            # Grades are serialized as "[...]" strings; tokenize before mapping.
            grader.grades = grader.grades.apply(lambda x: x.strip("[]").split(", "))
            grader.grades = grader.grades.apply(map_list)
        return grader_a, grader_b
    def _generate_splits(self, filepath: str, train_size=0.7):
        """Split essays into train/validation/test CSVs at the *prompt* level.

        Prompts are shuffled inside each ``mapped_year`` bucket (seeded at
        module import), so all essays of one prompt land in the same split.
        For ``sourceAWithGraders``, each split is augmented with the matching
        rows from the two extra graders. The resulting CSVs are written next
        to ``filepath``.
        """
        df = pd.read_csv(filepath)
        buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
        df.drop("mapped_year", axis=1, inplace=True)
        train_set = []
        val_set = []
        test_set = []
        for year, prompts in buckets.items():
            np.random.shuffle(prompts)
            num_prompts = len(prompts)
            # With 3 or fewer prompts, give one prompt to each split.
            # NOTE(review): this indexes prompts[0..2], so it actually assumes
            # *exactly at least* 3 prompts per bucket; fewer would raise
            # IndexError -- confirm every year bucket has >= 3 prompts.
            if num_prompts <= 3:
                train_set.append(df[df["id_prompt"].isin([prompts[0]])])
                val_set.append(df[df["id_prompt"].isin([prompts[1]])])
                test_set.append(df[df["id_prompt"].isin([prompts[2]])])
                continue
            # Determine the number of prompts for each set based on train_size and remaining prompts
            num_train = math.floor(num_prompts * train_size)
            num_val_test = num_prompts - num_train
            num_val = num_val_test // 2
            num_test = num_val_test - num_val
            # Assign prompts to each set
            train_set.append(df[df["id_prompt"].isin(prompts[:num_train])])
            val_set.append(
                df[df["id_prompt"].isin(prompts[num_train : (num_train + num_val)])]
            )
            test_set.append(
                df[
                    df["id_prompt"].isin(
                        prompts[
                            (num_train + num_val) : (num_train + num_val + num_test)
                        ]
                    )
                ]
            )
        # Convert lists of groups to DataFrames
        train_df = pd.concat(train_set)
        val_df = pd.concat(val_set)
        test_df = pd.concat(test_set)
        dirname = os.path.dirname(filepath)
        if self.config.name == "sourceAWithGraders":
            grader_a, grader_b = self._parse_graders_data(dirname)
            # Append grader A/B rows for the essays already present in each split;
            # inner merges keep only essays that exist in both frames.
            grader_a_data = pd.merge(
                train_df[["id", "id_prompt"]],
                grader_a,
                on=["id", "id_prompt"],
                how="inner",
            )
            grader_b_data = pd.merge(
                train_df[["id", "id_prompt"]],
                grader_b,
                on=["id", "id_prompt"],
                how="inner",
            )
            train_df = pd.concat([train_df, grader_a_data])
            train_df = pd.concat([train_df, grader_b_data])
            grader_a_data = pd.merge(
                val_df[["id", "id_prompt"]],
                grader_a,
                on=["id", "id_prompt"],
                how="inner",
            )
            grader_b_data = pd.merge(
                val_df[["id", "id_prompt"]],
                grader_b,
                on=["id", "id_prompt"],
                how="inner",
            )
            val_df = pd.concat([val_df, grader_a_data])
            val_df = pd.concat([val_df, grader_b_data])
            grader_a_data = pd.merge(
                test_df[["id", "id_prompt"]],
                grader_a,
                on=["id", "id_prompt"],
                how="inner",
            )
            grader_b_data = pd.merge(
                test_df[["id", "id_prompt"]],
                grader_b,
                on=["id", "id_prompt"],
                how="inner",
            )
            test_df = pd.concat([test_df, grader_a_data])
            test_df = pd.concat([test_df, grader_b_data])
        # Data Validation Assertions: the three splits must not share any prompt.
        assert (
            len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
        ), "Overlap between train and val id_prompt"
        assert (
            len(set(train_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
        ), "Overlap between train and test id_prompt"
        assert (
            len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
        ), "Overlap between val and test id_prompt"
        train_df.to_csv(f"{dirname}/train.csv", index=False)
        val_df.to_csv(f"{dirname}/validation.csv", index=False)
        test_df.to_csv(f"{dirname}/test.csv", index=False)
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yield ``(index, example)`` pairs from one split CSV."""
        with open(filepath, encoding="utf-8") as csvfile:
            next(csvfile)  # skip the header row; fieldnames are supplied explicitly
            csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
            for i, row in enumerate(csv_reader):
                # Grades were serialized as "[a, b, ...]"; decode back into a list.
                grades = row["grades"].strip("[]").split(", ")
                yield i, {
                    "id": row["id"],
                    "id_prompt": row["id_prompt"],
                    "essay_title": row["title"],
                    "essay_text": row["essay"],
                    "grades": grades,
                    "essay_year": row["essay_year"],
                }
class HTMLParser:
    """Parse the downloaded essay HTML trees into one CSV per source.

    Two distinct page layouts are handled ("source A" and "source B"); which
    one is active is decided by whichever of ``self.sourceA`` /
    ``self.sourceB`` was set by :meth:`parse`.
    """
    def __init__(self, paths_dict):
        # Mapping of config name -> extracted archive root path.
        self.paths_dict = paths_dict
        # Output CSV paths; exactly one is set by parse(), based on the config.
        self.sourceA = None
        self.sourceB = None
    def apply_soup(self, filepath, num):
        """Read the HTML file ``num`` inside directory ``filepath`` and return its soup."""
        # Context manager guarantees the handle is closed (the original
        # implementation leaked the file descriptor).
        with open(os.path.join(filepath, num), "r", encoding="utf8") as html_file:
            content = html_file.read()
        return BeautifulSoup(content, "html.parser")
    def _get_title(self, soup):
        """Extract the essay title, handling both page layouts."""
        if self.sourceA:
            title = soup.find("div", class_="container-composition")
            if title is None:
                # Fallback layout: title lives in an <h1> instead.
                title = soup.find("h1", class_="pg-color10").get_text()
            else:
                title = title.h2.get_text()
                title = title.replace("\xa0", "")  # drop non-breaking spaces
            return title.replace(";", ",")  # keep the CSV delimiter-safe
        elif self.sourceB:
            title = soup.find("h1", class_="titulo-conteudo").get_text().strip()
            # Bug fix: str.strip("- Banco de redações") strips a *character
            # set* and could eat trailing letters of the title itself; remove
            # the literal suffix instead.
            suffix = "- Banco de redações"
            if title.endswith(suffix):
                title = title[: -len(suffix)].strip()
            return title
    def _get_grades(self, soup):
        """Extract the grade list (concept grades, plus total for source A)."""
        if self.sourceA:
            grades = soup.find("section", class_="results-table")
            final_grades = []
            if grades is not None:
                grades = grades.find_all("span", class_="points")
                assert len(grades) == 6, f"Missing grades: {len(grades)}"
                for single_grade in grades:
                    grade = int(single_grade.get_text())
                    final_grades.append(grade)
                # Last value on the page is the total; sanity-check it.
                assert final_grades[-1] == sum(
                    final_grades[:-1]
                ), "Grading sum is not making sense"
            else:
                # Fallback layout: grades live in a table, decimals use commas.
                grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
                grades_sum = float(
                    soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
                )
                grades = grades.find_all("td")[:10]
                # Every other cell (odd indices) holds a concept grade.
                for idx in range(1, 10, 2):
                    grade = float(grades[idx].get_text().replace(",", "."))
                    final_grades.append(grade)
                assert grades_sum == sum(
                    final_grades
                ), "Grading sum is not making sense"
                final_grades.append(grades_sum)
            return final_grades
        elif self.sourceB:
            table = soup.find("table", {"id": "redacoes_corrigidas"})
            grades = table.find_all("td", class_="simple-td")
            grades = grades[3:]  # first three cells are not grades
            result = []
            for single_grade in grades:
                result.append(int(single_grade.get_text()))
            return result
    def _get_general_comment(self, soup):
        """Extract the grader's general comment (source A only; empty for B)."""
        if self.sourceA:
            def get_general_comment_aux(soup):
                # Try a cascade of known layouts, most common first.
                result = soup.find("article", class_="list-item c")
                if result is not None:
                    result = result.find("div", class_="description")
                    return result.get_text()
                else:
                    result = soup.find("p", style="margin: 0px 0px 11px;")
                    if result is not None:
                        return result.get_text()
                    else:
                        result = soup.find("p", style="margin: 0px;")
                        if result is not None:
                            return result.get_text()
                        else:
                            result = soup.find(
                                "p", style="margin: 0px; text-align: justify;"
                            )
                            if result is not None:
                                return result.get_text()
                            else:
                                return ""
            text = soup.find("div", class_="text")
            if text is not None:
                text = text.find("p")
                if (text is None) or (len(text.get_text()) < 2):
                    return get_general_comment_aux(soup)
                return text.get_text()
            else:
                return get_general_comment_aux(soup)
        elif self.sourceB:
            return ""
    def _get_specific_comment(self, soup, general_comment):
        """Extract per-item grader comments, excluding the general comment (source A)."""
        if self.sourceA:
            result = soup.find("div", class_="text")
            cms = []
            if result is not None:
                result = result.find_all("li")
                if result != []:
                    for item in result:
                        text = item.get_text()
                        if text != "\xa0":
                            cms.append(text)
                else:
                    # No list items; comments are plain paragraphs instead.
                    result = soup.find("div", class_="text").find_all("p")
                    for item in result:
                        text = item.get_text()
                        if text != "\xa0":
                            cms.append(text)
            else:
                result = soup.find_all("article", class_="list-item c")
                if len(result) < 2:
                    return ["First if"]
                result = result[1].find_all("p")
                for item in result:
                    text = item.get_text()
                    if text != "\xa0":
                        cms.append(text)
            specific_comment = cms.copy()
            # The general comment sometimes appears among the specifics; drop it.
            if general_comment in specific_comment:
                specific_comment.remove(general_comment)
            if (len(specific_comment) > 1) and (len(specific_comment[0]) < 2):
                specific_comment = specific_comment[1:]
            return self._clean_list(specific_comment)
        elif self.sourceB:
            return ""
    def _get_essay(self, soup):
        """Extract the essay body text, stripping grader correction markup."""
        if self.sourceA:
            essay = soup.find("div", class_="text-composition")
            result = []
            if essay is not None:
                essay = essay.find_all("p")
                # Remove inline correction spans before reading the text.
                for f in essay:
                    while f.find("span", style="color:#00b050") is not None:
                        f.find("span", style="color:#00b050").decompose()
                    while f.find("span", class_="certo") is not None:
                        f.find("span", class_="certo").decompose()
                for paragraph in essay:
                    result.append(paragraph.get_text())
            else:
                essay = soup.find("div", {"id": "texto"})
                essay.find("section", class_="list-items").decompose()
                essay = essay.find_all("p")
                for f in essay:
                    while f.find("span", class_="certo") is not None:
                        f.find("span", class_="certo").decompose()
                for paragraph in essay:
                    result.append(paragraph.get_text())
            return " ".join(self._clean_list(result))
        elif self.sourceB:
            table = soup.find("article", class_="texto-conteudo entire")
            table = soup.find("div", class_="area-redacao-corrigida")
            if table is None:
                result = None
            else:
                # Spans carry correction markup; drop them all first.
                for span in soup.find_all("span"):
                    span.decompose()
                result = table.find_all("p")
                result = " ".join(
                    [paragraph.get_text().strip() for paragraph in result]
                )
            return result
    def _get_essay_year(self, soup):
        """Extract the essay year from the prompt page text.

        NOTE(review): returns a string for source A, and a string or -1 for
        source B -- callers normalize via astype(int) downstream.
        """
        if self.sourceA:
            pattern = r"redações corrigidas - \w+/\d+"
            first_occurrence = re.search(pattern, soup.get_text().lower())
            matched_url = first_occurrence.group(0) if first_occurrence else None
            year_pattern = r"\d{4}"
            return re.search(year_pattern, matched_url).group(0)
        elif self.sourceB:
            pattern = r"Enviou seu texto em.*?(\d{4})"
            match = re.search(pattern, soup.get_text())
            return match.group(1) if match else -1
    def _clean_title(self, title):
        """Recursively remove "[...]" annotations from source A titles."""
        if self.sourceA:
            smaller_index = title.find("[")
            if smaller_index == -1:
                return title
            else:
                bigger_index = title.find("]")
                new_title = title[:smaller_index] + title[bigger_index + 1 :]
                return self._clean_title(new_title.replace("  ", " "))
        elif self.sourceB:
            return title
    def _clean_list(self, items):
        """Normalize whitespace/punctuation in each phrase, dropping empty ones.

        (Parameter renamed from ``list`` to avoid shadowing the builtin.)
        """
        if items == []:
            return []
        else:
            new_list = []
            for phrase in items:
                phrase = (
                    phrase.replace("\xa0", "").replace(" ,", ",").replace(" .", ".")
                )
                while phrase.find("  ") != -1:
                    phrase = phrase.replace("  ", " ")
                if len(phrase) > 1:
                    new_list.append(phrase)
            return new_list
    def parse(self, config_name):
        """Walk the extracted tree for ``config_name`` and write one CSV row per essay.

        Each prompt folder contains a ``Prompt.html`` (which carries the year)
        plus one HTML file per essay.
        """
        for key, filepath in self.paths_dict.items():
            if key != config_name:
                continue  # TODO improve later, we will only support a single config at a time
            if "sourceA" in config_name:
                self.sourceA = f"{filepath}/sourceAWithGraders/sourceA.csv"
            elif config_name == "sourceB":
                self.sourceB = f"{filepath}/sourceB/sourceB.csv"
            file = self.sourceA if self.sourceA else self.sourceB
            file_dir = "/".join((file).split("/")[:-1])
            with open(file, "w", newline="", encoding="utf8") as final_file:
                writer = csv.writer(final_file)
                writer.writerow(CSV_HEADER)
                # Every non-CSV entry in the extraction root is a prompt folder.
                sub_folders = [
                    name for name in os.listdir(file_dir) if not name.endswith(".csv")
                ]
                for prompt_folder in tqdm(
                    sub_folders,
                    desc=f"Parsing HTML files from: {key}",
                    total=len(sub_folders),
                ):
                    if prompt_folder in PROMPTS_TO_IGNORE:
                        continue
                    prompt = os.path.join(file_dir, prompt_folder)
                    prompt_essays = [name for name in os.listdir(prompt)]
                    # The prompt page carries the year shared by all its essays.
                    essay_year = self._get_essay_year(
                        self.apply_soup(prompt, "Prompt.html")
                    )
                    for essay in prompt_essays:
                        # Skip the prompt page *before* parsing it a second time.
                        if essay == "Prompt.html":
                            continue
                        soup_text = self.apply_soup(prompt, essay)
                        essay_title = self._clean_title(self._get_title(soup_text))
                        essay_grades = self._get_grades(soup_text)
                        essay_text = self._get_essay(soup_text)
                        general_comment = self._get_general_comment(soup_text).strip()
                        specific_comment = self._get_specific_comment(
                            soup_text, general_comment
                        )
                        writer.writerow(
                            [
                                essay,
                                prompt_folder,
                                essay_title,
                                essay_text,
                                essay_grades,
                                general_comment,
                                specific_comment,
                                essay_year,
                            ]
                        )