abarbosa committed on
Commit
059e5ac
·
1 Parent(s): 82791b1

finish dataset parsing

Browse files
Files changed (1) hide show
  1. aes_enem_dataset.py +71 -5
aes_enem_dataset.py CHANGED
@@ -47,7 +47,8 @@ _HOMEPAGE = ""
47
  _LICENSE = ""
48
 
49
  _URLS = {
50
- "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
 
51
  "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
52
  }
53
 
@@ -86,6 +87,9 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
86
  # You will be able to load one or the other configurations in the following list with
87
  BUILDER_CONFIGS = [
88
  datasets.BuilderConfig(name="sourceAOnly", version=VERSION, description="TODO"),
 
 
 
89
  datasets.BuilderConfig(
90
  name="sourceB",
91
  version=VERSION,
@@ -208,6 +212,24 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
208
  html_parser.parse(self.config.name)
209
  return html_parser
210
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  def _generate_splits(self, filepath: str, train_size=0.7):
212
  df = pd.read_csv(filepath)
213
  buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
@@ -251,7 +273,53 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
251
  train_df = pd.concat(train_set)
252
  val_df = pd.concat(val_set)
253
  test_df = pd.concat(test_set)
254
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
  # Data Validation Assertions
256
  assert (
257
  len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
@@ -262,8 +330,6 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
262
  assert (
263
  len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
264
  ), "Overlap between val and test id_prompt"
265
- # TODO if self.config.name == sourceAWithGrader
266
- dirname = os.path.dirname(filepath)
267
  train_df.to_csv(f"{dirname}/train.csv", index=False)
268
  val_df.to_csv(f"{dirname}/validation.csv", index=False)
269
  test_df.to_csv(f"{dirname}/test.csv", index=False)
@@ -499,7 +565,7 @@ class HTMLParser:
499
  if key != config_name:
500
  continue # TODO improve later, we will only support a single config at a time
501
  if "sourceA" in config_name:
502
- self.sourceA = f"{filepath}/sourceA/sourceA.csv"
503
  elif config_name == "sourceB":
504
  self.sourceB = f"{filepath}/sourceB/sourceB.csv"
505
  file = self.sourceA if self.sourceA else self.sourceB
 
47
  _LICENSE = ""
48
 
49
  _URLS = {
50
+ "sourceAOnly": "sourceAWithGraders.tar.gz",
51
+ "sourceAWithGraders": "sourceAWithGraders.tar.gz",
52
  "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
53
  }
54
 
 
87
  # You will be able to load one or the other configurations in the following list with
88
  BUILDER_CONFIGS = [
89
  datasets.BuilderConfig(name="sourceAOnly", version=VERSION, description="TODO"),
90
+ datasets.BuilderConfig(
91
+ name="sourceAWithGraders", version=VERSION, description="TODO"
92
+ ),
93
  datasets.BuilderConfig(
94
  name="sourceB",
95
  version=VERSION,
 
212
  html_parser.parse(self.config.name)
213
  return html_parser
214
 
215
+ def _parse_graders_data(self, dirname):
216
+ map_grades = {"0": 0, "1": 40, "2": 80, "3": 120, "4": 160, "5": 200}
217
+
218
+ def map_list(grades_list):
219
+ result = [map_grades.get(item, None) for item in grades_list]
220
+ sum_grades = sum(result)
221
+ result.append(sum_grades)
222
+ return result
223
+
224
+ grader_a = pd.read_csv(f"{dirname}/GraderA.csv")
225
+ grader_b = pd.read_csv(f"{dirname}/GraderB.csv")
226
+
227
+ for grader in [grader_a, grader_b]:
228
+ grader.grades = grader.grades.apply(lambda x: x.strip("[]").split(", "))
229
+ grader.grades = grader.grades.apply(map_list)
230
+
231
+ return grader_a, grader_b
232
+
233
  def _generate_splits(self, filepath: str, train_size=0.7):
234
  df = pd.read_csv(filepath)
235
  buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
 
273
  train_df = pd.concat(train_set)
274
  val_df = pd.concat(val_set)
275
  test_df = pd.concat(test_set)
276
+ dirname = os.path.dirname(filepath)
277
+ if self.config.name == "sourceAWithGraders":
278
+ grader_a, grader_b = self._parse_graders_data(dirname)
279
+ grader_a_data = pd.merge(
280
+ train_df[["id", "id_prompt"]],
281
+ grader_a,
282
+ on=["id", "id_prompt"],
283
+ how="inner",
284
+ )
285
+ grader_b_data = pd.merge(
286
+ train_df[["id", "id_prompt"]],
287
+ grader_b,
288
+ on=["id", "id_prompt"],
289
+ how="inner",
290
+ )
291
+ train_df = pd.concat([train_df, grader_a_data])
292
+ train_df = pd.concat([train_df, grader_b_data])
293
+
294
+ grader_a_data = pd.merge(
295
+ val_df[["id", "id_prompt"]],
296
+ grader_a,
297
+ on=["id", "id_prompt"],
298
+ how="inner",
299
+ )
300
+ grader_b_data = pd.merge(
301
+ val_df[["id", "id_prompt"]],
302
+ grader_b,
303
+ on=["id", "id_prompt"],
304
+ how="inner",
305
+ )
306
+ val_df = pd.concat([val_df, grader_a_data])
307
+ val_df = pd.concat([val_df, grader_b_data])
308
+
309
+ grader_a_data = pd.merge(
310
+ test_df[["id", "id_prompt"]],
311
+ grader_a,
312
+ on=["id", "id_prompt"],
313
+ how="inner",
314
+ )
315
+ grader_b_data = pd.merge(
316
+ test_df[["id", "id_prompt"]],
317
+ grader_b,
318
+ on=["id", "id_prompt"],
319
+ how="inner",
320
+ )
321
+ test_df = pd.concat([test_df, grader_a_data])
322
+ test_df = pd.concat([test_df, grader_b_data])
323
  # Data Validation Assertions
324
  assert (
325
  len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
 
330
  assert (
331
  len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
332
  ), "Overlap between val and test id_prompt"
 
 
333
  train_df.to_csv(f"{dirname}/train.csv", index=False)
334
  val_df.to_csv(f"{dirname}/validation.csv", index=False)
335
  test_df.to_csv(f"{dirname}/test.csv", index=False)
 
565
  if key != config_name:
566
  continue # TODO improve later, we will only support a single config at a time
567
  if "sourceA" in config_name:
568
+ self.sourceA = f"{filepath}/sourceAWithGraders/sourceA.csv"
569
  elif config_name == "sourceB":
570
  self.sourceB = f"{filepath}/sourceB/sourceB.csv"
571
  file = self.sourceA if self.sourceA else self.sourceB