ghomasHudson committed on
Commit dc98da6 · 1 Parent(s): 6ddb818

Create character_id.py

Files changed (1)
  1. character_id.py +154 -0
character_id.py ADDED
@@ -0,0 +1,154 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Identifying character archetypes from movie scripts."""

from __future__ import absolute_import, division, print_function

import csv
import os

import datasets


_CITATION = """\
"""

_DESCRIPTION = """\
The character types identification dataset consists of movie
scripts annotated with character archetypes (Hero, Villain, Mentor, etc.).
"""

_URLS = {
    "full_text": "https://drive.google.com/uc?export=download&id=1pivLkYl6l6_jJlQkHGsvziEn82GBapWc",
    # "repo": "https://github.com/ghomasHudson/character-type-identification/archive/master.zip",
    "repo": "https://github.com/ghomasHudson/character-type-identification/archive/refs/heads/master.zip",
}


class CharacterTypeID(datasets.GeneratorBasedBuilder):
    """Character Type Identification"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "document": {
                        "id": datasets.Value("string"),
                        "url": datasets.Value("string"),
                        "file_size": datasets.Value("int32"),
                        "word_count": datasets.Value("int32"),
                        "start": datasets.Value("string"),
                        "end": datasets.Value("string"),
                        "summary": {
                            "text": datasets.Value("string"),
                            "url": datasets.Value("string"),
                            "title": datasets.Value("string"),
                        },
                        "text": datasets.Value("string"),
                    },
                    "character_name": datasets.Value("string"),
                    "unit_quality_score": datasets.Value("float32"),
                    "character_type": datasets.ClassLabel(
                        names=[
                            "Hero",
                            "Villain/Antagonist",
                            "Spouse/Partner/Lover of Hero",
                            "Spouse/Partner/Lover of Villain",
                            "Sidekick of Hero",
                            "Sidekick of Villain",
                            "Supporting role character of Hero",
                            "Supporting role character of Villain",
                            "Mentor",
                            "No Applicable Type",
                        ]
                    ),
                }
            ),
            homepage="https://github.com/ghomasHudson/character-type-identification",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        dl_dir = dl_manager.download_and_extract(_URLS)
        # The GitHub branch archive unpacks into a "<repo>-master" subdirectory.
        dl_dir["repo"] = os.path.join(dl_dir["repo"], "character-type-identification-master")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"repo_dir": dl_dir["repo"], "full_text_dir": dl_dir["full_text"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"repo_dir": dl_dir["repo"], "full_text_dir": dl_dir["full_text"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"repo_dir": dl_dir["repo"], "full_text_dir": dl_dir["full_text"], "split": "valid"},
            ),
        ]

    def _generate_examples(self, repo_dir, full_text_dir, split):
        """Yields examples."""
        # Index the per-document metadata for this split by document_id.
        documents = {}
        with open(os.path.join(repo_dir, "documents.csv"), encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                if row["set"] != split:
                    continue
                documents[row["document_id"]] = row

        # Index the plot summaries for this split by document_id.
        summaries = {}
        with open(os.path.join(repo_dir, "summaries.csv"), encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                if row["set"] != split:
                    continue
                summaries[row["document_id"]] = row

        # The test split uses the gold character labels.
        char_fn = "character_labels.csv"
        if split == "test":
            char_fn = "character_labels_gold.csv"
        with open(os.path.join(repo_dir, char_fn), encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for id_, row in enumerate(reader):
                if row["set"] != split:
                    continue
                document_id = row["document_id"]
                if document_id not in documents or document_id not in summaries:
                    # Skip character rows that have no matching document or summary metadata.
                    continue
                document = documents[document_id]
                summary = summaries[document_id]
                with open(os.path.join(full_text_dir, document_id + ".txt"), encoding="latin-1") as script_f:
                    full_text = script_f.read()
                res = {
                    "document": {
                        "id": document["document_id"],
                        "url": document["script_url"],
                        "file_size": document["script_file_size"],
                        "word_count": document["script_word_count"],
                        "start": document["script_start"],
                        "end": document["script_end"],
                        "summary": {
                            "text": summary["summary"],
                            "url": document["wiki_url"],
                            "title": document["wiki_title"],
                        },
                        "text": full_text,
                    },
                    "character_name": row["character_name"],
                    "unit_quality_score": row["unit_quality_score"],
                    "character_type": row["character_type"],
                }
                yield id_, res
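
A minimal usage sketch, not part of the commit: it assumes the script is saved locally as character_id.py, that the URLs in _URLS are still reachable, and that newer versions of datasets may additionally require trust_remote_code=True when loading a local script.

    from datasets import load_dataset

    # Build all three splits from the local loading script (assumed filename).
    dataset = load_dataset("character_id.py")

    # The ClassLabel feature exposes the ten archetype names defined in _info().
    labels = dataset["train"].features["character_type"]
    print(labels.names)

    # character_type is stored as a class index; int2str maps it back to the label.
    example = dataset["train"][0]
    print(example["character_name"], labels.int2str(example["character_type"]))

Each yielded example is one labelled character, so downstream code sees a flat (document, character_name, character_type) record rather than one row per film.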