Fix missing expected links by token during inference process (#120)
Files changed:
- german.yaml (+1, -1)
- huggingface/vocabulary_parser.py (+24, -2)
- tests/test_vocabulary_parser.py (+65, -55)
german.yaml CHANGED

@@ -6219,7 +6219,7 @@ vocabulary:
       - at all
       - any
   - term: jemand
-    definition: someone
+    definition: (pron.) someone
   - term: Ergebnis
     definition: result
   - term: führt zu keinem Ergebnis
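The only change here is a part-of-speech annotation on the definition. Judging by test_get_definition_tokens further down, parenthesized markers such as "(adv.)" or "(pron.)" are stripped during tokenization, so the annotation should not disturb link inference. A quick illustration, hedged in that it assumes the tokenizer behavior the tests assert rather than anything this diff states:

    from huggingface.vocabulary_parser import get_definition_tokens

    word = {"term": "jemand", "definition": "(pron.) someone"}
    # Expected, per the assertions in tests/test_vocabulary_parser.py:
    # the parenthesized annotation is dropped, leaving {"someone"}.
    print(get_definition_tokens(word))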
huggingface/vocabulary_parser.py CHANGED

@@ -237,6 +237,21 @@ def get_inflection_tokens(
 
 
 def get_tokens_of(word: dict, inflection_supplier: Callable[[object], dict[str, str]] = lambda word: {}) -> set[str]:
+    """
+    Returns the tokens of a word used for link inferences.
+
+    The tokens come from the following attributes:
+
+    1. term
+    2. definition
+    3. inflection field (conjugation & declension)
+
+    :param word: A list entry of wilhelm-vocabulary repo YAML file deserialized
+    :param inflection_supplier: A functional object that, given a YAML dictionary, returns the inflection table of that
+                                word. The key of the table can be arbitrary but the value must be a sole inflected word
+
+    :return: a set of tokens
+    """
     return get_inflection_tokens(word, inflection_supplier) | get_term_tokens(word) | get_definition_tokens(word)
 
 
@@ -275,11 +290,15 @@ def get_inferred_tokenization_links(
 
     :param vocabulary: A wilhelm-vocabulary repo YAML file deserialized
    :param label_key: The name of the node attribute that will be used as the label in displaying the node
+    :param inflection_supplier: A functional object that, given a YAML dictionary, returns the inflection table of that
+                                word. The key of the table can be arbitrary but the value must be a sole inflected word
 
     :return: a list of link objects, each of which has a "source_label", a "target_label", and an "attributes" key
     """
     all_vocabulary_tokenizations_by_term = dict(
         [word["term"], get_tokens_of(word, inflection_supplier)] for word in vocabulary)
+
+    existing_pairs: set[frozenset] = set()
     inferred_links = []
     for this_word in vocabulary:
         this_term = this_word["term"]
@@ -290,14 +309,17 @@ def get_inferred_tokenization_links(
             if this_term == that_term:
                 continue
 
             for this_token in all_vocabulary_tokenizations_by_term[this_term]:
                 for that_token in that_term_tokens:
-                    if this_token.lower().strip() == that_token:
+                    if this_token.lower().strip() == that_token and ({this_term, that_term} not in existing_pairs):
+                        existing_pairs.add(frozenset({this_term, that_term}))
+
                         inferred_links.append({
                             "source_label": this_term,
                             "target_label": that_term,
                             "attributes": {label_key: "term related"},
                         })
+
                         jump_to_next_term = True
                         break
 
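The heart of the fix is the existing_pairs guard: inferred links are treated as undirected for de-duplication, so each linked pair of terms is recorded as a frozenset, which hashes the same regardless of source/target order. A self-contained sketch of the technique, using illustrative data rather than anything from the repo:

    # Unordered-pair de-duplication, as in get_inferred_tokenization_links.
    # Python converts a set to a frozenset when testing membership in a set,
    # so `{source, target} in existing_pairs` matches pairs recorded in
    # either order.
    existing_pairs: set[frozenset] = set()
    links = []

    for source, target in [("das Jahr", "seit zwei Jahren"),
                           ("seit zwei Jahren", "das Jahr")]:  # reversed duplicate
        if {source, target} not in existing_pairs:
            existing_pairs.add(frozenset({source, target}))
            links.append({"source_label": source, "target_label": target})

    assert len(links) == 1  # the reversed duplicate is skipped

Without the guard, every matching token pair produced a link in both directions, which is what left the test expectations unmet before this change.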
tests/test_vocabulary_parser.py CHANGED

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 import unittest
 
 import yaml
@@ -34,8 +35,8 @@ UNKOWN_DECLENSION_NOUN_YAML = """
     definition: the grilled tomato
     declension: Unknown
 """
-
 LABEL_KEY = "label"
+DIR_PATH = os.path.dirname(os.path.realpath(__file__))
 
 
 class TestVocabularyParser(unittest.TestCase):
@@ -163,28 +164,39 @@ class TestVocabularyParser(unittest.TestCase):
 
     def test_get_definition_tokens(self):
         vocabulary = yaml.safe_load("""
-        [old inline fixture (6 lines) not preserved in this view]
+        vocabulary:
+          - term: morgens
+            definition:
+              - (adv.) in the morning
+              - (adv.) a.m.
+        """)["vocabulary"]
         self.assertEqual(
             {"morning", "a.m."},
             get_definition_tokens(vocabulary[0])
         )
 
         vocabulary = yaml.safe_load("""
-        [old inline fixture (5 lines) not preserved in this view]
+        vocabulary:
+          - term: exekutieren
+            definition: to execute (kill)
+            audio: https://upload.wikimedia.org/wikipedia/commons/f/f1/De-exekutieren.ogg
+        """)["vocabulary"]
         self.assertEqual(
             {"execute", "kill"},
             get_definition_tokens(vocabulary[0])
         )
 
+        vocabulary = yaml.safe_load("""
+        vocabulary:
+          - term: töten
+            definition: to kill
+            audio: https://upload.wikimedia.org/wikipedia/commons/b/b0/De-t%C3%B6ten.ogg
+        """)["vocabulary"]
+        self.assertEqual(
+            {"kill"},
+            get_definition_tokens(vocabulary[0])
+        )
+
     def test_get_term_tokens(self):
         vocabulary = yaml.safe_load("""
         vocabulary:
@@ -257,49 +269,47 @@ class TestVocabularyParser(unittest.TestCase):
         )
 
     def test_get_inferred_tokenization_links(self):
-        [old test body (41 lines) not preserved in this view]
-            get_inferred_tokenization_links(vocabulary, LABEL_KEY, get_declension_attributes)
-        )
+        test_cases = [
+            {
+                "words": ["das Jahr", "seit zwei Jahren", "letzte", "in den letzten Jahren"],
+                "expected": [
+                    {
+                        'attributes': {'label': 'term related'},
+                        'source_label': 'das Jahr',
+                        'target_label': 'seit zwei Jahren'
+                    },
+                    {
+                        'attributes': {'label': 'term related'},
+                        'source_label': 'das Jahr',
+                        'target_label': 'in den letzten Jahren'
+                    },
+                    {
+                        'attributes': {'label': 'term related'},
+                        'source_label': 'seit zwei Jahren',
+                        'target_label': 'in den letzten Jahren'
+                    }
+                ]
+            },
+            {
+                "words": ["exekutieren", "töten"],
+                "expected": [
+                    {
+                        'attributes': {LABEL_KEY: 'term related'},
+                        'source_label': 'exekutieren',
+                        'target_label': 'töten'
+                    },
+                ]
+            }
+        ]
+
+        for test_case in test_cases:
+            with open("{path}/../german.yaml".format(path=DIR_PATH), "r") as f:
+                vocabulary = [word for word in yaml.safe_load(f)["vocabulary"] if word["term"] in test_case["words"]]
+
+            self.assertEqual(
+                test_case["expected"],
+                get_inferred_tokenization_links(vocabulary, LABEL_KEY, get_declension_attributes)
+            )
 
     def test_get_structurally_similar_links(self):
         vocabulary = yaml.safe_load("""
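For orientation, here is a minimal usage sketch of the newly documented get_tokens_of, relying only on its default (empty) inflection supplier; the expected output is inferred from the test assertions above rather than stated by this diff:

    import yaml
    from huggingface.vocabulary_parser import get_tokens_of

    word = yaml.safe_load("""
    term: exekutieren
    definition: to execute (kill)
    """)

    # Union of term tokens and definition tokens; with the default supplier
    # the inflection contribution is empty.
    print(get_tokens_of(word))  # likely {"exekutieren", "execute", "kill"}

Passing get_declension_attributes as the supplier, as the tests do, folds the declension table's inflected forms into the same set, which is what lets "das Jahr" link to "seit zwei Jahren" through the shared form "Jahren".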