first 3 stammer training
- .create_training_files.py.swp +0 -0
- .gitattributes +1 -0
- correct/correct_datafiles/correct_dev.tsv +2 -2
- correct/correct_datafiles/correct_test.tsv +2 -2
- correct/correct_datafiles/correct_train.tsv +2 -2
- correct/create_correct_dataset.py +2 -0
- create_training_files.py +24 -5
- eng_nob/create_english_train.py +42 -0
- eng_nob/dev.jsonl +3 -0
- eng_nob/dev.tsv +3 -0
- eng_nob/test.jsonl +3 -0
- eng_nob/test.tsv +3 -0
- eng_nob/train.jsonl +3 -0
- eng_nob/train.tsv +3 -0
- tsv_all_target/check.txt +0 -0
- tsv_all_target/dev.tsv +2 -2
- tsv_all_target/dev_small.tsv +2 -2
- tsv_all_target/test.tsv +2 -2
- tsv_all_target/test_small.tsv +2 -2
- tsv_all_target/train.tsv +2 -2
.create_training_files.py.swp ADDED
Binary file (12.3 kB).
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
 *.tsv filter=lfs diff=lfs merge=lfs -text
 *.tmx filter=lfs diff=lfs merge=lfs -text
+*.jsonl filter=lfs diff=lfs merge=lfs -text
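The *.tsv and *.jsonl data files in this commit are tracked by Git LFS, so the data-file hunks below change only three-line pointer files of the form "version ..." / "oid sha256:<hex>" / "size <bytes>". A minimal sketch of reading and verifying such a pointer in Python (assumes Python 3.9+ for str.removeprefix; the helper names are illustrative, not part of this repo):

import hashlib
from pathlib import Path

def read_lfs_pointer(path):
    # A pointer file holds three "key value" lines: version, oid, size.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(' ')
        fields[key] = value
    return {'version': fields['version'],
            'oid': fields['oid'].removeprefix('sha256:'),
            'size': int(fields['size'])}

def matches_pointer(pointer, blob_path):
    # True if a downloaded blob has the size and sha256 digest the pointer claims.
    blob = Path(blob_path).read_bytes()
    return (len(blob) == pointer['size']
            and hashlib.sha256(blob).hexdigest() == pointer['oid'])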
correct/correct_datafiles/correct_dev.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:53a4aa6c8acac14492a898359d250ef543091757e0a3b3e5dd5faab817a87a08
+size 2417746
correct/correct_datafiles/correct_test.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f049a0f878985fa1efd50151a4089b45d3dd10a23e45f161fd084c2714354911
+size 2417751
correct/correct_datafiles/correct_train.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:47357d479c1a62b85c131d5f1cbdb01b963cd2022ffe568725a61cf92a815280
+size 19408389
correct/create_correct_dataset.py CHANGED
@@ -119,6 +119,8 @@ def main(args):
 
 print(f"Removing a random number of commas 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
 data = data.sample(frac=1).reset_index(drop=True)
+data.loc[:,'source'] = "correct: "+data['source']
+
 
 # Train - test - dev
 train, test = train_test_split(data, test_size=0.2)
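The added line gives every source sentence a "correct: " task prefix, the same convention the "eng: " and "nob: " prefixes follow in eng_nob/create_english_train.py below, so one seq2seq model can be trained on several tasks from a single TSV. A minimal sketch of the convention (the add_prefix helper and the sample row are illustrative, not part of the repo):

import pandas as pd

def add_prefix(frame, prefix):
    # Prepend a task tag to every source string; targets stay unchanged.
    out = frame.copy()
    out['source'] = prefix + out['source'].astype(str)
    return out

sample = pd.DataFrame({'source': ['dette er en test'],
                       'target': ['Dette er en test.']})
print(add_prefix(sample, 'correct: ').loc[0, 'source'])  # correct: dette er en test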
create_training_files.py CHANGED
@@ -43,6 +43,14 @@ data['source'] = data['source'].str[4:]
 
 data = data.sample(frac=1).reset_index(drop=True)
 
+# Clean before splitting
+data['source'] = data['source'].str.replace('\t',' ')
+data['source'] = data['source'].str.replace('\n',' ')
+data['target'] = data['target'].str.replace('\t',' ')
+data['target'] = data['target'].str.replace('\n',' ')
+
+
+
 # Train - test - dev
 train, test = train_test_split(data, test_size=0.2)
 test, dev = train_test_split(test, test_size=0.5)
@@ -50,21 +58,32 @@ test, dev = train_test_split(test, test_size=0.5)
 # Write the datasets to disk
 test.to_csv('tsv_all_target/test.tsv', index=False, header=False, sep='\t')
 dev.to_csv('tsv_all_target/dev.tsv', index=False, header=False, sep='\t')
-
+print("Finished writing dev and test. Now we are adding some extra language detection and English translation")
 
 # Add the language detection to the training dataset
 filelist = glob.glob('langid/langid_datafiles/*train.tsv')
 
-print(len(train))
-
 for tsvfile in filelist:
     tmp = pd.read_csv(tsvfile, sep='\t')
     tmp.columns=['source','target']
 
     train = pd.concat([train,tmp])
 
-
-
+
+# Add extra English training set - Currently we do not add this to the test/dev-collection
+tmp = pd.read_csv('eng_nob/train.tsv', sep='\t')
+tmp.columns=['source','target']
+train = pd.concat([train,tmp])
+
+# Final shuffle
+train = train.sample(frac=1).reset_index(drop=True)
+
+# Clean before writing
+train['source'] = train['source'].str.replace('\t',' ')
+train['source'] = train['source'].str.replace('\n',' ')
+train['target'] = train['target'].str.replace('\t',' ')
+train['target'] = train['target'].str.replace('\n',' ')
+
 
 train.to_csv('tsv_all_target/train.tsv', index=False, header=False, sep='\t')
 
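Both hunks repeat the same four str.replace calls because a header-less TSV row breaks if a field contains a literal tab or newline. A sketch of that cleaning as one reusable helper (the clean_for_tsv name, the regex form, and the extra '\r' handling are illustrative additions, not what the commit itself does):

import pandas as pd

def clean_for_tsv(frame):
    # Replace embedded tabs/newlines/carriage returns with spaces so each
    # row serializes to exactly one TSV line.
    out = frame.copy()
    for col in ('source', 'target'):
        out[col] = out[col].astype(str).str.replace(r'[\t\n\r]', ' ', regex=True)
    return out

df = pd.DataFrame({'source': ['a\tb'], 'target': ['c\nd']})
print(clean_for_tsv(df))  # the fields become "a b" and "c d"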
eng_nob/create_english_train.py ADDED
@@ -0,0 +1,42 @@
+import pandas as pd
+import xmltodict
+from sklearn.model_selection import train_test_split
+import glob
+import sys
+import os
+
+filelist = glob.glob('*.jsonl')
+
+for jsonfile in filelist:
+    data = pd.DataFrame(columns=['source','target'])
+    temp = pd.DataFrame()
+
+    print(f"Processing {jsonfile}")
+    temp = pd.read_json(jsonfile, lines=True, encoding='utf8')
+    errors = 0
+    for index, row in temp.iterrows():
+        try:
+            engnob = ['nob: '+str(row['en']),str(row['no'])]
+            data.loc[len(data)] = engnob
+            nobeng = ['eng: '+str(row['no']),str(row['en'])]
+            data.loc[len(data)] = nobeng
+
+        except:
+            errors += 1
+            print("Unable to convert this line")
+            print(row)
+    try:
+        data['source'] = data['source'].str.replace('\t',' ')
+        data['target'] = data['target'].str.replace('\t',' ')
+    except:
+        errors += 1
+        print("Key error")
+
+    data = data.sample(frac=1).reset_index(drop=True)
+
+    filename = jsonfile.replace(".jsonl",".tsv")
+
+    # Write the datasets to disk
+    data.to_csv(filename, index=False, header=False, sep='\t')
+
+print("Finished")
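The script above grows its frame one row at a time with data.loc[len(data)], which is quadratic in the number of rows. A sketch of a vectorized equivalent that builds both translation directions as whole columns and concatenates once (the 'en'/'no' column names and the "eng: "/"nob: " prefixes follow the script; build_pairs itself is illustrative):

import pandas as pd

def build_pairs(temp):
    # en->no rows get a "nob: " prefix, no->en rows an "eng: " prefix.
    temp = temp.dropna(subset=['en', 'no'])
    en_to_no = pd.DataFrame({'source': 'nob: ' + temp['en'].astype(str),
                             'target': temp['no'].astype(str)})
    no_to_en = pd.DataFrame({'source': 'eng: ' + temp['no'].astype(str),
                             'target': temp['en'].astype(str)})
    return pd.concat([en_to_no, no_to_en], ignore_index=True)

pairs = build_pairs(pd.DataFrame({'en': ['good morning'], 'no': ['god morgen']}))
print(pairs)  # two rows, one per direction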
eng_nob/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7825999c9ad6a7a7e77515783b7cf6a1b43795194f6864058ca1f9cd7be0fea8
+size 3197577
eng_nob/dev.tsv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3b9df30f2a0e69273055c8708d4ef042389d212a81ed5c03fda066a71f6057d
+size 4963224
eng_nob/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:442a5ec7b689293feea897cff4c56590d63c137f650b7a88416412e6e42713b2
+size 3206230
eng_nob/test.tsv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb25a3d288398c2c8b4f5eab2d6ca0ff0fa6edfd57986f4ee75d0d182b69decd
+size 4983910
eng_nob/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:434f255a6b48b46b2f6124df3a10eb4cfccd74a2405aeb2ce36998d4644da597
+size 25675727
eng_nob/train.tsv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78be2aeb8740785607219544dc2eb6182ebaa9fc6d985f734eff66e123b53eb8
+size 39894522
tsv_all_target/check.txt ADDED
The diff for this file is too large to render.
tsv_all_target/dev.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:25c38a038bd776b625a3b70588a0262f02880c29800a0ffc883b3ee1b26aa30d
+size 29188226
tsv_all_target/dev_small.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c68ec7597310bcaf1f9fa7c6a2863d36775e75f6f9c434e3fdebc8d2e2aa2553
+size 2022316
tsv_all_target/test.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e6f6a9bf9fe10ec302d1b4d00e0a3c0914606e0111928f5697ee387a10dcbc67
+size 29178110
tsv_all_target/test_small.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dd36bdc1a55f3c44cadfbdde2c3b2992282f69613c3b46e63ea6b3264d9185a8
+size 2037392
tsv_all_target/train.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:86e2efaec9f70719518ef21139247cbc3b3d4a3cb63a63498da7e13c44995c51
+size 284512806