pere committed
Commit 381bc18 · 1 Parent(s): df907da

first 3 stammer training

.create_training_files.py.swp ADDED
Binary file (12.3 kB).
 
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
 *.tsv filter=lfs diff=lfs merge=lfs -text
 *.tmx filter=lfs diff=lfs merge=lfs -text
+*.jsonl filter=lfs diff=lfs merge=lfs -text
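The new line routes *.jsonl files through Git LFS like the other large formats above. If you are reproducing this setup, the entry is normally generated with the command git lfs track "*.jsonl" (which appends exactly such a line to .gitattributes) rather than edited by hand.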
correct/correct_datafiles/correct_dev.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fec0f2575d5925b993aefdb9715ace66f3a4948258c8cfa1600fd9b264fd8963
-size 2320597
+oid sha256:53a4aa6c8acac14492a898359d250ef543091757e0a3b3e5dd5faab817a87a08
+size 2417746
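Note that this and the other .tsv/.jsonl diffs in this commit touch only the Git LFS pointer files (the oid sha256 hash and the size in bytes); the regenerated datasets themselves live in LFS storage, so these diffs record that the files were rebuilt, not what changed inside them.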
correct/correct_datafiles/correct_test.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5e7595989509c671e3bc088129a080ab4292ba472ef21bc8c92d470ebc4f1196
-size 2339422
+oid sha256:f049a0f878985fa1efd50151a4089b45d3dd10a23e45f161fd084c2714354911
+size 2417751
correct/correct_datafiles/correct_train.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d42cbf75385b1918f7dd9fa479e65519ecbe9bfcf1996132c4236ea695cd9fdf
-size 18565347
+oid sha256:47357d479c1a62b85c131d5f1cbdb01b963cd2022ffe568725a61cf92a815280
+size 19408389
correct/create_correct_dataset.py CHANGED
@@ -119,6 +119,8 @@ def main(args):
 
     print(f"Removing a random number of commas 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
     data = data.sample(frac=1).reset_index(drop=True)
+    data.loc[:,'source'] = "correct: "+data['source']
+
 
     # Train - test - dev
     train, test = train_test_split(data, test_size=0.2)
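The added line gives every correction example a T5-style task prefix, so a single model can multiplex several tasks (the translation data introduced below uses 'eng: ' and 'nob: ' prefixes the same way). A minimal sketch of the effect, with an illustrative sentence pair:

import pandas as pd

# Illustrative pair; the real data comes from the correction pipeline above
data = pd.DataFrame({'source': ['han kom hjem i gåår'],
                     'target': ['Han kom hjem i går.']})
data.loc[:, 'source'] = "correct: " + data['source']
print(data.loc[0]['source'])  # correct: han kom hjem i gåår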
create_training_files.py CHANGED
@@ -43,6 +43,14 @@ data['source'] = data['source'].str[4:]
 
 data = data.sample(frac=1).reset_index(drop=True)
 
+#Clean before splitting
+data['source'] = data['source'].str.replace('\t',' ')
+data['source'] = data['source'].str.replace('\n',' ')
+data['target'] = data['target'].str.replace('\t',' ')
+data['target'] = data['target'].str.replace('\n',' ')
+
+
+
 # Train - test - dev
 train, test = train_test_split(data, test_size=0.2)
 test, dev = train_test_split(test, test_size=0.5)
@@ -50,21 +58,32 @@ test, dev = train_test_split(test, test_size=0.5)
 # Write the datasets to disk
 test.to_csv('tsv_all_target/test.tsv', index=False, header=False, sep='\t')
 dev.to_csv('tsv_all_target/dev.tsv', index=False, header=False, sep='\t')
-
+print("Finished writing dev and test. Now we are adding some extra language detection and English translation")
 
 # Add the language detection to the training dataset
 filelist = glob.glob('langid/langid_datafiles/*train.tsv')
 
-print(len(train))
-
 for tsvfile in filelist:
     tmp = pd.read_csv(tsvfile, sep='\t')
     tmp.columns=['source','target']
 
    train = pd.concat([train,tmp])
 
-data = data.sample(frac=1).reset_index(drop=True)
-print(len(train))
+
+# Add extra English training set - Currently we do not add this to the test/dev-collection
+tmp = pd.read_csv('eng_nob/train.tsv', sep='\t')
+tmp.columns=['source','target']
+train = pd.concat([train,tmp])
+
+#Final shuffle
+train = train.sample(frac=1).reset_index(drop=True)
+
+#Clean before writing
+train['source'] = train['source'].str.replace('\t',' ')
+train['source'] = train['source'].str.replace('\n',' ')
+train['target'] = train['target'].str.replace('\t',' ')
+train['target'] = train['target'].str.replace('\n',' ')
+
 
 train.to_csv('tsv_all_target/train.tsv', index=False, header=False, sep='\t')
 
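The new cleaning step matters because the files are written with sep='\t' and typically read back line-by-line: a stray tab or newline inside a field would shift columns or split one example across two lines. A minimal sketch of the failure mode and the fix (strings are illustrative):

import pandas as pd

df = pd.DataFrame({'source': ['nob:\tHello\nworld'], 'target': ['Hei verden']})
for col in ['source', 'target']:
    # Replace in-field separators so each example stays on one TSV line
    df[col] = df[col].str.replace('\t', ' ').str.replace('\n', ' ')
print(df.loc[0, 'source'])  # nob: Hello world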
eng_nob/create_english_train.py ADDED
@@ -0,0 +1,42 @@
+import pandas as pd
+import xmltodict
+from sklearn.model_selection import train_test_split
+import glob
+import sys
+import os
+
+filelist = glob.glob('*.jsonl')
+
+for jsonfile in filelist:
+    data = pd.DataFrame([['source','target']])
+    temp = pd.DataFrame()
+
+    print(f"Processing {jsonfile}")
+    temp = pd.read_json(jsonfile, lines=True,encoding='utf8')
+    errors = 0
+    for index, row in temp.iterrows():
+        try:
+            engnob = ['nob: '+str(row['en']),str(row['no'])]
+            data.loc[len(data)] = engnob
+            nobeng = ['eng: '+str(row['no']),str(row['en'])]
+            data.loc[len(data)] = nobeng
+
+        except:
+            errors += 1
+            print("Unable to convert this line")
+            print(row)
+    try:
+        data['source'] = data['source'].str.replace('\t',' ')
+        data['target'] = data['target'].str.replace('\t',' ')
+    except:
+        errors += 1
+        print("Key error")
+
+    data = data.sample(frac=1).reset_index(drop=True)
+
+    filename = jsonfile.replace(".jsonl",".tsv")
+
+    # Write the datasets to disk
+    data.to_csv(filename, index=False, header=False, sep='\t')
+
+print("Finished")
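Two things worth noting in the new script. First, seeding the frame with pd.DataFrame([['source','target']]) inserts a literal source/target row into the data; because the TSVs are written with header=False, that row survives into the output (at a random position after shuffling). Second, data.loc[len(data)] = ... grows the DataFrame one row at a time, which is quadratic in the number of rows. A sketch of the more usual pattern, assuming the same 'en'/'no' fields:

import pandas as pd

temp = pd.read_json('train.jsonl', lines=True)
rows = []
for _, row in temp.iterrows():
    # Prefix marks the target language, matching the script above
    rows.append(['nob: ' + str(row['en']), str(row['no'])])
    rows.append(['eng: ' + str(row['no']), str(row['en'])])
data = pd.DataFrame(rows, columns=['source', 'target'])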
eng_nob/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7825999c9ad6a7a7e77515783b7cf6a1b43795194f6864058ca1f9cd7be0fea8
+size 3197577
eng_nob/dev.tsv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3b9df30f2a0e69273055c8708d4ef042389d212a81ed5c03fda066a71f6057d
+size 4963224
eng_nob/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:442a5ec7b689293feea897cff4c56590d63c137f650b7a88416412e6e42713b2
+size 3206230
eng_nob/test.tsv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb25a3d288398c2c8b4f5eab2d6ca0ff0fa6edfd57986f4ee75d0d182b69decd
+size 4983910
eng_nob/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:434f255a6b48b46b2f6124df3a10eb4cfccd74a2405aeb2ce36998d4644da597
+size 25675727
eng_nob/train.tsv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78be2aeb8740785607219544dc2eb6182ebaa9fc6d985f734eff66e123b53eb8
+size 39894522
tsv_all_target/check.txt ADDED
The diff for this file is too large to render.
 
tsv_all_target/dev.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6033fef40a251fa84c0bd2371dc137c671ca3c97d255765b0a0903ecd69cd0fb
-size 29236881
+oid sha256:25c38a038bd776b625a3b70588a0262f02880c29800a0ffc883b3ee1b26aa30d
+size 29188226
tsv_all_target/dev_small.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b8ecd9a7c55562eea70663bb7ed62639ed29cb88f5231136e690e72670f0cf8
-size 2011164
+oid sha256:c68ec7597310bcaf1f9fa7c6a2863d36775e75f6f9c434e3fdebc8d2e2aa2553
+size 2022316
tsv_all_target/test.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:433d5360a91971fc3ee2cc4627e0233603d37e30bf33d2aec6dcbfc58072c608
-size 29096305
+oid sha256:e6f6a9bf9fe10ec302d1b4d00e0a3c0914606e0111928f5697ee387a10dcbc67
+size 29178110
tsv_all_target/test_small.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6963609d621487dfab571e6bbda2beac49f5960f8f8cc46a9e39e88b0db8941a
-size 2046850
+oid sha256:dd36bdc1a55f3c44cadfbdde2c3b2992282f69613c3b46e63ea6b3264d9185a8
+size 2037392
tsv_all_target/train.tsv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:301034a5c29ca96a946b999c9bb83a3e8f2e371f063ff62c0104e534094236c3
-size 244651793
+oid sha256:86e2efaec9f70719518ef21139247cbc3b3d4a3cb63a63498da7e13c44995c51
+size 284512806