applied-ai-018 committed
Commit e781159 · verified · 1 Parent(s): f500ba4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete list.
Files changed (50)
  1. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_0.sh +25 -0
  2. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_114.sh +25 -0
  3. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_127.sh +25 -0
  4. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_129.sh +25 -0
  5. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_131.sh +25 -0
  6. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_143.sh +25 -0
  7. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_146.sh +25 -0
  8. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_158.sh +25 -0
  9. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_162.sh +25 -0
  10. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_165.sh +25 -0
  11. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_166.sh +25 -0
  12. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_177.sh +25 -0
  13. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_180.sh +25 -0
  14. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_184.sh +25 -0
  15. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_195.sh +25 -0
  16. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_206.sh +25 -0
  17. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_22.sh +25 -0
  18. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_227.sh +25 -0
  19. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_232.sh +25 -0
  20. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_238.sh +25 -0
  21. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_243.sh +25 -0
  22. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_25.sh +25 -0
  23. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_258.sh +25 -0
  24. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_268.sh +25 -0
  25. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_270.sh +25 -0
  26. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_29.sh +25 -0
  27. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_330.sh +25 -0
  28. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_331.sh +25 -0
  29. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_334.sh +25 -0
  30. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_347.sh +25 -0
  31. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_348.sh +25 -0
  32. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_355.sh +25 -0
  33. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_359.sh +25 -0
  34. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_406.sh +25 -0
  35. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_415.sh +25 -0
  36. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_416.sh +25 -0
  37. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_418.sh +25 -0
  38. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_421.sh +25 -0
  39. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_424.sh +25 -0
  40. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_435.sh +25 -0
  41. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_437.sh +25 -0
  42. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_439.sh +25 -0
  43. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_440.sh +25 -0
  44. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_495.sh +25 -0
  45. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_511.sh +25 -0
  46. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_514.sh +25 -0
  47. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_517.sh +25 -0
  48. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_522.sh +25 -0
  49. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_523.sh +25 -0
  50. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_529.sh +25 -0
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_0.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer0/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer0/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
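Note: every tokenizer_*.sh file added in this commit follows the same 25-line template and differs only in the input split file and the numbered output directory. A minimal parameterized sketch of that template is shown below for reference; it is not part of the commit, the SPLIT_DIR glob and the sequential idx counter are hypothetical (the committed scripts use fixed split names and non-sequential tokenizerN directories), and every path and preprocess_data.py flag is copied from the scripts themselves.

#!/bin/bash
# Hypothetical driver equivalent to the per-split scripts in this commit.
# Assumption: each split file matches final* under SPLIT_DIR; the idx numbering is illustrative only.
set -m
SPLIT_DIR=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
idx=0
for SPLIT in "$SPLIT_DIR"/final*; do
    # Rename the raw_content field to text, as each committed script does before tokenizing.
    sed -i -e "s/raw_content/text/g" "$SPLIT"
    mkdir -p "$FINAL_DIR/tokenizer${idx}/"
    python3 tools/preprocess_data.py \
        --input "$SPLIT" \
        --output-prefix "$FINAL_DIR/tokenizer${idx}/" \
        --tokenizer-model "$TOKENIZER" \
        --vocab-file "$VOCAB_FILE" \
        --dataset-impl mmap --tokenizer-type "$TOKENIZER_TYPE" \
        --append-eod --workers 8
    idx=$((idx + 1))
done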
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_114.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer114/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitae \
+ --output-prefix $FINAL_DIR/tokenizer114/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_127.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer127/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer127/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_129.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer129/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer129/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_131.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer131/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer131/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_143.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer143/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer143/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_146.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer146/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer146/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_158.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer158/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer158/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_162.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer162/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer162/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_165.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer165/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer165/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_166.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer166/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer166/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_177.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer177/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer177/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_180.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer180/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer180/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_184.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer184/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer184/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_195.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer195/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer195/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_206.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer206/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer206/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_22.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitaa_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer22/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitaa_splitae \
+ --output-prefix $FINAL_DIR/tokenizer22/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_227.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer227/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer227/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_232.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer232/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer232/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_238.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer238/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer238/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_243.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer243/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer243/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_25.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer25/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer25/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_258.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer258/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer258/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_268.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer268/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer268/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_270.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer270/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer270/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_29.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer29/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer29/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_330.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer330/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer330/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_331.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer331/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitab \
+ --output-prefix $FINAL_DIR/tokenizer331/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_334.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer334/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer334/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_347.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer347/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer347/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_348.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer348/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer348/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_355.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer355/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer355/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_359.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer359/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer359/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_406.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer406/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer406/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_415.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer415/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitab \
+ --output-prefix $FINAL_DIR/tokenizer415/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_416.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer416/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitac \
+ --output-prefix $FINAL_DIR/tokenizer416/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_418.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer418/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer418/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_421.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer421/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer421/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_424.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer424/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer424/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_435.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer435/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer435/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_437.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer437/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitab \
+ --output-prefix $FINAL_DIR/tokenizer437/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_439.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer439/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer439/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_440.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer440/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer440/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_495.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer495/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitab \
+ --output-prefix $FINAL_DIR/tokenizer495/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_511.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer511/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer511/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_514.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer514/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer514/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_517.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer517/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer517/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_522.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer522/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitad \
+ --output-prefix $FINAL_DIR/tokenizer522/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_523.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer523/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer523/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_529.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer529/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer529/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+