applied-ai-018 committed on
Commit cf5c013 · verified · 1 parent: fb1e9a9

Add files using upload-large-folder tool
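
For reference, upload-large-folder is the resumable bulk-upload utility in the huggingface_hub CLI, intended for pushing very large folders (such as this tok_files tree) in restartable chunks. A minimal sketch of the kind of invocation the message refers to, assuming a recent huggingface_hub CLI and an authenticated session; the repo id and local path below are illustrative placeholders, not values recorded in this commit:

  # sketch only: <org>/<dataset-repo> and ./hn_eng_bn are placeholders
  huggingface-cli upload-large-folder <org>/<dataset-repo> ./hn_eng_bn --repo-type=dataset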

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_104.sh +25 -0
  2. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_112.sh +25 -0
  3. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_113.sh +25 -0
  4. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_134.sh +25 -0
  5. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_14.sh +25 -0
  6. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_140.sh +25 -0
  7. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_149.sh +25 -0
  8. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_15.sh +25 -0
  9. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_153.sh +25 -0
  10. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_16.sh +25 -0
  11. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_163.sh +25 -0
  12. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_174.sh +25 -0
  13. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_179.sh +25 -0
  14. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_189.sh +25 -0
  15. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_209.sh +25 -0
  16. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_213.sh +25 -0
  17. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_223.sh +25 -0
  18. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_263.sh +25 -0
  19. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_273.sh +25 -0
  20. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_280.sh +25 -0
  21. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_283.sh +25 -0
  22. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_286.sh +25 -0
  23. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_287.sh +25 -0
  24. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_290.sh +25 -0
  25. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_294.sh +25 -0
  26. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_316.sh +25 -0
  27. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_326.sh +25 -0
  28. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_337.sh +25 -0
  29. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_338.sh +25 -0
  30. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_366.sh +25 -0
  31. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_37.sh +25 -0
  32. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_389.sh +25 -0
  33. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_403.sh +25 -0
  34. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_42.sh +25 -0
  35. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_426.sh +25 -0
  36. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_431.sh +25 -0
  37. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_436.sh +25 -0
  38. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_441.sh +25 -0
  39. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_485.sh +25 -0
  40. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_49.sh +25 -0
  41. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_500.sh +25 -0
  42. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_504.sh +25 -0
  43. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_505.sh +25 -0
  44. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_507.sh +25 -0
  45. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_510.sh +25 -0
  46. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_527.sh +25 -0
  47. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_55.sh +25 -0
  48. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_551.sh +25 -0
  49. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_56.sh +25 -0
  50. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_580.sh +25 -0
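
Every script added below is an instance of one per-shard preprocessing template; from file to file only the input split name and the tokenizer<N> output directory change. A condensed sketch of that shared template, with <N> and <SPLIT> standing in for the per-file values visible in the diffs that follow:

  #!/bin/bash
  set -m
  # rename the raw_content field to text in the input shard, in place
  sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//<SPLIT>
  FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
  TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
  TOKENIZER=$TOKENIZER_PATH/all.model
  VOCAB_FILE=$TOKENIZER_PATH/all.vocab
  TOKENIZER_TYPE=SentencePieceTokenizer
  mkdir -p $FINAL_DIR/tokenizer<N>/
  cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
  # tokenize the shard with the shared SentencePiece model and write an mmap dataset
  python3 tools/preprocess_data.py \
      --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//<SPLIT> \
      --output-prefix $FINAL_DIR/tokenizer<N>/ \
      --tokenizer-model $TOKENIZER \
      --vocab-file $VOCAB_FILE \
      --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
      --append-eod --workers 8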
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_104.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer104/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer104/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_112.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer112/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer112/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_113.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer113/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitad \
+ --output-prefix $FINAL_DIR/tokenizer113/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_134.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer134/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer134/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_14.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer14/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer14/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_140.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer140/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer140/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_149.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer149/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer149/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_15.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer15/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitad \
+ --output-prefix $FINAL_DIR/tokenizer15/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_153.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer153/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer153/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_16.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer16/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitae \
+ --output-prefix $FINAL_DIR/tokenizer16/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_163.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer163/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer163/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_174.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer174/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer174/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_179.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer179/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer179/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_189.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer189/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitad \
+ --output-prefix $FINAL_DIR/tokenizer189/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_209.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer209/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer209/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_213.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer213/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitab \
+ --output-prefix $FINAL_DIR/tokenizer213/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_223.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer223/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer223/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_263.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer263/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer263/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_273.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer273/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer273/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_280.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer280/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer280/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_283.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer283/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer283/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_286.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer286/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitae \
+ --output-prefix $FINAL_DIR/tokenizer286/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_287.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer287/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer287/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_290.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer290/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer290/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_294.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer294/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer294/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_316.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitab_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer316/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer316/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_326.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer326/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitab \
+ --output-prefix $FINAL_DIR/tokenizer326/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_337.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer337/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer337/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_338.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer338/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer338/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_366.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer366/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer366/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_37.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer37/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer37/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_389.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitab_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer389/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer389/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_403.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer403/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer403/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_42.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer42/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer42/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_426.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer426/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer426/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_431.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer431/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer431/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_436.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer436/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer436/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_441.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer441/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer441/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_485.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer485/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer485/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_49.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer49/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer49/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_500.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer500/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer500/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_504.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer504/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitac \
+ --output-prefix $FINAL_DIR/tokenizer504/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_505.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer505/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer505/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_507.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer507/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer507/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_510.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer510/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer510/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_527.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer527/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer527/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_55.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer55/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer55/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_551.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer551/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer551/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_56.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer56/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer56/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_580.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitac_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer580/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitac_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer580/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
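Each of these per-shard scripts is self-contained, and the set -m at the top suggests they are meant to be managed as separate jobs. A hypothetical driver, not part of the commit, that runs them in small parallel batches might look like the sketch below; the batch size of four and the assumption that the scripts sit in the current directory are illustrative only.

#!/bin/bash
# Hypothetical driver, not part of the commit: runs the per-shard tokenizer
# scripts as background jobs, at most N at a time.
N=4
for f in tokenizer_*.sh; do
    bash "$f" &
    # Throttle: wait until a slot frees up before launching the next shard.
    while (( $(jobs -rp | wc -l) >= N )); do
        wait -n   # bash >= 4.3: returns when any one background job exits
    done
done
wait   # let the last batch finish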