applied-ai-018 committed
Commit b0f8aa8 · verified · 1 parent: d89f3e1

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_101.sh +25 -0
  2. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_12.sh +25 -0
  3. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_128.sh +25 -0
  4. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_137.sh +25 -0
  5. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_147.sh +25 -0
  6. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_151.sh +25 -0
  7. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_154.sh +25 -0
  8. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_172.sh +25 -0
  9. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_178.sh +25 -0
  10. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_181.sh +25 -0
  11. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_196.sh +25 -0
  12. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_199.sh +25 -0
  13. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_21.sh +25 -0
  14. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_23.sh +25 -0
  15. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_234.sh +25 -0
  16. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_249.sh +25 -0
  17. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_251.sh +25 -0
  18. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_255.sh +25 -0
  19. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_259.sh +25 -0
  20. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_264.sh +25 -0
  21. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_277.sh +25 -0
  22. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_301.sh +25 -0
  23. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_305.sh +25 -0
  24. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_310.sh +25 -0
  25. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_324.sh +25 -0
  26. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_325.sh +25 -0
  27. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_33.sh +25 -0
  28. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_332.sh +25 -0
  29. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_343.sh +25 -0
  30. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_344.sh +25 -0
  31. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_354.sh +25 -0
  32. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_363.sh +25 -0
  33. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_372.sh +25 -0
  34. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_376.sh +25 -0
  35. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_381.sh +25 -0
  36. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_393.sh +25 -0
  37. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_398.sh +25 -0
  38. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_408.sh +25 -0
  39. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_409.sh +25 -0
  40. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_410.sh +25 -0
  41. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_450.sh +25 -0
  42. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_464.sh +25 -0
  43. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_469.sh +25 -0
  44. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_480.sh +25 -0
  45. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_494.sh +25 -0
  46. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_509.sh +25 -0
  47. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_520.sh +25 -0
  48. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_521.sh +25 -0
  49. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_548.sh +25 -0
  50. hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_568.sh +25 -0
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_101.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer101/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer101/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
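Note: all 50 scripts added in this commit follow the same template as tokenizer_101.sh above; only the input shard under split/ and the tokenizer<N> output subdirectory change from file to file. Below is a condensed, illustrative sketch of how such per-shard scripts could be generated in a loop. The shards.txt listing and the generate_tok_scripts.sh name are assumptions for the example, not part of this commit, and the sketch drops the echo/debug lines of the originals.

```bash
#!/bin/bash
# generate_tok_scripts.sh -- illustrative sketch only; not part of this commit.
# Assumes shards.txt lists "<index> <shard_name>" pairs, e.g. "101 finalae_splitad_splitac".
set -euo pipefail

SPLIT_DIR=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k

while read -r idx shard; do
  # Emit one self-contained script per shard, mirroring the layout of the committed files.
  cat > "tokenizer_${idx}.sh" <<EOF
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" ${SPLIT_DIR}/${shard}
mkdir -p ${FINAL_DIR}/tokenizer${idx}/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
python3 tools/preprocess_data.py \\
    --input ${SPLIT_DIR}/${shard} \\
    --output-prefix ${FINAL_DIR}/tokenizer${idx}/ \\
    --tokenizer-model ${TOKENIZER_PATH}/all.model \\
    --vocab-file ${TOKENIZER_PATH}/all.vocab \\
    --dataset-impl mmap --tokenizer-type SentencePieceTokenizer \\
    --append-eod --workers 8
EOF
  chmod +x "tokenizer_${idx}.sh"
done < shards.txt
```

Each generated script is independent, so the per-shard jobs can be dispatched separately (for example, one per node), which matches the one-script-per-shard layout of the files added here.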
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_12.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer12/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer12/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_128.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer128/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer128/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_137.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer137/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer137/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_147.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer147/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer147/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_151.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer151/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer151/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_154.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer154/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer154/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_172.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer172/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer172/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_178.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer178/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitad \
+ --output-prefix $FINAL_DIR/tokenizer178/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_181.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer181/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitab \
+ --output-prefix $FINAL_DIR/tokenizer181/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_196.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer196/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer196/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_199.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer199/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer199/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_21.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitaa_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer21/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitaa_splitad \
+ --output-prefix $FINAL_DIR/tokenizer21/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_23.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitab_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer23/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer23/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_234.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer234/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitae \
+ --output-prefix $FINAL_DIR/tokenizer234/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_249.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer249/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer249/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_251.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer251/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer251/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_255.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer255/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer255/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_259.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer259/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer259/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_264.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer264/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer264/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_277.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitac_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer277/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer277/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_301.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer301/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer301/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_305.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitae_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer305/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitae_splitab \
+ --output-prefix $FINAL_DIR/tokenizer305/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_310.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaa_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer310/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaa_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer310/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_324.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer324/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer324/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_325.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer325/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer325/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_33.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer33/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer33/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_332.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer332/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitac \
+ --output-prefix $FINAL_DIR/tokenizer332/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_343.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer343/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer343/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_344.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer344/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer344/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_354.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer354/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitad \
+ --output-prefix $FINAL_DIR/tokenizer354/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_363.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer363/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer363/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_372.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer372/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitae \
+ --output-prefix $FINAL_DIR/tokenizer372/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_376.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer376/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitad \
+ --output-prefix $FINAL_DIR/tokenizer376/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_381.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitaa_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer381/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer381/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_393.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitac_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer393/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer393/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_398.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer398/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitad \
+ --output-prefix $FINAL_DIR/tokenizer398/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_408.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer408/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitac \
+ --output-prefix $FINAL_DIR/tokenizer408/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_409.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitad_splitaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer409/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer409/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_410.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitad_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer410/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitad_splitab \
+ --output-prefix $FINAL_DIR/tokenizer410/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_450.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer450/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer450/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_464.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer464/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitac \
+ --output-prefix $FINAL_DIR/tokenizer464/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_469.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitaa_splitab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer469/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer469/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_480.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+ TOKENIZER=$TOKENIZER_PATH/all.model
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer480/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer480/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_494.sh ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
# Rename the raw_content field to text in place so preprocess_data.py reads the right key
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitaa
echo "above deepspeed"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer494/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
# Tokenize this shard with the shared SentencePiece model and write an mmap-backed dataset
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitaa \
    --output-prefix $FINAL_DIR/tokenizer494/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_509.sh ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
# Rename the raw_content field to text in place so preprocess_data.py reads the right key
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitaa
echo "above deepspeed"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer509/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
# Tokenize this shard with the shared SentencePiece model and write an mmap-backed dataset
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitaa \
    --output-prefix $FINAL_DIR/tokenizer509/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_520.sh ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
# Rename the raw_content field to text in place so preprocess_data.py reads the right key
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitab
echo "above deepspeed"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer520/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
# Tokenize this shard with the shared SentencePiece model and write an mmap-backed dataset
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitab \
    --output-prefix $FINAL_DIR/tokenizer520/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_521.sh ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
# Rename the raw_content field to text in place so preprocess_data.py reads the right key
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitac
echo "above deepspeed"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer521/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
# Tokenize this shard with the shared SentencePiece model and write an mmap-backed dataset
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitac \
    --output-prefix $FINAL_DIR/tokenizer521/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_548.sh ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
# Rename the raw_content field to text in place so preprocess_data.py reads the right key
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitac
echo "above deepspeed"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer548/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
# Tokenize this shard with the shared SentencePiece model and write an mmap-backed dataset
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitac \
    --output-prefix $FINAL_DIR/tokenizer548/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_568.sh ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
# Rename the raw_content field to text in place so preprocess_data.py reads the right key
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitab
echo "above deepspeed"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer568/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
# Tokenize this shard with the shared SentencePiece model and write an mmap-backed dataset
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitab \
    --output-prefix $FINAL_DIR/tokenizer568/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \