applied-ai-018 committed on
Commit 68b2ea9 · verified · 1 Parent(s): 3ce08c3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. english_only/tok_english_only/shuffled_files.txt +0 -0
  2. english_only/tok_english_only/tok_files_check/tokenizer_0.sh +25 -0
  3. english_only/tok_english_only/tok_files_check/tokenizer_1.sh +25 -0
  4. english_only/tok_english_only/tok_files_check/tokenizer_10.sh +25 -0
  5. english_only/tok_english_only/tok_files_check/tokenizer_11.sh +25 -0
  6. english_only/tok_english_only/tok_files_check/tokenizer_12.sh +25 -0
  7. english_only/tok_english_only/tok_files_check/tokenizer_13.sh +25 -0
  8. english_only/tok_english_only/tok_files_check/tokenizer_14.sh +25 -0
  9. english_only/tok_english_only/tok_files_check/tokenizer_15.sh +25 -0
  10. english_only/tok_english_only/tok_files_check/tokenizer_16.sh +25 -0
  11. english_only/tok_english_only/tok_files_check/tokenizer_17.sh +25 -0
  12. english_only/tok_english_only/tok_files_check/tokenizer_18.sh +25 -0
  13. english_only/tok_english_only/tok_files_check/tokenizer_19.sh +25 -0
  14. english_only/tok_english_only/tok_files_check/tokenizer_2.sh +25 -0
  15. english_only/tok_english_only/tok_files_check/tokenizer_20.sh +25 -0
  16. english_only/tok_english_only/tok_files_check/tokenizer_21.sh +25 -0
  17. english_only/tok_english_only/tok_files_check/tokenizer_22.sh +25 -0
  18. english_only/tok_english_only/tok_files_check/tokenizer_23.sh +25 -0
  19. english_only/tok_english_only/tok_files_check/tokenizer_24.sh +25 -0
  20. english_only/tok_english_only/tok_files_check/tokenizer_25.sh +25 -0
  21. english_only/tok_english_only/tok_files_check/tokenizer_26.sh +25 -0
  22. english_only/tok_english_only/tok_files_check/tokenizer_27.sh +25 -0
  23. english_only/tok_english_only/tok_files_check/tokenizer_28.sh +25 -0
  24. english_only/tok_english_only/tok_files_check/tokenizer_29.sh +25 -0
  25. english_only/tok_english_only/tok_files_check/tokenizer_3.sh +25 -0
  26. english_only/tok_english_only/tok_files_check/tokenizer_30.sh +25 -0
  27. english_only/tok_english_only/tok_files_check/tokenizer_31.sh +25 -0
  28. english_only/tok_english_only/tok_files_check/tokenizer_32.sh +25 -0
  29. english_only/tok_english_only/tok_files_check/tokenizer_33.sh +25 -0
  30. english_only/tok_english_only/tok_files_check/tokenizer_34.sh +25 -0
  31. english_only/tok_english_only/tok_files_check/tokenizer_35.sh +25 -0
  32. english_only/tok_english_only/tok_files_check/tokenizer_36.sh +25 -0
  33. english_only/tok_english_only/tok_files_check/tokenizer_37.sh +25 -0
  34. english_only/tok_english_only/tok_files_check/tokenizer_38.sh +25 -0
  35. english_only/tok_english_only/tok_files_check/tokenizer_39.sh +25 -0
  36. english_only/tok_english_only/tok_files_check/tokenizer_4.sh +25 -0
  37. english_only/tok_english_only/tok_files_check/tokenizer_5.sh +25 -0
  38. english_only/tok_english_only/tok_files_check/tokenizer_6.sh +25 -0
  39. english_only/tok_english_only/tok_files_check/tokenizer_7.sh +25 -0
  40. english_only/tok_english_only/tok_files_check/tokenizer_8.sh +25 -0
  41. english_only/tok_english_only/tok_files_check/tokenizer_9.sh +25 -0
  42. english_only/tok_english_only/unshuffled.txt +0 -0
  43. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_800.yaml +104 -0
  44. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_801.yaml +104 -0
  45. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_802.yaml +104 -0
  46. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_803.yaml +104 -0
  47. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_805.yaml +104 -0
  48. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_806.yaml +104 -0
  49. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_807.yaml +104 -0
  50. hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_808.yaml +104 -0
english_only/tok_english_only/shuffled_files.txt ADDED
The diff for this file is too large to render. See raw diff
 
english_only/tok_english_only/tok_files_check/tokenizer_0.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaa
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer0/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaa \
+ --output-prefix $FINAL_DIR/tokenizer0/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_1.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalab
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer1/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalab \
+ --output-prefix $FINAL_DIR/tokenizer1/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_10.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalak
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer10/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalak \
+ --output-prefix $FINAL_DIR/tokenizer10/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_11.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalal
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer11/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalal \
+ --output-prefix $FINAL_DIR/tokenizer11/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_12.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalam
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer12/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalam \
+ --output-prefix $FINAL_DIR/tokenizer12/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_13.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalan
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer13/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalan \
+ --output-prefix $FINAL_DIR/tokenizer13/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_14.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalao
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer14/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalao \
+ --output-prefix $FINAL_DIR/tokenizer14/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_15.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalap
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer15/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalap \
+ --output-prefix $FINAL_DIR/tokenizer15/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_16.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaq
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer16/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaq \
+ --output-prefix $FINAL_DIR/tokenizer16/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_17.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalar
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer17/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalar \
+ --output-prefix $FINAL_DIR/tokenizer17/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_18.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalas
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer18/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalas \
+ --output-prefix $FINAL_DIR/tokenizer18/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_19.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalat
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer19/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalat \
+ --output-prefix $FINAL_DIR/tokenizer19/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_2.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalac
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer2/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalac \
+ --output-prefix $FINAL_DIR/tokenizer2/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_20.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalau
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer20/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalau \
+ --output-prefix $FINAL_DIR/tokenizer20/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_21.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalav
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer21/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalav \
+ --output-prefix $FINAL_DIR/tokenizer21/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_22.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaw
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer22/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaw \
+ --output-prefix $FINAL_DIR/tokenizer22/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_23.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalax
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer23/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalax \
+ --output-prefix $FINAL_DIR/tokenizer23/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_24.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalay
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer24/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalay \
+ --output-prefix $FINAL_DIR/tokenizer24/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_25.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaz
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer25/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaz \
+ --output-prefix $FINAL_DIR/tokenizer25/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_26.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalba
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer26/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalba \
+ --output-prefix $FINAL_DIR/tokenizer26/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_27.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbb
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer27/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbb \
+ --output-prefix $FINAL_DIR/tokenizer27/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_28.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbc
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer28/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbc \
+ --output-prefix $FINAL_DIR/tokenizer28/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_29.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbd
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer29/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbd \
+ --output-prefix $FINAL_DIR/tokenizer29/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_3.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalad
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer3/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalad \
+ --output-prefix $FINAL_DIR/tokenizer3/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_30.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbe
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer30/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbe \
+ --output-prefix $FINAL_DIR/tokenizer30/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_31.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer31/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbf \
+ --output-prefix $FINAL_DIR/tokenizer31/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_32.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbg
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer32/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbg \
+ --output-prefix $FINAL_DIR/tokenizer32/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_33.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbh
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer33/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbh \
+ --output-prefix $FINAL_DIR/tokenizer33/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_34.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbi
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer34/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbi \
+ --output-prefix $FINAL_DIR/tokenizer34/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_35.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbj
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer35/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbj \
+ --output-prefix $FINAL_DIR/tokenizer35/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_36.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbk
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer36/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbk \
+ --output-prefix $FINAL_DIR/tokenizer36/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_37.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbl
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer37/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbl \
+ --output-prefix $FINAL_DIR/tokenizer37/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_38.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbm
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer38/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbm \
+ --output-prefix $FINAL_DIR/tokenizer38/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_39.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbn
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer39/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalbn \
+ --output-prefix $FINAL_DIR/tokenizer39/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_4.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalae
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer4/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalae \
+ --output-prefix $FINAL_DIR/tokenizer4/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_5.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaf
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer5/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaf \
+ --output-prefix $FINAL_DIR/tokenizer5/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_6.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalag
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer6/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalag \
+ --output-prefix $FINAL_DIR/tokenizer6/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_7.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalah
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer7/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalah \
+ --output-prefix $FINAL_DIR/tokenizer7/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_8.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalai
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer8/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalai \
+ --output-prefix $FINAL_DIR/tokenizer8/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/tok_files_check/tokenizer_9.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaj
+ echo "above deepspeed$"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/english_only_check/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/encode_32k/
+ TOKENIZER=$TOKENIZER_PATH/en_code_32kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/en_code_32kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer9/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/english_only//tok_english_only/split//finalaj \
+ --output-prefix $FINAL_DIR/tokenizer9/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
english_only/tok_english_only/unshuffled.txt ADDED
The diff for this file is too large to render. See raw diff
 
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_800.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-800
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-800
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-800-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_800.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
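Every manifest in tok_jobs_9 has this exact shape; only the shard index changes (the metadata name, the app label, the launcher container name, and the tokenizer_N.sh path exported as TOKENIZER_CMD). A hypothetical generator, assuming a tokenizer_TEMPLATE.yaml in which the literal placeholder NNN stands for the index, could stamp out the whole batch:

# sketch: generate the per-shard MPIJob manifests from one template (NNN = shard index)
for i in 800 801 802 803 805 806 807 808; do
    sed "s/NNN/${i}/g" tokenizer_TEMPLATE.yaml > "tokenizer_${i}.yaml"
done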
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_801.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-801
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-801
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-801-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_801.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
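The launcher derives its mpirun geometry from the Open MPI hostfile, one line per worker with 8 slots each: with a single Worker replica, NUM_NODES is 1 and N_CARDS is 8. The same arithmetic in isolation (the hostfile path here is a placeholder):

# sketch: reproduce the launcher's card-count arithmetic outside the pod
HOSTSFILE=/tmp/hostfile              # assumed format: "<host> slots=8" per line
NUM_NODES=$(wc -l < "$HOSTSFILE")
CARDS_PER_NODE=8
N_CARDS=$((NUM_NODES * CARDS_PER_NODE))
echo "NUM_NODES=$NUM_NODES N_CARDS=$N_CARDS"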
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_802.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-802
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-802
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-802-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_802.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_803.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-803
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-803
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-803-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_803.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_805.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-805
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-805
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-805-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_805.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_806.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-806
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-806
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-806-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_806.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_807.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-807
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-807
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-807-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_807.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
hn_eng_bn/tok_en-bn-hn/tok_jobs_9/tokenizer_808.yaml ADDED
@@ -0,0 +1,104 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+   name: peacock-tokenizer-en-bn-hn-808
+   namespace: peacock
+   labels:
+     app: peacock-tokenizer-en-bn-hn-808
+ spec:
+   slotsPerWorker: 8
+   runPolicy:
+     backoffLimit: 1
+     cleanPodPolicy: Running
+   mpiReplicaSpecs:
+     Launcher:
+       replicas: 1
+       template:
+         spec:
+           hostIPC: true
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-tokenizer-en-bn-hn-808-launcher
+               imagePullPolicy: IfNotPresent
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
+               command: ["/bin/bash", "-c"]
+               args:
+                 - >-
+                   export SYNAPSE_VERSION="1.15.1";
+                   export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+                   export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                   export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_808.sh
+                   HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                   echo "HOSTSFILE=$HOSTSFILE";
+                   MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                   NUM_NODES=$(wc -l < $HOSTSFILE);
+                   CARDS_PER_NODE=8;
+                   N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                   echo "MPI_ROOT=$MPI_ROOT";
+                   echo "N_CARDS=$N_CARDS";
+                   echo "MASTER_ADDR=$MASTER_ADDR";
+                   sleep 20;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+                   mpirun -np $N_CARDS -npernode 8 \
+                     --tag-output \
+                     --allow-run-as-root \
+                     --prefix $MPI_ROOT \
+                     -x WORKER_DIR=$WORKER_DIR \
+                     -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+     Worker:
+       replicas: 1
+       template:
+         spec:
+           volumes:
+             - name: work-dir
+               persistentVolumeClaim:
+                 claimName: peacock-fs-pvc
+           tolerations:
+             - key: "habana.ai/gaudi"
+               operator: "Exists"
+               effect: "NoSchedule"
+             - key: "k8s/namespace"
+               operator: "Equal"
+               value: "peacock"
+               effect: "NoSchedule"
+           hostIPC: true
+           containers:
+             - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+               name: peacock-llama-worker
+               imagePullPolicy: IfNotPresent
+               securityContext:
+                 capabilities:
+                   add:
+                     - SYSLOG
+               resources:
+                 limits:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+                 requests:
+                   habana.ai/gaudi: 8
+                   hugepages-2Mi: 300Gi
+                   memory: 700Gi
+                   cpu: 150
+               volumeMounts:
+                 - name: work-dir
+                   mountPath: /mnt/weka/peacock
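Assuming the Kubeflow MPI Operator that provides the kubeflow.org/v2beta1 MPIJob kind is installed in the cluster, one way (not shown in this commit) to launch the batch is to apply the whole directory and then poll the job objects:

# sketch: submit every manifest in tok_jobs_9 and check job status
kubectl apply -f hn_eng_bn/tok_en-bn-hn/tok_jobs_9/
kubectl -n peacock get mpijobs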