#!/bin/bash
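# Enable job control; likely kept so any background jobs run in their own process groups.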
set -m
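# Rename the "raw_content" JSON key to "text" in place, since preprocess_data.py
# reads the "text" field of each JSON line by default.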
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitad
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer113/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1  # abort if the checkout is missing
echo "inside deepspeed"
pwd
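# Tokenize this split into a Megatron mmap-indexed dataset (.bin/.idx pair)
# under $FINAL_DIR/tokenizer113/, appending an end-of-document token to each
# document and using 8 worker processes.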
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitad \
    --output-prefix $FINAL_DIR/tokenizer113/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
    # --merge-file $MERGES_FILE \