set -m
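# Rename the "raw_content" field to "text" in the input shard so that
# preprocess_data.py (which looks for a "text" key by default) can read it.
# Assumption: the shard is JSON-lines with a "raw_content" key.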
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitad
echo "above deepspeed$" | |
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
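# Active tokenizer: a locally trained SentencePiece model (en-hi-bn, 50k vocab),
# used instead of the HuggingFace NLLB tokenizer commented out above.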
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
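# Create the per-shard output directory and run from the Megatron-DeepSpeed
# checkout so the relative path tools/preprocess_data.py resolves.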
mkdir -p $FINAL_DIR/tokenizer119/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
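# Tokenize the JSONL shard into Megatron's mmap binary format (.bin/.idx),
# appending an end-of-document token and using 8 worker processes.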
python3 tools/preprocess_data.py \
--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitad \
--output-prefix $FINAL_DIR/tokenizer119/ \
--tokenizer-model $TOKENIZER \
--vocab-file $VOCAB_FILE \
--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
--append-eod --workers 8 # --partitions 16 #--chunk-size 50
# --merge-file $MERGES_FILE \
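# Expected outputs (assumption, based on Megatron's output naming convention):
# $FINAL_DIR/tokenizer119/_text_document.bin and the matching .idx index file.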