#!/bin/bash
# Tokenize one split (splitaa) of the Dravidian dataset for Megatron-DeepSpeed
# pretraining, using a trained SentencePiece tokenizer.
# Required mounts: /mnt/weka/peacock (datasets, tokenizer, Megatron-DeepSpeed checkout).

# NOTE(review): `set -m` enables job control, which is unusual for a batch
# script — presumably intentional for the launcher environment; confirm.
set -m

# preprocess_data.py expects the JSON text field to be named "text";
# the raw split stores it as "raw_content", so rewrite the key in place.
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitaa

echo "above deepspeed$"

# Output location for the tokenized binaries.
FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/

# Previous HuggingFace tokenizer configuration, kept for reference:
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer

# Current SentencePiece tokenizer (64k vocab, ta/te/kan/ml).
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
TOKENIZER_TYPE=SentencePieceTokenizer

mkdir -p "$FINAL_DIR/tokenizer110/"

# Fail fast if the Megatron-DeepSpeed checkout is missing; otherwise the
# python3 step below would run from the wrong working directory.
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || { echo "cannot cd to Megatron-DeepSpeed" >&2; exit 1; }
echo "inside deepspeed"
pwd

python3 tools/preprocess_data.py \
  --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitaa \
  --output-prefix "$FINAL_DIR/tokenizer110/" \
  --tokenizer-model "$TOKENIZER" \
  --vocab-file "$VOCAB_FILE" \
  --dataset-impl mmap \
  --tokenizer-type "$TOKENIZER_TYPE" \
  --append-eod --workers 8
# Previously-used flags, kept for reference:
# --partitions 16
# --chunk-size 50
# --merge-file $MERGES_FILE \