#!/bin/bash
set -m # enable job control (monitor mode)
echo "above deepspeed"
TOKENIZER_SPLITS_PATH=/mnt/weka/peacock/idc/hineng/dravid/ # directory containing the per-split tokenizer outputs
INPUT_MERGE_FILES=$TOKENIZER_SPLITS_PATH/ # base directory whose tokenizer* subdirectories are read below
#INPUT_MERGE_FILES=$TOKENIZER_SPLITS_PATH/merged_final
#TOKENIZER_SPLITS_PATH=/mnt/weka/peacock/idc/hineng/hn_eng_bn/tok_en_bn_hi
MERGE_PATH=$TOKENIZER_SPLITS_PATH/merged_all_113/ # output directory for the merged dataset
mkdir -p "$MERGE_PATH"
echo "$INPUT_MERGE_FILES"
TMP_COMBINED=/mnt/weka/peacock/idc/combined_all_113/
rm -rf "$TMP_COMBINED"
mkdir -p "$TMP_COMBINED" # staging area: all .bin/.idx pairs collected into a single flat directory
files=( tokenizer113 tokenizer114 tokenizer115 tokenizer116 tokenizer117 tokenizer118 tokenizer119 tokenizer120 tokenizer121 tokenizer122 tokenizer123 tokenizer124 tokenizer125 tokenizer126 tokenizer127 tokenizer128)
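# A minimal alternative (a sketch, not part of the original script): since the
# split names form the contiguous range 113..128, the array above could also
# be built with brace expansion:
#   files=( tokenizer{113..128} )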
echo "$files";
for i in "${!files[@]}"; do
    # Use the parent directory name (second-to-last path component) as the
    # output basename, e.g. .../tokenizer113/foo.bin -> tokenizer113.
    filename=$(ls "$INPUT_MERGE_FILES/${files[i]}"/*.bin | rev | cut -d '/' -f 2 | rev) # adjust the -f field if the directory depth changes
    echo "$filename"
    cp "$INPUT_MERGE_FILES/${files[i]}"/*.bin "$TMP_COMBINED/${filename}.bin"
    cp "$INPUT_MERGE_FILES/${files[i]}"/*.idx "$TMP_COMBINED/${filename}.idx"
done
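# Optional sanity check (an addition, not in the original script): each split
# should have contributed exactly one .bin/.idx pair to the staging directory.
n_bin=$(ls "$TMP_COMBINED"/*.bin 2>/dev/null | wc -l)
if [ "$n_bin" -ne "${#files[@]}" ]; then
    echo "warning: expected ${#files[@]} .bin files in $TMP_COMBINED, found $n_bin"
fi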
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
echo "inside deepspeed"
pwd
python3 tools/merge_datasets.py \
    --input "$TMP_COMBINED" \
    --output-prefix "$MERGE_PATH"/all # prefix (not suffix) of the generated output files
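# Hedged follow-up (an addition): list the merge output so the run can be
# verified at a glance; the exact filenames written under the prefix depend on
# the Megatron-DeepSpeed version.
ls -lh "$MERGE_PATH"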