#!/bin/bash
set -m

echo "above deepspeed"
TOKENIZER_SPLITS_PATH=/mnt/weka/peacock/idc/hineng/dravid/ # Directory containing the per-tokenizer output directories
INPUT_MERGE_FILES=$TOKENIZER_SPLITS_PATH/ # Root directory whose tokenizer* subdirectories will be merged
#INPUT_MERGE_FILES=$TOKENIZER_SPLITS_PATH/merged_final
#TOKENIZER_SPLITS_PATH=/mnt/weka/peacock/idc/hineng/hn_eng_bn/tok_en_bn_hi
MERGE_PATH=$TOKENIZER_SPLITS_PATH/merged_all_33/ # Output directory for the merged dataset
mkdir -p "$MERGE_PATH"
echo "$INPUT_MERGE_FILES"

TMP_COMBINED=/mnt/weka/peacock/idc/combined_all_33/
rm -rf "$TMP_COMBINED"
mkdir -p "$TMP_COMBINED" # stage all .bin/.idx files in a single directory

files=( tokenizer33 tokenizer34 tokenizer35 tokenizer36 tokenizer37 tokenizer38 tokenizer39 tokenizer40 tokenizer41 tokenizer42 tokenizer43 tokenizer44 tokenizer45 tokenizer46 tokenizer47 tokenizer48)
echo "$files";
for i in "${!files[@]}"; do
	filename=$(ls "$INPUT_MERGE_FILES/${files[i]}"/*.bin | rev | cut -d '/' -f 2 | rev) # Take the parent directory name; change the cut field depending on dir depth
	echo "$filename"
	cp "$INPUT_MERGE_FILES/${files[i]}"/*.bin "$TMP_COMBINED/${filename}.bin"
	cp "$INPUT_MERGE_FILES/${files[i]}"/*.idx "$TMP_COMBINED/${filename}.idx"
done
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
echo "inside deepspeed"
pwd
python3 tools/merge_datasets.py \
 --input "$TMP_COMBINED" \
 --output-prefix "$MERGE_PATH"/all # prefix of the generated merged files
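
# Note (assumption, not verified against this checkout): Megatron-DeepSpeed's
# tools/merge_datasets.py is expected to read every .bin/.idx pair found under
# --input and write a single merged pair at ${MERGE_PATH}/all.bin and
# ${MERGE_PATH}/all.idx, which downstream jobs can then reference directly.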