#!/bin/bash
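# Preprocess one split of the hn_eng_bn dataset with Megatron-DeepSpeed's
# tools/preprocess_data.py, using the trained SentencePiece tokenizer
# (enhiben_50k). Paths below assume the peacock Weka mounts.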
set -m
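# Rename the "raw_content" field to "text" in the input shard (in place),
# since preprocess_data.py reads the "text" key by default.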
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitab
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p "$FINAL_DIR/tokenizer1/"
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
echo "inside deepspeed"
pwd
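# Tokenize the shard into an mmap-backed indexed dataset, appending an
# end-of-document token after each document.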
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitab \
    --output-prefix "$FINAL_DIR/tokenizer1/" \
    --tokenizer-model "$TOKENIZER" \
    --vocab-file "$VOCAB_FILE" \
    --dataset-impl mmap \
    --tokenizer-type "$TOKENIZER_TYPE" \
    --append-eod \
    --workers 8
    # --partitions 16
    # --chunk-size 50
    # --merge-file $MERGES_FILE