diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_711.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_711.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4451858b422bb8657c5c76bc2a2f19561cb50a4d
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_711.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split/finalbh_splitab_splitae
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer711/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split/finalbh_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer711/ \
+  --tokenizer-model $TOKENIZER \
+  --vocab-file $VOCAB_FILE  \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50   
+
+  # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_100.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_100.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72cbac8217e7a66f616bc03bd86b0047015a7168
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_100.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-100
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-100
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-100-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_100.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_101.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_101.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b98e7ea31dc0be5297ee2d4db6798b5b8494c22e
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_101.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-101
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-101
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-101-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_101.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_102.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_102.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce966386eaf172266e5ddf537d8b3a771f608fe5
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_102.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-102
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-102
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-102-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_102.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_103.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_103.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b02ba1aa8c7db31843dffa59cf32f0f889ffbbff
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_103.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-103
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-103
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-103-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_103.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_104.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_104.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af733000b8c462362943f269cbb0843919d08727
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_104.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-104
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-104
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-104-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_104.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_106.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_106.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..420931f4197e1df38203517a7780571837908c14
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_106.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-106
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-106
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-106-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_106.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_107.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_107.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5dc0bc8f2c453895eba9bb739204d4c6b5d431f6
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_107.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-107
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-107
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-107-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_107.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_108.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_108.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..717dc9f884ec9367a61b611bf294792949f00680
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_108.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-108
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-108
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-108-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_108.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_110.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_110.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34862dc26f22d21d928b80988c0aeb454f4f5b8e
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_110.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-110
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-110
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-110-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_110.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_113.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_113.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..adaa683000959f81e7b86dbd3c104d6beab592aa
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_113.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-113
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-113
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-113-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_113.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_114.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_114.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2ba662b8159fa06a090e5e1fa0a6bbac2ac2bfa3
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_114.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-114
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-114
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-114-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_114.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_115.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_115.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dd6791fd96a4d27c760e5a9aff06fffaa804018a
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_115.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-115
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-115
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-115-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_115.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_116.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_116.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..50cf4261f6d42e35e97c6caa9e8271f60889c614
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_116.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-116
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-116
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-116-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_116.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_117.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_117.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac41297fac6db01908438fd615870845a983836f
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_117.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-117
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-117
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-117-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_117.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_118.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_118.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5e4461efadc6189cf44987a7b016b352c25ff120
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_118.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-118
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-118
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-118-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_118.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < "$HOSTSFILE");
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_120.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_120.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5fbb29e4220d884af98d697fe787b7ed9ab29839
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_120.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-120
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-120
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-120-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_120.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_122.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_122.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85fcacc3ec177a05131c011b8e8ee7c3e1f93764
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_122.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-122
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-122
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-122-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_122.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_124.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_124.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93eb1f62809fc56bec18876bc3be4ca2c6f0f422
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_124.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-124
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-124
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-124-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_124.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_125.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_125.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2de688a2f003c9adc7f2017c39dd77ef23187aa4
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_125.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-125
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-125
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-125-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_125.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_128.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_128.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18a00ac5de2951dd14a0d23720828dcce1e354d2
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_128.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-128
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-128
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-128-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_128.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_129.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_129.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bbb5be658a49af8f6a83e128bd4b68e9c6962641
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_129.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-129
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-129
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-129-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_129.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_130.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_130.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b49f509da63aa85ee3d4f988997c14dd571145b
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_130.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-130
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-130
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-130-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_130.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_131.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_131.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e4355b2f9b4ccd8c416da66dcda01593f8a7781
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_131.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-131
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-131
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-131-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_131.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_132.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_132.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..674c2a8cf0e61a564ae25c58895d9e4e0bdf0e53
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_132.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-132
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-132
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-132-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_132.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_133.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_133.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7db62ef7e848a4ad8f249c1fcd866a2247379839
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_133.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-133
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-133
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-133-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_133.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_134.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_134.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ddb79847a862f0a428e9c02701ac44d867871fec
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_134.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-134
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-134
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-134-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_134.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_135.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_135.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1ad11850dc6fde6ca1e8a90675d4a0701f1152d0
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_135.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-135
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-135
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-135-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_135.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_136.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_136.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a16ef892f53ffcde6fb7b9f0942e3e30afc0e0ae
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_136.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-136
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-136
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-136-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_136.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_137.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_137.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ae54794b13aa9929998bac98417a7ff635273c2
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_137.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-137
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-137
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-137-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_137.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_138.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_138.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..462a9eea50d664611b6dd14c6063bef9a496d51b
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_138.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-138
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-138
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-138-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_138.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_139.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_139.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b20ec18322261c04b324d5c24d9446b46f6508fd
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_139.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-139
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-139
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-139-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                # Literal block scalar (|) keeps one shell command per line;
+                # the folded form (>-) silently word-joined the unterminated
+                # export lines into the preceding command.
+                - |
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_139.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_141.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_141.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bca693691816386d02e133db917706b9e2f70918
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_141.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-141
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-141
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-141-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_141.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_142.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_142.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6563b2491dd1a971c95eb2650b976078e676754
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_142.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-142
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-142
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-142-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_142.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_143.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_143.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..928c2eb8651130702d8eda7ea9cd4b445e169655
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_143.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-143
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-143
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-143-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_143.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_145.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_145.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d8a1d1337290ac86e61cc30cc1217d656dd4201
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_145.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-145
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-145
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-145-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_145.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_147.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_147.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7f5e80c3cb007e5cf1b7ae0815465ca30c22044a
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_147.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-147
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-147
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-147-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_147.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_148.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_148.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ded5faae3d0fb3aa3a9b63a5f48124b32454d0a8
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_148.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-148
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-148
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-148-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_148.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_149.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_149.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a49c7dd0cb6e151037a34a17527a462d00611893
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_149.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-149
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-149
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-149-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_149.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_150.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_150.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d7183ae0965caef0c4c36f7bedabcd7e24ca0a8a
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_150.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-150
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-150
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-150-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD="/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_150.sh";
+                  HOSTSFILE="$OMPI_MCA_orte_default_hostfile";
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 "$HOSTSFILE" | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_151.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_151.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..43e643508e22b2209101732901b40648c5254a34
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_151.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-151
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-151
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-151-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_151.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_152.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_152.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af11134f02daf85a7933a87177ab0398cc5defb7
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_152.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-152
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-152
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-152-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_152.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_153.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_153.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1db6a3997474e6737054e82e60ed9e6f22655ecd
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_153.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-153
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-153
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-153-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_153.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_155.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_155.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..43c48800f219e53ade21e23704b2583b454c9069
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_155.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-155
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-155
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-155-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_155.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_156.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_156.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec3db2bef7b04dc4c288b6e613a12af602302220
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_156.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-156
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-156
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-156-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_156.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_157.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_157.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c33165f029346aefadf54d35b1d1438508d30033
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_157.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-157
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-157
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-157-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_157.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_158.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_158.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c726525b290affea3fc9a56540f54b4d2e09ca0b
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_158.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-158
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-158
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-158-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_158.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_159.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_159.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..81c84105247be4144e010c7453a449b6d9733579
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_159.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-159
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-159
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-159-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_159.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_160.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_160.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f519cb9d6c7c8bdf71aef5598977e5f54feeeacd
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_160.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-160": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_160.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-160
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-160
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-160-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_160.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_162.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_162.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..95de1cdf68325b711e466d76a0b75e58b25379f6
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_162.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-162": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_162.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-162
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-162
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-162-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_162.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_163.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_163.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f3dda66fadd8aeb930f408808c5873426a6cd2b4
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_163.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-163": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_163.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-163
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-163
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-163-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_163.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_165.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_165.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..301ecbafc13c6063135ed1094f64e01875e337b8
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_165.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-165": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_165.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-165
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-165
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-165-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_165.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_166.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_166.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fea8de88ef2d3b993912ec346c06ea6346a2b7d5
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_166.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-166": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_166.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-166
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-166
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-166-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_166.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_168.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_168.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e00a99fc7d3d037f6952c05842046ae1d11b7b0b
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_168.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-168": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_168.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-168
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-168
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-168-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_168.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_169.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_169.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc2af2ed17322fa84f6c5a4a106cf24fb4fc9bba
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_169.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-169": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_169.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-169
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-169
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-169-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_169.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_170.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_170.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d87c2d218e3f4248d45c297d62580cc51d6c2e8
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_170.yaml
@@ -0,0 +1,106 @@
+# MPIJob "peacock-tokenizer-en-bn-hn-170": one launcher + one 8-card Gaudi
+# worker; the launcher runs Megatron-DeepSpeed setup, then tokenizer_170.sh.
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-170
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-170
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-170-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_170.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_171.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_171.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cf621bed2ec4c54be8f950955e1c864f164117e
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_171.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-171
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-171
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-171-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_171.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_173.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_173.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5508200fa31eb38b7a80e7d9511cba28d1b7c215
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_173.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-173
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-173
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-173-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_173.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_174.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_174.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..efc25ec044a9a7aa9192780fcbf1011fb2b68331
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_174.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-174
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-174
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-174-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_174.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_175.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_175.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f0c158f8b4d295737013bdb9b25ab928b37dbadc
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_175.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-175
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-175
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-175-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_175.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_179.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_179.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b941c54ffbe60fdd4ba90a922df5469e29804b82
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_179.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-179
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-179
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-179-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_179.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_182.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_182.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..828fc1307249710677a0b435ea5085c976f68a9b
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_182.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-182
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-182
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-182-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_182.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_184.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_184.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..179a314da4bec940ab60ece0ddf4fb40639dc724
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_184.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-184
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-184
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-184-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_184.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_185.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_185.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68401733570ba28f717591347495cd6941b97c46
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_185.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1  # Kubeflow MPI Operator v2beta1 API
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-185
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-185
+spec:
+  slotsPerWorker: 8  # MPI slots per worker pod; matches the 8 Gaudi cards requested below
+  runPolicy:
+    backoffLimit: 1  # retry a failed launcher at most once
+    cleanPodPolicy: Running  # on job completion, clean up only pods still running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true  # share host IPC namespace — NOTE(review): presumably for large shm segments; confirm necessity
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc  # shared filesystem mounted at /mnt/weka/peacock below
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-185-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_185.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+                    
+                   
+    Worker:
+      replicas: 1  # single worker node; launcher derives N_CARDS from the MPI hostfile
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"  # allow scheduling onto tainted Gaudi nodes
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG  # NOTE(review): presumably required by Habana tooling for kernel log access; confirm
+              resources:
+                limits:
+                  habana.ai/gaudi: 8  # all 8 accelerator cards on the node
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:  # requests equal limits (Guaranteed QoS for this container)
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_186.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_186.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..70f26f9756b8fb60e42d065650f11b2b98fd09e3
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_186.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-186
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-186
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-186-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_186.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_187.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_187.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..081b6a6bb264a65165a5203fa500be15cbc79862
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_187.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-187
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-187
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-187-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_187.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_188.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_188.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57728cb0d5942ec36c6c735bdbb5e6d1047479f8
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_188.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-188
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-188
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-188-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_188.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_190.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_190.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..470ab1b6ed18fadb0e4452cf7674f2f06dc8cbaf
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_190.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-190
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-190
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-190-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_190.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_191.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_191.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3f12c2bfd2493f01049e601edcd8a42c7cd59166
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_191.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-191
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-191
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-191-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_191.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_194.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_194.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..533015fba09a612310238b8b82d1f5c236e3c23f
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_194.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-194
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-194
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-194-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_194.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_195.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_195.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f1ff460404bd8ce8e104402b665c4d4162622907
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_195.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-195
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-195
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-195-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_195.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_196.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_196.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d6d0fe1a9979571cd0ed0b4754196e52ebd6e33
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_196.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-196
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-196
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-196-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_196.sh
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_197.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_197.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5be32c6f1fde4b3ca230c91d5bbf73995c5d7038
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_197.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-197
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-197
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-197-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_197.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_198.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_198.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..53ee645c8400489bf306978a5e7d56e4c3912488
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_198.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-198
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-198
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-198-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_198.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_199.yaml b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_199.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..38bf1a14c3c347027528104ea8374733af7088d3
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_jobs_2/tokenizer_199.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-en-bn-hn-199
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-en-bn-hn-199
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-tokenizer-en-bn-hn-199-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+
+                  export SYNAPSE_VERSION="1.15.1";
+                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_199.sh;
+                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+                  echo "HOSTSFILE=$HOSTSFILE";
+                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n "s/[[:space:]]slots.*//p")";
+                  NUM_NODES=$(wc -l < $HOSTSFILE);
+                  CARDS_PER_NODE=8;
+                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+                  echo "MPI_ROOT=$MPI_ROOT";
+                  echo "N_CARDS=$N_CARDS";
+                  echo "MASTER_ADDR=$MASTER_ADDR";
+                  sleep 20;
+
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+                  mpirun -np $N_CARDS  -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+            - name: work-dir
+              persistentVolumeClaim:
+                claimName: peacock-fs-pvc
+          tolerations:
+            - key: "habana.ai/gaudi"
+              operator: "Exists"
+              effect: "NoSchedule"
+            - key: "k8s/namespace"
+              operator: "Equal"
+              value: "peacock"
+              effect: "NoSchedule"
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+              name: peacock-llama-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 700Gi
+                  cpu: 150
+              volumeMounts:
+                - name: work-dir
+                  mountPath: /mnt/weka/peacock