---
apiVersion: kubeflow.org/v2beta1
kind: MPIJob
metadata:
  name: peacock-tokenizer-dravid-103
  namespace: peacock
  labels:
    app: peacock-tokenizer-dravid-103
spec:
  # One MPI slot per Gaudi card; each worker node carries 8 cards.
  slotsPerWorker: 8
  runPolicy:
    backoffLimit: 1
    cleanPodPolicy: Running
  mpiReplicaSpecs:
    Launcher:
      replicas: 1
      template:
        spec:
          hostIPC: true
          volumes:
            - name: work-dir
              persistentVolumeClaim:
                claimName: peacock-fs-pvc
          containers:
            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
              name: peacock-tokenizer-dravid-103-launcher
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - name: work-dir
                  mountPath: /mnt/weka/peacock
              command: ["/bin/bash", "-c"]
              # Folded scalar (>-): adjacent lines are joined with a space and a
              # blank line becomes a newline, so every shell command below ends
              # in ';' to stay a separate command after folding. The mpirun
              # continuation lines are more-indented, so their newlines are
              # preserved and the trailing '\' works as a bash line continuation.
              # Do not put '#' comments inside this scalar - they would become
              # part of the command string.
              args:
                - >-
                  export SYNAPSE_VERSION="1.15.1";
                  export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
                  export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
                  export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_103.sh;
                  HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
                  echo "HOSTSFILE=$HOSTSFILE";
                  MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
                  NUM_NODES=$(wc -l < $HOSTSFILE);
                  CARDS_PER_NODE=8;
                  N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
                  echo "MPI_ROOT=$MPI_ROOT";
                  echo "N_CARDS=$N_CARDS";
                  echo "MASTER_ADDR=$MASTER_ADDR";
                  sleep 20;

                  mpirun -np $N_CARDS  -npernode 8 \
                    --tag-output \
                    --allow-run-as-root \
                    --prefix $MPI_ROOT \
                    -x WORKER_DIR=$WORKER_DIR \
                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;

                  mpirun -np $N_CARDS  -npernode 8 \
                    --tag-output \
                    --allow-run-as-root \
                    --prefix $MPI_ROOT \
                    -x WORKER_DIR=$WORKER_DIR \
                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
    Worker:
      # NOTE(review): replicas is 1, while the launcher derives NUM_NODES from
      # the generated hostfile - presumably this is scaled to the number of
      # worker nodes per run; confirm before reuse.
      replicas: 1
      template:
        spec:
          volumes:
            - name: work-dir
              persistentVolumeClaim:
                claimName: peacock-fs-pvc
          tolerations:
            - key: "habana.ai/gaudi"
              operator: "Exists"
              effect: "NoSchedule"
            - key: "k8s/namespace"
              operator: "Equal"
              value: "peacock"
              effect: "NoSchedule"
          hostIPC: true
          containers:
            - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
              name: peacock-llama-worker
              imagePullPolicy: IfNotPresent
              securityContext:
                capabilities:
                  add:
                    - SYSLOG
              resources:
                limits:
                  habana.ai/gaudi: 8
                  hugepages-2Mi: 300Gi
                  memory: 700Gi
                  cpu: 150
                requests:
                  habana.ai/gaudi: 8
                  hugepages-2Mi: 300Gi
                  memory: 700Gi
                  cpu: 150
              volumeMounts:
                - name: work-dir
                  mountPath: /mnt/weka/peacock