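# Training run configuration for the prism-qwen25-dinosiglip-224px-wrist+0_5b VLA,
# fine-tuned on the libero_90 data mix with the libero_vq_extra_action_tokenizer
# (run_id_note: vq_extra_tokenizer). The base VLM checkpoint is referenced under vla.base_vlm.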
data_root_dir: /hai/scratch/belkhale/datasets
hf_token: .hf_token
image_aug: false
is_resume: true
pretrained_checkpoint: null
resume_epoch: null
resume_step: null
run_id: prism-qwen25-dinosiglip-224px-wrist+0_5b+mx-libero-90+n1+b16+x7--vq_extra_tokenizer
run_id_note: vq_extra_tokenizer
run_root_dir: runs
save_interval: 2500
seed: 7
trackers:
- jsonl
- wandb
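# VLA training hyperparameters. Note: per_device_batch_size (16) × expected_world_size (8)
# equals global_batch_size (128), which implies a gradient-accumulation factor of 1 under the
# usual global = per-device × world-size relation (an inference from these values, not a field set here).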
vla:
  action_tokenizer: libero_vq_extra_action_tokenizer
  base_vlm: runs/prism-qwen25-extra-dinosiglip-224px+0_5b+stage-finetune+x7/
  data_mix: libero_90
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  epochs: 1000
  expected_world_size: 8
  freeze_llm_backbone: false
  freeze_vision_backbone: false
  global_batch_size: 128
  image_sequence_len: 2
  learning_rate: 2.0e-05
  lr_scheduler_type: constant
  max_grad_norm: 1.0
  max_steps: null
  per_device_batch_size: 16
  reduce_in_full_precision: true
  save_every_n_steps: 25000
  shuffle_buffer_size: 256000
  train_strategy: fsdp-full-shard
  type: prism-qwen25-dinosiglip-224px-wrist+0_5b+mx-libero-90
  unfreeze_last_llm_layer: false
  use_wrist_image: true
  vla_id: prism-qwen25-dinosiglip-224px-wrist+0_5b+mx-libero-90
  warmup_ratio: 0.0
  weight_decay: 0.0
wandb_entity: null
wandb_project: prismatic