{
  "best_metric": 0.11714969575405121,
  "best_model_checkpoint": "outputs/checkpoint-164",
  "epoch": 2.988610478359909,
  "eval_steps": 500,
  "global_step": 164,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36446469248291574,
      "grad_norm": 3.342350721359253,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.3727,
      "step": 20
    },
    {
      "epoch": 0.7289293849658315,
      "grad_norm": 1.456776738166809,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.2857,
      "step": 40
    },
    {
      "epoch": 0.9840546697038725,
      "eval_loss": 0.1871582418680191,
      "eval_runtime": 48.9194,
      "eval_samples_per_second": 3.434,
      "eval_steps_per_second": 0.429,
      "step": 54
    },
    {
      "epoch": 1.0933940774487472,
      "grad_norm": 0.6021007895469666,
      "learning_rate": 5.8e-06,
      "loss": 0.1809,
      "step": 60
    },
    {
      "epoch": 1.4578587699316627,
      "grad_norm": 0.4066278040409088,
      "learning_rate": 7.800000000000002e-06,
      "loss": 0.1344,
      "step": 80
    },
    {
      "epoch": 1.8223234624145785,
      "grad_norm": 1.681560754776001,
      "learning_rate": 9.800000000000001e-06,
      "loss": 0.1242,
      "step": 100
    },
    {
      "epoch": 1.9863325740318907,
      "eval_loss": 0.1359640508890152,
      "eval_runtime": 48.8699,
      "eval_samples_per_second": 3.438,
      "eval_steps_per_second": 0.43,
      "step": 109
    },
    {
      "epoch": 2.1867881548974943,
      "grad_norm": 0.459069162607193,
      "learning_rate": 9.984149663879994e-06,
      "loss": 0.1039,
      "step": 120
    },
    {
      "epoch": 2.55125284738041,
      "grad_norm": 0.594571590423584,
      "learning_rate": 9.929487384240103e-06,
      "loss": 0.0874,
      "step": 140
    },
    {
      "epoch": 2.9157175398633255,
      "grad_norm": 0.40587714314460754,
      "learning_rate": 9.83624518217252e-06,
      "loss": 0.0862,
      "step": 160
    },
    {
      "epoch": 2.988610478359909,
      "eval_loss": 0.11714969575405121,
      "eval_runtime": 48.9241,
      "eval_samples_per_second": 3.434,
      "eval_steps_per_second": 0.429,
      "step": 164
    }
  ],
  "logging_steps": 20,
  "max_steps": 810,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "total_flos": 6.153062564093952e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}