Commit 34044f7 (verified) · committed by masatochi · 1 parent: e47ca83

Training in progress, step 150, checkpoint

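All of the changed files live under last-checkpoint/, which is the layout a transformers Trainer produces when it is set up to push its most recent checkpoint to the Hub; "Training in progress, step 150, checkpoint" is the auto-generated commit message for such a push. As a rough, hedged sketch (the output directory and save_steps are assumptions; only the batch size and logging interval are taken from the trainer_state.json diff below), a configuration along these lines would yield this kind of commit every few steps:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="outputs",            # hypothetical local directory
    per_device_train_batch_size=3,   # matches "train_batch_size": 3 in trainer_state.json
    logging_steps=1,                 # matches "logging_steps": 1
    save_strategy="steps",
    save_steps=5,                    # assumption: the previous checkpoint was at step 145, this one at step 150
    push_to_hub=True,
    hub_strategy="checkpoint",       # mirrors the newest checkpoint into a last-checkpoint/ folder on the Hub
)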
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c6232ed48b9c18e085faf4119484b73bc66018b47f3bd50b5a6e541e5ccc1e03
+oid sha256:0b030ba7fcf62e5c346671de13683242d2201eadf998d69e19d12aa7b9cf8ff5
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b26583c118f37f0f901f941c1caddee16c708cd7a19dc546b58c02f36d2f5862
+oid sha256:a26632f11da375f18c00e1ae3853a6812467af817f6fda67137389cbfd9f6f6d
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b362eff1a75d9bf020525cb57285e4b861253ed25664be12ca93f1935aca2a48
+oid sha256:85dcff485150847800368b832b7fbbde9bcca50c94c2ff19cbbdf38672b04cb0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be04ea4bc9f159499b4a7b296b15e0c0e5c54743663ee8550a26340683f89e32
+oid sha256:5a946cb282348d3fba8c242cd51f3b90b3dccbd24720ee6b6397a7e493e7b92c
 size 1064
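The four files above are binary blobs tracked with Git LFS, so the repository only stores a three-line pointer per file (version, oid sha256, size). In this commit every pointer's oid changes while its size stays the same, i.e. each tensor file was rewritten with same-sized contents. A minimal, generic sketch for verifying that a downloaded blob matches the sha256 oid recorded in its pointer (the path is just the adapter file from this commit):

import hashlib

def sha256_of(path: str) -> str:
    # Stream the file in 1 MiB chunks so large checkpoint files need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Should print 0b030ba7fcf62e5c346671de13683242d2201eadf998d69e19d12aa7b9cf8ff5
# for last-checkpoint/adapter_model.safetensors after this commit.
print(sha256_of("last-checkpoint/adapter_model.safetensors"))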
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0709089797664894,
+  "epoch": 0.07335411699981662,
   "eval_steps": 34,
-  "global_step": 145,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1062,6 +1062,41 @@
       "learning_rate": 4.735678371226441e-05,
       "loss": 8.9077,
       "step": 145
+    },
+    {
+      "epoch": 0.07139800721315484,
+      "grad_norm": Infinity,
+      "learning_rate": 4.5794664352755055e-05,
+      "loss": 8.9045,
+      "step": 146
+    },
+    {
+      "epoch": 0.07188703465982028,
+      "grad_norm": Infinity,
+      "learning_rate": 4.425105606571145e-05,
+      "loss": 10.4432,
+      "step": 147
+    },
+    {
+      "epoch": 0.07237606210648573,
+      "grad_norm": Infinity,
+      "learning_rate": 4.272648599194948e-05,
+      "loss": 10.0172,
+      "step": 148
+    },
+    {
+      "epoch": 0.07286508955315117,
+      "grad_norm": Infinity,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 9.1662,
+      "step": 149
+    },
+    {
+      "epoch": 0.07335411699981662,
+      "grad_norm": Infinity,
+      "learning_rate": 3.973653636207437e-05,
+      "loss": 8.9558,
+      "step": 150
     }
   ],
   "logging_steps": 1,
@@ -1081,7 +1116,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.1964955301380096e+17,
+  "total_flos": 3.306719513935872e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null