Commit b2b44b1, committed by AlekseyKorshuk
1 Parent(s): fc38921

huggingartists
README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/eminem")
 ```
 
-[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/3rukw5ac/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/1hflh7u6/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Eminem's lyrics.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/2mua6tz8) for full transparency and reproducibility.
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/4gvmxrna) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/2mua6tz8/artifacts) is logged and versioned.
+At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/4gvmxrna/artifacts) is logged and versioned.
 
 ## How to use
 
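The "How to use" section that follows in the README covers generation with the fine-tuned checkpoint. As a minimal sketch of that usage (assuming the model id `huggingartists/eminem`, i.e. this repository, and a standard `transformers` install):

```python
from transformers import pipeline

# Minimal sketch: load the fine-tuned checkpoint from this repo
# (model id assumed to be "huggingartists/eminem") and generate lyrics.
generator = pipeline("text-generation", model="huggingartists/eminem")

# Prompt the model with an opening line and sample a continuation.
output = generator("I am", num_return_sequences=1, max_new_tokens=50)
print(output[0]["generated_text"])
```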
evaluation.txt CHANGED
@@ -1 +1 @@
-{"eval_loss": 0.22213919460773468, "eval_runtime": 29.4043, "eval_samples_per_second": 21.936, "eval_steps_per_second": 2.755, "epoch": 3.0}
+{"eval_loss": 0.17033791542053223, "eval_runtime": 13.3261, "eval_samples_per_second": 45.099, "eval_steps_per_second": 5.703, "epoch": 2.0}
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2c3b167b74a63ec8fb077ce24d21025947fef9ecb32878a46ee4d6977cc15f0
+oid sha256:84c1e0b5ec78700742efecda3264cb6c05363dcd0cfd2603f9208a76f99a8e57
 size 497764120
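The weight files in this commit are stored as Git LFS pointers: each pointer records the spec version, the SHA-256 object id, and the byte size, and only the pointer changes in the diff. As a hedged sketch (file path and helper name are illustrative, not part of the repo), a downloaded blob could be checked against its pointer like this:

```python
import hashlib

def verify_lfs_object(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Example, using the new flax_model.msgpack pointer values shown above:
ok = verify_lfs_object(
    "flax_model.msgpack",
    "84c1e0b5ec78700742efecda3264cb6c05363dcd0cfd2603f9208a76f99a8e57",
    497764120,
)
print("pointer matches" if ok else "mismatch")
```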
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26834f0e4208938d811a9dfa581c60096828db9664c29122cbe7c108c2266f68
+oid sha256:0dd1f75aa87ace65ff5eabf8bf35c462a5ea435576d30fa3ae79699f3a222ffd
 size 995604017
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c21f7e095359913b7c6cf81d01da61fcc6b0f7f55c7374a09691747e7e324411
+oid sha256:05239515b946d463e5c208905abbc650450660236b44874d528bf2e8ff9404e4
 size 510396521
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:091b8c645ee921fc1fb21bf66e87b6404370564c2f1e26f8e92fd9a5afcce570
+oid sha256:2a51ff024d36f5b16e406a7ff85689d3f4ab6243d4b5396f02ca94ed03ed314b
 size 14567
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c3e6d52e9eda83b53901112c80d072ee8b7df374009cb1437cc93a39ef8c5f83
+oid sha256:68dcd37ec9f598b778dcfcdd0e6ff8990fdc855c792ae8971ac9bb4e2c9c2527
 size 623
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "best_metric": 0.22213919460773468,
-  "best_model_checkpoint": "output/eminem/checkpoint-455",
+  "best_metric": 0.17033791542053223,
+  "best_model_checkpoint": "output/eminem/checkpoint-460",
   "epoch": 1.0,
-  "global_step": 455,
+  "global_step": 460,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -560,11 +560,25 @@
       "eval_samples_per_second": 21.946,
       "eval_steps_per_second": 2.756,
       "step": 455
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 0.00010892206830726497,
+      "loss": 0.4359,
+      "step": 460
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.17033791542053223,
+      "eval_runtime": 12.2909,
+      "eval_samples_per_second": 48.898,
+      "eval_steps_per_second": 6.183,
+      "step": 460
     }
   ],
-  "max_steps": 1365,
-  "num_train_epochs": 3,
-  "total_flos": 474636976128000.0,
+  "max_steps": 920,
+  "num_train_epochs": 2,
+  "total_flos": 479470878720000.0,
   "trial_name": null,
   "trial_params": null
 }
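The new entries above append a training-loss record and an evaluation record at step 460 to the trainer's log history. As an illustrative sketch (not part of the repository), the updated trainer_state.json could be read back to recover the best evaluation loss from `log_history`:

```python
import json

# Illustrative only: load trainer_state.json and find the lowest eval_loss
# among the logged evaluation entries.
with open("trainer_state.json") as f:
    state = json.load(f)

eval_entries = [e for e in state["log_history"] if "eval_loss" in e]
best = min(eval_entries, key=lambda e: e["eval_loss"])
print(best["step"], best["eval_loss"])  # expected to match "best_metric" above
```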
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5bc291dc42b38a7b304b7daeb4b46cfc64328ad099cb5a00588b0246f606e58b
+oid sha256:b2478f6ac251ccbd0eb17112df1fdac929c3ed4c313a16aa904d226b8a9b10b5
 size 3247