Tags: PEFT · TensorBoard · Safetensors · llama · alignment-handbook · trl · sft · Generated from Trainer
lillian039 committed (verified)
Commit 7b39559 · 1 Parent(s): 10aa50f

End of training

README.md CHANGED
@@ -1,8 +1,13 @@
 ---
 base_model: barc0/cot-transduction-arc-heavy
+datasets:
+- barc0/cot_train_dataset_960_ms10_v2
+- barc0/cot_rearc_dataset_100_ms10
+- barc0/seeds_cot
 library_name: peft
 license: llama3.1
 tags:
+- alignment-handbook
 - trl
 - sft
 - generated_from_trainer
@@ -16,9 +21,9 @@ should probably proofread and complete it, then remove this comment. -->

 # cot-trainset-ft-transduction-v3-lora-train

-This model is a fine-tuned version of [barc0/cot-transduction-arc-heavy](https://huggingface.co/barc0/cot-transduction-arc-heavy) on the None dataset.
+This model is a fine-tuned version of [barc0/cot-transduction-arc-heavy](https://huggingface.co/barc0/cot-transduction-arc-heavy) on the barc0/cot_train_dataset_960_ms10_v2, the barc0/cot_rearc_dataset_100_ms10 and the barc0/seeds_cot datasets.
 It achieves the following results on the evaluation set:
-- Loss: 0.1333
+- Loss: 0.1921

 ## Model description

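Since the updated card keeps `library_name: peft` with `base_model: barc0/cot-transduction-arc-heavy`, this repository holds a LoRA adapter rather than full model weights. Below is a minimal loading sketch; the adapter repo id is an assumption based on the model name in the card, so substitute the actual repository id if it differs.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "barc0/cot-transduction-arc-heavy"
# Assumed adapter repo id, taken from the model card heading; replace if needed.
adapter_id = "barc0/cot-trainset-ft-transduction-v3-lora-train"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)

# Attach the LoRA adapter produced by this training run on top of the base model.
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()
```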
all_results.json CHANGED
@@ -1,5 +1,10 @@
 {
     "epoch": 1.9982608695652173,
+    "eval_loss": 0.1921355128288269,
+    "eval_runtime": 54.3907,
+    "eval_samples": 239,
+    "eval_samples_per_second": 4.394,
+    "eval_steps_per_second": 0.552,
     "total_flos": 466200922914816.0,
     "train_loss": 0.0,
     "train_runtime": 1.1199,
config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "_name_or_path": "barc0/cot-transduction-arc-heavy",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 8.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.45.0.dev0",
+  "use_cache": true,
+  "vocab_size": 128256
+}
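The added config.json mirrors the base model's Llama 3.1 8B-style architecture (32 layers, grouped-query attention with 8 KV heads, llama3 RoPE scaling). A small sketch of reading those fields back with transformers, assuming the base repo is reachable:

```python
from transformers import AutoConfig

# Read the same Llama config fields shown in the diff above.
config = AutoConfig.from_pretrained("barc0/cot-transduction-arc-heavy")
print(config.model_type)                                      # "llama"
print(config.num_hidden_layers, config.num_key_value_heads)   # 32, 8
print(config.rope_scaling["rope_type"])                       # "llama3"
```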
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 1.9982608695652173,
+  "eval_loss": 0.1921355128288269,
+  "eval_runtime": 54.3907,
+  "eval_samples": 239,
+  "eval_samples_per_second": 4.394,
+  "eval_steps_per_second": 0.552
+}
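As a sanity check (plain arithmetic, no library assumptions), the throughput fields are consistent with the sample count and runtime above, and the step rate implies roughly 30 evaluation steps, i.e. an effective eval batch size of about 8:

```python
eval_samples = 239
eval_runtime = 54.3907  # seconds
eval_steps_per_second = 0.552

# Reproduces eval_samples_per_second from the recorded values.
print(round(eval_samples / eval_runtime, 3))   # 4.394

# Implied number of eval steps and effective batch size.
steps = eval_steps_per_second * eval_runtime   # ~30 steps
print(round(steps), round(eval_samples / steps))  # 30, ~8 samples per step
```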
runs/Dec17_23-46-54_ellis-compute-02.cs.cornell.edu/events.out.tfevents.1734497324.ellis-compute-02.cs.cornell.edu.267747.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b037ef8e4dd21a2d1ce19626e976c827cd9670b7ca5a6227fec3c60512920c40
+size 359
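The TensorBoard event file itself is stored with Git LFS; the three lines above are only the pointer, recording the blob's SHA-256 and its size (359 bytes). A sketch of verifying a locally downloaded copy against that pointer, with the local filename as a placeholder:

```python
import hashlib
from pathlib import Path

# Placeholder path: point this at the downloaded TensorBoard event file.
path = Path("events.out.tfevents.1734497324.ellis-compute-02.cs.cornell.edu.267747.1")

data = path.read_bytes()
print(len(data) == 359)  # size recorded in the LFS pointer
print(hashlib.sha256(data).hexdigest()
      == "b037ef8e4dd21a2d1ce19626e976c827cd9670b7ca5a6227fec3c60512920c40")
```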