ssgplabon committed
Commit af229cb · verified · 1 Parent(s): d3a4bf6

End of training

README.md CHANGED
@@ -29,17 +29,18 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/ssg-plabon-fidelity-investments/huggingface/runs/jfvq780l)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/ssg-plabon-fidelity-investments/huggingface/runs/p3jzjwwe)
+
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.12.2
-- Transformers: 4.46.3
+- TRL: 0.13.0
+- Transformers: 4.47.0
 - Pytorch: 2.5.1+cu121
 - Datasets: 3.2.0
-- Tokenizers: 0.20.3
+- Tokenizers: 0.21.0
 
 ## Citations
 
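The hunk context above ends with the README's inference snippet, `print(output["generated_text"])`. A minimal sketch of what such a snippet typically looks like with the `transformers` text-generation pipeline; the repo id `ssgplabon/<model-name>` is a placeholder, since the model's repo name is not visible in this diff:

```python
# Sketch of the README usage snippet, assuming the standard
# transformers text-generation pipeline.
# NOTE: "ssgplabon/<model-name>" is a placeholder; substitute the real repo id.
from transformers import pipeline

generator = pipeline("text-generation", model="ssgplabon/<model-name>")
# The pipeline returns a list of candidate dicts; take the first one.
output = generator("Hello!", max_new_tokens=64)[0]
print(output["generated_text"])
```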
config.json CHANGED
@@ -27,7 +27,7 @@
   "rope_theta": 100000,
   "tie_word_embeddings": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.46.3",
+  "transformers_version": "4.47.0",
   "use_cache": true,
   "vocab_size": 49152
 }
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "pad_token_id": 2,
-  "transformers_version": "4.46.3"
+  "transformers_version": "4.47.0"
 }
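These generation defaults can be read back through the `GenerationConfig` API rather than by parsing the JSON by hand; a minimal sketch, using the same placeholder repo id as above:

```python
# Sketch: inspect the generation defaults shipped with the checkpoint.
# "ssgplabon/<model-name>" is a placeholder repo id.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("ssgplabon/<model-name>")
print(gen_config.bos_token_id)  # 1
print(gen_config.eos_token_id)  # 2
print(gen_config.pad_token_id)  # 2 -- pad shares the eos id, as in the diff
```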
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:86545a8e845d5a5ed160a76320ace59b0e2ad5d78b809c4ad88ffa5e76ac2971
+oid sha256:ba241aa7802dad18ba306ded9a8798aab4c7bf7e8853c0d9f85e37dd5708aa1f
 size 538090408
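The git-lfs pointer records only the blob's SHA-256 and byte size, not the weights themselves. A generic sketch (plain Python, not a Hugging Face API) for checking that a downloaded `model.safetensors` matches the pointer in this commit:

```python
# Sketch: verify a downloaded file against its git-lfs pointer
# (oid sha256 + size), hashing in chunks to keep memory use flat.
import hashlib
import os

def verify_lfs_blob(path: str, expected_sha256: str, expected_size: int) -> bool:
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

# Values taken from the new pointer above.
print(verify_lfs_blob(
    "model.safetensors",
    "ba241aa7802dad18ba306ded9a8798aab4c7bf7e8853c0d9f85e37dd5708aa1f",
    538090408,
))
```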
runs/Dec17_04-50-23_8763990c91f5/events.out.tfevents.1734411025.8763990c91f5.642.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f7aaf313b1a7fe8f615e461993416e90d3ced66e8bf6f228c82d5bf595db06b
+size 32385
tokenizer_config.json CHANGED
@@ -146,6 +146,7 @@
   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
   "model_max_length": 8192,
   "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a1d7a48a0d5eedddee16e7082777918cd9911a479a2d8916888fd0f9e63c05c
-size 5560
+oid sha256:1f9c41d480b033627e7a4a7fd5503bede2163f6e808a24f481aefc9e70716164
+size 5624
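`training_args.bin` is the pickled training-arguments object saved by the Trainer. If you trust the source, it can be inspected as sketched below; note that unpickling requires `weights_only=False`, which executes arbitrary pickle code, and the exact class (e.g. an SFT config subclass of `TrainingArguments`) is an assumption here:

```python
# Sketch: inspect the pickled training arguments saved alongside the model.
# Only unpickle files from sources you trust; weights_only=False runs
# arbitrary code embedded in the pickle.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)   # e.g. SFTConfig or TrainingArguments (assumed)
print(args.output_dir)
print(args.learning_rate)
```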