kavinduw committed
Commit 428eff9 · verified · Parent: 3b966d0

Upload cfg.yaml

Files changed (1): cfg.yaml (+106, -0)
cfg.yaml ADDED
@@ -0,0 +1,106 @@
+ architecture:
+   backbone_dtype: int4
+   force_embedding_gradients: false
+   gradient_checkpointing: true
+   intermediate_dropout: 0.0
+   pretrained: true
+   pretrained_weights: ''
+ augmentation:
+   neftune_noise_alpha: 0.0
+   random_parent_probability: 0.0
+   skip_parent_probability: 0.0
+   token_mask_probability: 0.0
+ dataset:
+   add_eos_token_to_answer: true
+   add_eos_token_to_prompt: true
+   add_eos_token_to_system: true
+   answer_column: translated_response
+   chatbot_author: H2O.ai
+   chatbot_name: h2oGPT
+   data_sample: 1.0
+   data_sample_choice:
+   - Train
+   - Validation
+   limit_chained_samples: false
+   mask_prompt_labels: true
+   parent_id_column: None
+   personalize: false
+   prompt_column:
+   - translated_question
+   system_column: system_prompt
+   text_answer_separator: <|answer|>
+   text_prompt_start: <|prompt|>
+   text_system_start: <|system|>
+   train_dataframe: /home/llmstudio/mount/data/user/oraca_with_long/train_orca_with_long.parquet
+   validation_dataframe: /home/llmstudio/mount/data/user/oraca_with_long/test_orca_with_long.parquet
+   validation_size: 0.01
+   validation_strategy: custom
+ environment:
+   compile_model: false
+   deepspeed_reduce_bucket_size: 1000000
+   deepspeed_stage3_param_persistence_threshold: 1000000
+   deepspeed_stage3_prefetch_bucket_size: 1000000
+   find_unused_parameters: false
+   gpus:
+   - '0'
+   huggingface_branch: main
+   mixed_precision: false
+   number_of_workers: 8
+   seed: -1
+   trust_remote_code: true
+   use_deepspeed: false
+ experiment_name: singGPT-danube2-1-8b-v6 with sys prompt
+ llm_backbone: h2oai/h2o-danube2-1.8b-chat
+ logging:
+   logger: None
+   neptune_project: ''
+ output_directory: /home/llmstudio/mount/output/user/singGPT-danube2-1-8b-v6 with sys
+   prompt/
+ prediction:
+   batch_size_inference: 0
+   do_sample: false
+   max_length_inference: 256
+   metric: Perplexity
+   metric_gpt_model: gpt-3.5-turbo-0301
+   metric_gpt_template: general
+   min_length_inference: 2
+   num_beams: 1
+   num_history: 4
+   repetition_penalty: 1.0
+   stop_tokens: ''
+   temperature: 0.0
+   top_k: 0
+   top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+   add_prefix_space: false
+   add_prompt_answer_tokens: false
+   max_length: 7904
+   max_length_answer: 1152
+   max_length_prompt: 6560
+   padding_quantile: 1.0
+   use_fast: true
+ training:
+   batch_size: 2
+   differential_learning_rate: 1.0e-05
+   differential_learning_rate_layers: []
+   drop_last_batch: true
+   epochs: 1
+   evaluate_before_training: true
+   evaluation_epochs: 0.25
+   grad_accumulation: 1
+   gradient_clip: 0.5
+   learning_rate: 5.0e-05
+   lora: true
+   lora_alpha: 16
+   lora_dropout: 0.05
+   lora_r: 4
+   lora_target_modules: ''
+   loss_function: TokenAveragedCrossEntropy
+   optimizer: AdamW
+   save_best_checkpoint: false
+   schedule: Cosine
+   train_validation_data: false
+   use_flash_attention_2: false
+   warmup_epochs: 0.0
+   weight_decay: 0.0
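
The file adds an H2O LLM Studio experiment configuration: a LoRA fine-tune (r=4, alpha=16, dropout 0.05) of h2oai/h2o-danube2-1.8b-chat with the base weights held in int4. As a rough, unofficial Python sketch of what those key settings map to in plain transformers/peft code (H2O LLM Studio's own training loop is not reproduced here; the 4-bit compute dtype is an assumption, since the cfg only says int4; and build_prompt is a hypothetical helper illustrating the dataset.text_* separators):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
    from peft import LoraConfig, get_peft_model

    BACKBONE = "h2oai/h2o-danube2-1.8b-chat"  # llm_backbone

    tokenizer = AutoTokenizer.from_pretrained(BACKBONE, use_fast=True)  # tokenizer.use_fast

    # architecture.backbone_dtype: int4 -> load the base weights 4-bit quantized
    model = AutoModelForCausalLM.from_pretrained(
        BACKBONE,
        quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,  # assumption: compute dtype is not in the cfg
        ),
        trust_remote_code=True,  # environment.trust_remote_code
    )
    model.gradient_checkpointing_enable()  # architecture.gradient_checkpointing

    # training.lora_r / lora_alpha / lora_dropout; lora_target_modules is ''
    # in the cfg, so peft's per-architecture defaults apply here
    model = get_peft_model(
        model, LoraConfig(r=4, lora_alpha=16, lora_dropout=0.05, task_type="CAUSAL_LM")
    )

    def build_prompt(system: str, question: str, answer: str, eos: str) -> str:
        """Hypothetical helper: assemble one training example with the
        dataset.text_* separators; add_eos_token_to_system/prompt/answer
        are all true, so each segment ends with the EOS token."""
        return (
            f"<|system|>{system}{eos}"
            f"<|prompt|>{question}{eos}"
            f"<|answer|>{answer}{eos}"
        )

    text = build_prompt(
        system="...",    # dataset.system_column: system_prompt
        question="...",  # dataset.prompt_column: translated_question
        answer="...",    # dataset.answer_column: translated_response
        eos=tokenizer.eos_token,
    )

The remaining hyperparameters (learning_rate 5.0e-05, AdamW, cosine schedule, batch_size 2 for one epoch) would plug into whatever training loop is used; mask_prompt_labels: true means only the <|answer|> segment contributes to the loss.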