aromal committed on
Commit 82eb335 · 1 Parent(s): f60aa78

Delete config.json

Files changed (1)
  1. config.json +0 -46
config.json DELETED
@@ -1,46 +0,0 @@
- # Model Configuration
-
- # General Settings
- model_name: "potato101/mistralengft"
- adapters_name: "potato101/mistralEnFT"
- device: "cuda"
-
- # BitsAndBytes Config
- bits_and_bytes_config:
-   load_in_4bit: true
-   bnb_4bit_use_double_quant: true
-   bnb_4bit_quant_type: "nf4"
-   bnb_4bit_compute_dtype: torch.bfloat16
-
- # AutoModelForCausalLM Config
- auto_model_config:
-   load_in_4bit: true
-   torch_dtype: torch.bfloat16
-
- # PeftModel Config
- peft_model_config:
-   # Add PeftModel-specific configuration if needed
-
- # AutoTokenizer Config
- auto_tokenizer_config:
-   bos_token_id: 1
-
- # Inference Settings
- max_new_tokens: 200
- do_sample: true
-
- # Miscellaneous
- prompt_prefix: "[INST]"
- exit_command: "exit"
-
- # Logging
- log_success_message: "Successfully loaded the model {model_name} into memory"
-
- # Model Input Processing
- model_input_processing:
-   add_special_tokens: false
-
- # Output Display
- output_display:
-   generated_output_message: "Generated Output:"
-   separator_line: "=" * 50