Modalities: Text
Formats: json
Size: < 1K
Libraries: Datasets, Dask
Commit 8105302 · Paul Hager committed · 1 Parent(s): 1c8e20b

Added config values
MIMIC-CDM-FI/ClinicalCamel/results_2024-07-04T00-00-00.json CHANGED
@@ -4,8 +4,8 @@
  "model_quantization_group_size": 32,
  "model_quantization_act_order": "True",
  "model_quantization_damp": 0.1,
- "model_sequence_length": 4096,
- "#Params (B)": 70,
+ "max_sequence_length": 4096,
+ "params": 70,
  "model_name": "wanglab/ClinicalCamel-70B"
  },
  "results": {
MIMIC-CDM-FI/Llama2Chat/results_2024-07-04T00-00-00.json CHANGED
@@ -4,8 +4,8 @@
  "model_quantization_group_size": 32,
  "model_quantization_act_order": "True",
  "model_quantization_damp": 0.1,
- "model_sequence_length": 4096,
- "#Params (B)": 70,
+ "max_sequence_length": 4096,
+ "params": 70,
  "model_name": "TheBloke/Llama-2-70B-Chat-GPTQ"
  },
  "results": {
MIMIC-CDM-FI/Llama3Instruct/results_2024-07-04T00-00-00.json CHANGED
@@ -4,8 +4,8 @@
  "model_quantization_group_size": 32,
  "model_quantization_act_order": "True",
  "model_quantization_damp": 0.1,
- "model_sequence_length": 4096,
- "#Params (B)": 70,
+ "max_sequence_length": 4096,
+ "params": 70,
  "model_name": "meta-llama/Meta-Llama-3-70B-Instruct"
  },
  "results": {
MIMIC-CDM-FI/Meditron/results_2024-07-04T00-00-00.json CHANGED
@@ -4,8 +4,8 @@
  "model_quantization_group_size": 32,
  "model_quantization_act_order": "True",
  "model_quantization_damp": 0.1,
- "model_sequence_length": 4096,
- "#Params (B)": 70,
+ "max_sequence_length": 4096,
+ "params": 70,
  "model_name": "TheBloke/meditron-70B-GPTQ"
  },
  "results": {
MIMIC-CDM-FI/OASST/results_2024-07-04T00-00-00.json CHANGED
@@ -4,8 +4,8 @@
  "model_quantization_group_size": 32,
  "model_quantization_act_order": "True",
  "model_quantization_damp": 0.1,
- "model_sequence_length": 4096,
- "#Params (B)": 70,
+ "max_sequence_length": 4096,
+ "params": 70,
  "model_name": "TheBloke/Llama2-70B-OASST-SFT-v10-GPTQ"
  },
  "results": {
MIMIC-CDM-FI/WizardLM/results_2024-07-04T00-00-00.json CHANGED
@@ -4,8 +4,8 @@
  "model_quantization_group_size": 32,
  "model_quantization_act_order": "True",
  "model_quantization_damp": 0.1,
- "model_sequence_length": 4096,
- "#Params (B)": 70,
+ "max_sequence_length": 4096,
+ "params": 70,
  "model_name": "TheBloke/WizardLM-70B-V1.0-GPTQ"
  },
  "results": {