Upload folder using huggingface_hub

#3
by sharpenb - opened
Files changed (4)
  1. config.json +1 -1
  2. plots.png +0 -0
  3. results.json +2 -2
  4. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "/tmp/tmpm62_83d4",
+ "_name_or_path": "/tmp/tmphdgsp0z1",
  "_remove_final_layer_norm": false,
  "activation_dropout": 0.0,
  "activation_function": "relu",
plots.png CHANGED
results.json CHANGED
@@ -3,10 +3,10 @@
  "base_current_gpu_total_memory": 40339.3125,
  "base_memory_inference_first": 690.0,
  "base_memory_inference": 570.0,
- "base_token_generation_latency_sync": 25.87100715637207,
+ "base_token_generation_latency_sync": 25.69969253540039,
  "smashed_current_gpu_type": "NVIDIA A100-PCIE-40GB",
  "smashed_current_gpu_total_memory": 40339.3125,
  "smashed_memory_inference_first": 186.0,
  "smashed_memory_inference": 206.0,
- "smashed_token_generation_latency_sync": 20.37329444885254
+ "smashed_token_generation_latency_sync": 20.34065399169922
  }
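
The only substantive change to results.json is a fresh benchmark run: both sync token-generation latencies shifted slightly (base 25.8710 → 25.6997, smashed 20.3733 → 20.3407). As a minimal sketch, assuming results.json is plain JSON with exactly the field names shown above, the base/smashed speedup can be read off like this:

    import json

    # Field names are taken verbatim from the results.json diff in this PR.
    with open("results.json") as f:
        results = json.load(f)

    base = results["base_token_generation_latency_sync"]
    smashed = results["smashed_token_generation_latency_sync"]

    # With the updated values (25.6997 vs 20.3407) this prints roughly 1.26x.
    print(f"token-generation speedup: {base / smashed:.2f}x")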
smash_config.json CHANGED
@@ -14,7 +14,7 @@
  "controlnet": "None",
  "unet_dim": 4,
  "device": "cuda",
- "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelszx3uvh3_",
+ "cache_dir": "/ceph/hdd/staff/charpent/.cache/models0oxtgr2f",
  "batch_size": 1,
  "tokenizer": "GPT2TokenizerFast(name_or_path='facebook/opt-125m', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n}",
  "task": "text_text_generation",