LeeHarrold committed on
Commit 50e2012 · verified · 1 Parent(s): ff732bc

Upload folder using huggingface_hub
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
+ name: Run Python script
+
+ on:
+   push:
+     branches:
+       - y
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+
+       - name: Set up Python
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.9'
+
+       - name: Install Gradio
+         run: python -m pip install gradio
+
+       - name: Log in to Hugging Face
+         run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+       - name: Deploy to Spaces
+         run: gradio deploy
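
For local testing, the login step above can be reproduced outside CI; a minimal sketch, assuming the token is exported as an HF_TOKEN environment variable (our convention here) rather than read from the secrets.hf_token GitHub secret:

# Local equivalent of the workflow's "Log in to Hugging Face" step.
# Assumes the token lives in the HF_TOKEN environment variable (hypothetical setup).
import os
import huggingface_hub

huggingface_hub.login(token=os.environ["HF_TOKEN"])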
.gitignore ADDED
@@ -0,0 +1,4 @@
+ venv
+ *.safetensors
+ *.pyc
+ .DS_Store
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: Steer Hexter
- emoji: 🦀
- colorFrom: red
- colorTo: purple
+ title: steer-hexter
+ app_file: app.py
  sdk: gradio
  sdk_version: 4.38.1
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,200 @@
+ from transformer_lens import HookedTransformer
+ from sae_lens import SAE
+ import torch
+
+ if torch.backends.mps.is_available():
+     device = "mps"
+ else:
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ class Inference:
+
+     def __init__(self, model, pretrained_sae, layer):
+         self.layer = layer
+         if model == "gemma-2b":
+             self.sae_id = f"blocks.{layer}.hook_resid_post"
+         elif model == "gpt2-small":
+             print(f"using {model}")
+             self.sae_id = f"blocks.{0}.hook_resid_pre"
+         self.sampling_kwargs = dict(temperature=1.0, top_p=0.1, freq_penalty=1.0)
+         self.set_coeff(1)
+         self.set_model(model)
+         self.set_SAE(pretrained_sae)
+
+     def set_model(self, model):
+         self.model = HookedTransformer.from_pretrained(model, device=device)
+
+     def set_coeff(self, coeff):
+         self.coeff = coeff
+
+     def set_temperature(self, temperature):
+         self.sampling_kwargs['temperature'] = temperature
+
+     def set_steering_vector_prompt(self, prompt: str):
+         self.steering_vector_prompt = prompt
+
+     def set_SAE(self, sae_name):
+         sae, cfg_dict, _ = SAE.from_pretrained(
+             release=sae_name,
+             sae_id=self.sae_id,
+             device=device
+         )
+         self.sae = sae
+         self.cfg_dict = cfg_dict
+
+     def _get_sae_out_and_feature_activations(self):
+         # given the tokens in steering_vector_prompt, the SAE tells us which features in the activation cache are active
+         sv_logits, activationCache = self.model.run_with_cache(self.steering_vector_prompt, prepend_bos=True)
+         sv_feature_acts = self.sae.encode(activationCache[self.sae.cfg.hook_name])
+         return self.sae.decode(sv_feature_acts), sv_feature_acts
+
+     def _hooked_generate(self, prompt_batch, fwd_hooks, seed=None, **kwargs):
+         if seed is not None:
+             torch.manual_seed(seed)
+
+         with self.model.hooks(fwd_hooks=fwd_hooks):
+             tokenized = self.model.to_tokens(prompt_batch)
+             result = self.model.generate(
+                 stop_at_eos=False,  # avoids a bug on MPS
+                 input=tokenized,
+                 max_new_tokens=50,
+                 do_sample=True,
+                 **kwargs)
+         return result
+
+     def _get_features(self, sv_feature_activations):
+         # take the single most active feature at each position of the steering prompt
+         features = torch.topk(sv_feature_activations, 1).indices
+         print(f'features that align with the text prompt: {features}')
+         # look these features up (e.g. on Neuronpedia) to see the words associated with each one
+         return features
+
+     def _get_steering_hook(self, feature, sae_out):
+         coeff = self.coeff
+         steering_vector = self.sae.W_dec[feature]
+         steering_vector = steering_vector[0]
+
+         def steering_hook(resid_pre, hook):
+             # skip the single-token forward passes used during incremental decoding
+             if resid_pre.shape[1] == 1:
+                 return
+
+             position = sae_out.shape[1]
+             # add the steering vector, scaled by the coefficient, across the prompt positions
+             resid_pre[:, :position - 1, :] += coeff * steering_vector
+
+         return steering_hook
+
+     def _get_steering_hooks(self):
+         # TODO: refactor this. It works because sae_out.shape[1] == sv_feature_acts.shape[1] == len(features[0]);
+         # you could manipulate views to retrieve the hooks more cleanly
+         # and avoid the separate function _get_steering_hook()
+         sae_out, sv_feature_acts = self._get_sae_out_and_feature_activations()
+         features = self._get_features(sv_feature_acts)
+         steering_hooks = [self._get_steering_hook(feature, sae_out) for feature in features[0]]
+
+         return steering_hooks
+
+     def _run_generate(self, example_prompt, steering_on: bool):
+         self.model.reset_hooks()
+         steer_hooks = self._get_steering_hooks()
+         editing_hooks = [(self.sae_id, steer_hook) for steer_hook in steer_hooks]
+         print(f"steering by {len(editing_hooks)} hooks")
+         if steering_on:
+             res = self._hooked_generate([example_prompt] * 3, editing_hooks, seed=None, **self.sampling_kwargs)
+         else:
+             tokenized = self.model.to_tokens([example_prompt])
+             res = self.model.generate(
+                 stop_at_eos=False,  # avoids a bug on MPS
+                 input=tokenized,
+                 max_new_tokens=50,
+                 do_sample=True,
+                 **self.sampling_kwargs)
+
+         # Print results, removing the ugly beginning of sequence token
+         res_str = self.model.to_string(res[:, 1:])
+         response = ("\n\n" + "-" * 80 + "\n\n").join(res_str)
+         print(response)
+         return response
+
+     def generate(self, message: str, steering_on: bool):
+         return self._run_generate(message, steering_on)
+
+
+ # MODEL = "gemma-2b"
+ # PRETRAINED_SAE = "gemma-2b-res-jb"
+ MODEL = "gpt2-small"
+ PRETRAINED_SAE = "gpt2-small-res-jb"
+ LAYER = 10
+ chatbot_model = Inference(MODEL, PRETRAINED_SAE, LAYER)
+
+
+ import time
+ import gradio as gr
+
+ default_image = "Hexter-Hackathon.png"
+
+ def slow_echo(message, history):
+     result = chatbot_model.generate(message, False)
+     for i in range(len(result)):
+         time.sleep(0.01)
+         yield result[: i + 1]
+
+ def slow_echo_steering(message, history):
+     result = chatbot_model.generate(message, True)
+     for i in range(len(result)):
+         time.sleep(0.01)
+         yield result[: i + 1]
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         gr.Markdown("*STANDARD HEXTER BOT*")
+     with gr.Row():
+         chatbot = gr.ChatInterface(
+             slow_echo,
+             chatbot=gr.Chatbot(min_width=1000),
+             textbox=gr.Textbox(placeholder="Ask Hexter anything!", min_width=1000),
+             theme="soft",
+             cache_examples=False,
+             retry_btn=None,
+             clear_btn=None,
+             undo_btn=None,
+         )
+     with gr.Row():
+         gr.Markdown("*STEERED HEXTER BOT*")
+     with gr.Row():
+         chatbot_steered = gr.ChatInterface(
+             slow_echo_steering,
+             chatbot=gr.Chatbot(min_width=1000),
+             textbox=gr.Textbox(placeholder="Ask Hexter anything!", min_width=1000),
+             theme="soft",
+             cache_examples=False,
+             retry_btn=None,
+             clear_btn=None,
+             undo_btn=None,
+         )
+     with gr.Row():
+         steering_prompt = gr.Textbox(label="Steering prompt", value="Golden Gate Bridge")
+     with gr.Row():
+         coeff = gr.Slider(1, 1000, 300, label="Coefficient", info="Strength with which the steering vector is added to the residual stream", interactive=True)
+     with gr.Row():
+         temp = gr.Slider(0, 5, 1, label="Temperature", info="Sampling temperature for generation", interactive=True)
+
+     # Update the model whenever the controls change
+     temp.change(chatbot_model.set_temperature, inputs=[temp], outputs=[])
+     coeff.change(chatbot_model.set_coeff, inputs=[coeff], outputs=[])
+     chatbot_model.set_steering_vector_prompt(steering_prompt.value)  # seed with the textbox's initial value
+     steering_prompt.change(chatbot_model.set_steering_vector_prompt, inputs=[steering_prompt], outputs=[])
+
+ demo.queue()
+
+ if __name__ == "__main__":
+     demo.launch(debug=True, allowed_paths=["/"])
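
For reference, a minimal sketch of driving the Inference class above directly, without the Gradio UI (model and SAE names are the constants from app.py; the steering prompt and coefficient values are illustrative):

# Hypothetical standalone usage of app.py's Inference class.
bot = Inference("gpt2-small", "gpt2-small-res-jb", layer=10)
bot.set_steering_vector_prompt("Golden Gate Bridge")  # text whose top SAE features define the steering vectors
bot.set_coeff(300)                                    # steering strength
print(bot.generate("What is the most iconic structure known to man?", steering_on=True))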
demo_feature_dashboards.html ADDED
The diff for this file is too large to render. See raw diff
 
loading_analyzing.py ADDED
@@ -0,0 +1,266 @@
+
+ COLAB = False
+ # from IPython import get_ipython # type: ignore
+ # ipython = get_ipython(); assert ipython is not None
+ # ipython.run_line_magic("load_ext", "autoreload")
+ # ipython.run_line_magic("autoreload", "2")
+
+
+ # Standard imports
+ import os
+ import torch
+ from tqdm import tqdm
+ import plotly.express as px
+
+ # Imports for displaying vis in Colab / notebook
+ import webbrowser
+ import http.server
+ import socketserver
+ import threading
+ PORT = 8000
+
+ torch.set_grad_enabled(False)
+
+ # For the most part I'll try to import functions and classes near where they are used
+ # to make it clear where they come from.
+
+ if torch.backends.mps.is_available():
+     device = "mps"
+ else:
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ print(f"Device: {device}")
+
+
+ def display_vis_inline(filename: str, height: int = 850):
+     '''
+     Displays the HTML files in Colab. Uses the global `PORT` variable defined in the previous cell, so that each
+     vis has a unique port without having to define a port within the function.
+     '''
+     if not COLAB:
+         webbrowser.open(filename)
+
+     else:
+         global PORT
+
+         def serve(directory):
+             os.chdir(directory)
+
+             # Create a handler for serving files
+             handler = http.server.SimpleHTTPRequestHandler
+
+             # Create a socket server with the handler
+             with socketserver.TCPServer(("", PORT), handler) as httpd:
+                 print(f"Serving files from {directory} on port {PORT}")
+                 httpd.serve_forever()
+
+         thread = threading.Thread(target=serve, args=("/content",))
+         thread.start()
+
+         # output.serve_kernel_port_as_iframe(PORT, path=f"/{filename}", height=height, cache_in_notebook=True)
+
+         PORT += 1
+
+
+ from datasets import load_dataset
+ from transformer_lens import HookedTransformer
+ from sae_lens import SAE
+
+ model = HookedTransformer.from_pretrained("gpt2-small", device=device)
+
+ # the cfg dict is returned alongside the SAE since it may contain useful information for analysing the SAE (eg: instantiating an activation store)
+ # Note that this is not the same as the SAE's config dict; rather it is whatever was in the HF repo, from which we can extract the SAE config dict
+ # We also return the feature sparsities, which are stored on HF for convenience.
+ sae, cfg_dict, sparsity = SAE.from_pretrained(
+     release="gpt2-small-res-jb",  # see other options in sae_lens/pretrained_saes.yaml
+     sae_id="blocks.8.hook_resid_pre",  # won't always be a hook point
+     device=device
+ )
+
+
+ from transformer_lens.utils import tokenize_and_concatenate
+
+ dataset = load_dataset(
+     path="NeelNanda/pile-10k",
+     split="train",
+     streaming=False,
+ )
+
+ token_dataset = tokenize_and_concatenate(
+     dataset=dataset,  # type: ignore
+     tokenizer=model.tokenizer,  # type: ignore
+     streaming=True,
+     max_length=sae.cfg.context_size,
+     add_bos_token=sae.cfg.prepend_bos,
+ )
+
+
+ sae.eval()  # prevents errors if we're expecting a dead neuron mask for ghost grads
+
+ with torch.no_grad():
+     # activation store can give us tokens.
+     batch_tokens = token_dataset[:32]["tokens"]
+     _, cache = model.run_with_cache(batch_tokens, prepend_bos=True)
+
+     # Use the SAE
+     feature_acts = sae.encode(cache[sae.cfg.hook_name])
+     sae_out = sae.decode(feature_acts)
+
+     # save some room
+     del cache
+
+     # ignore the bos token; count the active features at each token (the L0), then average across batch and position
+     l0 = (feature_acts[:, 1:] > 0).float().sum(-1).detach()
+     print("average l0", l0.mean().item())
+     px.histogram(l0.flatten().cpu().numpy()).show()
+
+
+ from transformer_lens import utils
+ from functools import partial
+
+ # next we want to do a reconstruction test.
+ def reconstr_hook(activation, hook, sae_out):
+     return sae_out
+
+
+ def zero_abl_hook(activation, hook):
+     return torch.zeros_like(activation)
+
+
+ print("Orig", model(batch_tokens, return_type="loss").item())
+ print(
+     "reconstr",
+     model.run_with_hooks(
+         batch_tokens,
+         fwd_hooks=[
+             (
+                 sae.cfg.hook_name,
+                 partial(reconstr_hook, sae_out=sae_out),
+             )
+         ],
+         return_type="loss",
+     ).item(),
+ )
+ print(
+     "Zero",
+     model.run_with_hooks(
+         batch_tokens,
+         return_type="loss",
+         fwd_hooks=[(sae.cfg.hook_name, zero_abl_hook)],
+     ).item(),
+ )
+
+
+ example_prompt = "When John and Mary went to the shops, John gave the bag to"
+ example_answer = " Mary"
+ utils.test_prompt(example_prompt, example_answer, model, prepend_bos=True)
+
+ logits, cache = model.run_with_cache(example_prompt, prepend_bos=True)
+ tokens = model.to_tokens(example_prompt)
+ sae_out = sae(cache[sae.cfg.hook_name])
+
+
+ def reconstr_hook(activations, hook, sae_out):
+     return sae_out
+
+
+ def zero_abl_hook(mlp_out, hook):
+     return torch.zeros_like(mlp_out)
+
+
+ hook_name = sae.cfg.hook_name
+
+ print("Orig", model(tokens, return_type="loss").item())
+ print(
+     "reconstr",
+     model.run_with_hooks(
+         tokens,
+         fwd_hooks=[
+             (
+                 hook_name,
+                 partial(reconstr_hook, sae_out=sae_out),
+             )
+         ],
+         return_type="loss",
+     ).item(),
+ )
+ print(
+     "Zero",
+     model.run_with_hooks(
+         tokens,
+         return_type="loss",
+         fwd_hooks=[(hook_name, zero_abl_hook)],
+     ).item(),
+ )
+
+
+ with model.hooks(
+     fwd_hooks=[
+         (
+             hook_name,
+             partial(reconstr_hook, sae_out=sae_out),
+         )
+     ]
+ ):
+     utils.test_prompt(example_prompt, example_answer, model, prepend_bos=True)
+
+
+ from sae_dashboard.sae_vis_data import SaeVisConfig
+ from sae_dashboard.sae_vis_runner import SaeVisRunner
+
+ test_feature_idx_gpt = list(range(10)) + [14057]
+
+ feature_vis_config_gpt = SaeVisConfig(
+     hook_point=hook_name,
+     features=test_feature_idx_gpt,
+     minibatch_size_features=64,
+     minibatch_size_tokens=256,
+     verbose=True,
+     device=device,
+ )
+
+ visualization_data_gpt = SaeVisRunner(feature_vis_config_gpt).run(
+     encoder=sae,  # type: ignore
+     model=model,
+     tokens=token_dataset[:10000]["tokens"],  # type: ignore
+ )
+
+
+ from sae_dashboard.data_writing_fns import save_feature_centric_vis
+
+ filename = "demo_feature_dashboards.html"
+ save_feature_centric_vis(sae_vis_data=visualization_data_gpt, filename=filename)
+
+
+ from sae_lens.analysis.neuronpedia_integration import get_neuronpedia_quick_list
+
+ # this function should open a Neuronpedia quick list in the browser
+ neuronpedia_quick_list = get_neuronpedia_quick_list(
+     test_feature_idx_gpt,
+     layer=sae.cfg.hook_layer,
+     model="gpt2-small",
+     dataset="res-jb",
+     name="A quick list we made",
+ )
+
+ if COLAB:
+     # If you're on Colab, click the link below
+     print(neuronpedia_quick_list)
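
One common way to summarize the three losses printed by the reconstruction test above is the fraction of cross-entropy loss recovered; a minimal sketch (the helper name is ours, not part of sae_lens):

def ce_loss_recovered(orig_loss: float, reconstr_loss: float, zero_abl_loss: float) -> float:
    # 1.0 means splicing in the SAE reconstruction matches the original loss;
    # 0.0 means it is no better than zero-ablating the hook point.
    return (zero_abl_loss - reconstr_loss) / (zero_abl_loss - orig_loss)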
sae_tiny-stories-1L-21M_blocks.0.hook_mlp_out_16384/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "tiny-stories-1L-21M", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_mlp_out", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "apollo-research/roneneldan-TinyStories-tokenizer-gpt2", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 512, "use_cached_activations": false, "cached_activations_path": null, "architecture": "standard", "d_in": 1024, "d_sae": 16384, "b_dec_init_method": "zeros", "expansion_factor": 16, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": false, "noise_scale": 0.0, "from_pretrained_path": null, "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": true, "init_encoder_as_decoder_transpose": true, "n_batches_in_buffer": 64, "training_tokens": 122880000, "finetuning_tokens": 0, "store_batch_size_prompts": 16, "train_batch_size_tokens": 4096, "normalize_activations": "expected_average_only_in", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": true, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0.9, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 5, "lp_norm": 1.0, "scale_sparsity_penalty_by_decoder_norm": true, "l1_warm_up_steps": 1500, "lr": 5e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 5e-06, "lr_decay_steps": 6000, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 1000, "dead_feature_window": 1000, "dead_feature_threshold": 0.0001, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae_lens_tutorial", "wandb_id": null, "run_name": "16384-L1-5-LR-5e-05-Tokens-1.229e+08", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 20, "resume": false, "n_checkpoints": 0, "checkpoint_path": "checkpoints/q5ut4uqw", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.13.0", "sae_lens_training_version": "3.13.0", "tokens_per_buffer": 134217728}
steer_stories.py ADDED
@@ -0,0 +1,150 @@
+ # general imports
+ import os
+ import torch
+ from tqdm import tqdm
+ import plotly.express as px
+
+ torch.set_grad_enabled(False)
+
+
+ # package import
+ from torch import Tensor
+ from transformer_lens import utils
+ from functools import partial
+ from jaxtyping import Int, Float
+
+ # device setup
+ if torch.backends.mps.is_available():
+     device = "mps"
+ else:
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ print(f"Device: {device}")
+
+
+ from transformer_lens import HookedTransformer
+ from sae_lens import SAE
+
+ # Choose a layer you want to focus on
+ # For this script we use layer 0 (tiny-stories-1L-21M only has one layer)
+ layer = 0
+
+ # get model
+ model = HookedTransformer.from_pretrained("tiny-stories-1L-21M", device=device)
+
+ # get the SAE for this layer
+ sae = SAE.load_from_pretrained("sae_tiny-stories-1L-21M_blocks.0.hook_mlp_out_16384", device=device)
+
+ # get hook point
+ hook_point = sae.cfg.hook_name
+ print(hook_point)
+
+
+ sv_prompt = " Lily"
+ sv_logits, activationCache = model.run_with_cache(sv_prompt, prepend_bos=True)
+ sv_feature_acts = sae.encode(activationCache[hook_point])
+ print(torch.topk(sv_feature_acts, 3).indices.tolist())
+
+ # Generate
+
+ sv_prompt = " Lily"
+ sv_logits, activationCache = model.run_with_cache(sv_prompt, prepend_bos=True)
+ tokens = model.to_tokens(sv_prompt)
+ print(tokens)
+
+ # get the feature activations from our SAE
+ sv_feature_acts = sae.encode(activationCache[hook_point])
+
+ # get sae_out
+ sae_out = sae.decode(sv_feature_acts)
+
+ # print out the top activations, focus on the indices
+ print(torch.topk(sv_feature_acts, 3))
+
+
+ # get the neurons to use
+ print(torch.topk(sv_feature_acts, 3).indices.tolist())
+
+
+ # choose the vector -- find this from the above section
+ steering_vector = sae.W_dec[10284]
+
+ example_prompt = "Once upon a time"
+ coeff = 1000
+ sampling_kwargs = dict(temperature=1.0, top_p=0.1, freq_penalty=1.0)
+
+
+ # apply steering vector when the model generates
+
+ def steering_hook(resid_pre, hook):
+     if resid_pre.shape[1] == 1:
+         return
+
+     position = sae_out.shape[1]
+     if steering_on:
+         # using our steering vector and applying the coefficient
+         resid_pre[:, :position - 1, :] += coeff * steering_vector
+
+
+ def hooked_generate(prompt_batch, fwd_hooks=[], seed=None, **kwargs):
+     if seed is not None:
+         torch.manual_seed(seed)
+
+     with model.hooks(fwd_hooks=fwd_hooks):
+         tokenized = model.to_tokens(prompt_batch)
+         result = model.generate(
+             stop_at_eos=False,  # avoids a bug on MPS
+             input=tokenized,
+             max_new_tokens=50,
+             do_sample=True,
+             **kwargs)
+     return result
+
+
+ def run_generate(example_prompt):
+     model.reset_hooks()
+     editing_hooks = [(f"blocks.{layer}.hook_resid_post", steering_hook)]
+     res = hooked_generate([example_prompt] * 3, editing_hooks, seed=None, **sampling_kwargs)
+
+     # Print results, removing the ugly beginning of sequence token
+     res_str = model.to_string(res[:, 1:])
+     print(("\n\n" + "-" * 80 + "\n\n").join(res_str))
+
+
+ steering_on = True
+ run_generate(example_prompt)
+
+
+ # evaluate features
+
+ import pandas as pd
+
+ # Let's start by getting the top 10 logits for each feature
+ projection_onto_unembed = sae.W_dec @ model.W_U
+
+ # get the top 10 logits.
+ vals, inds = torch.topk(projection_onto_unembed, 10, dim=1)
+
+ # get 10 random features
+ random_indices = torch.randint(0, projection_onto_unembed.shape[0], (10,))
+
+ # Show the top 10 logits promoted by those features
+ top_10_logits_df = pd.DataFrame(
+     [model.to_str_tokens(i) for i in inds[random_indices]],
+     index=random_indices.tolist(),
+ ).T
+ print(top_10_logits_df)
+
+ # e.g. [7195, 5910, 2041]
+ # see the top tokens associated with feature 5910
+ top_10_associated_words_logits_df = model.to_str_tokens(inds[5910])
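
The unembedding projection at the end of steer_stories.py generalizes to a small helper for inspecting any single feature; a sketch under the same imports (the function name is ours):

def top_promoted_tokens(sae, model, feature_idx: int, k: int = 10):
    # Project one decoder row through the unembedding to see which output
    # tokens this feature pushes the model toward.
    logits = sae.W_dec[feature_idx] @ model.W_U
    return model.to_str_tokens(torch.topk(logits, k).indices)

print(top_promoted_tokens(sae, model, 5910))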
using_an_sae_as_a_steering_vector.ipynb ADDED
@@ -0,0 +1,2171 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "GoXn14ltnGh3"
+ },
+ "source": [
+ "# Using an SAE as a steering vector\n",
+ "\n",
+ "This notebook demonstrates how to use SAE Lens to identify a feature on a pretrained model, and then construct a steering vector to affect the model's output to various prompts. This notebook will also make use of Neuronpedia for identifying features of interest.\n",
+ "\n",
+ "The steps below include:\n",
+ "\n",
+ "* Installing relevant packages (Colab or locally)\n",
+ "* Loading your SAE and the model it used\n",
+ "* Determining your feature of interest and its index\n",
+ "* Implementing your steering vector\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "gf3lJYPEXh0v"
+ },
+ "source": [
+ "## Setting up packages and notebook"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "l9k5iGyOXtuN"
+ },
+ "source": [
+ "### Import and installs"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "fapxk8MDrs6R"
+ },
+ "source": [
+ "#### Environment Setup\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "collapsed": true,
+ "id": "0TwNmRkRUgR7",
+ "outputId": "ffeb827a-9af2-4b09-b8dd-78e0d594ddf6"
+ },
+ "outputs": [],
+ "source": [
+ "try:\n",
+ "    # for google colab users\n",
+ "    import google.colab # type: ignore\n",
+ "    from google.colab import output\n",
+ "    COLAB = True\n",
+ "    %pip install sae-lens transformer-lens\n",
+ "except:\n",
+ "    # for local setup\n",
+ "    COLAB = False\n",
+ "    from IPython import get_ipython # type: ignore\n",
+ "    ipython = get_ipython(); assert ipython is not None\n",
+ "    ipython.run_line_magic(\"load_ext\", \"autoreload\")\n",
+ "    ipython.run_line_magic(\"autoreload\", \"2\")\n",
+ "\n",
+ "# Imports for displaying vis in Colab / notebook\n",
+ "import webbrowser\n",
+ "import http.server\n",
+ "import socketserver\n",
+ "import threading\n",
+ "PORT = 8000\n",
+ "\n",
+ "# general imports\n",
+ "import os\n",
+ "import torch\n",
+ "from tqdm import tqdm\n",
+ "import plotly.express as px\n",
+ "\n",
+ "torch.set_grad_enabled(False);"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "id": "NGgIu1ZVYDub"
+ },
+ "outputs": [],
+ "source": [
+ "def display_vis_inline(filename: str, height: int = 850):\n",
+ "    '''\n",
+ "    Displays the HTML files in Colab. Uses the global `PORT` variable defined in the previous cell, so that each\n",
+ "    vis has a unique port without having to define a port within the function.\n",
+ "    '''\n",
+ "    if not COLAB:\n",
+ "        webbrowser.open(filename)\n",
+ "\n",
+ "    else:\n",
+ "        global PORT\n",
+ "\n",
+ "        def serve(directory):\n",
+ "            os.chdir(directory)\n",
+ "\n",
+ "            # Create a handler for serving files\n",
+ "            handler = http.server.SimpleHTTPRequestHandler\n",
+ "\n",
+ "            # Create a socket server with the handler\n",
+ "            with socketserver.TCPServer((\"\", PORT), handler) as httpd:\n",
+ "                print(f\"Serving files from {directory} on port {PORT}\")\n",
+ "                httpd.serve_forever()\n",
+ "\n",
+ "        thread = threading.Thread(target=serve, args=(\"/content\",))\n",
+ "        thread.start()\n",
+ "\n",
+ "        output.serve_kernel_port_as_iframe(PORT, path=f\"/{filename}\", height=height, cache_in_notebook=True)\n",
+ "\n",
+ "        PORT += 1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "CmaPYLpGrxbo"
+ },
+ "source": [
+ "#### General Installs and device setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "tdUm9rZKr1Qb",
+ "outputId": "9b73b762-1356-437b-8925-91c514093b43"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Device: mps\n"
+ ]
+ }
+ ],
+ "source": [
+ "# package import\n",
+ "from torch import Tensor\n",
+ "from transformer_lens import utils\n",
+ "from functools import partial\n",
+ "from jaxtyping import Int, Float\n",
+ "\n",
+ "# device setup\n",
+ "if torch.backends.mps.is_available():\n",
+ "    device = \"mps\"\n",
+ "else:\n",
+ "    device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+ "\n",
+ "print(f\"Device: {device}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "lsB0qORUaXiK"
+ },
+ "source": [
+ "### Load your model and SAE\n",
+ "\n",
+ "We're going to work with a pretrained Gemma-2b model, and the gemma-2b-res-jb SAE set, which is trained on the residual stream."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "collapsed": true,
+ "id": "bCvNtm1OOhlR",
+ "outputId": "e6fd27ab-ee94-46ec-a07e-ee48c8f30da3"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "8607cfc3f17548078c7b3ff7ebcca055",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "WARNING:root:You are not using LayerNorm, so the writing weights can't be centered! Skipping\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Loaded pretrained model gemma-2b into HookedTransformer\n",
+ "blocks.6.hook_resid_post\n"
+ ]
+ }
+ ],
+ "source": [
+ "from transformer_lens import HookedTransformer\n",
+ "from sae_lens import SAE\n",
+ "\n",
+ "# Choose a layer you want to focus on\n",
+ "# For this tutorial, we're going to use layer 6\n",
+ "layer = 6\n",
+ "\n",
+ "# get model\n",
+ "model = HookedTransformer.from_pretrained(\"gemma-2b\", device = device)\n",
+ "\n",
+ "# get the SAE for this layer\n",
+ "sae, cfg_dict, _ = SAE.from_pretrained(\n",
+ "    release = \"gemma-2b-res-jb\",\n",
+ "    sae_id = f\"blocks.{layer}.hook_resid_post\",\n",
+ "    device = device\n",
+ ")\n",
+ "\n",
+ "# get hook point\n",
+ "hook_point = sae.cfg.hook_name\n",
+ "print(hook_point)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "NkAAoyFbu5a5"
+ },
+ "source": [
+ "## Determine your feature of interest and its index"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "DkQNvdd54q4S"
+ },
+ "source": [
+ "### Find your feature"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "wzeY2D13xRjY"
+ },
+ "source": [
+ "#### Explore through code by using the feature activations for a prompt\n",
+ "\n",
+ "For the purpose of the tutorial, we are selecting a short, simple prompt.\n",
+ "\n",
+ "In this example we will try to find and steer a \"Golden Gate Bridge\" feature.\n",
+ "\n",
+ "We run our prompt on our model and get the cache, which we then use with our SAE to get our feature activations.\n",
+ "\n",
+ "Now we'll look at the top feature activations and look them up on Neuronpedia to determine what they have been interpreted as."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "IIrdJ36mlXgB",
+ "outputId": "c4014b87-3af6-4c27-8f79-3b5a3c2c03dc"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([[ 2, 714, 17489, 22352, 16125]], device='mps:0')\n",
+ "torch.return_types.topk(\n",
+ "values=tensor([[[72.5067, 70.9109, 68.8217],\n",
+ " [37.8954, 31.1813, 15.6114],\n",
+ " [65.9133, 14.2098, 13.3081],\n",
+ " [22.8078, 21.7340, 17.3972],\n",
+ " [43.6480, 12.6739, 10.7545]]], device='mps:0'),\n",
+ "indices=tensor([[[ 3390, 15881, 5347],\n",
+ " [ 6518, 13743, 1959],\n",
+ " [ 1571, 12529, 15173],\n",
+ " [12773, 10200, 15173],\n",
+ " [ 5192, 15173, 12030]]], device='mps:0'))\n"
+ ]
+ }
+ ],
+ "source": [
+ "sv_prompt = \" The Golden Gate Bridge\"\n",
+ "sv_logits, cache = model.run_with_cache(sv_prompt, prepend_bos=True)\n",
+ "tokens = model.to_tokens(sv_prompt)\n",
+ "print(tokens)\n",
+ "\n",
+ "# get the feature activations from our SAE\n",
+ "sv_feature_acts = sae.encode(cache[hook_point])\n",
+ "\n",
+ "# get sae_out\n",
+ "sae_out = sae.decode(sv_feature_acts)\n",
+ "\n",
+ "# print out the top activations, focus on the indices\n",
+ "print(torch.topk(sv_feature_acts, 3))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
+ "To disable this warning, you can either:\n",
+ "\t- Avoid using `tokenizers` before the fork if possible\n",
+ "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'https://neuronpedia.org/quick-list/?name=temporary_list&features=%5B%7B%22modelId%22%3A%20%22gemma-2b%22%2C%20%22layer%22%3A%20%220-res-jb%22%2C%20%22index%22%3A%20%22%5B%5B9036%2C%20347%2C%205775%5D%2C%20%5B12244%2C%208086%2C%2015895%5D%5D%22%7D%5D'"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from sae_lens.analysis.neuronpedia_integration import get_neuronpedia_quick_list\n",
+ "get_neuronpedia_quick_list(torch.topk(sv_feature_acts, 3).indices.tolist(), layer = layer, model = \"gemma-2b\", dataset=\"res-jb\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "7hy8RbbyTb8n"
+ },
+ "source": [
+ "As we can see from our print out of tokens, the prompt is made of five tokens in total - the beginning-of-sequence token followed by \" The\", \" Golden\", \" Gate\", and \" Bridge\".\n",
+ "\n",
+ "The feature activations at the positions of \" Golden\", \" Gate\", and \" Bridge\" are of most interest to us.\n",
+ "\n",
+ "Because we are using pretrained SAEs that have published feature maps, you can search on Neuronpedia for a feature of interest."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "gFv4iBHFcOmE"
+ },
+ "source": [
+ "### Steps for Neuronpedia use\n",
+ "\n",
+ "Use the interface to search for a specific concept or item and determine which layer it is in and at what index.\n",
+ "\n",
+ "1. Open the [Neuronpedia](https://www.neuronpedia.org/) homepage.\n",
+ "2. Using the \"Models\" dropdown, select your model. Here we are using GPT2-SM (GPT2-small).\n",
+ "3. The next page will have a search bar, which allows you to enter your index of interest. We're interested in the \"RES-JB\" SAE set, so make sure to select it.\n",
+ "4. We found these indices in the previous step: [ 7650, 718, 22372]. Select them in the search to see the feature dashboard for each.\n",
+ "5. As we'll see, some of the indices may relate to features you don't care about.\n",
+ "\n",
+ "From using Neuronpedia, I have determined that my feature of interest is in layer 2, at index 7650: [here](https://www.neuronpedia.org/gpt2-small/2-res-jb/7650) is the feature."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "KX0rXziniH9O"
+ },
+ "source": [
+ "### Note: 2nd Option - Starting with Neuronpedia\n",
+ "\n",
+ "Another option here is that you can start with Neuronpedia to identify features of interest. By using your prompt in the interface you can explore which features were involved and search across all the layers. This allows you to first determine your layer and index of interest in Neuronpedia before focusing on them in your code. Start [here](https://www.neuronpedia.org/search) if you want to begin with search."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "YACtNFzGcNua"
+ },
+ "source": [
+ "## Implement your steering vector and affect the output"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "pO8hjg8j5bb-"
+ },
+ "source": [
+ "### Define values for your steering vector\n",
+ "To create our steering vector, we now need to get the decoder weights of our sparse autoencoder at our index of interest.\n",
+ "\n",
+ "Then, to use our steering vector, we want a prompt for text generation, as well as a scaling coefficient to apply to the steering vector.\n",
+ "\n",
+ "We also set common sampling kwargs - temperature, top_p and freq_penalty."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {
+ "id": "rgYEWGV0t0L2"
+ },
+ "outputs": [],
+ "source": [
+ "steering_vector = sae.W_dec[10200]\n",
+ "\n",
+ "example_prompt = \"What is the most iconic structure known to man?\"\n",
+ "coeff = 300\n",
+ "sampling_kwargs = dict(temperature=1.0, top_p=0.1, freq_penalty=1.0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cexaoBR65lIa"
+ },
+ "source": [
+ "### Set up hook functions\n",
+ "\n",
+ "Finally, we need to create a hook that allows us to apply the steering vector when our model runs generate() on our defined prompt. We have also added a boolean value 'steering_on' that allows us to easily toggle the steering vector on and off for each prompt.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {
+ "collapsed": true,
+ "id": "3kcVWeJoIAlC"
+ },
+ "outputs": [],
+ "source": [
+ "def steering_hook(resid_pre, hook):\n",
+ "    if resid_pre.shape[1] == 1:\n",
+ "        return\n",
+ "\n",
+ "    position = sae_out.shape[1]\n",
+ "    if steering_on:\n",
+ "        # using our steering vector and applying the coefficient\n",
+ "        resid_pre[:, :position - 1, :] += coeff * steering_vector\n",
+ "\n",
+ "\n",
+ "def hooked_generate(prompt_batch, fwd_hooks=[], seed=None, **kwargs):\n",
+ "    if seed is not None:\n",
+ "        torch.manual_seed(seed)\n",
+ "\n",
+ "    with model.hooks(fwd_hooks=fwd_hooks):\n",
+ "        tokenized = model.to_tokens(prompt_batch)\n",
+ "        result = model.generate(\n",
+ "            stop_at_eos=False,  # avoids a bug on MPS\n",
+ "            input=tokenized,\n",
+ "            max_new_tokens=50,\n",
+ "            do_sample=True,\n",
+ "            **kwargs)\n",
+ "    return result\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {
+ "id": "VcuRkX0yA2WH"
+ },
+ "outputs": [],
+ "source": [
+ "def run_generate(example_prompt):\n",
+ "    model.reset_hooks()\n",
+ "    editing_hooks = [(f\"blocks.{layer}.hook_resid_post\", steering_hook)]\n",
+ "    res = hooked_generate([example_prompt] * 3, editing_hooks, seed=None, **sampling_kwargs)\n",
+ "\n",
+ "    # Print results, removing the ugly beginning of sequence token\n",
+ "    res_str = model.to_string(res[:, 1:])\n",
+ "    print((\"\\n\\n\" + \"-\" * 80 + \"\\n\\n\").join(res_str))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XYx--hIn61VQ"
+ },
+ "source": [
+ "### Generate text influenced by steering vector\n",
+ "\n",
+ "You may want to experiment with the scaling coefficient value that you set and see how it affects the generated output."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 337,
+ "referenced_widgets": [
+ "9f555c5ada38495eb4281cbb49169abe",
+ "79b59cbde9444bf892931d31afec7f2a",
+ "a157870318114d459a33d795850967ef",
+ "635162e10abc441797d4e5b74713bf44",
+ "720b4d010c364e3fbf72a53b267e8db9",
+ "d9c33fbfb3164cbbb7b9a4cd172d20ae",
+ "df53331cce124bd1ada5aa9e9a977015",
+ "229dad8e29f04c279c5603286e2c0643",
+ "83d947fc3338491ab4155b87c443884c",
+ "5e9700580d6b4ad0bfac34bf3b3919fc",
+ "a2c30462ef8d41fd9158f194a746d5a7"
+ ]
+ },
+ "id": "hN_YOzBE6lz8",
+ "outputId": "e263b8ff-86ce-439e-81e5-bbecb0d7e187"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "634ddfad68cb49208e63733402859842",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/50 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "What is the most iconic structure known to man? The Golden Gate Bridge. The Golden Gate Bridge is a cable- suspension bridge that connects San Francisco and the Presidio of San Francisco, connecting the northern part of San Francisco with the northern part of Marin County. It’s one of the most famous\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\n",
+ "What is the most iconic structure known to man? The Golden Gate Bridge. The Golden Gate Bridge is a cable- suspension bridge that connects San Francisco and the northern part of San Francisco’s famous “Golden” Coast. It’s one of the most iconic landmarks in the whole world, and it\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\n",
+ "What is the most iconic structure known to man? The Golden Gate Bridge. The Golden Gate Bridge is a cable- suspension bridge that connects San Francisco and the Presidio of San Francisco, connecting the northern part of San Francisco with the city’s northernmost part. It’s one of the most\n"
+ ]
+ }
+ ],
+ "source": [
+ "steering_on = True\n",
+ "run_generate(example_prompt)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ltZEm1VW7Tsr"
+ },
+ "source": [
+ "### Generate text with no steering"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 337,
+ "referenced_widgets": [
+ "1a5dd5f7c9d340b6ab00ecaf43525ae9",
+ "8211cc6c973a43fcaf18e14f6d7f08a2",
+ "3d3584d1feec459287ffa24c4ef790c3",
+ "5f03835168e64ec588c50ee21fedd198",
+ "b833db18729f422cb86deed4be6f1900",
+ "66d406d6eb1f49699ee09c9a2fd4ffa9",
+ "38341454dd6b4e9ca2fe5b85d2e371e1",
+ "a30c82833f55441995744300c2ef538d",
+ "4932983d4f1a4199b3d24c730c765a24",
+ "c20e9e14100d45f3bdff1b6df943940f",
+ "5c53f97287d54c03a378fc44ab791cd7"
+ ]
+ },
+ "collapsed": true,
+ "id": "nA9cs1BY7XhS",
+ "outputId": "22a03d47-1afb-4217-d77a-979c94392f2a"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "f581d3a3b4d44a5e92dae704116e4445",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/50 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "What is the most iconic structure known to man? The Eiffel Tower, of course!\n",
+ "\n",
+ "The Eiffel Tower is a symbol of Paris and France. It was built in 1889 for the World’s Fair and has been a popular tourist attraction ever since.\n",
+ "\n",
+ "The tower stands at \n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\n",
+ "What is the most iconic structure known to man? The Eiffel Tower, of course!\n",
+ "\n",
+ "The Eiffel Tower is a wrought iron lattice tower located in Paris, France. It was built for the 1889 World’s Fair and has become one of the most recognizable symbols of Paris.\n",
+ "\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\n",
+ "What is the most iconic structure known to man? The Eiffel Tower, of course!\n",
+ "\n",
+ "The Eiffel Tower is a symbol of Paris and France. It was built in 1889 for the World’s Fair and has been a popular tourist attraction ever since.\n",
+ "\n",
+ "The tower stands at \n"
+ ]
+ }
+ ],
+ "source": [
+ "steering_on = False\n",
+ "run_generate(example_prompt)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Q_duIXtnAcj9"
+ },
+ "source": [
+ "### General Question test\n",
+ "We'll also attempt a more general prompt, which is a better indication of whether our steering vector is having an effect or not."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "UmqQEAM3Ab0i"
+ },
+ "outputs": [],
+ "source": [
+ "question_prompt = \"What is on your mind?\"\n",
+ "coeff = 100\n",
+ "sampling_kwargs = dict(temperature=1.0, top_p=0.1, freq_penalty=1.0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 337,
+ "referenced_widgets": [
+ "106650a69f4c4bd0a340d58c4bd4f1bb",
+ "06d77984a1a64d39938bfe68e114539b",
+ "6571f57262c447ce9177223fb583e707",
+ "a2179cafb63f475db0162cd990a17ff7",
+ "c0bb81765e93420796cd5f959e9d3534",
+ "fe6cae73e861414eaff54680113676bc",
+ "3f5f9cad86e24dd489146215c3a208c9",
+ "70006fb01d6a49fb909e4a3bfc5b940a",
+ "7980b120d41247548f49667cea6156a5",
+ "359ef2b8a4ac4a9c9a91edc4a2dd1326",
+ "c66dc6c14a4c4274900abe8fc993266a"
+ ]
+ },
+ "id": "HUanDPQeAss3",
+ "outputId": "ecb100a3-d855-4c3e-a758-bd7a3cfebd23"
+ },
+ "outputs": [],
+ "source": [
+ "steering_on = True\n",
+ "run_generate(question_prompt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 337,
+ "referenced_widgets": [
+ "a8bdc4ecce4f48e0ba6483ea9e679336",
+ "60604227dac34e37a0a9f3bfb3984317",
+ "4024c181581c485abd3181586afc2574",
+ "7761a50a602f41f1a21aa826c491eb9d",
+ "25ebd285de2e49c483c3b22b5c8364c0",
+ "3b74befc8d70471697ce6686ab4ac5c3",
+ "b2ff537e768b43ef98c412e633ab9e49",
+ "3fdf0c5e62f24f30b02bcdc37b17c2e7",
+ "07c0dd1a8de149408b981a8892f6e46d",
+ "b272384164504fa5b81d5502c12f8800",
+ "f525b9f19c334fe6b2305ad6bcfa20bf"
+ ]
+ },
+ "id": "W07bAiWqBlXh",
+ "outputId": "a6b074e6-8183-41ec-c390-2d6430eefdc7"
+ },
+ "outputs": [],
+ "source": [
+ "steering_on = False\n",
+ "run_generate(question_prompt)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "JVTbMgMzCLB9"
+ },
+ "source": [
+ "## Next Steps\n",
+ "\n",
+ "Ideas you could take for further exploration:\n",
+ "\n",
+ "* Try ablating the feature\n",
+ "* Try and get a response where just the feature token prints over and over\n",
+ "* Investigate other features with more complex usage\n",
+ "\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [
+ "fapxk8MDrs6R",
+ "CmaPYLpGrxbo"
+ ],
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.7"
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "088c6e4e32c1710b3b346fe2c9e3084abd3190c888871e6e5b66f23c765b3959"
+ }
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "06d77984a1a64d39938bfe68e114539b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_fe6cae73e861414eaff54680113676bc",
+ "placeholder": "​",
+ "style": "IPY_MODEL_3f5f9cad86e24dd489146215c3a208c9",
+ "value": "100%"
+ }
+ },
+ "07c0dd1a8de149408b981a8892f6e46d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "106650a69f4c4bd0a340d58c4bd4f1bb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_06d77984a1a64d39938bfe68e114539b",
+ "IPY_MODEL_6571f57262c447ce9177223fb583e707",
+ "IPY_MODEL_a2179cafb63f475db0162cd990a17ff7"
+ ],
+ "layout": "IPY_MODEL_c0bb81765e93420796cd5f959e9d3534"
+ }
+ },
+ "1a5dd5f7c9d340b6ab00ecaf43525ae9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_8211cc6c973a43fcaf18e14f6d7f08a2",
+ "IPY_MODEL_3d3584d1feec459287ffa24c4ef790c3",
+ "IPY_MODEL_5f03835168e64ec588c50ee21fedd198"
+ ],
+ "layout": "IPY_MODEL_b833db18729f422cb86deed4be6f1900"
+ }
+ },
+ "229dad8e29f04c279c5603286e2c0643": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "25ebd285de2e49c483c3b22b5c8364c0": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "359ef2b8a4ac4a9c9a91edc4a2dd1326": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
998
+ "border": null,
999
+ "bottom": null,
1000
+ "display": null,
1001
+ "flex": null,
1002
+ "flex_flow": null,
1003
+ "grid_area": null,
1004
+ "grid_auto_columns": null,
1005
+ "grid_auto_flow": null,
1006
+ "grid_auto_rows": null,
1007
+ "grid_column": null,
1008
+ "grid_gap": null,
1009
+ "grid_row": null,
1010
+ "grid_template_areas": null,
1011
+ "grid_template_columns": null,
1012
+ "grid_template_rows": null,
1013
+ "height": null,
1014
+ "justify_content": null,
1015
+ "justify_items": null,
1016
+ "left": null,
1017
+ "margin": null,
1018
+ "max_height": null,
1019
+ "max_width": null,
1020
+ "min_height": null,
1021
+ "min_width": null,
1022
+ "object_fit": null,
1023
+ "object_position": null,
1024
+ "order": null,
1025
+ "overflow": null,
1026
+ "overflow_x": null,
1027
+ "overflow_y": null,
1028
+ "padding": null,
1029
+ "right": null,
1030
+ "top": null,
1031
+ "visibility": null,
1032
+ "width": null
1033
+ }
1034
+ },
1035
+ "38341454dd6b4e9ca2fe5b85d2e371e1": {
1036
+ "model_module": "@jupyter-widgets/controls",
1037
+ "model_module_version": "1.5.0",
1038
+ "model_name": "DescriptionStyleModel",
1039
+ "state": {
1040
+ "_model_module": "@jupyter-widgets/controls",
1041
+ "_model_module_version": "1.5.0",
1042
+ "_model_name": "DescriptionStyleModel",
1043
+ "_view_count": null,
1044
+ "_view_module": "@jupyter-widgets/base",
1045
+ "_view_module_version": "1.2.0",
1046
+ "_view_name": "StyleView",
1047
+ "description_width": ""
1048
+ }
1049
+ },
1050
+ "3b74befc8d70471697ce6686ab4ac5c3": {
1051
+ "model_module": "@jupyter-widgets/base",
1052
+ "model_module_version": "1.2.0",
1053
+ "model_name": "LayoutModel",
1054
+ "state": {
1055
+ "_model_module": "@jupyter-widgets/base",
1056
+ "_model_module_version": "1.2.0",
1057
+ "_model_name": "LayoutModel",
1058
+ "_view_count": null,
1059
+ "_view_module": "@jupyter-widgets/base",
1060
+ "_view_module_version": "1.2.0",
1061
+ "_view_name": "LayoutView",
1062
+ "align_content": null,
1063
+ "align_items": null,
1064
+ "align_self": null,
1065
+ "border": null,
1066
+ "bottom": null,
1067
+ "display": null,
1068
+ "flex": null,
1069
+ "flex_flow": null,
1070
+ "grid_area": null,
1071
+ "grid_auto_columns": null,
1072
+ "grid_auto_flow": null,
1073
+ "grid_auto_rows": null,
1074
+ "grid_column": null,
1075
+ "grid_gap": null,
1076
+ "grid_row": null,
1077
+ "grid_template_areas": null,
1078
+ "grid_template_columns": null,
1079
+ "grid_template_rows": null,
1080
+ "height": null,
1081
+ "justify_content": null,
1082
+ "justify_items": null,
1083
+ "left": null,
1084
+ "margin": null,
1085
+ "max_height": null,
1086
+ "max_width": null,
1087
+ "min_height": null,
1088
+ "min_width": null,
1089
+ "object_fit": null,
1090
+ "object_position": null,
1091
+ "order": null,
1092
+ "overflow": null,
1093
+ "overflow_x": null,
1094
+ "overflow_y": null,
1095
+ "padding": null,
1096
+ "right": null,
1097
+ "top": null,
1098
+ "visibility": null,
1099
+ "width": null
1100
+ }
1101
+ },
1102
+ "3d3584d1feec459287ffa24c4ef790c3": {
1103
+ "model_module": "@jupyter-widgets/controls",
1104
+ "model_module_version": "1.5.0",
1105
+ "model_name": "FloatProgressModel",
1106
+ "state": {
1107
+ "_dom_classes": [],
1108
+ "_model_module": "@jupyter-widgets/controls",
1109
+ "_model_module_version": "1.5.0",
1110
+ "_model_name": "FloatProgressModel",
1111
+ "_view_count": null,
1112
+ "_view_module": "@jupyter-widgets/controls",
1113
+ "_view_module_version": "1.5.0",
1114
+ "_view_name": "ProgressView",
1115
+ "bar_style": "success",
1116
+ "description": "",
1117
+ "description_tooltip": null,
1118
+ "layout": "IPY_MODEL_a30c82833f55441995744300c2ef538d",
1119
+ "max": 50,
1120
+ "min": 0,
1121
+ "orientation": "horizontal",
1122
+ "style": "IPY_MODEL_4932983d4f1a4199b3d24c730c765a24",
1123
+ "value": 50
1124
+ }
1125
+ },
1126
+ "3f5f9cad86e24dd489146215c3a208c9": {
1127
+ "model_module": "@jupyter-widgets/controls",
1128
+ "model_module_version": "1.5.0",
1129
+ "model_name": "DescriptionStyleModel",
1130
+ "state": {
1131
+ "_model_module": "@jupyter-widgets/controls",
1132
+ "_model_module_version": "1.5.0",
1133
+ "_model_name": "DescriptionStyleModel",
1134
+ "_view_count": null,
1135
+ "_view_module": "@jupyter-widgets/base",
1136
+ "_view_module_version": "1.2.0",
1137
+ "_view_name": "StyleView",
1138
+ "description_width": ""
1139
+ }
1140
+ },
1141
+ "3fdf0c5e62f24f30b02bcdc37b17c2e7": {
1142
+ "model_module": "@jupyter-widgets/base",
1143
+ "model_module_version": "1.2.0",
1144
+ "model_name": "LayoutModel",
1145
+ "state": {
1146
+ "_model_module": "@jupyter-widgets/base",
1147
+ "_model_module_version": "1.2.0",
1148
+ "_model_name": "LayoutModel",
1149
+ "_view_count": null,
1150
+ "_view_module": "@jupyter-widgets/base",
1151
+ "_view_module_version": "1.2.0",
1152
+ "_view_name": "LayoutView",
1153
+ "align_content": null,
1154
+ "align_items": null,
1155
+ "align_self": null,
1156
+ "border": null,
1157
+ "bottom": null,
1158
+ "display": null,
1159
+ "flex": null,
1160
+ "flex_flow": null,
1161
+ "grid_area": null,
1162
+ "grid_auto_columns": null,
1163
+ "grid_auto_flow": null,
1164
+ "grid_auto_rows": null,
1165
+ "grid_column": null,
1166
+ "grid_gap": null,
1167
+ "grid_row": null,
1168
+ "grid_template_areas": null,
1169
+ "grid_template_columns": null,
1170
+ "grid_template_rows": null,
1171
+ "height": null,
1172
+ "justify_content": null,
1173
+ "justify_items": null,
1174
+ "left": null,
1175
+ "margin": null,
1176
+ "max_height": null,
1177
+ "max_width": null,
1178
+ "min_height": null,
1179
+ "min_width": null,
1180
+ "object_fit": null,
1181
+ "object_position": null,
1182
+ "order": null,
1183
+ "overflow": null,
1184
+ "overflow_x": null,
1185
+ "overflow_y": null,
1186
+ "padding": null,
1187
+ "right": null,
1188
+ "top": null,
1189
+ "visibility": null,
1190
+ "width": null
1191
+ }
1192
+ },
1193
+ "4024c181581c485abd3181586afc2574": {
1194
+ "model_module": "@jupyter-widgets/controls",
1195
+ "model_module_version": "1.5.0",
1196
+ "model_name": "FloatProgressModel",
1197
+ "state": {
1198
+ "_dom_classes": [],
1199
+ "_model_module": "@jupyter-widgets/controls",
1200
+ "_model_module_version": "1.5.0",
1201
+ "_model_name": "FloatProgressModel",
1202
+ "_view_count": null,
1203
+ "_view_module": "@jupyter-widgets/controls",
1204
+ "_view_module_version": "1.5.0",
1205
+ "_view_name": "ProgressView",
1206
+ "bar_style": "success",
1207
+ "description": "",
1208
+ "description_tooltip": null,
1209
+ "layout": "IPY_MODEL_3fdf0c5e62f24f30b02bcdc37b17c2e7",
1210
+ "max": 50,
1211
+ "min": 0,
1212
+ "orientation": "horizontal",
1213
+ "style": "IPY_MODEL_07c0dd1a8de149408b981a8892f6e46d",
1214
+ "value": 50
1215
+ }
1216
+ },
1217
+ "4932983d4f1a4199b3d24c730c765a24": {
1218
+ "model_module": "@jupyter-widgets/controls",
1219
+ "model_module_version": "1.5.0",
1220
+ "model_name": "ProgressStyleModel",
1221
+ "state": {
1222
+ "_model_module": "@jupyter-widgets/controls",
1223
+ "_model_module_version": "1.5.0",
1224
+ "_model_name": "ProgressStyleModel",
1225
+ "_view_count": null,
1226
+ "_view_module": "@jupyter-widgets/base",
1227
+ "_view_module_version": "1.2.0",
1228
+ "_view_name": "StyleView",
1229
+ "bar_color": null,
1230
+ "description_width": ""
1231
+ }
1232
+ },
1233
+ "5c53f97287d54c03a378fc44ab791cd7": {
1234
+ "model_module": "@jupyter-widgets/controls",
1235
+ "model_module_version": "1.5.0",
1236
+ "model_name": "DescriptionStyleModel",
1237
+ "state": {
1238
+ "_model_module": "@jupyter-widgets/controls",
1239
+ "_model_module_version": "1.5.0",
1240
+ "_model_name": "DescriptionStyleModel",
1241
+ "_view_count": null,
1242
+ "_view_module": "@jupyter-widgets/base",
1243
+ "_view_module_version": "1.2.0",
1244
+ "_view_name": "StyleView",
1245
+ "description_width": ""
1246
+ }
1247
+ },
1248
+ "5e9700580d6b4ad0bfac34bf3b3919fc": {
1249
+ "model_module": "@jupyter-widgets/base",
1250
+ "model_module_version": "1.2.0",
1251
+ "model_name": "LayoutModel",
1252
+ "state": {
1253
+ "_model_module": "@jupyter-widgets/base",
1254
+ "_model_module_version": "1.2.0",
1255
+ "_model_name": "LayoutModel",
1256
+ "_view_count": null,
1257
+ "_view_module": "@jupyter-widgets/base",
1258
+ "_view_module_version": "1.2.0",
1259
+ "_view_name": "LayoutView",
1260
+ "align_content": null,
1261
+ "align_items": null,
1262
+ "align_self": null,
1263
+ "border": null,
1264
+ "bottom": null,
1265
+ "display": null,
1266
+ "flex": null,
1267
+ "flex_flow": null,
1268
+ "grid_area": null,
1269
+ "grid_auto_columns": null,
1270
+ "grid_auto_flow": null,
1271
+ "grid_auto_rows": null,
1272
+ "grid_column": null,
1273
+ "grid_gap": null,
1274
+ "grid_row": null,
1275
+ "grid_template_areas": null,
1276
+ "grid_template_columns": null,
1277
+ "grid_template_rows": null,
1278
+ "height": null,
1279
+ "justify_content": null,
1280
+ "justify_items": null,
1281
+ "left": null,
1282
+ "margin": null,
1283
+ "max_height": null,
1284
+ "max_width": null,
1285
+ "min_height": null,
1286
+ "min_width": null,
1287
+ "object_fit": null,
1288
+ "object_position": null,
1289
+ "order": null,
1290
+ "overflow": null,
1291
+ "overflow_x": null,
1292
+ "overflow_y": null,
1293
+ "padding": null,
1294
+ "right": null,
1295
+ "top": null,
1296
+ "visibility": null,
1297
+ "width": null
1298
+ }
1299
+ },
1300
+ "5f03835168e64ec588c50ee21fedd198": {
1301
+ "model_module": "@jupyter-widgets/controls",
1302
+ "model_module_version": "1.5.0",
1303
+ "model_name": "HTMLModel",
1304
+ "state": {
1305
+ "_dom_classes": [],
1306
+ "_model_module": "@jupyter-widgets/controls",
1307
+ "_model_module_version": "1.5.0",
1308
+ "_model_name": "HTMLModel",
1309
+ "_view_count": null,
1310
+ "_view_module": "@jupyter-widgets/controls",
1311
+ "_view_module_version": "1.5.0",
1312
+ "_view_name": "HTMLView",
1313
+ "description": "",
1314
+ "description_tooltip": null,
1315
+ "layout": "IPY_MODEL_c20e9e14100d45f3bdff1b6df943940f",
1316
+ "placeholder": "​",
1317
+ "style": "IPY_MODEL_5c53f97287d54c03a378fc44ab791cd7",
1318
+ "value": " 50/50 [00:01&lt;00:00, 29.69it/s]"
1319
+ }
1320
+ },
1321
+ "60604227dac34e37a0a9f3bfb3984317": {
1322
+ "model_module": "@jupyter-widgets/controls",
1323
+ "model_module_version": "1.5.0",
1324
+ "model_name": "HTMLModel",
1325
+ "state": {
1326
+ "_dom_classes": [],
1327
+ "_model_module": "@jupyter-widgets/controls",
1328
+ "_model_module_version": "1.5.0",
1329
+ "_model_name": "HTMLModel",
1330
+ "_view_count": null,
1331
+ "_view_module": "@jupyter-widgets/controls",
1332
+ "_view_module_version": "1.5.0",
1333
+ "_view_name": "HTMLView",
1334
+ "description": "",
1335
+ "description_tooltip": null,
1336
+ "layout": "IPY_MODEL_3b74befc8d70471697ce6686ab4ac5c3",
1337
+ "placeholder": "​",
1338
+ "style": "IPY_MODEL_b2ff537e768b43ef98c412e633ab9e49",
1339
+ "value": "100%"
1340
+ }
1341
+ },
1342
+ "635162e10abc441797d4e5b74713bf44": {
1343
+ "model_module": "@jupyter-widgets/controls",
1344
+ "model_module_version": "1.5.0",
1345
+ "model_name": "HTMLModel",
1346
+ "state": {
1347
+ "_dom_classes": [],
1348
+ "_model_module": "@jupyter-widgets/controls",
1349
+ "_model_module_version": "1.5.0",
1350
+ "_model_name": "HTMLModel",
1351
+ "_view_count": null,
1352
+ "_view_module": "@jupyter-widgets/controls",
1353
+ "_view_module_version": "1.5.0",
1354
+ "_view_name": "HTMLView",
1355
+ "description": "",
1356
+ "description_tooltip": null,
1357
+ "layout": "IPY_MODEL_5e9700580d6b4ad0bfac34bf3b3919fc",
1358
+ "placeholder": "​",
1359
+ "style": "IPY_MODEL_a2c30462ef8d41fd9158f194a746d5a7",
1360
+ "value": " 50/50 [00:02&lt;00:00, 29.94it/s]"
1361
+ }
1362
+ },
1363
+ "6571f57262c447ce9177223fb583e707": {
1364
+ "model_module": "@jupyter-widgets/controls",
1365
+ "model_module_version": "1.5.0",
1366
+ "model_name": "FloatProgressModel",
1367
+ "state": {
1368
+ "_dom_classes": [],
1369
+ "_model_module": "@jupyter-widgets/controls",
1370
+ "_model_module_version": "1.5.0",
1371
+ "_model_name": "FloatProgressModel",
1372
+ "_view_count": null,
1373
+ "_view_module": "@jupyter-widgets/controls",
1374
+ "_view_module_version": "1.5.0",
1375
+ "_view_name": "ProgressView",
1376
+ "bar_style": "success",
1377
+ "description": "",
1378
+ "description_tooltip": null,
1379
+ "layout": "IPY_MODEL_70006fb01d6a49fb909e4a3bfc5b940a",
1380
+ "max": 50,
1381
+ "min": 0,
1382
+ "orientation": "horizontal",
1383
+ "style": "IPY_MODEL_7980b120d41247548f49667cea6156a5",
1384
+ "value": 50
1385
+ }
1386
+ },
1387
+ "66d406d6eb1f49699ee09c9a2fd4ffa9": {
1388
+ "model_module": "@jupyter-widgets/base",
1389
+ "model_module_version": "1.2.0",
1390
+ "model_name": "LayoutModel",
1391
+ "state": {
1392
+ "_model_module": "@jupyter-widgets/base",
1393
+ "_model_module_version": "1.2.0",
1394
+ "_model_name": "LayoutModel",
1395
+ "_view_count": null,
1396
+ "_view_module": "@jupyter-widgets/base",
1397
+ "_view_module_version": "1.2.0",
1398
+ "_view_name": "LayoutView",
1399
+ "align_content": null,
1400
+ "align_items": null,
1401
+ "align_self": null,
1402
+ "border": null,
1403
+ "bottom": null,
1404
+ "display": null,
1405
+ "flex": null,
1406
+ "flex_flow": null,
1407
+ "grid_area": null,
1408
+ "grid_auto_columns": null,
1409
+ "grid_auto_flow": null,
1410
+ "grid_auto_rows": null,
1411
+ "grid_column": null,
1412
+ "grid_gap": null,
1413
+ "grid_row": null,
1414
+ "grid_template_areas": null,
1415
+ "grid_template_columns": null,
1416
+ "grid_template_rows": null,
1417
+ "height": null,
1418
+ "justify_content": null,
1419
+ "justify_items": null,
1420
+ "left": null,
1421
+ "margin": null,
1422
+ "max_height": null,
1423
+ "max_width": null,
1424
+ "min_height": null,
1425
+ "min_width": null,
1426
+ "object_fit": null,
1427
+ "object_position": null,
1428
+ "order": null,
1429
+ "overflow": null,
1430
+ "overflow_x": null,
1431
+ "overflow_y": null,
1432
+ "padding": null,
1433
+ "right": null,
1434
+ "top": null,
1435
+ "visibility": null,
1436
+ "width": null
1437
+ }
1438
+ },
1439
+ "70006fb01d6a49fb909e4a3bfc5b940a": {
1440
+ "model_module": "@jupyter-widgets/base",
1441
+ "model_module_version": "1.2.0",
1442
+ "model_name": "LayoutModel",
1443
+ "state": {
1444
+ "_model_module": "@jupyter-widgets/base",
1445
+ "_model_module_version": "1.2.0",
1446
+ "_model_name": "LayoutModel",
1447
+ "_view_count": null,
1448
+ "_view_module": "@jupyter-widgets/base",
1449
+ "_view_module_version": "1.2.0",
1450
+ "_view_name": "LayoutView",
1451
+ "align_content": null,
1452
+ "align_items": null,
1453
+ "align_self": null,
1454
+ "border": null,
1455
+ "bottom": null,
1456
+ "display": null,
1457
+ "flex": null,
1458
+ "flex_flow": null,
1459
+ "grid_area": null,
1460
+ "grid_auto_columns": null,
1461
+ "grid_auto_flow": null,
1462
+ "grid_auto_rows": null,
1463
+ "grid_column": null,
1464
+ "grid_gap": null,
1465
+ "grid_row": null,
1466
+ "grid_template_areas": null,
1467
+ "grid_template_columns": null,
1468
+ "grid_template_rows": null,
1469
+ "height": null,
1470
+ "justify_content": null,
1471
+ "justify_items": null,
1472
+ "left": null,
1473
+ "margin": null,
1474
+ "max_height": null,
1475
+ "max_width": null,
1476
+ "min_height": null,
1477
+ "min_width": null,
1478
+ "object_fit": null,
1479
+ "object_position": null,
1480
+ "order": null,
1481
+ "overflow": null,
1482
+ "overflow_x": null,
1483
+ "overflow_y": null,
1484
+ "padding": null,
1485
+ "right": null,
1486
+ "top": null,
1487
+ "visibility": null,
1488
+ "width": null
1489
+ }
1490
+ },
1491
+ "720b4d010c364e3fbf72a53b267e8db9": {
1492
+ "model_module": "@jupyter-widgets/base",
1493
+ "model_module_version": "1.2.0",
1494
+ "model_name": "LayoutModel",
1495
+ "state": {
1496
+ "_model_module": "@jupyter-widgets/base",
1497
+ "_model_module_version": "1.2.0",
1498
+ "_model_name": "LayoutModel",
1499
+ "_view_count": null,
1500
+ "_view_module": "@jupyter-widgets/base",
1501
+ "_view_module_version": "1.2.0",
1502
+ "_view_name": "LayoutView",
1503
+ "align_content": null,
1504
+ "align_items": null,
1505
+ "align_self": null,
1506
+ "border": null,
1507
+ "bottom": null,
1508
+ "display": null,
1509
+ "flex": null,
1510
+ "flex_flow": null,
1511
+ "grid_area": null,
1512
+ "grid_auto_columns": null,
1513
+ "grid_auto_flow": null,
1514
+ "grid_auto_rows": null,
1515
+ "grid_column": null,
1516
+ "grid_gap": null,
1517
+ "grid_row": null,
1518
+ "grid_template_areas": null,
1519
+ "grid_template_columns": null,
1520
+ "grid_template_rows": null,
1521
+ "height": null,
1522
+ "justify_content": null,
1523
+ "justify_items": null,
1524
+ "left": null,
1525
+ "margin": null,
1526
+ "max_height": null,
1527
+ "max_width": null,
1528
+ "min_height": null,
1529
+ "min_width": null,
1530
+ "object_fit": null,
1531
+ "object_position": null,
1532
+ "order": null,
1533
+ "overflow": null,
1534
+ "overflow_x": null,
1535
+ "overflow_y": null,
1536
+ "padding": null,
1537
+ "right": null,
1538
+ "top": null,
1539
+ "visibility": null,
1540
+ "width": null
1541
+ }
1542
+ },
1543
+ "7761a50a602f41f1a21aa826c491eb9d": {
1544
+ "model_module": "@jupyter-widgets/controls",
1545
+ "model_module_version": "1.5.0",
1546
+ "model_name": "HTMLModel",
1547
+ "state": {
1548
+ "_dom_classes": [],
1549
+ "_model_module": "@jupyter-widgets/controls",
1550
+ "_model_module_version": "1.5.0",
1551
+ "_model_name": "HTMLModel",
1552
+ "_view_count": null,
1553
+ "_view_module": "@jupyter-widgets/controls",
1554
+ "_view_module_version": "1.5.0",
1555
+ "_view_name": "HTMLView",
1556
+ "description": "",
1557
+ "description_tooltip": null,
1558
+ "layout": "IPY_MODEL_b272384164504fa5b81d5502c12f8800",
1559
+ "placeholder": "​",
1560
+ "style": "IPY_MODEL_f525b9f19c334fe6b2305ad6bcfa20bf",
1561
+ "value": " 50/50 [00:01&lt;00:00, 28.55it/s]"
1562
+ }
1563
+ },
1564
+ "7980b120d41247548f49667cea6156a5": {
1565
+ "model_module": "@jupyter-widgets/controls",
1566
+ "model_module_version": "1.5.0",
1567
+ "model_name": "ProgressStyleModel",
1568
+ "state": {
1569
+ "_model_module": "@jupyter-widgets/controls",
1570
+ "_model_module_version": "1.5.0",
1571
+ "_model_name": "ProgressStyleModel",
1572
+ "_view_count": null,
1573
+ "_view_module": "@jupyter-widgets/base",
1574
+ "_view_module_version": "1.2.0",
1575
+ "_view_name": "StyleView",
1576
+ "bar_color": null,
1577
+ "description_width": ""
1578
+ }
1579
+ },
1580
+ "79b59cbde9444bf892931d31afec7f2a": {
1581
+ "model_module": "@jupyter-widgets/controls",
1582
+ "model_module_version": "1.5.0",
1583
+ "model_name": "HTMLModel",
1584
+ "state": {
1585
+ "_dom_classes": [],
1586
+ "_model_module": "@jupyter-widgets/controls",
1587
+ "_model_module_version": "1.5.0",
1588
+ "_model_name": "HTMLModel",
1589
+ "_view_count": null,
1590
+ "_view_module": "@jupyter-widgets/controls",
1591
+ "_view_module_version": "1.5.0",
1592
+ "_view_name": "HTMLView",
1593
+ "description": "",
1594
+ "description_tooltip": null,
1595
+ "layout": "IPY_MODEL_d9c33fbfb3164cbbb7b9a4cd172d20ae",
1596
+ "placeholder": "​",
1597
+ "style": "IPY_MODEL_df53331cce124bd1ada5aa9e9a977015",
1598
+ "value": "100%"
1599
+ }
1600
+ },
1601
+ "8211cc6c973a43fcaf18e14f6d7f08a2": {
1602
+ "model_module": "@jupyter-widgets/controls",
1603
+ "model_module_version": "1.5.0",
1604
+ "model_name": "HTMLModel",
1605
+ "state": {
1606
+ "_dom_classes": [],
1607
+ "_model_module": "@jupyter-widgets/controls",
1608
+ "_model_module_version": "1.5.0",
1609
+ "_model_name": "HTMLModel",
1610
+ "_view_count": null,
1611
+ "_view_module": "@jupyter-widgets/controls",
1612
+ "_view_module_version": "1.5.0",
1613
+ "_view_name": "HTMLView",
1614
+ "description": "",
1615
+ "description_tooltip": null,
1616
+ "layout": "IPY_MODEL_66d406d6eb1f49699ee09c9a2fd4ffa9",
1617
+ "placeholder": "​",
1618
+ "style": "IPY_MODEL_38341454dd6b4e9ca2fe5b85d2e371e1",
1619
+ "value": "100%"
1620
+ }
1621
+ },
1622
+ "83d947fc3338491ab4155b87c443884c": {
1623
+ "model_module": "@jupyter-widgets/controls",
1624
+ "model_module_version": "1.5.0",
1625
+ "model_name": "ProgressStyleModel",
1626
+ "state": {
1627
+ "_model_module": "@jupyter-widgets/controls",
1628
+ "_model_module_version": "1.5.0",
1629
+ "_model_name": "ProgressStyleModel",
1630
+ "_view_count": null,
1631
+ "_view_module": "@jupyter-widgets/base",
1632
+ "_view_module_version": "1.2.0",
1633
+ "_view_name": "StyleView",
1634
+ "bar_color": null,
1635
+ "description_width": ""
1636
+ }
1637
+ },
1638
+ "9f555c5ada38495eb4281cbb49169abe": {
1639
+ "model_module": "@jupyter-widgets/controls",
1640
+ "model_module_version": "1.5.0",
1641
+ "model_name": "HBoxModel",
1642
+ "state": {
1643
+ "_dom_classes": [],
1644
+ "_model_module": "@jupyter-widgets/controls",
1645
+ "_model_module_version": "1.5.0",
1646
+ "_model_name": "HBoxModel",
1647
+ "_view_count": null,
1648
+ "_view_module": "@jupyter-widgets/controls",
1649
+ "_view_module_version": "1.5.0",
1650
+ "_view_name": "HBoxView",
1651
+ "box_style": "",
1652
+ "children": [
1653
+ "IPY_MODEL_79b59cbde9444bf892931d31afec7f2a",
1654
+ "IPY_MODEL_a157870318114d459a33d795850967ef",
1655
+ "IPY_MODEL_635162e10abc441797d4e5b74713bf44"
1656
+ ],
1657
+ "layout": "IPY_MODEL_720b4d010c364e3fbf72a53b267e8db9"
1658
+ }
1659
+ },
1660
+ "a157870318114d459a33d795850967ef": {
1661
+ "model_module": "@jupyter-widgets/controls",
1662
+ "model_module_version": "1.5.0",
1663
+ "model_name": "FloatProgressModel",
1664
+ "state": {
1665
+ "_dom_classes": [],
1666
+ "_model_module": "@jupyter-widgets/controls",
1667
+ "_model_module_version": "1.5.0",
1668
+ "_model_name": "FloatProgressModel",
1669
+ "_view_count": null,
1670
+ "_view_module": "@jupyter-widgets/controls",
1671
+ "_view_module_version": "1.5.0",
1672
+ "_view_name": "ProgressView",
1673
+ "bar_style": "success",
1674
+ "description": "",
1675
+ "description_tooltip": null,
1676
+ "layout": "IPY_MODEL_229dad8e29f04c279c5603286e2c0643",
1677
+ "max": 50,
1678
+ "min": 0,
1679
+ "orientation": "horizontal",
1680
+ "style": "IPY_MODEL_83d947fc3338491ab4155b87c443884c",
1681
+ "value": 50
1682
+ }
1683
+ },
1684
+ "a2179cafb63f475db0162cd990a17ff7": {
1685
+ "model_module": "@jupyter-widgets/controls",
1686
+ "model_module_version": "1.5.0",
1687
+ "model_name": "HTMLModel",
1688
+ "state": {
1689
+ "_dom_classes": [],
1690
+ "_model_module": "@jupyter-widgets/controls",
1691
+ "_model_module_version": "1.5.0",
1692
+ "_model_name": "HTMLModel",
1693
+ "_view_count": null,
1694
+ "_view_module": "@jupyter-widgets/controls",
1695
+ "_view_module_version": "1.5.0",
1696
+ "_view_name": "HTMLView",
1697
+ "description": "",
1698
+ "description_tooltip": null,
1699
+ "layout": "IPY_MODEL_359ef2b8a4ac4a9c9a91edc4a2dd1326",
1700
+ "placeholder": "​",
1701
+ "style": "IPY_MODEL_c66dc6c14a4c4274900abe8fc993266a",
1702
+ "value": " 50/50 [00:01&lt;00:00, 28.62it/s]"
1703
+ }
1704
+ },
1705
+ "a2c30462ef8d41fd9158f194a746d5a7": {
1706
+ "model_module": "@jupyter-widgets/controls",
1707
+ "model_module_version": "1.5.0",
1708
+ "model_name": "DescriptionStyleModel",
1709
+ "state": {
1710
+ "_model_module": "@jupyter-widgets/controls",
1711
+ "_model_module_version": "1.5.0",
1712
+ "_model_name": "DescriptionStyleModel",
1713
+ "_view_count": null,
1714
+ "_view_module": "@jupyter-widgets/base",
1715
+ "_view_module_version": "1.2.0",
1716
+ "_view_name": "StyleView",
1717
+ "description_width": ""
1718
+ }
1719
+ },
1720
+ "a30c82833f55441995744300c2ef538d": {
1721
+ "model_module": "@jupyter-widgets/base",
1722
+ "model_module_version": "1.2.0",
1723
+ "model_name": "LayoutModel",
1724
+ "state": {
1725
+ "_model_module": "@jupyter-widgets/base",
1726
+ "_model_module_version": "1.2.0",
1727
+ "_model_name": "LayoutModel",
1728
+ "_view_count": null,
1729
+ "_view_module": "@jupyter-widgets/base",
1730
+ "_view_module_version": "1.2.0",
1731
+ "_view_name": "LayoutView",
1732
+ "align_content": null,
1733
+ "align_items": null,
1734
+ "align_self": null,
1735
+ "border": null,
1736
+ "bottom": null,
1737
+ "display": null,
1738
+ "flex": null,
1739
+ "flex_flow": null,
1740
+ "grid_area": null,
1741
+ "grid_auto_columns": null,
1742
+ "grid_auto_flow": null,
1743
+ "grid_auto_rows": null,
1744
+ "grid_column": null,
1745
+ "grid_gap": null,
1746
+ "grid_row": null,
1747
+ "grid_template_areas": null,
1748
+ "grid_template_columns": null,
1749
+ "grid_template_rows": null,
1750
+ "height": null,
1751
+ "justify_content": null,
1752
+ "justify_items": null,
1753
+ "left": null,
1754
+ "margin": null,
1755
+ "max_height": null,
1756
+ "max_width": null,
1757
+ "min_height": null,
1758
+ "min_width": null,
1759
+ "object_fit": null,
1760
+ "object_position": null,
1761
+ "order": null,
1762
+ "overflow": null,
1763
+ "overflow_x": null,
1764
+ "overflow_y": null,
1765
+ "padding": null,
1766
+ "right": null,
1767
+ "top": null,
1768
+ "visibility": null,
1769
+ "width": null
1770
+ }
1771
+ },
1772
+ "a8bdc4ecce4f48e0ba6483ea9e679336": {
1773
+ "model_module": "@jupyter-widgets/controls",
1774
+ "model_module_version": "1.5.0",
1775
+ "model_name": "HBoxModel",
1776
+ "state": {
1777
+ "_dom_classes": [],
1778
+ "_model_module": "@jupyter-widgets/controls",
1779
+ "_model_module_version": "1.5.0",
1780
+ "_model_name": "HBoxModel",
1781
+ "_view_count": null,
1782
+ "_view_module": "@jupyter-widgets/controls",
1783
+ "_view_module_version": "1.5.0",
1784
+ "_view_name": "HBoxView",
1785
+ "box_style": "",
1786
+ "children": [
1787
+ "IPY_MODEL_60604227dac34e37a0a9f3bfb3984317",
1788
+ "IPY_MODEL_4024c181581c485abd3181586afc2574",
1789
+ "IPY_MODEL_7761a50a602f41f1a21aa826c491eb9d"
1790
+ ],
1791
+ "layout": "IPY_MODEL_25ebd285de2e49c483c3b22b5c8364c0"
1792
+ }
1793
+ },
1794
+ "b272384164504fa5b81d5502c12f8800": {
1795
+ "model_module": "@jupyter-widgets/base",
1796
+ "model_module_version": "1.2.0",
1797
+ "model_name": "LayoutModel",
1798
+ "state": {
1799
+ "_model_module": "@jupyter-widgets/base",
1800
+ "_model_module_version": "1.2.0",
1801
+ "_model_name": "LayoutModel",
1802
+ "_view_count": null,
1803
+ "_view_module": "@jupyter-widgets/base",
1804
+ "_view_module_version": "1.2.0",
1805
+ "_view_name": "LayoutView",
1806
+ "align_content": null,
1807
+ "align_items": null,
1808
+ "align_self": null,
1809
+ "border": null,
1810
+ "bottom": null,
1811
+ "display": null,
1812
+ "flex": null,
1813
+ "flex_flow": null,
1814
+ "grid_area": null,
1815
+ "grid_auto_columns": null,
1816
+ "grid_auto_flow": null,
1817
+ "grid_auto_rows": null,
1818
+ "grid_column": null,
1819
+ "grid_gap": null,
1820
+ "grid_row": null,
1821
+ "grid_template_areas": null,
1822
+ "grid_template_columns": null,
1823
+ "grid_template_rows": null,
1824
+ "height": null,
1825
+ "justify_content": null,
1826
+ "justify_items": null,
1827
+ "left": null,
1828
+ "margin": null,
1829
+ "max_height": null,
1830
+ "max_width": null,
1831
+ "min_height": null,
1832
+ "min_width": null,
1833
+ "object_fit": null,
1834
+ "object_position": null,
1835
+ "order": null,
1836
+ "overflow": null,
1837
+ "overflow_x": null,
1838
+ "overflow_y": null,
1839
+ "padding": null,
1840
+ "right": null,
1841
+ "top": null,
1842
+ "visibility": null,
1843
+ "width": null
1844
+ }
1845
+ },
1846
+ "b2ff537e768b43ef98c412e633ab9e49": {
1847
+ "model_module": "@jupyter-widgets/controls",
1848
+ "model_module_version": "1.5.0",
1849
+ "model_name": "DescriptionStyleModel",
1850
+ "state": {
1851
+ "_model_module": "@jupyter-widgets/controls",
1852
+ "_model_module_version": "1.5.0",
1853
+ "_model_name": "DescriptionStyleModel",
1854
+ "_view_count": null,
1855
+ "_view_module": "@jupyter-widgets/base",
1856
+ "_view_module_version": "1.2.0",
1857
+ "_view_name": "StyleView",
1858
+ "description_width": ""
1859
+ }
1860
+ },
1861
+ "b833db18729f422cb86deed4be6f1900": {
1862
+ "model_module": "@jupyter-widgets/base",
1863
+ "model_module_version": "1.2.0",
1864
+ "model_name": "LayoutModel",
1865
+ "state": {
1866
+ "_model_module": "@jupyter-widgets/base",
1867
+ "_model_module_version": "1.2.0",
1868
+ "_model_name": "LayoutModel",
1869
+ "_view_count": null,
1870
+ "_view_module": "@jupyter-widgets/base",
1871
+ "_view_module_version": "1.2.0",
1872
+ "_view_name": "LayoutView",
1873
+ "align_content": null,
1874
+ "align_items": null,
1875
+ "align_self": null,
1876
+ "border": null,
1877
+ "bottom": null,
1878
+ "display": null,
1879
+ "flex": null,
1880
+ "flex_flow": null,
1881
+ "grid_area": null,
1882
+ "grid_auto_columns": null,
1883
+ "grid_auto_flow": null,
1884
+ "grid_auto_rows": null,
1885
+ "grid_column": null,
1886
+ "grid_gap": null,
1887
+ "grid_row": null,
1888
+ "grid_template_areas": null,
1889
+ "grid_template_columns": null,
1890
+ "grid_template_rows": null,
1891
+ "height": null,
1892
+ "justify_content": null,
1893
+ "justify_items": null,
1894
+ "left": null,
1895
+ "margin": null,
1896
+ "max_height": null,
1897
+ "max_width": null,
1898
+ "min_height": null,
1899
+ "min_width": null,
1900
+ "object_fit": null,
1901
+ "object_position": null,
1902
+ "order": null,
1903
+ "overflow": null,
1904
+ "overflow_x": null,
1905
+ "overflow_y": null,
1906
+ "padding": null,
1907
+ "right": null,
1908
+ "top": null,
1909
+ "visibility": null,
1910
+ "width": null
1911
+ }
1912
+ },
1913
+ "c0bb81765e93420796cd5f959e9d3534": {
1914
+ "model_module": "@jupyter-widgets/base",
1915
+ "model_module_version": "1.2.0",
1916
+ "model_name": "LayoutModel",
1917
+ "state": {
1918
+ "_model_module": "@jupyter-widgets/base",
1919
+ "_model_module_version": "1.2.0",
1920
+ "_model_name": "LayoutModel",
1921
+ "_view_count": null,
1922
+ "_view_module": "@jupyter-widgets/base",
1923
+ "_view_module_version": "1.2.0",
1924
+ "_view_name": "LayoutView",
1925
+ "align_content": null,
1926
+ "align_items": null,
1927
+ "align_self": null,
1928
+ "border": null,
1929
+ "bottom": null,
1930
+ "display": null,
1931
+ "flex": null,
1932
+ "flex_flow": null,
1933
+ "grid_area": null,
1934
+ "grid_auto_columns": null,
1935
+ "grid_auto_flow": null,
1936
+ "grid_auto_rows": null,
1937
+ "grid_column": null,
1938
+ "grid_gap": null,
1939
+ "grid_row": null,
1940
+ "grid_template_areas": null,
1941
+ "grid_template_columns": null,
1942
+ "grid_template_rows": null,
1943
+ "height": null,
1944
+ "justify_content": null,
1945
+ "justify_items": null,
1946
+ "left": null,
1947
+ "margin": null,
1948
+ "max_height": null,
1949
+ "max_width": null,
1950
+ "min_height": null,
1951
+ "min_width": null,
1952
+ "object_fit": null,
1953
+ "object_position": null,
1954
+ "order": null,
1955
+ "overflow": null,
1956
+ "overflow_x": null,
1957
+ "overflow_y": null,
1958
+ "padding": null,
1959
+ "right": null,
1960
+ "top": null,
1961
+ "visibility": null,
1962
+ "width": null
1963
+ }
1964
+ },
1965
+ "c20e9e14100d45f3bdff1b6df943940f": {
1966
+ "model_module": "@jupyter-widgets/base",
1967
+ "model_module_version": "1.2.0",
1968
+ "model_name": "LayoutModel",
1969
+ "state": {
1970
+ "_model_module": "@jupyter-widgets/base",
1971
+ "_model_module_version": "1.2.0",
1972
+ "_model_name": "LayoutModel",
1973
+ "_view_count": null,
1974
+ "_view_module": "@jupyter-widgets/base",
1975
+ "_view_module_version": "1.2.0",
1976
+ "_view_name": "LayoutView",
1977
+ "align_content": null,
1978
+ "align_items": null,
1979
+ "align_self": null,
1980
+ "border": null,
1981
+ "bottom": null,
1982
+ "display": null,
1983
+ "flex": null,
1984
+ "flex_flow": null,
1985
+ "grid_area": null,
1986
+ "grid_auto_columns": null,
1987
+ "grid_auto_flow": null,
1988
+ "grid_auto_rows": null,
1989
+ "grid_column": null,
1990
+ "grid_gap": null,
1991
+ "grid_row": null,
1992
+ "grid_template_areas": null,
1993
+ "grid_template_columns": null,
1994
+ "grid_template_rows": null,
1995
+ "height": null,
1996
+ "justify_content": null,
1997
+ "justify_items": null,
1998
+ "left": null,
1999
+ "margin": null,
2000
+ "max_height": null,
2001
+ "max_width": null,
2002
+ "min_height": null,
2003
+ "min_width": null,
2004
+ "object_fit": null,
2005
+ "object_position": null,
2006
+ "order": null,
2007
+ "overflow": null,
2008
+ "overflow_x": null,
2009
+ "overflow_y": null,
2010
+ "padding": null,
2011
+ "right": null,
2012
+ "top": null,
2013
+ "visibility": null,
2014
+ "width": null
2015
+ }
2016
+ },
2017
+ "c66dc6c14a4c4274900abe8fc993266a": {
2018
+ "model_module": "@jupyter-widgets/controls",
2019
+ "model_module_version": "1.5.0",
2020
+ "model_name": "DescriptionStyleModel",
2021
+ "state": {
2022
+ "_model_module": "@jupyter-widgets/controls",
2023
+ "_model_module_version": "1.5.0",
2024
+ "_model_name": "DescriptionStyleModel",
2025
+ "_view_count": null,
2026
+ "_view_module": "@jupyter-widgets/base",
2027
+ "_view_module_version": "1.2.0",
2028
+ "_view_name": "StyleView",
2029
+ "description_width": ""
2030
+ }
2031
+ },
2032
+ "d9c33fbfb3164cbbb7b9a4cd172d20ae": {
2033
+ "model_module": "@jupyter-widgets/base",
2034
+ "model_module_version": "1.2.0",
2035
+ "model_name": "LayoutModel",
2036
+ "state": {
2037
+ "_model_module": "@jupyter-widgets/base",
2038
+ "_model_module_version": "1.2.0",
2039
+ "_model_name": "LayoutModel",
2040
+ "_view_count": null,
2041
+ "_view_module": "@jupyter-widgets/base",
2042
+ "_view_module_version": "1.2.0",
2043
+ "_view_name": "LayoutView",
2044
+ "align_content": null,
2045
+ "align_items": null,
2046
+ "align_self": null,
2047
+ "border": null,
2048
+ "bottom": null,
2049
+ "display": null,
2050
+ "flex": null,
2051
+ "flex_flow": null,
2052
+ "grid_area": null,
2053
+ "grid_auto_columns": null,
2054
+ "grid_auto_flow": null,
2055
+ "grid_auto_rows": null,
2056
+ "grid_column": null,
2057
+ "grid_gap": null,
2058
+ "grid_row": null,
2059
+ "grid_template_areas": null,
2060
+ "grid_template_columns": null,
2061
+ "grid_template_rows": null,
2062
+ "height": null,
2063
+ "justify_content": null,
2064
+ "justify_items": null,
2065
+ "left": null,
2066
+ "margin": null,
2067
+ "max_height": null,
2068
+ "max_width": null,
2069
+ "min_height": null,
2070
+ "min_width": null,
2071
+ "object_fit": null,
2072
+ "object_position": null,
2073
+ "order": null,
2074
+ "overflow": null,
2075
+ "overflow_x": null,
2076
+ "overflow_y": null,
2077
+ "padding": null,
2078
+ "right": null,
2079
+ "top": null,
2080
+ "visibility": null,
2081
+ "width": null
2082
+ }
2083
+ },
2084
+ "df53331cce124bd1ada5aa9e9a977015": {
2085
+ "model_module": "@jupyter-widgets/controls",
2086
+ "model_module_version": "1.5.0",
2087
+ "model_name": "DescriptionStyleModel",
2088
+ "state": {
2089
+ "_model_module": "@jupyter-widgets/controls",
2090
+ "_model_module_version": "1.5.0",
2091
+ "_model_name": "DescriptionStyleModel",
2092
+ "_view_count": null,
2093
+ "_view_module": "@jupyter-widgets/base",
2094
+ "_view_module_version": "1.2.0",
2095
+ "_view_name": "StyleView",
2096
+ "description_width": ""
2097
+ }
2098
+ },
2099
+ "f525b9f19c334fe6b2305ad6bcfa20bf": {
2100
+ "model_module": "@jupyter-widgets/controls",
2101
+ "model_module_version": "1.5.0",
2102
+ "model_name": "DescriptionStyleModel",
2103
+ "state": {
2104
+ "_model_module": "@jupyter-widgets/controls",
2105
+ "_model_module_version": "1.5.0",
2106
+ "_model_name": "DescriptionStyleModel",
2107
+ "_view_count": null,
2108
+ "_view_module": "@jupyter-widgets/base",
2109
+ "_view_module_version": "1.2.0",
2110
+ "_view_name": "StyleView",
2111
+ "description_width": ""
2112
+ }
2113
+ },
2114
+ "fe6cae73e861414eaff54680113676bc": {
2115
+ "model_module": "@jupyter-widgets/base",
2116
+ "model_module_version": "1.2.0",
2117
+ "model_name": "LayoutModel",
2118
+ "state": {
2119
+ "_model_module": "@jupyter-widgets/base",
2120
+ "_model_module_version": "1.2.0",
2121
+ "_model_name": "LayoutModel",
2122
+ "_view_count": null,
2123
+ "_view_module": "@jupyter-widgets/base",
2124
+ "_view_module_version": "1.2.0",
2125
+ "_view_name": "LayoutView",
2126
+ "align_content": null,
2127
+ "align_items": null,
2128
+ "align_self": null,
2129
+ "border": null,
2130
+ "bottom": null,
2131
+ "display": null,
2132
+ "flex": null,
2133
+ "flex_flow": null,
2134
+ "grid_area": null,
2135
+ "grid_auto_columns": null,
2136
+ "grid_auto_flow": null,
2137
+ "grid_auto_rows": null,
2138
+ "grid_column": null,
2139
+ "grid_gap": null,
2140
+ "grid_row": null,
2141
+ "grid_template_areas": null,
2142
+ "grid_template_columns": null,
2143
+ "grid_template_rows": null,
2144
+ "height": null,
2145
+ "justify_content": null,
2146
+ "justify_items": null,
2147
+ "left": null,
2148
+ "margin": null,
2149
+ "max_height": null,
2150
+ "max_width": null,
2151
+ "min_height": null,
2152
+ "min_width": null,
2153
+ "object_fit": null,
2154
+ "object_position": null,
2155
+ "order": null,
2156
+ "overflow": null,
2157
+ "overflow_x": null,
2158
+ "overflow_y": null,
2159
+ "padding": null,
2160
+ "right": null,
2161
+ "top": null,
2162
+ "visibility": null,
2163
+ "width": null
2164
+ }
2165
+ }
2166
+ }
2167
+ }
2168
+ },
2169
+ "nbformat": 4,
2170
+ "nbformat_minor": 0
2171
+ }