# get_model_activations_transformerlens.py
import argparse
import os
import math
import zarr
import torch
from tqdm import tqdm
from datasets import load_dataset, Dataset
from transformers import AutoTokenizer
from transformer_lens import HookedTransformer
import itertools
import shutil
###############################################################################
# Helper: parse_layers
###############################################################################
def parse_layers(layers_str: str, max_layer: int = None):
"""
Parse a string that may contain comma-separated integers or dash-separated ranges
into a sorted list of unique layer indices.
Examples:
"2,4,5" -> [2, 4, 5]
"1-3" -> [1, 2, 3]
"1-3,5,7-8" -> [1, 2, 3, 5, 7, 8]
    If max_layer is provided, any index outside [0, max_layer] is dropped.
    Layer indices are 0-based in TransformerLens, so "1-3" means blocks 1, 2, and 3;
    callers should pass max_layer = model.cfg.n_layers - 1.
"""
if not layers_str:
return []
parts = layers_str.split(",")
layer_set = set()
for p in parts:
p = p.strip()
if "-" in p:
start_str, end_str = p.split("-")
start_val, end_val = int(start_str), int(end_str)
if start_val > end_val:
start_val, end_val = end_val, start_val
for val in range(start_val, end_val + 1):
layer_set.add(val)
else:
layer_set.add(int(p))
if max_layer is not None:
layer_set = {l for l in layer_set if 0 <= l <= max_layer}
return sorted(layer_set)
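# Example (sketch): parse_layers("1-3,5,7-8") -> [1, 2, 3, 5, 7, 8];
# parse_layers("0,2,4-6", max_layer=5) -> [0, 2, 4, 5] (index 6 is dropped).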
###############################################################################
# get_activations_tl (main function)
###############################################################################
def get_activations_tl(
model_name: str,
dataset_name: str,
activations_path: str,
seq_len: int,
batch_size: int,
device: torch.device,
num_examples: int,
layers_str: str = None,
tokenizer_name: str = None,
):
"""
Collects 'resid_post' activations from a Transformer Lens HookedTransformer
for up to `num_examples` text items from `dataset_name`,
and stores them in a Zarr directory structure.
By default, we gather all layers [0..n_layers-1] unless `layers_str` is specified
(e.g. "0,2,4-6"). The final Zarr group has sub-datasets called "layer_{layer_idx}"
with shape (N, seq_len, hidden_dim).
"""
# -------------------------------------------------------------------------
# 1. Load the model and tokenizer
# -------------------------------------------------------------------------
print(f"Loading model {model_name} via Transformer Lens...")
model = HookedTransformer.from_pretrained(model_name, device=device)
model.eval()
n_layers = model.cfg.n_layers
hidden_dim = model.cfg.d_model
# If no custom tokenizer provided, use the same name as the model
if tokenizer_name is None:
tokenizer_name = model_name
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
if tokenizer.pad_token_id is None:
tokenizer.pad_token = tokenizer.eos_token
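    # GPT-NeoX-style tokenizers (e.g. Pythia's) ship without a pad token, so reuse EOS for padding.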
# -------------------------------------------------------------------------
# 2. Figure out which layers to collect
# -------------------------------------------------------------------------
if layers_str:
collect_layers = parse_layers(layers_str, max_layer=n_layers - 1)
if not collect_layers:
print("No valid layers after parsing; aborting.")
return
else:
# By default, collect all layers
collect_layers = list(range(n_layers))
print(f"Model has {n_layers} layers; hidden_dim={hidden_dim}")
print(f"Will collect resid_post for layers: {collect_layers}")
# -------------------------------------------------------------------------
# 3. Prepare dataset
# -------------------------------------------------------------------------
print(f"Loading dataset {dataset_name} for {num_examples} examples...")
ds_stream = load_dataset(dataset_name, split="train", streaming=True)
# Collect first `num_examples` from the streaming dataset
limited_samples = list(itertools.islice(ds_stream, num_examples))
if len(limited_samples) == 0:
print(f"No data available in dataset {dataset_name}!")
return
# Convert to an in-memory Dataset
ds_local = Dataset.from_list(limited_samples)
    # Tokenize; map() is called with batched=True, so `examples["text"]` is a list of strings
    def tokenize_fn(examples):
        return tokenizer(
            examples["text"],
            max_length=seq_len,
            truncation=True,
            padding="max_length",
        )
# Remove original text column
ds_local = ds_local.map(tokenize_fn, batched=True, remove_columns=["text"])
ds_local.set_format(type="torch", columns=["input_ids"])
if len(ds_local) == 0:
print(f"No data left after tokenization in dataset {dataset_name}!")
return
# -------------------------------------------------------------------------
# 4. Prepare Zarr directory
# -------------------------------------------------------------------------
save_dir = os.path.join(activations_path, model_name, dataset_name)
if os.path.exists(save_dir):
print(f"Activations already exist at {save_dir}, skipping.")
return
print(f"Creating directory {save_dir} to store Zarr arrays...")
created_dir = False
try:
os.makedirs(save_dir, exist_ok=False)
created_dir = True
store = zarr.DirectoryStore(save_dir)
zf = zarr.open_group(store=store, mode="w")
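        # Note: zarr.DirectoryStore is the zarr-python 2.x API; zarr-python 3.x
        # replaces it with zarr.storage.LocalStore.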
# Create one dataset per layer
dsets = {}
for layer_idx in collect_layers:
            # zarr arrays are resizable by default, so no h5py-style `maxshape` argument is needed
            dset = zf.create_dataset(
                f"layer_{layer_idx}",
                shape=(0, seq_len, hidden_dim),
                chunks=(batch_size, seq_len, hidden_dim),
                dtype="float32",
            )
dsets[layer_idx] = dset
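            # Each layer array starts empty and grows along axis 0 as batches are appended;
            # chunking one batch per chunk keeps appends contiguous on disk.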
# ---------------------------------------------------------------------
# 5. Collect activations
# ---------------------------------------------------------------------
# We'll create a list of hook names to pass to run_with_cache
hook_names = [f"blocks.{ly}.hook_resid_post" for ly in collect_layers]
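        # "hook_resid_post" is the residual stream after block `ly`, i.e. after that block's attention and MLP.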
max_needed_layer = max(collect_layers)
print("Collecting activations (resid_post) via run_with_cache...")
num_processed = 0
total_batches = math.ceil(len(ds_local) / batch_size)
with torch.no_grad():
for start_idx in tqdm(range(0, len(ds_local), batch_size), total=total_batches):
end_idx = min(start_idx + batch_size, len(ds_local))
batch_tokens = ds_local[start_idx:end_idx]["input_ids"].to(device)
# forward pass
_, cache = model.run_with_cache(
batch_tokens,
                    stop_at_layer=max_needed_layer + 1,  # run blocks 0..max_needed_layer; their resid_post hooks fire before stopping
names_filter=hook_names,
)
# for each layer, retrieve its 'resid_post'
for ly in collect_layers:
acts = cache[f"blocks.{ly}.hook_resid_post"] # shape [B, S, d_model]
# append to the Zarr dataset
old_size = dsets[ly].shape[0]
new_size = old_size + acts.size(0)
dsets[ly].resize((new_size, seq_len, hidden_dim))
dsets[ly][old_size:new_size, :, :] = acts.cpu().numpy()
num_processed += batch_tokens.size(0)
print(f"Done! Saved activations for {num_processed} sequences to {save_dir}.")
except Exception as e:
# If something fails and we created the directory, remove it
if created_dir:
print(f"An error occurred, removing directory {save_dir} ...")
shutil.rmtree(save_dir, ignore_errors=True)
raise e
###############################################################################
# Main / CLI
###############################################################################
if __name__ == "__main__":
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_grad_enabled(False)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
default="EleutherAI/pythia-70m-deduped",
help="Model name or local path recognized by Transformer Lens.",
)
parser.add_argument(
"--dataset",
type=str,
default="NeelNanda/pile-10k",
help="HuggingFace dataset name (split=train) for tokenization/activation collection.",
)
parser.add_argument(
"--batch_size",
type=int,
default=32,
help="Batch size for forward passes.",
)
parser.add_argument(
"--seq_len",
type=int,
default=128,
help="Maximum sequence length for tokenization.",
)
parser.add_argument(
"--activations_path",
type=str,
default="artifacts/data",
help="Where to store final Zarr outputs. The final path is <activations_path>/<model>/<dataset>.",
)
parser.add_argument(
"--num_examples",
type=int,
default=10_000,
help="How many total examples to read from the dataset (and load into memory).",
)
parser.add_argument(
"--layers",
type=str,
default=None,
help=(
"Comma-separated list or dash-separated ranges of 0-based layer indices "
"to collect. E.g. '0,2,4-6'. If not set, all layers are collected."
),
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
        help="Optional: override the default tokenizer. If None, the --model name is used.",
)
args = parser.parse_args()
get_activations_tl(
model_name=args.model,
dataset_name=args.dataset,
activations_path=args.activations_path,
seq_len=args.seq_len,
batch_size=args.batch_size,
device=device,
num_examples=args.num_examples,
layers_str=args.layers,
tokenizer_name=args.tokenizer_name,
)
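
# Example invocation (illustrative; adjust paths, flags, and layer choices as needed):
#   python get_model_activations_transformerlens.py \
#       --model EleutherAI/pythia-70m-deduped \
#       --dataset NeelNanda/pile-10k \
#       --seq_len 128 --batch_size 32 --num_examples 1000 \
#       --layers 0,2,4-6 \
#       --activations_path artifacts/data
# Activations are written to artifacts/data/EleutherAI/pythia-70m-deduped/NeelNanda/pile-10k/layer_{i}.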