# benchmark.py — TVSD brain-score benchmarking script (178 lines).
import os
import argparse
import json
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
from utils.dataset import TVSD_Dataset
from utils.hooks import Activations
from utils.load_model import load_model, resolve_transform
from utils.brainscore import compute_brain_score
def main(args):
    """Benchmark saved model activations against TVSD neural responses.

    For every layer directory found under
    ``{output_dir}/activations/TVSD/{model_name}``, loads the cached
    ``activations.pt``, flattens it to ``[B, features]``, filters neural
    responses by reliability, computes a cross-validated brain score, and
    finally writes one CSV row per layer to
    ``{output_dir}/results/{model_name}/{monkey}_arr_{region}.csv``.

    Args:
        args: Parsed CLI namespace (see the argparse definitions below).
            Relevant fields: model_config, root_dir, monkey, region,
            output_dir, reliability_threshold, n_splits, reducer,
            correlation_fn, pca_components, preprocessed, noise_test,
            permutation_test.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    model, model_name, _ = load_model(args.model_config)
    tvsd_dataset = TVSD_Dataset(
        root_dir=args.root_dir, monkey=args.monkey, region=args.region
    )
    layer_scores = {}
    activation_dir = f"{args.output_dir}/activations/TVSD/{model_name}"
    layers = os.listdir(activation_dir)
    for layer in layers:
        print(f"===== EVALUATING LAYER: {layer} =========")
        layer_dir = f"{activation_dir}/{layer}"
        activation_path = f"{layer_dir}/activations.pt"
        if not os.path.exists(activation_path):
            print(
                f"Activations for layer {layer} not found at {activation_path}. Skipping."
            )
            continue
        # Reuse the path built above (the original rebuilt the same string).
        activations = torch.load(activation_path, map_location=device)
        if activations is None:
            print(f"No activations found for layer: {layer}")
            continue
        print(f"Layer: {layer}, Activations shape: {activations.shape}")
        activations = (
            activations.reshape(activations.shape[0], -1).detach().cpu().numpy()
        )  # [B, H * W * C]
        # Dataset slice yields (responses, per-unit reliability); keep only
        # units whose reliability exceeds the CLI threshold.
        neural_responses, reliability = tvsd_dataset[: activations.shape[0]]
        reliability_mask = reliability > args.reliability_threshold
        neural_responses = neural_responses[:, reliability_mask]  # [B, C']
        if args.noise_test:
            # Null control: replace model features with Gaussian noise.
            activations = np.random.normal(size=activations.shape)
        if args.permutation_test:
            # Null control: shuffle rows in place so responses no longer
            # correspond to their stimuli.
            np.random.shuffle(neural_responses)
        print(
            f"{neural_responses.shape[1]} neural responses retained with reliability > {args.reliability_threshold}"
        )
        layer_score, layer_std = compute_brain_score(
            X=activations,
            Y=neural_responses,
            n_splits=args.n_splits,
            reducer=args.reducer,
            correlation_fn=args.correlation_fn,
            pca_components=args.pca_components,
            preprocessed=args.preprocessed,
        )
        layer_scores[layer] = {"score": layer_score, "std": layer_std}
        print(f"Score: {layer_score}, Std: {layer_std}")
    print("Final Layer Scores:")
    for layer, scores in layer_scores.items():
        print(f"{layer}: Score = {scores['score']}, Std = {scores['std']}")
    results_file = (
        f"{args.output_dir}/results/{model_name}/{args.monkey}_arr_{args.region}.csv"
    )
    os.makedirs(os.path.dirname(results_file), exist_ok=True)
    with open(results_file, "w") as f:
        f.write("Layer,Score,Std\n")
        for layer, scores in layer_scores.items():
            f.write(f"{layer},{scores['score']},{scores['std']}\n")
    print(f"Results saved to {results_file}")
if __name__ == "__main__":
    # CLI entry point: declare all benchmark options, parse, and run.
    cli = argparse.ArgumentParser(description="TVSD alignment pipeline.")
    cli.add_argument(
        "--model_config", type=str, required=True,
        help="Path to the model configuration file.",
    )
    cli.add_argument(
        "--root_dir", type=str, default=f"{os.getcwd()}/data/TVSD",
        help="Root directory of the TVSD dataset.",
    )
    cli.add_argument(
        "--monkey", type=str, default="monkeyF",
        help="Monkey name to use in the dataset.",
    )
    cli.add_argument(
        "--region", type=str, default="IT", choices=["V1", "V4", "IT"],
        help="Which brain region to benchmark.",
    )
    cli.add_argument(
        "--output_dir", type=str, default=f"{os.getcwd()}/outputs",
        help="Directory to save activations.",
    )
    cli.add_argument(
        "--reliability_threshold", type=float, default=0.3,
        help="Reliability threshold for neural responses.",
    )
    cli.add_argument(
        "--reducer", type=str, default="median", choices=["mean", "median"],
        help="Reduction method for brain score.",
    )
    cli.add_argument(
        "--correlation_fn", type=str, default="pearson",
        choices=["pearson", "spearman"],
        help="Correlation function to use for brain score computation.",
    )
    cli.add_argument(
        "--n_splits", type=int, default=5,
        help="Number of splits for KFold cross-validation.",
    )
    cli.add_argument(
        "--pca_components", type=int, default=100,
        help="Number of PCA components to use.",
    )
    cli.add_argument(
        "--skip_interval", type=int, default=1,
        help="Skip every n-th image in the dataset.",
    )
    cli.add_argument(
        "--preprocessed", action="store_true",
        help="Whether the data is preprocessed (scaled and PCA applied).",
    )
    cli.add_argument(
        "--noise_test", action="store_true",
        help="Run with pure noise to test. Useful for debugging.",
    )
    cli.add_argument(
        "--permutation_test", action="store_true",
        help="Randomly permute neural responses. Useful for debugging.",
    )
    main(cli.parse_args())