| """ |
| Phase 0B: single-target bottleneck trainer for kinematics-vs-mechanics. |
| |
| Same sender architecture as `_killer_experiment.py` (4-agent × 2-head |
| Gumbel-Softmax, K=5, HIDDEN_DIM=128) but the receiver is a 3-way classifier |
| that reads a single message and predicts the binned target class. |
| |
| Takes `--target <name>` and `--dataset <collision|ramp>` and pulls the |
| corresponding `_bin` column from the label .npz. Feature file and per-dataset |
| config are selected by `--backbone <vjepa2|dinov2>`. |
| |
| Run: |
| /usr/bin/python3 _kinematics_train.py --dataset collision --backbone vjepa2 \ |
| --target mass --seed 0 |
| """ |
import argparse, json, math, os, sys, time, warnings
warnings.filterwarnings("ignore")

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

sys.path.insert(0, os.path.dirname(__file__))
from _killer_experiment import (
    TemporalEncoder, DiscreteSender, DiscreteMultiSender
)

DEVICE = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

HIDDEN_DIM = 128
VOCAB_SIZE = 5
N_HEADS = 2
N_AGENTS = 4
MSG_DIM = N_AGENTS * N_HEADS * VOCAB_SIZE
N_POS = N_AGENTS * N_HEADS
BATCH_SIZE = 32
SENDER_LR = 1e-3
RECEIVER_LR = 3e-3
EARLY_STOP_PATIENCE = 50
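
# A message is N_POS = N_AGENTS × N_HEADS = 8 token positions, each a one-hot
# (or Gumbel-Softmax-relaxed) vector over VOCAB_SIZE = 5 symbols, flattened to
# MSG_DIM = 40 floats before the receiver sees it.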


class ClassifierReceiver(nn.Module):
    """Reads one flattened message [B, msg_dim] and predicts n_classes logits."""

    def __init__(self, msg_dim, hidden_dim, n_classes=3):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(msg_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim // 2), nn.ReLU(),
            nn.Linear(hidden_dim // 2, n_classes),
        )

    def forward(self, msg):
        return self.net(msg)
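
# Minimal usage sketch (shapes as used in train_one below):
#   msg, _ = sender(views, tau=1.0, hard=True)                # msg: [B, MSG_DIM]
#   logits = ClassifierReceiver(MSG_DIM, HIDDEN_DIM, 3)(msg)  # [B, 3]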


FEATURE_FILES = {
    ("collision", "vjepa2"): "results/vjepa2_collision_pooled.pt",
    ("collision", "dinov2"): "results/collision_dinov2_features.pt",
    ("ramp", "vjepa2"): "results/vjepa2_ramp_temporal.pt",
    ("ramp", "dinov2"): "results/phase54b_dino_features.pt",
}

LABEL_FILES = {
    "collision": "results/kinematics_vs_mechanics/labels_collision.npz",
    "ramp": "results/kinematics_vs_mechanics/labels_ramp.npz",
}
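
# Each label .npz stores one `<target>_bin` int array per target;
# load_labels() lists the available names when --target is unknown.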


def load_features(dataset, backbone):
    """Return features as float32 [N, n_frames, dim]."""
    path = FEATURE_FILES[(dataset, backbone)]
    d = torch.load(path, weights_only=False, map_location="cpu")
    return d["features"].float()


def load_labels(dataset, target):
    """Return bin labels (int64 [N])."""
    z = np.load(LABEL_FILES[dataset])
    key = f"{target}_bin"
    if key not in z:
        available = [k.replace("_bin", "") for k in z.files if k.endswith("_bin")]
        raise ValueError(f"Unknown target '{target}' for dataset '{dataset}'. "
                         f"Available: {available}")
    return z[key].astype(np.int64)


def train_one(dataset, backbone, target, seed,
              n_epochs=150, verbose=False):
    """
    Train one sender + receiver-ensemble system and return a result dict:
    dataset, backbone, target, seed, n_classes, chance, task_acc, posdis,
    elapsed_s, best_ep, n_train, n_holdout.
    """
    t0 = time.time()
    feat = load_features(dataset, backbone)
    labels = load_labels(dataset, target)

    N, nf, dim = feat.shape
    fpa = max(1, nf // N_AGENTS)
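    # Partial observability: agent i sees only frames [i*fpa, (i+1)*fpa);
    # trailing nf % N_AGENTS frames are dropped.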
    agent_views = [feat[:, (i * fpa):(i + 1) * fpa, :] for i in range(N_AGENTS)]

    torch.manual_seed(seed)
    np.random.seed(seed)
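
    # Stratified split: hold out ~20% of each class (at least one sample),
    # using a seed-derived RNG so the split is reproducible per seed.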
    rng = np.random.RandomState(seed * 1000 + 42)
    train_ids = []
    holdout_ids = []
    for c in np.unique(labels):
        ids_c = np.where(labels == c)[0]
        rng.shuffle(ids_c)
        split = max(1, len(ids_c) // 5)
        holdout_ids.extend(ids_c[:split])
        train_ids.extend(ids_c[split:])
    train_ids = np.array(train_ids)
    holdout_ids = np.array(holdout_ids)

    n_classes = int(labels.max()) + 1
    chance = 1.0 / n_classes

    senders = [DiscreteSender(TemporalEncoder(HIDDEN_DIM, dim, fpa),
                              HIDDEN_DIM, VOCAB_SIZE, N_HEADS)
               for _ in range(N_AGENTS)]
    sender = DiscreteMultiSender(senders).to(DEVICE)
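    # Three receivers listen to the same channel; holdout accuracy takes the
    # best of the three, and all are periodically re-initialized (below),
    # presumably to discourage sender-receiver co-adaptation.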
    receivers = [ClassifierReceiver(MSG_DIM, HIDDEN_DIM, n_classes).to(DEVICE)
                 for _ in range(3)]
    so = torch.optim.Adam(sender.parameters(), lr=SENDER_LR)
    ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]

    labels_dev = torch.tensor(labels, dtype=torch.long).to(DEVICE)
    me = math.log(VOCAB_SIZE)  # max per-token entropy, used by the collapse check
    n_batches = max(1, len(train_ids) // BATCH_SIZE)

    best_acc, best_state, best_ep = 0.0, None, 0

    for ep in range(n_epochs):
        # Stop early once accuracy is clearly above chance and has not
        # improved for EARLY_STOP_PATIENCE epochs.
        if ep - best_ep > EARLY_STOP_PATIENCE and best_acc > chance + 0.05:
            break
        # Re-initialize every receiver (and its optimizer) every 40 epochs.
        if ep > 0 and ep % 40 == 0:
            for i in range(len(receivers)):
                receivers[i] = ClassifierReceiver(MSG_DIM, HIDDEN_DIM, n_classes).to(DEVICE)
                ros[i] = torch.optim.Adam(receivers[i].parameters(), lr=RECEIVER_LR)

        sender.train()
        for r in receivers:
            r.train()
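        # Gumbel-Softmax schedule: temperature anneals linearly from 3.0 to
        # 1.0 over training; straight-through (hard) sampling from epoch 30 on.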
        tau = 3.0 + (1.0 - 3.0) * ep / max(1, n_epochs - 1)
        hard = ep >= 30

        rng_ep = np.random.RandomState(seed * 10000 + ep)
        perm = rng_ep.permutation(train_ids)

        for b in range(n_batches):
            batch_ids = perm[b * BATCH_SIZE:(b + 1) * BATCH_SIZE]
            if len(batch_ids) < 4:
                continue
            views = [v[batch_ids].to(DEVICE) for v in agent_views]
            target_batch = labels_dev[batch_ids]

            msg, logits_list = sender(views, tau=tau, hard=hard)
            loss = torch.tensor(0.0, device=DEVICE)
            for r in receivers:
                pred = r(msg)
                loss = loss + F.cross_entropy(pred, target_batch)
            loss = loss / len(receivers)
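
            # Anti-collapse bonus: if a head's mean token entropy drops below
            # 10% of its log(VOCAB_SIZE) maximum, add a small entropy reward.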
            for lg in logits_list:
                lp = F.log_softmax(lg, -1)
                p = lp.exp().clamp(min=1e-8)
                ent = -(p * lp).sum(-1).mean()
                if ent / me < 0.1:
                    loss = loss - 0.03 * ent

            if torch.isnan(loss):
                continue  # skip unstable batch; grads are re-zeroed before the next backward
            so.zero_grad()
            for o in ros:
                o.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(sender.parameters(), 1.0)
            so.step()
            for o in ros:
                o.step()

        if ep % 50 == 0 and DEVICE.type == "mps":
            torch.mps.empty_cache()

        # Holdout eval every 10 epochs (plus epoch 0); track the best sender.
        if (ep + 1) % 10 == 0 or ep == 0:
            sender.eval()
            for r in receivers:
                r.eval()
            with torch.no_grad():
                v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
                msg_ho, _ = sender(v_ho)
                target_ho = labels_dev[holdout_ids]
                best_per_recv = 0.0
                for r in receivers:
                    preds = r(msg_ho).argmax(-1)
                    acc = (preds == target_ho).float().mean().item()
                    best_per_recv = max(best_per_recv, acc)
                if verbose and ((ep + 1) % 50 == 0 or ep == 0):
                    print(f" ep={ep} holdout_acc={best_per_recv:.1%}",
                          flush=True)
                if best_per_recv > best_acc:
                    best_acc = best_per_recv
                    best_ep = ep
                    best_state = {k: v.cpu().clone()
                                  for k, v in sender.state_dict().items()}

    if best_state is not None:
        sender.load_state_dict(best_state)
    sender.eval()

    # Re-encode the full dataset with the best sender to get hard tokens.
    with torch.no_grad():
        toks_list = []
        for i in range(0, N, BATCH_SIZE):
            vs = [v[i:i + BATCH_SIZE].to(DEVICE) for v in agent_views]
            _, logits = sender(vs)
            toks_list.append(np.stack([lg.argmax(-1).cpu().numpy()
                                       for lg in logits], 1))
        tokens = np.concatenate(toks_list, 0)
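
    # PosDis across the N_POS token positions. The single target is stacked
    # twice so positional_disentanglement receives the [N, n_attrs] attribute
    # matrix it expects (an interface assumption; adjust if the helper differs).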
    try:
        from _killer_experiment import positional_disentanglement
        attrs = np.stack([labels, labels], axis=1)
        posdis, _, _ = positional_disentanglement(tokens, attrs, VOCAB_SIZE)
    except Exception:
        posdis = 0.0

    return {
        "dataset": dataset,
        "backbone": backbone,
        "target": target,
        "seed": int(seed),
        "n_classes": int(n_classes),
        "chance": float(chance),
        "task_acc": float(best_acc),
        "posdis": float(posdis),
        "elapsed_s": float(time.time() - t0),
        "best_ep": int(best_ep),
        "n_train": int(len(train_ids)),
        "n_holdout": int(len(holdout_ids)),
    }


if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--dataset", required=True, choices=["collision", "ramp"])
    ap.add_argument("--backbone", required=True, choices=["vjepa2", "dinov2"])
    ap.add_argument("--target", required=True,
                    help="label name; needs a <name>_bin array in the label .npz")
    ap.add_argument("--seed", type=int, default=0)
    ap.add_argument("--epochs", type=int, default=150)
    ap.add_argument("--verbose", action="store_true")
    args = ap.parse_args()
    r = train_one(args.dataset, args.backbone, args.target, args.seed,
                  n_epochs=args.epochs, verbose=args.verbose)
    print(json.dumps(r, indent=2), flush=True)
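
# Example sweep (target names beyond `mass` are hypothetical; check the label
# .npz for the actual `<name>_bin` keys):
#   results = [train_one("collision", "vjepa2", t, s)
#              for t in ("mass", "friction") for s in range(3)]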