"""
PRIORITY 1: Cross-scenario transfer test.
All runs use fpa=1 (frames per agent: each of the 4 agents sees exactly one
frame), with 4 evenly-spaced frames per scene from each dataset. This makes
the collision scenes (24 frames) and the ramp scenes (16 or 8 frames)
architecture-compatible.
Protocol:
- Base training: train sender + receiver on (dataset_src, target).
- Zero-shot transfer: apply the source-trained sender and receiver directly
  to dataset_tgt. No retraining.
- 16-shot calibration: freeze the source sender, train a new receiver on at
  most 16 class-stratified examples from dataset_tgt, evaluate on the
  dataset_tgt holdout.
- Cross-property: freeze the source sender, train a new receiver on the full
  dataset_src train split with a different target property.
Writes: results/cross_scenario_transfer/
"""
import json, time, sys, os, math, copy
from pathlib import Path
from datetime import datetime, timezone
import numpy as np
import torch
import torch.nn.functional as F
sys.path.insert(0, os.path.dirname(__file__))
from _kinematics_train import (
load_labels, ClassifierReceiver,
HIDDEN_DIM, VOCAB_SIZE, N_HEADS, N_AGENTS, MSG_DIM, BATCH_SIZE,
SENDER_LR, RECEIVER_LR, EARLY_STOP_PATIENCE, DEVICE,
)
from _killer_experiment import (
TemporalEncoder, DiscreteSender, DiscreteMultiSender,
)
OUT = Path("results/cross_scenario_transfer")
OUT.mkdir(parents=True, exist_ok=True)
LOG = Path("results/overnight_log.txt")
FEATURE_FILES = {
("collision", "vjepa2"): "results/vjepa2_collision_pooled.pt",
("collision", "dinov2"): "results/collision_dinov2_features.pt",
("ramp", "vjepa2"): "results/vjepa2_ramp_temporal.pt",
("ramp", "dinov2"): "results/phase54b_dino_features.pt",
("collision", "clip"): "results/kinematics_vs_mechanics/clip_collision_features.pt",
("ramp", "clip"): "results/kinematics_vs_mechanics/clip_ramp_features.pt",
}
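# Each feature file is a torch-saved dict whose "features" entry is a float
# tensor of shape (N_scenes, T, D); load_and_subsample() reduces T to
# N_FRAMES_SUBSAMPLE evenly-spaced frames.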
N_EPOCHS = 150
N_EPOCHS_RECEIVER_ONLY = 100
N_SEEDS = 2
N_FRAMES_SUBSAMPLE = 4 # fpa=1 × N_AGENTS=4 → 4 frames per scene
def log(msg):
ts = datetime.now(timezone.utc).strftime("%H:%M:%SZ")
line = f"[{ts}] P1-transfer: {msg}"
print(line, flush=True)
LOG.parent.mkdir(parents=True, exist_ok=True)
with open(LOG, "a") as f: f.write(line + "\n")
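# Even subsampling picks np.linspace indices: a 24-frame collision clip keeps
# frames [0, 7, 15, 23], a 16-frame ramp clip keeps [0, 5, 10, 15], and an
# 8-frame clip keeps [0, 2, 4, 7]; clips shorter than N_FRAMES_SUBSAMPLE are
# padded by repeating the last frame.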
def load_and_subsample(dataset, backbone):
path = FEATURE_FILES[(dataset, backbone)]
d = torch.load(path, weights_only=False, map_location="cpu")
feat = d["features"].float() # (N, T_full, D)
T_full = feat.shape[1]
if T_full < N_FRAMES_SUBSAMPLE:
# Pad by repeating last frame
pad = feat[:, -1:, :].repeat(1, N_FRAMES_SUBSAMPLE - T_full, 1)
feat = torch.cat([feat, pad], dim=1)
idx = list(range(T_full)) + [T_full - 1] * (N_FRAMES_SUBSAMPLE - T_full)
else:
idx = np.linspace(0, T_full - 1, N_FRAMES_SUBSAMPLE).astype(int).tolist()
feat = feat[:, idx, :].contiguous()
    return feat, idx, T_full  # T_full = frame count before subsampling
def extract_clip_ramp():
"""Extract CLIP features for 300 ramp scenes: 4 evenly-spaced frames directly."""
import timm
from torchvision import transforms
from PIL import Image
out_path = Path("results/kinematics_vs_mechanics/clip_ramp_features.pt")
if out_path.exists():
log("CLIP ramp features already cached")
return
log("Extracting CLIP ramp features (300 scenes × 24 frames at stride 1)...")
model = timm.create_model("vit_large_patch14_clip_224.openai",
pretrained=True, num_classes=0).to(DEVICE).eval()
tfm = transforms.Compose([
transforms.Resize(224), transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711]),
])
DATASET = Path("kubric/output/ramp_dataset")
n_scenes = 300
sample = sorted((DATASET / "scene_0000").glob("rgba_*.png"))
total = len(sample)
step = max(1, total // 24)
frame_indices = list(range(0, total, step))[:24]
    feat_out = torch.zeros(n_scenes, len(frame_indices), 1024, dtype=torch.float32)
t0 = time.time()
for si in range(n_scenes):
sd = DATASET / f"scene_{si:04d}"
imgs = [tfm(Image.open(sd / f"rgba_{fi:05d}.png").convert("RGB"))
for fi in frame_indices]
batch = torch.stack(imgs, 0).to(DEVICE)
with torch.no_grad():
feat_out[si] = model(batch).cpu().float()
if (si + 1) % 100 == 0:
log(f" clip-ramp [{si+1}/{n_scenes}] rate={(si+1)/(time.time()-t0):.1f}/s")
if DEVICE.type == "mps": torch.mps.empty_cache()
torch.save({"features": feat_out, "frame_indices": frame_indices,
"model": "vit_large_patch14_clip_224.openai"}, out_path)
log(f"CLIP ramp done in {time.time()-t0:.0f}s")
def build_sender(feat_dim, fpa):
senders = [DiscreteSender(TemporalEncoder(HIDDEN_DIM, feat_dim, fpa),
HIDDEN_DIM, VOCAB_SIZE, N_HEADS)
for _ in range(N_AGENTS)]
return DiscreteMultiSender(senders).to(DEVICE)
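# train_base: per-class 80/20 split, then joint sender/receiver training with
# a 3-receiver ensemble, receiver re-initialisation every 40 epochs (so the
# code is not tailored to one receiver's quirks), and the checkpoint with the
# best holdout accuracy returned.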
def train_base(feat, labels, seed, n_epochs=N_EPOCHS):
"""Train fresh sender+receiver on (feat, labels). Return (sender_state, receiver_state, train_ids, holdout_ids, best_acc)."""
N, nf, dim = feat.shape
fpa = 1
agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
torch.manual_seed(seed); np.random.seed(seed)
rng = np.random.RandomState(seed * 1000 + 42)
train_ids, holdout_ids = [], []
for c in np.unique(labels):
ids_c = np.where(labels == c)[0]
rng.shuffle(ids_c)
split = max(1, len(ids_c) // 5)
holdout_ids.extend(ids_c[:split]); train_ids.extend(ids_c[split:])
train_ids = np.array(train_ids); holdout_ids = np.array(holdout_ids)
n_classes = int(labels.max()) + 1
chance = 1.0 / n_classes
sender = build_sender(dim, fpa)
receivers = [ClassifierReceiver(MSG_DIM, HIDDEN_DIM, n_classes).to(DEVICE) for _ in range(3)]
so = torch.optim.Adam(sender.parameters(), lr=SENDER_LR)
ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]
labels_dev = torch.tensor(labels, dtype=torch.long).to(DEVICE)
me = math.log(VOCAB_SIZE)
n_batches = max(1, len(train_ids) // BATCH_SIZE)
best_acc, best_ep = 0.0, 0
best_sender_state, best_receiver_states = None, None
for ep in range(n_epochs):
if ep - best_ep > EARLY_STOP_PATIENCE and best_acc > chance + 0.05: break
if ep > 0 and ep % 40 == 0:
for i in range(len(receivers)):
receivers[i] = ClassifierReceiver(MSG_DIM, HIDDEN_DIM, n_classes).to(DEVICE)
ros[i] = torch.optim.Adam(receivers[i].parameters(), lr=RECEIVER_LR)
sender.train(); [r.train() for r in receivers]
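        # temperature anneal: tau decays linearly 3.0 -> 1.0 over the run;
        # hard (straight-through) message sampling from epoch 30 onward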
tau = 3.0 + (1.0 - 3.0) * ep / max(1, n_epochs - 1)
hard = ep >= 30
rng_ep = np.random.RandomState(seed * 10000 + ep)
perm = rng_ep.permutation(train_ids)
for b in range(n_batches):
batch_ids = perm[b*BATCH_SIZE:(b+1)*BATCH_SIZE]
if len(batch_ids) < 4: continue
views = [v[batch_ids].to(DEVICE) for v in agent_views]
tgt = labels_dev[batch_ids]
msg, logits_list = sender(views, tau=tau, hard=hard)
loss = torch.tensor(0.0, device=DEVICE)
for r in receivers: loss = loss + F.cross_entropy(r(msg), tgt)
loss = loss / len(receivers)
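            # anti-collapse term: if an agent's symbol entropy drops below 10%
            # of the maximum (log VOCAB_SIZE), subtract 0.03 * entropy from the
            # loss, i.e. nudge that agent back towards higher entropy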
for lg in logits_list:
lp = F.log_softmax(lg, -1); p = lp.exp().clamp(min=1e-8)
ent = -(p * lp).sum(-1).mean()
if ent / me < 0.1: loss = loss - 0.03 * ent
if torch.isnan(loss):
so.zero_grad(); [o.zero_grad() for o in ros]; continue
so.zero_grad(); [o.zero_grad() for o in ros]
loss.backward()
torch.nn.utils.clip_grad_norm_(sender.parameters(), 1.0)
so.step(); [o.step() for o in ros]
if ep % 50 == 0 and DEVICE.type == "mps": torch.mps.empty_cache()
if (ep + 1) % 10 == 0 or ep == 0:
sender.eval(); [r.eval() for r in receivers]
with torch.no_grad():
v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
msg_ho, _ = sender(v_ho)
tgt_ho = labels_dev[holdout_ids]
best_per_recv, best_recv_idx = 0.0, 0
for ri, r in enumerate(receivers):
preds = r(msg_ho).argmax(-1)
acc = (preds == tgt_ho).float().mean().item()
if acc > best_per_recv:
best_per_recv, best_recv_idx = acc, ri
if best_per_recv > best_acc:
best_acc, best_ep = best_per_recv, ep
best_sender_state = {k: v.cpu().clone() for k, v in sender.state_dict().items()}
best_receiver_states = [{k: v.cpu().clone() for k, v in r.state_dict().items()}
for r in receivers]
best_recv_idx_saved = best_recv_idx
return {
"sender_state": best_sender_state,
"receiver_states": best_receiver_states,
"best_recv_idx": best_recv_idx_saved if best_receiver_states else 0,
"train_ids": train_ids, "holdout_ids": holdout_ids,
"task_acc": best_acc, "chance": chance,
"n_classes": n_classes, "fpa": 1, "dim": dim,
}
def eval_zero_shot(base, feat_tgt, labels_tgt, holdout_ids_tgt):
"""Apply base sender + base receiver directly to target data."""
N, nf, dim = feat_tgt.shape
assert dim == base["dim"], f"dim mismatch {dim} vs {base['dim']}"
sender = build_sender(dim, base["fpa"])
sender.load_state_dict(base["sender_state"])
sender.eval().to(DEVICE)
receivers = [ClassifierReceiver(MSG_DIM, HIDDEN_DIM, base["n_classes"]).to(DEVICE)
for _ in range(len(base["receiver_states"]))]
for r, s in zip(receivers, base["receiver_states"]): r.load_state_dict(s)
[r.eval() for r in receivers]
agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)]
labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE)
with torch.no_grad():
v_ho = [v[holdout_ids_tgt].to(DEVICE) for v in agent_views]
msg_ho, _ = sender(v_ho)
tgt_ho = labels_dev[holdout_ids_tgt]
best = 0.0
for r in receivers:
preds = r(msg_ho).argmax(-1)
acc = (preds == tgt_ho).float().mean().item()
best = max(best, acc)
return best
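# Note on "16-shot": the stratified pick below takes max_examples // n_classes
# per class, so with the 3-way targets used here (chance 33.3%) the receiver
# actually sees 5 examples per class = 15 labelled target scenes.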
def train_receiver_frozen_sender(base, feat_tgt, labels_tgt, train_ids_tgt,
holdout_ids_tgt, seed, n_epochs=N_EPOCHS_RECEIVER_ONLY,
max_examples=None):
"""Freeze base sender. Train NEW receiver on (subset of) train_ids_tgt."""
N, nf, dim = feat_tgt.shape
assert dim == base["dim"]
if max_examples is not None and len(train_ids_tgt) > max_examples:
rng = np.random.RandomState(seed * 311 + 7)
picks = []
per_class = max(1, max_examples // base["n_classes"])
for c in range(base["n_classes"]):
ids_c = np.array([i for i in train_ids_tgt if labels_tgt[i] == c])
if len(ids_c) == 0: continue
rng.shuffle(ids_c)
picks.extend(ids_c[:per_class])
train_ids_tgt = np.array(picks)
sender = build_sender(dim, base["fpa"])
sender.load_state_dict(base["sender_state"])
sender.to(DEVICE).eval()
for p in sender.parameters(): p.requires_grad = False
receivers = [ClassifierReceiver(MSG_DIM, HIDDEN_DIM, base["n_classes"]).to(DEVICE) for _ in range(3)]
ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]
agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)]
labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE)
    bs = min(BATCH_SIZE, max(1, len(train_ids_tgt)))
    n_batches = max(1, len(train_ids_tgt) // bs)
best_acc, best_ep = 0.0, 0
for ep in range(n_epochs):
if ep - best_ep > EARLY_STOP_PATIENCE and best_acc > base["chance"] + 0.05: break
[r.train() for r in receivers]
rng_ep = np.random.RandomState(seed * 10000 + ep)
perm = rng_ep.permutation(train_ids_tgt)
        for b in range(n_batches):
batch_ids = perm[b*bs:(b+1)*bs]
if len(batch_ids) < 2: continue
views = [v[batch_ids].to(DEVICE) for v in agent_views]
with torch.no_grad():
msg, _ = sender(views)
for r, o in zip(receivers, ros):
pred = r(msg)
loss = F.cross_entropy(pred, labels_dev[batch_ids])
if torch.isnan(loss): continue
o.zero_grad(); loss.backward(); o.step()
if (ep + 1) % 10 == 0 or ep == 0:
[r.eval() for r in receivers]
with torch.no_grad():
v_ho = [v[holdout_ids_tgt].to(DEVICE) for v in agent_views]
msg_ho, _ = sender(v_ho)
tgt_ho = labels_dev[holdout_ids_tgt]
best = 0.0
for r in receivers:
preds = r(msg_ho).argmax(-1)
best = max(best, (preds == tgt_ho).float().mean().item())
if best > best_acc: best_acc, best_ep = best, ep
return best_acc
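# make_splits uses the same RandomState formula as train_base (seed * 1000 + 42),
# so for a given (labels, seed) pair the stratified 80/20 split is identical in
# both places.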
def make_splits(labels, seed):
rng = np.random.RandomState(seed * 1000 + 42)
train_ids, holdout_ids = [], []
for c in np.unique(labels):
ids_c = np.where(labels == c)[0]
rng.shuffle(ids_c)
split = max(1, len(ids_c) // 5)
holdout_ids.extend(ids_c[:split]); train_ids.extend(ids_c[split:])
return np.array(train_ids), np.array(holdout_ids)
# ── Main ──
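# main() stages: (A) within-scenario sanity runs, then base senders trained per
# backbone × scenario, (B) cross-scenario zero-shot + 16-shot transfer,
# (C) cross-property controls on V-JEPA collision, then the summary table.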
def main():
t_start = time.time()
log(f"=== OVERNIGHT PRIORITY 1: Cross-Scenario Transfer ===")
# Extract CLIP ramp if needed
extract_clip_ramp()
# Load all features once
log("Loading features...")
feats = {}
for (ds, bb), path in FEATURE_FILES.items():
if Path(path).exists():
            f, idx, t_full = load_and_subsample(ds, bb)
            feats[(ds, bb)] = f
            log(f" {ds}/{bb}: {tuple(f.shape)} sampled from T={t_full}")
labels_col_restit = load_labels("collision", "restitution")
labels_col_mass = load_labels("collision", "mass")
labels_ramp_restit = load_labels("ramp", "restitution")
all_results = []
records = [] # for table
# ── A. Within-scenario sanity (V-JEPA only) ──
log("\n--- A. Within-scenario sanity ---")
for seed in range(N_SEEDS):
log(f" within collision-restit V-JEPA seed={seed}")
t0 = time.time()
r = train_base(feats[("collision", "vjepa2")], labels_col_restit, seed)
dt = time.time() - t0
log(f" acc={r['task_acc']:.3f} [{dt:.0f}s]")
records.append({"row": "within_collision_vjepa", "bb": "vjepa2",
"seed": seed, "acc": r["task_acc"], "elapsed_s": dt})
all_results.append({"condition": "within_collision", "backbone": "vjepa2",
"seed": seed, "acc": r["task_acc"], "elapsed_s": dt})
for seed in range(N_SEEDS):
log(f" within ramp-restit V-JEPA seed={seed}")
t0 = time.time()
r = train_base(feats[("ramp", "vjepa2")], labels_ramp_restit, seed)
dt = time.time() - t0
log(f" acc={r['task_acc']:.3f} [{dt:.0f}s]")
records.append({"row": "within_ramp_vjepa", "bb": "vjepa2",
"seed": seed, "acc": r["task_acc"], "elapsed_s": dt})
all_results.append({"condition": "within_ramp", "backbone": "vjepa2",
"seed": seed, "acc": r["task_acc"], "elapsed_s": dt})
# ── Cache base senders needed for transfer ──
# col_restit for each backbone × seed
# ramp_restit for each backbone × seed
# col_mass for V-JEPA × seed (for cross-property)
log("\n--- Training base senders for transfer ---")
bases = {} # (bb, src_ds, target, seed) -> base dict
for bb in ["vjepa2", "dinov2", "clip"]:
if ("collision", bb) not in feats or ("ramp", bb) not in feats: continue
for seed in range(N_SEEDS):
log(f" base {bb} collision-restit seed={seed}")
t0 = time.time()
bases[(bb, "collision", "restitution", seed)] = train_base(
feats[("collision", bb)], labels_col_restit, seed)
log(f" acc={bases[(bb, 'collision', 'restitution', seed)]['task_acc']:.3f} [{time.time()-t0:.0f}s]")
log(f" base {bb} ramp-restit seed={seed}")
t0 = time.time()
bases[(bb, "ramp", "restitution", seed)] = train_base(
feats[("ramp", bb)], labels_ramp_restit, seed)
log(f" acc={bases[(bb, 'ramp', 'restitution', seed)]['task_acc']:.3f} [{time.time()-t0:.0f}s]")
# V-JEPA collision-mass for cross-property
for seed in range(N_SEEDS):
log(f" base vjepa2 collision-mass seed={seed}")
t0 = time.time()
bases[("vjepa2", "collision", "mass", seed)] = train_base(
feats[("collision", "vjepa2")], labels_col_mass, seed)
log(f" acc={bases[('vjepa2', 'collision', 'mass', seed)]['task_acc']:.3f} [{time.time()-t0:.0f}s]")
# ── B. Cross-scenario transfer ──
log("\n--- B. Cross-scenario transfer ---")
for bb in ["vjepa2", "dinov2", "clip"]:
if (bb, "collision", "restitution", 0) not in bases: continue
for direction, src_ds, tgt_ds, tgt_labels in [
("col_to_ramp", "collision", "ramp", labels_ramp_restit),
("ramp_to_col", "ramp", "collision", labels_col_restit),
]:
for seed in range(N_SEEDS):
base = bases[(bb, src_ds, "restitution", seed)]
# Splits on the TARGET dataset
train_ids_tgt, holdout_ids_tgt = make_splits(tgt_labels, seed)
# Zero-shot
t0 = time.time()
acc_zs = eval_zero_shot(base, feats[(tgt_ds, bb)], tgt_labels, holdout_ids_tgt)
dt_zs = time.time() - t0
log(f" {bb} {direction} zero-shot seed={seed}: acc={acc_zs:.3f} [{dt_zs:.1f}s]")
records.append({"row": f"{direction}_zero_shot", "bb": bb, "seed": seed,
"acc": acc_zs, "elapsed_s": dt_zs})
all_results.append({"condition": f"{direction}_zero_shot", "backbone": bb,
"seed": seed, "acc": acc_zs, "elapsed_s": dt_zs})
# 16-shot
t0 = time.time()
acc_16 = train_receiver_frozen_sender(
base, feats[(tgt_ds, bb)], tgt_labels,
train_ids_tgt, holdout_ids_tgt, seed, max_examples=16)
dt_16 = time.time() - t0
log(f" {bb} {direction} 16-shot seed={seed}: acc={acc_16:.3f} [{dt_16:.0f}s]")
records.append({"row": f"{direction}_16shot", "bb": bb, "seed": seed,
"acc": acc_16, "elapsed_s": dt_16})
all_results.append({"condition": f"{direction}_16shot", "backbone": bb,
"seed": seed, "acc": acc_16, "elapsed_s": dt_16})
# ── C. Cross-property controls (V-JEPA, within collision) ──
log("\n--- C. Cross-property controls (V-JEPA collision) ---")
# restit-sender → mass
for seed in range(N_SEEDS):
base = bases[("vjepa2", "collision", "restitution", seed)]
train_ids, holdout_ids = make_splits(labels_col_mass, seed)
t0 = time.time()
acc = train_receiver_frozen_sender(
base, feats[("collision", "vjepa2")], labels_col_mass,
train_ids, holdout_ids, seed, max_examples=None)
dt = time.time() - t0
log(f" V-JEPA restit→mass seed={seed}: acc={acc:.3f} [{dt:.0f}s]")
records.append({"row": "cross_prop_restit_to_mass", "bb": "vjepa2",
"seed": seed, "acc": acc, "elapsed_s": dt})
all_results.append({"condition": "cross_prop_restit_to_mass",
"backbone": "vjepa2", "seed": seed,
"acc": acc, "elapsed_s": dt})
# mass-sender → restit
for seed in range(N_SEEDS):
base = bases[("vjepa2", "collision", "mass", seed)]
train_ids, holdout_ids = make_splits(labels_col_restit, seed)
t0 = time.time()
acc = train_receiver_frozen_sender(
base, feats[("collision", "vjepa2")], labels_col_restit,
train_ids, holdout_ids, seed, max_examples=None)
dt = time.time() - t0
log(f" V-JEPA mass→restit seed={seed}: acc={acc:.3f} [{dt:.0f}s]")
records.append({"row": "cross_prop_mass_to_restit", "bb": "vjepa2",
"seed": seed, "acc": acc, "elapsed_s": dt})
all_results.append({"condition": "cross_prop_mass_to_restit",
"backbone": "vjepa2", "seed": seed,
"acc": acc, "elapsed_s": dt})
# ── Aggregate + write summary ──
def agg(cond, bb):
vals = [r["acc"] for r in all_results
if r["condition"] == cond and r["backbone"] == bb]
if not vals: return (float("nan"), float("nan"))
return (float(np.mean(vals)*100), float(np.std(vals)*100))
lines = []
lines.append("CROSS-SCENARIO RESTITUTION TRANSFER")
lines.append(f"Config: fpa=1, 4 frames (evenly spaced), K=5, 2 seeds/cell.")
lines.append("")
header = "Condition | V-JEPA 2 | DINOv2 | CLIP | Chance"
lines.append(header)
lines.append("-" * len(header))
    def row(name, cond, bbs=("vjepa2", "dinov2", "clip")):
        # always emit three backbone columns; backbones not run for this
        # condition (or with no results) show a dash instead of a number
        cells = []
        for bb in ("vjepa2", "dinov2", "clip"):
            m, s = agg(cond, bb) if bb in bbs else (float("nan"), float("nan"))
            if np.isnan(m):
                cells.append(f"{'—':^14s}")
            else:
                cells.append(f"{m:5.1f}% ± {s:4.1f} ")
        return f"{name:<39s}| {cells[0]}| {cells[1]}| {cells[2]}| 33.3%"
lines.append(row("Within collision (sanity)", "within_collision", ("vjepa2",)))
lines.append(row("Within ramp (sanity)", "within_ramp", ("vjepa2",)))
lines.append(row("Collision→Ramp (zero-shot)", "col_to_ramp_zero_shot"))
lines.append(row("Ramp→Collision (zero-shot)", "ramp_to_col_zero_shot"))
lines.append(row("Collision→Ramp (16-shot)", "col_to_ramp_16shot"))
lines.append(row("Ramp→Collision (16-shot)", "ramp_to_col_16shot"))
lines.append(row("Cross-property: restit→mass","cross_prop_restit_to_mass", ("vjepa2",)))
lines.append(row("Cross-property: mass→restit","cross_prop_mass_to_restit", ("vjepa2",)))
total_s = time.time() - t_start
lines.append("")
lines.append(f"Total runtime: {total_s/60:.1f} min ({total_s:.0f}s)")
lines.append(f"Runs: {len(all_results)}")
summary = "\n".join(lines)
(OUT / "p1_summary.txt").write_text(summary + "\n")
with open(OUT / "p1_raw.json", "w") as f:
# remove state dicts from bases for serialization
json.dump({"runs": all_results,
"n_runs": len(all_results),
"total_runtime_s": total_s},
f, indent=2, default=str)
log(f"\n{summary}")
log(f"Saved: {OUT / 'p1_summary.txt'}")
if __name__ == "__main__":
main()