| """ |
| EXP REV-LP-MV: Linear probes on matched-visual conditions (R2 highest-value fix). |
| |
| Adds linear-probe baselines to the gradient figure for: |
| 1. Velocity interpolation (matched visuals, kinematic split) |
| 2. Elastic vs inelastic restitution split (matched visuals, dynamics-class split) |
| 3. Standard-gravity vs low-gravity (matched visuals, dynamics shift) |
| |
| Each is a logistic regression on l2-pooled V-JEPA 2 features at N in {16, 192}, 5 seeds. |
| """ |
| import json |
| import time |
| import os |
| from pathlib import Path |
| from datetime import datetime, timezone |
|
|
| import numpy as np |
| import torch |
| from sklearn.linear_model import LogisticRegression |
| from sklearn.preprocessing import StandardScaler |
|
|
# Wall-clock bookkeeping: record the UTC start time (for correlating this run's
# log with the scheduler) and a start timestamp for total-runtime reporting.
PROMPT_RECEIVED_TIME = datetime.now(timezone.utc).isoformat()
print(f"PROMPT_RECEIVED_TIME = {PROMPT_RECEIVED_TIME}", flush=True)
T0 = time.time()


# Output directory and the experiment grid.
OUT = Path("results/reviewer_response/exp_lp_matched_visual")
OUT.mkdir(parents=True, exist_ok=True)
N_LIST = [16, 192]  # target-example budgets N evaluated by run_split
N_SEEDS = 5         # number of random subsampling seeds per condition
RNG_BASE = 1234     # base seed; seed s uses RNG_BASE + s
|
|
|
|
def log(msg):
    """Print a UTC-timestamped (HH:MM:SSZ) progress line, flushed immediately."""
    stamp = datetime.now(timezone.utc).strftime("%H:%M:%SZ")
    line = f"[{stamp}] LP-MV: {msg}"
    print(line, flush=True)
|
|
|
|
def pool_l2(features_3d):
    """Collapse the temporal axis of clip features: (N, T, D) -> (N, D) numpy.

    NOTE(review): despite the name (and the module docstring's "l2-pooled"),
    this is a temporal MEAN pool, not an L2 norm. Kept as-is because cached
    downstream results depend on it -- confirm whether the name or the
    implementation is the intended one.

    A 2-D input is assumed to be already pooled and is only converted from a
    torch tensor to a numpy array.
    """
    pooled = features_3d.mean(dim=1) if features_3d.ndim == 3 else features_3d
    return pooled.numpy()
|
|
|
|
def stratified_subset(rng, y, n_per_class):
    """Sample up to `n_per_class` indices per class label of `y`, without replacement.

    Draws with the supplied numpy Generator `rng`; returns the selected indices
    as a sorted numpy array.
    """
    picked = []
    for label in np.unique(y):
        members = np.where(y == label)[0]
        if len(members) == 0:
            continue
        take = min(n_per_class, len(members))
        draw = rng.choice(members, size=take, replace=False)
        picked.extend(draw.tolist())
    return np.array(sorted(picked))
|
|
|
|
def train_lp(X_tr, y_tr, X_te, y_te):
    """Fit a standardized logistic-regression probe and return test accuracy.

    Features are z-scored using statistics from the training split only (no
    leakage from the evaluation split). Returns accuracy as a float in [0, 1].

    Fix: the explicit `multi_class="auto"` argument was removed -- it is the
    default, is deprecated since scikit-learn 1.5, and is removed in 1.7, so
    passing it would warn or break on newer versions with no behavior change.
    """
    sc = StandardScaler().fit(X_tr)
    Xs_tr = sc.transform(X_tr)
    Xs_te = sc.transform(X_te)
    model = LogisticRegression(max_iter=2000, C=1.0, solver="lbfgs")
    model.fit(Xs_tr, y_tr)
    return float((model.predict(Xs_te) == y_te).mean())
|
|
|
|
def stats(vals):
    """Return (mean, sample std) of `vals`; std is 0.0 when fewer than 2 values."""
    arr = np.array(vals)
    mean = float(arr.mean())
    if len(arr) > 1:
        spread = float(arr.std(ddof=1))
    else:
        spread = 0.0
    return mean, spread
|
|
|
|
| def run_split(name, X_src, y_src, X_tgt, y_tgt, n_classes): |
| """Evaluate linear probe at N in N_LIST. |
| |
| Source-train-only baseline: train on full source, evaluate on target (N=0). |
| N>0: train on full source + N stratified target examples, evaluate on remaining target. |
| """ |
| log(f"=== {name}: src={X_src.shape}, tgt={X_tgt.shape}, n_classes={n_classes}") |
| results = {"N0_source_only": [], "curve": {N: [] for N in N_LIST}} |
|
|
| |
| for s in range(N_SEEDS): |
| |
| acc = train_lp(X_src, y_src, X_tgt, y_tgt) |
| results["N0_source_only"].append(acc) |
| log(f" N=0 src-only: {stats(results['N0_source_only'])[0]:.3f} ± {stats(results['N0_source_only'])[1]:.3f}") |
|
|
| |
| smallest = min(int(np.sum(y_tgt == c)) for c in np.unique(y_tgt)) |
| for N in N_LIST: |
| per_class = max(1, N // n_classes) |
| |
| per_class = min(per_class, int(0.7 * smallest)) |
| for s in range(N_SEEDS): |
| rng = np.random.default_rng(RNG_BASE + s) |
| tgt_idx_train = stratified_subset(rng, y_tgt, per_class) |
| mask = np.ones(len(y_tgt), bool); mask[tgt_idx_train] = False |
| X_eval = X_tgt[mask]; y_eval = y_tgt[mask] |
| if len(y_eval) == 0: |
| continue |
| X_tr = np.concatenate([X_src, X_tgt[tgt_idx_train]], axis=0) |
| y_tr = np.concatenate([y_src, y_tgt[tgt_idx_train]], axis=0) |
| acc = train_lp(X_tr, y_tr, X_eval, y_eval) |
| results["curve"][N].append(acc) |
| if results["curve"][N]: |
| m, sd = stats(results["curve"][N]) |
| log(f" N={N:>3d}: {m:.3f} ± {sd:.3f} (per_class={per_class})") |
| else: |
| log(f" N={N:>3d}: SKIPPED (insufficient target data)") |
| return results |
|
|
|
|
| |
| |
| |
| log("Loading standard collision features ...") |
| std_feat = torch.load( |
| "results/acceptance_boost/exp2_cache/feat_vjepa2_collision_orig.pt", |
| map_location="cpu", weights_only=False)["features"] |
| log(f" std collision features: {tuple(std_feat.shape)}") |
|
|
| labels = np.load("results/kinematics_vs_mechanics/labels_collision.npz") |
| restitution_bin = labels["restitution_bin"] |
| mass_bin = labels["mass_bin"] |
| velocity_pre_scalar = labels["velocity_pre_scalar"] |
| restitution_scalar = labels["restitution_scalar"] |
| log(f" labels: restit_bin classes {sorted(set(restitution_bin))}, mass_bin classes {sorted(set(mass_bin))}") |
|
|
| X_std = pool_l2(std_feat) |
| log(f" X_std shape: {X_std.shape}") |
|
|
|
|
| |
| |
| |
| |
| log("=== Velocity interpolation split ===") |
| vmed = float(np.median(velocity_pre_scalar)) |
| log(f" velocity median = {vmed:.3f}") |
| mask_lo = velocity_pre_scalar < vmed |
| mask_hi = ~mask_lo |
|
|
| |
| res_velocity_lo2hi = run_split( |
| "velocity lo->hi", |
| X_std[mask_lo], restitution_bin[mask_lo], |
| X_std[mask_hi], restitution_bin[mask_hi], |
| n_classes=3, |
| ) |
| res_velocity_hi2lo = run_split( |
| "velocity hi->lo", |
| X_std[mask_hi], restitution_bin[mask_hi], |
| X_std[mask_lo], restitution_bin[mask_lo], |
| n_classes=3, |
| ) |
|
|
|
|
| |
| |
| |
| |
| log("=== Elastic <-> inelastic split ===") |
| mask_elas = restitution_scalar >= 0.5 |
| mask_inelas = ~mask_elas |
| log(f" n elastic: {mask_elas.sum()}, n inelastic: {mask_inelas.sum()}") |
|
|
| res_elas2inelas = run_split( |
| "elas->inelas", |
| X_std[mask_elas], mass_bin[mask_elas], |
| X_std[mask_inelas], mass_bin[mask_inelas], |
| n_classes=3, |
| ) |
| res_inelas2elas = run_split( |
| "inelas->elas", |
| X_std[mask_inelas], mass_bin[mask_inelas], |
| X_std[mask_elas], mass_bin[mask_elas], |
| n_classes=3, |
| ) |
|
|
|
|
| |
| |
| |
| |
| |
| |
| log("=== Std gravity <-> low gravity ===") |
| lg_path = "results/reviewer_response/exp_p1/feat_vjepa2_lowgrav.pt" |
| lg_feat = torch.load(lg_path, map_location="cpu", weights_only=False)["features"] |
| log(f" low-grav features: {tuple(lg_feat.shape)}") |
| X_lg = pool_l2(lg_feat) |
|
|
| |
| with open("kubric/output/collision_low_gravity_dataset/index.json") as fh: |
| lg_idx = json.load(fh) |
| lg_restitution_scalar = np.array([s["restitution"] for s in lg_idx]) |
| |
| restit_bin_edges = np.percentile(restitution_scalar, [33.333, 66.667]) |
| log(f" union restit bin edges: {restit_bin_edges}") |
def to_bin(scalar, edges):
    """Map scalar values to integer bin indices given sorted bin edges.

    Uses left-side insertion: a value exactly equal to an edge falls in the
    lower bin. With two edges this yields labels in {0, 1, 2}.
    """
    return np.searchsorted(edges, scalar, side="left")
# Bin both datasets with the SAME edges so the 3-class labels are comparable
# across the gravity shift.
y_lg_restit = to_bin(lg_restitution_scalar, restit_bin_edges).astype(np.int64)
y_std_restit = to_bin(restitution_scalar, restit_bin_edges).astype(np.int64)
log(f" lg restit bin distribution: {np.bincount(y_lg_restit)}")
log(f" std restit bin distribution: {np.bincount(y_std_restit)}")


# Both transfer directions.
# NOTE(review): the standard set is truncated to its first 75 items --
# presumably to match the low-gravity dataset size; verify against the
# extraction script.
res_std2lg = run_split(
    "std->lg",
    X_std[:75], y_std_restit[:75],
    X_lg, y_lg_restit,
    n_classes=3,
)
res_lg2std = run_split(
    "lg->std",
    X_lg, y_lg_restit,
    X_std[:75], y_std_restit[:75],
    n_classes=3,
)
|
|
|
|
| |
| |
| |
def merge_dirs(a, b):
    """Pool seed-level results from the two transfer directions of one split.

    Fix: the old docstring said "Average" -- this function CONCATENATES the
    per-seed accuracy lists (the averaging happens later via `stats`). It now
    also iterates the curve keys of `a` directly instead of the module-level
    N_LIST constant (identical behavior: both inputs are built by run_split
    with exactly those keys), which removes a hidden global dependency.

    Both inputs must have the run_split result shape:
    {"N0_source_only": [...], "curve": {N: [...]}}.
    """
    merged = {
        "N0_source_only": a["N0_source_only"] + b["N0_source_only"],
        "curve": {N: a["curve"][N] + b["curve"][N] for N in a["curve"]},
    }
    return merged
|
|
|
|
| full = { |
| "velocity_lo2hi": res_velocity_lo2hi, |
| "velocity_hi2lo": res_velocity_hi2lo, |
| "velocity_mean": merge_dirs(res_velocity_lo2hi, res_velocity_hi2lo), |
| "elas2inelas": res_elas2inelas, |
| "inelas2elas": res_inelas2elas, |
| "elastic_mean": merge_dirs(res_elas2inelas, res_inelas2elas), |
| "std2lg": res_std2lg, |
| "lg2std": res_lg2std, |
| "gravity_mean": merge_dirs(res_std2lg, res_lg2std), |
| } |
|
|
| |
| SUMMARY = ["EXP REV-LP-MV -- linear probes on matched-visual conditions (5 seeds, predict restitution/mass)", |
| "", |
| f"{'Condition':<30s} | {'N=0 (src-only)':>18s} | {'N=16':>14s} | {'N=192':>14s}", |
| "-" * 86] |
| for name, r in full.items(): |
| if "_mean" not in name and not name in ("std2lg", "lg2std", "elas2inelas", "inelas2elas"): |
| continue |
| n0_m, n0_s = stats(r["N0_source_only"]) |
| n16_m, n16_s = stats(r["curve"][16]) |
| n192_m, n192_s = stats(r["curve"][192]) |
| SUMMARY.append(f"{name:<30s} | {n0_m*100:>5.1f}% +/- {n0_s*100:>4.1f}% | {n16_m*100:>5.1f}% +/- {n16_s*100:>4.1f}% | {n192_m*100:>5.1f}% +/- {n192_s*100:>4.1f}%") |
|
|
| print("\n".join(SUMMARY), flush=True) |
| with open(OUT / "exp_lp_matched_visual_summary.txt", "w") as fh: |
| fh.write("\n".join(SUMMARY) + "\n") |
| with open(OUT / "exp_lp_matched_visual_summary.json", "w") as fh: |
| json.dump(full, fh, indent=2) |
|
|
| end_ts = datetime.now(timezone.utc).isoformat() |
| runtime_min = (time.time() - T0) / 60.0 |
| print(f"\nEND_TIME = {end_ts}", flush=True) |
| print(f"Total runtime: {runtime_min:.2f} min", flush=True) |
|
|