"""
extract_models.py -- Standardized Hidden State Extraction
=========================================================
Extracts last-token hidden states from any model in models.yaml.
Produces a standardized cache (.npz) with an identical schema across all models.

Features:
- Per-model chat-template handling via tokenizer.apply_chat_template()
- Saves lm_head + final layer-norm weights for the logit lens
- Incremental checkpointing every 500 samples
- Skips extraction if the cache already exists (--skip_if_cached)
- Reuses an existing Qwen2.5 cache via field remapping (--migrate_from)

Usage:
    # Extract Qwen2.5 (reuses existing cache)
    python extract_models.py --model qwen25 --skip_if_cached

    # Extract LLaMA-3.1 (fresh extraction, ~2-3h on 1 GPU)
    CUDA_VISIBLE_DEVICES=0 python extract_models.py --model llama31

    # Extract all models sequentially
    python extract_models.py --all --skip_if_cached

    # Migrate an existing Qwen2.5 cache to v4 format
    python extract_models.py --model qwen25 --migrate_from data/experiments/v3/qwen25/cached_qwen25.npz
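
Expected models.yaml schema (an illustrative sketch -- the field names below
are inferred from how this script reads the config; the values are examples):

    defaults:
      dataset: data/knowledge_drift_unified_tier1.json
      output_dir: data/experiments/v4
      max_length: 512
    models:
      qwen25:
        name: Qwen/Qwen2.5-7B-Instruct    # HF repo id (example value)
        drift_key: is_drifted_qwen25      # per-model drift label (example)
        is_instruct: true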
| """ |
|
|
| import argparse |
| import json |
| import logging |
| import os |
| import sys |
| import time |
| import warnings |
| from datetime import datetime |
| from pathlib import Path |
|
|
| import numpy as np |
| import torch |
| import yaml |
|
|
warnings.filterwarnings("ignore")
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)


def load_config(config_path="models.yaml"):
    with open(config_path) as f:
        return yaml.safe_load(f)


def get_model_cfg(cfg, model_key):
    if model_key not in cfg["models"]:
        raise ValueError(f"Unknown model '{model_key}'. "
                         f"Available: {list(cfg['models'].keys())}")
    mcfg = cfg["models"][model_key]
    mcfg["key"] = model_key
    return mcfg


def load_dataset(dataset_path, model_key, drift_key):
    """Load and prepare the dataset with model-specific drift labels."""
    logger.info(f"Loading dataset: {dataset_path}")
    with open(dataset_path) as f:
        raw = json.load(f)
    samples = raw.get("samples", raw)

    # Normalize the per-model drift label into a boolean "is_drifted" field.
    for s in samples:
        val = s.get(drift_key, s.get("is_drifted", False))
        if isinstance(val, str):
            s["is_drifted"] = val.lower() in ("true", "1", "yes")
        else:
            s["is_drifted"] = bool(val)

    n_d = sum(1 for s in samples if s["is_drifted"])
    n_s = len(samples) - n_d
    logger.info(f"  Total={len(samples)}  Drifted={n_d}  Stable={n_s}")

    if n_d == 0:
        logger.error(f"No drifted samples for drift_key='{drift_key}'. Aborting.")
        sys.exit(1)

    return samples


def load_model(model_name, device="auto"):
    from transformers import AutoModelForCausalLM, AutoTokenizer

    logger.info(f"Loading model: {model_name}")
    tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token

    mdl = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map=device,
        trust_remote_code=True,
        output_hidden_states=True,
        torch_dtype=torch.float16,
    )
    mdl.eval()
    n_layers = mdl.config.num_hidden_layers
    h_dim = mdl.config.hidden_size
    logger.info(f"  Loaded: L={n_layers} D={h_dim}")
    return mdl, tok


def save_lm_head(model, out_dir, model_key):
    """Save the lm_head weight + final layernorm for logit-lens analysis."""
    lm_path = Path(out_dir) / f"lm_head_{model_key}.npz"
    if lm_path.exists():
        logger.info(f"  lm_head already saved: {lm_path}")
        return

    lm_w = model.lm_head.weight.detach().float().cpu().numpy()

    # Locate the final layernorm; the attribute path differs across architectures.
    ln = None
    for attr in ["norm", "final_layernorm", "model.norm",
                 "model.final_layernorm", "ln_f"]:
        parts = attr.split(".")
        obj = model
        try:
            for p in parts:
                obj = getattr(obj, p)
            ln = obj
            break
        except AttributeError:
            continue

    if ln is not None and hasattr(ln, "weight"):
        ln_w = ln.weight.detach().float().cpu().numpy()
        ln_b = (ln.bias.detach().float().cpu().numpy()
                if hasattr(ln, "bias") and ln.bias is not None
                else np.zeros_like(ln_w))
    else:
        logger.warning("  Could not find final layernorm -- using identity")
        ln_w = np.ones(model.config.hidden_size, dtype=np.float32)
        ln_b = np.zeros(model.config.hidden_size, dtype=np.float32)

    np.savez_compressed(str(lm_path), lm_head=lm_w, ln_weight=ln_w, ln_bias=ln_b)
    logger.info(f"  lm_head saved: {lm_path} (shape={lm_w.shape})")
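

# How the saved arrays can be consumed for a logit lens -- an illustrative
# sketch, not called anywhere in this script. It assumes an RMSNorm-style
# final norm (as in Qwen2.5- and LLaMA-3.1-family models); LayerNorm-style
# models would additionally mean-center and add ln_bias.
def logit_lens_readout(h, lm_head, ln_weight, eps=1e-6):
    """Project one cached hidden state to approximate next-token logits."""
    rms = np.sqrt(np.mean(h.astype(np.float32) ** 2) + eps)
    return (h / rms * ln_weight) @ lm_head.T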


def tokenize_query(tokenizer, query, model_cfg, max_length=512):
    """Tokenize the query using the proper chat template for each model."""
    is_instruct = model_cfg.get("is_instruct", True)

    if is_instruct and hasattr(tokenizer, "apply_chat_template"):
        try:
            messages = [{"role": "user", "content": query}]
            input_ids = tokenizer.apply_chat_template(
                messages, tokenize=True, add_generation_prompt=True,
                return_tensors="pt", max_length=max_length, truncation=True)
            attention_mask = torch.ones_like(input_ids)
            return {"input_ids": input_ids, "attention_mask": attention_mask}
        except Exception:
            pass  # fall through to plain tokenization

    # Fallback for base models or tokenizers without a chat template.
    return tokenizer(query, return_tensors="pt",
                     truncation=True, max_length=max_length)
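

# For intuition (illustrative -- the exact special tokens depend on the
# tokenizer, and some templates prepend a default system message): a
# Qwen2.5-style chat template with add_generation_prompt=True wraps the
# query roughly as
#   <|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n
# so the last input token sits at the position where generation would begin,
# which is exactly the position extract_single() reads below.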


def extract_single(model, tokenizer, sample, model_cfg, max_length=512):
    """Extract hidden states + logit info for a single sample."""
    query = sample.get("query", sample.get("question", ""))
    answer = sample.get("expected_answer", sample.get("answer", ""))

    inp = tokenize_query(tokenizer, query, model_cfg, max_length)
    inp = {k: v.to(model.device) for k, v in inp.items()}

    with torch.no_grad():
        out = model(**inp)

    n_layers = model.config.num_hidden_layers

    # Last-token hidden state per layer. Index l+1 skips the embedding output
    # (hidden_states[0]); clamping and NaN-zeroing keep the cache finite.
    hidden_states = {}
    for l in range(n_layers):
        h = out.hidden_states[l + 1][0, -1, :].float().cpu()
        h = torch.clamp(h, -1e6, 1e6)
        h[torch.isnan(h)] = 0.0
        hidden_states[l] = h.numpy()

    # Next-token distribution at the final position.
    logits = out.logits[0, -1, :].float().cpu()
    logits = torch.clamp(logits, -1e4, 1e4)
    logits[torch.isnan(logits)] = 0.0
    probs = torch.softmax(logits, dim=-1)

    top_prob = probs.max().item()
    top_idx = probs.argmax().item()
    top_token = tokenizer.decode([top_idx]).strip()
    entropy = -(probs * torch.log(probs + 1e-12)).sum().item()

    # Loose correctness heuristic: substring overlap between the top token
    # and the expected answer (or any of its first few longer words).
    ans_lower = answer.lower().strip()
    tok_lower = top_token.lower().strip()
    correct = (ans_lower in tok_lower or tok_lower in ans_lower or
               any(w in tok_lower for w in ans_lower.split()[:3] if len(w) > 3))

    return {
        "hidden_states": hidden_states,
        "top_prob": top_prob,
        "top_token": top_token,
        "entropy": entropy,
        "correct": correct,
    }


def run_extraction(model, tokenizer, samples, model_cfg, out_dir, model_key,
                   max_length=512, checkpoint_every=500):
    """Full extraction loop with incremental checkpointing."""
    out_dir = Path(out_dir)
    n_layers = model.config.num_hidden_layers
    t0 = time.time()

    # Save the projection weights once per model (needed for the logit lens).
    save_lm_head(model, out_dir, model_key)

    results = []
    for idx, s in enumerate(samples):
        try:
            ext = extract_single(model, tokenizer, s, model_cfg, max_length)
        except Exception as e:
            logger.error(f"  Sample {idx} error: {e}")
            continue

        result = {
            "idx": idx,
            "sample_id": s.get("sample_id", f"s_{idx}"),
            "query": s.get("query", ""),
            "expected_answer": s.get("expected_answer", ""),
            "is_drifted": s["is_drifted"],
            "relation": s.get("relation", "unknown"),
            "category": s.get("category", "unknown"),
            "entity": s.get("entity", ""),
            "knowledge_type": s.get("knowledge_type", ""),
            "drift_date": s.get("drift_date", ""),
            "year": s.get("year", ""),
            "dataset_source": s.get("dataset_source", ""),
            "hidden_states": ext["hidden_states"],
            "top_prob": ext["top_prob"],
            "top_token": ext["top_token"],
            "entropy": ext["entropy"],
            "correct": ext["correct"],
        }
        results.append(result)

        if (idx + 1) % 100 == 0:
            elapsed = time.time() - t0
            rate = (idx + 1) / elapsed
            eta = (len(samples) - idx - 1) / rate / 60
            logger.info(f"  {idx+1}/{len(samples)} "
                        f"({rate:.1f} samp/s, ETA {eta:.0f}m)")

        if (idx + 1) % checkpoint_every == 0:
            ckpt = out_dir / f"checkpoint_{model_key}_{idx+1}.npz"
            np.savez_compressed(str(ckpt),
                                results=np.array(results, dtype=object))
            logger.info(f"  Checkpoint: {ckpt}")

    # Final cache.
    cache_path = out_dir / f"cached_{model_key}.npz"
    logger.info(f"Saving final cache ({len(results)} samples)...")
    np.savez_compressed(str(cache_path),
                        results=np.array(results, dtype=object))
    elapsed = time.time() - t0
    logger.info(f"Done: {cache_path} ({elapsed/60:.1f}m)")

    # Remove intermediate checkpoints now that the full cache exists.
    for ckpt in out_dir.glob(f"checkpoint_{model_key}_*.npz"):
        ckpt.unlink()
        logger.info(f"  Removed checkpoint: {ckpt.name}")

    # Per-model summary (guard the percentage against an empty result list).
    n_correct = sum(1 for r in results if r["correct"])
    n_drifted = sum(1 for r in results if r["is_drifted"])
    logger.info(f"\n  Summary for {model_key}:")
    logger.info(f"    Samples: {len(results)}")
    logger.info(f"    Drifted: {n_drifted}")
    logger.info(f"    Stable:  {len(results) - n_drifted}")
    logger.info(f"    Correct: {n_correct} ({n_correct / max(len(results), 1):.1%})")
    logger.info(f"    Layers:  {n_layers}")
    logger.info(f"    H-dim:   {model.config.hidden_size}")

    # Free GPU memory before the next model loads.
    del model
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    return results
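

# Minimal consumer sketch (illustrative; nothing in this script calls it):
# downstream analysis code can reload a finished cache the same way
# migrate_cache() below loads one.
def load_cache(cache_path):
    """Return the list of per-sample dicts written by run_extraction()."""
    d = np.load(cache_path, allow_pickle=True)
    return d["results"].tolist()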


def migrate_cache(src_path, dst_dir, model_key, dataset_path, drift_key):
    """
    Migrate an existing cache to v4 format.
    Adds missing fields (correct, drift_date, entity, etc.) by joining
    against the unified dataset.
    """
    logger.info(f"Migrating cache: {src_path} -> v4 format")

    # Load the source cache.
    d = np.load(src_path, allow_pickle=True)
    results = d["results"].tolist()
    logger.info(f"  Loaded {len(results)} cached samples")
    logger.info(f"  Fields: {list(results[0].keys())}")

    # Load the unified dataset and index it by query text for the join.
    with open(dataset_path) as f:
        raw = json.load(f)
    samples = raw.get("samples", raw)
    lookup = {s.get("query", ""): s for s in samples}
    logger.info(f"  Dataset: {len(samples)} samples, {len(lookup)} unique queries")

    # Fields the v4 schema expects on every result.
    required = ["correct", "is_drifted", "relation", "category", "entity",
                "knowledge_type", "drift_date", "year", "dataset_source",
                "sample_id", "expected_answer"]

    enriched = 0
    for r in results:
        # Remap the legacy field name.
        if "correct" not in r:
            r["correct"] = r.get("top_answer_matches", False)

        # Join against the dataset by query text.
        src = lookup.get(r.get("query", ""))
        if src is not None:
            # Recompute the model-specific drift label.
            val = src.get(drift_key, src.get("is_drifted", False))
            if isinstance(val, str):
                r["is_drifted"] = val.lower() in ("true", "1", "yes")
            else:
                r["is_drifted"] = bool(val)

            # Fill missing or empty required fields from the dataset.
            for field in required:
                if r.get(field) in (None, "", "None") and field in src:
                    r[field] = src[field]
                    enriched += 1

    logger.info(f"  Enriched {enriched} field values")

    # Sanity stats for the migrated cache.
    has_correct = sum(1 for r in results if "correct" in r)
    has_drift_date = sum(1 for r in results
                         if r.get("drift_date") not in (None, "", "None"))
    n_drifted = sum(1 for r in results if r.get("is_drifted"))
    n_correct = sum(1 for r in results if r.get("correct"))

    logger.info("  After migration:")
    logger.info(f"    has_correct:    {has_correct}/{len(results)}")
    logger.info(f"    has_drift_date: {has_drift_date}/{len(results)}")
    logger.info(f"    n_drifted:      {n_drifted}")
    logger.info(f"    n_correct:      {n_correct}")

    # Write the migrated cache into the v4 layout.
    dst_dir = Path(dst_dir)
    dst_dir.mkdir(parents=True, exist_ok=True)
    dst_path = dst_dir / f"cached_{model_key}.npz"
    logger.info(f"  Saving to {dst_path}...")
    np.savez_compressed(str(dst_path),
                        results=np.array(results, dtype=object))
    logger.info("  Done.")
    return results


def main():
    p = argparse.ArgumentParser(
        description="Extract hidden states from LLMs for drift detection",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument("--model", default="qwen25",
                   help="Model key from models.yaml")
    p.add_argument("--config", default="models.yaml",
                   help="Path to models.yaml config")
    p.add_argument("--dataset", default=None,
                   help="Override dataset path from config")
    p.add_argument("--output_dir", default=None,
                   help="Override output dir from config")
    p.add_argument("--device", default="auto",
                   help="Device for model loading")
    p.add_argument("--skip_if_cached", action="store_true",
                   help="Skip extraction if cache already exists")
    p.add_argument("--migrate_from", default=None,
                   help="Migrate existing cache to v4 format")
    p.add_argument("--all", action="store_true",
                   help="Extract all models sequentially")
    args = p.parse_args()

    cfg = load_config(args.config)
    defaults = cfg.get("defaults", {})
    dataset_path = args.dataset or defaults.get(
        "dataset", "data/knowledge_drift_unified_tier1.json")
    output_base = args.output_dir or defaults.get(
        "output_dir", "data/experiments/v4")

    models_to_run = (list(cfg["models"].keys()) if args.all
                     else [args.model])

    for model_key in models_to_run:
        mcfg = get_model_cfg(cfg, model_key)
        drift_key = mcfg["drift_key"]
        model_out = Path(output_base) / model_key
        model_out.mkdir(parents=True, exist_ok=True)

        cache_path = model_out / f"cached_{model_key}.npz"

        # Migration mode: remap an existing cache instead of re-extracting.
        if args.migrate_from and model_key == args.model:
            migrate_cache(args.migrate_from, str(model_out),
                          model_key, dataset_path, drift_key)
            continue

        # Skip models that already have a finished cache.
        if args.skip_if_cached and cache_path.exists():
            logger.info(f"[{model_key}] Cache exists: {cache_path} -- skipping")
            continue

        # Load data with this model's drift labels, then extract.
        samples = load_dataset(dataset_path, model_key, drift_key)

        logger.info(f"\n{'='*60}")
        logger.info(f"  Extracting: {model_key} ({mcfg['name']})")
        logger.info(f"{'='*60}")

        mdl, tok = load_model(mcfg["name"], args.device)

        max_length = defaults.get("max_length", 512)
        run_extraction(mdl, tok, samples, mcfg, str(model_out),
                       model_key, max_length=max_length)

    logger.info("\nAll extractions complete.")


if __name__ == "__main__":
    main()