diff --git a/Shrink/shrink.py b/Shrink/shrink.py new file mode 100644 index 0000000000..53411dbf2e --- /dev/null +++ b/Shrink/shrink.py @@ -0,0 +1,133 @@ +import ast +import subprocess +import lzma +import base64 +import os +import sys + +class DeadCodeRemover(ast.NodeTransformer): + def visit_If(self, node): + source = ast.unparse(node.test) + + # Dead config branches based on defaults — these conditions are False + # with default config, so their bodies never execute during competition. + # IMPORTANT: Only match the affirmative test, NOT negations like + # "not args.load_snapshot" which guards the training loop (always True). + dead_false_tests = [ + "args.load_snapshot", # snapshot restore (default="" → falsy) + "args.snapshot_post_hessian", # snapshot save (default=False) + ] + + for dt in dead_false_tests: + if source == dt: + if node.orelse: + # Cannot safely remove — else/elif branch must be preserved. + # Skip removal and keep the full if/else structure. + break + return None + + self.generic_visit(node) + return node + +def shrink_pipeline(input_file, output_file): + print(f"[*] Starting shrinking pipeline for: {input_file}") + + if not os.path.exists(input_file): + print(f"[!] Error: Input file '{input_file}' not found.") + return + + # 1. Read Original Source + with open(input_file, 'r', encoding='utf-8') as f: + source = f.read() + print(f"[*] Original size: {len(source)} bytes") + + # 2. Prune AST (Dead Code Elimination) + print(f"[*] Pruning dead code and evaluation logic from AST...") + tree = ast.parse(source) + transformer = DeadCodeRemover() + tree = transformer.visit(tree) + ast.fix_missing_locations(tree) + + pruned_source = ast.unparse(tree) + temp_pruned_file = input_file + ".pruned.tmp.py" + temp_minified_file = input_file + ".min.tmp.py" + + def _cleanup_temps(): + for f in [temp_pruned_file, temp_minified_file]: + if os.path.exists(f): + os.remove(f) + + with open(temp_pruned_file, 'w', encoding='utf-8') as f: + f.write(pruned_source) + + # 3. Minify using pyminify + print(f"[*] Running pyminify to minimize identifiers and strip whitespace/comments/hints...") + try: + result = subprocess.run([ + "uvx", "--from", "python-minifier", "pyminify", temp_pruned_file, + "--output", temp_minified_file, + "--remove-literal-statements", + "--remove-asserts", + "--remove-debug", + "--remove-class-attribute-annotations" + ], check=True, capture_output=True, text=True) + except FileNotFoundError: + print(f"[!] 'uvx' not found on PATH. Install uv or run: pip install python-minifier") + _cleanup_temps() + return + except subprocess.CalledProcessError as e: + print(f"[!] PyMinify failed (exit {e.returncode}):") + if e.stderr: + print(e.stderr) + _cleanup_temps() + return + + try: + with open(temp_minified_file, 'rb') as f: + minified_bytes = f.read() + print(f"[*] Minified size: {len(minified_bytes)} bytes") + + # 4. 
LZMA + Base85 Self-Extracting Compression + print(f"[*] Compressing into LZMA Base85 executable wrap...") + compressed = lzma.compress(minified_bytes, format=lzma.FORMAT_RAW, filters=[{"id": lzma.FILTER_LZMA2, "preset": 9 | lzma.PRESET_EXTREME}]) + b85_encoded = base64.b85encode(compressed).decode('ascii') + + # Chunk the b85 string to avoid overly long single lines + chunk_size = 100 + chunks = [b85_encoded[i:i+chunk_size] for i in range(0, len(b85_encoded), chunk_size)] + formatted_b85 = '"\n"'.join(chunks) + + header = f"""import lzma as L,base64 as B\nexec(L.decompress(B.b85decode(("{formatted_b85}")),format=L.FORMAT_RAW,filters=[{{"id":L.FILTER_LZMA2}}]))\n""" + + with open(output_file, 'w', encoding='ascii') as f: + f.write(header) + + final_size = os.path.getsize(output_file) + print(f"[*] Packed output size: {final_size} bytes") + print(f"[*] Total Reduction: {((len(source) - final_size) / len(source) * 100):.1f}%") + print(f"[*] Success! Optimized submission saved to: {output_file}") + finally: + # 5. Clean up temporary files (always, even on error) + _cleanup_temps() + +if __name__ == '__main__': + if len(sys.argv) == 3: + # Direct mode: shrink.py <input> <output> + shrink_pipeline(sys.argv[1], sys.argv[2]) + elif len(sys.argv) == 1: + # Legacy rename-and-shrink mode (first-time setup only) + human_file = "train_gpt_human.py" + output_file = "train_gpt.py" + if not os.path.exists(output_file): + print(f"[!] Error: '{output_file}' not found. Use: python shrink.py <input> <output>") + sys.exit(1) + if os.path.exists(human_file): + print(f"[!] Error: '{human_file}' already exists. Use: python shrink.py {human_file} {output_file}") + sys.exit(1) + os.rename(output_file, human_file) + print(f"[*] Renamed '{output_file}' to '{human_file}'") + shrink_pipeline(human_file, output_file) + else: + print(f"Usage: python shrink.py <input> <output>") + print(f" python shrink.py (legacy: rename train_gpt.py -> train_gpt_human.py, then shrink)") + sys.exit(1) diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/README.md b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/README.md new file mode 100644 index 0000000000..0e25818dbf --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/README.md @@ -0,0 +1,191 @@ +# Turbo-Muon + EngramLite + Parameter Banking + GPTQ Mixed-Precision + +**val_bpb: 1.1091** (3-seed mean, std 0.0005) | **~15.3 MB** | 8xH100 SXM + +## Results (8xH100 80GB SXM) + +| Seed | step_avg | steps | val_bpb (SW) | val_bpb (full) | Artifact bytes | |------|----------|-------|-------------|----------------|----------------| | 42 | 93.26ms | 6284 | 1.1086 | 1.1324 | 15,992,528 | | 1337 | 93.11ms | 6295 | 1.1090 | 1.1328 | 15,993,413 | | 2025 | 93.11ms | 6294 | 1.1096 | 1.1335 | 15,993,904 | | **Mean** | **93.16ms** | **6291** | **1.1091** | **1.1329** | | + +## Summary + +An 11-layer GPT language model combining seven key innovations over the PR #609 baseline, targeting the 16MB artifact budget at MLP 3.5x width. Development-run benchmark: **1.1119 val_bpb (sliding window)** on 1xH100. + +## Key Innovations + +### Turbo-Muon Optimizer + +A variant of the Muon optimizer with three enhancements that reduce Newton-Schulz iterations from 5 to 4: + +- **AOL Preconditioning** -- Gershgorin-based diagonal scaling contracts the singular value range before Newton-Schulz iteration, allowing the first NS step to be skipped. +- **Polar Express Coefficients** -- Optimal degree-5 polynomial coefficients from Amsel et al. 
(arXiv:2505.16932), applied per-iteration rather than fixed. +- **Post-NS row_col Normalization** -- After orthogonalization, rows then columns are normalized. This consistently outperforms row-only or no normalization. + +### EngramLite Hash Embeddings + +Multi-head prime-based hash embedding that captures bigram and trigram statistics without explicit tokenizer changes: +- 2 heads x 2 orders (bigram + trigram) with 8192 hash buckets +- Projects to model_dim through a learned sigmoid gate +- Adds character-level context at minimal parameter cost + +### Parameter Banking + +All per-layer linear weights stored in contiguous 3D tensors (`qo_bank`, `kv_bank`, `mlp_up_bank`, `mlp_down_bank`). This enables batched Newton-Schulz orthogonalization via `torch.bmm`, dramatically reducing Muon optimizer overhead compared to per-layer iteration. + +### U-Net Skip Connections + +Encoder/decoder structure with learned sigmoid-gated skip connections. Gates start at `sigmoid(0) = 0.5` and learn per-dimension blending, preventing gradient shortcutting at initialization. + +### ValueEmbedding + +Reinjects token identity into attention values at deep layers (9, 10). Projects vocabulary embeddings to kv_dim with per-layer learned scaling, helping the model maintain token-level information through deep attention stacks. + +### SmearGate + +`F.pad`-based causal shift blending each token with its predecessor, providing free unigram context at zero attention cost. + +### XSA (Cross-Sequence Attention) — All Layers + +Efficient XSA applied to all 11 layers (`XSA_LAST_N=11`). Subtracts the self-value projection from attention output via GQA-aware reshape (no `repeat_interleave`), encouraging the model to attend to context rather than the current token's own representation. + +### ASQU v3 Per-Layer Activation Slopes + +Fixed per-layer LeakyReLU negative slopes discovered through 3 rounds of Adaptive Slope parameter tuning: +`[-0.014, 0.131, 0.225, 0.265, 0.310, 0.354, 0.421, 0.429, 0.417, 0.358, 0.468]` + +Layer 0 uses near-ReLU² (slope ≈ 0) for aggressive feature selection, while deeper layers use progressively larger negative slopes (up to 0.468) to allow smoother gradient flow. Hard-coded converged endpoints eliminate learnable slope parameters. + +### Mimetic V-O Initialization + +Output projections initialized as `O_h = -alpha * V_h` per head (alpha=0.05), creating a small residual-like identity at init for improved early training stability. + +### Additional Architecture Details + +- **Partial RoPE** — Rotary position embeddings applied to only 16 of 64 head dimensions (`ROPE_DIMS=16`). Remaining dimensions are position-free, giving the model both positional and position-invariant feature channels. +- **LN Scale** — Layer norm outputs scaled by `1/sqrt(layer_idx + 1)`, stabilizing deeper layers by reducing activation magnitudes proportional to depth. +- **Logit Softcap** — `softcap * tanh(logits / softcap)` with softcap=30.0 prevents extreme logit values during training. +- **GQA** — Grouped Query Attention with 8 query heads and 4 KV heads (2:1 grouping), reducing KV cache and parameter count. +- **Tied Embeddings** — Input and output embeddings share weights, saving parameters. +- **QK Gain** — Per-head learnable query scaling initialized to 1.5, allowing the model to tune attention sharpness per head. 
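
To make two of the smaller mechanisms above concrete, here is a minimal PyTorch sketch of the SmearGate causal shift and the logit softcap. This is an illustration, not code lifted from the training script; the function names and the exact per-dimension blend form are assumptions.

```python
import torch
import torch.nn.functional as F

def smear_gate(x: torch.Tensor, gate: torch.Tensor) -> torch.Tensor:
    # x: (batch, seq, dim). F.pad shifts the sequence right by one position,
    # so each token is blended with its predecessor (position 0 sees zeros).
    prev = F.pad(x, (0, 0, 1, 0))[:, :-1, :]
    g = torch.sigmoid(gate)  # learned per-dimension blend weight
    return (1 - g) * x + g * prev

def softcap_logits(logits: torch.Tensor, softcap: float = 30.0) -> torch.Tensor:
    # Bounds logits smoothly to (-softcap, softcap) instead of hard clamping,
    # so gradients stay finite for extreme values.
    return softcap * torch.tanh(logits / softcap)
```
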
+ +### GPTQ Mixed-Precision Quantization + +Compression pipeline with Hessian collection performed within the 600s training budget (`gptq_reserve_ms=9000` deducted from training wallclock before training begins): + +1. **Hessian collection** — 64 calibration batches run through a non-banked model copy to collect per-layer `H = X^T X` approximations, all-reduced across ranks. This runs within the reserved 9s carved from the training budget. +2. **Dynamic mixed-precision bit allocation** — Base quantization is **int5** for all weight groups. Hessian trace sensitivity ranks tensor groups (by layer × attn/mlp), then a greedy allocator selectively **promotes the most sensitive groups to int6 or int7** until the estimated compressed artifact size approaches the 16MB target minus 2% pruning headroom. +3. **GPTQ quantization** — Hessian-aware Cholesky error compensation for 2D weight matrices. Columns permuted by descending Hessian diagonal for optimal error propagation. Falls back to percentile search on Cholesky failure. +4. **Late QAT (soft-round)** — Quantization-aware training activated when LR scale drops below 15%, with soft-round sigmoid alpha ramping 1→16 over the QAT phase. Provides real gradient signal through quantization grid points. +5. **Selective pruning** — Post-GPTQ, values with `|q| ≤ 2` ranked by reconstruction error impact. Binary search with fast (zlib-1) / real (brotli-11) calibration finds the minimal prune count to fit 16MB. +6. **Brotli + byte-shuffle compression** — Byte-shuffle preprocessing reorders tensor bytes by significance position before brotli compression (quality=11) for optimal entropy coding. + +### Code Shrinking + +The submission `train_gpt.py` is a compressed self-extracting wrapper generated by +`Shrink/shrink.py`. The pipeline: AST dead-code removal → pyminify (strip comments, +whitespace, type hints, rename identifiers) → LZMA + base85 self-extracting `exec()`. + +- **Human-readable source**: `train_gpt_human.py` (123 KB) +- **Shrunk submission**: `train_gpt.py` (24 KB) +- **Code budget freed**: ~99 KB → more artifact space for model weights → less pruning + +### Test-Time Training (TTT) + +Optional legal score-first TTT on the validation set, activated via `TTT_ENABLED=1`. Every token is scored BEFORE any gradient update that could use it, ensuring no data leakage: + +1. **Phase 1 (Score)** — Sliding window evaluation under `torch.no_grad()`, scoring all windows whose first scored token falls within the current chunk. Results are accumulated into global byte/token counters. +2. **Phase 2 (Train)** — SGD (or AdamW) training on the chunk just scored, with configurable epochs, gradient clipping, and learning rate. +3. **Last chunk** — Scored but never trained on (hard guarantee: no weight mutation after final scoring). + +Key features: +- **Polyak EMA** — Maintains exponential moving average of weights (decay=0.998). Training weights used during gradient updates, Polyak weights swapped in for scoring. Provides stability without sacrificing adaptation speed. +- **Entropy-adaptive epochs** — Per-chunk epoch count adjusted by chunk entropy: high-entropy (hard) chunks get +1 epoch, low-entropy (easy) chunks get -1. +- **Budget guard** — Monitors elapsed wall-clock time and stops TTT if the 600s eval budget would be exceeded. +- **Multi-GPU support** — All-reduces loss/token/byte counts across ranks. +- **torch.compile compatible** — Optionally compiles the model for TTT forward passes (`TTT_COMPILE=1`). 
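
For intuition, a compact sketch of the score-first ordering follows. It assumes `chunks` is a list of `(x, y)` tensor pairs and that `model(x, y)` returns the mean token loss; the real implementation layers Polyak EMA, entropy-adaptive epochs, distributed all-reduces, and the wall-clock guard on top of this skeleton.

```python
import torch

def ttt_score_first(model, chunks, lr=0.005, epochs=5, grad_clip=1.0):
    opt = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    loss_sum, tok_sum = 0.0, 0
    for i, (x, y) in enumerate(chunks):
        # Phase 1 (score): every token is scored before any update can use it.
        model.eval()
        with torch.no_grad():
            loss_sum += model(x, y).item() * y.numel()
        tok_sum += y.numel()
        # Phase 2 (train): adapt on the chunk just scored. The final chunk is
        # scored but never trained on, so no weights mutate after last scoring.
        if i < len(chunks) - 1:
            model.train()
            for _ in range(epochs):
                opt.zero_grad(set_to_none=True)
                model(x, y).backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
                opt.step()
    return loss_sum / max(tok_sum, 1)
```
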
+ +## Architecture + +| Component | Setting | +|-----------|---------| +| Layers | 11 (512d, 8H, 4KV GQA) | +| MLP | 3.5x with LeakyReLU(ASQU v3 per-layer)^2 | +| XSA | All 11 layers | +| EngramLite | 2 heads x 2 orders, 8192 buckets | +| Skip connections | U-Net sigmoid-gated | +| RoPE | Partial (16 of 64 dims) | +| LN Scale | 1/sqrt(layer+1) | +| Logit Softcap | 30.0 | +| ValueEmbedding | Layers 9-10 | +| SmearGate | Causal shift blending | +| Embeddings | Tied input/output | +| Vocab | 1024 BPE, seq 2048 | + +### Optimizer + +| Param group | LR | Notes | +|---|---|---| +| Bank weights (Muon) | 0.025 | momentum=0.99, WD=0.04 | +| Embeddings (Adam) | 0.6 | betas=(0.7, 0.95), WD=0.04 | +| Head/tied embed (Adam) | 0.035 | betas=(0.7, 0.95) | +| Scalars (Adam) | 0.025 | betas=(0.9, 0.95) | + +### Weight Averaging + +- **SWA** -- Float32 accumulation every 50 steps after 20% of training +- **EMA** -- Decay=0.997, lerp_ single-kernel updates + +## Dependencies + +Requires `brotli>=1.1` (included in `requirements.txt`). The code gracefully falls back to lzma > zlib if brotli is missing, but brotli is needed to maximize model capacity within the 16MB budget. + +## Run Command + +```bash +# 8xH100 (competition) +SEED=42 torchrun --standalone --nproc_per_node=8 train_gpt.py + +# Single GPU (development) +torchrun --standalone --nproc_per_node=1 train_gpt.py +``` + +All hyperparameters are configured via environment variables. Defaults match the competition-optimized configuration. Key overrides: + +```bash +# Override examples +NUM_LAYERS=11 MODEL_DIM=512 MLP_MULT=3.5 \ +MUON_MOMENTUM=0.99 MUON_WD=0.04 MUON_POST_NORM=row_col \ +SWA_ENABLED=1 EMA_ENABLED=1 MIXED_PRECISION=1 LATE_QAT=1 \ +torchrun --standalone --nproc_per_node=8 train_gpt.py + +# With TTT enabled (eval phase applies test-time training) +TTT_ENABLED=1 TTT_LR=0.005 TTT_EPOCHS=5 TTT_OPTIMIZER=sgd \ +torchrun --standalone --nproc_per_node=8 train_gpt.py +``` + +### Snapshot Workflow + +Train once, iterate on compression without retraining: + +```bash +# Step 1: Train and save snapshot +SNAPSHOT_POST_HESSIAN=1 torchrun --standalone --nproc_per_node=8 train_gpt.py + +# Step 2: Load snapshot, re-run compression + eval only +LOAD_SNAPSHOT=snapshot_post_hessian.pt torchrun --standalone --nproc_per_node=1 train_gpt.py +``` + +## Credits + +- **Base recipe**: [PR #609](https://github.com/openai/parameter-golf/pull/609) (1.1154 bpb baseline) +- **Muon optimizer**: Inspired by [PR #399](https://github.com/openai/parameter-golf/pull/399) parameter banking approach +- **LeakyReLU^2**: [PR #493](https://github.com/openai/parameter-golf/pull/493), [PR #518](https://github.com/openai/parameter-golf/pull/518) +- **XSA**: [PR #265](https://github.com/openai/parameter-golf/pull/265), [PR #287](https://github.com/openai/parameter-golf/pull/287) +- **SmearGate + BigramHash**: [PR #198](https://github.com/openai/parameter-golf/pull/198) and related submissions +- **Polar Express coefficients**: Amsel et al. 
(arXiv:2505.16932) +- **GPTQ approach**: [PR #634](https://github.com/openai/parameter-golf/pull/634) Hessian-aware quantization + diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/requirements.txt b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/requirements.txt new file mode 100644 index 0000000000..c114d41491 --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/requirements.txt @@ -0,0 +1,12 @@ +# Extra dependencies beyond the standard Parameter Golf competition environment. +# The competition VM already provides: torch, numpy, sentencepiece, zlib (stdlib). +# +# Minimum versions: +# Python >= 3.12 +# torch >= 2.11 (torch.compile fullgraph improvements, CUDA 13.0 support) +# +# brotli: ~1-5% better compression than lzma/zstd on int6-quantized weights. +# The code falls back gracefully (lzma → zlib) if missing, but brotli is +# needed to hit the 16MB artifact cap with maximum model capacity. +torch>=2.11 +brotli>=1.1 diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/submission.json b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/submission.json new file mode 100644 index 0000000000..f5e8b9791a --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/submission.json @@ -0,0 +1,9 @@ +{ + "name": "Turbo-Muon + EngramLite + Parameter Banking + GPTQ Mixed-Precision", + "val_bpb": "1.1091", + "bytes_total": "15993904", + "blurb": "11L/512d GPT with Turbo-Muon (AOL+Polar Express+row_col norm), EngramLite bigram+trigram hash embeddings, U-Net skip connections, ValueEmbedding, SmearGate, SWA+EMA, GPTQ mixed-precision int6/int7 with Hessian sensitivity, brotli+byte-shuffle compression. 
Built on PR #609.", + "author": "mikeapedia", + "github_id": "mikeapedia", + "date": "2026-03-28" +} diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_gpt.py b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_gpt.py new file mode 100644 index 0000000000..23747e1050 --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_gpt.py @@ -0,0 +1,267 @@ +import lzma as L,base64 as B +exec(L.decompress(B.b85decode((";U1Av(_H{F9Eu`uszf$Krci>25I-HuUNjF`N?9VI&1P%41Wt3M0HchP&FnsB%m$1T8U>w|=0fMLAryjUu;-MV7!%Vp1WNakXP!EG" +"y>$Ot^}1Rru9S@(GNqdib|M?4Z_ZW}=M8Np=4MYSx6gz;FHQ*&v$88yU1ULZpfLjvZQJikXYmrUjb|MjT!b?$y(e8N4SFUkcM2ZR" +"2iK`v_Y2Qqc@gH!;pRMX)D81U^nD2~z&e0bn&x_ys=XkFCzcs^S^1m07)8>(rR|k4kDs~4sfRKOUA#^tt#;M%0BY>=?rl>&lGo8l" +"Klcn4N7r!(IF}eUKe!N2{G+uEy7~Ra0Xk2r7#9W#oXb~A@YW=VtGg#Z|7@VLf^h^s*Ozi%tE;F>vjZy7kXUEBK!4*HVYn~yT=1?9" +"zo{Y;rs2AG2wP9tvg4RJY$u_rOMl1pUq&DJ{Q%=s3vLG-BnZdWn^Lq$;~&RnQ(l3>Fw)8}VPJ" +"ilyc|^qfXUR?>acTz7iY@KnPVAA$K7B}tJ-@o8WtigVPZZ})`Jv)eqEmP`odCCxQ!kn!8rwc`1pD^xKYXe#X?D9IwBFo*aJjwxqe" +"+g?m8e};Z{7xjlxwKWCl)a*(LrrKDXmM6uh7OlV%f|?11>)zw7%sC&%h{3PIM(O^9F3t@H{}j=Mupn}E)TC&>g%Sjh5BHrM6Cv%i" +"86Na)k58H=UKCYav-O#jz@WW4QDS!%$<%JqGcR$F" +"%UdO!GVMRhe=UMwjj%oEifSonj^>8)Gdj&o_gMUT_l^4%?wo%tqV@LHMZVwse;FW>YvRWY#JvcMe}&*8di{m|6$bcRT)*gcwuZVl" +"H;t{`W3Rg??+*)(B6Sx*mKwgs$O^#08pd<#p2`**h7Q-a#GSiecG7_$^pWdpvwalH*-~wq)}xN%pr|0aEKgR-;W8Kos`lqbWDT7h" +"!d(sU%42~gm_Z{l1?eM_DK}?Q^Z;EjtpV^iTS~#(%8b~s1m>+`oj938yh_P1|B0JwOM-q?gDz" +"W{6hLwkE6UN3p-e&|>@#vI6%dkM7^DjE|KaNe}ZtxQkdMyU{a;kC{v>!Xb6" +"1-eD3N5jt0z65F1?X19BNj@`EdsES1VDA6<=!%qgKr`jai9tKs" +"Pp8OE*0cO$>r3?)AP=O#ClJrgkD1Idru`xR3=sYEfhqaLT$BKNuUZ>bnEd(PN98$Uch2=l!IZt!Ii3Pfq8u1TJubwpZQ?zX" +"f}8KN(6(m}i@eH+g+btnVuJDIROtH|?gy6n@JvLY=?vY9;oQH3QEBB&;FIDeFRbCoe`$;^k&faZ@K>VV#M8I=VeJ#wyJOr-sBp|!" +"{bJeX0q7bDSwvvRE5+eZlTJQ7ej__Ij&QxQn_{+f18V|7?1*r})Qnw4kP0{yATfq%%GnIB6aoME?J(b_guZ^;apgcR;I)q9iSf|8" +"1{qWzzwxsj)ALISM$8?`;fwe=-Zpb5;S~P{3PbCu(t{0r|K>7SLtN)KN4=Z$7e~u=`|aHtQCDuR`Yg`M+mV%|KJ#deKDpPoTI_3Q" +"@knZn@+tXds>6O!1j1YKz==!|8??1GTN;(FEF=(DXZ7%363$%P%Vdb*rMGiNlrkF(-i#nM=l" +"A^gv)EwfY)PIM8^vlKe49vmv8@_C5~eO-jRQs#5yIa@M2aOVGQRo*?;L*Z(L&RixZ1J~7|FlW73cUy5-Z$mz@?lI<&(J2wf5~x@d" +"jRmf}$ivI9mRcHDXfK>BfGX>u%S}}Mi=kf2+PDikd>9PQp=yOyV^~-Yv_+iv(" +"J>vrl%dA3h3yYSPW%s<_TH^y-+Gu*A|9k#kIh8C+xs;q?rx>rG}kvSKm`Q^a-2Y#J5>nMudm0P<3f1" +"6v`=FH^xZEC$AJ?LY8(cwcGGlC6^Y%VSou3%%1`hay>kFDVTJX(#>k@m8-&>+a1rz7)C*U1A(^0A~2pa&1!Gyl?2blKQ1Yu%6-?d" +"5h@A*Hjj6svw2~1Azh%nynZ<;EKO@rqv=OxiN)wgQX+M37;W>uWZAx|D-3(|dj_?<7;R#LD%9BYevRwabGa9B^}0dU-{_#Jx_GBO" +"M;zaeN}2L0gnBUA^3#ee2m42Wf7#_Iq~ET8|5k&ffvG^^h*}-8m*1Puk{sdms4xRW8$W!;oR`KiTL2BgQ{f7V9t%MWEaJv@bw_J?" 
+"#@iHPh4(u*uL40b3`vQrMd;?BC^miQzc5bRIPY{<&xOBVcj)a$=*yeDL5XZ_OxriwsFLLrsE%6{7ica<7h" +"geD(A*;cxnufhS0`+W?bqRVBT)Z02wCeqQ7D8$6>{zBFyjI8-V`0#D*|KDi+`F-A{=DY_F+F*SiG)K??5%h-REmv!nH{Nu-#UQNc" +"=c00mWE(*CxAF8Cwj%J(vpk0^~w1uabs0G+n3qg+2&oDGrejlmfkk^y$6Lg}ip}KVI1))6w6~yh89l4p5uUAn49x%<6A|wow" +"rtTcA@=>`qy+Y#Pqd>*7X!{C{lCHE&0RP_tOcl;^1R}q~0X&xF6fUNyT(rin%W@%?3@>4J+^qhzh$E+%PKG^g`kYGvxStZrbHtAh" +"`cQnY!P|-dN#rZgW?-+N3Bv`Fw4ujE)HD5TYLOFvs~AmZgrVv88*#YYG3hEMqNVcUI@fFoAJcLq(7HkvU+=DNC?XGD_n_p#M!nnbr51=CLgad@Rjivr~)H%cS-sF{s@V%S_e!OH%rusQJL*b>%I-A5MxQwp}fY_IZV_fdLP!>_us" +"|DeR1;7KsK#1={BqsQ%whm51utzX?XM-LYzmqB&BU62gq3JVf2gHh47qF9yuYvt7w-wv>p0SN0wm|G@q7(|}%Ola3f>DJo5^jR>=" +"qTJ0q!oopZV&)2U?z8gxXjCssoC6|>wxlkYTg1~^mKA4**(H^T(m2!DiLaV`PV6RZJM(8GDB^)^)-5NiHlgWDRypzkV" +"TnU(2?>zMpFE#L6^%Id)M7@E5(ZmOkjYuxoWNg$gx+" +"&YFW9SRSHl;Ys^@JQpe_pLH_?Zv&@vWg&80Dk!J9im@%Flglgf_9LF=Poj%u7QNgoXFFi+>v#D||D;<=_)Zv9BiP5P2K(^AzV" +"v)F*3A0s`mPXYYq;q*y)O&n^kt7JWbc(_13Fl2xl47HyN4t+v9p$I@NuT@-|!wQF+6efYDK0Fvd-aw9nf$5?=N=;mYr3P@Xb{1CY" +"+uxv{VDnN^GZsDQ_S}VUAAVLk2CRJ91mqmA1)K(1acfd`);FsVXVU>Y0q7IS2SJQ!^L$g2=(a!4ksBE^@8z-UZyuAtT}47=#T%NO" +"-KMr7>iyo^2kskcJ5_7Bw~x4M4R3(1_g_a>o!ewPywzhtlWn~{e>Q{}X;{&cp>|O)Ql0VN93c5weHGiUm+@v3lr}g)ux0#1j&9^_" +"3WJEQKRRdpqutSj8lYCn1l)0+mEF;$75+|H!zYZMs>s11?>VO;pgZ|R(#Jrt<9bWZ_5J{D@1Yq6Fqrq(OP_}j{{&aZz$k1m*@s7s" +"xztWI?O{Uv5$w3CyvTp-@xh!Fht^3j6ck^RI1j(7hF#}>zd{ix;27uTm)J~N#kQkQIT+5NO)!wMcnp!eff+M3bmATO1=~FIfrL0b" +"{;CcwW_z&}1FN8R=Z;~_Hm?RuW`ro2JUm#Apu+g3<;-(6^cyAiTD4rG?A(@l3&DuZw_k{teka_1Ny`rr!+!UoB7K3sRF1!=uE4xW" +"D&Cfib?@+K9-AaKaH3!sOBx~yA-A$Fx3~tgANghLoQ$ysBqmSoR0j5O_@EeV+R!2ht2svO#lS5H!;MGEP+%6*~=qUrVPtCCF0r(NGt|vz)Da)6cVl+v=&xvkyMZ*D2RAZIltIoQkSvml)uMkDyr?nZu4vNQ;=Hai+Y%3?%|*WCZuATf12NSyKJw-smF75ZgZ_U" +"h-c*1{iEo+n0a8Je@~LRafuvw8KZ4x2R7W$#1rOobY&O7evKWSRYh^wtoc@&kSqmSzUf$iOG_c(LLvE1U&)l?5m5OxR4TF)T_qFp" +"X^FzoHA2M+_#cDm$Q_)*3F9q~TMyEeJ@IPnvA}?47N3q#mUdk10RGj_=XqKOe7Od?XxN70NYh?cD@~8TJ)E&OgTXa!l^(o$Z0%K~t`{j;n{*KGYs*X~UnXo8}UDacue" +"oa?)Vppuf8a;q?+>Zf;8mM%y;%5R#`+N>Pk%15Q1J8E)O*kh@PhU(Ou@r`Ns)}6}2q~(Pz&HV|cb0U>k6b|UwFZRjVnFW=uSElJs" +"6NAdLna=B14SozVnE-_%ZB$xj>1Hzv0ZRhhhKJL}eTyf?b|u7O25+qeb+eCzbLP*=y%vad%Qr0tT(9ppO=0Z|r?m<3?a7`=ddvT{" +"Hu9B!JGI(ycsD^9Jv`_qTlRlVR3TVhdQ)+e&IcCMZad(qG1WR6tp37r|4<0knqUSY" +"w;nIaK-jpT(T`%pl0e+tz}>|TJ8=nPIM?2y%F)%7w7(1NVV&Ph?0%2KKApt_OW!(wZ9bj=v3UjVB?oQMg>p{)zgQdXCPeW;)ecsllxjQB>2Ce%$wqu3sG`GtJnLy7J(~J`;`h(AT2R+vN>vkn?g%c%G)Ke69^VXRr$0ejxvY_&L<18`z" +"tTwH8KlQJYPS1QF1gLFJf)^%w>`?HgNiW9|ec!w_w?f3S5GJ-PVe;)$o+wih`Mhj@pi0`)g$b8bEMUc~$cm20x8kb@G#je`H*SAq" +"WTaWdb%W0jg}a(ke;V-9Nbufhjnd|_efaBOv*D-E^BJY=2_P)fzB*SQ*-^k%4L(7ZL3_s~ZcAlwN-4Zi71}84E6O0I7RnQfBIFL#" +"zB#-_KntNSwN}{~R0tgho1b_*M3TI@>{^l05a2%{w>jJ>UdY5FZZ{y{N1SwRlW4@;fHhZk(j}*7s$es?yU2kT#mi^xL0&cpk_AVO" +"o$)x=HwsAkPQyMBrkx%8vC23}@nCkFq)~uV=Gz%xBl>Yv{e#aGvR|MH;vdkAP`;$C7@I$^uAC6<1qx|9!f}m_F(d)kV~6~zS7Xzu" +"7mkX!AV>kPkef}L#WQ;_0&Y%jYlz1`&5`5v*-B(c%)n@|n7WUC#X}Qz5_yBNCxWwBa5HwXJ7BqxCwp4UKyx|j;z;C#zj}>=@L=&8" +"DcgeiA%KR@qjZ+<9qtrkHkdPJ&o6kyOO+>$4-U>H2hCRjx^`{J|jNL*#cIu-@OCd>S*Bwq!Uc?eJd(*;LH(" +"D?R$WppcR%LBy1jjapUoS9rAz`@NojgOmQKan^7qp~qd0=2-9_Nlp-=H*WP>6K$P8oyI%*o6)0Br}-Bm=bI$})|X3gO()B8Cfz#b" +"eq;oXq0Z)S^$N=Dv}eQLJEStX=" +"ow5P!-pZuBybc?KN^OO{zBfBMl*5T&#7kPjN*VwMp({jbu{hR>VnS;@^U%1;m-O7-?>hl1vj$|B*lu~3qwz2jGml)1>2`jD8vN35DtDWz51_i`NN4pT;dA!v-U!OGGUh(i;``M0gVs2$KTw%9DFEShjUwqTk=i8*x|)U!G--ik+I|dvk@Jb>v80I3oG(wL$_`=~aOcKy;1%acX" +"`hW%TCIOpng6o!^B3_I5pSCO0idG1N9GS@XLFdYNpIOVAkLQI)GD&Dw5lfmw!_E8{tl?c#jgy*#I02Z^>=CTHHyd{|K;_U5Ubx7j6LtMxtODj;^Q+>^" 
+"N9m;iuDbGBW-oCNMs9GS*_%rir4}Wb4i$c6FLyIhh}y0^o-AvZ^rdV9##G@!kG*(0NqF~<$Z`nb50oEh$6hmoM|T!ie2zQU7J;Ar" +"=|togAT%S%B{PB&B0XgSPAh8JtL^Fb6$z2&*5CC#1Q^^MgdEWcr0WcKu8+O@XTDyDMT2o1ydL!u@|0fOoSf%jNIYXysO2?rh)CNk9A}&L_v-|prjv%7AfPRf`okWRKu8KebY^T-" +"gHc*Zz}xA8mBryO3m*_cPtRWov~6CD&I?fxZqn>w?(gVHUgdjb8ac77KW+ErMllkWl^oRr^A2cWTNfxMd}+;II>p1=hHM28lW=Y0" +"c440h>zZbfPz}tQsZh=s!Z9Md*WGOe4ALPx8EROo&U)8F6P>HQwU;#6ROp5?TlmDUpMAO=Wj@i(zB&mD|2>I2_8;Q%ri>?|5lOvOaXZN`Zh9;CVxBaQ5pG$kISBw*=Op7u9A##@mcZvS<_kxnG|M`" +"mm5yNHXN`7mlH>w-Y!UVS_OQesbxF#*K)0Z=3w?ff)@se>{cgwm7s;EvdIgTgNE|)@wTa|P?Ck8cd$$T#7K8=EwL7)JeJy" +"b;iX%wr0_n+Mx<+ThgSbc={SYcGX(wr?lSvSb|DZd#f&JpSd+d79uA+!x%hgz)y%Z_*b3fJU40g7Aa{8SR6uJRhU$XHjU(C1TZ?y" +"G>iG}#!5dqj-7mjE_Hvq>u@%rDLVc_*zjeQpW=x34Me-~O)e{A9N#J%SQ=`d0)~p6WP&bHDwd)>!xi9^Z1mUup;^_4Sl`H1k*=_8RzecFPiH101LHv-kzTZB(;aAAT>U({8FNynsQu$ydBg^MMZ-4t" +"#a&l!4AJ~wYjaGsE`X_mgxjQT*d~&BS;F*b9=4+z=MY4c4Wd#-faTXNlA{Al`@|59_bjxAwdATm=4cSi<=fTzMU3Y;TgRWFz&Ere" +"5(rk0hcQ*jg5Hu<90q+u8FvBG#N&_?ej2gjoC)OlmKZ>~oHvLSUojY$BA~)9Vlx8|!Q-$%PIGs7N;!b2D<7ciWQt)vc_cX+!k6M9T7r*X&)k_Rpa+L4rpTeF{IM{>LzgY35VIRr$PgpCH&uQPQ1M{avq2ixC" +"1#i{L+%dW8E{8XQ3kxx#(4-8^xPJEq$3GN=!qYy@=KJPo<@jmvCMe;@TIsjOTvN1?" +"Lu^@o$Q)ZVumH%L5Pd&f=TzV#ib1D|mlyRfJy1tUm6E2YXa7ZkgsW{aUJ@k2b7VHu*d9Xi%p-__M90!B" +"^YD&Tcay6jeEYh#nDkT2YRf_rzE7Bn~uW(oB8V8mafDH!w`s=Zl#D~PGldA9{(kmWJqu8W_r?Ss1f{G>=" +"5DZH&D3h_jy0AaCS1+Zx_?oDIOt^G}M`;d7$Y)ia%>iB`b8-p?pNi)cb_9Ok`;5DX@})`jq+FoW)q2" +"k3{{x7PcYqRFyRmMaTE1l%$BMYru)cHhwrG4L3^ZHjSq8sX?bT`GRC{ck?O5j7pM6@lDtr4?jG%CN~@~JM{)EiFZWR1OcLe)J`T}" +"e*t+}ZJ2;#xv}pQLki^ln9LOWqM#8Qg" +"k2r8M_TVV_h*ORF#QsmmeS{M&fZ0)sWMOUVr}!Zp$cLny{G~1~e~#A#f#Rf8*NFX7n#Dinr@~E1O@HLgIe#V6Z`U}4sB65d$SwKA" +"&~6OjIM*@1<5t>%XQtsRFE8ZQ6zGo+UP@k(=r->D_0cE^+Oy_gw4;FPq^OX|)If6|mcpeVwpE0oY?^2^)OkjL@ISw(Md=eZ#qt9tj{Tf{&M3{hVAn=Oo-B&P|2" +"t;vM^X^r+=v{X+F-WH5~g}D*~>dU58uH%!s3dsly2gRQr{?DC_RQk;;JBw7&WSP|yw^UsF_%&mihSA>WzJ;NAnbWb3>ofNTC2May" +"!ax)k)4L=^1S4H0SyV6G=2{*acl1b*A2;(Qz^e%Bbc<-aE~Z)$dCCAWOgJT#f5-BNQpuy3HdrY5^#j-+*`Hr04sqj}d5T5SNIF=s" +"Q9wmyV4ppDD%hnglZEUk^z`^0Io!2w;#ZJ;0J+kGALgy9S&B9)EBEW116VyI;Cr|f*_=AgPIz4dfTEMY4Ezq5ta~agfkuiM|I`k*Erx_QD<9BbQdGk2bWM" +"f+YBXU5KHHRVWgxG==bipgH~tjP>$r%GPg!Baqe!gep;AiFFM)^};ln)6M3^SSL`Ss}l53>J~|6ZKP(}kDocpGvKAeL!Hb>HB-`g" +"QBR!M-RCWX?PDc*C^^P#TKJ-5EXDVjMd;>Zlaz2M&$}3bHe#{dLXVWo90Ufk|" +"*2haQRIucXipeHov--UmF^Di1`lbMJH+bA`;*B*drVeXdQZ0c(`!syyQbXl!1%6Twxwg&4kP^_nXTF`hNL1)J!IyH=s|U+y@S0hj" +"9I`Lk0fUOK{fzR0l(22|rfad#%6E7S^)_BmwhZKR{x=HAI@Ah;1YF*TF{Vl!UcBD!tIEMDryfiNYvdsE7@~3qAp5ftfWI94^^|FsHa$I*UL~dms-Y)" +"4rc9v7$_mTl**nwKOsw65+keJA4xWquvwMWhz&!EK1c>bz6LhGVbjVp|U84gFatCng8H?54aU" +"9ncaJkiQi;T)!JgM*RDe$gVf+=LA5$f0FYuX0dQ>I-RE5)2}8r$gX}g>fi1!OoTcnqYry65>}k(NYb%b_MRLGg2wYGEl%U&Uo6M}" +"GRYQ`L6GZ783Uq1@J1u&=pv-?2?B@AQUQ+B)8o6dVIu6W4*7=MVdyMpUb&Y%OKX6uAra#BH!Uq&V(<8OTGJaBhDJZR<0YfX+37hr" +"A>#`bVAI)4m$FqW`;W*vHRcAZG;x~4JkHDV)3U=S_sy_t4=YmzmsL9N_?!`yHa9~qraMx?!aJD6!MuvsuLWasB7lIfSN$L2WOD6*" +"Xt2q;mQnLAU;M%Cmw|5vg%_@*La$M|o5AZ6j9I$Zr~i>$mU;j3!NVXYif=B9E}^h^RFEh9{+*&yyAYaf1YK+ib|RS;uvDMlku2N9" +"f`_sVYw3D<6wWuTZ|d_k;-QSUq+S~dsTxGBgUX#i#(W6&AG#%zsAO?wF+Wtl=W-Ojzz" +"daHA)Wb?bua<^d$dT&)B>dQTUYNE7;^@+4s#okq30<3u_9kSu!)gijFRG8#|CT#M(eKhgig(Q2Ey+aP{KZO*6$*wiHtGA33E=sz{" +"bbWHu*uNwY=^DqIQK&)^2kpWBUw6R9IO>&XWDlzGzXm+<~)U-T6aV9)Z%lc8tDAvi%<>w+}!kOu&#eatygrQmi" +"0#)>r${4d@-M1QuNnBKq9!Kt?;$8Amb2dSU&r9NKhgfNvKpGmL=rN?d3pPhDHAm)*#$5p#wcguW7Z" +"b4PDgfu=a+J3?)=I9WgWYDLAT;$DdLU873R#Y-=My-I=bM`ZT~mz!@uTkmS=f2|>I;vNul54kAK^;6YH#>~Lh$h_U1_P3)sP?LTB" 
+"QEzs3wWcvI#*Nsa-xq~uSU@)kv_M~l=`HhkzV07^ZmqScyt&n#L?5=uqM-0pQp4TJTTr!Y6hF35$Wy?7a?A=mX<}~NhF?$dF)`I3" +"!hkPUW+$)>GBfU>u_+=((}_X9|7)(j5%;91c)d<-m?z&&3~djxw_8oG{27THYK_FJKh?Rfgo6~cn*2k-4b@+9HG2InW=D$1;4Hfm" +"ZfD4;L*;7)8q{mahe`wEgDwU{Q8Cu9LYeL{Eegu&N%s-(CkmUF$)vQ5JT09^>O&<4Rzpf`uYVv|#I6&Yv" +"(-@0#GoeiuUI<0dw7@%x8oE$5t6QdxgKr~6I0|>)w_G?IlW_FmlWfJ4N8E8@#|l}b>L+J}$>%5psuTOeGK=vP27sJQ%!-y3hYn~2" +"VtV4u|N`Xf(uEZ^tP`qrJ#gj=TH^lx(^p}PpYZodJ|GaFgki@&MMZ;z0kGo{-kzH1)W^79KJ_~WW=?OB&GY>H+`&l7yJgsLy" +"zOoT-#kV3NNo`epQjL{3ZNj(2+x=rO0F+^%m^4x;RX_HIA=b`ko64f{fo`tqvreU?6ORW*" +"MB&wm$dZ>I=s@6+4H;U1Xo=Nu=qkZjcHK_?Ej&xvFUi($`D(iuhijb5%+KTY_E4uuEN7G<5X}s1rBd*ynf``;07LZIbj{Cch0s^g" +">Cs!4pixAx&>N+zAud(FD>B#V;r2VzA93+^Urq6a_zUOsoz_{l09UY&tD~q4c=wo?Gj9Zh`rpdmu2D)`dMd;E@6AsE$-^)_CS3y!" +"G$gOfTGtI`V2M!)Y9rn3*oVnmgb<-oDlJM9tx3=G_x1-o(4xhXT04%T9EUSxg*CufYrN1iR6)TW7+K(VJF9^%Mw66r>dCaQ?>;Nq{QX$yAVt2YacX2ifc#vwBmk$4D*r#!@fcI1$4`h6U4higlWM-pOav" +"J9;cx(bR;n2WVA|b~db;+8XAZYtuk#@&Cjc+YbS`i!uWbd8fk$ss$iOI0aJ;65i|_@hJf6$ewovd~<=&fI5+AP+KpT>UM0ca84m1" +"OR_H-KYLwuiS;dtLE!%lM+u76K}cRHRbT+*@T^yl2;5T~1Q@Ovv%KwWb9xcNj2r0o;BI8|c=cr=c)c1?{DRpFQV^l)ida*J-s)KhdI6a1^B$mY?0)YCcxSu69m#o}q8-DBmlYyT}Z" +"6FBeK5EoecLHp_6mZn?&XkATfk0Nd|#_nK|#$@Tb41E5$2Vs0Pk?6h`R%}^EKs62cim24Y5RHZr@2K*%Gb9K77?_Vl&GdM>hr_q1" +"HO!mXtEQn80h6YYIuUCq*PXd4`V=>BQ@P{EcQ5NcY0334pupiogUhNQRRm%nYmTRZMt$UK`KgF+os}X-)1P=3orWJA81vAmCFM!~C!1Z#" +";uNKF#Nr)F`5_jzmEk5E<2+_CpK%QpM0N`4*6$}wR}}%{%(7JF^ayWdT&?voriNE((^0a;^YFKpm=2*=s#+$m(=!%o9#Kx@<*e@~" +";uRbWjWUU@Dg=hR;r94z|7^AR5cN3mUro6D`I6D$D^)_n?``kDuOUz8!L0F5=+Rh7{$6!gX%<7>uzm>sH=C&NWmqIrlfGx`=k(LY" +"WQl)fIzKbI95?_HN=6;91i*`@uaCTntN&0snYA30?GSLs#XVMx_Da0UpR*B5aGupcfQ-)`c8q57C`@3<5o`MPFy;4L7ck$mnj`5y" +"7X03mFnq}?EF(n3K$-91-J&Io6H;vyNllpJJr`2tBdPSQMaw|>fa_l@N3n@O5Rk;zSyuzH9m!GJI_r{rw=$TRQUP`J4oGv>hU#xl" +"gt(%*v*8@vB`{nY1OudHt248ju4Sqa>{8@sJOSFkuJM(;lQv%M?scDx?(Q9(cLxbQR*m8S^6dTVs#ds1pUJVk^JA2uS$p}1Oj-BD}zmuIU`;Z(0Q!G@8H&{7yck1;rbs0xo~4)BU|^1Tc)dX" +"#$Li1Z*5&!DWb?tlN43tp1XpiO8YU_*t}Xg4%l+C$6P3rXx!" +"_8~-v?74-M^`TTFbN$=TUhScPH>@J${{WBeP^O>{icwAO3O*IP5PgC;pM%cnjmfjrgs^?8^Bqfx*U>gwI|gEf+ied5R#oTlr`=^{" +"k>#MV{)a8DVITBbD;wQ6YI`!_dC4`iSk|O#^IGK>+>4Z^%z+d==E>gw;T29z*zN&tHws*@phT8y" +"KrP1wclP{;ozib(IrfD;&r5&=N+`W9#4HOlcE?cl*}y3jPz6W6dYAao-d>^hcGAZX?Z6k98%g@Yk}id@=Yu-09}Rl&(WMQyh34Z2" +"Smip}IM(`pM*Qi|(spGJhzSQKOT;k*$`D?pZg(=LG8d8(M#Xwk8=sB$QW%B?;JmZ!a57k+1sYEGZg~^58x5w3h7b)Fe3sZbO}TpL" +"b4me^(flWGn&J(heC0^tJ5efJmg!w?dwe;yB?0<@D;>;22ad3g!deM6D`ZaaC9UhL$%K@w?lA0v{@2!it}9_j6W{>3(o!*E_5GC_" +"lf-+>lS={18V#FSp7nxda-0S4EQ*7>eCqc5E8c>cpt%5PJnA{rCu){nR!hn<6g&0_4iCJY`M7R^vON_;Y>!1i88u6VC4eghpZ=2z``=l`yEtp553DYiCj3@TdB&?U)nwoM&rB)4({b(efA^p#F84G7Wh}$" +"I|Dx{0atftj0X6&N4Qdwp7NC~$|}N4cFsVoBWDfcZ<6I7" +"tj1KQEA@?=#R31rXeefy;`OeG?Ml@FkIwCv72Imc9wNK#EcpGSSJ{WVs!YV@Uua0>WK>~~hgY#_I;{0p^tum$w=1}aR3fv9Nr(+Y" +"{)>#;2QaJ~(7@*pcWtR&aqMGCCK1}4G7FJ#1*lC1aX)!zW7nR(?DzzLh_{gB>qTA6o`b&&$Y68{NVM{!J?p6@%B7>?AW~5M#o0h1" +"?Q1@pc8$=_++l_o!4g@~Nd7;X0@h&zNq^HIUtdz`M4L+JZSV;d6!vN!{?" 
+"OPdUo{2J{pvsP;|`6=mdm%*Wvtt^}0%4aah&WrP5!UY`rRwDYQ(EOd_s6jWv5PmI@!4N-fF5N;KvFvn(RYdb)xlD)~KwYkDQ`Mk#" +"zD#;eFLYoq(-u~L49o0M?I}|%1->~ma&)~lhwNJ}MCdUQJM1N`4^smZ`~?sb<6R#tD<37$_Jw|AK*7(VVJvSSV0wk_3?i=HHKA*7" +"`nq6J92^G*Q-pTC@B^oi9!lr#yM5d}W@5N;LIapScy~`9@!*)zQ)}fs4wzJUEPvcUHGk*!&Us{W4EYiqWuNCxAgtgdV)%FQ9{5I(`L##D9~rc!2u1N6" +"AFd~FU_PnCQ1Th6GENLaUR;&+5$~LOF(7f~!mW}qEvUY-kvu5I4uxks7JQUl?HYDi|K^pMQbmx64c&ENO68(7tLYw6DC{6t0qE4-" +"JEmKY{Ru<|Yk$+&HB^PMmzHK5!eUcMPBQmYuB5gl#IC4SxV$tdGKin09~N4~gid29Bh(;-Mzp)FH8Up6aYRJz9`j8{8;JpYmpG}X" +"bmCH^Gi+VI%Pw_Bxbj(o9`GD^0Kn1GvAwk6y9nh2y>?*b-ve6wJNTgzA1oA&x8s_LpqseClvl*MazEFZ?H?xuBX}i30f2BLIYtF<" +"j={w+%|@`tN1F(Jv>@}F1#Vo@wqcU{aAq~=EajWTpL~)H=)XnP(B9bRxr{VieFB0Qo{&r-r&d`4n3N^hG}C" +"$BM5Joy}&J*8VhVKCp#7ziXb^;)0&bcO8&&M52ZOJD!n@@+w=PdXDf{@^Anv;GwmDJtde_%O8cF+sW5t`%Juu5szMuvzqnRP^dt6p(nTgYefyS}C_^17qYm5jRoz>?r97o%nAvv3I>ev~Ir=j}PHN5`IT4D-5Vi&}0X;LO;i5M0" +"9fkbwQ(;-cg~Fp*Uj7&%X#T?Zm51b1;(ncguqiP6)r-Ll=L*YY%<4dHxMCT(TWaVlX`=O_a#A_IWYB=m{C=4_dqqtNo^BJK(z5jd" +"e)9Py9DoZs>=KFE;9Hv46tQpgO*GJ>9U5W8%@bbnk5<0o(P5+QA~4Nj86QTs#kDxMmPM}@cv0(W_H#8a(O1RjMZ{L^)%5@_P38!0" +"0_8rd`-Jdgt*1mZmtZRwYQ3Ute7O(#Zsgu;Q_k8k?B(>y#FOAgXp1ufX{>M6W~VsGzERF9O|QH-EvV+k;4&JpVJFhhXJHfDQQB?5" +"%3e7)(Zc#CpebfrSOJ(Mb1Pl&3t-3==BZpsF+S??SIz&W{UAP{*_ncmmfe-QZ9lm#q#)d_V}`a#xxiQ~G<5sSb*?5vO^Cu2kkxeX" +"VWVVLde(gVJ_so(LgIxPv>wukqu&RUbh$?}%4cPSlripDYorV&!frg^P2kBFzun%mJUIM7&TtcFnR;(99E#`u_!vkzT!G$w|36$x#(&;k{l5l_S8t*5550W{005lQ-{8*KX<@CnH)=MGp%l~o!X&c~Td^_*k0Izx>m&g9W0k~c#<^2gD" +"(0w7k4=mMoDxmKF$aLeE^OAS2sWi)__*0es{SBCXF0=CM13?xt#t;K(gBh@jrb&wvSN(kDc0*_gcoEqR{GVpI(-Qdr;vfg8`E-=F{1N&q-`965>)Z`a{$UBV+FU0&?qqvZ9)q{)4N" +"#}etU80vu&ot@iyBxO&4o+b;N+5c4jI@Y)GaNH90!+x|5)G" +"fP5}e%I@mD(UTzw<$T~?sIzTU`|2gTV}j@}%uFRD*L9daX+&iwC*SgNc?F6Ite|-53p%GGhAl^YQNJ%GTqeW!EVud~bb#^FWPU+s" +"X-?9v@O2)^%yDTnbzwI<*Mht#&BzPuK1>itqh_JoTzx{1By1l7rXi`qy|gNaVfN)%B%t-Ost4VZN@@N0xj6pTt" +"Apgp440B7Rd(zU8(%J(N^+?RZ={7hdyeQ7*H4fud>~uYozCai^Z7I#Qdyo5j2p(u3a_Ml?Xs!UkMSWtXORw%lQ}&%R+O%lfyQlh+" +"ZsS-&w(cParZAaIF^)#8Q^J7mYE*(qJ~*b+m*k5>oD+fi;T2i1om2@({YuPN3Tn+dsv&}?u$>9Gk4$})8rxf`nD|PcT!E%6Nw2C{" +"nli3fjF>VRW0jq;?gipXbh(GV(@)CB{nTJ?um=HRctjrVE^e_8*EGaVtnLf{Tr*iwvE%0vuc+^PIL7*4avW}>Afys7&vEWN&^i5P" +"J4~xu+dE-*Qr+MOQspTu0uo4&J;Zd=UYvG_gJ(itz{7J>3d?-N{1%H_^>u|uv#!Qpl;mX%5}}DvCXW!Fh$WuU0oLs0cr(p|AqvdR" +"#&jD)lC){6#wfju4xbX8AM2Y3`P{xRDsK<2=Y$(owbo;oMpp77Ha>gF5" +"U=EbNBEc>~b|e*MuJA$Q02r~garv~aYnR%Sm9EbWuQqP5a$SV6CA0(!X*lHSZ8m+iATY4~" +"26|HLgk+5h`FLW&!pOahsM8h*|S5R|oGfuK*vxdXtdki?3@Bn~9If{tI*RgBG$Mxlahh&fCvyw{spD&`Ct" +"Gb=c;@cJaO{2Ija=yVQ`6EkmS6+Z>xp<+iFosV9z3pz%bv3>-9S2v14JmMWhd1F" +"q2&yV6EZtBQJuFeXHjk$(n`#Idw~@tWD6uces;&gT`_WRjl*rISr$q}j#X`dw7+2}YiXtyM|m42yItkuC(ntzS}7Wc+8GvImNd|ee0h}$EhW;MYS{@Uxe-u@z`%1$*jKM_Sljj;--Ir7FtPA)EY;5MV;^5U3qMPg#q?_Gns^Qqcl55rr5Z#4" +"H6iFlQ$`AW1~;L5NEu@$S<97" +"qKKsbd_>w2LrO8|q0U9Wyar}|-@BHPq->6$@|PPRV+7K_$I7(_AtnG#&57b@GQnV1%C1`Xx)4pknq?LeJ}7)%Gk{+T8A@or1>MQP" +"2^;GcbDg;P(<%wqlloa)7%@ILUp(J6k%4VNR1w!&qxvXmQrAt{TO${mu&4{~CR>k_92E;r@lJGL_rQHYdOb$%{_Q10w++9s_S@KA?e9mw9U!*hCnsyt)" +"eTLY(^{y|w{PnYsPBkK|YK2-;dV1?1!=sZY8_3yv+2g~)yhY(lu-A5qRUp33P~e?A_JLSoo=Z~L;7d>Wv8sF@%<6ptuu;3NAPNF%" +"YmQ%5v3Bh;3Ua*_VT=`AFiHjgC68)?3yzAa*x)OyFYLsUgdX+?EF9F##!O2Wqd%jm6Va^=@UY=P{56znYU9CgtxTm`ATsw<#d@eI=m$%LnQxVRCBN3O@&0T+8o$J(9I^^MPD*z&ob&lrS5uS=7RE7(_LSqCgbV9ltW)R&S(dPMNO^cx>q@d>" +"6x)>iHx`R8zht%MS)S+wg9#=zOc*~8YNrt&;I%BX-JptUQ*RJGO>gw=jN-+Qo664!<;VYG_SGoFy4PpaKofectl}d-&yEYYZ^)Z|" +"7Mex&-n=PO>?nu1J`)(|xtc{7L*5u-_DSfC>F*!`tlKBpoYSUpS+B&zT7($3tJR85Ob4XQJ`0lL&WAE3t5Q=n-Yj=R+V^b" 
+"m6};Q4GhFkVjJ;q=)_VmZaIb(8J4B?JI0(&K8J<(4*{v|L9S372TCFGV-PJ?AFt9Ynypg0kG|Js!q7KC{?3{raDGB-fCT_sKUYZ(" +"V;^=F{e(7DN#|Mtz@LF=%RUAvUCc(QN477};c;@vSw-j@DeUKs1Wl6pbdwF~*Z#ibj$m0;FREt_hZxGDC+}ovB(IIE%e6WCrEce|" +"eaEE#PO<-XbRm#uvA){KYpl(cYsH_|fchi3HAtrB)MLr0Q`((hN;t-LlIvLYe8|=w3Zz!%6vMT5gpLH1dG&~D" +"_`Ap~4WkNfWCNs;aVz_u7g}-QR%6fjadI3nh5QWLA98`GJ#(L^Ey{_wjqZH&Wx2R#D2#$x(HKvSz18!EXTW3h;o~js;De@v?Ek}t" +"7;nOyCfR@qMvyqN-&U(#L|s?$9@|!C^Cjr9L5RuL7HaN};BTr$Q7J{Ge&TVp(m?^NPZic-TEef($&lBEvm2bc2}EhZjq2t*iS$8Y" +"fWPAQF}os#H(bM^h=8C{#Td0^5TISe)~->-nK-Kp$q1oI@d#_I" +"c}I%;tc(PCPMtNlx2s`^bE>PX4l{8BfoMuBg6)ZW91*K}z{!QGVv)%ww>S^Di0L6dV$OVW6JBt@_f+wW#N|-K_^VgpVpAMJrLsoqaV!t%}tdCtFA69pG9g" +"w@(8+7rjY1u8jmUtOioKm{|Ho%ijn(^Jc}3O>&@quGmMnXUA^K<32_D>qrigY;k4p3DwnpEruJuatOKbmxJUC0>$HB`dnFqjJif~" +"KuI#VX|}hEaj57nXH1>|zkuUrB?(s|HCXa51dNwjhs;-;IxaH+%f%SKeWdetnfIq~#3FptGqf4>0%($}8g49NO!8TLQCFxT3w7qJx" +"UvOZ7PFun{x6n{pc_cC$-my-w^r8GJ4NnH!_FqFJklS&f`0%yNBQ&mL+n1VmSj1Gp(EmX)-f%4`3gD=guZ>`UDwPqaek$w" +"(O>2rtOh`hDXFT^Y#G1zbtV>2R1OA;jeVK*&o9w3>-7R}8An1Ed" +"Mq%=k7n*UM4r0{@_9x&&)P`C3eBl@N>dC%L8Fwe^mR^Mq^*p~o(1RU+Qn{U;M$$AcClu|}RI^z#rVHnK~vX^lF3ZtE+Ejf!Y" +"pgf$}@aB$`g|W895Hp!UZY&k5(s;lkMfUjKVMZ7M&2<$C%uXHe&1q}&V*_P+My?Mn>sCepp6RZ0&hS|bZy^3S9|(VemMaK?O=r55KLVx?9l(%LYmULH" +"#+r22&}_ZW$DJY7c=jzw^;(XTNp{g@8O@aAx8TOan`jxfob|WPTx)*G)^wz&EN?8dAK+R8Z>R-dH+`@lgu}{cRn#gisUZ)VHaT$ZottN-$S-NBHD#jk3sMu!2Hbb@1xeq#@}er^EjkV^r62puRwJGC^IcG3r}-HcNP0" +"vCa=z_nmSuls=&SsaD)wUtKP?gbznM?o}Yb|2(S2exLNUn6_AC0vS-@vwj4jcp#d2#{75FFZHf`HN>gjzQ^e5X!sX}{b`cz-ik9k" +"p8_hT_YAvfS*nC=>|H^>jCF9@HtkUhdl0|R)Rf3xx26C)U@JD@2n@NqB-zQoZK;" +"#3`?92pY`;N&|LV;Lx2R66i0$HwHu@o@i&#b=YJ1?I7RC7{=yaXJ%_?lO4Qh2gd^W<20S$p@(_L)K`qpgOgd&)M+|qgZ69Debn>rpj>YZYV;V|N;1@1+Kd9Qy%x~(7yxQsBh" +"DGKz1fwm1s`8tKbpF^8XC7RVq-A8xi6f}8SB{I;o2QY26bpew^0lgvFY?*ZKx&1=CdsPNdxI~Uh7Iva$#%04-;^sit0A~l~E$7}a" +"+nf|h5c;@eQ*zO#J8x|W6^M2$4ckD~(vcYS970F~KBz9{j)`v0Wtp5NQK(OTG*BC~$`VfyxWZC++?U`4" +"0-gAyV@+-rRTutnG3~9Z9-Idela(@MEHhb5LTW{JqC(yWsouMc>FWY5GE9uS?C$^dH#cwyfg?a*t_=Y@R&wIgZSas-8Bm8|zX}HL" +"iS;AA*SvbzXB-wK*OSpK;%sC8*=YBHK2ChYDrBsP?*KV|P~~b($>n={x}q@_c-^SXMLJ7UdMLXoZHvCf7gS)Ym+vR|?e`Q(vt|V4" +"JmVE4Z)6_478h-<=obw*C`#8eyl$G}LphfD4R)S>$2dO|gmCux29_>zf6U4Y)Iy{onFy0NS@-Q9FpK;dqp+Kky#eOihN`53ABgd6" +"Z8Ye3yD(35KwSU")),format=L.FORMAT_RAW,filters=[{"id":L.FILTER_LZMA2}])) diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_gpt_human.py b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_gpt_human.py new file mode 100644 index 0000000000..0c164338ad --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_gpt_human.py @@ -0,0 +1,2973 @@ +from __future__ import annotations +import copy +import glob +import io +import lzma +import math +import os +import random +import subprocess +import sys +import time +import uuid +import zlib +from pathlib import Path + +import numpy as np +import sentencepiece as spm +import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch import Tensor, nn +from torch.nn.parallel import DistributedDataParallel as DDP + +# Compression preference: brotli > lzma > zstd > zlib +# brotli q=11 typically beats lzma preset=9 by 1-5% on quantized weights +try: + import brotli as _brotli_probe # noqa: F401 + _COMPRESSOR = "brotli" +except ImportError: + _COMPRESSOR = "lzma" + +# Byte-shuffle preprocessing: reorder bytes by stride position before compression. 
+# For multi-byte values (float16 scales), grouping same-position bytes +# together creates runs of similar values -> better entropy coding. Lossless & fast (<1s). +_BYTE_SHUFFLE = True +_BYTE_SHUFFLE_STRIDE = 2 # 2 = optimal for float16-heavy data + +_BSHF_MAGIC = b"BSHF" # 4-byte magic for byte-shuffled data + + +def _byte_shuffle(data: bytes, stride: int = 2) -> bytes: + """Transpose byte stream by stride using numpy vectorized indexing. + + Groups byte[i % stride] positions together. For stride=2 on float16 data, + this puts all high bytes together and all low bytes together, dramatically + improving compression. Prepend 4-byte magic + 1-byte stride header. + """ + if stride <= 1 or len(data) < stride: + return data # no-op + src = np.frombuffer(data, dtype=np.uint8) + n = len(src) + out = np.empty(n, dtype=np.uint8) + dest_off = 0 + for pos in range(stride): + chunk = src[pos::stride] # every stride-th byte starting at pos + out[dest_off:dest_off + len(chunk)] = chunk + dest_off += len(chunk) + return _BSHF_MAGIC + bytes([stride]) + out.tobytes() + + +def _byte_unshuffle(data: bytes) -> bytes: + """Inverse of _byte_shuffle using numpy. Auto-detects BSHF magic header.""" + if len(data) < 5 or data[:4] != _BSHF_MAGIC: + return data # no shuffle header -> old format, return as-is + stride = data[4] + if stride < 2: + return data[5:] # invalid stride, just strip header + payload = np.frombuffer(data, dtype=np.uint8, offset=5) + n = len(payload) + out = np.empty(n, dtype=np.uint8) + src_off = 0 + for pos in range(stride): + chunk_len = n // stride + (1 if pos < n % stride else 0) + out[pos::stride][:chunk_len] = payload[src_off:src_off + chunk_len] + src_off += chunk_len + return out.tobytes() + + +class Hyperparameters: + data_path = os.environ.get("DATA_PATH", "./data/datasets/fineweb10B_sp1024") + train_files = os.path.join(data_path, "fineweb_train_*.bin") + val_files = os.path.join(data_path, "fineweb_val_*.bin") + tokenizer_path = os.environ.get("TOKENIZER_PATH", "./data/tokenizers/fineweb_1024_bpe.model") + run_id = os.environ.get("RUN_ID", str(uuid.uuid4())) + seed = int(os.environ.get("SEED", 1337)) + val_batch_size = int(os.environ.get("VAL_BATCH_SIZE", 524_288)) + + train_log_every = int(os.environ.get("TRAIN_LOG_EVERY", 500)) + iterations = int(os.environ.get("ITERATIONS", 20000)) + warmdown_iters = int(os.environ.get("WARMDOWN_ITERS", 2000)) + lr_floor = float(os.environ.get("LR_FLOOR", 0.05)) # Minimum LR as fraction of peak (prevents sharp quant-sensitive minima) + warmup_steps = int(os.environ.get("WARMUP_STEPS", 20)) + train_batch_tokens = int(os.environ.get("TRAIN_BATCH_TOKENS", 786_432)) + train_seq_len = int(os.environ.get("TRAIN_SEQ_LEN", 2048)) + eval_seq_len = int(os.environ.get("EVAL_SEQ_LEN", 2048)) + max_wallclock_seconds = float(os.environ.get("MAX_WALLCLOCK_SECONDS", 600.0)) + qk_gain_init = float(os.environ.get("QK_GAIN_INIT", 4.0)) + vocab_size = int(os.environ.get("VOCAB_SIZE", 1024)) + num_layers = int(os.environ.get("NUM_LAYERS", 11)) + num_kv_heads = int(os.environ.get("NUM_KV_HEADS", 4)) + model_dim = int(os.environ.get("MODEL_DIM", 512)) + num_heads = int(os.environ.get("NUM_HEADS", 8)) + mlp_mult = float(os.environ.get("MLP_MULT", 3.5)) + tie_embeddings = bool(int(os.environ.get("TIE_EMBEDDINGS", "1"))) + rope_base = float(os.environ.get("ROPE_BASE", 10000.0)) + logit_softcap = float(os.environ.get("LOGIT_SOFTCAP", 30.0)) + embed_lr = float(os.environ.get("EMBED_LR", 0.6)) + head_lr = float(os.environ.get("HEAD_LR", 0.008)) + tied_embed_lr = 
float(os.environ.get("TIED_EMBED_LR", 0.035)) + tied_embed_init_std = float(os.environ.get("TIED_EMBED_INIT_STD", 0.005)) + matrix_lr = float(os.environ.get("MATRIX_LR", 0.025)) + scalar_lr = float(os.environ.get("SCALAR_LR", 0.025)) + muon_momentum = float(os.environ.get("MUON_MOMENTUM", 0.99)) + muon_backend_steps = int(os.environ.get("MUON_BACKEND_STEPS", 4)) + muon_momentum_warmup_start = float(os.environ.get("MUON_MOMENTUM_WARMUP_START", 0.92)) + muon_momentum_warmup_steps = int(os.environ.get("MUON_MOMENTUM_WARMUP_STEPS", 1500)) + beta1 = float(os.environ.get("BETA1", 0.9)) + beta2 = float(os.environ.get("BETA2", 0.95)) + adam_eps = float(os.environ.get("ADAM_EPS", 1e-8)) + grad_clip_norm = float(os.environ.get("GRAD_CLIP_NORM", 0.3)) + eval_stride = int(os.environ.get("EVAL_STRIDE", 64)) + swa_enabled = bool(int(os.environ.get("SWA_ENABLED", "1"))) + swa_every = int(os.environ.get("SWA_EVERY", 50)) + swa_threshold = float(os.environ.get("SWA_THRESHOLD", 0.2)) + muon_wd = float(os.environ.get("MUON_WD", 0.04)) + adam_wd = float(os.environ.get("ADAM_WD", 0.04)) + xsa_last_n = int(os.environ.get("XSA_LAST_N", 11)) + rope_dims = int(os.environ.get("ROPE_DIMS", 16)) + ln_scale = bool(int(os.environ.get("LN_SCALE", "1"))) + ve_enabled = bool(int(os.environ.get("VE_ENABLED", "1"))) + ve_dim = int(os.environ.get("VE_DIM", 128)) + ve_layers = os.environ.get("VE_LAYERS", "9,10") + ema_enabled = bool(int(os.environ.get("EMA_ENABLED", "1"))) + ema_decay = float(os.environ.get("EMA_DECAY", 0.997)) + # New hyperparameters from our improvements + embed_beta1 = float(os.environ.get("EMBED_BETA1", 0.7)) + head_beta1 = float(os.environ.get("HEAD_BETA1", 0.7)) + muon_post_norm = os.environ.get("MUON_POST_NORM", "row_col") + qat_threshold = float(os.environ.get("QAT_THRESHOLD", 0.15)) + qat_clip_pct = float(os.environ.get("QAT_CLIP_PCT", 0.9995)) + late_qat = bool(int(os.environ.get("LATE_QAT", "1"))) + mixed_precision = bool(int(os.environ.get("MIXED_PRECISION", "1"))) + # mp_promote removed — dynamic allocation based on artifact size budget (see _allocate_bits_mixed) + target_bytes_limit = int(os.environ.get("TARGET_BYTES", 16_000_000)) + # N-gram params + ngram_buckets = int(os.environ.get("NGRAM_BUCKETS", 8192)) + ngram_heads = int(os.environ.get("NGRAM_HEADS", 2)) + ngram_orders = int(os.environ.get("NGRAM_ORDERS", 2)) + ngram_dim_per_head = int(os.environ.get("NGRAM_DIM_PER_HEAD", 32)) + # GPTQ calibration + gptq_calib_batches = int(os.environ.get("GPTQ_CALIB_BATCHES", 64)) + gptq_block_size = int(os.environ.get("GPTQ_BLOCK_SIZE", 128)) + gptq_damp = float(os.environ.get("GPTQ_DAMP", 0.01)) + gptq_reserve_ms = float(os.environ.get("GPTQ_RESERVE_MS", 9000.0)) # Reserve from training budget for GPTQ calibration (observed max 7.3s) + gptq_col_order = os.environ.get("GPTQ_COL_ORDER", "desc") # "desc" (actorder) or "asc" (PR#753-style) + gptq_single_pass = bool(int(os.environ.get("GPTQ_SINGLE_PASS", "1"))) # Pre-compute scales, run GPTQ once + soft_round_qat = bool(int(os.environ.get("SOFT_ROUND_QAT", "1"))) # 1=soft-round, 0=STE + # TTT (Test-Time Training) — score-first legal TTT on validation data + ttt_enabled = bool(int(os.environ.get("TTT_ENABLED", "0"))) + ttt_lr = float(os.environ.get("TTT_LR", "0.005")) + ttt_epochs = int(os.environ.get("TTT_EPOCHS", "5")) + ttt_chunk_tokens = int(os.environ.get("TTT_CHUNK_TOKENS", "65536")) + ttt_freeze_blocks = int(os.environ.get("TTT_FREEZE_BLOCKS", "0")) + ttt_grad_clip = float(os.environ.get("TTT_GRAD_CLIP", "1.0")) + ttt_batch_seqs = 
int(os.environ.get("TTT_BATCH_SEQS", "32")) + ttt_byte_weighted = bool(int(os.environ.get("TTT_BYTE_WEIGHTED", "0"))) + ttt_polyak = bool(int(os.environ.get("TTT_POLYAK", "1"))) + ttt_polyak_decay = float(os.environ.get("TTT_POLYAK_DECAY", "0.998")) + ttt_temp = float(os.environ.get("TTT_TEMP", "1.0")) + ttt_temp_mode = os.environ.get("TTT_TEMP_MODE", "adaptive") + ttt_adaptive_lr = bool(int(os.environ.get("TTT_ADAPTIVE_LR", "0"))) + ttt_adaptive_lr_max = float(os.environ.get("TTT_ADAPTIVE_LR_MAX", "3.0")) + ttt_optimizer = os.environ.get("TTT_OPTIMIZER", "sgd") + ttt_momentum = float(os.environ.get("TTT_MOMENTUM", "0.9")) + ttt_entropy_adapt = bool(int(os.environ.get("TTT_ENTROPY_ADAPT", "1"))) + ttt_entropy_high = float(os.environ.get("TTT_ENTROPY_HIGH", "2.1")) + ttt_entropy_low = float(os.environ.get("TTT_ENTROPY_LOW", "1.75")) + ttt_compile = bool(int(os.environ.get("TTT_COMPILE", "1"))) + # Snapshot: save unbanked_sd + hessians after training, or load them to skip training + snapshot_post_hessian = bool(int(os.environ.get("SNAPSHOT_POST_HESSIAN", "0"))) # save snapshot and exit + load_snapshot = os.environ.get("LOAD_SNAPSHOT", "") # path to snapshot file; skips training if set + +# --- Turbo-Muon Newton-Schulz orthogonalization --- + +# Polar Express optimal degree-5 coefficients (Amsel et al., arXiv:2505.16932). +# With AOL preconditioning we skip iter 1 -- AOL already contracts the singular value +# range via Gershgorin scaling, so we start from iteration 2. +_POLAR_COEFFS_FULL = [ + (8.28721201814563, -23.595886519098837, 17.300387312530933), # iter 1 (Frobenius init only) + (4.107059111542203, -2.9478499167379106, 0.5448431082926601), # iter 2 + (3.9486908534822946, -2.908902115962949, 0.5518191394370137), # iter 3 + (3.3184196573706015, -2.488488024314874, 0.51004894012372), # iter 4 + (2.300652019954817, -1.6689039845747493, 0.4188073119525673), # iter 5 + (1.891301407787398, -1.2679958271945868, 0.37680408948524835), # iter 6 + (1.875, -1.25, 0.375), # iter 7+: converged fixed point +] +_AOL_POLAR_COEFFS = _POLAR_COEFFS_FULL[1:] # skip iter 1 when using AOL + + +def zeropower_via_newtonschulz5(G: Tensor, steps: int = 4, eps: float = 1e-7) -> Tensor: + """Turbo-Muon: Newton-Schulz with left-Gram AOL + Polar Express coefficients. + + Supports both 2D (M, N) and batched 3D (B, M, N) input. + Uses left Gram (X@X.T) throughout -- always (m*m) where m <= n, giving + tighter Gershgorin bounds and up to 9x cheaper matmuls for non-square matrices. 
+ """ + X = G.bfloat16() + if X.ndim == 2: + # --- 2D path (single matrix) --- + transposed = X.size(0) > X.size(1) + if transposed: + X = X.T + A = X @ X.T + s = 1.0 / (A.abs().sum(dim=1).sqrt() + eps) + X = s.unsqueeze(1) * X + A = s.unsqueeze(0) * A * s.unsqueeze(1) + for i in range(steps): + a, b, c = _AOL_POLAR_COEFFS[min(i, len(_AOL_POLAR_COEFFS) - 1)] + if i > 0: + A = X @ X.T + B = b * A + c * A @ A + X = a * X + B @ X + return X.T if transposed else X + else: + # --- 3D batched path (B, M, N) --- + transposed = X.size(-2) > X.size(-1) + if transposed: + X = X.mT + A = X @ X.mT # (B, m, m) + s = 1.0 / (A.abs().sum(dim=-1).sqrt() + eps) # (B, m) + X = s.unsqueeze(-1) * X # (B, m, n) + A = s.unsqueeze(-2) * A * s.unsqueeze(-1) # (B, m, m) + for i in range(steps): + a, b, c = _AOL_POLAR_COEFFS[min(i, len(_AOL_POLAR_COEFFS) - 1)] + if i > 0: + A = X @ X.mT + B = b * A + c * A @ A + X = a * X + B @ X + return X.mT if transposed else X + + +def _post_ns_normalize(X: Tensor, mode: str) -> Tensor: + """Muon+ post-NS normalization: equalize per-neuron update magnitudes. + + Modes: "none" (passthrough), "row", "col", "row_col". + Supports both 2D and batched 3D input. + Norms computed in float32 for numerical stability (X is typically bf16 from NS). + """ + if mode == "none": + return X + if mode in ("row", "row_col"): + X = X / (X.float().norm(dim=-1, keepdim=True).to(X.dtype) + 1e-7) + if mode in ("col", "row_col"): + X = X / (X.float().norm(dim=-2, keepdim=True).to(X.dtype) + 1e-7) + return X + + +# --- Parallel Muon optimizer --- + +class Muon(torch.optim.Optimizer): + """Parallel Muon: post-backward reduce-scatter -> local NS5 -> all-gather. + + No DDP for bank params. After backward, this optimizer: + 1. Launches async reduce-scatter for all banks (biggest first) + 2. Returns control so Adam can step on small params while RS is in-flight + 3. Waits for each RS, runs local NS5 on the shard, launches async all-gather + 4. Each all-gather overlaps with next bank's NS5 + """ + def __init__(self, params, lr: float, momentum: float, backend_steps: int, + nesterov: bool = True, weight_decay: float = 0.0, + post_norm: str = "none"): + super().__init__( + params, + dict(lr=lr, momentum=momentum, backend_steps=backend_steps, + nesterov=nesterov, weight_decay=weight_decay, post_norm=post_norm), + ) + self._built = False + + def _build(self): + self._distributed = dist.is_available() and dist.is_initialized() + self._world_size = dist.get_world_size() if self._distributed else 1 + self._rank = dist.get_rank() if self._distributed else 0 + ws = self._world_size + + self._bank_meta = [] + for group in self.param_groups: + for p in group["params"]: + B = p.shape[0] + padded_B = ((B + ws - 1) // ws) * ws + shard_B = padded_B // ws + tail = p.shape[1:] + dev = p.device + self._bank_meta.append({ + 'p': p, + 'B': B, + 'padded_grad': torch.zeros(padded_B, *tail, device=dev, dtype=torch.bfloat16), + 'shard': torch.zeros(shard_B, *tail, device=dev, dtype=torch.bfloat16), + 'shard_mom': torch.zeros(shard_B, *tail, device=dev, dtype=torch.bfloat16), + 'full_update': torch.zeros(padded_B, *tail, device=dev, dtype=torch.bfloat16), + 'scale': max(1, p.shape[-2] / p.shape[-1]) ** 0.5, + }) + # Sort by size descending -- launch biggest reduce-scatters first + self._bank_meta.sort(key=lambda m: -m['p'].numel()) + self._built = True + + def launch_reduce_scatters(self): + """Phase 1: launch async reduce-scatter for all banks. 
Call right after backward.""" + if not self._built: + self._build() + if not self._distributed: + return + self._rs_futures = [] + for m in self._bank_meta: + p = m['p'] + if p.grad is None: + self._rs_futures.append(None) + continue + pg = m['padded_grad'] + pg[:m['B']].copy_(p.grad.bfloat16()) + if pg.shape[0] > m['B']: + pg[m['B']:].zero_() + fut = dist.reduce_scatter_tensor(m['shard'], pg, op=dist.ReduceOp.AVG, async_op=True) + self._rs_futures.append(fut) + + @torch.no_grad() + def step(self, closure=None): + """Phase 3: wait for RS, local NS5, all-gather. Call AFTER Adam steps.""" + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if not self._built: + self._build() + + for group in self.param_groups: + lr = group["lr"] + momentum = group["momentum"] + backend_steps = group["backend_steps"] + nesterov = group["nesterov"] + wd = group.get("weight_decay", 0.0) + post_norm = group.get("post_norm", "none") + + prev_ag_handle = None + prev_m = None + + sharded = self._distributed and hasattr(self, '_rs_futures') + + for i, m in enumerate(self._bank_meta): + p = m['p'] + if p.grad is None: + continue + + if prev_ag_handle is not None: + prev_ag_handle.wait() + pp = prev_m['p'] + upd = prev_m['full_update'][:prev_m['B']] + pp.add_(upd.to(dtype=pp.dtype), alpha=-lr * prev_m['scale']) + if wd > 0.0: + pp.data.mul_(1.0 - lr * wd) + + if sharded and self._rs_futures[i] is not None: + self._rs_futures[i].wait() + g = m['shard'] + buf = m['shard_mom'] + else: + g = p.grad.bfloat16() + state = self.state[p] + if "momentum_buffer" not in state: + state["momentum_buffer"] = torch.zeros_like(g) + buf = state["momentum_buffer"] + + buf.mul_(momentum).add_(g) + if nesterov: + update = g.add(buf, alpha=momentum) + else: + update = buf + + update = zeropower_via_newtonschulz5(update, steps=backend_steps) + update = _post_ns_normalize(update, post_norm) + + if sharded: + prev_ag_handle = dist.all_gather_into_tensor( + m['full_update'], update, async_op=True) + prev_m = m + else: + p.add_(update.to(dtype=p.dtype), alpha=-lr * m['scale']) + if wd > 0.0: + p.data.mul_(1.0 - lr * wd) + + if prev_ag_handle is not None: + prev_ag_handle.wait() + pp = prev_m['p'] + upd = prev_m['full_update'][:prev_m['B']] + pp.add_(upd.to(dtype=pp.dtype), alpha=-lr * prev_m['scale']) + if wd > 0.0: + pp.data.mul_(1.0 - lr * wd) + + if hasattr(self, '_rs_futures'): + del self._rs_futures + + return loss + +# --- Tokenizer evaluation helpers --- + +def build_sentencepiece_luts( + sp: spm.SentencePieceProcessor, vocab_size: int, device: torch.device +) -> tuple[Tensor, Tensor, Tensor]: + sp_vocab_size = int(sp.vocab_size()) + table_size = max(sp_vocab_size, vocab_size) + base_bytes_np = np.zeros((table_size,), dtype=np.int16) + has_leading_space_np = np.zeros((table_size,), dtype=np.bool_) + is_boundary_token_np = np.ones((table_size,), dtype=np.bool_) + for token_id in range(sp_vocab_size): + if sp.is_control(token_id) or sp.is_unknown(token_id) or sp.is_unused(token_id): + continue + is_boundary_token_np[token_id] = False + if sp.is_byte(token_id): + base_bytes_np[token_id] = 1 + continue + piece = sp.id_to_piece(token_id) + if piece.startswith("\u2581"): + has_leading_space_np[token_id] = True + piece = piece[1:] + base_bytes_np[token_id] = len(piece.encode("utf-8")) + return ( + torch.tensor(base_bytes_np, dtype=torch.int16, device=device), + torch.tensor(has_leading_space_np, dtype=torch.bool, device=device), + torch.tensor(is_boundary_token_np, dtype=torch.bool, device=device), + ) 
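# Illustrative sketch (not part of the training script): how the three Muon
# phases are meant to interleave with the Adam step each iteration, per the
# docstrings above. `model`, `muon`, and `adam` are placeholder stand-ins.
def _example_muon_train_step(model, x, y, muon, adam):
    loss = model(x, y)
    loss.backward()
    muon.launch_reduce_scatters()   # phase 1: async reduce-scatter of bank grads
    adam.step()                     # phase 2: Adam on small params overlaps the RS
    muon.step()                     # phase 3: wait RS -> shard NS5 -> async all-gather
    adam.zero_grad(set_to_none=True)
    muon.zero_grad(set_to_none=True)
    return loss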
+def load_validation_tokens(pattern: str, seq_len: int) -> Tensor: + files = [Path(p) for p in sorted(glob.glob(pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {pattern}") + tokens = torch.cat([load_data_shard(file) for file in files]).contiguous() + usable = ((tokens.numel() - 1) // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"Validation split is too short for TRAIN_SEQ_LEN={seq_len}") + return tokens[: usable + 1] + +def eval_val( + args: Hyperparameters, + model: nn.Module, + rank: int, + world_size: int, + device: torch.device, + grad_accum_steps: int, + val_tokens: Tensor, + base_bytes_lut: Tensor, + has_leading_space_lut: Tensor, + is_boundary_token_lut: Tensor, + eval_seq_len: int | None = None, +) -> tuple[float, float]: + seq_len = eval_seq_len or args.train_seq_len + local_batch_tokens = args.val_batch_size // (world_size * grad_accum_steps) + if local_batch_tokens < seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence per rank; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, WORLD_SIZE={world_size}, " + f"GRAD_ACCUM_STEPS={grad_accum_steps}, seq_len={seq_len}" + ) + local_batch_seqs = local_batch_tokens // seq_len + total_seqs = (val_tokens.numel() - 1) // seq_len + seq_start = (total_seqs * rank) // world_size + seq_end = (total_seqs * (rank + 1)) // world_size + val_loss_sum = torch.zeros((), device=device, dtype=torch.float64) + val_token_count = torch.zeros((), device=device, dtype=torch.float64) + val_byte_count = torch.zeros((), device=device, dtype=torch.float64) + model.eval() + with torch.inference_mode(): + for batch_seq_start in range(seq_start, seq_end, local_batch_seqs): + batch_seq_end = min(batch_seq_start + local_batch_seqs, seq_end) + raw_start = batch_seq_start * seq_len + raw_end = batch_seq_end * seq_len + 1 + local = val_tokens[raw_start:raw_end].to(device=device, dtype=torch.int64, non_blocking=True) + x = local[:-1].reshape(-1, seq_len) + y = local[1:].reshape(-1, seq_len) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True): + batch_loss = model(x, y).detach() + batch_token_count = float(y.numel()) + val_loss_sum += batch_loss.to(torch.float64) * batch_token_count + val_token_count += batch_token_count + prev_ids = x.reshape(-1) + tgt_ids = y.reshape(-1) + token_bytes = base_bytes_lut[tgt_ids].to(dtype=torch.int16) + token_bytes += (has_leading_space_lut[tgt_ids] & ~is_boundary_token_lut[prev_ids]).to(dtype=torch.int16) + val_byte_count += token_bytes.to(torch.float64).sum() + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(val_loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(val_token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(val_byte_count, op=dist.ReduceOp.SUM) + val_loss = val_loss_sum / val_token_count + bits_per_token = val_loss.item() / math.log(2.0) + tokens_per_byte = val_token_count.item() / val_byte_count.item() + model.train() + return float(val_loss.item()), float(bits_per_token * tokens_per_byte) + +# --- Quantization helpers --- + +CONTROL_TENSOR_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "CONTROL_TENSOR_NAME_PATTERNS", + "attn_scale,attn_scales,mlp_scale,mlp_scales,resid_mix,resid_mixes,q_gain,skip_weight,skip_weights,skip_gate,skip_gates,smear,ve_layer_scales,ve_shared.scale,ngram_gate", + ).split(",") + if pattern +) +INT8_KEEP_FLOAT_FP32_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "INT8_KEEP_FLOAT_FP32_NAME_PATTERNS", + ",".join(CONTROL_TENSOR_NAME_PATTERNS), + 
).split(",") + if pattern +) +INT8_KEEP_FLOAT_MAX_NUMEL = 65_536 +INT8_KEEP_FLOAT_STORE_DTYPE = torch.float16 +INT8_PER_ROW_SCALE_DTYPE = torch.float16 +INT8_CLIP_PERCENTILE = 99.99984 +INT8_CLIP_Q = INT8_CLIP_PERCENTILE / 100.0 +def tensor_nbytes(t: Tensor) -> int: + return int(t.numel()) * int(t.element_size()) +def keep_float_tensor(name: str, t: Tensor, passthrough_orig_dtypes: dict[str, str]) -> Tensor: + if any(pattern in name for pattern in INT8_KEEP_FLOAT_FP32_NAME_PATTERNS): + return t.float().contiguous() + if t.dtype in {torch.float32, torch.bfloat16}: + passthrough_orig_dtypes[name] = str(t.dtype).removeprefix("torch.") + return t.to(dtype=INT8_KEEP_FLOAT_STORE_DTYPE).contiguous() + return t +def quantize_float_tensor(t: Tensor) -> tuple[Tensor, Tensor]: + t32 = t.float() + if t32.ndim == 2: + clip_abs = ( + torch.quantile(t32.abs(), INT8_CLIP_Q, dim=1) + if t32.numel() + else torch.empty((t32.shape[0],), dtype=torch.float32) + ) + clipped = torch.maximum(torch.minimum(t32, clip_abs[:, None]), -clip_abs[:, None]) + scale = (clip_abs / 127.0).clamp_min(1.0 / 127.0) + q = torch.clamp(torch.round(clipped / scale[:, None]), -127, 127).to(torch.int8).contiguous() + return q, scale.to(dtype=INT8_PER_ROW_SCALE_DTYPE).contiguous() + clip_abs = float(torch.quantile(t32.abs().flatten(), INT8_CLIP_Q).item()) if t32.numel() else 0.0 + scale = torch.tensor(clip_abs / 127.0 if clip_abs > 0 else 1.0, dtype=torch.float32) + q = torch.clamp(torch.round(torch.clamp(t32, -clip_abs, clip_abs) / scale), -127, 127).to(torch.int8).contiguous() + return q, scale +def quantize_state_dict_int8(state_dict: dict[str, Tensor]): + quantized: dict[str, Tensor] = {} + scales: dict[str, Tensor] = {} + dtypes: dict[str, str] = {} + passthrough: dict[str, Tensor] = {} + passthrough_orig_dtypes: dict[str, str] = {} + qmeta: dict[str, dict[str, object]] = {} + stats = dict.fromkeys( + ("param_count", "num_tensors", "num_float_tensors", "num_nonfloat_tensors", "baseline_tensor_bytes", "int8_payload_bytes"), + 0, + ) + for name, tensor in state_dict.items(): + t = tensor.detach().to("cpu").contiguous() + stats["param_count"] += int(t.numel()) + stats["num_tensors"] += 1 + stats["baseline_tensor_bytes"] += tensor_nbytes(t) + if not t.is_floating_point(): + stats["num_nonfloat_tensors"] += 1 + passthrough[name] = t + stats["int8_payload_bytes"] += tensor_nbytes(t) + continue + # Keep tok_emb.weight in fp16 passthrough (tied embeddings — quantization errors degrade all tokens) + if t.numel() <= INT8_KEEP_FLOAT_MAX_NUMEL or name == "tok_emb.weight": + kept = keep_float_tensor(name, t, passthrough_orig_dtypes) + passthrough[name] = kept + stats["int8_payload_bytes"] += tensor_nbytes(kept) + continue + stats["num_float_tensors"] += 1 + q, s = quantize_float_tensor(t) + if s.ndim > 0: + qmeta[name] = {"scheme": "per_row", "axis": 0} + quantized[name] = q + scales[name] = s + dtypes[name] = str(t.dtype).removeprefix("torch.") + stats["int8_payload_bytes"] += tensor_nbytes(q) + tensor_nbytes(s) + obj: dict[str, object] = { + "__quant_format__": "int8_clean_per_row_v1", + "quantized": quantized, + "scales": scales, + "dtypes": dtypes, + "passthrough": passthrough, + } + if qmeta: + obj["qmeta"] = qmeta + if passthrough_orig_dtypes: + obj["passthrough_orig_dtypes"] = passthrough_orig_dtypes + return obj, stats +def dequantize_state_dict_int8(obj: dict[str, object]) -> dict[str, Tensor]: + out: dict[str, Tensor] = {} + qmeta = obj.get("qmeta", {}) + passthrough_orig_dtypes = obj.get("passthrough_orig_dtypes", {}) + for name, q 
in obj["quantized"].items(): + dtype = getattr(torch, obj["dtypes"][name]) + s = obj["scales"][name] + if qmeta.get(name, {}).get("scheme") == "per_row" or s.ndim > 0: + s = s.to(dtype=torch.float32) + out[name] = (q.float() * s.view(q.shape[0], *([1] * (q.ndim - 1)))).to(dtype=dtype).contiguous() + else: + scale = float(s.item()) + out[name] = (q.float() * scale).to(dtype=dtype).contiguous() + for name, t in obj["passthrough"].items(): + out_t = t.detach().to("cpu").contiguous() + orig_dtype = passthrough_orig_dtypes.get(name) + if isinstance(orig_dtype, str): + out_t = out_t.to(dtype=getattr(torch, orig_dtype)).contiguous() + out[name] = out_t + return out + +# --- Data loading --- + +def load_data_shard(file: Path) -> Tensor: + header_bytes = 256 * np.dtype(" None: + self.file_idx = (self.file_idx + 1) % len(self.files) + self.tokens = load_data_shard(self.files[self.file_idx]) + self.pos = 0 + def take(self, n: int) -> Tensor: + chunks: list[Tensor] = [] + remaining = n + while remaining > 0: + avail = self.tokens.numel() - self.pos + if avail <= 0: + self._advance_file() + continue + k = min(remaining, avail) + chunks.append(self.tokens[self.pos : self.pos + k]) + self.pos += k + remaining -= k + return chunks[0] if len(chunks) == 1 else torch.cat(chunks) +class DistributedTokenLoader: + def __init__(self, pattern: str, rank: int, world_size: int, device: torch.device): + self.rank = rank + self.world_size = world_size + self.device = device + self.stream = TokenStream(pattern) + def next_batch(self, global_tokens: int, seq_len: int, grad_accum_steps: int) -> tuple[Tensor, Tensor]: + local_tokens = global_tokens // (self.world_size * grad_accum_steps) + per_rank_span = local_tokens + 1 + chunk = self.stream.take(per_rank_span * self.world_size) + start = self.rank * per_rank_span + local = chunk[start : start + per_rank_span].to(dtype=torch.int64) + x = local[:-1].reshape(-1, seq_len) + y = local[1:].reshape(-1, seq_len) + return x.to(self.device, non_blocking=True), y.to(self.device, non_blocking=True) + +# --- Transformer modules --- + +class RMSNorm(nn.Module): + def __init__(self, eps: float | None = None): + super().__init__() + self.eps = eps + def forward(self, x: Tensor) -> Tensor: + return F.rms_norm(x, (x.size(-1),), eps=self.eps) + +class CastedLinear(nn.Linear): + _qat_enabled: bool = False + _qat_clip_pct: float = 0.9995 + _qat_default_bits: int = 5 + _qat_soft_round: bool = False + _qat_soft_alpha: Tensor = None # type: ignore[assignment] # CUDA tensor, ramped 1→16; None until QAT starts + + def forward(self, x: Tensor) -> Tensor: + w = self.weight.to(x.dtype) + if CastedLinear._qat_enabled and self.training and w.ndim == 2: + bits = getattr(self, '_qat_bits', CastedLinear._qat_default_bits) + if CastedLinear._qat_soft_round: + w = _apply_qat_soft_round(w, self.weight, bits, CastedLinear._qat_soft_alpha) + else: + w = _apply_qat_ste(w, self.weight, bits) + bias = self.bias.to(x.dtype) if self.bias is not None else None + return F.linear(x, w, bias) + + +def _apply_qat_ste(w_cast: Tensor, w_fp32: Tensor, bits: int) -> Tensor: + """Straight-through estimator QAT for a weight matrix.""" + if bits <= 0: + return w_cast + qmax = (1 << (bits - 1)) - 1 + qmin = -(1 << (bits - 1)) + with torch.no_grad(): + w32 = w_fp32.float() + row_max = w32.abs().amax(dim=-1) * CastedLinear._qat_clip_pct + scale = (row_max / float(qmax)).clamp_min(1.0 / float(qmax)) + w_q = (torch.clamp(torch.round(w32 / scale.unsqueeze(-1)), qmin, qmax) * scale.unsqueeze(-1)).to(w_cast.dtype) + return w_cast 
+
+
+def _apply_qat_soft_round(w_cast: Tensor, w_fp32: Tensor, bits: int, alpha: "float | Tensor") -> Tensor:
+    """Differentiable quantization using soft-round (sigmoid approximation of round()).
+
+    Instead of STE (zero gradient through round), uses sigmoid(alpha*(frac-0.5)) which
+    provides real gradient signal pushing weights toward quantization grid points.
+    alpha ramps from 1 (smooth) to 16 (near-hard rounding) during QAT.
+    """
+    if bits <= 0:
+        return w_cast
+    out_dtype = w_cast.dtype
+    qmax = (1 << (bits - 1)) - 1
+    qmin = -(1 << (bits - 1))
+    # Scale: detached so gradient doesn't flow through row_max computation
+    row_max = w_fp32.float().detach().abs().amax(dim=-1) * CastedLinear._qat_clip_pct
+    scale = (row_max / float(qmax)).clamp_min(1.0 / float(qmax))
+    # Differentiable quantization
+    w_scaled = w_fp32.float() / scale.unsqueeze(-1)
+    w_clamped = w_scaled.clamp(float(qmin), float(qmax))
+    w_floor = w_clamped.detach().floor()  # integer part (no grad)
+    frac = w_clamped - w_floor  # fractional part (grad flows through w_clamped)
+    soft = w_floor + torch.sigmoid(alpha * (frac - 0.5))
+    w_q = soft * scale.unsqueeze(-1)
+    return w_q.to(out_dtype)
+
+
+def _apply_bank_qat(w: Tensor, bits: int, dtype: torch.dtype) -> Tensor:
+    """Apply QAT to a bank weight slice, returning the result in the target dtype."""
+    w_cast = w.to(dtype)
+    if CastedLinear._qat_enabled and torch.is_grad_enabled() and w.ndim == 2:
+        if CastedLinear._qat_soft_round:
+            return _apply_qat_soft_round(w_cast, w, bits, CastedLinear._qat_soft_alpha)
+        return _apply_qat_ste(w_cast, w, bits)
+    return w_cast
+
+
+def restore_low_dim_params_to_fp32(module: nn.Module) -> None:
+    with torch.no_grad():
+        for name, param in module.named_parameters():
+            if (param.ndim < 2 or any(pattern in name for pattern in CONTROL_TENSOR_NAME_PATTERNS)) and param.dtype != torch.float32:
+                param.data = param.data.float()
+class Rotary(nn.Module):
+    def __init__(self, dim: int, base: float = 10000.0, train_seq_len: int = 1024, rope_dims: int = 0):
+        super().__init__()
+        self.dim = dim
+        self.base = base
+        self.train_seq_len = train_seq_len
+        self.rope_dims = rope_dims if rope_dims > 0 else dim
+        inv_freq = 1.0 / (base ** (torch.arange(0, self.rope_dims, 2, dtype=torch.float32) / self.rope_dims))
+        self.register_buffer("inv_freq", inv_freq, persistent=False)
+        self._seq_len_cached = 0
+        self._cos_cached: Tensor | None = None
+        self._sin_cached: Tensor | None = None
+    def forward(self, seq_len: int, device: torch.device, dtype: torch.dtype) -> tuple[Tensor, Tensor]:
+        if (
+            self._cos_cached is None
+            or self._sin_cached is None
+            or self._seq_len_cached != seq_len
+            or self._cos_cached.device != device
+        ):
+            rd = self.rope_dims
+            if seq_len > self.train_seq_len:
+                scale = seq_len / self.train_seq_len
+                new_base = self.base * (scale ** (rd / (rd - 2)))
+                inv_freq = 1.0 / (new_base ** (torch.arange(0, rd, 2, dtype=torch.float32, device=device) / rd))
+            else:
+                inv_freq = self.inv_freq.to(device)
+            t = torch.arange(seq_len, device=device, dtype=inv_freq.dtype)
+            freqs = torch.outer(t, inv_freq)
+            self._cos_cached = freqs.cos()[None, :, None, :]
+            self._sin_cached = freqs.sin()[None, :, None, :]
+            self._seq_len_cached = seq_len
+        return self._cos_cached.to(dtype=dtype), self._sin_cached.to(dtype=dtype)
+def apply_rotary_emb(x: Tensor, cos: Tensor, sin: Tensor, rope_dims: int = 0) -> Tensor:
+    if rope_dims > 0 and rope_dims < x.size(-1):
+        x_rope, x_pass = x[..., :rope_dims], x[..., rope_dims:]
+        half = rope_dims // 2
+
x1, x2 = x_rope[..., :half], x_rope[..., half:] + x_rope = torch.cat((x1 * cos + x2 * sin, x1 * (-sin) + x2 * cos), dim=-1) + return torch.cat((x_rope, x_pass), dim=-1) + half = x.size(-1) // 2 + x1, x2 = x[..., :half], x[..., half:] + return torch.cat((x1 * cos + x2 * sin, x1 * (-sin) + x2 * cos), dim=-1) + +class CausalSelfAttention(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + if dim % num_heads != 0: + raise ValueError("model_dim must be divisible by num_heads") + if num_heads % num_kv_heads != 0: + raise ValueError("num_heads must be divisible by num_kv_heads") + self.num_heads = num_heads + self.num_kv_heads = num_kv_heads + self.head_dim = dim // num_heads + if self.head_dim % 2 != 0: + raise ValueError("head_dim must be even for RoPE") + # No CastedLinear -- weights come from banks + self.q_gain = nn.Parameter(torch.full((num_heads,), qk_gain_init, dtype=torch.float32)) + self.rope_dims = 0 # set by GPT.__init__ for partial RoPE + self.rotary = Rotary(self.head_dim, base=rope_base, train_seq_len=1024) + self.use_xsa = False # set by GPT.__init__ for deep layers only + def _xsa_efficient(self, y: Tensor, v: Tensor) -> Tensor: + """Efficient XSA: subtract self-value projection via GQA-aware reshape (no repeat_interleave). + y: [B, T, H, D], v: [B, T, Hkv, D]. H must be divisible by Hkv.""" + B, T, H, D = y.shape + Hkv = v.size(-2) + group = H // Hkv + y_g = y.reshape(B, T, Hkv, group, D) # [B, T, Hkv, group, D] + vn = F.normalize(v, dim=-1).unsqueeze(-2) # [B, T, Hkv, 1, D] -- broadcast ready + proj = (y_g * vn).sum(dim=-1, keepdim=True) * vn + return (y_g - proj).reshape(B, T, H, D) + def forward(self, x: Tensor, q_w: Tensor, k_w: Tensor, v_w: Tensor, out_w: Tensor, v_embed: Tensor | None = None) -> tuple[Tensor, Tensor | None]: + bsz, seqlen, dim = x.shape + _qb = CastedLinear._qat_default_bits + q = F.linear(x, _apply_bank_qat(q_w, _qb, x.dtype)).reshape(bsz, seqlen, self.num_heads, self.head_dim) + k = F.linear(x, _apply_bank_qat(k_w, _qb, x.dtype)).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim) + v = F.linear(x, _apply_bank_qat(v_w, _qb, x.dtype)) + if v_embed is not None: + v = v + v_embed + v = v.reshape(bsz, seqlen, self.num_kv_heads, self.head_dim) + q = F.rms_norm(q, (q.size(-1),)) + k = F.rms_norm(k, (k.size(-1),)) + cos, sin = self.rotary(seqlen, x.device, q.dtype) + q = apply_rotary_emb(q, cos, sin, self.rope_dims) + k = apply_rotary_emb(k, cos, sin, self.rope_dims) + q = q * self.q_gain.to(dtype=q.dtype)[None, None, :, None] + # SDPA expects (B, H, T, D) + y = F.scaled_dot_product_attention( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), + is_causal=True, enable_gqa=(self.num_kv_heads != self.num_heads), + ).transpose(1, 2) + if self.use_xsa: + y = self._xsa_efficient(y, v) + y = y.reshape(bsz, seqlen, dim) + return F.linear(y, _apply_bank_qat(out_w, _qb, y.dtype)), None + +class SmearGate(nn.Module): + def __init__(self, dim: int): + super().__init__() + self.gate = nn.Parameter(torch.zeros(dim, dtype=torch.float32)) + def forward(self, x: Tensor) -> Tensor: + g = torch.sigmoid(self.gate.to(dtype=x.dtype))[None, None, :] + x_prev = F.pad(x[:, :-1], (0, 0, 1, 0)) # prepend zero row, avoids cat+alloc + return torch.lerp(x, x_prev, g) + +class EngramLite(nn.Module): + """Multi-head hash-based n-gram embedding with learned gating (Engram-lite). 
+ + Replaces BigramHashEmbedding with: multi-head hashing for collision resistance, + bigram+trigram coverage, and a per-dim learned gate to suppress n-gram signal + when the Transformer's own reasoning is more informative. + """ + def __init__(self, num_buckets: int, num_heads: int, num_orders: int, dim_per_head: int, model_dim: int): + super().__init__() + self.num_buckets = num_buckets + self.num_heads = num_heads + self.num_orders = num_orders + self.dim_per_head = dim_per_head + total_slots = num_orders * num_heads * num_buckets + concat_dim = num_orders * num_heads * dim_per_head + self.embed = nn.Embedding(total_slots, dim_per_head) + nn.init.normal_(self.embed.weight, std=0.01) + self.proj = CastedLinear(concat_dim, model_dim, bias=False) + self.proj._zero_init = True + self.ngram_gate = nn.Parameter(torch.zeros(model_dim, dtype=torch.float32)) + + def forward(self, input_ids: Tensor) -> Tensor: + B = self.num_buckets + prev_ids = F.pad(input_ids[:, :-1], (1, 0), value=0) + # Bigram hashes (2 heads, independent prime-based mixing) + bi_h0 = (prev_ids * 1009 + input_ids) % B + bi_h1 = ((prev_ids * 2719 + 314159) ^ (input_ids * 3137)) % B + indices = [bi_h0, bi_h1 + B] + # Trigram hashes (2 heads) if enabled + if self.num_orders >= 2: + pp_ids = F.pad(prev_ids[:, :-1], (1, 0), value=0) + tri_h0 = ((pp_ids * 36313) ^ (prev_ids * 27191) ^ (input_ids * 4903)) % B + tri_h1 = ((pp_ids * 7919) ^ (prev_ids * 4391) ^ (input_ids * 6151)) % B + offset = 2 * B + indices.extend([tri_h0 + offset, tri_h1 + offset + B]) + # Unified lookup + concat + all_idx = torch.stack(indices, dim=-1) + all_emb = self.embed(all_idx) + flat = all_emb.reshape(*input_ids.shape, -1) + out = self.proj(flat) + gate = torch.sigmoid(self.ngram_gate.to(dtype=out.dtype))[None, None, :] + return out * gate + +class ValueEmbedding(nn.Module): + """Reinject token identity into attention values at specific layers. 
+ Each table maps vocab tokens to a low-dim embedding, projected to kv_dim.""" + def __init__(self, vocab_size: int, ve_dim: int, kv_dim: int): + super().__init__() + self.embed = nn.Embedding(vocab_size, ve_dim) + nn.init.normal_(self.embed.weight, std=0.01) + self.proj = CastedLinear(ve_dim, kv_dim, bias=False) if ve_dim != kv_dim else None + if self.proj is not None: + nn.init.zeros_(self.proj.weight) + self.scale = nn.Parameter(torch.tensor(0.1, dtype=torch.float32)) + def forward(self, token_ids: Tensor) -> Tensor: + h = self.embed(token_ids) + if self.proj is not None: + h = self.proj(h) + return h * self.scale.to(dtype=h.dtype) + +class MLP(nn.Module): + # Fixed per-layer negative slopes from ASQU v3 converged endpoints + _LAYER_SLOPES = [-0.014, 0.131, 0.225, 0.265, 0.310, 0.354, 0.421, 0.429, 0.417, 0.358, 0.468] + def __init__(self, dim: int, mlp_mult: float, layer_idx: int = 0, num_layers: int = 1): + super().__init__() + self.neg_slope = self._LAYER_SLOPES[layer_idx] if layer_idx < len(self._LAYER_SLOPES) else 0.3 + def forward(self, x: Tensor, up_w: Tensor, down_w: Tensor) -> Tensor: + _qb = CastedLinear._qat_default_bits + x = F.linear(x, _apply_bank_qat(up_w, _qb, x.dtype)) + # Fused leaky_relu²: avoids materializing intermediate leaky_relu tensor + x = torch.where(x > 0, x * x, (self.neg_slope * self.neg_slope) * (x * x)) + return F.linear(x, _apply_bank_qat(down_w, _qb, x.dtype)) + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + mlp_mult: float, + rope_base: float, + qk_gain_init: float, + layer_idx: int = 0, + ln_scale: bool = False, + num_layers: int = 1, + ): + super().__init__() + self.attn_norm = RMSNorm() + self.mlp_norm = RMSNorm() + self.attn = CausalSelfAttention(dim, num_heads, num_kv_heads, rope_base, qk_gain_init) + self.mlp = MLP(dim, mlp_mult, layer_idx=layer_idx, num_layers=num_layers) + self.attn_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.mlp_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.resid_mix = nn.Parameter(torch.stack((torch.ones(dim), torch.zeros(dim))).float()) + self.ln_scale_factor = 1.0 / math.sqrt(layer_idx + 1) if ln_scale else 1.0 + def forward(self, x: Tensor, x0: Tensor, q_w: Tensor, k_w: Tensor, v_w: Tensor, out_w: Tensor, up_w: Tensor, down_w: Tensor, v_embed: Tensor | None = None) -> tuple[Tensor, Tensor | None]: + mix = self.resid_mix.to(dtype=x.dtype) + x_in = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + attn_out, raw_v = self.attn(self.attn_norm(x_in) * self.ln_scale_factor, q_w, k_w, v_w, out_w, v_embed=v_embed) + x_out = x_in + self.attn_scale.to(dtype=x_in.dtype)[None, None, :] * attn_out + x_out = x_out + self.mlp_scale.to(dtype=x_out.dtype)[None, None, :] * self.mlp(self.mlp_norm(x_out) * self.ln_scale_factor, up_w, down_w) + return x_out, raw_v + +class GPT(nn.Module): + def __init__( + self, + vocab_size: int, + num_layers: int, + model_dim: int, + num_heads: int, + num_kv_heads: int, + mlp_mult: float, + tie_embeddings: bool, + tied_embed_init_std: float, + logit_softcap: float, + rope_base: float, + qk_gain_init: float, + ngram_buckets: int = 0, + ngram_heads: int = 2, + ngram_orders: int = 2, + ngram_dim_per_head: int = 32, + xsa_last_n: int = 0, + rope_dims: int = 0, + ln_scale: bool = False, + ve_enabled: bool = False, + ve_dim: int = 128, + ve_layers: str = "9,10", + ): + super().__init__() + self._ve_target_dim = num_kv_heads * (model_dim // num_heads) # kv_dim for value projection + if logit_softcap <= 0.0: + 
raise ValueError(f"logit_softcap must be positive, got {logit_softcap}") + self.tie_embeddings = tie_embeddings + self.tied_embed_init_std = tied_embed_init_std + self.logit_softcap = logit_softcap + self.tok_emb = nn.Embedding(vocab_size, model_dim) + self.bigram = EngramLite(ngram_buckets, ngram_heads, ngram_orders, ngram_dim_per_head, model_dim) if ngram_buckets > 0 else None + self.smear = SmearGate(model_dim) + self.num_encoder_layers = num_layers // 2 + self.num_decoder_layers = num_layers - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = nn.Parameter(torch.ones(self.num_skip_weights, model_dim, dtype=torch.float32)) + self.skip_gates = nn.Parameter(torch.zeros(self.num_skip_weights, model_dim, dtype=torch.float32)) + # Parameter banks: contiguous 3D tensors for batched optimizer + head_dim = model_dim // num_heads + kv_dim = num_kv_heads * head_dim + mlp_dim = int(mlp_mult * model_dim) + self.num_layers = num_layers + self.qo_bank = nn.Parameter(torch.empty(2 * num_layers, model_dim, model_dim)) + self.kv_bank = nn.Parameter(torch.empty(2 * num_layers, kv_dim, model_dim)) + self.mlp_up_bank = nn.Parameter(torch.empty(num_layers, mlp_dim, model_dim)) + self.mlp_down_bank = nn.Parameter(torch.empty(num_layers, model_dim, mlp_dim)) + self.blocks = nn.ModuleList( + [ + Block( + model_dim, + num_heads, + num_kv_heads, + mlp_mult, + rope_base, + qk_gain_init, + layer_idx=i, + ln_scale=ln_scale, + num_layers=num_layers, + ) + for i in range(num_layers) + ] + ) + if rope_dims > 0: + head_dim = model_dim // num_heads + for block in self.blocks: + block.attn.rope_dims = rope_dims + block.attn.rotary = Rotary(head_dim, base=rope_base, train_seq_len=1024, rope_dims=rope_dims) + self.ve_layer_indices = [int(x) for x in ve_layers.split(",") if x.strip()] if ve_enabled else [] + kv_dim_ve = self._ve_target_dim + if self.ve_layer_indices: + self.ve_shared = ValueEmbedding(vocab_size, ve_dim, kv_dim_ve) + self.ve_layer_scales = nn.ParameterList( + [nn.Parameter(torch.ones(1, dtype=torch.float32)) for _ in self.ve_layer_indices] + ) + else: + self.ve_shared = None + self.ve_layer_scales = nn.ParameterList() + self.value_embeds = nn.ModuleList() # keep empty for compat + self.final_norm = RMSNorm() + self.lm_head = None if tie_embeddings else CastedLinear(model_dim, vocab_size, bias=False) + if self.lm_head is not None: + self.lm_head._zero_init = True + if xsa_last_n > 0: + for i in range(max(0, num_layers - xsa_last_n), num_layers): + self.blocks[i].attn.use_xsa = True + self._init_weights() + def _init_weights(self) -> None: + if self.tie_embeddings: + nn.init.normal_(self.tok_emb.weight, mean=0.0, std=self.tied_embed_init_std) + n = self.num_layers + mimetic_alpha = 0.05 + head_dim = self.qo_bank.shape[1] // (self.blocks[0].attn.num_heads) # model_dim // num_heads + num_kv_heads = self.blocks[0].attn.num_kv_heads + num_heads = self.blocks[0].attn.num_heads + group = num_heads // num_kv_heads + # Init banks: orthogonal, with proj layers scaled down and out zero-init via mimetic V-O + for i in range(n): + nn.init.orthogonal_(self.qo_bank.data[i], gain=1.0) # Q + nn.init.orthogonal_(self.kv_bank.data[i], gain=1.0) # K + nn.init.orthogonal_(self.kv_bank.data[n + i], gain=1.0) # V + nn.init.orthogonal_(self.mlp_up_bank.data[i], gain=1.0) # MLP up + nn.init.zeros_(self.mlp_down_bank.data[i]) # MLP down (zero init) + # Mimetic V-O Init for output projection: O_h = -alpha * V_h per head + v_w = self.kv_bank.data[n + i] # 
(kv_dim, model_dim) + o_w = torch.zeros_like(self.qo_bank.data[n + i]) # (model_dim, model_dim) + for kv_h in range(num_kv_heads): + v_block = v_w[kv_h * head_dim : (kv_h + 1) * head_dim, :] + for g_idx in range(group): + q_h = kv_h * group + g_idx + o_w[q_h * head_dim : (q_h + 1) * head_dim, :] = -mimetic_alpha * v_block + self.qo_bank.data[n + i].copy_(o_w) + # Init remaining nn.Linear modules (bigram proj, lm_head) + for name, module in self.named_modules(): + if isinstance(module, nn.Linear): + if getattr(module, "_zero_init", False): + nn.init.zeros_(module.weight) + elif module.weight.ndim == 2 and module.weight.shape[0] >= 64 and module.weight.shape[1] >= 64: + nn.init.orthogonal_(module.weight, gain=1.0) + def _compute_ve_base(self, input_ids: Tensor) -> Tensor | None: + """Precompute shared value embedding base once per forward pass.""" + if self.ve_shared is None: + return None + return self.ve_shared(input_ids) + + def _get_ve(self, layer_idx: int, ve_base: Tensor | None) -> Tensor | None: + """Get value embedding for a specific layer using precomputed base + per-layer scale.""" + if ve_base is None or layer_idx not in self.ve_layer_indices: + return None + ve_idx = self.ve_layer_indices.index(layer_idx) + return ve_base * self.ve_layer_scales[ve_idx].to(dtype=ve_base.dtype) + def forward(self, input_ids: Tensor, target_ids: Tensor) -> Tensor: + n = self.num_layers + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips: list[Tensor] = [] + ve_base = self._compute_ve_base(input_ids) + for i in range(self.num_encoder_layers): + ve = self._get_ve(i, ve_base) + x, _raw_v = self.blocks[i](x, x0, + self.qo_bank[i], self.kv_bank[i], self.kv_bank[n + i], + self.qo_bank[n + i], self.mlp_up_bank[i], self.mlp_down_bank[i], + v_embed=ve) + skips.append(x) + for i in range(self.num_decoder_layers): + bi = self.num_encoder_layers + i + if skips: + skip = skips.pop() + g = torch.sigmoid(self.skip_gates[i].to(dtype=x.dtype))[None, None, :] + scaled_skip = self.skip_weights[i].to(dtype=x.dtype)[None, None, :] * skip + x = torch.lerp(scaled_skip, x, g) + ve = self._get_ve(bi, ve_base) + x, _raw_v = self.blocks[bi](x, x0, + self.qo_bank[bi], self.kv_bank[bi], self.kv_bank[n + bi], + self.qo_bank[n + bi], self.mlp_up_bank[bi], self.mlp_down_bank[bi], + v_embed=ve) + x = self.final_norm(x) + x_flat = x.reshape(-1, x.size(-1)) + targets = target_ids.reshape(-1) + if self.tie_embeddings: + logits_proj = F.linear(x_flat, self.tok_emb.weight) + else: + if self.lm_head is None: + raise RuntimeError("lm_head is required when tie_embeddings=False") + logits_proj = self.lm_head(x_flat) + logits = self.logit_softcap * torch.tanh(logits_proj / self.logit_softcap) + return F.cross_entropy(logits.float(), targets, reduction="mean") + def forward_logits(self, input_ids: Tensor) -> Tensor: + """Return logits (bsz, seq_len, vocab) without computing loss.""" + n = self.num_layers + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips: list[Tensor] = [] + ve_base = self._compute_ve_base(input_ids) + for i in range(self.num_encoder_layers): + ve = self._get_ve(i, ve_base) + x, _raw_v = self.blocks[i](x, x0, + self.qo_bank[i], self.kv_bank[i], self.kv_bank[n + i], + self.qo_bank[n + i], self.mlp_up_bank[i], self.mlp_down_bank[i], + v_embed=ve) + skips.append(x) + for i in 
range(self.num_decoder_layers): + bi = self.num_encoder_layers + i + if skips: + skip = skips.pop() + g = torch.sigmoid(self.skip_gates[i].to(dtype=x.dtype))[None, None, :] + scaled_skip = self.skip_weights[i].to(dtype=x.dtype)[None, None, :] * skip + x = torch.lerp(scaled_skip, x, g) + ve = self._get_ve(bi, ve_base) + x, _raw_v = self.blocks[bi](x, x0, + self.qo_bank[bi], self.kv_bank[bi], self.kv_bank[n + bi], + self.qo_bank[n + bi], self.mlp_up_bank[bi], self.mlp_down_bank[bi], + v_embed=ve) + x = self.final_norm(x) + if self.tie_embeddings: + logits_proj = F.linear(x, self.tok_emb.weight) + else: + logits_proj = self.lm_head(x) + return self.logit_softcap * torch.tanh(logits_proj / self.logit_softcap) + +# --- Sliding window evaluation --- + +def eval_val_sliding( + args: Hyperparameters, + base_model: nn.Module, + rank: int, + world_size: int, + device: torch.device, + val_tokens: Tensor, + base_bytes_lut: Tensor, + has_leading_space_lut: Tensor, + is_boundary_token_lut: Tensor, + stride: int, + batch_seqs: int = 32, + eval_seq_len: int | None = None, +) -> tuple[float, float]: + """Sliding window evaluation: each token scored with maximum context.""" + seq_len = eval_seq_len or args.train_seq_len + total_tokens = val_tokens.numel() - 1 + window_starts = [ws for ws in range(0, total_tokens, stride) + if min(ws + seq_len, total_tokens) - ws >= 1] + total_windows = len(window_starts) + my_s = (total_windows * rank) // world_size + my_e = (total_windows * (rank + 1)) // world_size + my_windows = window_starts[my_s:my_e] + loss_sum = torch.zeros((), device=device, dtype=torch.float64) + token_count = torch.zeros((), device=device, dtype=torch.float64) + byte_count = torch.zeros((), device=device, dtype=torch.float64) + base_model.eval() + compiled_logits = torch.compile(base_model.forward_logits, dynamic=True, fullgraph=True) + with torch.inference_mode(): + for bi in range(0, len(my_windows), batch_seqs): + batch_ws = my_windows[bi:bi + batch_seqs] + bsz = len(batch_ws) + x_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + y_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + wlens: list[int] = [] + for i, ws in enumerate(batch_ws): + end = min(ws + seq_len, total_tokens) + wlen = end - ws + wlens.append(wlen) + chunk = val_tokens[ws:end + 1].to(dtype=torch.int64, device=device) + x_batch[i, :wlen] = chunk[:-1] + y_batch[i, :wlen] = chunk[1:] + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + logits = compiled_logits(x_batch) + nll = F.cross_entropy( + logits.reshape(-1, logits.size(-1)).float(), + y_batch.reshape(-1), + reduction="none", + ).reshape(bsz, seq_len) + for i, ws in enumerate(batch_ws): + wlen = wlens[i] + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_nll = nll[i, s:wlen].to(torch.float64) + loss_sum += scored_nll.sum() + token_count += float(wlen - s) + tgt = y_batch[i, s:wlen] + prev = x_batch[i, s:wlen] + tb = base_bytes_lut[tgt].to(torch.float64) + tb += (has_leading_space_lut[tgt] & ~is_boundary_token_lut[prev]).to(torch.float64) + byte_count += tb.sum() + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(byte_count, op=dist.ReduceOp.SUM) + val_loss = (loss_sum / token_count).item() + bits_per_token = val_loss / math.log(2.0) + tokens_per_byte = token_count.item() / byte_count.item() + base_model.train() + return val_loss, bits_per_token * tokens_per_byte + + +def _ttt_swap_polyak( + params: 
list[nn.Parameter], polyak_state: dict[int, Tensor], +) -> None: + """Swap training weights <-> Polyak EMA weights in-place.""" + for p in params: + pid = id(p) + tmp = p.data.clone() + p.data.copy_(polyak_state[pid]) + polyak_state[pid] = tmp + + +def eval_val_sliding_ttt( + args: Hyperparameters, + base_model: nn.Module, + rank: int, + world_size: int, + device: torch.device, + val_tokens: Tensor, + base_bytes_lut: Tensor, + has_leading_space_lut: Tensor, + is_boundary_token_lut: Tensor, + stride: int, + batch_seqs: int = 32, + eval_seq_len: int | None = None, + eval_budget_t0: float | None = None, + eval_budget_seconds: float = 600.0, +) -> tuple[float, float]: + """Legal score-first TTT: score each chunk, then train on it. + + Every token is scored BEFORE any gradient update that could use it. + Last chunk is scored but never trained on. Uses torch.no_grad() + during scoring for a hard guarantee of no weight mutation. + """ + seq_len = eval_seq_len or args.train_seq_len + total_tokens = val_tokens.numel() - 1 + ttt_chunk = args.ttt_chunk_tokens + log0 = (lambda msg: print(msg, flush=True)) if rank == 0 else (lambda msg: None) + + # -- Pre-compute all window starts -- + window_starts = [ws for ws in range(0, total_tokens, stride) + if min(ws + seq_len, total_tokens) - ws >= 1] + + # -- Assign each window to a chunk based on the first token it scores -- + num_chunks = max(1, (total_tokens + ttt_chunk - 1) // ttt_chunk) + chunk_windows: list[list[int]] = [[] for _ in range(num_chunks)] + for ws in window_starts: + end = min(ws + seq_len, total_tokens) + wlen = end - ws + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_start = ws + s + ci = min(scored_start // ttt_chunk, num_chunks - 1) + chunk_windows[ci].append(ws) + + log0(f"ttt:start chunks={num_chunks} chunk_tokens={ttt_chunk} " + f"total_windows={len(window_starts)} stride={stride} " + f"ttt_optimizer={args.ttt_optimizer} ttt_lr={args.ttt_lr} " + f"ttt_momentum={args.ttt_momentum} ttt_epochs={args.ttt_epochs} " + f"freeze_blocks={args.ttt_freeze_blocks} " + f"entropy_adapt={args.ttt_entropy_adapt}") + + # -- Accumulators -- + loss_sum = torch.zeros((), device=device, dtype=torch.float64) + token_count = torch.zeros((), device=device, dtype=torch.float64) + byte_count = torch.zeros((), device=device, dtype=torch.float64) + + # -- Freeze first N blocks -- + frozen_block_ids = set(range(min(args.ttt_freeze_blocks, len(base_model.blocks)))) + ttt_params: list[nn.Parameter] = [] + for name, p in base_model.named_parameters(): + freeze = False + for bi in frozen_block_ids: + if f"blocks.{bi}." 
in name: + freeze = True + break + if freeze: + p.requires_grad_(False) + else: + p.requires_grad_(True) + ttt_params.append(p) + + log0(f"ttt:params unfrozen={sum(p.numel() for p in ttt_params)} " + f"frozen={sum(p.numel() for p in base_model.parameters() if not p.requires_grad)}") + + # -- Optimizer -- + if args.ttt_optimizer == "adamw": + optimizer = torch.optim.AdamW(ttt_params, lr=args.ttt_lr, weight_decay=0.0) + else: + optimizer = torch.optim.SGD(ttt_params, lr=args.ttt_lr, momentum=args.ttt_momentum) + + # -- Polyak EMA state -- + polyak_state: dict[int, Tensor] = {} + if args.ttt_polyak: + for p in ttt_params: + polyak_state[id(p)] = p.data.clone() + + # Compile forward for scoring + training + if args.ttt_compile: + compiled_logits = torch.compile(base_model.forward_logits, dynamic=False, fullgraph=True) + compiled_logits_train = torch.compile(base_model.forward_logits, dynamic=False, fullgraph=True) + compiled_forward = torch.compile(base_model.forward, dynamic=False, fullgraph=True) + else: + compiled_logits = base_model.forward_logits + compiled_logits_train = base_model.forward_logits + compiled_forward = base_model.forward + + # Pin val_tokens for faster CPU->GPU DMA + if not val_tokens.is_pinned(): + val_tokens = val_tokens.pin_memory() + + t0 = time.perf_counter() + + for ci in range(num_chunks): + # Budget guard + if eval_budget_t0 is not None: + eval_elapsed_s = time.perf_counter() - eval_budget_t0 + if eval_elapsed_s > eval_budget_seconds - 10.0: + log0(f"ttt:BUDGET_ABORT at chunk {ci}/{num_chunks} " + f"(eval_elapsed={eval_elapsed_s:.1f}s >= {eval_budget_seconds - 10.0:.0f}s limit)") + break + + windows = chunk_windows[ci] + if not windows: + continue + + # Distribute windows across ranks + my_s = (len(windows) * rank) // world_size + my_e = (len(windows) * (rank + 1)) // world_size + my_windows = windows[my_s:my_e] + + # Entropy accumulators for adaptive epoch allocation + chunk_entropy_sum = 0.0 + chunk_entropy_count = 0 + + # -- PHASE 1: SCORE (no_grad) -- + if args.ttt_polyak and polyak_state: + _ttt_swap_polyak(ttt_params, polyak_state) + + base_model.train(False) + with torch.no_grad(): + for bi in range(0, len(my_windows), batch_seqs): + batch_ws = my_windows[bi:bi + batch_seqs] + bsz = len(batch_ws) + x_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + y_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + wlens: list[int] = [] + for i, ws in enumerate(batch_ws): + end = min(ws + seq_len, total_tokens) + wlen = end - ws + wlens.append(wlen) + chunk_tok = val_tokens[ws:end + 1].to(dtype=torch.int64, device=device) + x_batch[i, :wlen] = chunk_tok[:-1] + y_batch[i, :wlen] = chunk_tok[1:] + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + logits = compiled_logits(x_batch) + # Temperature scaling + NLL + entropy + if args.ttt_temp != 1.0 and args.ttt_temp_mode == "adaptive": + logits_f = logits.float() + probs = F.softmax(logits_f, dim=-1) + ent = -(probs * (probs + 1e-10).log()).sum(-1) + max_ent = math.log(logits_f.size(-1)) + adaptive_t = 1.0 - (1.0 - args.ttt_temp) * (1.0 - ent / max_ent) + adaptive_t = adaptive_t.clamp(min=0.9, max=1.05) + logits_scored = logits_f / adaptive_t.unsqueeze(-1) + nll = F.cross_entropy( + logits_scored.reshape(-1, logits_scored.size(-1)), + y_batch.reshape(-1), reduction="none", + ).reshape(bsz, seq_len) + if args.ttt_entropy_adapt: + batch_ent = ent + elif args.ttt_temp != 1.0 and args.ttt_temp_mode == "constant": + logits_scored = logits.float() / args.ttt_temp + nll = 
F.cross_entropy( + logits_scored.reshape(-1, logits_scored.size(-1)), + y_batch.reshape(-1), reduction="none", + ).reshape(bsz, seq_len) + if args.ttt_entropy_adapt: + probs_ea = F.softmax(logits.float(), dim=-1) + batch_ent = -(probs_ea * (probs_ea + 1e-10).log()).sum(-1) + else: + logits_f = logits.float() + if args.ttt_entropy_adapt: + log_probs = F.log_softmax(logits_f, dim=-1) + nll = F.nll_loss( + log_probs.reshape(-1, log_probs.size(-1)), + y_batch.reshape(-1), reduction="none", + ).reshape(bsz, seq_len) + probs_ea = log_probs.exp() + batch_ent = -(probs_ea * log_probs).sum(-1) + else: + nll = F.cross_entropy( + logits_f.reshape(-1, logits_f.size(-1)), + y_batch.reshape(-1), reduction="none", + ).reshape(bsz, seq_len) + + for i, ws in enumerate(batch_ws): + wlen = wlens[i] + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_nll = nll[i, s:wlen].to(torch.float64) + loss_sum += scored_nll.sum() + token_count += float(wlen - s) + tgt, prev = y_batch[i, s:wlen], x_batch[i, s:wlen] + tb = base_bytes_lut[tgt].to(torch.float64) + tb += (has_leading_space_lut[tgt] & ~is_boundary_token_lut[prev]).to(torch.float64) + byte_count += tb.sum() + if args.ttt_entropy_adapt: + chunk_entropy_sum += batch_ent[i, s:wlen].sum().item() + chunk_entropy_count += (wlen - s) + + if args.ttt_polyak and polyak_state: + _ttt_swap_polyak(ttt_params, polyak_state) + + # Sync entropy across ranks + if args.ttt_entropy_adapt and world_size > 1: + ent_sync = torch.tensor([chunk_entropy_sum, chunk_entropy_count], device=device, dtype=torch.float64) + dist.all_reduce(ent_sync, op=dist.ReduceOp.SUM) + chunk_entropy_sum = ent_sync[0].item() + chunk_entropy_count = ent_sync[1].item() + + # -- PHASE 2: TRAIN on scored chunk (skip last chunk = legal) -- + if args.ttt_entropy_adapt and chunk_entropy_count > 0: + mean_ent = chunk_entropy_sum / chunk_entropy_count + if mean_ent >= args.ttt_entropy_high: + num_epochs = args.ttt_epochs + 1 + elif mean_ent <= args.ttt_entropy_low: + num_epochs = max(args.ttt_epochs - 1, 0) + else: + num_epochs = args.ttt_epochs + else: + num_epochs = args.ttt_epochs + + is_last = ci == num_chunks - 1 + if not is_last and num_epochs > 0: + base_model.train(True) + cos_lr = args.ttt_lr * 0.5 * (1.0 + math.cos(math.pi * ci / max(num_chunks - 1, 1))) + if args.ttt_adaptive_lr: + progress = min(ci / max(num_chunks * 0.3, 1), 1.0) + lr_mult = 1.0 + (args.ttt_adaptive_lr_max - 1.0) * progress + cos_lr = cos_lr * lr_mult + for pg in optimizer.param_groups: + pg["lr"] = cos_lr + + chunk_start = ci * ttt_chunk + chunk_end = min((ci + 1) * ttt_chunk, total_tokens) + chunk_seqs = (chunk_end - chunk_start) // seq_len + if chunk_seqs > 0: + if chunk_seqs >= world_size: + my_seq_s = (chunk_seqs * rank) // world_size + my_seq_e = (chunk_seqs * (rank + 1)) // world_size + else: + my_seq_s = 0 + my_seq_e = chunk_seqs + my_chunk_seqs = my_seq_e - my_seq_s + + for _ep in range(num_epochs): + for bs in range(0, my_chunk_seqs, batch_seqs): + be = min(bs + batch_seqs, my_chunk_seqs) + actual_bs = my_seq_s + bs + start_tok = chunk_start + actual_bs * seq_len + end_tok = chunk_start + (my_seq_s + be) * seq_len + 1 + if end_tok > val_tokens.numel(): + continue + local = val_tokens[start_tok:end_tok].to(device=device, dtype=torch.int64) + x = local[:-1].reshape(-1, seq_len) + y = local[1:].reshape(-1, seq_len) + optimizer.zero_grad(set_to_none=True) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + if args.ttt_byte_weighted: + ttt_logits = compiled_logits_train(x) + per_token_loss = F.cross_entropy( + 
ttt_logits.reshape(-1, ttt_logits.size(-1)).float(), + y.reshape(-1), reduction="none", + ).reshape(y.shape) + bw = base_bytes_lut[y].float() + bw = bw + (has_leading_space_lut[y] & ~is_boundary_token_lut[x]).float() + loss = (per_token_loss * bw).sum() / bw.sum() + else: + loss = compiled_forward(x, y) + loss.backward() + if world_size > 1: + for p in ttt_params: + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + torch.nn.utils.clip_grad_norm_(ttt_params, args.ttt_grad_clip) + optimizer.step() + if args.ttt_polyak: + for p in ttt_params: + polyak_state[id(p)].lerp_(p.data, 1.0 - args.ttt_polyak_decay) + + # Progress logging (rank-0 local estimate — reflects 1/world_size of data on multi-GPU; + # final returned val_bpb is globally correct after all_reduce) + if rank == 0 and (ci % 10 == 0 or ci == num_chunks - 1 or ci < 3): + elapsed = time.perf_counter() - t0 + rl = loss_sum.item() / max(token_count.item(), 1) + rbpb = rl / math.log(2.0) * (token_count.item() / max(byte_count.item(), 1)) if token_count.item() > 0 else 0.0 + ent_str = f" ent={chunk_entropy_sum / max(chunk_entropy_count, 1):.2f} ep={num_epochs}" if args.ttt_entropy_adapt and chunk_entropy_count > 0 else "" + print(f" ttt_chunk [{ci+1}/{num_chunks}] bpb={rbpb:.6f} time={elapsed:.1f}s{ent_str}", flush=True) + + # -- Final reduction -- + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(byte_count, op=dist.ReduceOp.SUM) + + val_loss = (loss_sum / token_count).item() + val_bpb = val_loss / math.log(2.0) * (token_count.item() / byte_count.item()) + + # Restore all params to requires_grad=True + for p in base_model.parameters(): + p.requires_grad_(True) + base_model.train(False) + + log0(f"ttt:done val_loss={val_loss:.6f} val_bpb={val_bpb:.6f} " + f"elapsed={time.perf_counter() - t0:.1f}s") + return val_loss, val_bpb + + +# --- GPTQ-lite int6 quantization --- + +def _classify_param(name: str) -> str: + if "tok_emb" in name or "lm_head" in name: + return "embed" + if ".mlp." in name: + return "mlp" + if ".attn." in name or (".proj." in name and ".mlp." not in name): + return "attn" + return "other" +def quantize_int6_per_row(t: Tensor, clip_range: int = 31) -> tuple[Tensor, Tensor]: + t32 = t.float() + if t32.ndim == 2: + best_q, best_s, best_err = None, None, float('inf') + for pct in [0.9990, 0.9995, 0.9999, 0.99999, 1.0]: + if pct < 1.0: + row_clip = torch.quantile(t32.abs(), pct, dim=1) + else: + row_clip = t32.abs().amax(dim=1) + s = (row_clip / clip_range).clamp_min(1.0 / clip_range).to(torch.float16) + q = torch.clamp(torch.round(t32 / s.float()[:, None]), -clip_range, clip_range).to(torch.int8) + recon = q.float() * s.float()[:, None] + err = (t32 - recon).pow(2).mean().item() + if err < best_err: + best_q, best_s, best_err = q, s, err + return best_q, best_s + amax = t32.abs().max().item() + scale = torch.tensor(amax / clip_range if amax > 0 else 1.0, dtype=torch.float16) + q = torch.clamp(torch.round(t32 / scale.float()), -clip_range, clip_range).to(torch.int8) + return q, scale + +def _precompute_row_scales(W: Tensor, qmax: int) -> Tensor: + """Pre-compute optimal per-row scales by searching percentile clipping thresholds. 
+    PR #753-style: find best scale once, then run GPTQ once with fixed scales."""
+    t32 = W.float()
+    best_s = t32.abs().amax(dim=1) / float(qmax)
+    best_s = best_s.clamp_min(1.0 / float(qmax))
+    best_err = torch.full((t32.shape[0],), float("inf"), device=t32.device)
+    for pct in [0.9990, 0.9995, 0.9999, 0.99999, 1.0]:
+        if pct < 1.0:
+            row_clip = torch.quantile(t32.abs(), pct, dim=1)
+        else:
+            row_clip = t32.abs().amax(dim=1)
+        s = (row_clip / float(qmax)).clamp_min(1.0 / float(qmax))
+        q = torch.clamp(torch.round(t32 / s[:, None]), -qmax, qmax)
+        recon = q * s[:, None]
+        err = (t32 - recon).pow(2).mean(dim=1)
+        improved = err < best_err
+        best_s[improved] = s[improved]
+        best_err[improved] = err[improved]
+    return best_s
+
+def _gptq_block_sweep(W: Tensor, Hinv: Tensor, sf: Tensor, qmin: int, qmax: int, block_size: int) -> Tensor:
+    """Run one GPTQ block-column sweep with fixed per-row scales. Returns quantized Q."""
+    rows, cols = W.shape
+    Q = torch.zeros_like(W, dtype=torch.int8)
+    W_work = W.clone()
+    for i1 in range(0, cols, block_size):
+        i2 = min(i1 + block_size, cols)
+        count = i2 - i1
+        W1 = W_work[:, i1:i2].clone()
+        Q1 = torch.zeros(rows, count, dtype=torch.int8, device=W.device)
+        Err1 = torch.zeros(rows, count, device=W.device)
+        Hinv1 = Hinv[i1:i2, i1:i2]
+        for i in range(count):
+            w = W1[:, i]
+            d = Hinv1[i, i]
+            q = torch.clamp(torch.round(w / sf), qmin, qmax).to(torch.int8)
+            Q1[:, i] = q
+            err = (w - q.float() * sf) / d
+            W1[:, i:] -= err.unsqueeze(1) * Hinv1[i, i:].unsqueeze(0)
+            Err1[:, i] = err
+        Q[:, i1:i2] = Q1
+        if i2 < cols:
+            W_work[:, i2:] -= Err1 @ Hinv[i1:i2, i2:]
+    return Q
+
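+# Sanity sketch of the sweep above (our addition, not called by the pipeline):
+# with an identity Hessian the Cholesky rows carry no cross-column coupling, so
+# GPTQ degenerates to plain round-to-nearest under the same fixed scales.
+def _gptq_sweep_demo() -> None:
+    W = torch.randn(4, 8)
+    sf = _precompute_row_scales(W, qmax=31)
+    Q = _gptq_block_sweep(W, torch.eye(8), sf, -31, 31, block_size=4)
+    expected = torch.clamp(torch.round(W / sf[:, None]), -31, 31).to(torch.int8)
+    assert torch.equal(Q, expected)
+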
+def quantize_int6_gptq(weight, hessian=None, clip_range=31, block_size=128, damp_factor=0.01,
+                       col_order="desc", single_pass=False):
+    """Full GPTQ: Hessian-aware int6 quantization with Cholesky error compensation.
+    If hessian is None, falls back to percentile search."""
+    device = hessian.device if hessian is not None else weight.device
+    t32 = weight.float().to(device)
+    if t32.ndim != 2 or hessian is None:
+        return _quantize_int6_percentile(t32, clip_range)
+    rows, cols = t32.shape
+    H = hessian.float().clone()
+    diag = torch.diag(H)
+    dead = diag == 0
+    # Compute damp from non-dead columns only, then add to all diagonals
+    if dead.any():
+        mean_diag = torch.mean(diag[~dead]) if not dead.all() else torch.tensor(1.0, device=H.device)
+    else:
+        mean_diag = torch.mean(diag)
+    damp = damp_factor * mean_diag
+    diag_idx = torch.arange(cols, device=H.device)
+    H[diag_idx, diag_idx] += damp
+    # Reset dead diag to just damp (smallest value) so they sort LAST in descending actorder
+    H[dead, dead] = damp
+    perm = torch.argsort(torch.diag(H), descending=(col_order == "desc"))
+    inv_perm = torch.argsort(perm)
+    W = t32[:, perm].clone()
+    W[:, dead[perm]] = 0
+    H = H[perm][:, perm]
+    try:
+        Hinv = torch.linalg.cholesky(H)
+        Hinv = torch.cholesky_inverse(Hinv)
+        Hinv = torch.linalg.cholesky(Hinv, upper=True)
+    except torch.linalg.LinAlgError:
+        return _quantize_int6_percentile(t32, clip_range)
+    # Single-pass mode: pre-compute scales once, run GPTQ once
+    if single_pass:
+        best_s = _precompute_row_scales(W, clip_range)
+        sf = best_s.float()
+        Q = _gptq_block_sweep(W, Hinv, sf, -clip_range, clip_range, block_size)
+        Q = Q[:, inv_perm]
+        return Q.cpu(), best_s.to(torch.float16).cpu()
+    # Multi-pass: search 5 percentiles, run GPTQ per percentile, pick best by MSE
+    best_q = None
+    best_scale = None
+    best_err = float('inf')
+    for pct in [0.9990, 0.9995, 0.9999, 0.99999, 1.0]:
+        if pct < 1.0:
+            row_clip = torch.quantile(t32.abs(), pct, dim=1)
+        else:
+            row_clip = t32.abs().amax(dim=1)
+        s = (row_clip / clip_range).clamp_min(1.0 / clip_range).to(torch.float16)
+        sf = s.float()
+        Q = _gptq_block_sweep(W, Hinv, sf, -clip_range, clip_range, block_size)
+        recon = Q.float() * sf[:, None]
+        mse = (W - recon).pow(2).mean().item()
+        if mse < best_err:
+            best_q, best_scale, best_err = Q, s, mse
+    best_q = best_q[:, inv_perm]
+    return best_q.cpu(), best_scale.cpu()
+
+def _quantize_int6_percentile(t32, clip_range=31):
+    """Fallback: percentile search (for 1D or no-Hessian cases)."""
+    if t32.ndim == 2:
+        best_q, best_s, best_err = None, None, float('inf')
+        for pct in [0.9990, 0.9995, 0.9999, 0.99999, 1.0]:
+            if pct < 1.0:
+                row_clip = torch.quantile(t32.abs(), pct, dim=1)
+            else:
+                row_clip = t32.abs().amax(dim=1)
+            s = (row_clip / clip_range).clamp_min(1.0 / clip_range).to(torch.float16)
+            q = torch.clamp(torch.round(t32 / s.float()[:, None]), -clip_range, clip_range).to(torch.int8)
+            recon = q.float() * s.float()[:, None]
+            err = (t32 - recon).pow(2).mean().item()
+            if err < best_err:
+                best_q, best_s, best_err = q, s, err
+        return best_q, best_s
+    amax = t32.abs().max().item()
+    scale = torch.tensor(amax / clip_range if amax > 0 else 1.0, dtype=torch.float16)
+    q = torch.clamp(torch.round(t32 / scale.float()), -clip_range, clip_range).to(torch.int8)
+    return q, scale
+
+def _unbank_state_dict(sd: dict[str, Tensor], num_layers: int) -> dict[str, Tensor]:
+    """Convert 3D bank tensors into individual 2D tensors with standard names."""
+    out: dict[str, Tensor] = {}
+    n = num_layers
+    for name, tensor in sd.items():
+        if name == "qo_bank":
+            for i in range(n):
+                out[f"blocks.{i}.attn.c_q.weight"] = tensor[i]
+                out[f"blocks.{i}.attn.proj.weight"] = tensor[n + i]
+        elif name == "kv_bank":
+            for i in range(n):
+                out[f"blocks.{i}.attn.c_k.weight"] = tensor[i]
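+                # kv_bank packs K weights in rows [0, n) and V weights in rows
+                # [n, 2n), mirroring the Q/O split of qo_bank handled above.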
+ out[f"blocks.{i}.attn.c_v.weight"] = tensor[n + i] + elif name == "mlp_up_bank": + for i in range(n): + out[f"blocks.{i}.mlp.fc.weight"] = tensor[i] + elif name == "mlp_down_bank": + for i in range(n): + out[f"blocks.{i}.mlp.proj.weight"] = tensor[i] + else: + out[name] = tensor + return out + +def _rebank_state_dict(sd: dict[str, Tensor], num_layers: int, template_sd: dict[str, Tensor] | None = None) -> dict[str, Tensor]: + """Convert individual 2D tensors back into 3D bank tensors.""" + out: dict[str, Tensor] = {} + n = num_layers + consumed: set[str] = set() + qo_slices: list[Tensor | None] = [None] * (2 * n) + kv_slices: list[Tensor | None] = [None] * (2 * n) + up_slices: list[Tensor | None] = [None] * n + down_slices: list[Tensor | None] = [None] * n + _BANK_MAP = { + "attn.c_q.weight": (qo_slices, 0), + "attn.proj.weight": (qo_slices, n), + "attn.c_k.weight": (kv_slices, 0), + "attn.c_v.weight": (kv_slices, n), + "mlp.fc.weight": (up_slices, 0), + "mlp.proj.weight": (down_slices, 0), + } + for i in range(n): + for suffix, (target_list, offset) in _BANK_MAP.items(): + key = f"blocks.{i}.{suffix}" + if key in sd: + target_list[offset + i] = sd[key] + consumed.add(key) + # Stack into bank tensors — validate all slices are present + for bank_name, slices in [("qo_bank", qo_slices), ("kv_bank", kv_slices), ("mlp_up_bank", up_slices), ("mlp_down_bank", down_slices)]: + if not any(s is not None for s in slices): + continue + missing = [i for i, s in enumerate(slices) if s is None] + if missing: + raise ValueError(f"_rebank_state_dict: {bank_name} missing slice indices {missing}") + out[bank_name] = torch.stack(slices) # type: ignore[arg-type] + # Pass through non-banked params + for key, val in sd.items(): + if key not in consumed: + out[key] = val + return out + +# --- Non-banked model for Hessian collection --- +# This mirrors the unbanked state dict keys: blocks.{i}.attn.c_q/c_k/c_v/proj, blocks.{i}.mlp.fc/proj + +class _HessianAttn(nn.Module): + """Non-banked attention with CastedLinear layers for Hessian hooks.""" + def __init__(self, dim, num_heads, num_kv_heads, rope_base, qk_gain_init): + super().__init__() + self.num_heads, self.num_kv_heads = num_heads, num_kv_heads + self.head_dim = dim // num_heads + kv_dim = num_kv_heads * self.head_dim + self.c_q = CastedLinear(dim, dim, bias=False) + self.c_k = CastedLinear(dim, kv_dim, bias=False) + self.c_v = CastedLinear(dim, kv_dim, bias=False) + self.proj = CastedLinear(dim, dim, bias=False) + self.q_gain = nn.Parameter(torch.full((num_heads,), qk_gain_init, dtype=torch.float32)) + self.rope_dims = 0 + self.rotary = Rotary(self.head_dim, base=rope_base, train_seq_len=1024) + self.use_xsa = False + def _xsa_efficient(self, y, v): + B, T, H, D = y.shape; Hkv = v.size(-2); group = H // Hkv + y_g = y.reshape(B, T, Hkv, group, D) + vn = F.normalize(v, dim=-1).unsqueeze(-2) + proj = (y_g * vn).sum(dim=-1, keepdim=True) * vn + return (y_g - proj).reshape(B, T, H, D) + def forward(self, x, v_embed=None): + bsz, seqlen, dim = x.shape + q = self.c_q(x).reshape(bsz, seqlen, self.num_heads, self.head_dim) + k = self.c_k(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim) + v = self.c_v(x) + if v_embed is not None: + v = v + v_embed + v = v.reshape(bsz, seqlen, self.num_kv_heads, self.head_dim) + q = F.rms_norm(q, (q.size(-1),)) + k = F.rms_norm(k, (k.size(-1),)) + cos, sin = self.rotary(seqlen, x.device, q.dtype) + q = apply_rotary_emb(q, cos, sin, self.rope_dims) + k = apply_rotary_emb(k, cos, sin, self.rope_dims) + q = q * 
self.q_gain.to(dtype=q.dtype)[None, None, :, None] + y = F.scaled_dot_product_attention( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), + is_causal=True, enable_gqa=(self.num_kv_heads != self.num_heads), + ).transpose(1, 2) + if self.use_xsa: + y = self._xsa_efficient(y, v) + return self.proj(y.reshape(bsz, seqlen, dim)) + +class _HessianMLP(nn.Module): + """Non-banked MLP with CastedLinear layers for Hessian hooks.""" + def __init__(self, dim, mlp_mult, layer_idx=0, num_layers=1): + super().__init__() + self.fc = CastedLinear(dim, int(mlp_mult * dim), bias=False) + self.proj = CastedLinear(int(mlp_mult * dim), dim, bias=False) + self.neg_slope = MLP._LAYER_SLOPES[layer_idx] if layer_idx < len(MLP._LAYER_SLOPES) else 0.3 + def forward(self, x): + h = self.fc(x) + h = torch.where(h > 0, h * h, (self.neg_slope * self.neg_slope) * (h * h)) + return self.proj(h) + +class _HessianBlock(nn.Module): + def __init__(self, dim, num_heads, num_kv_heads, mlp_mult, rope_base, qk_gain_init, layer_idx=0, ln_scale=False, num_layers=1): + super().__init__() + self.attn_norm = RMSNorm() + self.mlp_norm = RMSNorm() + self.attn = _HessianAttn(dim, num_heads, num_kv_heads, rope_base, qk_gain_init) + self.mlp = _HessianMLP(dim, mlp_mult, layer_idx=layer_idx, num_layers=num_layers) + self.attn_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.mlp_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.resid_mix = nn.Parameter(torch.stack((torch.ones(dim), torch.zeros(dim))).float()) + self.ln_scale_factor = 1.0 / math.sqrt(layer_idx + 1) if ln_scale else 1.0 + def forward(self, x, x0, v_embed=None): + mix = self.resid_mix.to(dtype=x.dtype) + x_in = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + attn_out = self.attn(self.attn_norm(x_in) * self.ln_scale_factor, v_embed=v_embed) + x_out = x_in + self.attn_scale.to(dtype=x_in.dtype)[None, None, :] * attn_out + x_out = x_out + self.mlp_scale.to(dtype=x_out.dtype)[None, None, :] * self.mlp(self.mlp_norm(x_out) * self.ln_scale_factor) + return x_out + +class _HessianGPT(nn.Module): + """Non-banked GPT model matching unbanked state dict keys for Hessian collection.""" + def __init__(self, vocab_size, num_layers, model_dim, num_heads, num_kv_heads, + mlp_mult, tie_embeddings, logit_softcap, rope_base, qk_gain_init, + ngram_buckets=0, ngram_heads=2, ngram_orders=2, ngram_dim_per_head=32, + xsa_last_n=0, rope_dims=0, ln_scale=False, + ve_enabled=False, ve_dim=128, ve_layers="9,10"): + super().__init__() + self.tie_embeddings = tie_embeddings + self.logit_softcap = logit_softcap + self.num_layers = num_layers + self.tok_emb = nn.Embedding(vocab_size, model_dim) + self.bigram = EngramLite(ngram_buckets, ngram_heads, ngram_orders, ngram_dim_per_head, model_dim) if ngram_buckets > 0 else None + self.smear = SmearGate(model_dim) + self.num_encoder_layers = num_layers // 2 + self.num_decoder_layers = num_layers - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = nn.Parameter(torch.ones(self.num_skip_weights, model_dim, dtype=torch.float32)) + self.skip_gates = nn.Parameter(torch.zeros(self.num_skip_weights, model_dim, dtype=torch.float32)) + self.blocks = nn.ModuleList([ + _HessianBlock(model_dim, num_heads, num_kv_heads, mlp_mult, rope_base, qk_gain_init, + layer_idx=i, ln_scale=ln_scale, num_layers=num_layers) + for i in range(num_layers) + ]) + if rope_dims > 0: + head_dim = model_dim // num_heads + for block in self.blocks: + block.attn.rope_dims = 
rope_dims + block.attn.rotary = Rotary(head_dim, base=rope_base, train_seq_len=1024, rope_dims=rope_dims) + if xsa_last_n > 0: + for i in range(max(0, num_layers - xsa_last_n), num_layers): + self.blocks[i].attn.use_xsa = True + kv_dim = num_kv_heads * (model_dim // num_heads) + self.ve_layer_indices = [int(x) for x in ve_layers.split(",") if x.strip()] if ve_enabled else [] + if self.ve_layer_indices: + self.ve_shared = ValueEmbedding(vocab_size, ve_dim, kv_dim) + self.ve_layer_scales = nn.ParameterList([nn.Parameter(torch.ones(1, dtype=torch.float32)) for _ in self.ve_layer_indices]) + else: + self.ve_shared = None + self.ve_layer_scales = nn.ParameterList() + self.final_norm = RMSNorm() + self.lm_head = None if tie_embeddings else CastedLinear(model_dim, vocab_size, bias=False) + def _get_ve(self, layer_idx, input_ids, ve_cache): + if self.ve_shared is None or layer_idx not in self.ve_layer_indices: + return None + if 've' not in ve_cache: + ve_cache['ve'] = self.ve_shared(input_ids) + ve_idx = self.ve_layer_indices.index(layer_idx) + return ve_cache['ve'] * self.ve_layer_scales[ve_idx].to(dtype=ve_cache['ve'].dtype) + def forward(self, input_ids, target_ids): + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips = [] + ve_cache = {} + for i in range(self.num_encoder_layers): + ve = self._get_ve(i, input_ids, ve_cache) + x = self.blocks[i](x, x0, v_embed=ve) + skips.append(x) + for i in range(self.num_decoder_layers): + bi = self.num_encoder_layers + i + if skips: + skip = skips.pop() + g = torch.sigmoid(self.skip_gates[i].to(dtype=x.dtype))[None, None, :] + scaled_skip = self.skip_weights[i].to(dtype=x.dtype)[None, None, :] * skip + x = torch.lerp(scaled_skip, x, g) + ve = self._get_ve(bi, input_ids, ve_cache) + x = self.blocks[bi](x, x0, v_embed=ve) + x = self.final_norm(x) + x_flat = x.reshape(-1, x.size(-1)) + targets = target_ids.reshape(-1) + logits_proj = F.linear(x_flat, self.tok_emb.weight) if self.tie_embeddings else self.lm_head(x_flat) + logits = self.logit_softcap * torch.tanh(logits_proj / self.logit_softcap) + return F.cross_entropy(logits.float(), targets, reduction="mean") + +def collect_hessians(hessian_model, train_loader, args, device, grad_accum_steps, num_batches=256): + """Run calibration batches through a non-banked model, collecting H = X^T X for each CastedLinear.""" + hessians = {} + hooks = [] + for name, module in hessian_model.named_modules(): + if isinstance(module, CastedLinear): + param_name = name + ".weight" + cols = module.weight.shape[1] + hessians[param_name] = torch.zeros(cols, cols, dtype=torch.float32, device=device) + def make_hook(pname): + def hook_fn(mod, inp, out): + x = inp[0].detach() + if x.ndim == 3: + x = x.reshape(-1, x.shape[-1]) + hessians[pname] += (x.T @ x).float() # bf16 matmul, fp32 accumulate + return hook_fn + h = module.register_forward_hook(make_hook(param_name)) + hooks.append(h) + hessian_model.eval() + with torch.inference_mode(), torch.autocast(device_type="cuda", dtype=torch.bfloat16): + for _ in range(num_batches): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + hessian_model(x, y) + for h in hooks: + h.remove() + ws = dist.get_world_size() if dist.is_available() and dist.is_initialized() else 1 + for name in hessians: + H = hessians[name] + if ws > 1: + dist.all_reduce(H, op=dist.ReduceOp.SUM) + H /= (num_batches * ws) + hessians[name] = H.cpu() + 
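# Each accumulated H is proportional to the empirical second moment E[x x^T]
+    # of the layer's inputs over the calibration stream, so GPTQ's per-row
+    # objective (w - w_q)^T H (w - w_q) measures quantization error by its
+    # effect on the layer's outputs rather than on raw weight values.
+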
hessian_model.train() + return hessians + +# --- Mixed precision bit allocation --- + +def _bits_to_range(bits: int) -> tuple[int, int]: + """Convert bit-width to (qmin, qmax).""" + return -(1 << (bits - 1)), (1 << (bits - 1)) - 1 + +# Dynamic mixed-precision constants (tune after observing estimate vs actual) +_MP_BYTES_PER_PARAM_INT5 = 0.46 # estimated compressed bytes per quantized param at int5 +_MP_COST_PER_EXTRA_BIT = 0.24 # additional compressed bytes per param per extra bit above int5 +_MP_NON_WEIGHT_COMPRESS = 0.55 # compression ratio for non-quantized tensors (fp16 embeds, scales) +_MP_PRUNE_HEADROOM_FRAC = 0.02 # reserve 2% of byte budget so selective pruning only trims a small % + + +def _allocate_bits_mixed( + hessian_map: dict[str, Tensor], + state_dict: dict[str, Tensor], + target_bytes: int = 16_000_000, + code_bytes: int = 0, +) -> tuple[dict[str, int], list[tuple[str, int, float]], dict[str, float]]: + """Dynamically allocate int5-int7 per tensor group based on Hessian sensitivity + and compressed artifact size budget. + + Greedy: promotes most-sensitive groups first (top group -> int7, rest -> int6) + until estimated compressed size approaches target_bytes minus pruning headroom. + Returns (tensor_name -> bits, [(group_key, bits, sensitivity)], estimate_info).""" + # 1. Group tensors by (layer, type) -- compute sensitivity and numel per group + group_traces: dict[str, list[float]] = {} + group_numel: dict[str, int] = {} + tensor_to_group: dict[str, str] = {} + for name, H in hessian_map.items(): + trace_val = float(torch.trace(H).item()) / H.shape[0] + if not name.startswith("blocks."): + continue + dot2 = name.index(".", 7) + layer_idx = int(name[7:dot2]) + gtype = "attn" if ".attn." in name else "mlp" if ".mlp." in name else "other" + gkey = f"layer.{layer_idx}.{gtype}" + group_traces.setdefault(gkey, []).append(trace_val) + tensor_to_group[name] = gkey + w = state_dict.get(name) + if w is not None: + group_numel[gkey] = group_numel.get(gkey, 0) + w.numel() + group_sensitivity = {k: sum(v) / len(v) for k, v in group_traces.items()} + ranked = sorted(group_sensitivity.items(), key=lambda x: x[1], reverse=True) + + # 2. Estimate baseline compressed size (all quantized weights at int5) + total_quant_numel = sum(group_numel.values()) + non_weight_raw = sum( + t.numel() * t.element_size() for name, t in state_dict.items() + if name not in hessian_map + ) + base_estimate = ( + code_bytes + + int(non_weight_raw * _MP_NON_WEIGHT_COMPRESS) + + int(total_quant_numel * _MP_BYTES_PER_PARAM_INT5) + ) + budget = int(target_bytes * (1.0 - _MP_PRUNE_HEADROOM_FRAC)) - base_estimate + + # Early exit: if base estimate already exceeds budget, return all int5 + if budget <= 0: + bit_allocation = {tname: 5 for tname in tensor_to_group} + log_entries = [(gkey, 5, group_sensitivity[gkey]) for gkey, _ in ranked] + estimate_info = { + "base_mb": base_estimate / 1e6, "promoted_mb": 0.0, + "total_mb": base_estimate / 1e6, "budget_mb": target_bytes / 1e6, + "headroom_kb": 0.0, "prune_room_bytes": target_bytes - base_estimate, + "warning": "budget_exhausted", + } + return bit_allocation, log_entries, estimate_info + + # 3. Greedy promotion: most sensitive first + # Pass 1: try int7 for top group. Pass 2: fill remaining with int6. 
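+    # Worked example of the greedy budget arithmetic (illustrative numbers,
+    # derived only from the constants defined above): a 1,000,000-param group
+    # promoted int5 -> int6 is charged int(1e6 * _MP_COST_PER_EXTRA_BIT)
+    # = 240,000 estimated extra bytes; int5 -> int7 doubles that to 480,000.
+    # The integer grids themselves come from _bits_to_range:
+    #   _bits_to_range(5) == (-16, 15)
+    #   _bits_to_range(6) == (-32, 31)
+    #   _bits_to_range(7) == (-64, 63)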
+ group_bits: dict[str, int] = {gkey: 5 for gkey, _ in ranked} + estimated_extra = 0 + if ranked: + top_gkey = ranked[0][0] + top_numel = group_numel.get(top_gkey, 0) + cost_int7 = int(top_numel * _MP_COST_PER_EXTRA_BIT * 2) + cost_int6 = int(top_numel * _MP_COST_PER_EXTRA_BIT * 1) + if top_numel > 0 and cost_int7 <= budget: + group_bits[top_gkey] = 7 + estimated_extra += cost_int7 + elif top_numel > 0 and cost_int6 <= budget: + group_bits[top_gkey] = 6 # int7 doesn't fit, fall back to int6 + estimated_extra += cost_int6 + for gkey, _sens in ranked: + if group_bits[gkey] > 5: + continue # already promoted + numel = group_numel.get(gkey, 0) + if numel == 0: + continue + cost = int(numel * _MP_COST_PER_EXTRA_BIT) # 1 extra bit for int6 + if estimated_extra + cost <= budget: + group_bits[gkey] = 6 + estimated_extra += cost + # don't break -- smaller groups later in the list may still fit + + # 4. Map back to tensor names + bit_allocation: dict[str, int] = {} + for tname, gkey in tensor_to_group.items(): + bit_allocation[tname] = group_bits[gkey] + log_entries = [(gkey, group_bits[gkey], group_sensitivity[gkey]) for gkey, _ in ranked] + + # 5. Size estimate metadata for caller to log via log0() + est_total = base_estimate + estimated_extra + headroom_bytes = int(target_bytes * _MP_PRUNE_HEADROOM_FRAC) + estimate_info = { + "base_mb": base_estimate / 1e6, + "promoted_mb": estimated_extra / 1e6, + "total_mb": est_total / 1e6, + "budget_mb": target_bytes / 1e6, + "headroom_kb": headroom_bytes / 1e3, + "prune_room_bytes": target_bytes - est_total - headroom_bytes, + } + return bit_allocation, log_entries, estimate_info + + +def mixed_quantize_int6(state_dict: dict[str, Tensor], int6_cats: set[str], hessians: dict[str, Tensor] | None = None, + bit_allocation: dict[str, int] | None = None, gptq_damp: float = 0.01, + block_size: int = 128, col_order: str = "desc", single_pass: bool = False): + result: dict[str, Tensor] = {} + meta: dict[str, object] = {} + for name, tensor in state_dict.items(): + t = tensor.detach().cpu().contiguous() + cat = _classify_param(name) + if not t.is_floating_point() or t.numel() <= 65536 or name == "tok_emb.weight": + result[name] = t.to(torch.float16) if t.is_floating_point() else t + meta[name] = "passthrough" + continue + if any(p in name for p in CONTROL_TENSOR_NAME_PATTERNS): + result[name] = t.float() + meta[name] = "passthrough_ctrl" + continue + if cat in int6_cats and t.ndim >= 1: + # Use mixed precision bit allocation if available + bits = bit_allocation.get(name, 6) if bit_allocation else 6 + qmin, qmax = _bits_to_range(bits) + cr = qmax + H = hessians.get(name) if hessians else None + if H is not None: + q, s = quantize_int6_gptq(t, hessian=H, clip_range=cr, block_size=block_size, + damp_factor=gptq_damp, col_order=col_order, single_pass=single_pass) + else: + q, s = quantize_int6_per_row(t, clip_range=cr) + result[name + ".q"] = q + result[name + ".scale"] = s + meta[name] = {"type": f"int{bits}"} + else: + q, s = quantize_float_tensor(t) + result[name + ".q"] = q + result[name + ".scale"] = s + meta[name] = {"type": "int8"} + return result, meta +def dequantize_mixed_int6(result: dict[str, Tensor], meta: dict[str, object], + template_sd: dict[str, Tensor]) -> dict[str, Tensor]: + out: dict[str, Tensor] = {} + for name, orig in template_sd.items(): + info = meta.get(name) + if info is None: + continue + orig_dtype = orig.dtype + if info in ("passthrough", "passthrough_ctrl", "passthrough_fp16"): + t = result[name] + if t.dtype == torch.float16 and orig_dtype 
in (torch.float32, torch.bfloat16): + t = t.to(orig_dtype) + out[name] = t + continue + q, s = result[name + ".q"], result[name + ".scale"] + if s.ndim > 0: + out[name] = (q.float() * s.float().view(q.shape[0], *([1] * (q.ndim - 1)))).to(orig_dtype) + else: + out[name] = (q.float() * float(s.item())).to(orig_dtype) + return out + +# --- Training --- + +def main() -> None: + code = Path(__file__).read_text(encoding="utf-8") + args = Hyperparameters() + rank = int(os.environ.get("RANK", "0")) + world_size = int(os.environ.get("WORLD_SIZE", "1")) + local_rank = int(os.environ.get("LOCAL_RANK", "0")) + distributed = "RANK" in os.environ and "WORLD_SIZE" in os.environ and world_size > 1 + if world_size <= 0: + raise ValueError(f"WORLD_SIZE must be positive, got {world_size}") + if 8 % world_size != 0: + raise ValueError(f"WORLD_SIZE={world_size} must divide 8 so grad_accum_steps stays integral") + grad_accum_steps = 8 // world_size + grad_scale = 1.0 / grad_accum_steps + if not torch.cuda.is_available(): + raise RuntimeError("CUDA is required") + device = torch.device("cuda", local_rank) + torch.cuda.set_device(device) + if distributed: + dist.init_process_group(backend="nccl", device_id=device) + dist.barrier() + master_process = rank == 0 + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + from torch.backends.cuda import enable_cudnn_sdp, enable_flash_sdp, enable_math_sdp, enable_mem_efficient_sdp + enable_cudnn_sdp(True) + enable_flash_sdp(True) + enable_mem_efficient_sdp(True) + enable_math_sdp(True) + logfile = None + if master_process: + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{args.run_id}.txt" + print(logfile) + def log0(msg: str, console: bool = True) -> None: + if not master_process: + return + if console: + print(msg) + if logfile is not None: + with open(logfile, "a", encoding="utf-8") as f: + print(msg, file=f) + log0(code, console=False) + log0("=" * 100, console=False) + log0(f"Running Python {sys.version}", console=False) + log0(f"Running PyTorch {torch.__version__}", console=False) + log0( + subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=False).stdout, + console=False, + ) + log0("=" * 100, console=False) + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + if not args.tokenizer_path.endswith(".model"): + raise ValueError(f"Script only setup for SentencePiece .model file: {args.tokenizer_path}") + sp = spm.SentencePieceProcessor(model_file=args.tokenizer_path) + if int(sp.vocab_size()) != args.vocab_size: + raise ValueError( + f"VOCAB_SIZE={args.vocab_size} does not match tokenizer vocab_size={int(sp.vocab_size())}" + ) + dataset_dir = Path(args.data_path).resolve() + actual_train_files = len(list(dataset_dir.glob("fineweb_train_*.bin"))) + effective_eval_seq_len = args.eval_seq_len if args.eval_seq_len > 0 else args.train_seq_len + val_seq_len = max(args.train_seq_len, effective_eval_seq_len) + val_tokens = load_validation_tokens(args.val_files, val_seq_len) + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut = build_sentencepiece_luts( + sp, args.vocab_size, device + ) + log0(f"val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path={args.tokenizer_path}") + log0(f"train_loader:dataset:{dataset_dir.name} train_shards:{actual_train_files}") + log0(f"val_loader:shards pattern={args.val_files} tokens:{val_tokens.numel() - 1}") + if not args.load_snapshot: + CastedLinear._qat_enabled = False # 
late_qat enables mid-run when LR scale drops + CastedLinear._qat_soft_round = False + CastedLinear._qat_clip_pct = args.qat_clip_pct + base_model = GPT( + vocab_size=args.vocab_size, + num_layers=args.num_layers, + model_dim=args.model_dim, + num_heads=args.num_heads, + num_kv_heads=args.num_kv_heads, + mlp_mult=args.mlp_mult, + tie_embeddings=args.tie_embeddings, + tied_embed_init_std=args.tied_embed_init_std, + logit_softcap=args.logit_softcap, + rope_base=args.rope_base, + qk_gain_init=args.qk_gain_init, + ngram_buckets=args.ngram_buckets, + ngram_heads=args.ngram_heads, + ngram_orders=args.ngram_orders, + ngram_dim_per_head=args.ngram_dim_per_head, + xsa_last_n=args.xsa_last_n, + rope_dims=args.rope_dims, + ln_scale=args.ln_scale, + ve_enabled=args.ve_enabled, + ve_dim=args.ve_dim, + ve_layers=args.ve_layers, + ).to(device).bfloat16() + # Banks stay FP32 (like CastedLinear weights), cast to BF16 in forward + base_model.qo_bank.data = base_model.qo_bank.data.float() + base_model.kv_bank.data = base_model.kv_bank.data.float() + base_model.mlp_up_bank.data = base_model.mlp_up_bank.data.float() + base_model.mlp_down_bank.data = base_model.mlp_down_bank.data.float() + for module in base_model.modules(): + if isinstance(module, CastedLinear): + module.float() + restore_low_dim_params_to_fp32(base_model) + # No DDP -- Parallel Muon handles bank grad communication via reduce-scatter, + # and non-bank grads are manually all-reduced before Adam steps. + # Compile NS functions for ~2x speedup on the orthogonalization hot path + global zeropower_via_newtonschulz5, _post_ns_normalize + zeropower_via_newtonschulz5 = torch.compile(zeropower_via_newtonschulz5) + _post_ns_normalize = torch.compile(_post_ns_normalize) + compiled_model = torch.compile(base_model, dynamic=False, fullgraph=True) + model = compiled_model + + # Optimizer split: + # - 4 parameter banks -> Muon (batched Newton-Schulz) + # - token embedding -> Adam (with embed_beta1) + # - scalars/control tensors -> Adam + # - EngramLite proj -> Muon (small 2D matrix) + matrix_params = [ + base_model.qo_bank, base_model.kv_bank, + base_model.mlp_up_bank, base_model.mlp_down_bank, + ] + block_named_params = list(base_model.blocks.named_parameters()) + scalar_params = [ + p + for name, p in block_named_params + if p.ndim < 2 or any(pattern in name for pattern in CONTROL_TENSOR_NAME_PATTERNS) + ] + if base_model.skip_weights.numel() > 0: + scalar_params.append(base_model.skip_weights) + if hasattr(base_model, 'skip_gates') and base_model.skip_gates.numel() > 0: + scalar_params.append(base_model.skip_gates) + scalar_params.append(base_model.smear.gate) + if base_model.bigram is not None: + scalar_params.append(base_model.bigram.ngram_gate) + token_lr = args.tied_embed_lr if args.tie_embeddings else args.embed_lr + tok_params = [{"params": [base_model.tok_emb.weight], "lr": token_lr, "base_lr": token_lr}] + if base_model.bigram is not None: + tok_params.append({"params": [base_model.bigram.embed.weight], "lr": token_lr, "base_lr": token_lr}) + ve_proj_weight = None + if base_model.ve_shared is not None: + tok_params.append({"params": [base_model.ve_shared.embed.weight], "lr": token_lr, "base_lr": token_lr}) + if base_model.ve_shared.proj is not None: + ve_proj_weight = base_model.ve_shared.proj.weight # routed to Muon below + scalar_params.append(base_model.ve_shared.scale) + for s in base_model.ve_layer_scales: + scalar_params.append(s) + optimizer_tok = torch.optim.AdamW( + tok_params, + betas=(args.embed_beta1, args.beta2), + eps=args.adam_eps, + 
weight_decay=args.adam_wd, + fused=True, + ) + optimizer_muon = Muon( + matrix_params, + lr=args.matrix_lr, + momentum=args.muon_momentum, + backend_steps=args.muon_backend_steps, + weight_decay=args.muon_wd, + post_norm=args.muon_post_norm, + ) + for group in optimizer_muon.param_groups: + group["base_lr"] = args.matrix_lr + optimizer_scalar = torch.optim.AdamW( + [{"params": scalar_params, "lr": args.scalar_lr, "base_lr": args.scalar_lr}], + betas=(args.beta1, args.beta2), + eps=args.adam_eps, + weight_decay=args.adam_wd, + fused=True, + ) + # Non-bank params that need manual all-reduce (replicated across GPUs) + replicated_params = list(optimizer_tok.param_groups[0]["params"]) + for pg in optimizer_tok.param_groups[1:]: + replicated_params.extend(pg["params"]) + replicated_params.extend(scalar_params) + + optimizers: list[torch.optim.Optimizer] = [optimizer_tok, optimizer_muon, optimizer_scalar] + + # EngramLite proj -> Muon (small 2D matrix, not banked) + if base_model.bigram is not None and base_model.bigram.proj is not None: + optimizer_bigram_proj = Muon( + [base_model.bigram.proj.weight], + lr=args.matrix_lr, + momentum=args.muon_momentum, + backend_steps=args.muon_backend_steps, + weight_decay=args.muon_wd, + post_norm=args.muon_post_norm, + ) + for group in optimizer_bigram_proj.param_groups: + group["base_lr"] = args.matrix_lr + optimizers.append(optimizer_bigram_proj) + replicated_params.append(base_model.bigram.proj.weight) + + # VE proj -> Muon (2D matrix, benefits from Newton-Schulz) + if ve_proj_weight is not None: + optimizer_ve_proj = Muon( + [ve_proj_weight], + lr=args.matrix_lr, + momentum=args.muon_momentum, + backend_steps=args.muon_backend_steps, + weight_decay=args.muon_wd, + post_norm=args.muon_post_norm, + ) + for group in optimizer_ve_proj.param_groups: + group["base_lr"] = args.matrix_lr + optimizers.append(optimizer_ve_proj) + replicated_params.append(ve_proj_weight) + + optimizer_head = None + if base_model.lm_head is not None: + optimizer_head = torch.optim.Adam( + [{"params": [base_model.lm_head.weight], "lr": args.head_lr, "base_lr": args.head_lr}], + betas=(args.head_beta1, args.beta2), + eps=args.adam_eps, + fused=True, + ) + replicated_params.append(base_model.lm_head.weight) + optimizers.append(optimizer_head) + + # Pre-build flat buffer for coalesced all-reduce of non-bank grads (saves ~0.5-1ms/step on multi-GPU) + _nb_grad_numel = [p.numel() for p in replicated_params] + _nb_grad_buf = torch.zeros(sum(_nb_grad_numel), device=device, dtype=torch.float32) if distributed else None + + n_params = sum(p.numel() for p in base_model.parameters()) + log0(f"model_params:{n_params}") + xsa_layers = [i for i, b in enumerate(base_model.blocks) if b.attn.use_xsa] + log0(f"XSA:last_{args.xsa_last_n} active_layers:{xsa_layers}") + log0(f"world_size:{world_size} grad_accum_steps:{grad_accum_steps}") + log0("sdp_backends:cudnn=True flash=True mem_efficient=True math=True") + log0(f"attention_mode:gqa num_heads:{args.num_heads} num_kv_heads:{args.num_kv_heads}") + log0( + f"tie_embeddings:{args.tie_embeddings} embed_lr:{token_lr} " + f"head_lr:{args.head_lr if base_model.lm_head is not None else 0.0} " + f"matrix_lr:{args.matrix_lr} scalar_lr:{args.scalar_lr}" + ) + log0( + f"train_batch_tokens:{args.train_batch_tokens} train_seq_len:{args.train_seq_len} " + f"iterations:{args.iterations} warmup_steps:{args.warmup_steps} " + f"max_wallclock_seconds:{args.max_wallclock_seconds:.3f}" + ) + log0(f"muon:post_norm:{args.muon_post_norm} embed_beta1:{args.embed_beta1} 
head_beta1:{args.head_beta1}") + log0(f"seed:{args.seed}") + train_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + def zero_grad_all() -> None: + for opt in optimizers: + opt.zero_grad(set_to_none=True) + max_wallclock_ms = 1000.0 * args.max_wallclock_seconds if args.max_wallclock_seconds > 0 else None + # Reserve time for GPTQ calibration so LR warmdown, QAT, and wallclock cap all see the effective budget + if max_wallclock_ms is not None and args.gptq_calib_batches > 0 and args.gptq_reserve_ms > 0: + max_wallclock_ms -= args.gptq_reserve_ms + log0(f"gptq:reserving {args.gptq_reserve_ms:.0f}ms from training budget (effective cap: {max_wallclock_ms:.0f}ms)") + def lr_mul(step: int, elapsed_ms: float) -> float: + if args.warmdown_iters <= 0: + return 1.0 + if max_wallclock_ms is None: + warmdown_start = max(args.iterations - args.warmdown_iters, 0) + return max((args.iterations - step) / max(args.warmdown_iters, 1), args.lr_floor) if warmdown_start <= step < args.iterations else 1.0 + step_ms = elapsed_ms / max(step, 1) + warmdown_ms = min(args.warmdown_iters * step_ms, max_wallclock_ms) + remaining_ms = max(max_wallclock_ms - elapsed_ms, 0.0) + return max(remaining_ms / max(warmdown_ms, 1e-9), args.lr_floor) if remaining_ms <= warmdown_ms else 1.0 + if args.warmup_steps > 0: + initial_model_state = {name: tensor.detach().cpu().clone() for name, tensor in base_model.state_dict().items()} + initial_optimizer_states = [copy.deepcopy(opt.state_dict()) for opt in optimizers] + model.train() + for warmup_step in range(args.warmup_steps): + zero_grad_all() + for micro_step in range(grad_accum_steps): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True): + warmup_loss = model(x, y) + (warmup_loss * grad_scale).backward() + # All-reduce all grads for warmup (simple, not optimized) + if distributed: + for p in base_model.parameters(): + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + zero_grad_all() + if args.warmup_steps <= 20 or (warmup_step + 1) % 10 == 0 or warmup_step + 1 == args.warmup_steps: + log0(f"warmup_step:{warmup_step + 1}/{args.warmup_steps}") + # Pre-compile QAT graph to avoid mid-training recompile stall + if args.late_qat: + CastedLinear._qat_enabled = True + CastedLinear._qat_soft_round = bool(args.soft_round_qat) + if CastedLinear._qat_soft_round: + CastedLinear._qat_soft_alpha = torch.tensor(1.0, device=device) + log0("warmup:pre-compiling QAT graph...") + zero_grad_all() + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True): + warmup_loss = model(x, y) + (warmup_loss * grad_scale).backward() + zero_grad_all() + CastedLinear._qat_enabled = False + CastedLinear._qat_soft_round = False + CastedLinear._qat_soft_alpha = None # type: ignore[assignment] + log0("warmup:QAT graph cached") + base_model.load_state_dict(initial_model_state, strict=True) + for opt, state in zip(optimizers, initial_optimizer_states, strict=True): + opt.load_state_dict(state) + # Reset Muon shard_mom buffers — not captured by state_dict() + if optimizer_muon._built: + for m in optimizer_muon._bank_meta: + m['shard_mom'].zero_() + zero_grad_all() + train_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + swa_state: dict[str, Tensor] | None = None + 
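+    # The two weight-averaging schemes used below, side by side (a sketch of
+    # the update rules implemented later in the loop, not additional logic):
+    #   SWA: swa_state[n] += p  every swa_every steps once the LR scale drops
+    #        below swa_threshold; applied at the end as swa_state[n] / swa_count.
+    #   EMA: ema[n] <- lerp(ema[n], p, 1 - ema_decay)
+    #              == ema_decay * ema[n] + (1 - ema_decay) * p, every step.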
swa_count = 0 + ema_state: dict[str, Tensor] | None = None + _ema_fp32_bufs: list[Tensor] = [] + _ema_fp32_srcs: list[Tensor] = [] + _ema_cast_pairs: list[tuple[Tensor, nn.Parameter]] = [] + if args.ema_enabled: + ema_state = {name: p.data.detach().float().clone() for name, p in base_model.named_parameters()} + for name, p in base_model.named_parameters(): + buf = ema_state[name] + if p.data.dtype == torch.float32: + _ema_fp32_bufs.append(buf) + _ema_fp32_srcs.append(p.data) + else: + _ema_cast_pairs.append((buf, p)) + training_time_ms = 0.0 + _qat_start_step = 0 + _qat_total_steps = 0 + _train_loss = torch.zeros((), device=device) + _cap_tensor = torch.zeros(1, device=device, dtype=torch.int32) if distributed else None + stop_after_step: int | None = None + torch.cuda.synchronize() + t0 = time.perf_counter() + step = 0 + while True: + last_step = step == args.iterations or (stop_after_step is not None and step >= stop_after_step) + if last_step: + torch.cuda.synchronize() + training_time_ms += 1000.0 * (time.perf_counter() - t0) + log0( + f"step:{step}/{args.iterations} " + f"train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms / max(step, 1):.2f}ms" + ) + if stop_after_step is not None and step < args.iterations: + log0( + f"stopping_early: wallclock_cap train_time:{training_time_ms:.0f}ms " + f"step:{step}/{args.iterations}" + ) + break + elapsed_ms = training_time_ms + 1000.0 * (time.perf_counter() - t0) + scale = lr_mul(step, elapsed_ms) + if args.late_qat and args.qat_threshold > 0 and not CastedLinear._qat_enabled and scale < args.qat_threshold: + CastedLinear._qat_enabled = True + _qat_start_step = step + # Estimate remaining QAT steps using best available info + if stop_after_step is not None: + _qat_total_steps = max(stop_after_step - step, 1) + elif max_wallclock_ms is not None: + _step_ms = elapsed_ms / max(step, 1) + _qat_total_steps = max(int((max_wallclock_ms - elapsed_ms) / max(_step_ms, 1e-9)), 1) + else: + _qat_total_steps = max(args.iterations - step, 1) + if args.soft_round_qat: + CastedLinear._qat_soft_round = True + CastedLinear._qat_soft_alpha = torch.tensor(1.0, device=device) + log0(f"late_qat:enabled step:{step} scale:{scale:.4f} soft_round={args.soft_round_qat} est_steps:{_qat_total_steps}") + elif CastedLinear._qat_soft_round: + # Soft-round alpha ramp: 1→16 over QAT phase (tensor value, no torch.compile recompiles) + # Re-estimate if wallclock cap set after QAT started (tighter bound) + if stop_after_step is not None: + _qat_total_steps = max(stop_after_step - _qat_start_step, 1) + qat_progress = min((step - _qat_start_step) / _qat_total_steps, 1.0) + with torch.no_grad(): + CastedLinear._qat_soft_alpha.fill_(1.0 + 15.0 * qat_progress) + zero_grad_all() + _train_loss.zero_() + for micro_step in range(grad_accum_steps): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True): + loss = model(x, y) + _train_loss += loss.detach() + (loss * grad_scale).backward() + _train_loss /= grad_accum_steps + frac = min(step / args.muon_momentum_warmup_steps, 1.0) if args.muon_momentum_warmup_steps > 0 else 1.0 + muon_momentum = (1 - frac) * args.muon_momentum_warmup_start + frac * args.muon_momentum + for group in optimizer_muon.param_groups: + group["momentum"] = muon_momentum + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["base_lr"] * scale + if args.grad_clip_norm > 0: + 
torch.nn.utils.clip_grad_norm_(base_model.parameters(), args.grad_clip_norm) + # === 3-phase overlapped optimizer step === + # Phase 1: Launch async reduce-scatter for banks (biggest first) + optimizer_muon.launch_reduce_scatters() + # Phase 2: All-reduce non-bank grads (coalesced) + step Adam (while bank RS is in-flight) + if distributed: + offset = 0 + for p, n in zip(replicated_params, _nb_grad_numel): + if p.grad is not None: + _nb_grad_buf[offset:offset + n].copy_(p.grad.reshape(-1)) + else: + _nb_grad_buf[offset:offset + n].zero_() + offset += n + dist.all_reduce(_nb_grad_buf, op=dist.ReduceOp.AVG) + offset = 0 + for p, n in zip(replicated_params, _nb_grad_numel): + if p.grad is not None: + p.grad.copy_(_nb_grad_buf[offset:offset + n].reshape_as(p.grad)) + offset += n + optimizer_tok.step() + optimizer_scalar.step() + # Step non-bank Muon optimizers (e.g., bigram proj) + for opt in optimizers: + if opt is not optimizer_muon and opt is not optimizer_tok and opt is not optimizer_scalar: + if optimizer_head is not None and opt is optimizer_head: + opt.step() + elif isinstance(opt, Muon): + opt.step() + # Phase 3: Wait for RS, local NS5, all-gather (banks processed last) + optimizer_muon.step() + zero_grad_all() + # EMA update — foreach for fp32 params, per-element cast for bf16 embeddings + if ema_state is not None: + _ema_weight = 1.0 - args.ema_decay + with torch.no_grad(): + if _ema_fp32_bufs: + torch._foreach_lerp_(_ema_fp32_bufs, _ema_fp32_srcs, _ema_weight) + for ema_buf, p in _ema_cast_pairs: + ema_buf.lerp_(p.data.float(), _ema_weight) + step += 1 + approx_training_time_ms = training_time_ms + 1000.0 * (time.perf_counter() - t0) + if args.swa_enabled and scale < args.swa_threshold and step % args.swa_every == 0: + if swa_state is None: + swa_state = {name: p.data.detach().float().clone() for name, p in base_model.named_parameters()} + swa_count = 1 + log0(f"swa:start step:{step}") + else: + for name, p in base_model.named_parameters(): + swa_state[name].add_(p.data if p.data.dtype == torch.float32 else p.data.float()) + swa_count += 1 + should_log_train = ( + args.train_log_every > 0 + and (step <= 10 or step % args.train_log_every == 0 or stop_after_step is not None) + ) + if should_log_train: + log0( + f"step:{step}/{args.iterations} train_loss:{_train_loss.item():.4f} " + f"train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms / step:.2f}ms" + ) + reached_cap = max_wallclock_ms is not None and approx_training_time_ms >= max_wallclock_ms + if distributed and max_wallclock_ms is not None: + _cap_tensor.fill_(int(reached_cap)) + dist.all_reduce(_cap_tensor, op=dist.ReduceOp.MAX) + reached_cap = bool(_cap_tensor.item()) + if stop_after_step is None and reached_cap: + stop_after_step = step + log0( + f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB" + ) + # Apply weight averaging: SWA if available, else EMA + if args.swa_enabled and swa_state is not None and swa_count > 1: + log0(f"swa:applying averaged {swa_count} checkpoints (source=raw)") + with torch.no_grad(): + for name, p in base_model.named_parameters(): + if name in swa_state: + p.data.copy_((swa_state[name] / swa_count).to(dtype=p.dtype)) + del swa_state + elif ema_state is not None: + log0("ema:applying EMA weights") + with torch.no_grad(): + for name, p in base_model.named_parameters(): + if name in ema_state: + p.data.copy_(ema_state[name].to(dtype=p.dtype)) + del ema_state + full_state_dict = 
base_model.state_dict() + export_sd = full_state_dict + if master_process: + torch.save(export_sd, "final_model.pt") + model_bytes = os.path.getsize("final_model.pt") + code_bytes = len(code.encode("utf-8")) + log0(f"Serialized model: {model_bytes} bytes") + log0(f"Code size: {code_bytes} bytes") + # Unbank 3D tensors into individual 2D tensors for quantization + sd_cpu = {k: v.detach().cpu() for k, v in export_sd.items()} + unbanked_sd = _unbank_state_dict(sd_cpu, args.num_layers) + + # Full GPTQ: collect Hessians via a temporary non-banked model + _gptq_t0 = time.perf_counter() + log0(f"gptq:building non-banked model for Hessian collection...") + hessian_model = _HessianGPT( + vocab_size=args.vocab_size, num_layers=args.num_layers, model_dim=args.model_dim, + num_heads=args.num_heads, num_kv_heads=args.num_kv_heads, mlp_mult=args.mlp_mult, + tie_embeddings=args.tie_embeddings, logit_softcap=args.logit_softcap, + rope_base=args.rope_base, qk_gain_init=args.qk_gain_init, + ngram_buckets=args.ngram_buckets, ngram_heads=args.ngram_heads, + ngram_orders=args.ngram_orders, ngram_dim_per_head=args.ngram_dim_per_head, + xsa_last_n=args.xsa_last_n, rope_dims=args.rope_dims, ln_scale=args.ln_scale, + ve_enabled=args.ve_enabled, ve_dim=args.ve_dim, ve_layers=args.ve_layers, + ).to(device).bfloat16() + for m in hessian_model.modules(): + if isinstance(m, CastedLinear): + m.float() + restore_low_dim_params_to_fp32(hessian_model) + # Load unbanked weights into the non-banked model + hessian_model.load_state_dict( + {k: v.to(device) for k, v in unbanked_sd.items() if k in hessian_model.state_dict()}, + strict=False, + ) + log0(f"gptq:calibrating with {args.gptq_calib_batches} batches...") + calib_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + hessians = collect_hessians(hessian_model, calib_loader, args, device, grad_accum_steps, + num_batches=args.gptq_calib_batches) + log0(f"gptq:collected hessians for {len(hessians)} layers") + _gptq_ms = 1000.0 * (time.perf_counter() - _gptq_t0) + _full_budget_ms = (max_wallclock_ms + args.gptq_reserve_ms) if max_wallclock_ms is not None else 0 + log0(f"gptq:budget_check train:{approx_training_time_ms:.0f}ms + gptq:{_gptq_ms:.0f}ms = {approx_training_time_ms + _gptq_ms:.0f}ms (budget:{_full_budget_ms:.0f}ms)") + del hessian_model + torch.cuda.empty_cache() + + # Snapshot: save unbanked_sd + hessians + sd_cpu so compression can be re-run without retraining + if args.snapshot_post_hessian: + if master_process: + snap_path = os.environ.get("SNAPSHOT_PATH", "snapshot_post_hessian.pt") + log0(f"snapshot:saving to {snap_path}...") + torch.save({"unbanked_sd": unbanked_sd, "hessians": {k: v.cpu() for k, v in hessians.items()}, "sd_cpu": sd_cpu}, snap_path) + snap_mb = os.path.getsize(snap_path) / 1e6 + log0(f"snapshot:saved {snap_mb:.1f}MB — exiting (use LOAD_SNAPSHOT={snap_path} to resume compression)") + if distributed: + dist.barrier() # all ranks wait for rank 0 to finish saving + dist.destroy_process_group() + return + + # --- Snapshot restore: load pre-computed state, skip training --- + if args.load_snapshot: + log0(f"snapshot:loading from {args.load_snapshot}...") + _snap = torch.load(args.load_snapshot, map_location="cpu", weights_only=True) + unbanked_sd = _snap["unbanked_sd"] + hessians = {k: v.to(device) for k, v in _snap["hessians"].items()} + sd_cpu = _snap["sd_cpu"] + del _snap + log0(f"snapshot:restored {len(unbanked_sd)} unbanked params, {len(hessians)} hessians") + + # === EVAL PHASE STARTS HERE (600s budget) === + # 
Everything below: quantization, compression, artifact save, model load, evaluation + torch.cuda.synchronize() + t_eval_phase = time.perf_counter() + + # Mixed precision bit allocation + mp_bit_allocation: dict[str, int] | None = None + if args.mixed_precision and hessians: + _mp_code_bytes = len(code.encode("utf-8")) + mp_bit_allocation, mp_log, mp_est = _allocate_bits_mixed( + hessians, unbanked_sd, target_bytes=args.target_bytes_limit, code_bytes=_mp_code_bytes, + ) + log0( + f"mixed_precision:estimate base={mp_est['base_mb']:.2f}MB + promoted={mp_est['promoted_mb']:.2f}MB " + f"= {mp_est['total_mb']:.2f}MB (budget={mp_est['budget_mb']:.1f}MB, " + f"headroom={mp_est['headroom_kb']:.0f}KB, prune_room={mp_est['prune_room_bytes']:+.0f}B)" + ) + promoted = sum(1 for _, b, _ in mp_log if b > 5) + for gkey, bits, sens in mp_log: + log0(f"mixed_precision: {gkey} -> int{bits} (sensitivity={sens:.4e})") + counts: dict[int, int] = {} + for b in mp_bit_allocation.values(): + counts[b] = counts.get(b, 0) + 1 + log0(f"mixed_precision: {' '.join(f'int{b}:{n}' for b, n in sorted(counts.items()))} ({promoted} groups promoted)") + + quant_result, quant_meta = mixed_quantize_int6(unbanked_sd, {"mlp", "attn"}, hessians=hessians, + bit_allocation=mp_bit_allocation, gptq_damp=args.gptq_damp, + block_size=args.gptq_block_size, col_order=args.gptq_col_order, + single_pass=args.gptq_single_pass) + # NOVEL: Selective +/-1 and +/-2 pruning by reconstruction error + target_bytes = args.target_bytes_limit + code_bytes_est = len(code.encode("utf-8")) + ones_info = [] # (tensor_key, flat_idx, error) + for name, info in quant_meta.items(): + if not isinstance(info, dict): continue + qk, sk = name + ".q", name + ".scale" + if qk not in quant_result or sk not in quant_result: continue + q, s = quant_result[qk], quant_result[sk] + if s.ndim > 0: + # Extended pruning: both +/-1 and +/-2 values + mask = (q.abs() <= 2) & (q.abs() > 0) + if mask.any(): + row_idx = torch.arange(q.shape[0]).unsqueeze(1).expand_as(q)[mask] + flat_idx = torch.arange(q.numel()).reshape(q.shape)[mask] + abs_vals = q.abs()[mask].float() + errors = s.float()[row_idx].pow(2) * abs_vals.pow(2) + for fi, err in zip(flat_idx.tolist(), errors.tolist()): + ones_info.append((qk, fi, err)) + if ones_info: + ones_info.sort(key=lambda x: x[2]) + + # Pre-group by tensor for vectorized pruning: O(len(ones_info)) once + _pg_pos: dict[str, list[int]] = {} + _pg_idx: dict[str, list[int]] = {} + for global_pos, (tname, fidx, _err) in enumerate(ones_info): + if tname in _pg_pos: + _pg_pos[tname].append(global_pos) + _pg_idx[tname].append(fidx) + else: + _pg_pos[tname] = [global_pos] + _pg_idx[tname] = [fidx] + prune_groups: dict[str, tuple[Tensor, Tensor]] = {} + for tname in _pg_pos: + prune_groups[tname] = ( + torch.tensor(_pg_pos[tname], dtype=torch.long), + torch.tensor(_pg_idx[tname], dtype=torch.long), + ) + del _pg_pos, _pg_idx + + def _compress_quant(qr, qm, fast=False): + buf = io.BytesIO() + torch.save({"w": qr, "m": qm}, buf) + raw = buf.getvalue() + if _BYTE_SHUFFLE: + raw = _byte_shuffle(raw, _BYTE_SHUFFLE_STRIDE) + if fast: + blob = zlib.compress(raw, 1) + elif _COMPRESSOR == "brotli": + import brotli + blob = brotli.compress(raw, quality=11) + elif _COMPRESSOR == "lzma": + blob = lzma.compress(raw, preset=9) + else: + blob = zlib.compress(raw, 9) + return len(blob) + code_bytes_est + + def _try_prune(n, fast=False): + """Trial-prune n entries using vectorized scatter, return compressed size.""" + n = min(n, len(ones_info)) + # Only clone tensors that 
will be modified + tmp = dict(quant_result) # shallow copy — shares unmodified tensors + for tname, (positions, flat_idxs) in prune_groups.items(): + count = int(torch.searchsorted(positions, n).item()) + if count > 0: + tmp[tname] = quant_result[tname].clone() + tmp[tname].view(-1)[flat_idxs[:count]] = 0 + return _compress_quant(tmp, quant_meta, fast=fast) + + def _apply_prune_inplace(n): + """Apply pruning in-place to quant_result.""" + n = min(n, len(ones_info)) + for tname, (positions, flat_idxs) in prune_groups.items(): + count = int(torch.searchsorted(positions, n).item()) + if count > 0: + quant_result[tname].view(-1)[flat_idxs[:count]] = 0 + + no_sz = _try_prune(0) + log0(f"selective_prune: {len(ones_info)} +/-1,+/-2 candidates, unpruned={no_sz/(1024*1024):.2f}MB target={target_bytes/1e6:.2f}MB") + if no_sz <= target_bytes: + log0("selective_prune: already fits, no pruning needed") + else: + # Calibrate fast-vs-real compressor ratio + fast_unpruned = _try_prune(0, fast=True) + fast_full = _try_prune(len(ones_info), fast=True) + real_full = _try_prune(len(ones_info)) + log0(f"selective_prune: full prune={real_full/(1024*1024):.2f}MB") + if real_full > target_bytes: + log0("selective_prune: even full prune not enough, applying all") + _apply_prune_inplace(len(ones_info)) + else: + fast_delta = fast_unpruned - fast_full + real_delta = no_sz - real_full + ratio = real_delta / max(fast_delta, 1) + fast_target = fast_unpruned - int((no_sz - target_bytes) / max(ratio, 0.01)) + log0(f"selective_prune: fast/real ratio={ratio:.3f} fast_target={fast_target/(1024*1024):.2f}MB") + # Binary search using fast compressor (~0.5s/probe vs ~30-60s) + lo, hi = 0, len(ones_info) + while lo < hi: + mid = (lo + hi) // 2 + sz = _try_prune(mid, fast=True) + if sz <= fast_target: hi = mid + else: lo = mid + 1 + # Verify with real compressor and adjust if needed + real_sz = _try_prune(lo) + if real_sz > target_bytes: + while lo < len(ones_info) and real_sz > target_bytes: + lo += max(1, len(ones_info) // 200) + lo = min(lo, len(ones_info)) + real_sz = _try_prune(lo) + log0(f"selective_prune: pruning {lo}/{len(ones_info)} values ({100*lo/max(len(ones_info),1):.1f}%) to fit {target_bytes/1e6:.2f}MB") + _apply_prune_inplace(lo) + + quant_buf = io.BytesIO() + torch.save({"w": quant_result, "m": quant_meta}, quant_buf) + quant_raw = quant_buf.getvalue() + if _BYTE_SHUFFLE: + quant_raw = _byte_shuffle(quant_raw, _BYTE_SHUFFLE_STRIDE) + if _COMPRESSOR == "brotli": + import brotli + quant_blob = brotli.compress(quant_raw, quality=11) + elif _COMPRESSOR == "lzma": + quant_blob = lzma.compress(quant_raw, preset=9) + else: + quant_blob = zlib.compress(quant_raw, 9) + if master_process: + with open("final_model.int6.ptz", "wb") as f: + f.write(quant_blob) + quant_file_bytes = len(quant_blob) + code_bytes = len(code.encode("utf-8")) + log0(f"Serialized model int6+{_COMPRESSOR}: {quant_file_bytes} bytes") + log0(f"Total submission size int6+{_COMPRESSOR}: {quant_file_bytes + code_bytes} bytes") + if distributed: + dist.barrier() + with open("final_model.int6.ptz", "rb") as f: + quant_blob_disk = f.read() + # Decompress with cascade + if _COMPRESSOR == "brotli": + import brotli + quant_decompressed = brotli.decompress(quant_blob_disk) + elif _COMPRESSOR == "lzma": + quant_decompressed = lzma.decompress(quant_blob_disk) + else: + quant_decompressed = zlib.decompress(quant_blob_disk) + if _BYTE_SHUFFLE: + quant_decompressed = _byte_unshuffle(quant_decompressed) + quant_state = torch.load( + io.BytesIO(quant_decompressed), + 
map_location="cpu", + weights_only=False, + ) + deq_unbanked = dequantize_mixed_int6(quant_state["w"], quant_state["m"], unbanked_sd) + # Re-bank the dequantized tensors + deq_state = _rebank_state_dict(deq_unbanked, args.num_layers, sd_cpu) + eval_model = GPT( + vocab_size=args.vocab_size, num_layers=args.num_layers, model_dim=args.model_dim, + num_heads=args.num_heads, num_kv_heads=args.num_kv_heads, mlp_mult=args.mlp_mult, + tie_embeddings=args.tie_embeddings, tied_embed_init_std=args.tied_embed_init_std, + logit_softcap=args.logit_softcap, rope_base=args.rope_base, qk_gain_init=args.qk_gain_init, + ngram_buckets=args.ngram_buckets, ngram_heads=args.ngram_heads, + ngram_orders=args.ngram_orders, ngram_dim_per_head=args.ngram_dim_per_head, + xsa_last_n=args.xsa_last_n, + rope_dims=args.rope_dims, ln_scale=args.ln_scale, + ve_enabled=args.ve_enabled, ve_dim=args.ve_dim, ve_layers=args.ve_layers, + ).to(device).bfloat16() + eval_model.qo_bank.data = eval_model.qo_bank.data.float() + eval_model.kv_bank.data = eval_model.kv_bank.data.float() + eval_model.mlp_up_bank.data = eval_model.mlp_up_bank.data.float() + eval_model.mlp_down_bank.data = eval_model.mlp_down_bank.data.float() + for m in eval_model.modules(): + if isinstance(m, CastedLinear): + m.float() + restore_low_dim_params_to_fp32(eval_model) + eval_model.load_state_dict(deq_state, strict=True) + torch.cuda.synchronize() + _eval_phase_elapsed = lambda: 1000.0 * (time.perf_counter() - t_eval_phase) + _load_eval_ms = _eval_phase_elapsed() + log0(f"eval_budget: quantize+compress+decompress+load took {_load_eval_ms:.0f}ms") + sw_seq_len = effective_eval_seq_len + + if args.ttt_enabled: + # TTT does its own sliding-window scoring — skip redundant eval passes + log0(f"eval_budget: TTT enabled, skipping roundtrip+sliding evals (elapsed so far: {_eval_phase_elapsed():.0f}ms)") + torch.cuda.synchronize() + t_ttt = time.perf_counter() + ttt_val_loss, ttt_val_bpb = eval_val_sliding_ttt( + args, eval_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut, + stride=args.eval_stride, + batch_seqs=args.ttt_batch_seqs, + eval_seq_len=sw_seq_len, + eval_budget_t0=t_eval_phase, + ) + torch.cuda.synchronize() + ttt_elapsed = time.perf_counter() - t_ttt + log0( + f"final_int6_ttt val_loss:{ttt_val_loss:.4f} val_bpb:{ttt_val_bpb:.4f} " + f"stride:{args.eval_stride} ttt_time:{1000.0 * ttt_elapsed:.0f}ms" + ) + log0(f"final_int6_ttt_exact val_loss:{ttt_val_loss:.8f} val_bpb:{ttt_val_bpb:.8f}") + else: + # Standard eval passes (no TTT) + compiled_eval = torch.compile(eval_model, dynamic=False, fullgraph=True) + torch.cuda.synchronize() + t_qeval = time.perf_counter() + q_val_loss, q_val_bpb = eval_val( + args, compiled_eval, rank, world_size, device, grad_accum_steps, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut, + eval_seq_len=effective_eval_seq_len, + ) + torch.cuda.synchronize() + log0( + f"final_int6_roundtrip val_loss:{q_val_loss:.4f} val_bpb:{q_val_bpb:.4f} " + f"eval_time:{1000.0 * (time.perf_counter() - t_qeval):.0f}ms" + ) + log0(f"final_int6_roundtrip_exact val_loss:{q_val_loss:.8f} val_bpb:{q_val_bpb:.8f}") + if args.eval_stride > 0 and args.eval_stride < sw_seq_len: + torch.cuda.synchronize() + t_slide = time.perf_counter() + sw_val_loss, sw_val_bpb = eval_val_sliding( + args, eval_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut, + stride=args.eval_stride, + eval_seq_len=sw_seq_len, + ) + 
torch.cuda.synchronize() + log0( + f"final_int6_sliding_window val_loss:{sw_val_loss:.4f} val_bpb:{sw_val_bpb:.4f} " + f"stride:{args.eval_stride} eval_time:{1000.0 * (time.perf_counter() - t_slide):.0f}ms" + ) + log0(f"final_int6_sliding_window_exact val_loss:{sw_val_loss:.8f} val_bpb:{sw_val_bpb:.8f}") + if args.eval_stride != 64 and 64 < sw_seq_len: + torch.cuda.synchronize() + t_slide64 = time.perf_counter() + sw64_val_loss, sw64_val_bpb = eval_val_sliding( + args, eval_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut, + stride=64, + eval_seq_len=sw_seq_len, + ) + torch.cuda.synchronize() + log0( + f"final_int6_sliding_window_s64 val_loss:{sw64_val_loss:.4f} val_bpb:{sw64_val_bpb:.4f} " + f"stride:64 eval_time:{1000.0 * (time.perf_counter() - t_slide64):.0f}ms" + ) + log0(f"final_int6_sliding_window_s64_exact val_loss:{sw64_val_loss:.8f} val_bpb:{sw64_val_bpb:.8f}") + # Cumulative eval phase summary + _total_eval_ms = _eval_phase_elapsed() + log0(f"eval_budget: TOTAL eval phase {_total_eval_ms:.0f}ms ({_total_eval_ms/1000:.1f}s / 600s budget)") + if _total_eval_ms > 580_000: + log0(f"eval_budget: WARNING near or over 600s budget!") + if distributed: + dist.destroy_process_group() +if __name__ == "__main__": + main() diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed1337.log b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed1337.log new file mode 100644 index 0000000000..63e68075d2 --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed1337.log @@ -0,0 +1,106 @@ +W0329 22:07:44.911000 298746 torch/distributed/run.py:851] +W0329 22:07:44.911000 298746 torch/distributed/run.py:851] ***************************************** +W0329 22:07:44.911000 298746 torch/distributed/run.py:851] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+W0329 22:07:44.911000 298746 torch/distributed/run.py:851] ***************************************** +logs/7e3b2736-7395-4a9d-9773-13d7d7440c46.txt +val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path=./data/tokenizers/fineweb_1024_bpe.model +train_loader:dataset:fineweb10B_sp1024 train_shards:80 +val_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_val_*.bin tokens:62021632 +model_params:30666843 +XSA:last_11 active_layers:[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] +world_size:8 grad_accum_steps:1 +sdp_backends:cudnn=True flash=True mem_efficient=True math=True +attention_mode:gqa num_heads:8 num_kv_heads:4 +tie_embeddings:True embed_lr:0.035 head_lr:0.0 matrix_lr:0.025 scalar_lr:0.025 +train_batch_tokens:786432 train_seq_len:2048 iterations:20000 warmup_steps:20 max_wallclock_seconds:600.000 +muon:post_norm:row_col embed_beta1:0.7 head_beta1:0.7 +seed:1337 +gptq:reserving 14000ms from training budget (effective cap: 586000ms) +warmup_step:1/20 +warmup_step:2/20 +warmup_step:3/20 +warmup_step:4/20 +warmup_step:5/20 +warmup_step:6/20 +warmup_step:7/20 +warmup_step:8/20 +warmup_step:9/20 +warmup_step:10/20 +warmup_step:11/20 +warmup_step:12/20 +warmup_step:13/20 +warmup_step:14/20 +warmup_step:15/20 +warmup_step:16/20 +warmup_step:17/20 +warmup_step:18/20 +warmup_step:19/20 +warmup_step:20/20 +step:1/20000 train_loss:6.9328 train_time:494ms step_avg:493.75ms +step:2/20000 train_loss:8.3875 train_time:533ms step_avg:266.70ms +step:3/20000 train_loss:7.3421 train_time:623ms step_avg:207.74ms +step:4/20000 train_loss:8.1585 train_time:713ms step_avg:178.36ms +step:5/20000 train_loss:8.3460 train_time:803ms step_avg:160.69ms +step:6/20000 train_loss:7.8740 train_time:894ms step_avg:148.93ms +step:7/20000 train_loss:7.1298 train_time:984ms step_avg:140.52ms +step:8/20000 train_loss:6.6737 train_time:1073ms step_avg:134.19ms +step:9/20000 train_loss:6.2528 train_time:1164ms step_avg:129.29ms +step:10/20000 train_loss:5.7770 train_time:1253ms step_avg:125.34ms +step:500/20000 train_loss:2.3328 train_time:46551ms step_avg:93.10ms +step:1000/20000 train_loss:2.2220 train_time:93030ms step_avg:93.03ms +step:1500/20000 train_loss:2.1762 train_time:139529ms step_avg:93.02ms +step:2000/20000 train_loss:2.0185 train_time:186085ms step_avg:93.04ms +step:2500/20000 train_loss:2.1237 train_time:232644ms step_avg:93.06ms +step:3000/20000 train_loss:2.1133 train_time:279247ms step_avg:93.08ms +step:3500/20000 train_loss:2.1247 train_time:325853ms step_avg:93.10ms +step:4000/20000 train_loss:1.9143 train_time:372425ms step_avg:93.11ms +step:4500/20000 train_loss:2.0619 train_time:418966ms step_avg:93.10ms +step:5000/20000 train_loss:2.0399 train_time:465530ms step_avg:93.11ms +step:5500/20000 train_loss:1.9549 train_time:512114ms step_avg:93.11ms +swa:start step:5600 +late_qat:enabled step:5769 scale:0.1498 soft_round=True est_steps:524 +step:6000/20000 train_loss:1.8764 train_time:558656ms step_avg:93.11ms +step:6295/20000 train_time:586112ms step_avg:93.11ms +stopping_early: wallclock_cap train_time:586112ms step:6295/20000 +peak memory allocated: 25096 MiB reserved: 25820 MiB +swa:applying averaged 14 checkpoints (source=raw) +Serialized model: 119281399 bytes +Code size: 24378 bytes +gptq:building non-banked model for Hessian collection... +gptq:calibrating with 64 batches... 
+gptq:collected hessians for 68 layers +gptq:budget_check train:586059ms + gptq:7162ms = 593221ms (budget:600000ms) +mixed_precision:estimate base=15.23MB + promoted=0.44MB = 15.67MB (budget=16.0MB, headroom=320KB, prune_room=+13127B) +mixed_precision: layer.0.mlp -> int6 (sensitivity=2.3236e+07) +mixed_precision: layer.1.mlp -> int5 (sensitivity=3.5619e+06) +mixed_precision: layer.2.mlp -> int5 (sensitivity=1.1923e+06) +mixed_precision: layer.3.mlp -> int5 (sensitivity=7.5367e+05) +mixed_precision: layer.5.mlp -> int5 (sensitivity=3.7461e+05) +mixed_precision: layer.4.mlp -> int5 (sensitivity=3.7013e+05) +mixed_precision: layer.6.mlp -> int5 (sensitivity=3.4786e+05) +mixed_precision: layer.7.mlp -> int5 (sensitivity=2.2513e+05) +mixed_precision: layer.1.attn -> int5 (sensitivity=1.5651e+05) +mixed_precision: layer.0.attn -> int5 (sensitivity=1.4575e+05) +mixed_precision: layer.2.attn -> int5 (sensitivity=1.1665e+05) +mixed_precision: layer.4.attn -> int5 (sensitivity=1.0037e+05) +mixed_precision: layer.3.attn -> int5 (sensitivity=8.0967e+04) +mixed_precision: layer.8.mlp -> int5 (sensitivity=6.2683e+04) +mixed_precision: layer.5.attn -> int5 (sensitivity=4.7769e+04) +mixed_precision: layer.6.attn -> int5 (sensitivity=4.3712e+04) +mixed_precision: layer.7.attn -> int5 (sensitivity=4.3028e+04) +mixed_precision: layer.8.attn -> int5 (sensitivity=2.6122e+04) +mixed_precision: layer.9.mlp -> int5 (sensitivity=2.4802e+04) +mixed_precision: layer.10.mlp -> int5 (sensitivity=2.4152e+04) +mixed_precision: layer.9.attn -> int5 (sensitivity=1.9360e+04) +mixed_precision: layer.10.attn -> int5 (sensitivity=1.2647e+04) +mixed_precision: int5:64 int6:2 (1 groups promoted) +selective_prune: 12043936 +/-1,+/-2 candidates, unpruned=15.33MB target=16.00MB +selective_prune: full prune=11.08MB +selective_prune: fast/real ratio=1.292 fast_target=17.87MB +selective_prune: pruning 278795/12043936 values (2.3%) to fit 16.00MB +Serialized model int6+brotli: 15969035 bytes +Total submission size int6+brotli: 15993413 bytes +final_int6_roundtrip val_loss:1.9127 val_bpb:1.1328 eval_time:5968ms +final_int6_roundtrip_exact val_loss:1.91267085 val_bpb:1.13279141 +final_int6_sliding_window val_loss:1.8726 val_bpb:1.1090 stride:64 eval_time:87666ms +final_int6_sliding_window_exact val_loss:1.87255649 val_bpb:1.10903637 diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed2025.log b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed2025.log new file mode 100644 index 0000000000..a2080e0c5e --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed2025.log @@ -0,0 +1,106 @@ +W0329 22:28:04.169000 300375 torch/distributed/run.py:851] +W0329 22:28:04.169000 300375 torch/distributed/run.py:851] ***************************************** +W0329 22:28:04.169000 300375 torch/distributed/run.py:851] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+W0329 22:28:04.169000 300375 torch/distributed/run.py:851] ***************************************** +logs/9a1580db-4865-435c-ab02-c5c119a72bfb.txt +val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path=./data/tokenizers/fineweb_1024_bpe.model +train_loader:dataset:fineweb10B_sp1024 train_shards:80 +val_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_val_*.bin tokens:62021632 +model_params:30666843 +XSA:last_11 active_layers:[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] +world_size:8 grad_accum_steps:1 +sdp_backends:cudnn=True flash=True mem_efficient=True math=True +attention_mode:gqa num_heads:8 num_kv_heads:4 +tie_embeddings:True embed_lr:0.035 head_lr:0.0 matrix_lr:0.025 scalar_lr:0.025 +train_batch_tokens:786432 train_seq_len:2048 iterations:20000 warmup_steps:20 max_wallclock_seconds:600.000 +muon:post_norm:row_col embed_beta1:0.7 head_beta1:0.7 +seed:2025 +gptq:reserving 14000ms from training budget (effective cap: 586000ms) +warmup_step:1/20 +warmup_step:2/20 +warmup_step:3/20 +warmup_step:4/20 +warmup_step:5/20 +warmup_step:6/20 +warmup_step:7/20 +warmup_step:8/20 +warmup_step:9/20 +warmup_step:10/20 +warmup_step:11/20 +warmup_step:12/20 +warmup_step:13/20 +warmup_step:14/20 +warmup_step:15/20 +warmup_step:16/20 +warmup_step:17/20 +warmup_step:18/20 +warmup_step:19/20 +warmup_step:20/20 +step:1/20000 train_loss:6.9304 train_time:489ms step_avg:489.27ms +step:2/20000 train_loss:8.4936 train_time:529ms step_avg:264.66ms +step:3/20000 train_loss:7.2384 train_time:618ms step_avg:206.11ms +step:4/20000 train_loss:8.3246 train_time:709ms step_avg:177.30ms +step:5/20000 train_loss:8.3984 train_time:799ms step_avg:159.79ms +step:6/20000 train_loss:7.8130 train_time:889ms step_avg:148.15ms +step:7/20000 train_loss:7.1680 train_time:978ms step_avg:139.76ms +step:8/20000 train_loss:6.7385 train_time:1068ms step_avg:133.55ms +step:9/20000 train_loss:6.1919 train_time:1158ms step_avg:128.72ms +step:10/20000 train_loss:5.8325 train_time:1248ms step_avg:124.85ms +step:500/20000 train_loss:2.3421 train_time:46479ms step_avg:92.96ms +step:1000/20000 train_loss:2.2253 train_time:92974ms step_avg:92.97ms +step:1500/20000 train_loss:2.1737 train_time:139484ms step_avg:92.99ms +step:2000/20000 train_loss:2.0216 train_time:186046ms step_avg:93.02ms +step:2500/20000 train_loss:2.1264 train_time:232647ms step_avg:93.06ms +step:3000/20000 train_loss:2.1176 train_time:279233ms step_avg:93.08ms +step:3500/20000 train_loss:2.1243 train_time:325791ms step_avg:93.08ms +step:4000/20000 train_loss:1.9181 train_time:372370ms step_avg:93.09ms +step:4500/20000 train_loss:2.0653 train_time:418946ms step_avg:93.10ms +step:5000/20000 train_loss:2.0396 train_time:465507ms step_avg:93.10ms +step:5500/20000 train_loss:1.9537 train_time:512090ms step_avg:93.11ms +swa:start step:5600 +late_qat:enabled step:5769 scale:0.1498 soft_round=True est_steps:524 +step:6000/20000 train_loss:1.8782 train_time:558654ms step_avg:93.11ms +step:6294/20000 train_time:586053ms step_avg:93.11ms +stopping_early: wallclock_cap train_time:586053ms step:6294/20000 +peak memory allocated: 25096 MiB reserved: 25820 MiB +swa:applying averaged 14 checkpoints (source=raw) +Serialized model: 119281399 bytes +Code size: 24378 bytes +gptq:building non-banked model for Hessian collection... +gptq:calibrating with 64 batches... 
+gptq:collected hessians for 68 layers +gptq:budget_check train:585999ms + gptq:7128ms = 593127ms (budget:600000ms) +mixed_precision:estimate base=15.23MB + promoted=0.44MB = 15.67MB (budget=16.0MB, headroom=320KB, prune_room=+13127B) +mixed_precision: layer.0.mlp -> int6 (sensitivity=2.7833e+07) +mixed_precision: layer.1.mlp -> int5 (sensitivity=2.7024e+06) +mixed_precision: layer.2.mlp -> int5 (sensitivity=1.3974e+06) +mixed_precision: layer.3.mlp -> int5 (sensitivity=6.2610e+05) +mixed_precision: layer.4.mlp -> int5 (sensitivity=4.0124e+05) +mixed_precision: layer.5.mlp -> int5 (sensitivity=3.3751e+05) +mixed_precision: layer.6.mlp -> int5 (sensitivity=3.1689e+05) +mixed_precision: layer.7.mlp -> int5 (sensitivity=1.8564e+05) +mixed_precision: layer.1.attn -> int5 (sensitivity=1.8235e+05) +mixed_precision: layer.0.attn -> int5 (sensitivity=1.3882e+05) +mixed_precision: layer.2.attn -> int5 (sensitivity=1.0076e+05) +mixed_precision: layer.3.attn -> int5 (sensitivity=9.3151e+04) +mixed_precision: layer.4.attn -> int5 (sensitivity=7.5136e+04) +mixed_precision: layer.5.attn -> int5 (sensitivity=6.2500e+04) +mixed_precision: layer.8.mlp -> int5 (sensitivity=5.5074e+04) +mixed_precision: layer.6.attn -> int5 (sensitivity=3.6806e+04) +mixed_precision: layer.7.attn -> int5 (sensitivity=3.0078e+04) +mixed_precision: layer.9.mlp -> int5 (sensitivity=2.5883e+04) +mixed_precision: layer.8.attn -> int5 (sensitivity=2.5234e+04) +mixed_precision: layer.10.mlp -> int5 (sensitivity=2.0381e+04) +mixed_precision: layer.9.attn -> int5 (sensitivity=1.8494e+04) +mixed_precision: layer.10.attn -> int5 (sensitivity=1.1435e+04) +mixed_precision: int5:64 int6:2 (1 groups promoted) +selective_prune: 12024138 +/-1,+/-2 candidates, unpruned=15.34MB target=16.00MB +selective_prune: full prune=11.10MB +selective_prune: fast/real ratio=1.293 fast_target=17.86MB +selective_prune: pruning 309004/12024138 values (2.6%) to fit 16.00MB +Serialized model int6+brotli: 15969526 bytes +Total submission size int6+brotli: 15993904 bytes +final_int6_roundtrip val_loss:1.9139 val_bpb:1.1335 eval_time:5930ms +final_int6_roundtrip_exact val_loss:1.91387889 val_bpb:1.13350688 +final_int6_sliding_window val_loss:1.8735 val_bpb:1.1096 stride:64 eval_time:87493ms +final_int6_sliding_window_exact val_loss:1.87354600 val_bpb:1.10962241 diff --git a/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed42.log b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed42.log new file mode 100644 index 0000000000..c7aa370d2d --- /dev/null +++ b/records/track_10min_16mb/2026-03-28_TurboMuon_EngramLite_ParamBanking/train_seed42.log @@ -0,0 +1,106 @@ +W0329 21:51:21.267000 297069 torch/distributed/run.py:851] +W0329 21:51:21.267000 297069 torch/distributed/run.py:851] ***************************************** +W0329 21:51:21.267000 297069 torch/distributed/run.py:851] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+W0329 21:51:21.267000 297069 torch/distributed/run.py:851] ***************************************** +logs/92541b73-be26-4cc9-b01a-01a6e90a92e5.txt +val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path=./data/tokenizers/fineweb_1024_bpe.model +train_loader:dataset:fineweb10B_sp1024 train_shards:80 +val_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_val_*.bin tokens:62021632 +model_params:30666843 +XSA:last_11 active_layers:[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] +world_size:8 grad_accum_steps:1 +sdp_backends:cudnn=True flash=True mem_efficient=True math=True +attention_mode:gqa num_heads:8 num_kv_heads:4 +tie_embeddings:True embed_lr:0.035 head_lr:0.0 matrix_lr:0.025 scalar_lr:0.025 +train_batch_tokens:786432 train_seq_len:2048 iterations:20000 warmup_steps:20 max_wallclock_seconds:600.000 +muon:post_norm:row_col embed_beta1:0.7 head_beta1:0.7 +seed:42 +gptq:reserving 14000ms from training budget (effective cap: 586000ms) +warmup_step:1/20 +warmup_step:2/20 +warmup_step:3/20 +warmup_step:4/20 +warmup_step:5/20 +warmup_step:6/20 +warmup_step:7/20 +warmup_step:8/20 +warmup_step:9/20 +warmup_step:10/20 +warmup_step:11/20 +warmup_step:12/20 +warmup_step:13/20 +warmup_step:14/20 +warmup_step:15/20 +warmup_step:16/20 +warmup_step:17/20 +warmup_step:18/20 +warmup_step:19/20 +warmup_step:20/20 +step:1/20000 train_loss:6.9326 train_time:1525ms step_avg:1525.29ms +step:2/20000 train_loss:8.3594 train_time:1565ms step_avg:782.45ms +step:3/20000 train_loss:7.2947 train_time:1654ms step_avg:551.32ms +step:4/20000 train_loss:8.1276 train_time:1744ms step_avg:436.08ms +step:5/20000 train_loss:8.3088 train_time:1834ms step_avg:366.82ms +step:6/20000 train_loss:7.7835 train_time:1924ms step_avg:320.62ms +step:7/20000 train_loss:7.1261 train_time:2014ms step_avg:287.69ms +step:8/20000 train_loss:6.5175 train_time:2103ms step_avg:262.91ms +step:9/20000 train_loss:6.1335 train_time:2194ms step_avg:243.73ms +step:10/20000 train_loss:5.7020 train_time:2283ms step_avg:228.34ms +step:500/20000 train_loss:2.3294 train_time:47519ms step_avg:95.04ms +step:1000/20000 train_loss:2.2241 train_time:93945ms step_avg:93.94ms +step:1500/20000 train_loss:2.1713 train_time:140419ms step_avg:93.61ms +step:2000/20000 train_loss:2.0154 train_time:186960ms step_avg:93.48ms +step:2500/20000 train_loss:2.1255 train_time:233506ms step_avg:93.40ms +step:3000/20000 train_loss:2.1123 train_time:280084ms step_avg:93.36ms +step:3500/20000 train_loss:2.1237 train_time:326687ms step_avg:93.34ms +step:4000/20000 train_loss:1.9157 train_time:373288ms step_avg:93.32ms +step:4500/20000 train_loss:2.0627 train_time:419846ms step_avg:93.30ms +step:5000/20000 train_loss:2.0388 train_time:466414ms step_avg:93.28ms +step:5500/20000 train_loss:1.9495 train_time:512980ms step_avg:93.27ms +swa:start step:5600 +late_qat:enabled step:5758 scale:0.1499 soft_round=True est_steps:524 +step:6000/20000 train_loss:1.8757 train_time:559570ms step_avg:93.26ms +step:6284/20000 train_time:586060ms step_avg:93.26ms +stopping_early: wallclock_cap train_time:586060ms step:6284/20000 +peak memory allocated: 25096 MiB reserved: 25820 MiB +swa:applying averaged 14 checkpoints (source=raw) +Serialized model: 119281399 bytes +Code size: 24378 bytes +gptq:building non-banked model for Hessian collection... +gptq:calibrating with 64 batches... 
+gptq:collected hessians for 68 layers +gptq:budget_check train:586006ms + gptq:7252ms = 593259ms (budget:600000ms) +mixed_precision:estimate base=15.23MB + promoted=0.44MB = 15.67MB (budget=16.0MB, headroom=320KB, prune_room=+13127B) +mixed_precision: layer.0.mlp -> int6 (sensitivity=2.6086e+07) +mixed_precision: layer.1.mlp -> int5 (sensitivity=2.4150e+06) +mixed_precision: layer.2.mlp -> int5 (sensitivity=2.0456e+06) +mixed_precision: layer.3.mlp -> int5 (sensitivity=5.6480e+05) +mixed_precision: layer.6.mlp -> int5 (sensitivity=4.4870e+05) +mixed_precision: layer.4.mlp -> int5 (sensitivity=4.3653e+05) +mixed_precision: layer.5.mlp -> int5 (sensitivity=3.5498e+05) +mixed_precision: layer.1.attn -> int5 (sensitivity=1.9466e+05) +mixed_precision: layer.0.attn -> int5 (sensitivity=1.4146e+05) +mixed_precision: layer.7.mlp -> int5 (sensitivity=1.3859e+05) +mixed_precision: layer.3.attn -> int5 (sensitivity=1.3477e+05) +mixed_precision: layer.2.attn -> int5 (sensitivity=7.7494e+04) +mixed_precision: layer.8.mlp -> int5 (sensitivity=6.2807e+04) +mixed_precision: layer.4.attn -> int5 (sensitivity=5.3413e+04) +mixed_precision: layer.5.attn -> int5 (sensitivity=4.9412e+04) +mixed_precision: layer.6.attn -> int5 (sensitivity=4.6376e+04) +mixed_precision: layer.7.attn -> int5 (sensitivity=4.0308e+04) +mixed_precision: layer.9.mlp -> int5 (sensitivity=2.5584e+04) +mixed_precision: layer.10.mlp -> int5 (sensitivity=2.2340e+04) +mixed_precision: layer.8.attn -> int5 (sensitivity=1.7921e+04) +mixed_precision: layer.9.attn -> int5 (sensitivity=1.6176e+04) +mixed_precision: layer.10.attn -> int5 (sensitivity=1.2792e+04) +mixed_precision: int5:64 int6:2 (1 groups promoted) +selective_prune: 12048290 +/-1,+/-2 candidates, unpruned=15.33MB target=16.00MB +selective_prune: full prune=11.08MB +selective_prune: fast/real ratio=1.291 fast_target=17.86MB +selective_prune: pruning 274786/12048290 values (2.3%) to fit 16.00MB +Serialized model int6+brotli: 15968150 bytes +Total submission size int6+brotli: 15992528 bytes +final_int6_roundtrip val_loss:1.9120 val_bpb:1.1324 eval_time:12089ms +final_int6_roundtrip_exact val_loss:1.91199048 val_bpb:1.13238846 +final_int6_sliding_window val_loss:1.8718 val_bpb:1.1086 stride:64 eval_time:92339ms +final_int6_sliding_window_exact val_loss:1.87181111 val_bpb:1.10859491