# titans_NPC / examples / train_qwen_titans_babilong_v4.py
# Snapshot by ChipYTY: "Add v4 Qwen3+Titans code snapshot and README" (commit a517ecd, verified)
"""
Qwen3 + Titans v4 - BABILong QA1 (32k) with Cross-Chunk Gradients
Key design:
1. Freeze Qwen backbone EXCEPT embed_tokens (trainable for input adaptation)
2. Untie lm_head from embed_tokens if they share weights
3. Train: Memory modules + embed_tokens + lm_head
4. Use chunkwise_backward=False + detach_mem_state=False for TRUE cross-chunk gradients
5. Enable gradient_checkpointing to manage memory usage
Cross-chunk gradient flow:
- chunkwise_backward=False: entire sequence backward together
- detach_mem_state=False: memory state keeps gradient graph
- cross_chunk_gradient_steps: controls how many chunks back gradient flows
"""
import os
import sys
# =============================================================================
# CRITICAL: Disable torchao BEFORE importing transformers to avoid version conflicts
# =============================================================================
os.environ["TRANSFORMERS_NO_TORCHAO"] = "1"
# Mock torchao to prevent import errors
class _MockTorchAO:
def __getattr__(self, name):
return _MockTorchAO()
def __call__(self, *args, **kwargs):
return _MockTorchAO()
sys.modules['torchao'] = _MockTorchAO()
sys.modules['torchao.quantization'] = _MockTorchAO()
import json
import math
import argparse
import logging
import weakref
from contextlib import nullcontext
from dataclasses import dataclass, asdict, field
from typing import Optional, Dict, Any, List, Tuple, Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data import Dataset, DataLoader
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm
from einops import rearrange, repeat
# add repo root to sys.path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Titans components
from titans_pytorch import NeuralMemory, MemoryMLP
from titans_pytorch.neural_memory import NeuralMemState
# Module-wide logging: timestamped INFO-level messages.
_LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(format=_LOG_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
# =============================================================================
# Configuration
# =============================================================================
@dataclass
class TrainingConfig:
    """All hyper-parameters for the v4 Qwen3+Titans BABILong run.

    Grouped by concern: file paths, optimisation, memory-module wiring,
    cross-chunk gradient control, evaluation/logging, numeric precision,
    data handling, distributed setup and checkpointing.
    """
    # paths
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_titans_babilong_v4"
    # training
    num_epochs: int = 10
    batch_size: int = 1
    gradient_accumulation_steps: int = 16
    max_grad_norm: float = 1.0
    # learning rates (v4: separate rates for memory, embed, head)
    lr_memory: float = 1e-4
    lr_memory_attention: float = 5e-5
    lr_embed: float = 1e-5  # Learning rate for embed_tokens
    lr_lm_head: float = 1e-4  # Learning rate for lm_head
    weight_decay: float = 0.01
    warmup_steps: int = 100
    # streaming / memory
    chunk_size: int = 4096  # sequence chunk length fed through the model at a time
    use_memory: bool = True
    memory_chunk_size: int = 128
    memory_batch_size: int = 128
    memory_heads: int = 8
    memory_dim_head: int = 64
    memory_depth: int = 1
    memory_layer_stride: int = 8  # wrap every N-th decoder layer with memory
    memory_fp32: bool = True  # run the neural memory in float32 for stability
    # Memory state detachment - controls cross-chunk gradient flow
    # False = allow gradient flow through memory state (requires chunkwise_backward=False)
    # True = detach memory state each chunk (no cross-chunk gradients)
    detach_mem_state: bool = False  # Enable cross-chunk gradients!
    deep_memory_integration: bool = False
    memory_as_context: bool = False
    num_memory_tokens: int = 16
    memory_gate_bias: float = -2.0
    use_momentum: bool = True
    momentum_order: int = 1
    # Gradient flow control - NOW ACTIVE with chunkwise_backward=False
    # cross_chunk_gradient_steps: how many chunks back gradient can flow through memory
    gradient_checkpoint_memory: bool = False
    cross_chunk_gradient_steps: int = 2  # Allow gradient through 2 recent chunks
    # evaluation / logging
    eval_steps: int = 200
    eval_topk: int = 0
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False
    # precision
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = True  # Enable to manage memory with full-sequence backward
    chunkwise_backward: bool = False  # Disable for cross-chunk gradients
    # data
    max_length: int = 32768
    answer_reserve_tokens: int = 64  # tokens held back from the prompt for the answer
    label_prefix_tokens: int = 0  # optionally supervise this many prompt tokens before the answer
    max_samples: Optional[int] = 500
    # distributed
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False
    # checkpoint
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"
    seed: int = 42
# =============================================================================
# Dataset
# =============================================================================
class BABILongDataset(Dataset):
    """BABILong QA dataset: long context + question prompt, short answer.

    Each item is tokenised as the context followed by "Question: ... Answer:",
    then the answer tokens. Labels are -100 everywhere except the answer span
    (plus an optional supervised prefix window), and every sequence is
    right-padded to ``max_length`` with an attention mask marking real tokens.
    """
    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        # `tokenizer` is expected to be a HF-style tokenizer — TODO confirm
        # it exposes pad_token_id and the __call__(..., return_tensors) API.
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)
        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)
        if max_samples:
            self.data = self.data[:max_samples]
        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
        target = item["target"]
        # NOTE: `or 0` also maps a legitimate pad_token_id of 0 to 0 — harmless here.
        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)
        # Truncate the prompt so at least `reserve` positions remain for the answer.
        prompt_ids = self.tokenizer(
            text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)
        # Leading space so the answer tokenises as a continuation of "Answer:".
        answer_ids = self.tokenizer(
            f" {target}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)
        # Clip the answer to whatever room the prompt left.
        available = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:available]
        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]
        # Supervise only the answer span; everything else is ignored (-100).
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            # Optionally also supervise a few prompt tokens right before the answer.
            if self.label_prefix_tokens > 0:
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix:start] = input_ids[start - prefix:start]
        # Right-pad to max_length; padded positions get mask 0 and label -100.
        seq_len = input_ids.numel()
        if seq_len < self.max_length:
            pad_len = self.max_length - seq_len
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)
        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
def collate_fn(batch):
    """Stack a list of per-sample tensor dicts into one batched dict.

    Assumes every sample shares the same keys and per-key tensor shapes.
    """
    stacked = {}
    for key in batch[0]:
        stacked[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return stacked
# =============================================================================
# Memory-Augmented Attention Module (from v3)
# =============================================================================
class MemoryAugmentedAttention(nn.Module):
    """
    Deep integration of memory into attention mechanism.
    Memory provides additional context that enhances hidden states.

    The retrieved memory is passed through a small MLP, gated per attention
    head by a learnable sigmoid gate, and added residually through a
    zero-initialised projection (so the module starts as an exact identity).
    """
    def __init__(
        self,
        hidden_size: int,
        num_attention_heads: int,
        num_memory_tokens: int = 16,
        memory_dim_head: int = 64,
        memory_fp32: bool = True,
        gate_bias: float = -2.0,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_attention_heads
        self.head_dim = hidden_size // num_attention_heads
        self.memory_fp32 = memory_fp32
        # Two-layer SiLU MLP that adapts the raw memory readout before gating.
        self.memory_transform = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size),
        )
        # One learnable gate logit per attention head (sigmoid-squashed in forward).
        self.memory_gate = nn.Parameter(torch.full((num_attention_heads, 1, 1), gate_bias))
        # Zero-initialised output projection: the module begins as an identity map.
        self.memory_output_proj = nn.Linear(hidden_size, hidden_size, bias=False)
        nn.init.zeros_(self.memory_output_proj.weight)

    def forward(
        self,
        hidden_states: torch.Tensor,
        memory_context: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states: [batch, seq_len, hidden_size] - current hidden states
            memory_context: [batch, seq_len, hidden_size] - retrieved memory
            attention_mask: optional attention mask (currently unused)
        Returns:
            enhanced_hidden: [batch, seq_len, hidden_size]
        """
        bsz, seq_len, _ = hidden_states.shape
        transformed = self.memory_transform(memory_context)
        # [b, n, h*d] -> [b, h, n, d] so the gate can act per head.
        per_head = transformed.view(bsz, seq_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        # Sigmoid gate [h, 1, 1] broadcasts over (batch, seq, dim_head).
        gated = per_head * torch.sigmoid(self.memory_gate)
        # [b, h, n, d] -> [b, n, h*d]; reshape handles the non-contiguous permute.
        merged = gated.permute(0, 2, 1, 3).reshape(bsz, seq_len, -1)
        # Residual add through the (initially zero) projection.
        return hidden_states + self.memory_output_proj(merged)
# =============================================================================
# Deep Memory Layer (from v3, with cross-chunk gradient flow)
# =============================================================================
class QwenDecoderLayerWithDeepMemory(nn.Module):
    """
    v4: Uses v3's deep memory integration with cross-chunk gradient flow.
    The base Qwen layer will be frozen in v4.

    Wraps one (frozen) Qwen decoder layer: after the base layer runs, a
    Titans NeuralMemory stores/retrieves context, and the retrieved memory
    is fused back via up to three paths (memory-augmented attention,
    context projection, gated residual). The memory state persists across
    chunk forwards until ``reset_memory_state`` is called; how far gradients
    flow back through that state is controlled by ``_gradient_steps_back``.
    """
    def __init__(
        self,
        base_layer: nn.Module,
        layer_idx: int,
        *,
        hidden_size: int,
        num_attention_heads: int,
        chunk_size: int,
        batch_size: int,
        dim_head: int,
        num_heads: int,
        memory_depth: int,
        memory_fp32: bool,
        detach_mem_state: bool,
        deep_integration: bool,
        memory_as_context: bool,
        num_memory_tokens: int,
        memory_gate_bias: float,
        use_momentum: bool,
        momentum_order: int,
        parent_model: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.layer = base_layer
        self.layer_idx = layer_idx
        self.memory_fp32 = memory_fp32
        self.detach_mem_state = bool(detach_mem_state)
        self.deep_integration = deep_integration
        self.memory_as_context = memory_as_context
        # Persistent NeuralMemState carried between chunk forwards.
        self.memory_state: Optional[NeuralMemState] = None
        # weakref avoids a reference cycle with the enclosing model.
        self.parent_model_ref = weakref.ref(parent_model) if parent_model is not None else None
        # Chunk counter for gradient flow control (v3 feature)
        self._chunk_counter = 0
        self._gradient_steps_back = 2  # Allow gradient through 2 chunks
        # Core Neural Memory module
        memory_model = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0,
        )
        self.neural_memory = NeuralMemory(
            dim=hidden_size,
            chunk_size=chunk_size,
            batch_size=batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=use_momentum,
            momentum_order=momentum_order,
            qk_rmsnorm=True,
            pre_rmsnorm=True,
            default_step_transform_max_lr=1e-2,
            init_adaptive_step_bias=-4.0,
            max_grad_norm=1.0,
            spectral_norm_surprises=True,
            use_accelerated_scan=False,
        )
        # Layer-level memory gate: maps [hidden ; retrieved] -> per-dim gate in (0, 1).
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.Sigmoid(),
        )
        # Initialize gate to be conservative (zero weight, negative bias -> gate near 0)
        nn.init.zeros_(self.mem_gate[-2].weight)
        nn.init.constant_(self.mem_gate[-2].bias, memory_gate_bias)
        # Deep attention integration (from v3)
        if deep_integration:
            self.memory_attention = MemoryAugmentedAttention(
                hidden_size=hidden_size,
                num_attention_heads=num_attention_heads,
                num_memory_tokens=num_memory_tokens,
                memory_dim_head=dim_head,
                memory_fp32=memory_fp32,
                gate_bias=memory_gate_bias,
            )
        else:
            self.memory_attention = None
        # Pre-attention memory projection (from v3); zero-initialised = starts inert.
        if memory_as_context:
            self.memory_context_proj = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.SiLU(),
                nn.Linear(hidden_size, hidden_size),
            )
            nn.init.zeros_(self.memory_context_proj[-1].weight)
            nn.init.zeros_(self.memory_context_proj[-1].bias)
        else:
            self.memory_context_proj = None
        # Move the new modules to the base layer's device/dtype
        # (memory optionally stays in fp32 for numerical stability).
        try:
            layer_device = next(base_layer.parameters()).device
            layer_dtype = next(base_layer.parameters()).dtype
        except StopIteration:
            # Base layer has no parameters; leave placement to the caller.
            layer_device = None
            layer_dtype = None
        if layer_device is not None:
            mem_dtype = torch.float32 if memory_fp32 else layer_dtype
            self.neural_memory = self.neural_memory.to(device=layer_device, dtype=mem_dtype)
        if layer_dtype is not None:
            self.mem_gate = self.mem_gate.to(device=layer_device, dtype=layer_dtype)
            if self.memory_attention is not None:
                self.memory_attention = self.memory_attention.to(device=layer_device, dtype=layer_dtype)
            if self.memory_context_proj is not None:
                self.memory_context_proj = self.memory_context_proj.to(device=layer_device, dtype=layer_dtype)

    def reset_memory_state(self):
        # Drop the persistent memory and restart the gradient-window counter.
        self.memory_state = None
        self._chunk_counter = 0

    def set_gradient_steps_back(self, steps: int):
        """Control how many chunks back gradient can flow (v3 feature)."""
        self._gradient_steps_back = steps

    def _get_store_mask(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
        """Fetch the per-token store mask the parent model stashed on itself,
        or None when absent / shape-incompatible with the current chunk."""
        parent_model = self.parent_model_ref() if self.parent_model_ref is not None else None
        if parent_model is None or not hasattr(parent_model, "_mem_store_mask"):
            return None
        store_mask = getattr(parent_model, "_mem_store_mask")
        if store_mask is None:
            return None
        store_mask = store_mask.to(device=hidden_states.device).bool()
        if store_mask.shape[:2] != hidden_states.shape[:2]:
            # Shape mismatch (e.g. stale mask from a previous chunk): ignore it.
            return None
        return store_mask

    def _should_detach_state(self) -> bool:
        """
        Determine if memory state should be detached based on chunk counter (v3 feature).
        FIXED logic:
        - If detach_mem_state=True: Always detach (blocks all cross-chunk gradients)
        - If detach_mem_state=False: Allow gradient flow through N recent chunks
          where N = _gradient_steps_back (controlled by cross_chunk_gradient_steps)
        For cross-chunk gradient flow to work, must set detach_mem_state=False!
        """
        if self.detach_mem_state:
            return True  # Legacy behavior: always detach
        # Allow gradient flow through recent chunks
        self._chunk_counter += 1
        return self._chunk_counter > self._gradient_steps_back

    def forward(self, *args, **kwargs):
        """Run the base layer, then perform a memory read/write and fuse the
        retrieved context into the hidden states. Accepts/returns the same
        tuple shape as the wrapped Qwen decoder layer."""
        # Get original layer output
        outputs = self.layer(*args, **kwargs)
        if isinstance(outputs, (tuple, list)):
            hidden_states = outputs[0]
            rest = outputs[1:]
        else:
            hidden_states = outputs
            rest = None
        # Get store mask
        full_store_mask = self._get_store_mask(hidden_states)
        # Prepare memory input (optionally upcast to fp32)
        mem_inp = hidden_states.float() if self.memory_fp32 else hidden_states
        # Prepare store sequence and mask
        store_seq = None
        store_mask = full_store_mask
        if store_mask is not None:
            store_seq = mem_inp
            # Skip first token if not the first chunk
            # (its position overlaps the previous chunk for label shifting).
            if store_mask.shape[1] > 0 and not store_mask[:, 0].any():
                store_seq = store_seq[:, 1:]
                store_mask = store_mask[:, 1:]
            # Align to chunk size: NeuralMemory stores in fixed-size chunks,
            # so trim any trailing remainder.
            store_chunk = self.neural_memory.store_chunk_size
            remainder = store_seq.shape[1] % store_chunk
            if remainder != 0:
                store_seq = store_seq[:, :-remainder]
                store_mask = store_mask[:, :-remainder]
        if store_mask is not None and store_seq is not None:
            if store_mask.shape[1] != store_seq.shape[1]:
                # Defensive re-alignment; should not normally trigger.
                min_len = min(store_mask.shape[1], store_seq.shape[1])
                store_seq = store_seq[:, :min_len]
                store_mask = store_mask[:, :min_len]
            if store_seq.shape[1] == 0:
                # Nothing left to store after trimming.
                store_seq = None
                store_mask = None
        # Memory computation context: disable autocast when running memory in fp32.
        mem_ctx = (
            torch.amp.autocast(device_type=hidden_states.device.type, enabled=False)
            if self.memory_fp32
            else nullcontext()
        )
        # Determine if we should detach memory state (v3 feature)
        should_detach = self._should_detach_state()
        with mem_ctx:
            retrieved, next_state = self.neural_memory(
                mem_inp,
                store_seq=store_seq,
                state=self.memory_state,
                store_mask=store_mask,
                detach_mem_state=should_detach,
            )
        # Carry the (possibly gradient-bearing) state into the next chunk.
        self.memory_state = next_state
        if retrieved is not None:
            retrieved = retrieved.to(dtype=hidden_states.dtype)
            # Apply store mask to retrieved memory (zero out masked positions)
            if full_store_mask is not None and full_store_mask.shape[:2] == retrieved.shape[:2]:
                retrieved = retrieved * full_store_mask.unsqueeze(-1).to(dtype=retrieved.dtype)
            # ===== v3 Deep Integration =====
            # Path 1: Memory-augmented attention (if enabled)
            if self.memory_attention is not None:
                hidden_states = self.memory_attention(
                    hidden_states=hidden_states,
                    memory_context=retrieved,
                    attention_mask=None,
                )
            # Path 2: Memory as context projection (if enabled)
            if self.memory_context_proj is not None:
                context_enhancement = self.memory_context_proj(retrieved)
                hidden_states = hidden_states + context_enhancement
            # Path 3: Layer-level gated fusion (always active)
            gate = self.mem_gate(torch.cat([hidden_states, retrieved], dim=-1))
            hidden_states = hidden_states + gate * retrieved
        if rest is None:
            return hidden_states
        return (hidden_states, *rest)
# =============================================================================
# Main Model Wrapper (v4 with frozen backbone)
# =============================================================================
class QwenTitansForBABILongV4(nn.Module):
    """
    v4: Qwen3 with deep Titans memory integration and cross-chunk gradients.
    Trains: memory modules + embed_tokens + lm_head
    Frozen: Qwen transformer layers (attention, MLP, etc.)

    Long sequences are processed in chunks of ``config.chunk_size``; the
    wrapped memory layers carry state across chunks, and the loss is a
    token-weighted mean over all supervised positions.
    """
    def __init__(self, qwen_model, config: TrainingConfig):
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.num_attention_heads = qwen_model.config.num_attention_heads
        self.use_memory = bool(getattr(config, "use_memory", True))
        if self.use_memory:
            # Wrap every `memory_layer_stride`-th decoder layer with neural memory.
            self.memory_layer_stride = int(getattr(config, "memory_layer_stride", 6))
            self.memory_layer_indices = [
                idx for idx in range(len(self.qwen.model.layers))
                if idx % self.memory_layer_stride == 0
            ]
            for layer_idx in self.memory_layer_indices:
                base_layer = self.qwen.model.layers[layer_idx]
                wrapped = QwenDecoderLayerWithDeepMemory(
                    base_layer,
                    layer_idx=layer_idx,
                    hidden_size=self.hidden_size,
                    num_attention_heads=self.num_attention_heads,
                    chunk_size=config.memory_chunk_size,
                    batch_size=config.memory_batch_size,
                    dim_head=config.memory_dim_head,
                    num_heads=config.memory_heads,
                    memory_depth=config.memory_depth,
                    memory_fp32=config.memory_fp32,
                    detach_mem_state=config.detach_mem_state,
                    deep_integration=config.deep_memory_integration,
                    memory_as_context=config.memory_as_context,
                    num_memory_tokens=config.num_memory_tokens,
                    memory_gate_bias=config.memory_gate_bias,
                    use_momentum=config.use_momentum,
                    momentum_order=config.momentum_order,
                    parent_model=self.qwen.model,
                )
                self.qwen.model.layers[layer_idx] = wrapped
        else:
            self.memory_layer_stride = 0
            self.memory_layer_indices = []
        # ===== v4 FREEZING LOGIC =====
        self._freeze_backbone()
        if self.use_memory:
            logger.info("[QwenTitansForBABILongV4] Initialized with FROZEN backbone")
            logger.info(f"  - hidden_size: {self.hidden_size}")
            logger.info(f"  - num_attention_heads: {self.num_attention_heads}")
            logger.info(f"  - chunk_size: {config.chunk_size}")
            logger.info(f"  - memory_layer_stride: {self.memory_layer_stride}")
            logger.info(f"  - memory_layers: {self.memory_layer_indices}")
            logger.info(f"  - deep_memory_integration: {config.deep_memory_integration}")
            logger.info(f"  - memory_as_context: {config.memory_as_context}")
            logger.info(f"  - detach_mem_state: {config.detach_mem_state}")
            logger.info(f"  - cross_chunk_gradient_steps: {config.cross_chunk_gradient_steps}")
        else:
            logger.info("[QwenTitansForBABILongV4] Initialized (memory disabled)")
        # Cache the wrapped layers for state resets / param grouping.
        self._memory_layers = [
            layer for layer in self.qwen.model.layers
            if isinstance(layer, QwenDecoderLayerWithDeepMemory)
        ]
        # Side-channel attribute read by the memory layers each chunk.
        self.qwen.model._mem_store_mask = None
        # Set gradient steps for cross-chunk gradient flow (v3 feature)
        for layer in self._memory_layers:
            layer.set_gradient_steps_back(config.cross_chunk_gradient_steps)

    def _freeze_backbone(self):
        """
        v4: Freeze Qwen transformer layers, keep embed_tokens + lm_head trainable.
        Trainable:
        - memory modules (neural_memory, mem_gate, memory_attention)
        - embed_tokens (for input adaptation)
        - lm_head (for output adaptation)
        Frozen:
        - All transformer layers (attention, MLP, layernorm)
        """
        frozen_count = 0
        trainable_count = 0
        embed_count = 0
        lm_head_count = 0
        # CRITICAL: Untie lm_head from embed_tokens if they share weights
        # This allows them to be trained independently
        if hasattr(self.qwen, 'lm_head') and hasattr(self.qwen.model, 'embed_tokens'):
            lm_head_weight = self.qwen.lm_head.weight
            embed_weight = self.qwen.model.embed_tokens.weight
            # Same storage pointer means the weights are tied.
            has_tied_weights = lm_head_weight.data_ptr() == embed_weight.data_ptr()
            if has_tied_weights:
                logger.info("[v4] Detected tied weights - untying lm_head from embed_tokens")
                # Create independent lm_head with copied weights
                new_lm_head = nn.Linear(
                    self.qwen.lm_head.in_features,
                    self.qwen.lm_head.out_features,
                    bias=self.qwen.lm_head.bias is not None,
                    device=lm_head_weight.device,
                    dtype=lm_head_weight.dtype,
                )
                # Copy weights
                with torch.no_grad():
                    new_lm_head.weight.copy_(lm_head_weight)
                    if self.qwen.lm_head.bias is not None and new_lm_head.bias is not None:
                        new_lm_head.bias.copy_(self.qwen.lm_head.bias)
                # Replace lm_head
                self.qwen.lm_head = new_lm_head
                logger.info(f"[v4] Created independent lm_head: {new_lm_head.weight.shape}")
        # Freeze/unfreeze parameters by name pattern.
        for name, param in self.named_parameters():
            is_memory = "neural_memory" in name or "mem_gate" in name
            is_memory_attention = "memory_attention" in name or "memory_context_proj" in name
            is_embed_tokens = "embed_tokens" in name
            is_lm_head = "lm_head" in name
            if is_memory or is_memory_attention:
                param.requires_grad = True
                trainable_count += 1
            elif is_embed_tokens:
                # embed_tokens is TRAINABLE for input adaptation
                param.requires_grad = True
                trainable_count += 1
                embed_count += 1
                logger.info(f"[v4] embed_tokens trainable: {name}")
            elif is_lm_head:
                # lm_head is TRAINABLE for output adaptation
                param.requires_grad = True
                trainable_count += 1
                lm_head_count += 1
                logger.info(f"[v4] lm_head trainable: {name}")
            else:
                # Freeze transformer layers
                param.requires_grad = False
                frozen_count += 1
        logger.info(f"[v4] Frozen {frozen_count} transformer layer parameters")
        logger.info(f"[v4] Trainable {trainable_count} parameters (memory + embed: {embed_count} + lm_head: {lm_head_count})")

    def _split_into_chunks(self, tensor, chunk_size):
        # Yield (start, end, slice) triples covering dim 1 in chunk_size steps.
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def reset_memory_states(self):
        # Clear all per-layer memory before a new sequence.
        for layer in self._memory_layers:
            layer.reset_memory_state()

    def _set_mem_store_mask(
        self,
        chunk_ids: torch.Tensor,
        chunk_mask: Optional[torch.Tensor],
        chunk_start: int,
    ) -> None:
        """Publish the store mask for the coming chunk on the inner model.

        The first token of every non-initial chunk is the overlap token from
        the previous chunk (used only for label shifting), so it is excluded
        from memory storage.
        """
        if not self.use_memory:
            self.qwen.model._mem_store_mask = None
            return
        if chunk_mask is None:
            if chunk_start > 0:
                store_mask = torch.ones_like(chunk_ids, dtype=torch.bool)
                store_mask[:, 0] = False
            else:
                store_mask = None
        else:
            store_mask = chunk_mask.to(device=chunk_ids.device).bool()
            if chunk_start > 0:
                store_mask[:, 0] = False
        self.qwen.model._mem_store_mask = store_mask

    def get_memory_modules(self) -> List[nn.Module]:
        """Return every trainable memory-related submodule (for checkpointing)."""
        if not self._memory_layers:
            return []
        modules = []
        for layer in self._memory_layers:
            modules.append(layer.neural_memory)
            modules.append(layer.mem_gate)
            if layer.memory_attention is not None:
                modules.append(layer.memory_attention)
            if layer.memory_context_proj is not None:
                modules.append(layer.memory_context_proj)
        return modules

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Chunked forward over the full sequence, or a single-chunk forward
        when chunk_start/chunk_end is given (chunkwise-backward mode).

        Returns a dict with "loss" (token-mean CE over supervised positions)
        plus optional prediction/top-k bookkeeping tensors.
        """
        # Single chunk forward (for chunkwise backward)
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
                reset_mem_state=reset_mem_state,
            )
        # Full sequence forward
        batch_size, seq_len = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)
        # Fresh memory for every new sequence.
        self.reset_memory_states()
        # Sum-reduction so chunk losses can be combined into a token-weighted mean.
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        topk_correct = None
        topk_total = None
        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)
        for start, end, _ in chunks:
            # Overlap one token with the previous chunk so label shifting
            # works at the chunk boundary.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None
            self._set_mem_store_mask(chunk_ids, chunk_mask, start)
            hidden_full = self._process_chunk(chunk_ids, chunk_mask)
            if self.use_memory:
                # Clear the side-channel so stale masks are never reused.
                self.qwen.model._mem_store_mask = None
            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                # Standard next-token shift within the chunk.
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()
                valid = shift_labels != -100
                if valid.any():
                    # Only run lm_head on supervised positions to save memory.
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]
                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)
                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    if total_loss_sum is None:
                        total_loss_sum = chunk_loss_sum
                    else:
                        total_loss_sum = total_loss_sum + chunk_loss_sum
                    total_loss_tokens += targets.numel()
                    if topk and topk > 0:
                        # Top-k accuracy over supervised positions.
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)
                    if return_pred_tokens:
                        # Collect greedy predictions per sample for later decoding.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()
                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))
        if total_loss_sum is None or total_loss_tokens == 0:
            # No supervised tokens anywhere: zero loss (no gradient graph).
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens
        out: Dict[str, torch.Tensor] = {"loss": loss}
        if return_pred_tokens:
            # Pack ragged per-sample token lists into -1-padded matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths
        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total
        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
        reset_mem_state: bool,
    ) -> Dict[str, torch.Tensor]:
        """Process one chunk and return its un-normalised loss sum + token
        count, so the caller can backward per chunk and average later."""
        if reset_mem_state:
            self.reset_memory_states()
        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))
        # One-token overlap with the previous chunk for label shifting.
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None
        self._set_mem_store_mask(chunk_ids, chunk_mask, start)
        hidden_full = self._process_chunk(chunk_ids, chunk_mask)
        if self.use_memory:
            self.qwen.model._mem_store_mask = None
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()
            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]
                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)
                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()
        if total_loss_sum is None:
            # Zero loss that still carries a gradient graph, so backward()
            # on this chunk remains valid even without supervised tokens.
            total_loss_sum = (hidden_full.float().sum() * 0.0)
        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,
        }

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Embed the chunk and run the (memory-wrapped) transformer stack,
        returning the final hidden states [batch, chunk_len, hidden]."""
        if hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)
        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def get_param_groups(self, config: TrainingConfig):
        """
        v4: Four parameter groups with different learning rates.
        - memory_core: neural_memory, mem_gate
        - memory_attention: memory_attention, memory_context_proj (if exists)
        - embed_tokens: input embeddings
        - lm_head: output head
        """
        memory_core_params = []
        memory_attention_params = []
        embed_params = []
        lm_head_params = []
        for name, param in self.named_parameters():
            if not param.requires_grad:
                continue
            if "neural_memory" in name or "mem_gate" in name:
                memory_core_params.append(param)
            elif "memory_attention" in name or "memory_context_proj" in name:
                memory_attention_params.append(param)
            elif "embed_tokens" in name:
                embed_params.append(param)
            elif "lm_head" in name:
                lm_head_params.append(param)
        param_groups = []
        # Only emit non-empty groups so the optimizer never sees empty params.
        if len(memory_core_params) > 0:
            param_groups.append({
                "params": memory_core_params,
                "lr": config.lr_memory,
                "weight_decay": config.weight_decay,
                "name": "memory_core"
            })
        if len(memory_attention_params) > 0:
            param_groups.append({
                "params": memory_attention_params,
                "lr": config.lr_memory_attention,
                "weight_decay": config.weight_decay,
                "name": "memory_attention"
            })
        if len(embed_params) > 0:
            param_groups.append({
                "params": embed_params,
                "lr": config.lr_embed,
                "weight_decay": config.weight_decay,
                "name": "embed_tokens"
            })
        if len(lm_head_params) > 0:
            param_groups.append({
                "params": lm_head_params,
                "lr": config.lr_lm_head,
                "weight_decay": config.weight_decay,
                "name": "lm_head"
            })
        logger.info(f"[v4 Param groups] memory_core={len(memory_core_params)}, "
                    f"memory_attention={len(memory_attention_params)}, "
                    f"embed_tokens={len(embed_params)}, lm_head={len(lm_head_params)}")
        return param_groups
# =============================================================================
# Distributed Training Utilities (unchanged from v3)
# =============================================================================
def init_distributed() -> tuple:
    """
    Initialize torch.distributed from torchrun-style environment variables.

    Returns:
        (is_distributed, rank, local_rank, world_size). When RANK/WORLD_SIZE
        are absent (single-process run) returns (False, 0, 0, 1) without
        touching the process group.

    Raises:
        RuntimeError: if distributed env vars are set but torch.distributed
            is unavailable in this build.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        # Not launched under torchrun — plain single-process mode.
        return False, 0, 0, 1
    rank, world_size = int(env["RANK"]), int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))
    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")
    if not dist.is_initialized():
        # NCCL backend, rendezvous via the env:// variables torchrun exports.
        dist.init_process_group(backend="nccl", init_method="env://")
    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
def cleanup_distributed():
    """Synchronize all ranks, then tear down the default process group (no-op if never initialized)."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    dist.barrier()
    dist.destroy_process_group()
def unwrap_model(model: nn.Module) -> nn.Module:
    """Return the bare model beneath a DDP or FSDP wrapper (identity for plain modules)."""
    # DDP-style wrappers expose the wrapped model as `.module`.
    if hasattr(model, "module"):
        return model.module
    # FSDP wrappers may nest the model one level deeper.
    wrapped = getattr(model, "_fsdp_wrapped_module", None)
    if wrapped is not None and hasattr(wrapped, "module"):
        return wrapped.module
    return model
def is_fsdp_model(model: nn.Module) -> bool:
    """True iff `model` is wrapped in FullyShardedDataParallel (False when FSDP is unavailable)."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel
    except Exception:
        # Old/partial torch builds without FSDP — treat as not wrapped.
        return False
    return isinstance(model, FullyShardedDataParallel)
def manual_all_reduce_gradients(model: nn.Module, world_size: int) -> None:
    """
    Manually synchronize (average) gradients across GPUs without DDP.
    This allows cross-chunk gradients to work with multi-GPU training.

    Key insight: DDP fails because it tracks parameter usage during forward.
    By not wrapping the model with DDP, we avoid this tracking.
    We then manually all-reduce gradients before optimizer step.

    Args:
        model: module whose `.grad` tensors are averaged in place.
        world_size: number of ranks; <= 1 is a no-op.

    FIX vs v3: gradients are bucketed by (dtype, device) before flattening.
    The old code packed everything into one buffer with the dtype/device of
    the FIRST gradient, silently casting mixed-precision gradients (e.g.
    fp32 memory-module grads alongside bf16 embed/lm_head grads).
    """
    if world_size <= 1:
        return
    # Bucket gradients so each flat buffer is homogeneous in dtype and device.
    buckets: Dict[Tuple[torch.dtype, torch.device], List[torch.Tensor]] = {}
    for param in model.parameters():
        if param.grad is not None:
            key = (param.grad.dtype, param.grad.device)
            buckets.setdefault(key, []).append(param.grad)
    for grads in buckets.values():
        # Flatten into a single contiguous buffer: one all_reduce per bucket
        # instead of one per tensor.
        flat_grads = torch.cat([g.reshape(-1) for g in grads])
        # All-reduce (sum) then divide by world_size to average.
        dist.all_reduce(flat_grads, op=dist.ReduceOp.SUM)
        flat_grads.div_(world_size)
        # Unpack averaged gradients back into the parameters' .grad tensors.
        offset = 0
        for grad in grads:
            numel = grad.numel()
            grad.copy_(flat_grads[offset:offset + numel].view_as(grad))
            offset += numel
# =============================================================================
# Trainer (unchanged from v3, works with v4's frozen backbone)
# =============================================================================
class Trainer:
    """
    Training / evaluation loop for v4 (frozen Qwen backbone + Titans memory).

    Supports three gradient-sync modes:
      * single GPU — no sync;
      * DDP/FSDP — automatic sync, with `no_sync` during accumulation;
      * manual all-reduce (`use_manual_grad_sync=True`) — the model is
        deliberately NOT wrapped in DDP so cross-chunk gradients can flow,
        and gradients are averaged by `manual_all_reduce_gradients`.

    Fixes vs v3:
      * `grad_norm` is initialized every step, so the logging block can no
        longer hit an unbound name when `debug_grad_norm` is on and logging
        fires before the first optimizer sync (gradient accumulation > 1).
      * fp16 + manual-sync on a single GPU no longer skips `scaler.unscale_`
        before clipping (tracked explicitly via `did_unscale`).
    """

    def __init__(
        self,
        model: QwenTitansForBABILongV4,
        train_dataloader: DataLoader,
        eval_dataloader: DataLoader,
        config: TrainingConfig,
        rank: int = 0,
        world_size: int = 1,
        is_distributed: bool = False,
        tokenizer=None,
        use_manual_grad_sync: bool = False,  # v4: for cross-chunk gradients with multi-GPU
    ):
        self.model = model
        self.train_dataloader = train_dataloader
        self.eval_dataloader = eval_dataloader
        self.config = config
        self.device = next(model.parameters()).device
        self.rank = rank
        self.world_size = world_size
        self.is_distributed = is_distributed
        self.is_main_process = (rank == 0)
        self.tokenizer = tokenizer
        self.use_manual_grad_sync = use_manual_grad_sync  # v4: manual gradient sync mode
        # Per-group learning rates are defined by the (unwrapped) model.
        base_model = unwrap_model(self.model)
        param_groups = base_model.get_param_groups(config)
        self.optimizer = AdamW(param_groups)
        # Scheduler steps once per optimizer step (per accumulation window).
        total_steps = math.ceil(
            (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1)
        )
        self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7)
        # GradScaler is only active in fp16; with bf16 it is a pass-through.
        self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
        self.global_step = 0

    def _get_group_lr(self, group_name: str) -> Optional[float]:
        """Return the current LR of the named optimizer param group, or None."""
        for group in self.optimizer.param_groups:
            if group.get("name") == group_name:
                return group.get("lr")
        return None

    def train(self):
        """Run the full training loop (epochs x batches), with periodic eval and a final checkpoint."""
        self.model.train()
        if self.is_main_process:
            logger.info("=" * 60)
            logger.info("Starting v4 training with FROZEN backbone")
            logger.info("=" * 60)
        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # Reshuffle the DistributedSampler per epoch, when present.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")
            epoch_loss = 0.0
            num_batches = 0
            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )
            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}
                ga = max(self.config.gradient_accumulation_steps, 1)
                sync_gradients = ((step + 1) % ga == 0)
                # FIX: bind grad_norm every iteration so the logging block
                # below never raises NameError on non-sync steps.
                grad_norm = None
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        # Chunkwise mode: forward+backward per chunk so the
                        # graph for the full 32k sequence never coexists.
                        labels = batch.get("labels")
                        if labels is not None:
                            # Count supervised tokens (next-token shift, -100 = ignore).
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        # Scale per-chunk loss sums so the accumulated gradient
                        # equals mean-token loss / ga.
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)
                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len))
                            for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None
                        for idx, (start, end) in enumerate(chunk_ranges):
                            is_last_chunk = (idx == len(chunk_ranges) - 1)
                            sync_chunk = sync_gradients and is_last_chunk
                            # no_sync only available when model is wrapped with DDP/FSDP
                            use_no_sync = (
                                self.is_distributed and
                                not sync_chunk and
                                not self.use_manual_grad_sync and
                                hasattr(self.model, 'no_sync')
                            )
                            chunk_ctx = self.model.no_sync if use_no_sync else nullcontext
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                if raw_loss_sum is None:
                                    raw_loss_sum = chunk_loss_sum.detach()
                                else:
                                    raw_loss_sum = raw_loss_sum + chunk_loss_sum.detach()
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()
                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Full-sequence backward (cross-chunk gradient mode).
                        # For manual grad sync mode, no_sync is not available (model not wrapped)
                        # For DDP/FSDP, use no_sync during gradient accumulation
                        use_no_sync = (
                            self.is_distributed and
                            not sync_gradients and
                            not self.use_manual_grad_sync and  # no_sync not available in manual mode
                            hasattr(self.model, 'no_sync')
                        )
                        ctx = self.model.no_sync if use_no_sync else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga
                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()
                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1
                if sync_gradients:
                    # v4: Manual gradient sync for cross-chunk gradients with multi-GPU
                    # This replaces DDP's automatic gradient sync
                    did_unscale = False
                    if self.use_manual_grad_sync and self.world_size > 1:
                        if self.config.fp16:
                            # Unscale before all-reduce so ranks average true grads.
                            self.scaler.unscale_(self.optimizer)
                            did_unscale = True
                        manual_all_reduce_gradients(self.model, self.world_size)
                    if self.config.fp16:
                        # FIX: unscale based on what actually happened above,
                        # not on the manual-sync flag alone (which skipped the
                        # unscale when world_size == 1).
                        if not did_unscale:
                            self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.optimizer.step()
                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1
                if self.is_main_process:
                    avg_loss = epoch_loss / max(num_batches, 1)
                    pbar.set_postfix({"gstep": self.global_step, "loss": f"{avg_loss:.4f}"})
                if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                    lr_mem = self._get_group_lr("memory_core") or 0.0
                    lr_embed = self._get_group_lr("embed_tokens") or 0.0
                    lr_lm_head = self._get_group_lr("lm_head") or 0.0
                    grad_note = ""
                    if self.config.debug_grad_norm and grad_norm is not None:
                        grad_note = f" | grad_norm={float(grad_norm):.4f}"
                    logger.info(
                        f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                        f"lr_mem={lr_mem:.2e} | lr_embed={lr_embed:.2e} | lr_lm_head={lr_lm_head:.2e}{grad_note}"
                    )
                if self.global_step % self.config.eval_steps == 0:
                    eval_metrics = self.evaluate()
                    if self.is_main_process:
                        logger.info(
                            f"Step {self.global_step}: "
                            f"eval_loss={eval_metrics['loss']:.4f}, "
                            f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                            f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                        )
                    self.model.train()
            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                # Average the per-rank mean loss across the world.
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss
            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()
        if self.is_main_process:
            logger.info("Training done, final evaluation")
        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")
            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
        self.save_final_checkpoint()

    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """
        Evaluate on the eval dataloader.

        Returns a dict with mean "loss", token accuracy "tok_acc", and exact
        match accuracy "em_acc" (decoded answer vs decoded label), all
        all-reduced across ranks when distributed.
        """
        self.model.eval()
        # Accumulate as device tensors so a single all_reduce per counter works.
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0
        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0
            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)
            # Only score when the model returned a consistent (pred, target,
            # length) triple; shapes are validated defensively.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)
                for i in range(int(len_cpu.shape[0])):
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]
                    total_tok_correct += torch.tensor(float((p == t).sum().item()), device=self.device, dtype=torch.float32)
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)
                    if self.tokenizer is not None:
                        # Exact match is computed on decoded, stripped text.
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)
                        if self.is_main_process and printed < print_examples:
                            logger.info(f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}")
                            printed += 1
        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)
        # clamp(min=1) guards against division by zero on empty eval sets.
        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()
        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc}

    def _append_eval_metrics(
        self,
        metrics: Dict[str, float],
        *,
        phase: str,
        epoch: Optional[int],
        train_avg_loss: Optional[float],
    ) -> None:
        """Append one JSON record to <output_dir>/eval_metrics.jsonl (rank 0 only)."""
        if not self.is_main_process:
            return
        os.makedirs(self.config.output_dir, exist_ok=True)
        record = {
            "phase": phase,
            "epoch": epoch,
            "global_step": int(self.global_step),
            "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss),
            "eval_loss": float(metrics.get("loss", 0.0)),
            "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0),
            "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0),
        }
        metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl")
        with open(metrics_path, "a") as f:
            f.write(json.dumps(record) + "\n")

    def save_final_checkpoint(self):
        """
        Save the trainable (memory/embed/lm_head) params, and optionally the
        full model state dict. Under FSDP a full (rank0-offloaded) state dict
        is gathered; all ranks must enter this method (collective barriers).
        """
        ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
        base_model = unwrap_model(self.model)
        # Save memory-related and trainable parameters
        memory_sd = {
            name: p.detach().cpu()
            for name, p in base_model.named_parameters()
            if p.requires_grad and (
                ("neural_memory" in name) or ("mem_gate" in name) or
                ("memory_attention" in name) or ("memory_context_proj" in name) or
                ("embed_tokens" in name) or ("lm_head" in name)
            )
        }
        if is_fsdp_model(self.model) and len(memory_sd) == 0:
            # Sharded params look empty on each rank; gather a full state dict.
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
            full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                full_sd = self.model.state_dict()
            memory_sd = {
                k: v for k, v in full_sd.items()
                if ("neural_memory" in k) or ("mem_gate" in k) or
                ("memory_attention" in k) or ("memory_context_proj" in k) or
                ("embed_tokens" in k) or ("lm_head" in k)
            }
        if self.is_main_process:
            torch.save(
                {"memory_state_dict": memory_sd, "global_step": self.global_step, "config": asdict(self.config)},
                ckpt_path,
            )
            logger.info(f"Saved memory checkpoint: {ckpt_path}")
        if self.is_distributed:
            dist.barrier()
        if self.config.save_full_checkpoint:
            full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
            if is_fsdp_model(self.model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                    full_sd = self.model.state_dict()
            else:
                full_sd = unwrap_model(self.model).state_dict()
            if self.is_main_process:
                torch.save(
                    {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
                    full_ckpt_path,
                )
                logger.info(f"Saved full checkpoint: {full_ckpt_path}")
            if self.is_distributed:
                dist.barrier()
# =============================================================================
# Main
# =============================================================================
def main():
    """
    Entry point: parse CLI args, build config/tokenizer/model/dataloaders,
    choose a multi-GPU strategy (FSDP, DDP, or manual grad sync for
    cross-chunk gradients), then train or run eval-only.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer
    parser = argparse.ArgumentParser(description="Qwen3 + Titans v4 - Frozen Backbone Training")
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--memory_layer_stride", type=int, default=None)
    parser.add_argument("--no_memory", action="store_true")
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")
    # v4 specific arguments
    parser.add_argument("--detach_mem_state", action="store_true",
                        help="Detach memory state (disable cross-chunk gradients)")
    parser.add_argument("--no_deep_integration", action="store_true",
                        help="Disable deep attention integration")
    parser.add_argument("--no_memory_as_context", action="store_true",
                        help="Disable memory-as-context projection")
    parser.add_argument("--cross_chunk_gradient_steps", type=int, default=None,
                        help="Number of chunks to allow gradient flow through")
    parser.add_argument("--memory_depth", type=int, default=None)
    parser.add_argument("--num_memory_tokens", type=int, default=None)
    parser.add_argument("--lr_embed", type=float, default=None,
                        help="Learning rate for embed_tokens")
    parser.add_argument("--lr_lm_head", type=float, default=None,
                        help="Learning rate for lm_head")
    parser.add_argument("--debug_grad_norm", action="store_true")
    args = parser.parse_args()
    config = TrainingConfig()
    # Apply arguments — CLI values (when given) override TrainingConfig defaults.
    if args.fsdp:
        config.use_fsdp = True
    if args.no_memory:
        config.use_memory = False
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    elif not config.use_memory:
        # Keep no-memory baseline runs in a separate default output dir.
        config.output_dir = "./outputs/qwen_babilong_no_memory_v4"
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.memory_layer_stride is not None:
        config.memory_layer_stride = int(args.memory_layer_stride)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False
    # v4 specific
    if args.detach_mem_state:
        config.detach_mem_state = True
    if args.no_deep_integration:
        config.deep_memory_integration = False
    if args.no_memory_as_context:
        config.memory_as_context = False
    if args.cross_chunk_gradient_steps is not None:
        config.cross_chunk_gradient_steps = int(args.cross_chunk_gradient_steps)
    if args.memory_depth is not None:
        config.memory_depth = int(args.memory_depth)
    if args.num_memory_tokens is not None:
        config.num_memory_tokens = int(args.num_memory_tokens)
    if args.lr_embed is not None:
        config.lr_embed = float(args.lr_embed)
    if args.lr_lm_head is not None:
        config.lr_lm_head = float(args.lr_lm_head)
    if args.debug_grad_norm:
        config.debug_grad_norm = True
    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = (rank == 0)
    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False
    # Note: gradient_checkpointing is REQUIRED for full-sequence backward with 32k tokens
    # Keeping it enabled even with DDP for v4's cross-chunk gradient mode
    if is_distributed and (not config.use_fsdp) and config.gradient_checkpointing:
        if is_main:
            logger.info("gradient_checkpointing enabled for cross-chunk gradient mode")
    if is_distributed and (not config.use_fsdp):
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters.")
    # Per-rank seed so data shuffling differs across ranks deterministically.
    torch.manual_seed(config.seed + rank)
    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")
    # Fall back from bf16 to fp16 on GPUs without bf16 support.
    if torch.cuda.is_available() and config.bf16:
        bf16_supported = False
        try:
            bf16_supported = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_supported = False
        if not bf16_supported:
            if is_main:
                logger.warning("bf16 not supported; falling back to fp16.")
            config.bf16 = False
            if not config.fp16:
                config.fp16 = True
    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass
    if is_main:
        logger.info("=" * 70)
        logger.info("Qwen3-4B + Titans v4 Training (FROZEN BACKBONE)")
        logger.info("=" * 70)
        logger.info(f"distributed={is_distributed}, world_size={world_size}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"num_epochs={config.num_epochs}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info(f"use_memory={config.use_memory}")
        if config.use_memory:
            logger.info(f"memory_layer_stride={config.memory_layer_stride}")
            logger.info(f"memory_depth={config.memory_depth}")
            logger.info(f"deep_memory_integration={config.deep_memory_integration}")
            logger.info(f"memory_as_context={config.memory_as_context}")
            logger.info(f"detach_mem_state={config.detach_mem_state}")
            logger.info(f"cross_chunk_gradient_steps={config.cross_chunk_gradient_steps}")
            logger.info(f"num_memory_tokens={config.num_memory_tokens}")
        logger.info("=" * 70)
        logger.info("v4 FEATURE: Cross-chunk gradients enabled")
        logger.info(f"chunkwise_backward={config.chunkwise_backward}, detach_mem_state={config.detach_mem_state}")
        logger.info("Trainable: Memory + embed_tokens + lm_head")
        logger.info("=" * 70)
    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    # Disable flash-attn detection
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils
        def _disabled(*args, **kwargs):
            return False
        # Monkey-patch the availability probes so transformers never tries to
        # import flash-attn / torchao / torchvision in this environment.
        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled
        if hasattr(_import_utils, "is_torchao_available"):
            _import_utils.is_torchao_available = _disabled
        if hasattr(_import_utils, "is_torchvision_available"):
            _import_utils.is_torchvision_available = _disabled
    except Exception as e:
        if is_main:
            logger.warning(f"Disable checks failed (ignored): {e}")
    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)
    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    # KV cache is useless during training and conflicts with checkpointing.
    qwen_model.config.use_cache = False
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()
    train_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )
    # Deterministic 90/10 train/eval split (seeded generator).
    train_size = int(0.9 * len(train_dataset))
    eval_size = len(train_dataset) - train_size
    train_dataset, eval_dataset = torch.utils.data.random_split(
        train_dataset,
        [train_size, eval_size],
        generator=torch.Generator().manual_seed(config.seed),
    )
    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed)
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    model = QwenTitansForBABILongV4(qwen_model, config)
    model.to(device)
    # ==========================================================================
    # v4 Multi-GPU Strategy for Cross-Chunk Gradients
    # ==========================================================================
    # DDP is incompatible with cross-chunk gradients because it tracks parameter
    # usage during forward. Memory modules are used multiple times across chunks,
    # causing "ready twice" errors.
    #
    # Solution: Manual gradient synchronization
    # - Don't wrap model with DDP when chunkwise_backward=False
    # - Manually call all_reduce on gradients before optimizer step
    # - This allows true cross-chunk gradient flow with multi-GPU training
    # ==========================================================================
    use_ddp = is_distributed and world_size > 1
    use_manual_grad_sync = False  # Track if we're using manual sync
    if use_ddp and not config.chunkwise_backward:
        # Cross-chunk gradients mode: use manual gradient sync instead of DDP
        if is_main:
            logger.info("=" * 70)
            logger.info("Cross-chunk gradients with multi-GPU: using MANUAL gradient sync")
            logger.info("Model NOT wrapped with DDP - gradients will be all-reduced manually")
            logger.info("=" * 70)
        use_ddp = False  # Don't wrap with DDP
        use_manual_grad_sync = True  # Use manual gradient sync instead
        # Keep is_distributed=True for DistributedSampler to work
    if use_ddp:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer
            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            # Wrap each decoder layer (plain and memory-augmented) as its own
            # FSDP unit; memory modules stay unsharded (ignored_modules).
            auto_wrap = partial(
                transformer_auto_wrap_policy,
                transformer_layer_cls={Qwen3DecoderLayer, QwenDecoderLayerWithDeepMemory}
            )
            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
                ignored_modules=model.get_memory_modules(),
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )
    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,  # Keep True for DistributedSampler
        tokenizer=tokenizer,
        use_manual_grad_sync=use_manual_grad_sync,  # v4: manual gradient sync for cross-chunk
    )
    if args.eval_only:
        # Eval-only path: restore trainable (memory) weights, evaluate, exit.
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")
        memory_sd = ckpt.get("memory_state_dict", {})
        if len(memory_sd) > 0:
            unwrap_model(model).load_state_dict(memory_sd, strict=False)
        eval_metrics = trainer.evaluate()
        if is_main:
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return
    trainer.train()
    cleanup_distributed()
# Standard script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()