src/maxtext/common/common_types.py (3 changes: 0 additions & 3 deletions)

@@ -34,12 +34,10 @@
 BATCH = "activation_batch"

 ATTN_LENGTH = "activation_attn_length"
-ATTN_LENGTH_NO_EXP = "activation_attn_length_no_exp"

 LENGTH = "activation_length"
 PREFILL_LENGTH = "prefill_activation_length"
 Q_LENGTH = "activation_q_length"
-Q_LENGTH_NO_EXP = "activation_q_length_no_exp"
 Q_LORA_UP_PROJ = "q_lora_up_proj"
 KV_LENGTH = "activation_kv_length"
 KV_LORA_UP_PROJ = "kv_lora_up_proj"
@@ -48,7 +46,6 @@
 HEAD = "activation_heads"
 PREFILL_KV_BATCH = "activation_prefill_kv_batch"
 KV_BATCH = "activation_kv_batch"
-KV_BATCH_NO_EXP = "activation_kv_batch_no_exp"
 KV_HEAD = "activation_kv_heads"
 KV_HEAD_DIM = "activation_kv_head_dim"
 D_KV = "activation_kv"
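Note: the three deleted constants are logical axis names. With the `_no_exp` variants gone, callers shard under the plain names and the logical_axis_rules alone decide whether the 'expert' mesh axis participates. A minimal sketch of how Flax resolves such names (illustrative rules, not the full MaxText set):

import flax.linen as nn

# Illustrative subset of logical_axis_rules in the post-PR style.
rules = (
    ("activation_batch", ("data", "fsdp")),
    ("activation_q_length", ("context",)),
)

with nn.logical_axis_rules(rules):
  # Resolves each logical dim name to its mesh axes:
  # PartitionSpec(('data', 'fsdp'), 'context')
  print(nn.logical_to_mesh_axes(("activation_batch", "activation_q_length")))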
src/maxtext/configs/base.yml (27 changes: 9 additions & 18 deletions)

@@ -454,8 +454,7 @@ custom_mesh_and_rule: "" # replace default mesh and logical rule by specifying y
 mesh_axes: ['diloco', 'data', 'stage', 'fsdp', 'fsdp_transpose', 'sequence', 'context', 'context_autoregressive', 'tensor', 'tensor_transpose', 'tensor_sequence', 'expert', 'autoregressive']
 logical_axis_rules: [
   ['activation_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert']],
-  ['activation_batch_moe', ['data', 'fsdp', 'fsdp_transpose', 'expert']],
-  ['activation_batch_no_exp_moe', ['data', 'fsdp', 'fsdp_transpose']],
+  ['activation_batch_moe', ['data', 'fsdp', 'fsdp_transpose']],
   ['activation_embed_and_logits_batch', ['data', 'stage', 'fsdp', 'fsdp_transpose', 'expert']],
   ['activation_embed_and_logits_batch_sequence', ['data', 'stage', 'fsdp', 'fsdp_transpose', 'sequence', 'context', 'expert']],
   ['activation_heads', ['tensor', 'tensor_transpose', 'sequence','tensor_sequence','autoregressive']],
@@ -468,19 +467,18 @@ logical_axis_rules: [
   ['activation_length_moe', ['context']],
   ['activation_norm_length', ['tensor_sequence', 'context', 'sequence']],
   ['activation_norm_length_moe', ['tensor_sequence', 'context', 'sequence']],
-  ['activation_q_length', ['context', 'expert']],
-  ['activation_q_length_no_exp', ['context']],
+  ['activation_q_length', ['context']],
   ['prefill_activation_length', ['sequence', 'context']],
   ['prefill_activation_norm_length', ['tensor_sequence', 'context', 'sequence']],
   ['activation_kv_length', []],
   ['activation_attn_embed', ['tensor', 'tensor_transpose']],
   ['activation_embed', ['tensor', 'tensor_transpose']],
   ['activation_embed_moe', ['tensor', 'tensor_transpose']],
   ['activation_mlp', ['tensor', 'tensor_transpose', 'tensor_sequence']],
+  ['activation_mlp_moe', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['activation_kv', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['activation_prefill_kv_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert']],
-  ['activation_kv_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert']],
-  ['activation_kv_batch_no_exp', ['data', 'fsdp', 'fsdp_transpose']],
+  ['activation_kv_batch', ['data', 'fsdp', 'fsdp_transpose']],
   ['activation_kv_head_dim', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['activation_vocab', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['activation_vocab', ['tensor', 'tensor_transpose']],
@@ -491,6 +489,7 @@ logical_axis_rules: [
   ['decode_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert']],
   ['decode_length', ['sequence']],
   ['mlp', ['fsdp_transpose', 'tensor', 'tensor_sequence', 'autoregressive']],
+  ['mlp_moe', ['fsdp_transpose', 'tensor', 'tensor_sequence', 'autoregressive']],
   ['mlp_no_fsdp', ['tensor', 'tensor_sequence', 'autoregressive']],
   ['vocab', ['tensor', 'tensor_transpose', 'tensor_sequence', 'autoregressive']],
   ['heads', ['tensor', 'tensor_transpose', 'tensor_sequence', 'autoregressive']],
@@ -500,18 +499,10 @@ logical_axis_rules: [
   ['embed', ['fsdp', 'sequence', 'tensor_transpose', 'context' , 'expert']],
   ['embed', ['fsdp', 'fsdp_transpose', 'sequence', 'context', 'expert']],
   ['embed', ['fsdp', 'sequence', 'context', 'expert']],
-  ['embed_no_exp', ['fsdp', 'fsdp_transpose', 'sequence', 'tensor_transpose', 'context']],
-  ['embed_no_exp', ['fsdp', 'sequence', 'tensor_transpose', 'context']],
-  ['embed_no_exp', ['fsdp', 'fsdp_transpose', 'sequence', 'context']],
-  ['embed_no_exp', ['fsdp', 'sequence', 'context']],
-  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'tensor_transpose', 'context', 'expert']],
-  ['embed_moe', ['fsdp', 'sequence', 'tensor_transpose', 'context' , 'expert']],
-  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context', 'expert']],
-  ['embed_moe', ['fsdp', 'sequence', 'context', 'expert']],
-  ['embed_no_exp_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'tensor_transpose', 'context']],
-  ['embed_no_exp_moe', ['fsdp', 'sequence', 'tensor_transpose', 'context']],
-  ['embed_no_exp_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context']],
-  ['embed_no_exp_moe', ['fsdp', 'sequence', 'context']],
+  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'tensor_transpose', 'context']],
+  ['embed_moe', ['fsdp', 'sequence', 'tensor_transpose', 'context']],
+  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context']],
+  ['embed_moe', ['fsdp', 'sequence', 'context']],
   ['embed_tensor_transpose', ['tensor_transpose']],
   ['q_lora', ['fsdp', 'fsdp_transpose', 'sequence', 'context', 'tensor_transpose', 'expert']],
   ['q_lora', ['fsdp', 'sequence', 'context', 'tensor_transpose', 'expert']],
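The repeated 'embed_moe' entries above are not redundant: Flax applies logical_axis_rules first-fit, skipping any rule whose mesh axes are already claimed by another dimension of the same array, so later duplicates act as ordered fallbacks. A self-contained sketch of that mechanism (rule values trimmed for illustration):

import flax.linen as nn

rules = (
    ("mlp_moe", ("fsdp_transpose", "tensor", "tensor_sequence")),
    ("embed_moe", ("fsdp", "fsdp_transpose", "sequence", "tensor_transpose", "context")),
    ("embed_moe", ("fsdp", "sequence", "tensor_transpose", "context")),
)

with nn.logical_axis_rules(rules):
  # 'fsdp_transpose' is claimed by the mlp_moe dimension first, so the first
  # embed_moe rule is skipped and the fallback without that axis applies:
  # PartitionSpec(('fsdp', 'sequence', 'tensor_transpose', 'context'),
  #               ('fsdp_transpose', 'tensor', 'tensor_sequence'))
  print(nn.logical_to_mesh_axes(("embed_moe", "mlp_moe")))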
src/maxtext/configs/custom_mesh_and_rule/pipeline-large-moe.yml (18 changes: 8 additions & 10 deletions)

@@ -30,38 +30,36 @@ mesh_axes: ['data', 'stage', 'fsdp', 'context', 'tensor', 'expert']
 data_sharding: [['data', 'stage', 'fsdp', 'context', 'tensor', 'expert']]
 logical_axis_rules: [
   ['activation_batch', ['data', 'fsdp', 'expert']],
-  ['activation_batch_moe', ['data', 'fsdp', 'expert']],
-  ['activation_batch_no_exp_moe', ['data', 'fsdp']],
+  ['activation_batch_moe', ['data', 'fsdp']],
   ['activation_embed_and_logits_batch', ['data', 'stage', 'fsdp', 'expert']],
   ['activation_embed_and_logits_batch_sequence', ['data', 'stage', 'fsdp', 'context', 'expert']],
   ['activation_heads', ['tensor']],
   ['activation_kv_heads', ['tensor']],
-  ['activation_length', ['context', 'expert']],
-  ['activation_attn_length', ['context', 'expert']],
-  ['activation_q_length', ['context', 'expert']],
+  ['activation_length', ['context']],
+  ['activation_attn_length', ['context']],
+  ['activation_q_length', ['context']],
   ['activation_attn_embed', ['tensor']],
   ['activation_norm_length', ['context']],
   ['activation_norm_length_moe', ['context']],
   ['activation_embed', ['tensor']],
   ['activation_embed_moe', ['tensor']],
   ['activation_mlp', ['tensor']],
+  ['activation_mlp_moe', ['tensor']],
   ['activation_kv', ['tensor']],
-  ['activation_kv_batch', ['data', 'fsdp', 'expert']],
-  ['activation_kv_batch_no_exp', ['data', 'fsdp']],
+  ['activation_kv_batch', ['data', 'fsdp']],
   ['activation_kv_head_dim', ['tensor']],
   ['activation_vocab', ['tensor']],
   ['activation_stage', 'stage'],
   ['activation_exp', ['expert']],
   ['mlp', ['tensor']],
+  ['mlp_moe', ['tensor']],
   ['mlp_no_fsdp', ['tensor']],
   ['vocab', ['tensor']],
   ['heads', ['tensor']],
   ['q_heads', ['tensor']],
   ['kv_heads', ['tensor']],
   ['embed', ['fsdp', 'expert']], # remove context from embed sharding
-  ['embed_moe', ['fsdp', 'expert']],
-  ['embed_no_exp', ['fsdp']],
-  ['embed_no_exp_moe', ['fsdp']],
+  ['embed_moe', ['fsdp']],
   ['q_lora', ['fsdp']],
   ['kv_lora', ['fsdp']],
   ['norm', ['tensor']],
src/maxtext/configs/custom_mesh_and_rule/pure-fsdp.yml (4 changes: 0 additions & 4 deletions)

@@ -19,17 +19,13 @@ data_sharding: [['fsdp']]
 logical_axis_rules: [
   ['activation_batch', ['fsdp']],
   ['activation_batch_moe', ['fsdp']],
-  ['activation_batch_no_exp_moe', ['fsdp']],
   ['activation_embed_and_logits_batch', ['fsdp']],
   ['activation_embed_and_logits_batch_sequence', ['fsdp']],
   ['activation_prefill_kv_batch', ['fsdp']],
   ['activation_kv_batch', ['fsdp']],
-  ['activation_kv_batch_no_exp', ['fsdp']],
   ['decode_batch', ['fsdp']],
   ['embed', ['fsdp']],
-  ['embed_no_exp', ['fsdp']],
   ['embed_moe', ['fsdp']],
-  ['embed_no_exp_moe', ['fsdp']],
   ['q_lora', ['fsdp']],
   ['kv_lora', ['fsdp']],
   ['exp_with_fsdp', 'fsdp'],
src/maxtext/configs/inference/inference.yml (10 changes: 6 additions & 4 deletions)

@@ -12,6 +12,7 @@ logical_axis_rules: [
   ['activation_norm_length', ['tensor_sequence', 'sequence']],
   ['activation_embed', ['tensor_transpose']],
   ['activation_mlp', ['tensor', 'tensor_transpose', 'tensor_sequence']],
+  ['activation_mlp_moe', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['activation_kv', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['activation_prefill_kv_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert']],
   ['activation_kv_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert', 'context_autoregressive']],
@@ -25,6 +26,7 @@ logical_axis_rules: [
   ['decode_batch', ['data', 'fsdp', 'fsdp_transpose', 'expert', 'context_autoregressive']],
   ['decode_length', []],
   ['mlp', ['fsdp_transpose', 'tensor', 'tensor_sequence', 'autoregressive']],
+  ['mlp_moe', ['fsdp_transpose', 'tensor', 'tensor_sequence', 'autoregressive']],
   ['vocab', ['tensor', 'tensor_transpose', 'tensor_sequence', 'autoregressive','context_autoregressive']],
   ['heads', ['tensor', 'tensor_transpose', 'tensor_sequence', 'autoregressive']],
   ['q_heads', ['tensor', 'tensor_transpose', 'tensor_sequence', 'autoregressive']],
@@ -33,10 +35,10 @@ logical_axis_rules: [
   ['embed', ['fsdp', 'sequence', 'tensor_transpose', 'expert']],
   ['embed', ['fsdp', 'fsdp_transpose', 'sequence', 'expert']],
   ['embed', ['fsdp', 'sequence', 'expert']],
-  ['embed_no_exp', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive', 'tensor_transpose']],
-  ['embed_no_exp', ['fsdp', 'sequence', 'context_autoregressive', 'tensor_transpose']],
-  ['embed_no_exp', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive']],
-  ['embed_no_exp', ['fsdp', 'sequence', 'context_autoregressive']],
+  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive', 'tensor_transpose']],
+  ['embed_moe', ['fsdp', 'sequence', 'context_autoregressive', 'tensor_transpose']],
+  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive']],
+  ['embed_moe', ['fsdp', 'sequence', 'context_autoregressive']],
   ['norm', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['layers', 'stage'],
   ['kv', []],
src/maxtext/configs/inference/vllm.yml (16 changes: 6 additions & 10 deletions)

@@ -31,13 +31,11 @@ mesh_axes: ['data', 'attn_dp', 'model', 'expert', 'attn_dp_expert']
 logical_axis_rules: [
   ['activation_batch', ['data']],
   ['activation_batch_moe', []],
-  ['activation_batch_no_exp_moe', []],
   ['activation_embed_and_logits_batch', ['data', 'expert']],
   ['activation_embed_and_logits_batch_sequence', ['data', 'expert']],
   ['activation_heads', ['model', 'expert']],
   ['activation_kv_heads', ['model', 'expert']],
-  ['activation_attn_length', ['expert']],
-  ['activation_attn_length_no_exp', []],
+  ['activation_attn_length', []],
   ['activation_length', ['data']],
   ['activation_length_moe', ['data', 'expert']],
   ['activation_length_moe', 'data'],
@@ -46,10 +44,10 @@ logical_axis_rules: [
   ['activation_embed', ['model', 'attn_dp']],
   ['activation_embed_moe', ['model', 'attn_dp']],
   ['activation_mlp', ['model', 'attn_dp']],
+  ['activation_mlp_moe', ['model', 'attn_dp']],
   ['activation_kv', ['model']],
   ['activation_prefill_kv_batch', ['expert', 'attn_dp_expert']],
-  ['activation_kv_batch', ['data', 'expert', 'attn_dp_expert']],
-  ['activation_kv_batch_no_exp', ['data']],
+  ['activation_kv_batch', ['data']],
   ['activation_kv_head_dim', ['model']],
   ['activation_vocab', ['model', 'attn_dp']],
   ['activation_norm_length', []],
@@ -58,8 +56,8 @@ logical_axis_rules: [
   ['decode_batch', ['expert', 'attn_dp_expert']],
   ['decode_length', []],
   ['mlp', ['model', 'attn_dp']],
+  ['mlp_moe', ['model', 'attn_dp']],
   ['mlp_no_fsdp', ['model', 'attn_dp']],
-  ['moe_mlp', ['model', 'attn_dp']],
   ['vocab', ['model', 'attn_dp']],
   ['heads', ['model']],
   ['q_heads', ['model', 'expert']],
@@ -68,11 +66,9 @@ logical_axis_rules: [
   ['kv', []],
   ['embed', ['expert', 'attn_dp_expert']],
   ['embed', ['attn_dp_expert']],
-  ['embed_moe', ['expert', 'attn_dp_expert']],
-  ['embed_moe', ['attn_dp_expert']],
+  ['embed_moe', []],
+  ['embed_moe', []],
   ['embed_tensor_transpose', ['attn_dp', 'model']],
-  ['embed_no_exp', []],
-  ['embed_no_exp_moe', []],
   ['q_lora', ['expert', 'attn_dp_expert']],
   ['kv_lora', ['expert', 'attn_dp_expert']],
   ['norm', []],
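One way to double-check a rename sweep like this (moe_mlp → mlp_moe, *_no_exp removed) is a quick script over each touched config; a hypothetical check, not part of the PR:

import yaml

with open("src/maxtext/configs/inference/vllm.yml") as f:
  cfg = yaml.safe_load(f)

# Flag any rule still using a removed or pre-rename logical axis name.
stale = [rule for rule in cfg["logical_axis_rules"]
         if "_no_exp" in rule[0] or rule[0] == "moe_mlp"]
assert not stale, f"stale axis rules remain: {stale}"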
src/maxtext/configs/models/deepseek3-671b-2dfsdp.yml (2 changes: 0 additions & 2 deletions)

@@ -70,8 +70,6 @@ logical_axis_rules: [
   ['activation_stage', 'stage'],
   ['embed', ['fsdp']],
   ['embed_moe', ['fsdp']],
-  ['embed_no_exp', ['fsdp']],
-  ['embed_no_exp_moe', ['fsdp']],
   ['q_lora', ['fsdp']],
   ['kv_lora', ['fsdp']],
   ['layers', 'stage'],
src/maxtext/configs/models/deepseek3-671b-batchsplit.yml (2 changes: 0 additions & 2 deletions)

@@ -70,8 +70,6 @@ logical_axis_rules: [
   ['activation_stage', 'stage'],
   ['embed', ['fsdp']],
   ['embed_moe', ['fsdp']],
-  ['embed_no_exp', ['fsdp']],
-  ['embed_no_exp_moe', ['fsdp']],
   ['q_lora', ['fsdp']],
   ['kv_lora', ['fsdp']],
   ['layers', 'stage'],
src/maxtext/configs/post_train/rl_mt_jt.yml (8 changes: 4 additions & 4 deletions)

@@ -49,10 +49,10 @@ logical_axis_rules: [
   ['embed', ['fsdp', 'sequence', 'tensor_transpose', 'expert']],
   ['embed', ['fsdp', 'fsdp_transpose', 'sequence', 'expert']],
   ['embed', ['fsdp', 'sequence', 'expert']],
-  ['embed_no_exp', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive', 'tensor_transpose']],
-  ['embed_no_exp', ['fsdp', 'sequence', 'context_autoregressive', 'tensor_transpose']],
-  ['embed_no_exp', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive']],
-  ['embed_no_exp', ['fsdp', 'sequence', 'context_autoregressive']],
+  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive', 'tensor_transpose']],
+  ['embed_moe', ['fsdp', 'sequence', 'context_autoregressive', 'tensor_transpose']],
+  ['embed_moe', ['fsdp', 'fsdp_transpose', 'sequence', 'context_autoregressive']],
+  ['embed_moe', ['fsdp', 'sequence', 'context_autoregressive']],
   ['norm', ['tensor', 'tensor_transpose', 'tensor_sequence']],
   ['layers', 'stage'],
   ['kv', []],
src/maxtext/layers/attention_op.py (10 changes: 5 additions & 5 deletions)

@@ -63,7 +63,7 @@
     MODEL_MODE_PREFILL,
     MODEL_MODE_TRAIN,
     PREFILL_LENGTH,
-    Q_LENGTH_NO_EXP,
+    Q_LENGTH,
 )
 from maxtext.inference import page_manager
 from maxtext.inference.kvcache import KVQuant, KVTensor
@@ -1134,13 +1134,13 @@ def tpu_flash_attention(
     segment_axis_names_kv = None
     sink_axis_names = self._logical_to_mesh_axes((HEAD,))
     if decoder_segment_ids is not None:
-      segment_axis_names_q = self._logical_to_mesh_axes((BATCH, Q_LENGTH_NO_EXP))
+      segment_axis_names_q = self._logical_to_mesh_axes((BATCH, Q_LENGTH))
       segment_axis_names_kv = self._logical_to_mesh_axes((BATCH, KV_LENGTH))

     axis_names_splash_kernel = self._logical_to_mesh_axes(self.flash_axis_names_splash_kernel)
     axis_names_q = self._logical_to_mesh_axes(self.flash_axis_names_q)
     axis_names_kv = self._logical_to_mesh_axes(self.flash_axis_names_kv)
-    indexer_mask_axis_names = self._logical_to_mesh_axes((BATCH, Q_LENGTH_NO_EXP, KV_LENGTH))
+    indexer_mask_axis_names = self._logical_to_mesh_axes((BATCH, Q_LENGTH, KV_LENGTH))

     global global_block_q, global_block_kv, global_block_kv_compute, global_block_q_dkv, global_block_kv_dkv
     global global_block_kv_dkv_compute, global_block_q_dq, global_block_kv_dq, global_use_fused_bwd_kernel
@@ -1269,11 +1269,11 @@ def wrap_splash_kernel(single_head_mask):
         return splash_kernel

       splash_kernel = wrap_splash_kernel(single_head_mask)
-      segment_axis_names_splash_kernel = self._logical_to_mesh_axes((Q_LENGTH_NO_EXP,))
+      segment_axis_names_splash_kernel = self._logical_to_mesh_axes((Q_LENGTH,))
     elif self.config.use_jax_splash and self.config.expert_shard_attention_option == EP_AS_FSDP:
       if self.config.use_max_logit_estimate > 0:
         sa_config = dataclasses.replace(sa_config, max_logit_const=self.config.use_max_logit_estimate)
       segment_axis_names_splash_kernel = nn.logical_to_mesh_axes((Q_LENGTH,))
     else:
       # Create multi-head mask
       multi_head_mask = splash_attention_mask.MultiHeadMask(masks=(mask,) * query.shape[1])
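The net effect in the attention path: segment-id and indexer-mask arrays previously partitioned under Q_LENGTH_NO_EXP now resolve through the single activation_q_length rule. A rough sketch of the lookup these calls presumably perform (self._logical_to_mesh_axes is assumed here to delegate to Flax's rule resolution):

import flax.linen as nn

BATCH = "activation_batch"
Q_LENGTH = "activation_q_length"

# Subset of the post-PR base.yml rules for the two names used above.
rules = (
    ("activation_batch", ("data", "fsdp", "fsdp_transpose", "expert")),
    ("activation_q_length", ("context",)),
)

with nn.logical_axis_rules(rules):
  # segment_axis_names_q now resolves to
  # PartitionSpec(('data', 'fsdp', 'fsdp_transpose', 'expert'), 'context')
  print(nn.logical_to_mesh_axes((BATCH, Q_LENGTH)))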