"""Unit tests for the Qwen3MoeArchitectureAdapter.

All tests build TransformerBridgeConfig instances programmatically, so no
network access or model downloads are required.
"""

import pytest

from transformer_lens.config import TransformerBridgeConfig
from transformer_lens.conversion_utils.conversion_steps.rearrange_tensor_conversion import (
    RearrangeTensorConversion,
)
from transformer_lens.conversion_utils.param_processing_conversion import (
    ParamProcessingConversion,
)
from transformer_lens.factories.architecture_adapter_factory import (
    SUPPORTED_ARCHITECTURES,
)
from transformer_lens.model_bridge.generalized_components import (
    MoEBridge,
    RMSNormalizationBridge,
)
from transformer_lens.model_bridge.supported_architectures.qwen3_moe import (
    Qwen3MoeArchitectureAdapter,
)

@pytest.fixture
def cfg() -> TransformerBridgeConfig:
    return TransformerBridgeConfig(
        d_model=64,
        d_head=16,
        n_layers=2,
        n_ctx=128,
        n_heads=4,
        n_key_value_heads=2,
        d_vocab=256,
        architecture="Qwen3MoeForCausalLM",
    )
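
# With n_heads=4 and n_key_value_heads=2 this is a grouped-query (GQA) layout:
# each KV head is shared by n_heads / n_key_value_heads = 2 query heads, and
# d_model = n_heads * d_head = 64. The rearrange tests below depend on exactly
# these numbers.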


@pytest.fixture
def adapter(cfg: TransformerBridgeConfig) -> Qwen3MoeArchitectureAdapter:
    return Qwen3MoeArchitectureAdapter(cfg)


class TestQwen3MoeAdapterConfig:
    def test_normalization_type_is_rms(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        assert adapter.cfg.normalization_type == "RMS"

    def test_positional_embedding_type_is_rotary(
        self, adapter: Qwen3MoeArchitectureAdapter
    ) -> None:
        assert adapter.cfg.positional_embedding_type == "rotary"

    def test_final_rms_is_true(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        """Qwen3MoE uses final_rms=True; OLMoE uses False."""
        assert adapter.cfg.final_rms is True

    def test_gated_mlp_is_true(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        assert adapter.cfg.gated_mlp is True

    def test_uses_rms_norm_is_true(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        assert adapter.cfg.uses_rms_norm is True

    def test_attn_implementation_is_eager(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        assert adapter.cfg.attn_implementation == "eager"

    def test_default_prepend_bos_is_false(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        assert adapter.cfg.default_prepend_bos is False

    def test_n_kv_heads_propagated(self) -> None:
        """n_key_value_heads from the loaded config is preserved."""
        cfg = TransformerBridgeConfig(
            d_model=64,
            d_head=16,
            n_layers=2,
            n_ctx=128,
            n_heads=4,
            n_key_value_heads=2,
            d_vocab=256,
            architecture="Qwen3MoeForCausalLM",
        )
        adapter = Qwen3MoeArchitectureAdapter(cfg)
        assert adapter.cfg.n_key_value_heads == 2


class TestQwen3MoeWeightConversions:
    def test_has_qkvo_keys(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        convs = adapter.weight_processing_conversions
        assert convs is not None
        assert "blocks.{i}.attn.q.weight" in convs
        assert "blocks.{i}.attn.k.weight" in convs
        assert "blocks.{i}.attn.v.weight" in convs
        assert "blocks.{i}.attn.o.weight" in convs
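
    # The "{i}" in these keys is a per-layer placeholder; the conversion
    # machinery expands it for each of cfg.n_layers when processing a real
    # state dict (the expansion itself is not exercised in these unit tests).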

    def test_q_rearrange_uses_n_heads(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        """Q rearrange uses n_heads (4)."""
        convs = adapter.weight_processing_conversions
        assert convs is not None
        q_conv = convs["blocks.{i}.attn.q.weight"]
        assert isinstance(q_conv, ParamProcessingConversion)
        assert isinstance(q_conv.tensor_conversion, RearrangeTensorConversion)
        axes = q_conv.tensor_conversion.axes_lengths
        assert axes.get("n") == 4

    def test_kv_rearrange_uses_n_kv_heads(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        """K/V rearrange uses n_key_value_heads (2) for GQA."""
        convs = adapter.weight_processing_conversions
        assert convs is not None
        k_conv = convs["blocks.{i}.attn.k.weight"]
        v_conv = convs["blocks.{i}.attn.v.weight"]
        assert isinstance(k_conv, ParamProcessingConversion)
        assert isinstance(v_conv, ParamProcessingConversion)
        assert isinstance(k_conv.tensor_conversion, RearrangeTensorConversion)
        assert isinstance(v_conv.tensor_conversion, RearrangeTensorConversion)
        assert k_conv.tensor_conversion.axes_lengths.get("n") == 2
        assert v_conv.tensor_conversion.axes_lengths.get("n") == 2
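
    # A rough sketch of the layout change under test, assuming HF's fused
    # (n_kv_heads * d_head, d_model) K/V weights and an einops pattern along
    # the lines of "(n h) m -> n m h" (the exact pattern string used by the
    # adapter is an assumption, not asserted here):
    #
    #     import torch
    #     from einops import rearrange
    #
    #     w_k = torch.randn(2 * 16, 64)                     # HF: (n_kv * d_head, d_model)
    #     w_k_tl = rearrange(w_k, "(n h) m -> n m h", n=2)  # TL: (n_kv, d_model, d_head)
    #     assert w_k_tl.shape == (2, 64, 16)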

    def test_o_rearrange_uses_n_heads(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        """O rearrange uses n_heads (4)."""
        convs = adapter.weight_processing_conversions
        assert convs is not None
        o_conv = convs["blocks.{i}.attn.o.weight"]
        assert isinstance(o_conv, ParamProcessingConversion)
        assert isinstance(o_conv.tensor_conversion, RearrangeTensorConversion)
        assert o_conv.tensor_conversion.axes_lengths.get("n") == 4
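
    # Unlike Q/K/V, HF's o_proj fuses heads on the input side, giving a weight
    # of shape (d_model, n_heads * d_head), so the W_O rearrange runs the other
    # way, roughly "m (n h) -> n h m" with n=4 (again an assumed pattern string).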


class TestQwen3MoeComponentMapping:
    def test_has_required_top_level_keys(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        for key in ("embed", "rotary_emb", "blocks", "ln_final", "unembed"):
            assert key in mapping, f"Missing top-level key: {key!r}"

    def test_blocks_has_required_submodules(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        blocks = mapping["blocks"]
        for key in ("ln1", "ln2", "attn", "mlp"):
            assert key in blocks.submodules, f"Missing blocks submodule: {key!r}"

    def test_attn_has_all_submodules(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        attn = mapping["blocks"].submodules["attn"]
        for key in ("q", "k", "v", "o", "q_norm", "k_norm"):
            assert key in attn.submodules, f"Missing attn submodule: {key!r}"

    def test_ln1_ln2_are_rms_norm_bridges(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        subs = mapping["blocks"].submodules
        assert isinstance(subs["ln1"], RMSNormalizationBridge)
        assert isinstance(subs["ln2"], RMSNormalizationBridge)

    def test_mlp_is_moe_bridge(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        mlp = mapping["blocks"].submodules["mlp"]
        assert isinstance(mlp, MoEBridge)

    def test_mlp_has_gate_submodule(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        mlp = mapping["blocks"].submodules["mlp"]
        assert "gate" in mlp.submodules
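
    # In the HF Qwen3MoE block, `mlp.gate` is the router: a linear map from
    # d_model to the number of experts, whose logits select the top-k experts
    # for each token.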

    def test_q_norm_k_norm_are_rms_norm_bridges(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        mapping = adapter.component_mapping
        assert mapping is not None
        attn_subs = mapping["blocks"].submodules["attn"].submodules
        assert isinstance(attn_subs["q_norm"], RMSNormalizationBridge)
        assert isinstance(attn_subs["k_norm"], RMSNormalizationBridge)
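
    # Qwen3 applies an RMSNorm over each head's d_head dimension to the query
    # and key states (QK-norm) before rotary embeddings, hence the per-head
    # q_norm / k_norm modules in the attention mapping.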

    def test_hf_module_paths(self, adapter: Qwen3MoeArchitectureAdapter) -> None:
        """HF module path names are mapped correctly."""
        mapping = adapter.component_mapping
        assert mapping is not None
        assert mapping["embed"].name == "model.embed_tokens"
        assert mapping["ln_final"].name == "model.norm"
        assert mapping["unembed"].name == "lm_head"
        assert mapping["blocks"].name == "model.layers"
        subs = mapping["blocks"].submodules
        assert subs["ln1"].name == "input_layernorm"
        assert subs["ln2"].name == "post_attention_layernorm"
        assert subs["attn"].name == "self_attn"
        assert subs["mlp"].name == "mlp"
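
    # Submodule names here are relative; the bridge presumably joins them with
    # the block container path and a layer index to form full HF paths, e.g.
    # "model.layers.0.self_attn" (the join logic is not asserted in this test).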


class TestQwen3MoeFactoryRegistration:
    def test_factory_lookup_returns_adapter_class(self) -> None:
        assert SUPPORTED_ARCHITECTURES["Qwen3MoeForCausalLM"] is Qwen3MoeArchitectureAdapter
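
    # Sketch of the intended lookup path for callers (direct instantiation of
    # the adapter class from the registry is an assumption of this sketch):
    #
    #     adapter_cls = SUPPORTED_ARCHITECTURES["Qwen3MoeForCausalLM"]
    #     adapter = adapter_cls(cfg)  # same as Qwen3MoeArchitectureAdapter(cfg)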