Skip to content

Commit 4ef44f1

Browse files
authored
[bugfix] fix gptq_bridge (#19)
1 parent 975c8e7 commit 4ef44f1

1 file changed

Lines changed: 4 additions & 3 deletions

File tree

src/mcore_bridge/model/gpts/qwen3_next.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -516,12 +516,13 @@ def _set_layer_attn(self, mg_layer, hf_state_dict, layer_idx: int, to_mcore: boo
516516
self._set_state_dict(mg_layer, 'input_layernorm.weight', hf_state_dict, 'input_layernorm.weight', to_mcore)
517517
return hf_state_dict
518518

519-
def _set_layer_mlp(self, mg_layer, hf_state_dict, layer_idx: int, to_mcore: bool):
519+
def _set_layer_mlp(self, mg_layer, hf_state_dict, layer_idx: int, to_mcore: bool, is_mtp: bool = False):
520520
if self.model_type != 'qwen3_5':
521-
return super()._set_layer_mlp(mg_layer, hf_state_dict, layer_idx, to_mcore)
521+
return super()._set_layer_mlp(mg_layer, hf_state_dict, layer_idx, to_mcore, is_mtp=is_mtp)
522522
# dense
523523
mg_mlp = None if mg_layer is None else mg_layer.mlp
524-
hf_state_dict.update(self._set_mlp_state(mg_mlp, hf_state_dict, f'{self.hf_mlp_prefix}.', layer_idx, to_mcore))
524+
hf_state_dict.update(
525+
self._set_mlp_state(mg_mlp, hf_state_dict, f'{self.hf_mlp_prefix}.', layer_idx, to_mcore, is_mtp=is_mtp))
525526
self._set_state_dict(mg_layer, 'pre_mlp_layernorm.weight', hf_state_dict, 'post_attention_layernorm.weight',
526527
to_mcore)
527528
return hf_state_dict

0 commit comments

Comments (0)