From bc36e6cb2cfb81a5c5387b136414294661f42019 Mon Sep 17 00:00:00 2001
From: Zhaodong Bing <45478848+aaab8b@users.noreply.github.com>
Date: Wed, 24 Dec 2025 17:52:56 +0800
Subject: [PATCH 1/2] Update unquantized_fused_moe_method.py

Change the assignment of unquantized MoE weights when AITER is used on
ROCm, making the weights safer to reload. This fixes the random-output
case seen after wake-up and weight reloading in reinforcement learning.
---
 .../layers/fused_moe/unquantized_fused_moe_method.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py b/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py
index 6182f10aa70f..84862416ee2d 100644
--- a/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py
+++ b/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py
@@ -207,16 +207,16 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
         super().process_weights_after_loading(layer)
 
         # Padding the weight for better performance on ROCm
-        layer.w13_weight.data = self._maybe_pad_weight(layer.w13_weight.data)
-        layer.w2_weight.data = self._maybe_pad_weight(layer.w2_weight.data)
+        layer.w13_weight.data.copy_(self._maybe_pad_weight(layer.w13_weight.data))
+        layer.w2_weight.data.copy_(self._maybe_pad_weight(layer.w2_weight.data))
 
         if self.rocm_aiter_moe_enabled:
             shuffled_w13, shuffled_w2 = rocm_aiter_ops.shuffle_weights(
                 layer.w13_weight.data, layer.w2_weight.data
             )
 
-            layer.w13_weight.data = shuffled_w13
-            layer.w2_weight.data = shuffled_w2
+            layer.w13_weight.data.copy_(shuffled_w13)
+            layer.w2_weight.data.copy_(shuffled_w2)
 
         if self.flashinfer_cutlass_moe_enabled:
             # Swap halves to arrange as [w3; w1] (kernel expectation)

From e1a762a3962372e22c802308c9519baa7be8526b Mon Sep 17 00:00:00 2001
From: Zhaodong Bing <45478848+aaab8b@users.noreply.github.com>
Date: Wed, 24 Dec 2025 17:56:01 +0800
Subject: [PATCH 2/2] Update unquantized_fused_moe_method.py

Signed-off-by: Zhaodong Bing <45478848+aaab8b@users.noreply.github.com>
---
 .../layers/fused_moe/unquantized_fused_moe_method.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py b/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py
index 84862416ee2d..bb7c50fb3c1a 100644
--- a/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py
+++ b/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py
@@ -208,7 +208,7 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
 
         # Padding the weight for better performance on ROCm
         layer.w13_weight.data.copy_(self._maybe_pad_weight(layer.w13_weight.data))
-        layer.w2_weight.data.copy_(self._maybe_pad_weight(layer.w2_weight.data))
+        layer.w2_weight.data.copy_(self._maybe_pad_weight(layer.w2_weight.data))
 
         if self.rocm_aiter_moe_enabled:
             shuffled_w13, shuffled_w2 = rocm_aiter_ops.shuffle_weights(
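
A minimal sketch (not part of the patch) of why the change matters: rebinding `.data` points the parameter at brand-new storage, so any reference captured earlier, such as a handle used by a weight-reloading path, keeps seeing the stale buffer, whereas `copy_` writes into the existing storage so every reference observes the new values. The variable names below (param, alias) are illustrative only and do not come from vLLM.

# Illustrative PyTorch sketch; names are hypothetical, not vLLM code.
import torch

# Old behavior: rebinding .data swaps in new storage; an earlier
# reference to the buffer keeps pointing at the stale values.
param = torch.nn.Parameter(torch.zeros(4), requires_grad=False)
alias = param.data                  # e.g. a handle held by a reload path
param.data = torch.ones(4)          # rebind to new storage
print(alias)                        # tensor([0., 0., 0., 0.])  <- stale

# Patched behavior: copy_ updates the original storage in place,
# so the earlier reference sees the updated weights.
param = torch.nn.Parameter(torch.zeros(4), requires_grad=False)
alias = param.data
param.data.copy_(torch.ones(4))     # write into existing storage
print(alias)                        # tensor([1., 1., 1., 1.])  <- consistent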