@@ -30,6 +30,8 @@ def set_nemotron_3_nano_common_configs(cfg: ConfigContainer) -> None:
cfg.mixed_precision.grad_reduce_in_fp32 = False
cfg.ddp.grad_reduce_in_fp32 = False

cfg.model.moe_router_force_load_balancing = True


def nemotron_3_nano_pretrain_config_gb300(
precision: str = "bf16", mock: bool = True, config_variant: str = "v1"
scripts/performance/configs/nemotronh/nemotron_3_nano_workload_base_configs.py
@@ -31,37 +31,38 @@
BASE_NEMOTRON_3_NANO_CONFIG = WorkloadBaseConfig(
num_gpus=8,
global_batch_size=512,
micro_batch_size=2,
tensor_model_parallel_size=1,
expert_tensor_parallel_size=1,
expert_model_parallel_size=8,
moe_flex_dispatcher_backend="hybridep",
cuda_graph_impl="transformer_engine",
cuda_graph_scope=["attn", "mamba", "moe_router", "moe_preprocess"],
⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

# Find the WorkloadBaseConfig class definition and cuda_graph_scope type annotation
rg -A 20 "class WorkloadBaseConfig" scripts/performance/utils/utils.py | head -50

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 1046


🏁 Script executed:

# Find all cuda_graph_scope assignments in config files
rg -n "cuda_graph_scope\s*=" scripts/performance/configs/

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 8390


🏁 Script executed:

# Check the specific file and lines mentioned in the review
sed -n '35,90p' scripts/performance/configs/nemotronh/nemotron_3_nano_workload_base_configs.py | cat -n

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 2564


🏁 Script executed:

# Search for cuda_graph_scope field in WorkloadBaseConfig class
rg -A 100 "class WorkloadBaseConfig" scripts/performance/utils/utils.py | grep -A 5 -B 5 "cuda_graph_scope"

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 440


🏁 Script executed:

# Alternative: search more broadly for all fields in the class
wc -l scripts/performance/utils/utils.py

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 109


🏁 Script executed:

# Read the entire utils.py file to see the full WorkloadBaseConfig definition
cat -n scripts/performance/utils/utils.py | head -200

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 9205


Fix type annotation for cuda_graph_scope to accept both string and list values.

Line 55 in scripts/performance/utils/utils.py defines cuda_graph_scope: Optional[str] = None, but the actual usage across config files assigns both list[str] values (e.g., ["attn", "mamba"]) and str values (e.g., "full_iteration"). Update the type annotation to str | list[str] | None to match the actual usage pattern. This also applies to lines 81 and 85 of the file under review.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@scripts/performance/configs/nemotronh/nemotron_3_nano_workload_base_configs.py`
at line 39, change the type annotation for the cuda_graph_scope variable in
scripts/performance/utils/utils.py from Optional[str] to allow both strings and
lists by using the union type str | list[str] | None; update every occurrence of
the cuda_graph_scope annotation (the variable named cuda_graph_scope and any
function signatures or defaults referencing it) so the annotation matches actual
usage (accepting values like "full_iteration" or ["attn","mamba","moe_router"]).
Ensure imports or typing usage remain valid for the project's Python version
(use PEP 604 union syntax).
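
For illustration, a minimal sketch of the annotation change the review is asking for. Only the cuda_graph_scope field and its proposed union type come from the review; the rest of the class body is elided or assumed, and Python 3.10+ is assumed for the PEP 604 union syntax:

from dataclasses import dataclass

@dataclass
class WorkloadBaseConfig:
    # ... other fields elided ...
    num_gpus: int = 8
    # was: cuda_graph_scope: Optional[str] = None
    # PEP 604 union so both string and list scopes type-check,
    # e.g. "full_iteration" or ["attn", "mamba", "moe_router"]
    cuda_graph_scope: str | list[str] | None = None

With this annotation, the assignments in the config files below (list-valued scopes for BF16 and FP8 variants, string scopes elsewhere) would all match the declared type.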

)

NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB300_BF16_V1 = replace(
BASE_NEMOTRON_3_NANO_CONFIG,
tensor_model_parallel_size=1,
micro_batch_size=4,
)
NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB300_FP8_MX_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB300_BF16_V1
NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB300_NVFP4_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB300_BF16_V1

NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB200_BF16_V1 = replace(
BASE_NEMOTRON_3_NANO_CONFIG,
tensor_model_parallel_size=1,
micro_batch_size=2,
)
NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB200_FP8_MX_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB200_BF16_V1
NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB200_NVFP4_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB200_BF16_V1

NEMOTRON_3_NANO_PRETRAIN_CONFIG_B300_BF16_V1 = replace(
BASE_NEMOTRON_3_NANO_CONFIG,
tensor_model_parallel_size=1,
micro_batch_size=4,
)
NEMOTRON_3_NANO_PRETRAIN_CONFIG_B300_FP8_MX_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_B300_BF16_V1
NEMOTRON_3_NANO_PRETRAIN_CONFIG_B300_NVFP4_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_B300_BF16_V1

NEMOTRON_3_NANO_PRETRAIN_CONFIG_B200_BF16_V1 = replace(
BASE_NEMOTRON_3_NANO_CONFIG,
tensor_model_parallel_size=1,
micro_batch_size=2,
)
NEMOTRON_3_NANO_PRETRAIN_CONFIG_B200_FP8_MX_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_B200_BF16_V1
NEMOTRON_3_NANO_PRETRAIN_CONFIG_B200_NVFP4_V1 = NEMOTRON_3_NANO_PRETRAIN_CONFIG_B200_BF16_V1
@@ -71,11 +72,19 @@
num_gpus=16,
global_batch_size=1024,
micro_batch_size=1,
recompute_modules=["moe", "layernorm"],
cuda_graph_impl="transformer_engine",
)

NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100_BF16_V1 = _NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100
NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100_FP8_CS_V1 = _NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100
NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100_BF16_V1 = replace(
_NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100,
recompute_modules=["moe", "layernorm"],
cuda_graph_scope=["attn", "mamba"],
)
NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100_FP8_CS_V1 = replace(
_NEMOTRON_3_NANO_PRETRAIN_CONFIG_H100,
cuda_graph_scope=["mamba"],
recompute_modules=["moe", "layernorm", "core_attn", "moe_act"],
)

__all__ = [
"NEMOTRON_3_NANO_PRETRAIN_CONFIG_GB300_BF16_V1",
3 changes: 3 additions & 0 deletions scripts/performance/perf_plugins.py
@@ -301,6 +301,9 @@ def _set_model_specific_environment_variables(
if gpu in ["h100"] and model_recipe_name in ["llama3_70b"] and compute_dtype == "fp8_cs":
executor.env_vars["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
executor.env_vars["NCCL_GRAPH_REGISTER"] = "0"
if model_recipe_name in ["nemotron_3_nano"]:
del_cudnn_ln = False

if del_cudnn_ln:
if "NVTE_NORM_FWD_USE_CUDNN" in executor.env_vars:
executor.env_vars.pop("NVTE_NORM_FWD_USE_CUDNN")