From 15711470c7bcc9457a3570158ab4500b95ce6562 Mon Sep 17 00:00:00 2001 From: Raiden129 <101210316+Raiden129@users.noreply.github.com> Date: Thu, 26 Mar 2026 22:02:23 +0530 Subject: [PATCH 1/2] Fix for Issue #205 Prevent `_prompt_inference_settings()` from raising `UnboundLocalError` when the resolved backend is not `torch` (for example MLX on Apple Silicon). The function always returned `InferenceSettings`, but `generate_comp` and `gpu_post_processing` were only assigned inside the torch-specific branch. On MLX this left both locals undefined and caused inference to crash before startup. Initialize those fields from `InferenceSettings` defaults before the backend check, then continue overriding them only for the torch path. With this change the prompt helper returns valid settings instead of crashing. --- corridorkey_cli.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/corridorkey_cli.py b/corridorkey_cli.py index b040b3fe..0ccc509f 100644 --- a/corridorkey_cli.py +++ b/corridorkey_cli.py @@ -143,6 +143,10 @@ def _prompt_inference_settings( ) -> InferenceSettings: """Interactively prompt for inference settings, skipping any pre-filled values.""" console.print(Panel("Inference Settings", style="bold cyan")) + generate_comp = default_comp if default_comp is not None else InferenceSettings.generate_comp + gpu_post_processing = ( + default_gpu_post if default_gpu_post is not None else InferenceSettings.gpu_post_processing + ) if default_linear is not None: input_is_linear = default_linear From 9be76a2f9230cff9dde2dad04ed5b88515ed4dab Mon Sep 17 00:00:00 2001 From: Your Name Date: Fri, 27 Mar 2026 01:00:34 +0530 Subject: [PATCH 2/2] reformat --- corridorkey_cli.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/corridorkey_cli.py b/corridorkey_cli.py index 0ccc509f..09329deb 100644 --- a/corridorkey_cli.py +++ b/corridorkey_cli.py @@ -144,9 +144,7 @@ def _prompt_inference_settings( """Interactively prompt for inference settings, skipping any pre-filled 
values.""" console.print(Panel("Inference Settings", style="bold cyan")) generate_comp = default_comp if default_comp is not None else InferenceSettings.generate_comp - gpu_post_processing = ( - default_gpu_post if default_gpu_post is not None else InferenceSettings.gpu_post_processing - ) + gpu_post_processing = default_gpu_post if default_gpu_post is not None else InferenceSettings.gpu_post_processing if default_linear is not None: input_is_linear = default_linear