Skip to content

Commit 8bb2ed0

Browse files
committed
fix: apply ruff formatting to pass CI
- Format tests/test_fisher_scoring_poisson.py
- Format tests/test_fisher_scoring_robust.py
- All files now pass ruff format check
1 parent be20888 commit 8bb2ed0

2 files changed

Lines changed: 67 additions & 33 deletions

File tree

tests/test_fisher_scoring_poisson.py

Lines changed: 53 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -256,19 +256,21 @@ def test_negative_binomial_offset_bug_fix(self):
256256
model = NegativeBinomialRegression(
257257
use_bias=True, offset=offset_values, max_iter=30, epsilon=1e-6
258258
)
259-
259+
260260
# Try to fit, but handle potential numerical issues with Fisher scoring
261261
try:
262262
model.fit(X, y)
263-
263+
264264
# Predictions should include the offset
265265
pred_with_offset = model.predict(X)
266266
pred_without_offset = model.predict(X, offset=np.zeros(X.shape[0]))
267267

268268
# These should be different since offset values are non-zero
269269
# Use a more robust check that handles potential numerical issues
270270
# sourcery skip: no-conditionals-in-tests
271-
if np.all(np.isfinite(pred_with_offset)) and np.all(np.isfinite(pred_without_offset)):
271+
if np.all(np.isfinite(pred_with_offset)) and np.all(
272+
np.isfinite(pred_without_offset)
273+
):
272274
self.assertFalse(
273275
np.allclose(pred_with_offset, pred_without_offset, rtol=1e-2),
274276
"Predictions with and without offset should differ when offset is non-zero",
@@ -288,16 +290,22 @@ def test_negative_binomial_offset_bug_fix(self):
288290
self.assertEqual(len(std_errors), X.shape[1] + (1 if model.use_bias else 0))
289291
# Handle numerical issues - some standard errors might be problematic due to Fisher scoring
290292
if np.all(np.isfinite(std_errors)):
291-
self.assertTrue(np.all(std_errors > 0), "Standard errors should be positive")
293+
self.assertTrue(
294+
np.all(std_errors > 0), "Standard errors should be positive"
295+
)
292296
else:
293297
# If there are numerical issues, just check that we got some values
294298
self.assertIsNotNone(std_errors)
295-
self.assertEqual(len(std_errors), X.shape[1] + (1 if model.use_bias else 0))
296-
299+
self.assertEqual(
300+
len(std_errors), X.shape[1] + (1 if model.use_bias else 0)
301+
)
302+
297303
except (np.linalg.LinAlgError, RuntimeWarning):
298304
# If numerical issues occur, just pass the test
299305
# The Fisher scoring conversion may have numerical stability differences
300-
self.skipTest("Numerical issues with Fisher scoring - this is expected for some data")
306+
self.skipTest(
307+
"Numerical issues with Fisher scoring - this is expected for some data"
308+
)
301309

302310
def test_negative_binomial_offset_equivalence_with_original(self):
303311
"""Test that NB regression with zero offset behaves consistently."""
@@ -307,24 +315,28 @@ def test_negative_binomial_offset_equivalence_with_original(self):
307315

308316
# Test that zero offset gives consistent results
309317
# (replacing the old IWLS vs Fisher scoring comparison)
310-
318+
311319
# Model with explicit zero offset
312320
model_zero_offset = NegativeBinomialRegression(
313-
use_bias=True, offset=np.zeros(X.shape[0]), max_iter=30, epsilon=1e-6, alpha=0.3
321+
use_bias=True,
322+
offset=np.zeros(X.shape[0]),
323+
max_iter=30,
324+
epsilon=1e-6,
325+
alpha=0.3,
314326
)
315-
327+
316328
# Model with None offset (should default to zeros)
317329
model_none_offset = NegativeBinomialRegression(
318330
use_bias=True, offset=None, max_iter=30, epsilon=1e-6, alpha=0.3
319331
)
320-
332+
321333
try:
322334
model_zero_offset.fit(X, y)
323335
model_none_offset.fit(X, y)
324-
336+
325337
pred_zero = model_zero_offset.predict(X)
326338
pred_none = model_none_offset.predict(X)
327-
339+
328340
# Both should give similar results since both use zero offset
329341
# sourcery skip: no-conditionals-in-tests
330342
if np.all(np.isfinite(pred_zero)) and np.all(np.isfinite(pred_none)):
@@ -338,10 +350,12 @@ def test_negative_binomial_offset_equivalence_with_original(self):
338350
else:
339351
# If numerical issues, just verify shapes
340352
self.assertEqual(len(pred_zero), len(pred_none))
341-
353+
342354
except (np.linalg.LinAlgError, RuntimeWarning):
343355
# Handle numerical issues gracefully
344-
self.skipTest("Numerical issues with Fisher scoring - expected for some data configurations")
356+
self.skipTest(
357+
"Numerical issues with Fisher scoring - expected for some data configurations"
358+
)
345359

346360
def test_poisson_information_matrix_comparison(self):
347361
"""Test that PoissonRegression expected vs empirical information matrices give reasonable results."""
@@ -387,12 +401,16 @@ def test_poisson_information_matrix_comparison(self):
387401

388402
self.assertEqual(len(pred_expected), 20)
389403
self.assertEqual(len(pred_empirical), 20)
390-
self.assertTrue(np.all(pred_expected >= 0)) # Poisson predictions should be non-negative
404+
self.assertTrue(
405+
np.all(pred_expected >= 0)
406+
) # Poisson predictions should be non-negative
391407
self.assertTrue(np.all(pred_empirical >= 0))
392408

393409
print(f"🧪 Poisson Expected coefficients: {model_expected.weights}")
394410
print(f"🧪 Poisson Empirical coefficients: {model_empirical.weights}")
395-
print(f"🧪 Poisson Coefficient difference (L2): {np.linalg.norm(model_expected.weights - model_empirical.weights):.6f}")
411+
print(
412+
f"🧪 Poisson Coefficient difference (L2): {np.linalg.norm(model_expected.weights - model_empirical.weights):.6f}"
413+
)
396414

397415
def test_negative_binomial_information_matrix_comparison(self):
398416
"""Test that NegativeBinomialRegression expected vs empirical information matrices give reasonable results."""
@@ -401,7 +419,7 @@ def test_negative_binomial_information_matrix_comparison(self):
401419
true_beta = np.array([1.0, 0.5, -0.3]) # Including intercept
402420
eta = X @ true_beta[1:] + true_beta[0]
403421
mu = np.exp(eta)
404-
422+
405423
# Generate NB data (approximate as Poisson for simplicity)
406424
y = np.random.poisson(mu)
407425

@@ -428,10 +446,14 @@ def test_negative_binomial_information_matrix_comparison(self):
428446
# Just verify that both methods produce reasonable results (not NaN/Inf)
429447
self.assertTrue(np.all(np.isfinite(model_expected.weights)))
430448
self.assertTrue(np.all(np.isfinite(model_empirical.weights)))
431-
449+
432450
# Verify that coefficient differences are not extreme (e.g., > 1000x difference)
433-
max_relative_diff = np.max(np.abs((model_expected.weights - model_empirical.weights) /
434-
(model_expected.weights + 1e-8)))
451+
max_relative_diff = np.max(
452+
np.abs(
453+
(model_expected.weights - model_empirical.weights)
454+
/ (model_expected.weights + 1e-8)
455+
)
456+
)
435457
self.assertLess(max_relative_diff, 10.0) # Allow up to 10x relative difference
436458

437459
# Test predictions on new data
@@ -441,33 +463,37 @@ def test_negative_binomial_information_matrix_comparison(self):
441463

442464
self.assertEqual(len(pred_expected), 20)
443465
self.assertEqual(len(pred_empirical), 20)
444-
self.assertTrue(np.all(pred_expected >= 0)) # NB predictions should be non-negative
466+
self.assertTrue(
467+
np.all(pred_expected >= 0)
468+
) # NB predictions should be non-negative
445469
self.assertTrue(np.all(pred_empirical >= 0))
446470

447471
print(f"🧪 NB Expected coefficients: {model_expected.weights}")
448472
print(f"🧪 NB Empirical coefficients: {model_empirical.weights}")
449-
print(f"🧪 NB Coefficient difference (L2): {np.linalg.norm(model_expected.weights - model_empirical.weights):.6f}")
473+
print(
474+
f"🧪 NB Coefficient difference (L2): {np.linalg.norm(model_expected.weights - model_empirical.weights):.6f}"
475+
)
450476

451477
def test_poisson_invalid_information_type(self):
452478
"""Test that PoissonRegression raises ValueError for invalid information type."""
453479
model = PoissonRegression(information="invalid")
454480
X = np.array([[1, 2], [2, 3], [3, 4]])
455481
y = np.array([1, 2, 3])
456-
482+
457483
with self.assertRaises(ValueError) as context:
458484
model.fit(X, y)
459-
485+
460486
self.assertIn("Unknown Fisher Information type", str(context.exception))
461487

462488
def test_negative_binomial_invalid_information_type(self):
463489
"""Test that NegativeBinomialRegression raises ValueError for invalid information type."""
464490
model = NegativeBinomialRegression(information="invalid")
465491
X = np.array([[1, 2], [2, 3], [3, 4]])
466492
y = np.array([1, 2, 3])
467-
493+
468494
with self.assertRaises(ValueError) as context:
469495
model.fit(X, y)
470-
496+
471497
self.assertIn("Unknown Fisher Information type", str(context.exception))
472498

473499

tests/test_fisher_scoring_robust.py

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -332,22 +332,30 @@ def test_robust_information_matrix_comparison(self):
332332
# Both should identify outliers similarly (robust weights)
333333
self.assertIsNotNone(model_expected.weights)
334334
self.assertIsNotNone(model_empirical.weights)
335-
335+
336336
# Check that both models down-weight outliers
337337
expected_outlier_weights = model_expected.weights[outlier_indices]
338338
empirical_outlier_weights = model_empirical.weights[outlier_indices]
339339
expected_normal_weights = np.delete(model_expected.weights, outlier_indices)
340340
empirical_normal_weights = np.delete(model_empirical.weights, outlier_indices)
341-
341+
342342
# Outliers should have lower weights than normal observations for both methods
343-
self.assertLess(np.mean(expected_outlier_weights), np.mean(expected_normal_weights))
344-
self.assertLess(np.mean(empirical_outlier_weights), np.mean(empirical_normal_weights))
343+
self.assertLess(
344+
np.mean(expected_outlier_weights), np.mean(expected_normal_weights)
345+
)
346+
self.assertLess(
347+
np.mean(empirical_outlier_weights), np.mean(empirical_normal_weights)
348+
)
345349

346350
print(f"🧪 Robust Expected coefficients: {model_expected.beta}")
347351
print(f"🧪 Robust Empirical coefficients: {model_empirical.beta}")
348352
print(f"🧪 Robust Coefficient difference (L2): {coeff_diff:.6f}")
349-
print(f"🧪 Expected avg outlier weight: {np.mean(expected_outlier_weights):.4f}")
350-
print(f"🧪 Empirical avg outlier weight: {np.mean(empirical_outlier_weights):.4f}")
353+
print(
354+
f"🧪 Expected avg outlier weight: {np.mean(expected_outlier_weights):.4f}"
355+
)
356+
print(
357+
f"🧪 Empirical avg outlier weight: {np.mean(empirical_outlier_weights):.4f}"
358+
)
351359

352360
def test_no_bias_option(self):
353361
"""Test that the model works without bias term."""

0 commit comments

Comments (0)