@@ -130,16 +130,25 @@ def _solve_least_squares_tikhonov(
     has_l2_penalty = np.any(P2 != 0)
 
     if not has_l2_penalty:
-        # Use direct WLS via sqrt-weighted data (avoids squaring the condition
-        # number of the normal equations) for maximum numerical stability.
+        # Use direct WLS via the sqrt-weighted data matrix (avoids squaring the
+        # condition number of the normal equations). We set cond explicitly to
+        # n_cols * eps (matching sklearn's LinearRegression) rather than using
+        # the default (eps), which sits right at the boundary of the near-zero
+        # singular values. The default threshold is fragile: different LAPACK
+        # implementations for differently-shaped matrices (e.g. 15×31 weighted
+        # vs 27×31 repeated-rows) can compute near-zero singular values
+        # slightly differently, leading to inconsistent rank detection across
+        # platforms. n_cols * eps comfortably zeros out all null-space
+        # directions regardless of platform.
         sw_sqrt = np.sqrt(sample_weight)
         X_arr = X.toarray()
         A = X_arr * sw_sqrt[:, None]
-        b = sw_sqrt * y_minus_offset
+        b = sw_sqrt * (y_minus_offset)
         if fit_intercept:
             # Prepend a column of sqrt(w) for the unpenalised intercept.
             A = np.column_stack([sw_sqrt, A])
-        return linalg.lstsq(A, b)[0]
+        cond = A.shape[1] * np.finfo(A.dtype).eps
+        return linalg.lstsq(A, b, cond=cond)[0]
 
     # ------------------------------------------------------------------
     # P2 != 0: the L2 penalty makes the normal equations well-conditioned
0 commit comments