diff --git a/ramanujantools/asymptotics/__init__.py b/ramanujantools/asymptotics/__init__.py new file mode 100644 index 0000000..166a5b8 --- /dev/null +++ b/ramanujantools/asymptotics/__init__.py @@ -0,0 +1,10 @@ +from .series_matrix import SeriesMatrix +from .growth_rate import GrowthRate +from .reducer import PrecisionExhaustedError, Reducer + +__all__ = [ + "PrecisionExhaustedError", + "GrowthRate", + "SeriesMatrix", + "Reducer", +] diff --git a/ramanujantools/asymptotics/growth_rate.py b/ramanujantools/asymptotics/growth_rate.py new file mode 100644 index 0000000..9876c73 --- /dev/null +++ b/ramanujantools/asymptotics/growth_rate.py @@ -0,0 +1,235 @@ +from __future__ import annotations + +import sympy as sp +from sympy.abc import t, n + +from functools import total_ordering + + +@total_ordering +class GrowthRate: + r""" + Represents the formal asymptotic growth rate of a solution to a linear difference equation. + + The asymptotic behavior is captured by the Birkhoff-Trjitzinsky formal series representation, + which defines a canonical basis solution $E(n)$ as: + $$E(n) = (n!)^{d} \cdot \lambda^n \cdot e^{Q(n)} \cdot n^{D} \cdot (\log n)^{m}$$ + + This class mathematically isolates the distinct orders of infinity (the exponents and bases) + into a structured object. It acts as an element in a Tropical Semiring, where addition (`+`) + filters for the strictly dominant growth rate, and multiplication (`*`) algebraically + combines the formal exponents. + + By default, an uninitialized `GrowthRate` represents the additive identity (a "zero" or + "dead" growth), as $\lambda = 0$ collapses the entire expression to zero. + + Args: + factorial_power: The exponent $d$ applied to the factorial term $(n!)^d$. + Strictly dominates all other growth components. + exp_base: The base $\lambda$ of the primary exponential growth $\lambda^n$. + Evaluated by its absolute magnitude. A value of $0$ nullifies the entire solution. 
+ sub_exp: The fractional exponent $Q(n)$ applied to $e^{Q(n)}$. + Must be strictly sub-linear (e.g., contains fractional powers like $n^{1/p}$). + polynomial_degree: The exponent $D$ applied to the polynomial term $n^D$. + Can be a fractional rational shift introduced by gauge transformations. + log_power: The exponent $m$ applied to the logarithmic term $(\log n)^m$. + Typically represents the Jordan block depth of degenerate eigenvalues. + """ + + def __init__( + self, + factorial_power: sp.Rational = sp.S.Zero, + exp_base: sp.Expr = sp.S.Zero, + sub_exp: sp.Expr = sp.S.Zero, + polynomial_degree: sp.Expr = sp.S.Zero, + log_power: sp.Rational = sp.S.Zero, + ): + self.factorial_power: sp.Expr = sp.S(factorial_power) + self.exp_base: sp.Expr = sp.S(exp_base) + self.sub_exp: sp.Expr = sp.S(sub_exp) + self.polynomial_degree: sp.Expr = sp.S(polynomial_degree) + self.log_power: sp.Expr = sp.S(log_power) + + if not self.factorial_power.is_number: + raise ValueError("Factorial power must be an integer.") + + if not self.log_power.is_number: + raise ValueError("Factorial power must be an integer.") + + syms = ( + self.exp_base.free_symbols + | self.sub_exp.free_symbols + | self.polynomial_degree.free_symbols + ) + + if not syms.issubset({n}): + raise ValueError( + "Only 'n' can be a free symbol in the growth rate components." 
+ ) + + def __add__(self, other: GrowthRate) -> GrowthRate: + """Addition acts as a max() filter, keeping only the dominant GrowthRate.""" + if not isinstance(other, GrowthRate): + return NotImplemented + return self if self > other else other + + def __radd__(self, other: GrowthRate) -> GrowthRate: + return self.__add__(other) + + def __mul__(self, other: GrowthRate) -> GrowthRate: + """Strictly combines two GrowthRates by adding their formal exponents.""" + if not isinstance(other, GrowthRate): + return NotImplemented + + return GrowthRate( + factorial_power=sp.simplify(self.factorial_power + other.factorial_power), + exp_base=sp.simplify(self.exp_base * other.exp_base), + sub_exp=sp.simplify(self.sub_exp + other.sub_exp), + polynomial_degree=sp.simplify( + self.polynomial_degree + other.polynomial_degree + ), + log_power=self.log_power + other.log_power, + ) + + def __rmul__(self, other: GrowthRate) -> GrowthRate: + return self.__mul__(other) + + def __eq__(self, other: GrowthRate) -> bool: + if not isinstance(other, GrowthRate): + return NotImplemented + + return ( + self.factorial_power == other.factorial_power + and self.exp_base == other.exp_base + and self.sub_exp == other.sub_exp + and self.polynomial_degree == other.polynomial_degree + and self.log_power == other.log_power + ) + + def __gt__(self, other: GrowthRate) -> bool: + if not isinstance(other, GrowthRate): + return NotImplemented + + n_real = sp.Symbol(n.name, real=True, positive=True) + + def is_greater(a, b): + diff = sp.simplify(a - b) + if diff.is_zero: + return None + + diff_real = diff.subs(n, n_real) + diff_re = sp.re(diff_real) + + lim = sp.limit(diff_re, n_real, sp.oo) + + if lim == sp.oo or lim.is_positive: + return True + if lim == -sp.oo or lim.is_negative: + return False + + if lim.is_number: + try: + val = float(lim.evalf()) + if val > 0: + return True + if val < 0: + return False + except TypeError: + pass + + return None + + cmp_d = is_greater(self.factorial_power, 
other.factorial_power) + if cmp_d is not None: + return cmp_d + + cmp_lam = is_greater(sp.Abs(self.exp_base), sp.Abs(other.exp_base)) + if cmp_lam is not None: + return cmp_lam + + cmp_Q = is_greater(self.sub_exp, other.sub_exp) + if cmp_Q is not None: + return cmp_Q + + cmp_D = is_greater(self.polynomial_degree, other.polynomial_degree) + if cmp_D is not None: + return cmp_D + + cmp_log = is_greater(self.log_power, other.log_power) + if cmp_log is not None: + return cmp_log + + return sp.default_sort_key(self.as_expr(n)) > sp.default_sort_key( + other.as_expr(n) + ) + + def __repr__(self) -> str: + return ( + f"GrowthRate(factorial_power={self.factorial_power}, exp_base={self.exp_base}, " + f"sub_exp={self.sub_exp}, polynomial_degree={self.polynomial_degree}, log_power={self.log_power})" + ) + + def __str__(self) -> str: + return str(self.as_expr(sp.Symbol("n"))) + + def as_expr(self, n: sp.Symbol) -> sp.Expr: + """Renders the formal growth as a SymPy expression.""" + expr = ( + (sp.factorial(n) ** self.factorial_power) + * (self.exp_base**n if self.exp_base != 0 else 0) + * sp.exp(self.sub_exp) + * (n**self.polynomial_degree) + * sp.log(n) ** self.log_power + ) + return sp.simplify(expr).rewrite(sp.factorial) + + def simplify(self) -> GrowthRate: + """Returns a new GrowthRate with all components simplified.""" + return GrowthRate( + factorial_power=sp.simplify(self.factorial_power), + exp_base=sp.simplify(self.exp_base), + sub_exp=sp.simplify(self.sub_exp), + polynomial_degree=sp.simplify(self.polynomial_degree), + log_power=self.log_power, + ) + + @classmethod + def from_taylor_coefficients( + cls, coeffs: list[sp.Expr], p: int, log_power: int = 0, factorial_power: int = 0 + ) -> GrowthRate: + """ + Calculates the exact asymptotic bounds of a formal product by extracting + the sub-exponential and polynomial degrees via a logarithmic Maclaurin expansion. 
+ """ + exp_base = sp.cancel(sp.expand(coeffs[0])) + if exp_base == sp.S.Zero: + return cls(exp_base=sp.S.Zero) + + precision = len(coeffs) + + x = sum( + (coeffs[k] / exp_base) * (t**k) for k in range(1, min(precision, p + 1)) + ) + + # Maclaurin series of ln(1+x) up to O(t^(p+1)) + log_series = sp.expand( + sum(((-1) ** (j + 1) / sp.Rational(j)) * (x**j) for j in range(1, p + 1)) + ) + sub_exp, poly_deg = sp.S.Zero, sp.S.Zero + + for k in range(1, p + 1): + c_k = sp.cancel(sp.expand(log_series.coeff(t, k))) + if c_k != sp.S.Zero: + if k < p: + power = 1 - sp.Rational(k, p) + sub_exp += (c_k / power) * (n**power) + else: + poly_deg = c_k + + return cls( + exp_base=exp_base, + sub_exp=sub_exp, + polynomial_degree=poly_deg, + log_power=log_power, + factorial_power=factorial_power, + ) diff --git a/ramanujantools/asymptotics/growth_rate_test.py b/ramanujantools/asymptotics/growth_rate_test.py new file mode 100644 index 0000000..65a9215 --- /dev/null +++ b/ramanujantools/asymptotics/growth_rate_test.py @@ -0,0 +1,144 @@ +import pytest + +import sympy as sp +from sympy.abc import n + +from ramanujantools.asymptotics.growth_rate import GrowthRate + + +def test_equality_type_gate(): + growth_rate = GrowthRate() + assert growth_rate is not None + + assert growth_rate.__eq__(1) == NotImplemented + assert growth_rate.__eq__("Some String") == NotImplemented + + +def test_tropical_dot_product_simulation(): + """Verify the combined workflow used in the final U_inv * CFM matrix multiplication.""" + g_base = GrowthRate(polynomial_degree=2, exp_base=5) + g_shift = GrowthRate(polynomial_degree=7, exp_base=1) + + # The Tropical Dot Product + result = g_base * g_shift + + assert result.polynomial_degree == 9 + assert result.exp_base == 5 + + +def test_simplification(): + """Equality must survive mathematically identical but unsimplified expressions.""" + growth_rate = GrowthRate( + polynomial_degree=n**2 - n**2, + sub_exp=sp.expand((n + 1) ** 2 - n**2 - 2 * n - 1), + ) + + assert 
GrowthRate(polynomial_degree=0) == growth_rate.simplify() + + +def test_add_type_gate(): + g = GrowthRate() + assert g.__add__(3) == NotImplemented + + +def test_add_max_filter(): + """Addition must act as a strict max() gatekeeper using __gt__.""" + dominant = GrowthRate(factorial_power=2) + weak = GrowthRate(factorial_power=1) + + assert dominant + weak == dominant + assert weak + dominant == dominant + + +def test_add_zero_passthrough(): + """Addition with 0 or None must pass the object through untouched.""" + growth_rate = GrowthRate(factorial_power=2, exp_base=3) + assert growth_rate + GrowthRate() == growth_rate + assert GrowthRate() + growth_rate == growth_rate + + +def test_mul_type_gate(): + g = GrowthRate() + assert g.__mul__(n**2) == NotImplemented + + +def test_mul_growth_combination(): + """Multiplication of two GrowthRates must cleanly combine their formal exponents.""" + g1 = GrowthRate( + factorial_power=1, + exp_base=2, + sub_exp=sp.sqrt(n), + polynomial_degree=2, + log_power=1, + ) + g2 = GrowthRate( + factorial_power=2, exp_base=3, sub_exp=n, polynomial_degree=3, log_power=2 + ) + + result = g1 * g2 + + assert result.factorial_power == 3 + assert result.exp_base == 6 + assert sp.simplify(result.sub_exp - (sp.sqrt(n) + n)) == 0 + assert result.polynomial_degree == 5 + assert result.log_power == 3 + + +def test_gt_level_1_factorial(): + """factorial_power (Factorial) strictly dominates all other bounds.""" + g1 = GrowthRate(factorial_power=2, exp_base=1, polynomial_degree=-100) + g2 = GrowthRate(factorial_power=1, exp_base=1000, polynomial_degree=100) + + assert g1 > g2 + assert not (g2 > g1) + + +def test_gt_level_2_base_exponential(): + """exp_base (Base Exp) strictly dominates sub_exp (Fractional Exp) and polynomials.""" + # g2 has a larger base lambda, so it dominates g1 despite g1's massive fractional sub_exp + g1 = GrowthRate(exp_base=1, sub_exp=1000 * sp.sqrt(n)) + g2 = GrowthRate(exp_base=2, sub_exp=0) + + assert g2 > g1 + + # Complex 
magnitude check: |2i| = 2 > |1| + g3 = GrowthRate(exp_base=sp.I * 2) + g4 = GrowthRate(exp_base=1) + assert g3 > g4 + + +def test_gt_level_3_fractional_exponential(): + """sub_exp (Fractional Exp) dominates polynomials.""" + # Since lambda is tied at 1, sub_exp triggers + g1 = GrowthRate(exp_base=1, sub_exp=sp.sqrt(n), polynomial_degree=0) + g2 = GrowthRate(exp_base=1, sub_exp=0, polynomial_degree=1000) + + assert g1 > g2 + + +def test_gt_level_4_polynomial(): + """polynomial_degree (Polynomial) dominates logarithmic Jordan depth.""" + # Since lambda and sub_exp are tied, polynomial_degree triggers + g1 = GrowthRate(polynomial_degree=sp.Rational(3, 2), log_power=0) + g2 = GrowthRate(polynomial_degree=1, log_power=10) + + assert g1 > g2 + + +def test_gt_level_5_logarithmic(): + """Jordan depth acts as the final logarithmic tie-breaker.""" + g1 = GrowthRate(log_power=2) + g2 = GrowthRate(log_power=1) + + assert g1 > g2 + + +def test_gt_complex_oscillation_fallthrough(): + """ + Pure imaginary terms in sub_exp (oscillation) have a real limit of 0. + The > operator must recognize the tie and fall through to the next level. + """ + g1 = GrowthRate(sub_exp=sp.I * n, polynomial_degree=2) + g2 = GrowthRate(sub_exp=0, polynomial_degree=1) + + assert g1 > g2 diff --git a/ramanujantools/asymptotics/reducer.py b/ramanujantools/asymptotics/reducer.py new file mode 100644 index 0000000..ac19fc8 --- /dev/null +++ b/ramanujantools/asymptotics/reducer.py @@ -0,0 +1,489 @@ +from __future__ import annotations + +import logging +from functools import lru_cache + +import sympy as sp +from sympy.abc import t, n + +from ramanujantools import Matrix +from ramanujantools.asymptotics import GrowthRate, SeriesMatrix + +logger = logging.getLogger(__name__) +logger.addHandler(logging.NullHandler()) + + +class PrecisionExhaustedError(Exception): + pass + + +class Reducer: + """ + canonical fundamental matrix for linear difference systems. 
+ Implements the Birkhoff-Trjitzinsky algorithm to compute the formal + + Sources: + - Analytic Theory of Singular Difference Equations: George D Birkhoff and Waldemar J Trjitzinsky + - Resurrecting the Asymptotics of Linear Recurrences: Jet Wimp and Doron Zeilberger + - Galois theory of difference equations, chapter 7.2: Marius van der Put and Michael Singer + """ + + def __init__( + self, + series: SeriesMatrix, + factorial_power: int, + precision: int, + p: int, + ) -> None: + """ + Initializes the Reducer with a pre-conditioned formal power series. + """ + self.M = series + self.factorial_power = factorial_power + self.precision = precision + self.p = p + self.dim = series.coeffs[0].shape[0] + self.S_total = SeriesMatrix( + [Matrix.eye(self.dim)], p=self.p, precision=self.precision + ) + self._is_reduced = False + self.children = [] + + @staticmethod + def _solve_sylvester(A: Matrix, B: Matrix, C: Matrix) -> Matrix: + """Solves the Sylvester equation: A*X - X*B = C for X using Kronecker flattening.""" + m, n = A.shape[0], B.shape[0] + sys_mat, C_vec = Matrix.zeros(m * n, m * n), Matrix.zeros(m * n, 1) + + for j in range(n): + for i in range(m): + row_idx = j * m + i + C_vec[row_idx, 0] = C[i, j] + for k in range(m): + sys_mat[row_idx, j * m + k] += A[i, k] + for k in range(n): + sys_mat[row_idx, k * m + i] -= B[k, j] + + vec_X = sys_mat.LUsolve(C_vec) + + X = Matrix.zeros(m, n) + for j in range(n): + for i in range(m): + X[i, j] = vec_X[j * m + i, 0] + return X + + def _get_blocks(self, J_target: Matrix) -> list[tuple[int, int, sp.Expr]]: + """Finds the boundaries (start_idx, end_idx, eigenvalue) of independent blocks.""" + blocks = [] + if self.dim == 0: + return blocks + current_eval, start_idx = J_target[0, 0], 0 + + for i in range(1, self.dim): + if J_target[i, i] != current_eval: + blocks.append((start_idx, i, current_eval)) + current_eval, start_idx = J_target[i, i], i + + blocks.append((start_idx, self.dim, current_eval)) + return blocks + + def 
reduce(self) -> Reducer: + """ + The core Birkhoff-Trjitzinsky reduction loop. + Iteratively applies block diagonalization (split) or ramified shears + until the matrix reaches a terminal canonical form. + """ + iterations = 0 + zeros_shifted = 0 + + while not self._is_reduced and iterations < self.dim * self.precision: + M0 = self.M.coeffs[0] + + if M0.is_zero_matrix: + if zeros_shifted >= self.precision: + raise ValueError( + f"Series exhausted after {zeros_shifted} shifts. Precision too low." + ) + + self.M = self.M.divide_by_t() + self.factorial_power -= sp.Rational(1, self.p) + zeros_shifted += 1 + iterations += 1 + continue + + k_target = self.M.get_first_non_scalar_index() + if k_target is None: + self._is_reduced = True + break + + M_target = self.M.coeffs[k_target] + + logger.debug(f"REDUCE: {self.dim=} | {self.precision=} | {k_target=}") + logger.debug(f"REDUCE: at {k_target=}: {M_target=}") + logger.debug( + f"REDUCE: Eigenvalues of M_target: {list(M_target.eigenvals().keys())}" + ) + + P, J_target = M_target.jordan_form() + self.S_total = self.S_total * SeriesMatrix( + [P], p=self.p, precision=self.S_total.precision + ) + self.M = self.M.similarity_transform(P, J_target if k_target == 0 else None) + unique_evals = list( + dict.fromkeys([J_target[i, i] for i in range(self.dim)]) + ) + + if len(unique_evals) > 1: + self.split(k_target, J_target) + blocks = self._get_blocks(J_target) + for s_idx, e_idx, _ in blocks: + sub_coeffs = [ + self.M.coeffs[k][s_idx:e_idx, s_idx:e_idx] + for k in range(self.precision) + ] + sub_series = SeriesMatrix( + sub_coeffs, p=self.p, precision=self.precision + ) + sub_reducer = Reducer( + series=sub_series, + factorial_power=self.factorial_power, + precision=self.precision, + p=self.p, + ) + sub_reducer.reduce() + self.children.append(sub_reducer) + + self._is_reduced = True + return self + else: + self.shear() + + iterations += 1 + + if not self._is_reduced: + raise RuntimeError("Failed to reach canonical form within 
iteration limit.") + return self + + def split(self, k_target: int, J_target: Matrix) -> None: + """ + Performs block diagonalization. + When a leading coefficient matrix has distinct eigenvalues, this method uses + Sylvester equations to decouple the system into independent smaller Jordan blocks. + """ + dim, blocks = self.dim, self._get_blocks(J_target) + + self._check_split_truncation(blocks) + + for m in range(1, self.precision - k_target): + R_k, Y_mat, needs_gauge = ( + self.M.coeffs[k_target + m], + Matrix.zeros(dim, dim), + False, + ) + + for s_i, e_i, eval_i in blocks: + J_ii = J_target[s_i:e_i, s_i:e_i] + for s_j, e_j, eval_j in blocks: + if eval_i == eval_j: + continue + + J_jj, R_ij = J_target[s_j:e_j, s_j:e_j], R_k[s_i:e_i, s_j:e_j] + + if not R_ij.is_zero_matrix: + needs_gauge = True + Y_ij = self._solve_sylvester(J_ii, J_jj, -R_ij) + + for r in range(e_i - s_i): + for c in range(e_j - s_j): + Y_mat[s_i + r, s_j + c] = Y_ij[r, c] + + if needs_gauge: + Y_mat = Y_mat.applyfunc(lambda x: sp.cancel(sp.radsimp(sp.cancel(x)))) + + logger.debug(f"SPLIT: Solving Sylvester at m={m} ...") + logger.debug(f"SPLIT: {J_ii=}") + logger.debug(f"SPLIT: {J_jj=}") + logger.debug(f"SPLIT: {R_ij=}") + logger.debug(f"SPLIT: {Y_ij=}") + + padded_G_short = ( + [Matrix.eye(dim)] + [Matrix.zeros(dim, dim)] * (m - 1) + [Y_mat] + ) + padded_G_short += [Matrix.zeros(dim, dim)] * ( + self.precision - len(padded_G_short) + ) + G_short = SeriesMatrix( + padded_G_short, p=self.p, precision=self.precision + ) + self.M = self.M.coboundary(G_short) + + def shear(self) -> None: + """ + Applies a ramification and shear transformation. + Used when the leading matrix is nilpotent, this shifts the polynomial degrees + of the variables to expose the hidden sub-exponential growths. 
+ """ + slope = self._compute_shear_slope() + + if slope == sp.S.Zero: + self._check_eigenvalue_blindness(self.M.coeffs[0][0, 0]) + self._is_reduced = True + return + + shift, ramification = slope.as_numer_denom() + self.M, self.S_total = ( + self.M.ramify(ramification), + self.S_total.ramify(ramification), + ) + self.p *= ramification + self.precision *= ramification + + true_valid_precision, max_shift = self._check_shear_truncation(shift) + + logger.debug(f"SHEAR: Computed shift {shift=}. Max shift: {max_shift=} terms.") + if max_shift > 0: + padded_coeffs = ( + self.S_total.coeffs + [Matrix.zeros(self.dim, self.dim)] * max_shift + ) + self.S_total = SeriesMatrix( + padded_coeffs, p=self.p, precision=self.S_total.precision + max_shift + ) + + logger.debug( + f"SHEAR: S_total array capacity: {self.S_total.precision=}. " + f"Remaining buffer: {self.S_total.precision - max_shift}" + ) + + S_sym = Matrix.diag(*[t ** (i * shift) for i in range(self.dim)]) + S_series = SeriesMatrix.from_matrix(S_sym, n, self.p, self.S_total.precision) + + self.S_total = self.S_total * S_series + + self.M, h = self.M.shear_coboundary(shift, true_valid_precision) + self.precision = true_valid_precision + + if h != 0: + self.factorial_power += sp.Rational(h, self.p) + + def _compute_shear_slope(self) -> sp.Rational: + """ + Calculates the steepest valid slope for a shear transformation by constructing + a Newton Polygon from the valuation matrix of the shifted series. 
+ """ + exp_base = self.M.coeffs[0][0, 0] + shifted_series = self.M.shift_leading_eigenvalue(exp_base) + vals = shifted_series.valuations() + + points = [] + for i in range(self.dim): + for j in range(self.dim): + v = vals[i, j] + if v != sp.oo: + points.append((j - i, v)) + + logger.debug(f"NEWTON: {self.dim=} | {self.precision=}") + + lowest_points = {} + for x, y in points: + if x not in lowest_points or y < lowest_points[x]: + lowest_points[x] = y + + sorted_x = sorted(lowest_points.keys()) + hull_points = [(x, lowest_points[x]) for x in sorted_x] + + lower_hull = [] + for p in hull_points: + while len(lower_hull) >= 2: + p1 = lower_hull[-2] + p2 = lower_hull[-1] + p3 = p + + slope1 = sp.Rational(p2[1] - p1[1], p2[0] - p1[0]) + slope2 = sp.Rational(p3[1] - p2[1], p3[0] - p2[0]) + + if slope2 <= slope1: + lower_hull.pop() + else: + break + lower_hull.append(p) + + if len(lower_hull) < 2: + return sp.S.Zero + + p1, p2 = lower_hull[0], lower_hull[1] + steepest_slope = sp.Rational(p2[1] - p1[1], p2[0] - p1[0]) + slope = -steepest_slope + + logger.debug(f"NEWTON: Lower hull points: {lower_hull}") + logger.debug(f"NEWTON: Computed {slope=}") + return max(sp.S.Zero, slope) + + def _check_eigenvalue_blindness(self, exp_base: sp.Expr) -> None: + """ + Detects if the matrix is completely nilpotent at the current precision. + """ + if exp_base == sp.S.Zero: + raise PrecisionExhaustedError( + "Zero Eigenvalue Drop! System is completely nilpotent at current precision.", + ) + + def _check_split_truncation(self, blocks: list[tuple[int, int, sp.Expr]]) -> None: + """ + Checks if the current precision is sufficient to solve the Sylvester equations + required for block decoupling. Raises PrecisionExhaustedError if starved. 
+ """ + max_sub_dim = max((e - s) for s, e, _ in blocks) + buffer_needed = 0 if max_sub_dim == 1 else (max_sub_dim * max_sub_dim) + needed_precision = self.p + 1 + buffer_needed + + if self.precision < needed_precision: + raise PrecisionExhaustedError( + "Split decoupling has insufficient precision!" + ) + + def _check_shear_truncation(self, g: sp.Rational | int) -> tuple[int, int]: + """ + Calculates the matrix shift caused by a shear and checks if it exceeds + available precision. Raises PrecisionExhaustedError if the engine starves. + Returns: + tuple[int, int]: (true_valid_precision, max_shift) + """ + max_shift = int(sp.ceiling((self.dim - 1) * g)) + true_valid_precision = self.precision - max_shift + + if true_valid_precision <= 0: + raise PrecisionExhaustedError("Shear shifted matrix out of bounds!") + + return true_valid_precision, max_shift + + def _check_cfm_validity(self, grid: list[list["GrowthRate"]]) -> None: + """ + Checks that no physical variable can completely vanish. + If an entire row is 0, a critical coupling term was starved of precision. + """ + for row in range(self.dim): + # A cell is algebraically zero if its base eigenvalue (exp_base) is 0 + if all(cell.exp_base == sp.S.Zero for cell in grid[row]): + raise PrecisionExhaustedError( + f"Row Nullity Violation! Physical variable at row {row} vanished completely.", + ) + + def asymptotic_growth(self) -> list[GrowthRate]: + """ + Extracts the raw, unmapped asymptotic components of the internal canonical basis. 
+ """ + if not self._is_reduced: + self.reduce() + + if self.children: + return [sol for child in self.children for sol in child.asymptotic_growth()] + + growths, log_power = [], 0 + + for i in range(self.dim): + exp_base = self.M.coeffs[0][i, i] + self._check_eigenvalue_blindness(exp_base) + + if i > 0 and exp_base == self.M.coeffs[0][i - 1, i - 1]: + is_jordan_link = any( + self.M.coeffs[k][i - 1, i] != sp.S.Zero + for k in range(self.precision) + ) + log_power = log_power + 1 if is_jordan_link else 0 + else: + log_power = 0 + + # Isolate the diagonal Taylor coefficients for this variable + diag_coeffs = [self.M.coeffs[k][i, i] for k in range(self.precision)] + + # Let GrowthRate parse its own math! + growths.append( + GrowthRate.from_taylor_coefficients( + coeffs=diag_coeffs, + p=self.p, + log_power=log_power, + factorial_power=self.factorial_power, + ) + ) + + return growths + + def canonical_growth_matrix(self) -> list[list[GrowthRate]]: + r""" + Constructs the 2D Canonical Fundamental Matrix (CFM) using the internal algebra + of `GrowthRate` objects. + + This method maps the raw, 1D independent asymptotic solutions back into the + physical coordinates of the original system by applying the accumulated + gauge transformations $S_{\text{total}}(t)$. + + Mathematically, it computes the dominant asymptotic term for each cell in: + $$Y(n) = S_{\text{total}}(n^{-1/p}) \cdot \text{diag}(E_1(n), \dots, E_N(n))$$ + + For a specific physical variable (row) and independent solution (column), the + gauge matrix $S_{\text{total}}$ shifts the polynomial degree of the solution based + on its first non-zero Taylor coefficient at index $k$. The exact fractional shift + applied to the polynomial degree is: + $$\Delta D = -\frac{k}{p} - d$$ + where $p$ is the ramification index and $d$ is the global factorial power. + + Returns: + A 2D list of `GrowthRate` objects. 
Each column $j$ represents an independent + solution vector, and each row $i$ represents the asymptotic behavior of the + $i$-th physical variable for that solution. + """ + if not self._is_reduced: + self.reduce() + + base_grid = [[GrowthRate() for _ in range(self.dim)] for _ in range(self.dim)] + if self.children: + offset = 0 + for child in self.children: + c_grid = child.canonical_growth_matrix() + for r, row in enumerate(c_grid): + for c, val in enumerate(row): + base_grid[offset + r][offset + c] = val + offset += child.dim + else: + for i, growth in enumerate(self.asymptotic_growth()): + base_grid[i][i] = growth + + shift_matrix = [ + [GrowthRate() for _ in range(self.dim)] for _ in range(self.dim) + ] + for r in range(self.dim): + for c in range(self.dim): + for k, mat in enumerate(self.S_total.coeffs): + if mat[r, c] != sp.S.Zero: + shift_matrix[r][c] = GrowthRate( + exp_base=sp.S.One, + polynomial_degree=-sp.Rational(k, self.p), + ) + break + + final_grid = [ + [ + sum( + (shift_matrix[row][k] * base_grid[k][col] for k in range(self.dim)), + start=GrowthRate(), + ) + for col in range(self.dim) + ] + for row in range(self.dim) + ] + + sorted_indices = sorted( + range(self.dim), key=lambda c: final_grid[-1][c], reverse=True + ) + final_grid = [[row[c] for c in sorted_indices] for row in final_grid] + + self._check_cfm_validity(final_grid) + return final_grid + + def canonical_fundamental_matrix(self) -> Matrix: + """ + Converts the smart GrowthRate grid into a formal SymPy Matrix of expressions. + This provides the final, human-readable fundamental solution set. 
+ """ + growth_matrix = self.canonical_growth_matrix() + return Matrix([[c.as_expr(n) for c in r] for r in growth_matrix]) diff --git a/ramanujantools/asymptotics/reducer_test.py b/ramanujantools/asymptotics/reducer_test.py new file mode 100644 index 0000000..0c335aa --- /dev/null +++ b/ramanujantools/asymptotics/reducer_test.py @@ -0,0 +1,227 @@ +import pytest +import sympy as sp +from sympy.abc import n + +from ramanujantools import Matrix +from ramanujantools.asymptotics import GrowthRate, PrecisionExhaustedError, Reducer +from ramanujantools.asymptotics.series_matrix import SeriesMatrix + + +def asymptotic_expressions(asymptotic_growth: list[GrowthRate]) -> list[sp.Expr]: + return [g.as_expr(n) if g is not None else sp.S.Zero for g in asymptotic_growth] + + +def get_reducer(matrix: Matrix, precision: int = 5) -> Reducer: + var = n + degrees = [d for d in matrix.degrees(var) if d != -sp.oo] + factorial_power = max(degrees) if degrees else 0 + + normalized = matrix / (var**factorial_power) + series = SeriesMatrix.from_matrix(normalized, var=n, p=1, precision=precision) + + return Reducer( + series=series, + factorial_power=factorial_power, + precision=precision, + p=1, + ) + + +def test_fibonacci(): + M = Matrix([[0, 1], [1, 1]]) + + exprs = asymptotic_expressions(get_reducer(M).asymptotic_growth()) + + expected_exprs = [ + (sp.Rational(1, 2) + sp.sqrt(5) / 2) ** n, + (sp.Rational(1, 2) - sp.sqrt(5) / 2) ** n, + ] + + assert [sp.simplify(e) for e in exprs] == expected_exprs + + +def test_tribonacci(): + R = (sp.sqrt(33) / 9 + sp.Rational(19, 27)) ** sp.Rational(1, 3) + c1 = sp.Rational(-1, 2) - sp.sqrt(3) * sp.I / 2 + c2 = sp.Rational(-1, 2) + sp.sqrt(3) * sp.I / 2 + + expected_bases = [ + sp.Rational(1, 3) + 4 / (9 * R) + R, + sp.Rational(1, 3) + c1 * R + 4 / (9 * c1 * R), + sp.Rational(1, 3) + 4 / (9 * c2 * R) + c2 * R, + ] + + M = Matrix([[0, 0, 1], [1, 0, 1], [0, 1, 1]]) + + growths = get_reducer(M).asymptotic_growth() + assert len(growths) == 3 + + 
actual_bases = [g.exp_base for g in growths] + + for expected, actual in zip(expected_bases, actual_bases): + assert abs(sp.N(expected - actual, 50)) < 1e-40, ( + f"Expected {expected}, got {actual}" + ) + + +def test_exponential_separation(): + expected_lambda = Matrix.diag(4, 2) + expected_D = Matrix.diag(5, 3) + + M_canonical = expected_lambda * (Matrix.eye(2) + expected_D / n) + + U = Matrix.eye(2) + Matrix([[1, -2], [3, 1]]) / n + M = M_canonical.coboundary(U) + + exprs = asymptotic_expressions(get_reducer(M, precision=5).asymptotic_growth()) + + expected_exprs = [4**n * n**5, 2**n * n**3] + + assert [sp.simplify(e) for e in exprs] == expected_exprs + + +def test_newton_polygon_separation(): + expected_canonical = Matrix([[4 * (1 + 1 / n) ** 3, 0], [0, 2 * (1 + 1 / n) ** 1]]) + U = Matrix([[1, n], [0, 1]]) + M = expected_canonical.coboundary(U) + + exprs = asymptotic_expressions(get_reducer(M, precision=5).asymptotic_growth()) + + assert len(exprs) == 2 + + assert 4**n in sp.simplify(exprs[0]).atoms(sp.Pow) + assert 2**n in sp.simplify(exprs[1]).atoms(sp.Pow) + + +def test_ramified_scalar_peeling_no_block_degeneracy(): + """ + Triggers a Jordan block, hits the Identity Trap, uses Scalar Peeling + to find distinct roots at M_1, and extracts them perfectly. 
+ """ + M = Matrix([[0, -(n - 1) / n], [1, 2]]) + exprs = asymptotic_expressions( + get_reducer(M.transpose(), precision=4).asymptotic_growth() + ) + + expected_exprs = [ + sp.exp(-2 * sp.sqrt(n)) * n ** sp.Rational(-1, 4), + sp.exp(2 * sp.sqrt(n)) * n ** sp.Rational(-1, 4), + ] + + assert [sp.simplify(e) for e in exprs] == expected_exprs + + +@pytest.mark.parametrize( + "U", + [ + Matrix.eye(2), + Matrix([[2, 0], [0, 5]]), + Matrix([[0, 1], [1, 0]]), + Matrix([[1, 1 / n], [0, 1]]), + Matrix([[1, 0], [1 / (n**2), 1]]), + ], +) +def test_gauge_invariance(U): + M = Matrix([[0, -(n - 1) / n], [1, 2]]) + + original_exprs = asymptotic_expressions(get_reducer(M).asymptotic_growth()) + transformed_exprs = asymptotic_expressions( + get_reducer(M.coboundary(U)).asymptotic_growth() + ) + + assert [sp.simplify(e) for e in original_exprs] == [ + sp.simplify(e) for e in transformed_exprs + ], f"Invariance failed for gauge U = {U}" + + +def test_nilpotent_ghost(): + m = Matrix([[0, 1], [0, 0]]) + precision = 3 + + # Must explicitly trigger the Blindness Radar + with pytest.raises(PrecisionExhaustedError): + get_reducer(m.transpose(), precision=precision).reduce() + + +def test_row_nullity(): + """ + Because the reduction strictly multiplies invertible matrices, an entire row + can only vanish if the final extraction process is starved or the input is invalid. + We unit test the radar directly to ensure it guards the exit. + """ + m = Matrix([[1, 0], [0, 1]]) # Dummy valid matrix + reducer = get_reducer(m, precision=5) + + # Craft a physically impossible CFM where Variable 1 has completely vanished + broken_cfm = [ + [GrowthRate(polynomial_degree=2), GrowthRate(polynomial_degree=3)], + [GrowthRate(), GrowthRate()], + ] + + with pytest.raises(PrecisionExhaustedError): + reducer._check_cfm_validity(broken_cfm) + + +def test_input_truncation(): + """ + Tests if the strict boundary alarms catch an aggressive sub-diagonal shear starvation. + The term n^-2 produces g=2. 
For a 5x5, max_shift = 8. + At precision=3, this organically starves the matrix on iteration 1. + """ + m = Matrix( + [ + [1, 1, 0, 0, 0], + [n**-2, 1, 1, 0, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 1, 1], + [0, 0, 0, 0, 1], + ] + ) + + with pytest.raises(PrecisionExhaustedError): + get_reducer(m, precision=3).reduce() + + +def test_ramification_exact_expressions(): + """ + Tests that a system requiring fractional powers successfully triggers + ramification and extracts the sub-exponential roots. + """ + M = Matrix([[0, 1], [1 / n, 0]]) + exprs = asymptotic_expressions(get_reducer(M, precision=4).asymptotic_growth()) + + expected_exprs = [ + (-1) ** n * n ** sp.Rational(1, 4) / sp.sqrt(sp.factorial(n)), + n ** sp.Rational(1, 4) / sp.sqrt(sp.factorial(n)), + ] + + assert exprs == expected_exprs + + +def test_ramification_structural_mechanics(): + m = Matrix([[0, 1, 0], [0, 0, 1], [n**2, 0, 0]]) + # f(n) = n^2 * a_(n-3) + + precision = 10 + reducer = get_reducer(m.transpose(), precision=precision) + grid = reducer.canonical_growth_matrix() + + # 1. Verify internal engine state mapped the branch correctly + assert reducer.p == 3 + + # 2. Strict Mathematical Validation (No string parsing needed anymore!) + # We just inspect the strongly-typed GrowthRate objects in the grid. + found_fractional_power = False + + for row in grid: + for cell in row: + if ( + isinstance(cell.polynomial_degree, sp.Rational) + and cell.polynomial_degree.q == 3 + ): + found_fractional_power = True + break + + assert found_fractional_power, ( + "Failed to mathematically verify fractional ramification powers in the GrowthRate objects." 
+ ) diff --git a/ramanujantools/asymptotics/series_matrix.py b/ramanujantools/asymptotics/series_matrix.py new file mode 100644 index 0000000..3d07827 --- /dev/null +++ b/ramanujantools/asymptotics/series_matrix.py @@ -0,0 +1,375 @@ +from __future__ import annotations + +import sympy as sp +from sympy.abc import t, n + +from ramanujantools import Matrix + + +class SeriesMatrix: + r""" + Represents a formal power series (or Puiseux series) with matrix coefficients. + + The series is expanded in terms of a local parameter $t = n^{-1/p}$, taking the form: + $$M(t) = A_0 + A_1 t + A_2 t^2 + \dots + A_k t^k + \mathcal{O}(t^{k+1})$$ + + This class provides the core algebraic ring operations (addition, Cauchy multiplication, + formal inversion) and gauge transformations (shifts, shears, coboundaries) required + for the Birkhoff-Trjitzinsky reduction algorithm. + """ + + def __init__(self, coeffs, p=1, precision=None): + """ + Constructs a SeriesMatrix from a list of matrix coefficients, + with optional ramification `p` such that $t = n^{-1/p}$. + + Args: + coeffs: List of Matrix objects [A_0, A_1, A_2, ...] + p: The ramification index (integer). Default is 1 (t = 1/n). + precision: Maximum number of terms to keep. Defaults to len(coeffs). + """ + self.p = p + self.precision = precision if precision is not None else len(coeffs) + self.shape = coeffs[0].shape if coeffs else (0, 0) + + # Store coefficients up to precision, pad with zero matrices if needed + self.coeffs = [] + for i in range(self.precision): + if i < len(coeffs): + if len(coeffs[i].free_symbols) > 0: + raise ValueError("SeriesMatrix can only receive numeric matrices!") + self.coeffs.append(coeffs[i]) + else: + self.coeffs.append(Matrix.zeros(*self.shape)) + + def __add__(self, other) -> SeriesMatrix: + if self.shape != other.shape or self.p != other.p: + raise ValueError( + "SeriesMatrix dimensions or ramification indices do not match." 
+ ) + new_precision = min(self.precision, other.precision) + new_coeffs = [self.coeffs[i] + other.coeffs[i] for i in range(new_precision)] + return SeriesMatrix(new_coeffs, p=self.p, precision=new_precision) + + def __mul__(self, other: SeriesMatrix) -> SeriesMatrix: + """ + Computes the Cauchy product of two Formal Power Series matrices. + Automatically bounds the result to the lowest precision of the two operands. + """ + if self.shape != other.shape or self.p != other.p: + raise ValueError( + "SeriesMatrix dimensions or ramification indices do not match." + ) + + # Mathematically, the product of O(t^A) and O(t^B) is valid up to O(t^min(A, B)) + out_precision = min(self.precision, other.precision) + new_coeffs = [Matrix.zeros(*self.shape) for _ in range(out_precision)] + + for i in range(out_precision): + if self.coeffs[i].is_zero_matrix: + continue + + for j in range(out_precision - i): + if other.coeffs[j].is_zero_matrix: + continue + + new_coeffs[i + j] += self.coeffs[i] * other.coeffs[j] + + return SeriesMatrix(new_coeffs, p=self.p, precision=out_precision).simplify() + + def inverse(self) -> SeriesMatrix: + r""" + Computes the formal inverse series $V(t) = S(t)^{-1}$. + + By definition, $S(t) \cdot V(t) = I$. 
Expanding this into a Cauchy product + and equating the coefficients for $t^k$ yields the recurrence relation: + $$\sum_{i=0}^{k} S_i V_{k-i} = 0 \quad \text{for } k > 0$$ + + Isolating the $k$-th coefficient $V_k$ provides the explicit update rule used + by this method: + $$V_k = -S_0^{-1} \sum_{i=1}^{k} S_i V_{k-i}$$ + """ + V_coeffs = [Matrix.zeros(*self.shape) for _ in range(self.precision)] + + V_0 = self.coeffs[0].inv() + V_coeffs[0] = V_0 + + for k in range(1, self.precision): + sum_terms = Matrix.zeros(*self.shape) + for i in range(1, k + 1): + sum_terms += self.coeffs[i] * V_coeffs[k - i] + + # DEFLATE + CRUSH ALGEBRA: Prevent Y^6 from swelling with un-evaluated roots + V_coeffs[k] = (-V_0 * sum_terms).applyfunc( + lambda x: sp.cancel(sp.expand(x)) + ) + + return SeriesMatrix(V_coeffs, p=self.p, precision=self.precision) + + def __repr__(self) -> str: + return ( + f"SeriesMatrix(shape={self.shape}, precision={self.precision}, p={self.p})" + ) + + def __str__(self) -> str: + """Helper to see the series written out symbolically.""" + expr = Matrix.zeros(*self.shape) + for i, coeff in enumerate(self.coeffs): + expr += coeff * (n ** (-sp.Rational(i, self.p))) + return str(expr) + + def shift(self) -> SeriesMatrix: + r""" + Applies the discrete shift operator $n \to n + 1$ to the formal series. + + Given the local parameter $t = n^{-1/p}$, substituting $n+1$ yields the new parameter: + $$t_{\text{new}} = (n + 1)^{-1/p} = (t^{-p} + 1)^{-1/p} = t(1 + t^p)^{-1/p}$$ + + Applying this substitution to the $m$-th term of the series ($A_m t^m$) and expanding + it using the generalized binomial theorem gives: + $$A_m t_{\text{new}}^m = A_m t^m (1 + t^p)^{-m/p} = \sum_{j=0}^{\infty} A_m \binom{-m/p}{j} t^{m + pj}$$ + + This method computes this expansion up to the defined precision and accumulates + the shifted coefficients. 
+ """ + new_coeffs = [Matrix.zeros(*self.shape) for _ in range(self.precision)] + + for m in range(self.precision): + A_m = self.coeffs[m] + if A_m.is_zero_matrix: + continue + + j = 0 + while True: + k = m + self.p * j # The new power of t + if k >= self.precision: + break + + # Generalized binomial coefficient: (-m/p choose j) + binom_coeff = sp.binomial(-sp.Rational(m, self.p), j) + + new_coeffs[k] += A_m * binom_coeff + j += 1 + + return SeriesMatrix(new_coeffs, p=self.p, precision=self.precision) + + def divide_by_t(self) -> SeriesMatrix: + """ + Factors out a power of t from the entire series. + Mathematically equivalent to M(t) / t. This physically shifts all matrix + coefficients one index to the left and pads the tail with a zero matrix. + """ + coeffs = self.coeffs[1:] + [Matrix.zeros(*self.shape)] + return SeriesMatrix(coeffs, p=self.p, precision=self.precision) + + def valuations(self) -> Matrix: + """ + Returns a matrix where each entry (i, j) is the valuation + (the lowest power of t with a non-zero coefficient) of that cell. + Returns sympy.oo (infinity) for cells that are strictly zero. + """ + rows, cols = self.shape + val_matrix = Matrix.zeros(rows, cols) + + for i in range(rows): + for j in range(cols): + val = sp.oo + for k in range(self.precision): + if self.coeffs[k][i, j] != sp.S.Zero: + val = sp.Rational(k) + break + val_matrix[i, j] = val + + return val_matrix + + def similarity_transform(self, P: Matrix, J: Matrix = None) -> SeriesMatrix: + """ + Applies a constant similarity transformation P^{-1} * M(t) * P. + If the series has no tail (is a constant matrix) and J is provided, + it mathematically short-circuits the inversion. 
+ """ + has_tail = any(not C.is_zero_matrix for C in self.coeffs[1:]) + + if not has_tail and J is not None: + new_coeffs = [J] + [ + Matrix.zeros(*self.shape) for _ in range(self.precision - 1) + ] + return SeriesMatrix(new_coeffs, p=self.p, precision=self.precision) + + P_inv = P.inverse() + new_coeffs = [P_inv * C * P for C in self.coeffs] + + return SeriesMatrix(new_coeffs, p=self.p, precision=self.precision) + + def coboundary(self, T: SeriesMatrix) -> SeriesMatrix: + """ + Computes the right-acting discrete coboundary T(n+1)^{-1} * M(n) * T(n). + Assumes T is an invertible formal power series (det(T_0) != 0). + """ + T_shifted = T.shift() + T_inv = T_shifted.inverse() + left_mult = T_inv * self + res = left_mult * T + return res + + def truncate(self, new_precision: int) -> SeriesMatrix: + """Sheds trailing precision terms to optimize performance.""" + if new_precision >= self.precision: + return self + return SeriesMatrix( + self.coeffs[:new_precision], p=self.p, precision=new_precision + ) + + def _shear_row_corrections(self, g: int) -> list[list[sp.Expr]]: + """Pre-computes the generalized binomial coefficients for the row shifts.""" + row_corrections = [] + for i in range(self.shape[0]): + exponent = sp.Rational(i * g, self.p) + coeffs = [sp.S.Zero] * self.precision + + for k in range(self.precision // self.p): + bin_coeff = sp.S.One + for j in range(k): + bin_coeff *= (exponent - j) / sp.Rational(j + 1) + + idx = k * self.p + if idx < self.precision: + coeffs[idx] = bin_coeff + row_corrections.append(coeffs) + return row_corrections + + def shear_coboundary( + self, shift: sp.Integer, target_precision: int | None = None + ) -> tuple[SeriesMatrix, int]: + """ + Applies a shearing transformation S(t) to the series to expose sub-exponential + growth, where $S(t) = diag(1, t^a, t^{2a}, \\dots)$ and $a$ is the shift parameter. + + Args: + shift: Represents a in the above description. 
+ target_precision: If provided, truncates the resulting series to this length, + saving heavy CAS simplification on discarded tail terms. + + Returns: + A tuple containing the sheared SeriesMatrix and the integer `h` representing + the overall degree shift (used to adjust the global factorial power). + """ + if not shift.is_integer: + raise ValueError(f"{shift=} must be an integer!") + + row_corrections = self._shear_row_corrections(shift) + power_dict = {} + + for m in range(self.precision): + for i in range(self.shape[0]): + for j in range(self.shape[0]): + val_M = self.coeffs[m][i, j] + if val_M == sp.S.Zero: + continue + + current = (j - i) * shift + for c in range(self.precision): + val_C = row_corrections[i][c] + if val_C == sp.S.Zero: + continue + + power = m + c + current + if power not in power_dict: + power_dict[power] = Matrix.zeros(*self.shape) + power_dict[power][i, j] += val_C * val_M + + min_power = min(power_dict.keys()) if power_dict else 0 + h = -min_power + + output_precision = ( + target_precision if target_precision is not None else self.precision + ) + new_coeffs = [] + for k in range(output_precision): + target_power = k - h + if target_power in power_dict: + new_coeffs.append(power_dict[target_power]) + else: + new_coeffs.append(Matrix.zeros(*self.shape)) + + return SeriesMatrix( + new_coeffs, p=self.p, precision=output_precision + ).simplify(), h + + def shift_leading_eigenvalue(self, lambda_val: sp.Expr) -> SeriesMatrix: + """ + Shifts the formal series by subtracting a scalar matrix from the leading term. + Mathematically equivalent to M(t) - lambda_val * I. 
+ """ + new_coeffs = list(self.coeffs) + # Shift only the M_0 coefficient by the eigenvalue identity matrix + new_coeffs[0] = new_coeffs[0] - lambda_val * Matrix.eye(self.shape[0]) + + return SeriesMatrix(new_coeffs, p=self.p, precision=self.precision) + + def ramify(self, b: int) -> SeriesMatrix: + """ + Executes Phase 4: Substitutes t = tau^b, effectively spreading the coefficients + to handle fractional powers (Puiseux series). + Returns a new SeriesMatrix with a multiplied ramification index and precision. + """ + new_precision = self.precision * b + new_coeffs = [Matrix.zeros(*self.shape) for _ in range(new_precision)] + + for k in range(self.precision): + new_coeffs[k * b] = self.coeffs[k] + + return SeriesMatrix(new_coeffs, p=self.p * b, precision=new_precision) + + def get_first_non_scalar_index(self) -> int | None: + """ + Scans the series to find the index $k$ of the first matrix $A_k$ that is not a scalar matrix + (i.e., not a multiple of the identity matrix). + + In the Birkhoff-Trjitzinsky algorithm, if the entire series consists only of + scalar matrices, the system is fundamentally reduced and the algorithm terminates. + """ + for k, C in enumerate(self.coeffs): + # A matrix is scalar if it's diagonal and has <= 1 unique diagonal elements + if C.is_diagonal() and len(set(C.diagonal())) <= 1: + continue + return k + + return None + + def simplify(self) -> SeriesMatrix: + def crush(x): + return sp.cancel(sp.expand(x)) + + new_coeffs = [c.applyfunc(crush) for c in self.coeffs] + + return type(self)(new_coeffs, p=self.p, precision=self.precision) + + @classmethod + def from_matrix( + cls, matrix: Matrix, var: sp.Symbol, p: int, precision: int + ) -> SeriesMatrix: + """ + Converts a normalized symbolic matrix into a formal SeriesMatrix at infinity + by executing a formal Taylor expansion: substituting n = t^(-p) and extracting coefficients. 
+ """ + dim = matrix.shape[0] + + if not matrix.free_symbols: + coeffs = [matrix] + [Matrix.zeros(dim, dim) for _ in range(precision - 1)] + return cls(coeffs, p=p, precision=precision) + + M_t = matrix.subs({var: t ** (-p)}) + + expanded_matrix = M_t.applyfunc( + lambda x: sp.series(x, t, 0, precision).removeO() + ) + + coeffs = [] + for i in range(precision): + coeff_matrix = expanded_matrix.applyfunc(lambda x: sp.expand(x).coeff(t, i)) + if coeff_matrix.has(t) or coeff_matrix.has(var): + raise ValueError(f"Coefficient {i} failed to evaluate to a constant.") + coeffs.append(coeff_matrix) + + return cls(coeffs, p=p, precision=precision) diff --git a/ramanujantools/asymptotics/series_matrix_test.py b/ramanujantools/asymptotics/series_matrix_test.py new file mode 100644 index 0000000..6c60c55 --- /dev/null +++ b/ramanujantools/asymptotics/series_matrix_test.py @@ -0,0 +1,206 @@ +import sympy as sp +import random + +from ramanujantools import Matrix +from ramanujantools.asymptotics import SeriesMatrix + + +def generate_test_matrices(seed=42, dim=4, count=8): + random.seed(seed) + matrices = [] + for i in range(count): + mat = Matrix(dim, dim, lambda r, c: random.randint(-5, 5)) + if i == 0: + mat = mat + 20 * Matrix.eye(dim) + matrices.append(mat) + return matrices + + +def test_construction(): + eye = Matrix.eye(2) + p = 1 + precision = 4 + A = Matrix([[1, 2], [3, 4]]) + S = SeriesMatrix([eye, A], p=p, precision=precision) + assert eye == S.coeffs[0] + assert A == S.coeffs[1] + assert p == S.p + assert precision == S.precision + for i in range(2, precision): + assert Matrix.zeros(2, 2) == S.coeffs[i] + + +def test_multiplication(): + precision = 3 + + A_coeffs = [Matrix.diag(1, 1), Matrix.diag(2, 2), Matrix.diag(3, 3)] + B_coeffs = [Matrix.diag(4, 4), Matrix.diag(5, 5), Matrix.diag(6, 6)] + + A = SeriesMatrix(A_coeffs, p=1, precision=precision) + B = SeriesMatrix(B_coeffs, p=1, precision=precision) + + C = A * B + + # C_0 = A_0 * B_0 = 1 * 4 = 4 + assert C.coeffs[0] 
def test_inverse():
    """S * S^{-1} must equal the identity series up to the working precision."""
    precision = 10
    matrices = generate_test_matrices()
    dim = matrices[0].shape[0]
    series = SeriesMatrix(matrices, p=1, precision=precision)

    product = series * series.inverse()

    for i in range(precision):
        # Identity at t^0, zero matrices for every higher-order coefficient.
        expected = Matrix.eye(dim) if i == 0 else Matrix.zeros(dim)
        assert product.coeffs[i] == expected, (
            f"Inverse mismatch at coefficient t^{i}"
        )
+ Proves that the algebraic shift and the discrete (1+t^p) Taylor correction + are simultaneously and correctly applied. + """ + precision = 2 + M_coeffs = [ + Matrix([[1, 0], [0, 1]]), + Matrix([[0, 1], [1, 0]]), + ] + M = SeriesMatrix(M_coeffs, p=1, precision=precision) + + M_sheared, h = M.shear_coboundary(shift=sp.S(1)) + + assert 0 == h + assert Matrix([[1, 0], [1, 1]]) == M_sheared.coeffs[0] + assert Matrix([[0, 0], [1, 1]]) == M_sheared.coeffs[1] + + +def test_series_matrix_valuations(): + """ + Tests that SeriesMatrix correctly identifies the lowest non-zero + power (valuation) for every cell in the matrix series. + """ + # M_0 has a value only at (0, 0) + C0 = Matrix([[1, 0], [0, 0]]) + + # M_1 has a value only at (0, 1) + C1 = Matrix([[0, 2], [0, 0]]) + + # M_2 has a value only at (1, 0) + C2 = Matrix([[0, 0], [3, 0]]) + + # Cell (1, 1) remains 0 across all coefficients + + # Construct the series: C0 + C1*t + C2*t^2 + SM = SeriesMatrix([C0, C1, C2], precision=3) + + vals = SM.valuations() + + # Assertions + assert vals[0, 0] == 0 # Appeared in C0 + assert vals[0, 1] == 1 # Appeared in C1 + assert vals[1, 0] == 2 # Appeared in C2 + assert vals[1, 1] == sp.oo # Never appeared + + +def test_divide_by_t(): + """ + Tests that divide_by_t correctly shifts the series coefficients left by one + and pads the tail with a zero matrix to maintain precision. + """ + dim = 2 + M0 = Matrix([[1, 2], [3, 4]]) + M1 = Matrix([[5, 6], [7, 8]]) + M2 = Matrix([[9, 10], [11, 12]]) + + S = SeriesMatrix([M0, M1, M2], p=1, precision=3) + + # We now capture the new matrix! + S = S.divide_by_t() + + assert S.precision == 3 + assert len(S.coeffs) == 3 + + assert S.coeffs[0] == M1 + assert S.coeffs[1] == M2 + assert S.coeffs[2] == Matrix.zeros(dim, dim) + + +def test_ramify(): + """ + Tests that ramify correctly substitutes t = tau^b, scaling the precision, + updating the ramification index p, and perfectly spacing out the coefficients. 
+ """ + dim = 2 + M0 = Matrix([[1, 2], [3, 4]]) + M1 = Matrix([[5, 6], [7, 8]]) + + S = SeriesMatrix([M0, M1], p=1, precision=2) + + b = 3 + S_ramified = S.ramify(b) + + assert S_ramified.p == 3 + assert S_ramified.precision == 6 + assert len(S_ramified.coeffs) == 6 + + assert S_ramified.coeffs[0] == M0 # tau^0 + assert S_ramified.coeffs[1] == Matrix.zeros(dim) + assert S_ramified.coeffs[2] == Matrix.zeros(dim) + assert S_ramified.coeffs[3] == M1 # tau^3 + assert S_ramified.coeffs[4] == Matrix.zeros(dim) + assert S_ramified.coeffs[5] == Matrix.zeros(dim) diff --git a/ramanujantools/cmf/meijer_g_test.py b/ramanujantools/cmf/meijer_g_test.py index c2f7562..1395248 100644 --- a/ramanujantools/cmf/meijer_g_test.py +++ b/ramanujantools/cmf/meijer_g_test.py @@ -3,7 +3,7 @@ import sympy as sp from sympy.abc import n, z -from ramanujantools import Position, Matrix +from ramanujantools import Position, Matrix, LinearRecurrence from ramanujantools.cmf import MeijerG @@ -28,3 +28,100 @@ def test_gamma(): limit.initial_values = Matrix([[1, 1, 0], [1, 1, 1]]) assert limit.as_float() == approx(limit.mp.euler) + + +def test_asymptotics_fail1(): + cmf = MeijerG(3, 2, 2, 3, 1) + a0, a1 = sp.symbols("a:2") + b0, b1, b2 = sp.symbols("b:3") + start = Position({a0: 0, a1: 0, b0: 0, b1: 0, b2: 0}) + trajectory = Position({a0: 0, a1: 0, b0: 0, b1: 1, b2: 1}) + m = cmf.trajectory_matrix(trajectory, start) + r = LinearRecurrence(m) + + expected = [ + n**4 * sp.factorial(n) ** 2, + (-1) ** n + * n ** (sp.Rational(11, 4)) + * sp.exp(2 * sp.I * sp.sqrt(n)) + * sp.factorial(n), + (-1) ** n + * n ** (sp.Rational(11, 4)) + * sp.exp(-2 * sp.I * sp.sqrt(n)) + * sp.factorial(n), + ] + assert expected == r.asymptotics(precision=10) + + +def test_asymptotics_fail2(): + cmf = MeijerG(3, 2, 2, 3, 1) + a0, a1 = sp.symbols("a:2") + b0, b1, b2 = sp.symbols("b:3") + start = Position({a0: 0, a1: 0, b0: 0, b1: 0, b2: 0}) + trajectory = Position({a0: 0, a1: 0, b0: 0, b1: 0, b2: 1}) + m = 
cmf.trajectory_matrix(trajectory, start) + r = LinearRecurrence(m) + + expected = [n**2 * sp.log(n) * sp.factorial(n), n**2 * sp.factorial(n), 1] + assert expected == r.asymptotics(precision=11) + + +def test_asymptotics_fail3(): + cmf = MeijerG(3, 2, 2, 3, 1) + a0, a1 = sp.symbols("a:2") + b0, b1, b2 = sp.symbols("b:3") + start = Position({a0: 0, a1: 0, b0: 0, b1: 0, b2: 0}) + trajectory = Position({a0: 0, a1: 0, b0: 1, b1: 2, b2: 2}) + m = cmf.trajectory_matrix(trajectory, start) + r = LinearRecurrence(m) + + expected = [ + n**10 * sp.factorial(n) ** 4, + (-16) ** n + * n ** sp.Rational(31, 4) + * sp.exp(4 * sp.I * sp.sqrt(n)) + * sp.factorial(n) ** 3, + (-16) ** n + * n ** sp.Rational(31, 4) + * sp.exp(-4 * sp.I * sp.sqrt(n)) + * sp.factorial(n) ** 3, + ] + + assert expected == r.asymptotics(precision=18) + + +def test_asymptotics_euler_trajectory(): + cmf = MeijerG(3, 2, 2, 3, 1) + a0, a1 = sp.symbols("a:2") + b0, b1, b2 = sp.symbols("b:3") + start = Position({a0: 0, a1: 0, b0: 0, b1: 0, b2: 0}) + trajectory = Position({a0: 0, a1: 0, b0: 1, b1: 1, b2: 1}) + m = cmf.trajectory_matrix(trajectory, start) + r = LinearRecurrence(m) + + expected = [ + n ** (sp.Rational(16, 3)) + * sp.exp( + -sp.I + * n ** (sp.Rational(1, 3)) + * (6 * n ** (sp.Rational(1, 3)) + 1 - sp.sqrt(3) * sp.I) + / (sp.sqrt(3) - sp.I) + ) + * sp.factorial(n) ** 2, + n ** (sp.Rational(16, 3)) + * sp.exp( + n ** (sp.Rational(1, 3)) + * ( + 3 * sp.sqrt(3) * n ** (sp.Rational(1, 3)) + + 3 * sp.I * n ** (sp.Rational(1, 3)) + + 2 * sp.I + ) + / (sp.sqrt(3) - sp.I) + ) + * sp.factorial(n) ** 2, + n ** (sp.Rational(16, 3)) + * sp.exp(-(n ** (sp.Rational(1, 3))) * (3 * n ** (sp.Rational(1, 3)) - 1)) + * sp.factorial(n) ** 2, + ] + actual = r.asymptotics(precision=12) + assert expected == actual diff --git a/ramanujantools/linear_recurrence.py b/ramanujantools/linear_recurrence.py index e2905cb..26017e0 100644 --- a/ramanujantools/linear_recurrence.py +++ b/ramanujantools/linear_recurrence.py @@ -1,10 
+1,13 @@ from __future__ import annotations -from functools import cached_property +from typing import TYPE_CHECKING + import copy import itertools +from functools import cached_property, lru_cache from tqdm import tqdm +import mpmath as mp import sympy as sp from sympy.abc import n from sympy.printing.defaults import Printable @@ -12,6 +15,9 @@ from ramanujantools import Matrix, Limit, GenericPolynomial from ramanujantools.utils import batched, Batchable +if TYPE_CHECKING: + from ramanujantools.asymptotics import Reducer + def trim_trailing_zeros(sequence: list[int]) -> list[int]: ending = len(sequence) @@ -53,12 +59,17 @@ def __init__(self, recurrence: Matrix | list[sp.Expr] | None = None): relation = [sp.factor(sp.simplify(p)) for p in relation] self.relation = trim_trailing_zeros(relation) - def __eq__(self, other: Matrix) -> bool: + def __eq__(self, other: LinearRecurrence) -> bool: """ - Returns True iff two requrences are identical (even up to gcd). + Returns True iff two recurrences are identical (even up to gcd). """ + if not isinstance(other, LinearRecurrence): + return NotImplemented return self.relation == other.relation + def __hash__(self) -> int: + return hash(self.recurrence_matrix) + def __neg__(self) -> LinearRecurrence: return LinearRecurrence([-c for c in self.relation]) @@ -352,11 +363,94 @@ def compose(self, other: LinearRecurrence) -> LinearRecurrence: result += a_i * other._shift(i) return result - def kamidelta(self, depth=20): + def kamidelta(self, depth=20) -> list[mp.mpf]: r""" - Uses the Kamidelta alogrithm to predict possible delta values of the recurrence. + Uses the Kamidelta algorithm to predict possible delta values of the recurrence. Effectively calls kamidelta on `recurrence_matrix`. For more details, see `Matrix.kamidelta` """ return self.recurrence_matrix.kamidelta(depth) + + @lru_cache + def _get_reducer_at_precision(self, precision: int) -> Reducer: + """Pure reduction engine. 
No boundary logic; just executes the math.""" + from ramanujantools.asymptotics import Reducer + from ramanujantools.asymptotics.series_matrix import SeriesMatrix + + matrix = self.recurrence_matrix.transpose() + factorial_power = max(matrix.degrees(n)) + normalized_matrix = matrix / (n**factorial_power) + + series = SeriesMatrix.from_matrix( + matrix=normalized_matrix, var=n, p=1, precision=precision + ) + reducer = Reducer( + series=series, factorial_power=factorial_power, precision=precision, p=1 + ) + reducer.reduce() + return reducer + + @lru_cache + def _get_reducer(self, precision=None) -> Reducer: + """Executes the precision backoff loop for the linear recurrence.""" + from ramanujantools.asymptotics import PrecisionExhaustedError + import logging + + logger = logging.getLogger(__name__) + + if precision is not None: + return self._get_reducer_at_precision(precision) + + dim = self.order() + + # Calculate bounds directly from the recurrence polynomials! + degrees = [d for d in self.recurrence_matrix.degrees(n) if d != -sp.oo] + S = max(degrees) - min(degrees) + poincare_bound = S * 2 + 1 + negative_bound = -min(degrees, default=0) + 1 + current_precision = max(poincare_bound, negative_bound) + + step_size = 2 * dim + max_precision = (dim**2) * max(S, 1) + dim + step_size + + last_expr_matrix, current_reducer = None, None + + while current_precision <= max_precision: + try: + reducer = self._get_reducer_at_precision(current_precision) + + growth_grid = reducer.canonical_growth_matrix() + expr_matrix = sp.Matrix( + [[c.as_expr(n) for c in r] for r in growth_grid] + ) + + if last_expr_matrix is not None: + diff = (expr_matrix - last_expr_matrix).applyfunc(sp.expand) + if diff.is_zero_matrix: + return current_reducer + + last_expr_matrix = expr_matrix + current_reducer = reducer + current_precision += step_size + + except PrecisionExhaustedError as e: + logger.debug( + f"STABILITY: {e} Jumping to {current_precision + step_size}." 
+ ) + current_precision += step_size + last_expr_matrix = None + + raise PrecisionExhaustedError( + "Asymptotic stability could not be verified for this system. " + f"Reached max_precision={max_precision} without 2 consecutive identical states. " + "The system may be highly degenerate or require more initial terms." + ) + + def asymptotics(self, precision=None) -> list[sp.Expr]: + """ + Returns the formal basis of asymptotic solutions for the recurrence sequence p_n. + """ + reducer = self._get_reducer(precision) + cfm = reducer.canonical_fundamental_matrix().transpose() + return list(cfm.col(-1)) diff --git a/ramanujantools/linear_recurrence_test.py b/ramanujantools/linear_recurrence_test.py index 1907691..981f092 100644 --- a/ramanujantools/linear_recurrence_test.py +++ b/ramanujantools/linear_recurrence_test.py @@ -228,3 +228,14 @@ def constant(mp): limit = r.limit(200, 1) assert Matrix([[0, 0, 17], [-3, -2, 7]]) == limit.identify(constant(limit.mp)) + + +def test_fibonacci_asymptotics(): + r = LinearRecurrence([-1, 1, 1]) + + expected_exprs = [ + (sp.Rational(1, 2) + sp.sqrt(5) / 2) ** n, + (sp.Rational(1, 2) - sp.sqrt(5) / 2) ** n, + ] + + assert expected_exprs == r.asymptotics() diff --git a/ramanujantools/matrix.py b/ramanujantools/matrix.py index 012a68e..07b3dce 100644 --- a/ramanujantools/matrix.py +++ b/ramanujantools/matrix.py @@ -189,6 +189,7 @@ def coboundary(self, U: Matrix, symbol: sp.Symbol = n, sign: bool = True) -> Mat ) ).factor() + @lru_cache def companion_coboundary_matrix(self, symbol: sp.Symbol = n) -> Matrix: r""" Constructs a new matrix U such that `self.coboundary(U)` is a companion matrix. 
@@ -196,7 +197,8 @@ def companion_coboundary_matrix(self, symbol: sp.Symbol = n) -> Matrix: if not (self.is_square()): raise ValueError("Only square matrices can have a coboundary relation") N = self.rows - ctx = flint_ctx(self.free_symbols, fmpz=True) + free_symbols = self.free_symbols.union({symbol}) + ctx = flint_ctx(free_symbols, fmpz=True) flint_self = SymbolicMatrix.from_sympy(self, ctx) vectors = [SymbolicMatrix.from_sympy(Matrix(N, 1, [1] + (N - 1) * [0]), ctx)] for _ in range(1, N): @@ -460,3 +462,55 @@ def kamidelta(self, depth=20) -> list[mp.mpf]: errors = self.errors() slope = self.gcd_slope(depth) return [-1 + error / slope for error in errors] + + def degrees(self, symbol: sp.Symbol = None) -> Matrix: + r""" + Returns a matrix of the degrees of each cell in the matrix. + For a rational function $f = \frac{p}{q}$, the degree is defined as $deg(f) = deg(p) - deg(q)$. + """ + symbol = symbol or n + return Matrix( + self.rows, + self.cols, + [ + sp.Poly(p, symbol).degree() - sp.Poly(q, symbol).degree() + for p, q in (cell.as_numer_denom() for cell in self) + ], + ) + + def jordan_form( + self, calc_transform=True, **kwargs + ) -> tuple[Matrix, Matrix] | Matrix: + """ + Overloads SymPy's jordan_form to automatically sort the Jordan blocks + in descending order based on the absolute magnitude of the eigenvalues. 
+ """ + result = super().jordan_form(calc_transform=calc_transform, **kwargs) + if calc_transform: + P, J = result + else: + P, J = None, result + + dim = self.shape[0] + starts = [i for i in range(dim) if i == 0 or J[i - 1, i] == 0] + ends = starts[1:] + [dim] + blocks = [ + (J[s, s], P[:, s:e] if calc_transform else None, J[s:e, s:e]) + for s, e in zip(starts, ends) + ] + + def sort_key(b): + try: + return abs(complex(b[0].evalf())) + except TypeError: + return 0 + + blocks.sort(key=sort_key, reverse=True) + + J_sorted = Matrix.diag(*[b[2] for b in blocks]) + + if not calc_transform: + return J_sorted + + P_sorted = Matrix.hstack(*[b[1] for b in blocks]) + return P_sorted, J_sorted diff --git a/ramanujantools/matrix_test.py b/ramanujantools/matrix_test.py index 07bee9a..e709185 100644 --- a/ramanujantools/matrix_test.py +++ b/ramanujantools/matrix_test.py @@ -406,3 +406,10 @@ def test_kamidelta_3f2(): l1, l2 = m.limit({n: 1}, [100, 200], {n: 1}) expected = l1.delta(l2.as_float()) assert actual == approx(expected, abs=1e-1) # at most 0.1 error + + +def test_degrees(): + m = Matrix([[x * y, x**2], [y / x, (x + 1) / (y**2 * (x - 1))]]) + assert m.degrees(x) == Matrix([[1, 2], [-1, 0]]) + assert m.degrees(y) == Matrix([[1, 0], [1, -2]]) + assert m.degrees(n) == Matrix([[0, 0], [0, 0]])