Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 35 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,13 @@ Operator-Partitioned Engine (OP Engine) is a lightweight multiphysics solver cor
- Strong typing, minimal dependencies (NumPy + SciPy for implicit paths).
- Separates state/time management (`ModelCore`) from stepping logic (`CoreSolver`).
- Optional adapters (e.g., flepimop2) without affecting the core API.
- IMEX paths accept externally supplied operator tuples; defaults remain explicit-only.

## Core surface
- `ModelCore`: state/time manager; configure axes, dtype, and optional history.
- `CoreSolver`: explicit + IMEX methods (`euler`, `heun`, `imex-euler`, `imex-heun-tr`, `imex-trbdf2`); accepts `RunConfig` with `AdaptiveConfig`, `DtControllerConfig`, and `OperatorSpecs`.
- `matrix_ops`: Laplacian/Crank–Nicolson/implicit Euler/trapezoidal builders, predictor–corrector, implicit solve cache, Kronecker helpers, grouped aggregations.
- Extras: `OperatorSpecs`, `RunConfig`, `AdaptiveConfig`, `DtControllerConfig`, `Operator`, `GridGeometry`, `DiffusionConfig`.

## Installation

Expand Down Expand Up @@ -42,11 +49,38 @@ solver.run(rhs) # defaults to Heun/RK2
solution = core.state_array # shape (n_timesteps, state, subgroup)
```

### IMEX with operators (tuple form)

```python
import numpy as np
from op_engine import CoreSolver, ModelCore, OperatorSpecs

n = 4
times = np.linspace(0.0, 1.0, 11)
core = ModelCore(n_states=n, n_subgroups=1, time_grid=times)
core.set_initial_state(np.ones((n, 1)))

# Identity implicit operator along state axis
L = np.eye(n)
R = np.eye(n)
ops = OperatorSpecs(default=(L, R))

def rhs(t, y):
return -0.1 * y

solver = CoreSolver(core, operators=ops.default, operator_axis="state")
solver.run(rhs, config=None) # defaults: method="heun" (explicit)

# For IMEX methods set method and operators via RunConfig:
# from op_engine.core_solver import RunConfig, AdaptiveConfig, DtControllerConfig
```

## Public API
- `ModelCore`: state tensor + time grid manager; supports extra axes and optional history.
- `CoreSolver`: explicit and IMEX stepping; methods: `euler`, `heun`, `imex-euler`, `imex-heun-tr`, `imex-trbdf2`.
- Operator utilities (`matrix_ops`): Laplacian builders, Crank–Nicolson/implicit Euler/trapezoidal operators, predictor–corrector builders, implicit solve cache, Kronecker helpers, grouped aggregation utilities.
- Adapters: optional flepimop2 integration (extra dependency) via entrypoints in the adapter package.
- Configuration helpers: `RunConfig`, `OperatorSpecs`, `AdaptiveConfig`, `DtControllerConfig` for method/IMEX/adaptive control.
- Adapters: optional flepimop2 integration (extra dependency) via entrypoints in the adapter package. The adapter merges any `mixing_kernels` already computed by op_system (no automatic generation) and consumes config-supplied IMEX operator specs (dict or `OperatorSpecs`), forwarding the chosen `operator_axis` to `CoreSolver`.

## Development

Expand Down
43 changes: 37 additions & 6 deletions flepimop2-op_engine/src/flepimop2/engine/op_engine/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,8 +49,7 @@ class OpEngineEngineConfig(BaseModel):

gamma: float | None = Field(default=None, gt=0.0, lt=1.0)

# Future-facing: allow operator specs to be supplied via YAML, even if the
# adapter doesn't fully exploit them yet.
# Operator specs (default/tr/bdf2) for IMEX methods.
operators: dict[str, Any] | None = Field(
default=None,
description=(
Expand All @@ -59,17 +58,33 @@ class OpEngineEngineConfig(BaseModel):
),
)

operator_axis: str | int = Field(
default="state",
description="Axis along which implicit operators act (name or index).",
)

@model_validator(mode="after")
def _validate_imex_requirements(self) -> OpEngineEngineConfig:
    """Reject IMEX methods configured without any operator specs."""
    name = str(self.method)
    # Explicit methods never need operators; bail out early.
    if not name.startswith("imex-"):
        return self
    if self._has_any_operator_specs(self.operators):
        return self
    raise ValueError(
        f"IMEX method '{name}' requires operator specifications, "
        "but no operators were provided in the engine config."
    )

@staticmethod
def _has_any_operator_specs(operators: dict[str, Any] | None) -> bool:
    """True when at least one stage operator entry (default/tr/bdf2) is set."""
    if operators is None:
        return False
    stage_keys = ("default", "tr", "bdf2")
    return any(operators.get(key) is not None for key in stage_keys)

def to_run_config(self) -> RunConfig:
"""
Convert to op_engine RunConfig.
Expand All @@ -86,14 +101,30 @@ def to_run_config(self) -> RunConfig:
fac_max=self.fac_max,
)

# Today: keep operators empty/default; later you can translate self.operators
# into OperatorSpecs() here.
op_specs = self._coerce_operator_specs(self.operators)

return RunConfig(
method=self.method,
adaptive=self.adaptive,
strict=self.strict,
adaptive_cfg=adaptive_cfg,
dt_controller=dt_controller,
operators=OperatorSpecs(),
operators=op_specs,
gamma=self.gamma,
)

@staticmethod
def _coerce_operator_specs(operators: dict[str, Any] | None) -> OperatorSpecs:
    """
    Build an OperatorSpecs from a raw operator mapping.

    Returns:
        OperatorSpecs whose default/tr/bdf2 fields mirror the mapping's
        entries; an empty OperatorSpecs when no mapping was supplied.
    """
    if operators is None:
        return OperatorSpecs()
    # Missing keys fall back to None, matching OperatorSpecs defaults.
    stages = {key: operators.get(key) for key in ("default", "tr", "bdf2")}
    return OperatorSpecs(**stages)
20 changes: 14 additions & 6 deletions flepimop2-op_engine/src/flepimop2/engine/op_engine/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,16 +226,24 @@ def run(
msg = "system does not expose a valid flepimop2 SystemProtocol stepper"
raise TypeError(msg)

rhs = _rhs_from_stepper(stepper, params=params, n_state=n_state)
run_cfg = self.config.to_run_config()

# Merge any precomputed mixing kernels exposed by op_system connector
kernels = getattr(system, "mixing_kernels", {})
kernel_params = kernels if isinstance(kernels, dict) else {}
merged_params: dict[IdentifierString, object] = {**kernel_params, **params}

rhs = _rhs_from_stepper(stepper, params=merged_params, n_state=n_state)

core = _make_core(times, y0)

# Today: operators are not yet wired through the adapter.
# Future: translate self.config.operators into op_engine OperatorSpecs and
# pass them here (and/or via run_cfg) as appropriate.
solver = CoreSolver(core, operators=None, operator_axis="state")
op_default = run_cfg.operators.default

run_cfg = self.config.to_run_config()
solver = CoreSolver(
core,
operators=op_default,
operator_axis=self.config.operator_axis,
)
solver.run(rhs, config=run_cfg)

states = _extract_states_2d(core, n_state=n_state)
Expand Down
18 changes: 16 additions & 2 deletions flepimop2-op_engine/tests/test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,21 @@ def test_engine_config_imex_requires_operators() -> None:
)
run = cfg.to_run_config()
assert run.method == "imex-euler"
assert isinstance(run.operators, OperatorSpecs)
assert _has_any_operator_specs(run.operators)
assert run.operators.default == "sentinel"


def test_engine_config_operator_dict_coerces_to_specs() -> None:
    """Operator dict input is coerced into OperatorSpecs with stage keys.

    Regression note: the test previously kept a stale comment and an
    `assert not _has_any_operator_specs(run.operators)` leftover from before
    coercion was implemented; that assertion directly contradicted the value
    checks below (and passed an OperatorSpecs to a helper expecting a dict),
    so it is removed.
    """
    cfg = OpEngineEngineConfig(
        method="imex-trbdf2",
        operators={"default": 1, "tr": 2, "bdf2": 3},
        gamma=0.6,
    )
    run = cfg.to_run_config()

    # The dict is translated into a populated OperatorSpecs, one field per
    # IMEX stage.
    assert isinstance(run.operators, OperatorSpecs)
    assert run.operators.default == 1
    assert run.operators.tr == 2
    assert run.operators.bdf2 == 3
69 changes: 69 additions & 0 deletions flepimop2-op_engine/tests/test_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,34 @@ def __init__(self) -> None:
self._stepper: object = object()


class _KernelStepper:
    """Stepper whose constant RHS is scaled by a "K" kernel parameter."""

    def __call__(
        self, time: np.float64, state: np.ndarray, **params: object
    ) -> np.ndarray:
        # time/state are required by the stepper signature but unused here.
        del time, state
        kernel = float(params.get("K", 0.0))
        return np.asarray([kernel], dtype=np.float64)


class _KernelSystem:
    """Test system pairing a stepper with precomputed mixing_kernels."""

    def __init__(self, k: float) -> None:
        # The engine reads mixing_kernels and merges them into RHS params.
        self.mixing_kernels = {"K": k}
        self._stepper: SystemProtocol = _KernelStepper()


class _ImexSystem:
    """Test system exposing an identity stepper for IMEX runs."""

    def __init__(self, n: int) -> None:
        # n is kept so tests can size operators consistently with the system.
        self.n = n
        self._stepper: SystemProtocol = _GoodStepper()


# -----------------------------------------------------------------------------
# Engine construction
# -----------------------------------------------------------------------------
Expand Down Expand Up @@ -107,6 +135,47 @@ def test_engine_run_identity_rhs_behavior() -> None:
assert state_values[2] >= state_values[1]


def test_engine_passes_mixing_kernels_into_params() -> None:
    """mixing_kernels from the system are merged into RHS params."""
    engine = _OpEngineFlepimop2EngineImpl()
    kernel_system = cast("SystemABC", _KernelSystem(k=2.5))

    time_grid = np.array([0.0, 1.0], dtype=np.float64)
    initial = np.array([1.0], dtype=np.float64)
    empty_params: dict[IdentifierString, object] = {}

    result = engine.run(kernel_system, time_grid, initial, empty_params)

    # With a constant RHS dy/dt = K = 2.5, Heun over dt=1.0 yields
    # y1 = 1 + 0.5*(K + K) = 3.5.
    np.testing.assert_allclose(result[-1, 1], 3.5, rtol=1e-12, atol=0.0)


def test_engine_imex_identity_with_identity_ops() -> None:
    """IMEX path accepts operator specs and runs with identity operators."""
    left = np.eye(1, dtype=np.float64)
    right = np.eye(1, dtype=np.float64)
    engine = _OpEngineFlepimop2EngineImpl(
        config={
            "method": "imex-euler",
            "operators": {"default": (left, right)},
            "adaptive": False,
        }
    )
    imex_system = cast("SystemABC", _ImexSystem(n=1))

    grid = np.array([0.0, 0.5, 1.0], dtype=np.float64)
    start = np.array([1.0], dtype=np.float64)
    run_params: dict[IdentifierString, object] = {}

    trajectory = engine.run(imex_system, grid, start, run_params)

    assert trajectory.shape == (3, 2)
    # The stepper is the identity RHS (dy/dt = y); with identity L/R we only
    # assert the IMEX path completes with finite output.
    assert np.all(np.isfinite(trajectory))


# -----------------------------------------------------------------------------
# Error handling
# -----------------------------------------------------------------------------
Expand Down
3 changes: 3 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -91,3 +91,6 @@ exclude = ["examples/"]
[tool.hatch.build.targets.wheel]
packages = ["src/op_engine"]
include = ["src/op_engine/py.typed"]

[tool.uv]
version = "0.4.17"
5 changes: 0 additions & 5 deletions src/op_engine/model_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,11 +288,6 @@ def reshape_for_axis_solve(

Raises:
ValueError: if x does not have shape state_shape.

Contract:
- x2d has shape (axis_len, batch)
- batch is the product of all non-axis dimensions
- unreshape_from_axis_solve(inverse) reconstructs exactly.
"""
x_arr = np.asarray(x, dtype=self.dtype)
if x_arr.shape != self.state_shape:
Expand Down