diff --git a/moonfish/config.py b/moonfish/config.py
index 5e2b227..eb8304c 100644
--- a/moonfish/config.py
+++ b/moonfish/config.py
@@ -22,3 +22,4 @@ class Config:
     syzygy_pieces: int
     checkmate_score: int = CHECKMATE_SCORE
     checkmate_threshold: int = CHECKMATE_THRESHOLD
+    nn_model_path: str | None = None
diff --git a/moonfish/engines/nn_engine.py b/moonfish/engines/nn_engine.py
new file mode 100644
index 0000000..b194200
--- /dev/null
+++ b/moonfish/engines/nn_engine.py
@@ -0,0 +1,86 @@
+"""
+Neural network chess engine.
+
+Uses alpha-beta search with a pluggable neural network evaluator instead of
+the classical PeSTO evaluation. Any evaluator implementing the Evaluator
+protocol can be used.
+
+Example:
+    from moonfish.evaluation.nn import NNEvaluator
+
+    # Load a custom ONNX model
+    evaluator = NNEvaluator.from_file("my_model.onnx")
+    engine = NNEngine(config, evaluator=evaluator)
+    best_move = engine.search_move(board)
+
+    # Or use an LLM for evaluation
+    def llm_eval(board):
+        return call_my_llm(board.fen())
+
+    evaluator = NNEvaluator(eval_fn=llm_eval)
+    engine = NNEngine(config, evaluator=evaluator)
+"""
+
+from chess import Board, Move
+
+from moonfish.config import Config
+from moonfish.engines.alpha_beta import AlphaBeta
+from moonfish.evaluation.base import Evaluator
+from moonfish.evaluation.classical import ClassicalEvaluator
+
+
+class NNEngine(AlphaBeta):
+    """
+    Chess engine that uses a pluggable evaluator for position assessment.
+
+    Inherits the full alpha-beta search from AlphaBeta but replaces the
+    evaluation function with a provided Evaluator instance. This allows
+    using neural networks, LLMs, or any custom evaluation function while
+    keeping the same search algorithm.
+    """
+
+    def __init__(self, config: Config, evaluator: Evaluator | None = None):
+        """
+        Initialize the NN engine.
+
+        Args:
+            config: Engine configuration.
+            evaluator: An Evaluator instance. If None, falls back to
+                ClassicalEvaluator (PeSTO tables).
+        """
+        super().__init__(config)
+        self.evaluator = evaluator or ClassicalEvaluator()
+
+    def eval_board(self, board: Board) -> float:
+        """
+        Evaluate the board using the plugged-in evaluator.
+
+        If Syzygy tablebases are available and the position qualifies,
+        tablebase probing takes priority over the evaluator.
+
+        Args:
+            board: The chess position to evaluate.
+
+        Returns:
+            Score from the side-to-move's perspective.
+        """
+        # Syzygy probing still takes priority for endgame positions
+        if self.tablebase is not None:
+            from moonfish.psqt import count_pieces
+
+            pieces = sum(count_pieces(board))
+            if pieces <= self.config.syzygy_pieces:
+                try:
+                    import chess.syzygy
+
+                    dtz = self.tablebase.probe_dtz(board)
+                    return dtz
+                except Exception:
+                    pass
+
+        return self.evaluator.evaluate(board)
+
+    def search_move(self, board: Board) -> Move:
+        """Search for the best move, resetting evaluator state first."""
+        self.evaluator.reset()
+        return super().search_move(board)
diff --git a/moonfish/evaluation/__init__.py b/moonfish/evaluation/__init__.py
new file mode 100644
index 0000000..2bbc662
--- /dev/null
+++ b/moonfish/evaluation/__init__.py
@@ -0,0 +1,5 @@
+from moonfish.evaluation.base import Evaluator
+from moonfish.evaluation.classical import ClassicalEvaluator
+from moonfish.evaluation.nn import NNEvaluator
+
+__all__ = ["Evaluator", "ClassicalEvaluator", "NNEvaluator"]
diff --git a/moonfish/evaluation/base.py b/moonfish/evaluation/base.py
new file mode 100644
index 0000000..f7804fd
--- /dev/null
+++ b/moonfish/evaluation/base.py
@@ -0,0 +1,49 @@
+"""
+Base evaluator protocol.
+
+Any evaluation function (classical, NNUE, transformer, LLM) should implement
+this protocol to be usable with the alpha-beta search engine.
+"""
+
+from typing import Protocol, runtime_checkable
+
+from chess import Board
+
+
+@runtime_checkable
+class Evaluator(Protocol):
+    """
+    Protocol for board evaluation functions.
+
+    Implementations can range from simple piece-square tables to neural networks
+    or even LLM-based evaluators. The engine will call `evaluate()` at leaf nodes
+    of the search tree and in quiescence search.
+
+    The returned score should be from the perspective of the side to move:
+    - Positive = good for the side to move
+    - Negative = bad for the side to move
+    - 0 = roughly equal
+
+    Scores are in centipawns (100 = 1 pawn advantage).
+    """
+
+    def evaluate(self, board: Board) -> float:
+        """
+        Evaluate the given board position.
+
+        Args:
+            board: The chess position to evaluate.
+
+        Returns:
+            Score in centipawns from the side-to-move's perspective.
+        """
+        ...
+
+    def reset(self) -> None:
+        """
+        Reset any internal state (e.g., caches, accumulators).
+
+        Called at the start of each new search. Implementations that
+        don't maintain state can make this a no-op.
+        """
+        ...
diff --git a/moonfish/evaluation/classical.py b/moonfish/evaluation/classical.py
new file mode 100644
index 0000000..4120b0a
--- /dev/null
+++ b/moonfish/evaluation/classical.py
@@ -0,0 +1,25 @@
+"""
+Classical evaluator wrapping the existing PeSTO evaluation.
+
+This is the default evaluator used by the engine. It provides a baseline
+for comparing against neural network evaluators.
+"""
+
+from chess import Board
+
+from moonfish.psqt import BOARD_EVALUATION_CACHE, board_evaluation
+
+
+class ClassicalEvaluator:
+    """
+    Classical evaluation based on PeSTO piece-square tables with
+    tapered midgame/endgame scoring.
+    """
+
+    def evaluate(self, board: Board) -> float:
+        """Evaluate using PeSTO piece-square tables."""
+        return board_evaluation(board)
+
+    def reset(self) -> None:
+        """Clear the evaluation cache between searches."""
+        BOARD_EVALUATION_CACHE.clear()
diff --git a/moonfish/evaluation/nn.py b/moonfish/evaluation/nn.py
new file mode 100644
index 0000000..149754b
--- /dev/null
+++ b/moonfish/evaluation/nn.py
@@ -0,0 +1,247 @@
+"""
+Neural network evaluator framework.
+
+Supports loading evaluation models in multiple formats:
+- ONNX Runtime (.onnx) - recommended for deployment
+- PyTorch (.pt, .pth) - for development and fine-tuning
+- Custom callables - for LLMs or other exotic evaluators
+
+The framework handles board-to-tensor conversion and model inference.
+To use a custom model, either:
+
+1. Provide an ONNX or PyTorch model file:
+    evaluator = NNEvaluator.from_file("model.onnx")
+
+2. Provide a custom callable:
+    evaluator = NNEvaluator(eval_fn=my_llm_eval_function)
+
+3. Subclass and override `_raw_evaluate`:
+    class MyEvaluator(NNEvaluator):
+        def _raw_evaluate(self, board):
+            return my_model(board)
+
+Board representation for neural models:
+- Default: 12-plane bitboard (6 piece types x 2 colors) + metadata
+- Each plane is 8x8 = 64 values (1 if piece present, 0 otherwise)
+- Metadata: side to move, castling rights (en passant/halfmove clock not encoded)
+- Total input size: 12*64 + 5 = 773 floats
+"""
+
+from typing import Callable
+
+import chess
+from chess import Board
+
+
+def board_to_tensor(board: Board) -> list[float]:
+    """
+    Convert a board to a flat feature vector suitable for neural network input.
+
+    Encoding (773 features):
+    - 12 bitboard planes (6 piece types x 2 colors), each 64 values: [0..767]
+    - Side to move (1 = white, 0 = black): [768]
+    - Castling rights (4 bools): [769..772]
+
+    The board is always encoded from white's perspective. If it's black's turn,
+    the model output should be negated by the evaluator.
+
+    Args:
+        board: The chess position to encode.
+
+    Returns:
+        List of 773 floats representing the position.
+    """
+    features: list[float] = []
+
+    # 12 bitboard planes: WP, WN, WB, WR, WQ, WK, BP, BN, BB, BR, BQ, BK
+    for color in [chess.WHITE, chess.BLACK]:
+        for piece_type in range(1, 7):  # PAWN=1 through KING=6
+            bb = board.pieces_mask(piece_type, color)
+            for square in range(64):
+                features.append(1.0 if bb & (1 << square) else 0.0)
+
+    # Metadata
+    features.append(1.0 if board.turn == chess.WHITE else 0.0)
+    features.append(1.0 if board.has_kingside_castling_rights(chess.WHITE) else 0.0)
+    features.append(1.0 if board.has_queenside_castling_rights(chess.WHITE) else 0.0)
+    features.append(1.0 if board.has_kingside_castling_rights(chess.BLACK) else 0.0)
+    features.append(1.0 if board.has_queenside_castling_rights(chess.BLACK) else 0.0)
+
+    return features
+
+
+# Input size for the default board encoding
+INPUT_SIZE = 773
+
+
+class NNEvaluator:
+    """
+    Neural network evaluator with support for multiple model backends.
+
+    This evaluator can use:
+    - ONNX models (via onnxruntime)
+    - PyTorch models (via torch)
+    - Custom callables (e.g., LLM API calls)
+
+    The framework handles board encoding and score normalization.
+    Models should output a single float: positive = white is better.
+    The evaluator automatically negates for black's perspective.
+
+    Example usage:
+        # With an ONNX model:
+        evaluator = NNEvaluator.from_file("model.onnx")
+
+        # With a custom function (e.g., LLM):
+        def llm_eval(board: Board) -> float:
+            prompt = f"Evaluate this chess position: {board.fen()}"
+            return call_llm(prompt)  # returns centipawn score
+
+        evaluator = NNEvaluator(eval_fn=llm_eval)
+
+        # With a custom board encoder:
+        evaluator = NNEvaluator(
+            eval_fn=my_fn,
+            board_encoder=my_custom_encoder,
+        )
+    """
+
+    def __init__(
+        self,
+        eval_fn: Callable[[Board], float] | None = None,
+        board_encoder: Callable[[Board], list[float]] | None = None,
+    ):
+        """
+        Create a neural network evaluator.
+
+        Args:
+            eval_fn: Optional callable that takes a Board and returns a score
+                from white's perspective. If provided, this is used directly
+                instead of a model file.
+            board_encoder: Optional custom board-to-feature-vector function.
+                Defaults to the standard 773-feature encoding.
+        """
+        self._eval_fn = eval_fn
+        self._board_encoder = board_encoder or board_to_tensor
+        self._model = None
+        self._backend: str | None = None
+
+    @classmethod
+    def from_file(cls, model_path: str, **kwargs) -> "NNEvaluator":
+        """
+        Load a neural network model from a file.
+
+        Supported formats:
+        - .onnx: Loaded via ONNX Runtime
+        - .pt, .pth: Loaded via PyTorch
+
+        Args:
+            model_path: Path to the model file.
+            **kwargs: Additional arguments passed to the model loader.
+
+        Returns:
+            NNEvaluator instance with the model loaded.
+        """
+        evaluator = cls(**kwargs)
+        evaluator._load_model(model_path)
+        return evaluator
+
+    def _load_model(self, model_path: str) -> None:
+        """Load a model from file, auto-detecting the backend."""
+        if model_path.endswith(".onnx"):
+            self._load_onnx(model_path)
+        elif model_path.endswith((".pt", ".pth")):
+            self._load_pytorch(model_path)
+        else:
+            raise ValueError(
+                f"Unsupported model format: {model_path}. "
+                "Supported: .onnx, .pt, .pth"
+            )
+
+    def _load_onnx(self, model_path: str) -> None:
+        """Load an ONNX model."""
+        try:
+            import onnxruntime as ort
+        except ImportError:
+            raise ImportError(
+                "onnxruntime is required for ONNX models. "
+                "Install it with: pip install onnxruntime"
+            )
+        self._model = ort.InferenceSession(model_path)
+        self._backend = "onnx"
+
+    def _load_pytorch(self, model_path: str) -> None:
+        """Load a PyTorch model."""
+        try:
+            import torch
+        except ImportError:
+            raise ImportError(
+                "torch is required for PyTorch models. "
+                "Install it with: pip install torch"
+            )
+        self._model = torch.jit.load(model_path)
+        self._model.eval()
+        self._backend = "pytorch"
+
+    def _raw_evaluate(self, board: Board) -> float:
+        """
+        Get the raw model output for a position.
+
+        Override this method in subclasses for custom evaluation logic.
+        The returned score should be from white's perspective.
+
+        Args:
+            board: The chess position to evaluate.
+
+        Returns:
+            Score from white's perspective (positive = white is better).
+        """
+        if self._eval_fn is not None:
+            return self._eval_fn(board)
+
+        if self._model is None:
+            raise RuntimeError(
+                "No model loaded. Use NNEvaluator.from_file() or provide eval_fn."
+            )
+
+        features = self._board_encoder(board)
+
+        if self._backend == "onnx":
+            import numpy as np
+
+            input_array = np.array([features], dtype=np.float32)
+            input_name = self._model.get_inputs()[0].name
+            result = self._model.run(None, {input_name: input_array})
+            return float(result[0][0][0])
+
+        elif self._backend == "pytorch":
+            import torch
+
+            input_tensor = torch.tensor([features], dtype=torch.float32)
+            with torch.no_grad():
+                result = self._model(input_tensor)
+            return float(result.item())
+
+        raise RuntimeError(f"Unknown backend: {self._backend}")
+
+    def evaluate(self, board: Board) -> float:
+        """
+        Evaluate a position from the side-to-move's perspective.
+
+        The raw model output is from white's perspective. This method
+        negates the score when it's black's turn.
+
+        Args:
+            board: The chess position to evaluate.
+
+        Returns:
+            Score in centipawns from the side-to-move's perspective.
+        """
+        score = self._raw_evaluate(board)
+        # Negate for black's perspective (model outputs white-relative scores)
+        if board.turn == chess.BLACK:
+            score = -score
+        return score
+
+    def reset(self) -> None:
+        """Reset internal state. No-op for stateless NN evaluators."""
+        pass
diff --git a/moonfish/helper.py b/moonfish/helper.py
index d7fd04d..8e57d09 100644
--- a/moonfish/helper.py
+++ b/moonfish/helper.py
@@ -10,6 +10,7 @@
 from moonfish.engines.l1p_alpha_beta import Layer1ParallelAlphaBeta
 from moonfish.engines.l2p_alpha_beta import Layer2ParallelAlphaBeta
 from moonfish.engines.lazy_smp import LazySMP
+from moonfish.engines.nn_engine import NNEngine
 from moonfish.engines.random import RandomEngine
 
 
@@ -21,6 +22,7 @@ class Algorithm(Enum):
     parallel_alpha_beta_layer_2 = "parallel_alpha_beta_layer_2"
     lazy_smp = "lazy_smp"
     random = "random"
+    nn = "nn"
 
 
 def get_engine(config: Config):
@@ -45,9 +47,26 @@ def get_engine(config: Config):
         return LazySMP(config)
     elif algorithm is Algorithm.random:
         return RandomEngine(config)
+    elif algorithm is Algorithm.nn:
+        return _create_nn_engine(config)
     raise Exception("algorithm not supported")
 
 
+def _create_nn_engine(config: Config) -> NNEngine:
+    """
+    Create an NN engine with the configured evaluator.
+
+    If nn_model_path is set, loads the model from file.
+    Otherwise, falls back to classical evaluation.
+    """
+    evaluator = None
+    if config.nn_model_path:
+        from moonfish.evaluation.nn import NNEvaluator
+
+        evaluator = NNEvaluator.from_file(config.nn_model_path)
+    return NNEngine(config, evaluator=evaluator)
+
+
 def _opening_book_path() -> str:
     """
     Resolve the opening book path, allowing overrides for CI or custom installs.
diff --git a/moonfish/main.py b/moonfish/main.py
index a2a36cf..e3783c9 100644
--- a/moonfish/main.py
+++ b/moonfish/main.py
@@ -62,6 +62,12 @@ def run(config: Config):
     help="Remaining pieces to use syzygy endgame tablebases.",
     default=5,
 )
+@click.option(
+    "--nn-model-path",
+    type=str,
+    help="Path to a neural network model file (.onnx or .pt) for the nn algorithm.",
+    default=None,
+)
 def main(
     mode: str,
     algorithm: str,
@@ -71,6 +77,7 @@
     quiescence_search_depth: int,
     syzygy_path: str | None,
     syzygy_pieces: int,
+    nn_model_path: str | None,
 ):
     """
     Starts the engine according to the options provided.
@@ -84,6 +91,7 @@
         quiescence_search_depth=quiescence_search_depth,
         syzygy_path=syzygy_path,
         syzygy_pieces=syzygy_pieces,
+        nn_model_path=nn_model_path,
     )
 
     run(config)