From 5efad8304f2ad84854ace7cb6795111068a747ae Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Wed, 17 Dec 2025 17:10:19 +0100
Subject: [PATCH 01/37] cherry pick snapshots from feature/risk_trajectories

---
 climada/trajectories/snapshot.py           | 163 +++++++++++++++++++++
 climada/trajectories/test/test_snapshot.py | 132 +++++++++++++++++
 2 files changed, 295 insertions(+)
 create mode 100644 climada/trajectories/snapshot.py
 create mode 100644 climada/trajectories/test/test_snapshot.py

diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py
new file mode 100644
index 0000000000..d8c78c0c20
--- /dev/null
+++ b/climada/trajectories/snapshot.py
@@ -0,0 +1,163 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see <https://www.gnu.org/licenses/>.
+
+---
+
+This module implements the Snapshot class.
+
+Snapshots are used to store a snapshot of Exposure, Hazard and Vulnerability
+at a specific date.
+
+"""
+
+import copy
+import datetime
+import logging
+
+import pandas as pd
+
+from climada.entity.exposures import Exposures
+from climada.entity.impact_funcs import ImpactFuncSet
+from climada.entity.measures.base import Measure
+from climada.hazard import Hazard
+
+LOGGER = logging.getLogger(__name__)
+
+__all__ = ["Snapshot"]
+
+
+class Snapshot:
+    """
+    A snapshot of exposure, hazard, and impact function at a specific date.
+
+    Parameters
+    ----------
+    exposure : Exposures
+    hazard : Hazard
+    impfset : ImpactFuncSet
+    date : int | datetime.date | str
+        The date of the Snapshot. It can be an integer representing a year,
+        a datetime object, or a string representation of a datetime object
+        with format "YYYY-MM-DD".
+
+    Attributes
+    ----------
+    date : datetime.date
+        Date of the snapshot.
+    measure : Measure | None
+        The possible measure applied to the snapshot.
+
+    Notes
+    -----
+    The object creates deep copies of the exposure, hazard, and impact
+    function set.
+
+    Also note that exposure, hazard and impfset are read-only properties.
+    Consider snapshots as immutable objects.
+
+    To create a snapshot with a measure, create a snapshot `snap` without
+    the measure and call `snap.apply_measure(measure)`, which returns a new
+    Snapshot object with the measure applied to its risk dimensions.
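+
+    Examples
+    --------
+    A minimal sketch; ``exp``, ``haz`` and ``impfset`` stand for pre-built
+    Exposures, Hazard and ImpactFuncSet objects:
+
+    >>> snap = Snapshot(exposure=exp, hazard=haz, impfset=impfset, date=2030)
+    >>> snap.date
+    datetime.date(2030, 1, 1)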
+ """ + + def __init__( + self, + *, + exposure: Exposures, + hazard: Hazard, + impfset: ImpactFuncSet, + date: int | datetime.date | str, + ) -> None: + self._exposure = copy.deepcopy(exposure) + self._hazard = copy.deepcopy(hazard) + self._impfset = copy.deepcopy(impfset) + self._measure = None + self._date = self._convert_to_date(date) + + @property + def exposure(self) -> Exposures: + """Exposure data for the snapshot.""" + return self._exposure + + @property + def hazard(self) -> Hazard: + """Hazard data for the snapshot.""" + return self._hazard + + @property + def impfset(self) -> ImpactFuncSet: + """Impact function set data for the snapshot.""" + return self._impfset + + @property + def measure(self) -> Measure | None: + """(Adaptation) Measure data for the snapshot.""" + return self._measure + + @property + def date(self) -> datetime.date: + """Date of the snapshot.""" + return self._date + + @property + def impact_calc_data(self) -> dict: + """Convenience function for ImpactCalc class.""" + return { + "exposures": self.exposure, + "hazard": self.hazard, + "impfset": self.impfset, + } + + @staticmethod + def _convert_to_date(date_arg) -> datetime.date: + """Convert date argument of type int or str to a datetime.date object.""" + if isinstance(date_arg, int): + # Assume the integer represents a year + return datetime.date(date_arg, 1, 1) + elif isinstance(date_arg, str): + # Try to parse the string as a date + try: + return datetime.datetime.strptime(date_arg, "%Y-%m-%d").date() + except ValueError: + raise ValueError("String must be in the format 'YYYY-MM-DD'") + elif isinstance(date_arg, datetime.date): + # Already a date object + return date_arg + else: + raise TypeError("date_arg must be an int, str, or datetime.date") + + def apply_measure(self, measure: Measure) -> "Snapshot": + """Create a new snapshot by applying a Measure object. + + This method creates a new `Snapshot` object by applying a measure on + the current one. + + Parameters + ---------- + measure : Measure + The measure to be applied to the snapshot. + + Returns + ------- + The Snapshot with the measure applied. 
+ + """ + + LOGGER.debug(f"Applying measure {measure.name} on snapshot {id(self)}") + exp, impfset, haz = measure.apply(self.exposure, self.impfset, self.hazard) + snap = Snapshot(exposure=exp, hazard=haz, impfset=impfset, date=self.date) + snap._measure = measure + return snap diff --git a/climada/trajectories/test/test_snapshot.py b/climada/trajectories/test/test_snapshot.py new file mode 100644 index 0000000000..4e3b465d8e --- /dev/null +++ b/climada/trajectories/test/test_snapshot.py @@ -0,0 +1,132 @@ +import datetime +import unittest +from unittest.mock import MagicMock + +import numpy as np +import pandas as pd + +from climada.entity.exposures import Exposures +from climada.entity.impact_funcs import ImpactFunc, ImpactFuncSet +from climada.entity.measures.base import Measure +from climada.hazard import Hazard +from climada.trajectories.snapshot import Snapshot +from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 + + +class TestSnapshot(unittest.TestCase): + + def setUp(self): + # Create mock objects for testing + self.mock_exposure = Exposures.from_hdf5(EXP_DEMO_H5) + self.mock_hazard = Hazard.from_hdf5(HAZ_DEMO_H5) + self.mock_impfset = ImpactFuncSet( + [ + ImpactFunc( + "TC", + 3, + intensity=np.array([0, 20]), + mdd=np.array([0, 0.5]), + paa=np.array([0, 1]), + ) + ] + ) + self.mock_measure = MagicMock(spec=Measure) + self.mock_measure.name = "Test Measure" + + # Setup mock return values for measure.apply + self.mock_modified_exposure = MagicMock(spec=Exposures) + self.mock_modified_hazard = MagicMock(spec=Hazard) + self.mock_modified_impfset = MagicMock(spec=ImpactFuncSet) + self.mock_measure.apply.return_value = ( + self.mock_modified_exposure, + self.mock_modified_impfset, + self.mock_modified_hazard, + ) + + def test_init_with_int_date(self): + snapshot = Snapshot( + exposure=self.mock_exposure, + hazard=self.mock_hazard, + impfset=self.mock_impfset, + date=2023, + ) + self.assertEqual(snapshot.date, datetime.date(2023, 1, 1)) + + def test_init_with_str_date(self): + snapshot = Snapshot( + exposure=self.mock_exposure, + hazard=self.mock_hazard, + impfset=self.mock_impfset, + date="2023-01-01", + ) + self.assertEqual(snapshot.date, datetime.date(2023, 1, 1)) + + def test_init_with_date_object(self): + date_obj = datetime.date(2023, 1, 1) + snapshot = Snapshot( + exposure=self.mock_exposure, + hazard=self.mock_hazard, + impfset=self.mock_impfset, + date=date_obj, + ) + self.assertEqual(snapshot.date, date_obj) + + def test_init_with_invalid_date(self): + with self.assertRaises(ValueError): + Snapshot( + exposure=self.mock_exposure, + hazard=self.mock_hazard, + impfset=self.mock_impfset, + date="invalid-date", + ) + + def test_init_with_invalid_type(self): + with self.assertRaises(TypeError): + Snapshot( + exposure=self.mock_exposure, + hazard=self.mock_hazard, + impfset=self.mock_impfset, + date=2023.5, # type: ignore + ) + + def test_properties(self): + snapshot = Snapshot( + exposure=self.mock_exposure, + hazard=self.mock_hazard, + impfset=self.mock_impfset, + date=2023, + ) + + # We want a new reference + self.assertIsNot(snapshot.exposure, self.mock_exposure) + self.assertIsNot(snapshot.hazard, self.mock_hazard) + self.assertIsNot(snapshot.impfset, self.mock_impfset) + + # But we want equality + pd.testing.assert_frame_equal(snapshot.exposure.gdf, self.mock_exposure.gdf) + + self.assertEqual(snapshot.hazard.haz_type, self.mock_hazard.haz_type) + self.assertEqual(snapshot.hazard.intensity.nnz, self.mock_hazard.intensity.nnz) + self.assertEqual(snapshot.hazard.size, 
self.mock_hazard.size)
+
+        self.assertEqual(snapshot.impfset, self.mock_impfset)
+
+    def test_apply_measure(self):
+        snapshot = Snapshot(
+            exposure=self.mock_exposure,
+            hazard=self.mock_hazard,
+            impfset=self.mock_impfset,
+            date=2023,
+        )
+        new_snapshot = snapshot.apply_measure(self.mock_measure)
+
+        self.assertIsNotNone(new_snapshot.measure)
+        self.assertEqual(new_snapshot.measure.name, "Test Measure")  # type: ignore
+        self.assertEqual(new_snapshot.exposure, self.mock_modified_exposure)
+        self.assertEqual(new_snapshot.hazard, self.mock_modified_hazard)
+        self.assertEqual(new_snapshot.impfset, self.mock_modified_impfset)
+
+
+if __name__ == "__main__":
+    TESTS = unittest.TestLoader().loadTestsFromTestCase(TestSnapshot)
+    unittest.TextTestRunner(verbosity=2).run(TESTS)

From ecae36e8b016a82c6447060f0684e89b28a99320 Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Wed, 17 Dec 2025 17:30:38 +0100
Subject: [PATCH 02/37] Cherrypicks from risk traj

---
 climada/trajectories/interpolation.py           | 439 ++++++++++++++++++
 .../trajectories/test/test_interpolation.py     | 352 ++++++++++++++
 2 files changed, 791 insertions(+)
 create mode 100644 climada/trajectories/interpolation.py
 create mode 100644 climada/trajectories/test/test_interpolation.py

diff --git a/climada/trajectories/interpolation.py b/climada/trajectories/interpolation.py
new file mode 100644
index 0000000000..9f6687e449
--- /dev/null
+++ b/climada/trajectories/interpolation.py
@@ -0,0 +1,439 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see <https://www.gnu.org/licenses/>.
+
+---
+
+This module implements different interpolation approaches for sparse
+matrices and NumPy arrays.
+
+"""
+
+import logging
+from abc import ABC
+from collections.abc import Callable
+from typing import Any, Dict, List, Optional
+
+import numpy as np
+from scipy import sparse
+
+LOGGER = logging.getLogger(__name__)
+
+__all__ = [
+    "AllLinearStrategy",
+    "ExponentialExposureStrategy",
+    "linear_interp_arrays",
+    "linear_interp_imp_mat",
+    "exponential_interp_arrays",
+    "exponential_interp_imp_mat",
+]
+
+
+def linear_interp_imp_mat(
+    mat_start: sparse.csr_matrix,
+    mat_end: sparse.csr_matrix,
+    number_of_interpolation_points: int,
+) -> List[sparse.csr_matrix]:
+    r"""
+    Linearly interpolates between two sparse impact matrices.
+
+    Creates a sequence of matrices representing a linear transition from a starting
+    matrix to an ending matrix. The interpolation includes both the start and end
+    points.
+
+    Parameters
+    ----------
+    mat_start : scipy.sparse.csr_matrix
+        The starting impact matrix. Must have a shape compatible with `mat_end`
+        for arithmetic operations.
+    mat_end : scipy.sparse.csr_matrix
+        The ending impact matrix. Must have a shape compatible with `mat_start`
+        for arithmetic operations.
+    number_of_interpolation_points : int
+        The total number of matrices to return, including the start and end points.
+        Must be $\ge 2$.
+
+    Returns
+    -------
+    list of scipy.sparse.csr_matrix
+        A list of matrices, where the first element is `mat_start` and the last
+        element is `mat_end`. The total length of the list is
+        `number_of_interpolation_points`.
+
+    Notes
+    -----
+    The formula used for interpolation at proportion $p$ is:
+    $$M_p = M_{start} \cdot (1 - p) + M_{end} \cdot p$$
+    The proportions $p$ range from 0 to 1, inclusive.
+    """
+
+    return [
+        mat_start + prop * (mat_end - mat_start)
+        for prop in np.linspace(0, 1, number_of_interpolation_points)
+    ]
+
+
+def exponential_interp_imp_mat(
+    mat_start: sparse.csr_matrix,
+    mat_end: sparse.csr_matrix,
+    number_of_interpolation_points: int,
+) -> List[sparse.csr_matrix]:
+    r"""
+    Exponentially interpolates between two "impact matrices".
+
+    This function performs interpolation in a logarithmic space, effectively
+    achieving an exponential-like transition between `mat_start` and `mat_end`.
+    It operates on the matrices' `.data` attribute, i.e. their stored
+    nonzero values.
+
+    Parameters
+    ----------
+    mat_start : scipy.sparse.csr_matrix
+        The starting impact matrix. Its `.data` attribute must be a
+        NumPy array of positive values.
+    mat_end : scipy.sparse.csr_matrix
+        The ending impact matrix. Its `.data` attribute must be a
+        NumPy array of positive values, with a shape compatible with `mat_start`.
+    number_of_interpolation_points : int
+        The total number of matrices to return, including the start and
+        end points. Must be $\ge 2$.
+
+    Returns
+    -------
+    list of scipy.sparse.csr_matrix
+        A list of interpolated matrices. The first element corresponds to
+        `mat_start` and the last to `mat_end` (up to the logarithmic
+        conversion and reversion). The list length is
+        `number_of_interpolation_points`.
+
+    Notes
+    -----
+    The interpolation is achieved by:
+
+    1. Mapping the matrix data to a logarithmic space:
+       $$M'_{i} = \ln(M_{i} + \epsilon)$$
+       (where $\ln$ is the natural logarithm and $\epsilon$ is a small
+       constant added to prevent $\ln(0)$).
+    2. Performing standard linear interpolation on the transformed matrices
+       $M'_{start}$ and $M'_{end}$ to get $M'_{interp}$:
+       $$M'_{interp} = M'_{start} \cdot (1 - \text{ratio}) + M'_{end} \cdot \text{ratio}$$
+    3. Mapping the result back to the original domain:
+       $$M_{interp} = \exp(M'_{interp})$$
+    """
+
+    mat_start = mat_start.copy()
+    mat_end = mat_end.copy()
+    mat_start.data = np.log(mat_start.data + np.finfo(float).eps)
+    mat_end.data = np.log(mat_end.data + np.finfo(float).eps)
+
+    # Perform linear interpolation in the logarithmic domain
+    res = []
+    num_points = number_of_interpolation_points
+    for point in range(num_points):
+        ratio = point / (num_points - 1)
+        mat_interpolated = mat_start * (1 - ratio) + ratio * mat_end
+        mat_interpolated.data = np.exp(mat_interpolated.data)
+        res.append(mat_interpolated)
+    return res
+
+
+def linear_interp_arrays(arr_start: np.ndarray, arr_end: np.ndarray) -> np.ndarray:
+    r"""
+    Performs linear interpolation between two NumPy arrays over their first dimension.
+
+    This function interpolates each metric (column) linearly across the time steps
+    (rows), including both the start and end states.
+
+    Parameters
+    ----------
+    arr_start : numpy.ndarray
+        The starting array of metrics. The first dimension (rows) is assumed to
+        represent the interpolation steps (e.g., dates/time points).
+    arr_end : numpy.ndarray
+        The ending array of metrics. Must have the exact same shape as `arr_start`.
+
+    Returns
+    -------
+    numpy.ndarray
+        An array with the same shape as `arr_start` and `arr_end`.
The values + in the first dimension transition linearly from those in `arr_start` + to those in `arr_end`. + + Raises + ------ + ValueError + If `arr_start` and `arr_end` do not have the same shape. + + Notes + ----- + The interpolation is performed element-wise along the first dimension + (axis 0). For each row $i$ and proportion $p_i$, the result $R_i$ is calculated as: + + $$R_i = arr\_start_i \cdot (1 - p_i) + arr\_end_i \cdot p_i$$ + + where $p_i$ is generated by $\text{np.linspace}(0, 1, n)$ and $n$ is the + size of the first dimension ($\text{arr\_start.shape}[0]$). + """ + if arr_start.shape != arr_end.shape: + raise ValueError( + f"Cannot interpolate arrays of different shapes: {arr_start.shape} and {arr_end.shape}." + ) + interpolation_range = arr_start.shape[0] + prop1 = np.linspace(0, 1, interpolation_range) + prop0 = 1 - prop1 + if arr_start.ndim > 1: + prop0, prop1 = prop0.reshape(-1, 1), prop1.reshape(-1, 1) + + return np.multiply(arr_start, prop0) + np.multiply(arr_end, prop1) + + +def exponential_interp_arrays(arr_start: np.ndarray, arr_end: np.ndarray) -> np.ndarray: + r""" + Performs exponential interpolation between two NumPy arrays over their first dimension. + + This function achieves an exponential-like transition by performing linear + interpolation in the logarithmic space, suitable to interpolate over a dimension which has + a growth factor. + + Parameters + ---------- + arr_start : numpy.ndarray + The starting array of metrics. Values must be positive. + arr_end : numpy.ndarray + The ending array of metrics. Must have the exact same shape as `arr_start`. + + Returns + ------- + numpy.ndarray + An array with the same shape as `arr_start` and `arr_end`. The values + in the first dimension transition exponentially from those in `arr_start` + to those in `arr_end`. + + Raises + ------ + ValueError + If `arr_start` and `arr_end` do not have the same shape. + + Notes + ----- + The interpolation is performed by transforming the arrays to a logarithmic + domain, linearly interpolating, and then transforming back. + + The formula for the interpolated result $R$ at proportion $\text{prop}$ is: + $$ + R = \exp \left( + \ln(A_{start}) \cdot (1 - \text{prop}) + + \ln(A_{end}) \cdot \text{prop} + \right) + $$ + where $A_{start}$ and $A_{end}$ are the input arrays (with $\epsilon$ added + to prevent $\ln(0)$) and $\text{prop}$ ranges from 0 to 1. + """ + if arr_start.shape != arr_end.shape: + raise ValueError( + f"Cannot interpolate arrays of different shapes: {arr_start.shape} and {arr_end.shape}." + ) + interpolation_range = arr_start.shape[0] + + prop1 = np.linspace(0, 1, interpolation_range) + prop0 = 1 - prop1 + if arr_start.ndim > 1: + prop0, prop1 = prop0.reshape(-1, 1), prop1.reshape(-1, 1) + + # Perform log transformation, linear interpolation, and exponential back-transformation + log_arr_start = np.log(arr_start + np.finfo(float).eps) + log_arr_end = np.log(arr_end + np.finfo(float).eps) + + interpolated_log_arr = np.multiply(log_arr_start, prop0) + np.multiply( + log_arr_end, prop1 + ) + + return np.exp(interpolated_log_arr) + + +class InterpolationStrategyBase(ABC): + r""" + Base abstract class for defining a set of interpolation strategies. + + This class serves as a blueprint for implementing specific interpolation + methods (e.g., 'Linear', 'Exponential') across different impact dimensions: + Exposure (matrices), Hazard, and Vulnerability (arrays/metrics). 
+ + Attributes + ---------- + exposure_interp : Callable + The function used to interpolate sparse impact matrices over the + exposure dimension. + Signature: (mat_start, mat_end, num_points, **kwargs) -> list[sparse.csr_matrix]. + hazard_interp : Callable + The function used to interpolate NumPy arrays of metrics over the + hazard dimension. + Signature: (arr_start, arr_end, **kwargs) -> np.ndarray. + vulnerability_interp : Callable + The function used to interpolate NumPy arrays of metrics over the + vulnerability dimension. + Signature: (arr_start, arr_end, **kwargs) -> np.ndarray. + """ + + exposure_interp: Callable + hazard_interp: Callable + vulnerability_interp: Callable + + def interp_over_exposure_dim( + self, + imp_E0: sparse.csr_matrix, + imp_E1: sparse.csr_matrix, + interpolation_range: int, + /, + **kwargs: Optional[Dict[str, Any]], + ) -> List[sparse.csr_matrix]: + """ + Interpolates between two impact matrices using the defined exposure strategy. + + This method calls the function assigned to :attr:`exposure_interp` to generate + a sequence of matrices. + + Parameters + ---------- + imp_E0 : scipy.sparse.csr_matrix + A sparse matrix of the impacts at the start of the range. + imp_E1 : scipy.sparse.csr_matrix + A sparse matrix of the impacts at the end of the range. + interpolation_range : int + The total number of time points to interpolate, including the start and end. + **kwargs : Optional[Dict[str, Any]] + Keyword arguments to pass to the underlying :attr:`exposure_interp` function. + + Returns + ------- + list of scipy.sparse.csr_matrix + A list of ``interpolation_range`` interpolated impact matrices. + + Raises + ------ + ValueError + If the underlying interpolation function raises a ``ValueError`` + indicating incompatible matrix shapes. + """ + try: + res = self.exposure_interp(imp_E0, imp_E1, interpolation_range, **kwargs) + except ValueError as err: + if str(err) == "inconsistent shapes": + raise ValueError( + "Tried to interpolate impact matrices of different shapes. " + "A possible reason could be Exposures of different shapes." + ) from err + + raise err + + return res + + def interp_over_hazard_dim( + self, + metric_0: np.ndarray, + metric_1: np.ndarray, + /, + **kwargs: Optional[Dict[str, Any]], + ) -> np.ndarray: + """ + Interpolates between two metric arrays using the defined hazard strategy. + + This method calls the function assigned to :attr:`hazard_interp`. + + Parameters + ---------- + metric_0 : numpy.ndarray + The starting array of metrics. + metric_1 : numpy.ndarray + The ending array of metrics. Must have the same shape as ``metric_0``. + **kwargs : Optional [Dict[str, Any]] + Keyword arguments to pass to the underlying :attr:`hazard_interp` function. + + Returns + ------- + numpy.ndarray + The resulting interpolated array. + """ + return self.hazard_interp(metric_0, metric_1, **kwargs) + + def interp_over_vulnerability_dim( + self, + metric_0: np.ndarray, + metric_1: np.ndarray, + /, + **kwargs: Optional[Dict[str, Any]], + ) -> np.ndarray: + """ + Interpolates between two metric arrays using the defined vulnerability strategy. + + This method calls the function assigned to :attr:`vulnerability_interp`. + + Parameters + ---------- + metric_0 : numpy.ndarray + The starting array of metrics. + metric_1 : numpy.ndarray + The ending array of metrics. Must have the same shape as ``metric_0``. + **kwargs : Optional[Dict[str, Any]] + Keyword arguments to pass to the underlying :attr:`vulnerability_interp` function. 
+ + Returns + ------- + numpy.ndarray + The resulting interpolated array. + """ + # Note: Assuming the Callable takes the exact positional arguments + return self.vulnerability_interp(metric_0, metric_1, **kwargs) + + +class InterpolationStrategy(InterpolationStrategyBase): + r"""Interface for interpolation strategies. + + This is the class to use to define your own custom interpolation strategy. + """ + + def __init__( + self, + exposure_interp: Callable, + hazard_interp: Callable, + vulnerability_interp: Callable, + ) -> None: + super().__init__() + self.exposure_interp = exposure_interp + self.hazard_interp = hazard_interp + self.vulnerability_interp = vulnerability_interp + + +class AllLinearStrategy(InterpolationStrategyBase): + r"""Linear interpolation strategy over all dimensions.""" + + def __init__(self) -> None: + super().__init__() + self.exposure_interp = linear_interp_imp_mat + self.hazard_interp = linear_interp_arrays + self.vulnerability_interp = linear_interp_arrays + + +class ExponentialExposureStrategy(InterpolationStrategyBase): + r"""Exponential interpolation strategy for exposure and linear for Hazard and Vulnerability.""" + + def __init__(self) -> None: + super().__init__() + self.exposure_interp = ( + lambda mat_start, mat_end, points: exponential_interp_imp_mat( + mat_start, mat_end, points + ) + ) + self.hazard_interp = linear_interp_arrays + self.vulnerability_interp = linear_interp_arrays diff --git a/climada/trajectories/test/test_interpolation.py b/climada/trajectories/test/test_interpolation.py new file mode 100644 index 0000000000..693c9b9c33 --- /dev/null +++ b/climada/trajectories/test/test_interpolation.py @@ -0,0 +1,352 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . 
+ +--- + +Tests for interpolation + +""" + +import unittest +from unittest.mock import MagicMock + +import numpy as np +from scipy.sparse import csr_matrix + +from climada.trajectories.interpolation import ( + AllLinearStrategy, + ExponentialExposureStrategy, + InterpolationStrategy, + exponential_interp_arrays, + exponential_interp_imp_mat, + linear_interp_arrays, + linear_interp_imp_mat, +) + + +class TestInterpolationFuncs(unittest.TestCase): + def setUp(self): + # Create mock impact matrices for testing + self.imp_mat0 = csr_matrix(np.array([[1, 2], [3, 4]])) + self.imp_mat1 = csr_matrix(np.array([[5, 6], [7, 8]])) + self.imp_mat2 = csr_matrix(np.array([[5, 6, 7], [8, 9, 10]])) # Different shape + self.time_points = 5 + self.interpolation_range_5 = 5 + self.interpolation_range_1 = 1 + self.interpolation_range_2 = 2 + self.rtol = 1e-5 + self.atol = 1e-8 + + def test_linear_interp_arrays(self): + arr_start = np.array([10, 100]) + arr_end = np.array([20, 200]) + expected = np.array([10.0, 200.0]) + result = linear_interp_arrays(arr_start, arr_end) + np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) + + def test_linear_interp_arrays2D(self): + arr_start = np.array([[10, 100], [10, 100]]) + arr_end = np.array([[20, 200], [20, 200]]) + expected = np.array([[10.0, 100.0], [20, 200]]) + result = linear_interp_arrays(arr_start, arr_end) + np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) + + def test_linear_interp_arrays_shape(self): + arr_start = np.array([10, 100, 5]) + arr_end = np.array([20, 200]) + with self.assertRaises(ValueError): + linear_interp_arrays(arr_start, arr_end) + + def test_linear_interp_arrays_start_equals_end(self): + arr_start = np.array([5, 5]) + arr_end = np.array([5, 5]) + expected = np.array([5.0, 5.0]) + result = linear_interp_arrays(arr_start, arr_end) + np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) + + def test_exponential_interp_arrays_1d(self): + arr_start = np.array([1, 10, 100]) + arr_end = np.array([2, 20, 200]) + expected = np.array([1.0, 14.142136, 200.0]) + result = exponential_interp_arrays(arr_start, arr_end) + np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) + + def test_exponential_interp_arrays_shape(self): + arr_start = np.array([10, 100, 5]) + arr_end = np.array([20, 200]) + with self.assertRaises(ValueError): + exponential_interp_arrays(arr_start, arr_end) + + def test_exponential_interp_arrays_2d(self): + arr_start = np.array( + [ + [1, 10, 100], # date 1 metric a,b,c + [1, 10, 100], # date 2 metric a,b,c + [1, 10, 100], + ] + ) # date 3 metric a,b,c + arr_end = np.array([[2, 20, 200], [2, 20, 200], [2, 20, 200]]) + expected = np.array( + [[1.0, 10.0, 100.0], [1.4142136, 14.142136, 141.42136], [2, 20, 200]] + ) + result = exponential_interp_arrays(arr_start, arr_end) + np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) + + def test_exponential_interp_arrays_start_equals_end(self): + arr_start = np.array([5, 5]) + arr_end = np.array([5, 5]) + expected = np.array([5.0, 5.0]) + result = exponential_interp_arrays(arr_start, arr_end) + np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) + + def test_linear_impmat_interpolate(self): + result = linear_interp_imp_mat(self.imp_mat0, self.imp_mat1, self.time_points) + self.assertEqual(len(result), self.time_points) + for mat in result: + self.assertIsInstance(mat, csr_matrix) + + dense = np.array([r.todense() for r in result]) + expected = 
np.array( + [ + [[1.0, 2.0], [3.0, 4.0]], + [[2.0, 3.0], [4.0, 5.0]], + [[3.0, 4.0], [5.0, 6.0]], + [[4.0, 5.0], [6.0, 7.0]], + [[5.0, 6.0], [7.0, 8.0]], + ] + ) + np.testing.assert_array_equal(dense, expected) + + def test_linear_impmat_interpolate_inconsistent_shape(self): + with self.assertRaises(ValueError): + linear_interp_imp_mat(self.imp_mat0, self.imp_mat2, self.time_points) + + def test_exp_impmat_interpolate(self): + result = exponential_interp_imp_mat( + self.imp_mat0, self.imp_mat1, self.time_points + ) + self.assertEqual(len(result), self.time_points) + for mat in result: + self.assertIsInstance(mat, csr_matrix) + + dense = np.array([r.todense() for r in result]) + expected = np.array( + [ + [[1.0, 2.0], [3.0, 4.0]], + [[1.49534878, 2.63214803], [3.70779275, 4.75682846]], + [[2.23606798, 3.46410162], [4.58257569, 5.65685425]], + [[3.34370152, 4.55901411], [5.66374698, 6.72717132]], + [[5.0, 6.0], [7.0, 8.0]], + ] + ) + np.testing.assert_array_almost_equal(dense, expected) + + def test_exp_impmat_interpolate_inconsistent_shape(self): + with self.assertRaises(ValueError): + exponential_interp_imp_mat(self.imp_mat0, self.imp_mat2, self.time_points) + + +class TestInterpolationStrategies(unittest.TestCase): + + def setUp(self): + self.interpolation_range = 3 + self.dummy_metric_0 = np.array([10, 20]) + self.dummy_metric_1 = np.array([100, 200]) + self.dummy_matrix_0 = csr_matrix(np.array([[1, 2], [3, 4]])) + self.dummy_matrix_1 = csr_matrix(np.array([[10, 20], [30, 40]])) + + def test_InterpolationStrategy_init(self): + def mock_exposure(a, b, r): + return a + b + + def mock_hazard(a, b, r): + return a * b + + def mock_vulnerability(a, b, r): + return a / b + + strategy = InterpolationStrategy(mock_exposure, mock_hazard, mock_vulnerability) + self.assertEqual(strategy.exposure_interp, mock_exposure) + self.assertEqual(strategy.hazard_interp, mock_hazard) + self.assertEqual(strategy.vulnerability_interp, mock_vulnerability) + + def test_InterpolationStrategy_interp_exposure_dim(self): + mock_exposure = MagicMock(return_value=["mock_result"]) + strategy = InterpolationStrategy( + mock_exposure, linear_interp_arrays, linear_interp_arrays + ) + + result = strategy.interp_over_exposure_dim( + self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range + ) + mock_exposure.assert_called_once_with( + self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range + ) + self.assertEqual(result, ["mock_result"]) + + def test_InterpolationStrategy_interp_exposure_dim_inconsistent_shapes(self): + mock_exposure = MagicMock(side_effect=ValueError("inconsistent shapes")) + strategy = InterpolationStrategy( + mock_exposure, linear_interp_arrays, linear_interp_arrays + ) + + with self.assertRaisesRegex( + ValueError, "Tried to interpolate impact matrices of different shape" + ): + strategy.interp_over_exposure_dim( + self.dummy_matrix_0, + csr_matrix(np.array([[1]])), + self.interpolation_range, + ) + mock_exposure.assert_called_once() # Ensure it was called + + def test_InterpolationStrategy_interp_hazard_dim(self): + mock_hazard = MagicMock(return_value=np.array([1, 2, 3])) + strategy = InterpolationStrategy( + linear_interp_imp_mat, mock_hazard, linear_interp_arrays + ) + + result = strategy.interp_over_hazard_dim( + self.dummy_metric_0, self.dummy_metric_1 + ) + mock_hazard.assert_called_once_with(self.dummy_metric_0, self.dummy_metric_1) + np.testing.assert_array_equal(result, np.array([1, 2, 3])) + + def test_InterpolationStrategy_interp_vulnerability_dim(self): + 
mock_vulnerability = MagicMock(return_value=np.array([4, 5, 6])) + strategy = InterpolationStrategy( + linear_interp_imp_mat, linear_interp_arrays, mock_vulnerability + ) + + result = strategy.interp_over_vulnerability_dim( + self.dummy_metric_0, self.dummy_metric_1 + ) + mock_vulnerability.assert_called_once_with( + self.dummy_metric_0, self.dummy_metric_1 + ) + np.testing.assert_array_equal(result, np.array([4, 5, 6])) + + +class TestConcreteInterpolationStrategies(unittest.TestCase): + + def setUp(self): + self.interpolation_range = 3 + self.dummy_metric_0 = np.array([10, 20, 30]) + self.dummy_metric_1 = np.array([100, 200, 300]) + self.dummy_matrix_0 = csr_matrix([[1, 2], [3, 4]]) + self.dummy_matrix_1 = csr_matrix([[10, 20], [30, 40]]) + self.dummy_matrix_0_1_lin = csr_matrix([[5.5, 11], [16.5, 22]]) + self.dummy_matrix_0_1_exp = csr_matrix( + [[3.162278, 6.324555], [9.486833, 12.649111]] + ) + self.rtol = 1e-5 + self.atol = 1e-8 + + def test_AllLinearStrategy_init_and_methods(self): + strategy = AllLinearStrategy() + self.assertEqual(strategy.exposure_interp, linear_interp_imp_mat) + self.assertEqual(strategy.hazard_interp, linear_interp_arrays) + self.assertEqual(strategy.vulnerability_interp, linear_interp_arrays) + + # Test hazard interpolation + expected_hazard_interp = linear_interp_arrays( + self.dummy_metric_0, self.dummy_metric_1 + ) + result_hazard = strategy.interp_over_hazard_dim( + self.dummy_metric_0, self.dummy_metric_1 + ) + np.testing.assert_allclose( + result_hazard, expected_hazard_interp, rtol=self.rtol, atol=self.atol + ) + + # Test vulnerability interpolation + expected_vulnerability_interp = linear_interp_arrays( + self.dummy_metric_0, self.dummy_metric_1 + ) + result_vulnerability = strategy.interp_over_vulnerability_dim( + self.dummy_metric_0, self.dummy_metric_1 + ) + np.testing.assert_allclose( + result_vulnerability, + expected_vulnerability_interp, + rtol=self.rtol, + atol=self.atol, + ) + + # Test exposure interpolation (using mock for linear_interp_imp_mat) + result_exposure = strategy.interp_over_exposure_dim( + self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range + ) + # Verify the structure/first/last elements of the mock output + self.assertEqual(len(result_exposure), self.interpolation_range) + np.testing.assert_allclose(result_exposure[0].data, self.dummy_matrix_0.data) + np.testing.assert_allclose( + result_exposure[1].data, self.dummy_matrix_0_1_lin.data + ) + np.testing.assert_allclose(result_exposure[2].data, self.dummy_matrix_1.data) + + def test_ExponentialExposureInterpolation_init_and_methods(self): + strategy = ExponentialExposureStrategy() + # Test hazard interpolation (should be linear) + expected_hazard_interp = linear_interp_arrays( + self.dummy_metric_0, self.dummy_metric_1 + ) + result_hazard = strategy.interp_over_hazard_dim( + self.dummy_metric_0, self.dummy_metric_1 + ) + np.testing.assert_allclose( + result_hazard, expected_hazard_interp, rtol=self.rtol, atol=self.atol + ) + + # Test vulnerability interpolation (should be linear) + expected_vulnerability_interp = linear_interp_arrays( + self.dummy_metric_0, self.dummy_metric_1 + ) + result_vulnerability = strategy.interp_over_vulnerability_dim( + self.dummy_metric_0, self.dummy_metric_1 + ) + np.testing.assert_allclose( + result_vulnerability, + expected_vulnerability_interp, + rtol=self.rtol, + atol=self.atol, + ) + + # Test exposure interpolation (using mock for exponential_interp_imp_mat) + result_exposure = strategy.interp_over_exposure_dim( + 
self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range
+        )
+        # Verify the structure/first/last elements of the mock output
+        self.assertEqual(len(result_exposure), self.interpolation_range)
+        np.testing.assert_allclose(result_exposure[0].data, self.dummy_matrix_0.data)
+        np.testing.assert_allclose(
+            result_exposure[1].data,
+            self.dummy_matrix_0_1_exp.data,
+            rtol=self.rtol,
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(result_exposure[-1].data, self.dummy_matrix_1.data)
+
+
+if __name__ == "__main__":
+    TESTS = unittest.TestLoader().loadTestsFromTestCase(
+        TestConcreteInterpolationStrategies
+    )
+    TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestInterpolationFuncs))
+    TESTS.addTests(
+        unittest.TestLoader().loadTestsFromTestCase(TestInterpolationStrategies)
+    )
+    unittest.TextTestRunner(verbosity=2).run(TESTS)

From da997e7a0496917fc0412b2207acf04b0b31cf12 Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Wed, 17 Dec 2025 17:52:11 +0100
Subject: [PATCH 03/37] Cherrypick from risk traj

---
 climada/trajectories/impact_calc_strat.py           | 137 ++++++++++++++++++
 .../test/test_impact_calc_strat.py                  |  84 +++++++++++
 2 files changed, 221 insertions(+)
 create mode 100644 climada/trajectories/impact_calc_strat.py
 create mode 100644 climada/trajectories/test/test_impact_calc_strat.py

diff --git a/climada/trajectories/impact_calc_strat.py b/climada/trajectories/impact_calc_strat.py
new file mode 100644
index 0000000000..a58aceeab2
--- /dev/null
+++ b/climada/trajectories/impact_calc_strat.py
@@ -0,0 +1,137 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see <https://www.gnu.org/licenses/>.
+
+---
+
+This module implements the impact computation strategy objects for risk
+trajectories.
+
+"""
+
+from abc import ABC, abstractmethod
+
+from climada.engine.impact import Impact
+from climada.engine.impact_calc import ImpactCalc
+from climada.entity.exposures.base import Exposures
+from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet
+from climada.hazard.base import Hazard
+
+__all__ = ["ImpactCalcComputation"]
+
+
+class ImpactComputationStrategy(ABC):
+    """
+    Interface for impact computation strategies.
+
+    This abstract class defines the contract for all concrete strategies
+    responsible for computing the impact from a set of inputs (exposure,
+    hazard, vulnerability) and, optionally, for modifying it, for instance
+    through a risk transfer.
+
+    It revolves around a `compute_impacts()` method that takes as arguments
+    the three dimensions of risk (exposure, hazard, vulnerability) and returns
+    an Impact object.
+    """
+
+    @abstractmethod
+    def compute_impacts(
+        self,
+        exp: Exposures,
+        haz: Hazard,
+        vul: ImpactFuncSet,
+    ) -> Impact:
+        """
+        Calculates the total impact, including optional risk transfer application.
+
+        Parameters
+        ----------
+        exp : Exposures
+            The exposure data.
+        haz : Hazard
+            The hazard data (e.g., event intensity).
+        vul : ImpactFuncSet
+            The set of vulnerability functions.
+
+        Returns
+        -------
+        Impact
+            An object containing the computed total impact matrix and metrics.
+
+        See Also
+        --------
+        ImpactCalcComputation : The default implementation of this interface.
+        """
+        ...
+
+
+class ImpactCalcComputation(ImpactComputationStrategy):
+    r"""
+    Default impact computation strategy using the core engine of CLIMADA.
+
+    This strategy first calculates the raw impact using the standard
+    :class:`ImpactCalc` logic.
+
+    """
+
+    def compute_impacts(
+        self,
+        exp: Exposures,
+        haz: Hazard,
+        vul: ImpactFuncSet,
+    ) -> Impact:
+        """
+        Calculates the impact. No risk transfer is applied in this default strategy.
+
+        Parameters
+        ----------
+        exp : Exposures
+            The exposure data.
+        haz : Hazard
+            The hazard data.
+        vul : ImpactFuncSet
+            The set of vulnerability functions.
+
+        Returns
+        -------
+        Impact
+            The final impact object.
+        """
+        impact = self.compute_impacts_pre_transfer(exp, haz, vul)
+        return impact
+
+    def compute_impacts_pre_transfer(
+        self,
+        exp: Exposures,
+        haz: Hazard,
+        vul: ImpactFuncSet,
+    ) -> Impact:
+        """
+        Calculates the raw impact matrix before any risk transfer is applied.
+
+        Parameters
+        ----------
+        exp : Exposures
+            The exposure data.
+        haz : Hazard
+            The hazard data.
+        vul : ImpactFuncSet
+            The set of vulnerability functions.
+
+        Returns
+        -------
+        Impact
+            An Impact object containing the raw, pre-transfer impact matrix.
+        """
+        return ImpactCalc(exposures=exp, impfset=vul, hazard=haz).impact()

diff --git a/climada/trajectories/test/test_impact_calc_strat.py b/climada/trajectories/test/test_impact_calc_strat.py
new file mode 100644
index 0000000000..a828ec51e6
--- /dev/null
+++ b/climada/trajectories/test/test_impact_calc_strat.py
@@ -0,0 +1,84 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see <https://www.gnu.org/licenses/>.
+ +--- + +Tests for impact_calc_strat + +""" + +import unittest +from unittest.mock import MagicMock, patch + +from climada.engine import Impact +from climada.entity import ImpactFuncSet +from climada.entity.exposures import Exposures +from climada.hazard import Hazard +from climada.trajectories import Snapshot +from climada.trajectories.impact_calc_strat import ImpactCalcComputation + + +class TestImpactCalcComputation(unittest.TestCase): + def setUp(self): + self.mock_snapshot0 = MagicMock(spec=Snapshot) + self.mock_snapshot0.exposure = MagicMock(spec=Exposures) + self.mock_snapshot0.hazard = MagicMock(spec=Hazard) + self.mock_snapshot0.impfset = MagicMock(spec=ImpactFuncSet) + self.mock_snapshot1 = MagicMock(spec=Snapshot) + self.mock_snapshot1.exposure = MagicMock(spec=Exposures) + self.mock_snapshot1.hazard = MagicMock(spec=Hazard) + self.mock_snapshot1.impfset = MagicMock(spec=ImpactFuncSet) + + self.impact_calc_computation = ImpactCalcComputation() + + @patch.object(ImpactCalcComputation, "compute_impacts_pre_transfer") + def test_compute_impacts(self, mock_calculate_impacts_for_snapshots): + mock_impacts = MagicMock(spec=Impact) + mock_calculate_impacts_for_snapshots.return_value = mock_impacts + + result = self.impact_calc_computation.compute_impacts( + exp=self.mock_snapshot0.exposure, + haz=self.mock_snapshot0.hazard, + vul=self.mock_snapshot0.impfset, + ) + + self.assertEqual(result, mock_impacts) + mock_calculate_impacts_for_snapshots.assert_called_once_with( + self.mock_snapshot0.exposure, + self.mock_snapshot0.hazard, + self.mock_snapshot0.impfset, + ) + + def test_calculate_impacts_for_snapshots(self): + mock_imp_E0H0 = MagicMock(spec=Impact) + + with patch( + "climada.trajectories.impact_calc_strat.ImpactCalc" + ) as mock_impact_calc: + mock_impact_calc.return_value.impact.side_effect = [mock_imp_E0H0] + + result = self.impact_calc_computation.compute_impacts_pre_transfer( + exp=self.mock_snapshot0.exposure, + haz=self.mock_snapshot0.hazard, + vul=self.mock_snapshot0.impfset, + ) + + self.assertEqual(result, mock_imp_E0H0) + + +if __name__ == "__main__": + TESTS = unittest.TestLoader().loadTestsFromTestCase(TestImpactCalcComputation) + unittest.TextTestRunner(verbosity=2).run(TESTS) From a734bfa896b69170a9951b54507b16c6d707d145 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 10:34:06 +0100 Subject: [PATCH 04/37] cherrypick from risk_traj --- climada/trajectories/calc_risk_metrics.py | 1214 +++++++++++++++ climada/trajectories/constants.py | 55 + climada/trajectories/test/test_riskperiod.py | 1389 ++++++++++++++++++ 3 files changed, 2658 insertions(+) create mode 100644 climada/trajectories/calc_risk_metrics.py create mode 100644 climada/trajectories/constants.py create mode 100644 climada/trajectories/test/test_riskperiod.py diff --git a/climada/trajectories/calc_risk_metrics.py b/climada/trajectories/calc_risk_metrics.py new file mode 100644 index 0000000000..04846d18d3 --- /dev/null +++ b/climada/trajectories/calc_risk_metrics.py @@ -0,0 +1,1214 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. 
See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see <https://www.gnu.org/licenses/>.
+
+---
+
+This module implements the CalcRiskMetricsPoints and CalcRiskMetricsPeriod
+classes.
+
+They are used to compute risk metrics (and intermediate requirements)
+in between snapshots.
+
+As these computations are not always required and can become "heavy", a
+so-called "lazy" approach is used: computation is only done when required,
+and the result is then stored.
+
+"""
+
+import datetime
+import itertools
+import logging
+
+import numpy as np
+import pandas as pd
+from scipy.sparse import csr_matrix
+
+from climada.engine.impact import Impact, ImpactFreqCurve
+from climada.engine.impact_calc import ImpactCalc
+from climada.entity.measures.base import Measure
+from climada.trajectories.constants import (
+    AAI_METRIC_NAME,
+    CONTRIBUTION_BASE_RISK_NAME,
+    CONTRIBUTION_EXPOSURE_NAME,
+    CONTRIBUTION_HAZARD_NAME,
+    CONTRIBUTION_INTERACTION_TERM_NAME,
+    CONTRIBUTION_TOTAL_RISK_NAME,
+    CONTRIBUTION_VULNERABILITY_NAME,
+    COORD_ID_COL_NAME,
+    DATE_COL_NAME,
+    DEFAULT_PERIOD_INDEX_NAME,
+    EAI_METRIC_NAME,
+    GROUP_COL_NAME,
+    GROUP_ID_COL_NAME,
+    MEASURE_COL_NAME,
+    METRIC_COL_NAME,
+    NO_MEASURE_VALUE,
+    RISK_COL_NAME,
+    RP_VALUE_PREFIX,
+    UNIT_COL_NAME,
+)
+from climada.trajectories.impact_calc_strat import ImpactComputationStrategy
+from climada.trajectories.interpolation import (
+    InterpolationStrategyBase,
+    linear_interp_arrays,
+)
+from climada.trajectories.snapshot import Snapshot
+
+LOGGER = logging.getLogger(__name__)
+
+__all__ = [
+    "CalcRiskMetricsPoints",
+    "CalcRiskMetricsPeriod",
+    "calc_per_date_aais",
+    "calc_per_date_eais",
+    "calc_per_date_rps",
+    "calc_freq_curve",
+]
+
+
+def lazy_property(method):
+    # This function is used as a decorator for properties that require
+    # "heavy" computation and are not always needed. When the property is
+    # requested and its cached value is None, the corresponding computation
+    # method runs and the result is cached in the matching private attribute.
+    attr_name = f"_{method.__name__}"
+
+    @property
+    def _lazy(self):
+        if getattr(self, attr_name) is None:
+            setattr(self, attr_name, method(self))
+        return getattr(self, attr_name)
+
+    return _lazy
+
+
+class CalcRiskMetricsPoints:
+    """This class handles the computation of impacts for a list of `Snapshot`.
+
+    Note that most attribute-like members are properties with their own docstring.
+
+    Attributes
+    ----------
+    impact_computation_strategy: ImpactComputationStrategy, optional
+        The method used to calculate the impact from the (Haz,Exp,Vul) of the snapshots.
+        Defaults to ImpactCalc.
+    measure: Measure, optional
+        The measure applied to snapshots. Defaults to None.
+
+    Notes
+    -----
+    This class is intended for internal computation.
+    """
+
+    def __init__(
+        self,
+        snapshots: list[Snapshot],
+        impact_computation_strategy: ImpactComputationStrategy,
+    ) -> None:
+        """Initialize a new `CalcRiskMetricsPoints`.
+
+        This initializes and instantiates a new `CalcRiskMetricsPoints` object.
+        No computation is done at initialisation; it is only done "just in time".
+
+        Parameters
+        ----------
+        snapshots : List[Snapshot]
+            The `Snapshot` list to compute risk for.
+        impact_computation_strategy: ImpactComputationStrategy, optional
+            The method used to calculate the impact from the (Haz,Exp,Vul) of the snapshots.
+ Defaults to ImpactCalc + + """ + + self._reset_impact_data() + self.snapshots = snapshots + self.impact_computation_strategy = impact_computation_strategy + self._date_idx = pd.DatetimeIndex( + [snap.date for snap in self.snapshots], name=DATE_COL_NAME + ) + self.measure = None + try: + self._group_id = np.unique( + np.concatenate( + [ + snap.exposure.gdf[GROUP_ID_COL_NAME] + for snap in self.snapshots + if GROUP_ID_COL_NAME in snap.exposure.gdf.columns + ] + ) + ) + except ValueError as e: + error_message = str(e).lower() + if "need at least one array to concatenate" in error_message: + self._group_id = np.array([]) + + def _reset_impact_data(self): + """Util method that resets computed data, for instance when changing the computation strategy.""" + self._impacts = None + self._eai_gdf = None + self._per_date_eai = None + self._per_date_aai = None + + @property + def impact_computation_strategy(self) -> ImpactComputationStrategy: + """The method used to calculate the impact from the (Haz,Exp,Vul) of the snapshots.""" + return self._impact_computation_strategy + + @impact_computation_strategy.setter + def impact_computation_strategy(self, value, /): + if not isinstance(value, ImpactComputationStrategy): + raise ValueError("Not an impact computation strategy") + + self._impact_computation_strategy = value + self._reset_impact_data() + + @lazy_property + def impacts(self) -> list[Impact]: + """Return Impact object for the different snapshots.""" + + return [ + self.impact_computation_strategy.compute_impacts( + snap.exposure, snap.hazard, snap.impfset + ) + for snap in self.snapshots + ] + + @lazy_property + def per_date_eai(self) -> np.ndarray: + """Expected annual impacts per snapshot.""" + + return np.array([imp.eai_exp for imp in self.impacts]) + + @lazy_property + def per_date_aai(self) -> np.ndarray: + """Average annual impacts per snapshot.""" + + return np.array([imp.aai_agg for imp in self.impacts]) + + @lazy_property + def eai_gdf(self) -> pd.DataFrame: + """Convenience function returning a DataFrame (with both datetime and coordinates) from `per_date_eai`. + + This can easily be merged with the GeoDataFrame of the exposure object of one of the `Snapshot`. + + Notes + ----- + + The DataFrame from the first snapshot of the list is used as a basis (notably for `value` and `group_id`). 
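+
+        Examples
+        --------
+        A sketch; ``calc`` stands for a ``CalcRiskMetricsPoints`` instance,
+        and the column-name constants come from
+        :mod:`climada.trajectories.constants`:
+
+        >>> eai = calc.eai_gdf
+        >>> eai.groupby(DATE_COL_NAME)[RISK_COL_NAME].sum()  # doctest: +SKIP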
+ """ + return self.calc_eai_gdf() + + def calc_eai_gdf(self) -> pd.DataFrame: + """Merge the per date EAIs of the risk period with the Dataframe of the exposure of the starting snapshot.""" + + df = pd.DataFrame(self.per_date_eai, index=self._date_idx) + df = df.reset_index().melt( + id_vars=DATE_COL_NAME, var_name=COORD_ID_COL_NAME, value_name=RISK_COL_NAME + ) + eai_gdf = pd.concat( + [ + snap.exposure.gdf.reset_index(names=[COORD_ID_COL_NAME]).assign( + date=pd.to_datetime(snap.date) + ) + for snap in self.snapshots + ] + ) + if GROUP_ID_COL_NAME in eai_gdf.columns: + eai_gdf = eai_gdf[[DATE_COL_NAME, COORD_ID_COL_NAME, GROUP_ID_COL_NAME]] + else: + eai_gdf[[GROUP_ID_COL_NAME]] = pd.NA + eai_gdf = eai_gdf[[DATE_COL_NAME, COORD_ID_COL_NAME, GROUP_ID_COL_NAME]] + + eai_gdf = eai_gdf.merge(df, on=[DATE_COL_NAME, COORD_ID_COL_NAME]) + eai_gdf = eai_gdf.rename(columns={GROUP_ID_COL_NAME: GROUP_COL_NAME}) + eai_gdf[GROUP_COL_NAME] = pd.Categorical( + eai_gdf[GROUP_COL_NAME], categories=self._group_id + ) + eai_gdf[METRIC_COL_NAME] = EAI_METRIC_NAME + eai_gdf[MEASURE_COL_NAME] = ( + self.measure.name if self.measure else NO_MEASURE_VALUE + ) + eai_gdf[UNIT_COL_NAME] = self.snapshots[0].exposure.value_unit + return eai_gdf + + def calc_aai_metric(self) -> pd.DataFrame: + """Compute a DataFrame of the AAI for each snapshot.""" + + aai_df = pd.DataFrame( + index=self._date_idx, columns=[RISK_COL_NAME], data=self.per_date_aai + ) + aai_df[GROUP_COL_NAME] = pd.Categorical( + [pd.NA] * len(aai_df), categories=self._group_id + ) + aai_df[METRIC_COL_NAME] = AAI_METRIC_NAME + aai_df[MEASURE_COL_NAME] = ( + self.measure.name if self.measure else NO_MEASURE_VALUE + ) + aai_df[UNIT_COL_NAME] = self.snapshots[0].exposure.value_unit + aai_df.reset_index(inplace=True) + return aai_df + + def calc_aai_per_group_metric(self) -> pd.DataFrame | None: + """Compute a DataFrame of the AAI distinguised per group id in the exposures, for each snapshot.""" + + if len(self._group_id) < 1: + LOGGER.warning( + "No group id defined in the Exposures object. Per group aai will be empty." + ) + return None + + eai_pres_groups = self.eai_gdf[ + [DATE_COL_NAME, COORD_ID_COL_NAME, GROUP_COL_NAME, RISK_COL_NAME] + ].copy() + aai_per_group_df = eai_pres_groups.groupby( + [DATE_COL_NAME, GROUP_COL_NAME], as_index=False, observed=True + )[RISK_COL_NAME].sum() + aai_per_group_df[METRIC_COL_NAME] = AAI_METRIC_NAME + aai_per_group_df[MEASURE_COL_NAME] = ( + self.measure.name if self.measure else NO_MEASURE_VALUE + ) + aai_per_group_df[UNIT_COL_NAME] = self.snapshots[0].exposure.value_unit + return aai_per_group_df + + def calc_return_periods_metric(self, return_periods: list[int]) -> pd.DataFrame: + """Compute a DataFrame of the estimated impacts for a list of return periods, for each snapshot. + + Parameters + ---------- + + return_periods : list of int + The return periods to estimate impacts for. 
+ """ + + per_date_rp = np.array( + [ + imp.calc_freq_curve(return_per=return_periods).impact + for imp in self.impacts + ] + ) + rp_df = pd.DataFrame( + index=self._date_idx, columns=return_periods, data=per_date_rp + ).melt(value_name=RISK_COL_NAME, var_name="rp", ignore_index=False) + rp_df.reset_index(inplace=True) + rp_df[GROUP_COL_NAME] = pd.Categorical( + [pd.NA] * len(rp_df), categories=self._group_id + ) + rp_df[METRIC_COL_NAME] = RP_VALUE_PREFIX + "_" + rp_df["rp"].astype(str) + rp_df = rp_df.drop("rp", axis=1) + rp_df[MEASURE_COL_NAME] = ( + self.measure.name if self.measure else NO_MEASURE_VALUE + ) + rp_df[UNIT_COL_NAME] = self.snapshots[0].exposure.value_unit + return rp_df + + def apply_measure(self, measure: Measure) -> "CalcRiskMetricsPoints": + """Creates a new `CalcRiskMetricsPoints` object with a measure. + + The given measure is applied to both snapshot of the risk period. + + Parameters + ---------- + measure : Measure + The measure to apply. + + Returns + ------- + + CalcRiskPeriod + The risk period with given measure applied. + + """ + snapshots = [snap.apply_measure(measure) for snap in self.snapshots] + risk_period = CalcRiskMetricsPoints( + snapshots, + self.impact_computation_strategy, + ) + + risk_period.measure = measure + return risk_period + + +class CalcRiskMetricsPeriod: + """This class handles the computation of impacts for a risk period. + + This object handles the interpolations and computations of risk metrics in + between two given snapshots, along a DateTimeIndex build from either a + `time_resolution` (which must be a valid "freq" string to build a DateTimeIndex) + and defaults to "Y" (start of the year) or `time_points` integer argument, in which case + the DateTimeIndex will have that many periods. + + Note that most attribute like members are properties with their own docstring. + + Attributes + ---------- + + date_idx: pd.PeriodIndex + The date index for the different interpolated points between the two snapshots + interpolation_strategy: InterpolationStrategy, optional + The approach used to interpolate impact matrices in between the two snapshots, linear by default. + impact_computation_strategy: ImpactComputationStrategy, optional + The method used to calculate the impact from the (Haz,Exp,Vul) of the two snapshots. + Defaults to ImpactCalc + measure: Measure, optional + The measure to apply to both snapshots. Defaults to None. + + Notes + ----- + + This class is intended for internal computation. + """ + + def __init__( + self, + snapshot0: Snapshot, + snapshot1: Snapshot, + time_resolution: str, + interpolation_strategy: InterpolationStrategyBase, + impact_computation_strategy: ImpactComputationStrategy, + ): + """Initialize a new `CalcRiskMetricsPeriod` + + This initializes and instantiate a new `CalcRiskMetricsPeriod` object. + No computation is done at initialisation and only done "just in time". + + Parameters + ---------- + snapshot0 : Snapshot + The `Snapshot` at the start of the risk period. + snapshot1 : Snapshot + The `Snapshot` at the end of the risk period. + time_resolution : str, optional + One of pandas date offset strings or corresponding objects. See :func:`pandas.period_range`. + time_points : int, optional + Number of periods to generate for the PeriodIndex. + interpolation_strategy: InterpolationStrategy, optional + The approach used to interpolate impact matrices in between the two snapshots, linear by default. 
+        impact_computation_strategy: ImpactComputationStrategy, optional
+            The method used to calculate the impact from the (Haz,Exp,Vul) of the two snapshots.
+            Defaults to ImpactCalc.
+
+        """
+
+        LOGGER.debug("Instantiating new CalcRiskMetricsPeriod.")
+        self._snapshot0 = snapshot0
+        self._snapshot1 = snapshot1
+        self.date_idx = self._set_date_idx(
+            date1=snapshot0.date,
+            date2=snapshot1.date,
+            freq=time_resolution,
+            name=DEFAULT_PERIOD_INDEX_NAME,
+        )
+        self.interpolation_strategy = interpolation_strategy
+        self.impact_computation_strategy = impact_computation_strategy
+        self.measure = None  # Only possible to set with apply_measure to make sure snapshots are consistent
+
+        self._group_id_E0 = (
+            np.array(self.snapshot_start.exposure.gdf[GROUP_ID_COL_NAME].values)
+            if GROUP_ID_COL_NAME in self.snapshot_start.exposure.gdf.columns
+            else np.array([])
+        )
+        self._group_id_E1 = (
+            np.array(self.snapshot_end.exposure.gdf[GROUP_ID_COL_NAME].values)
+            if GROUP_ID_COL_NAME in self.snapshot_end.exposure.gdf.columns
+            else np.array([])
+        )
+        self._groups_id = np.unique(
+            np.concatenate([self._group_id_E0, self._group_id_E1])
+        )
+
+    def _reset_impact_data(self):
+        """Util method that resets computed data, for instance when changing the time resolution."""
+        for fut in list(itertools.product([0, 1], repeat=3)):
+            setattr(self, f"_E{fut[0]}H{fut[1]}V{fut[2]}", None)
+
+        for fut in list(itertools.product([0, 1], repeat=2)):
+            setattr(self, f"_imp_mats_H{fut[0]}V{fut[1]}", None)
+            setattr(self, f"_per_date_eai_H{fut[0]}V{fut[1]}", None)
+            setattr(self, f"_per_date_aai_H{fut[0]}V{fut[1]}", None)
+
+        self._eai_gdf = None
+        self._per_date_eai = None
+        self._per_date_aai = None
+        self._per_date_return_periods_H0, self._per_date_return_periods_H1 = None, None
+
+    @staticmethod
+    def _set_date_idx(
+        date1: str | pd.Timestamp | datetime.date,
+        date2: str | pd.Timestamp | datetime.date,
+        freq: str | None = None,
+        name: str | None = None,
+    ) -> pd.PeriodIndex:
+        """Generate a date range index based on the provided parameters.
+
+        Parameters
+        ----------
+        date1 : str or pd.Timestamp or datetime.date
+            The start date of the period range.
+        date2 : str or pd.Timestamp or datetime.date
+            The end date of the period range.
+        freq : str, optional
+            Frequency string for the period range. See the pandas
+            documentation on period frequency (offset) aliases.
+        name : str, optional
+            Name of the resulting period range index.
+
+        Returns
+        -------
+        pd.PeriodIndex
+            A PeriodIndex representing the date range.
+
+        Raises
+        ------
+        ValueError
+            If the number of periods and frequency given to period_range are inconsistent.
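+
+        Examples
+        --------
+        A sketch of the yearly index construction:
+
+        >>> idx = CalcRiskMetricsPeriod._set_date_idx(
+        ...     "2020-01-01", "2025-01-01", freq="Y"
+        ... )
+        >>> len(idx)  # six yearly periods, 2020 through 2025
+        6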
+ """ + ret = pd.period_range( + date1, + date2, + freq=freq, # type: ignore + name=name, + ) + return ret + + @property + def snapshot_start(self) -> Snapshot: + """The `Snapshot` at the start of the risk period.""" + return self._snapshot0 + + @property + def snapshot_end(self) -> Snapshot: + """The `Snapshot` at the end of the risk period.""" + return self._snapshot1 + + @property + def date_idx(self) -> pd.PeriodIndex: + """The pandas PeriodIndex representing the time dimension of the risk period.""" + return self._date_idx + + @date_idx.setter + def date_idx(self, value, /): + if not isinstance(value, pd.PeriodIndex): + raise ValueError("Not a PeriodIndex") + + self._date_idx = value # Avoids weird hourly data + self._time_points = len(self.date_idx) + self._time_resolution = self.date_idx.freq + self._reset_impact_data() + + @property + def time_points(self) -> int: + """The numbers of different time points (periods) in the risk period.""" + return self._time_points + + @property + def time_resolution(self) -> str: + """The time resolution of the risk periods, expressed as a pandas period frequency string.""" + return self._time_resolution # type: ignore + + @time_resolution.setter + def time_resolution(self, value, /): + self.date_idx = pd.period_range( + self.snapshot_start.date, + self.snapshot_end.date, + freq=value, + name=DEFAULT_PERIOD_INDEX_NAME, + ) + + @property + def interpolation_strategy(self) -> InterpolationStrategyBase: + """The approach used to interpolate impact matrices in between the two snapshots.""" + return self._interpolation_strategy + + @interpolation_strategy.setter + def interpolation_strategy(self, value, /): + if not isinstance(value, InterpolationStrategyBase): + raise ValueError("Not an interpolation strategy") + + self._interpolation_strategy = value + self._reset_impact_data() + + @property + def impact_computation_strategy(self) -> ImpactComputationStrategy: + """The method used to calculate the impact from the (Haz,Exp,Vul) of the two snapshots.""" + return self._impact_computation_strategy + + @impact_computation_strategy.setter + def impact_computation_strategy(self, value, /): + if not isinstance(value, ImpactComputationStrategy): + raise ValueError("Not an impact computation strategy") + + self._impact_computation_strategy = value + self._reset_impact_data() + + ##### Impact objects cube / Risk Cube ##### + + @lazy_property + def E0H0V0(self) -> Impact: + """Impact object corresponding to starting exposure, starting hazard and starting vulnerability.""" + return self.impact_computation_strategy.compute_impacts( + self.snapshot_start.exposure, + self.snapshot_start.hazard, + self.snapshot_start.impfset, + ) + + @lazy_property + def E1H0V0(self) -> Impact: + """Impact object corresponding to future exposure, starting hazard and starting vulnerability.""" + return self.impact_computation_strategy.compute_impacts( + self.snapshot_end.exposure, + self.snapshot_start.hazard, + self.snapshot_start.impfset, + ) + + @lazy_property + def E0H1V0(self) -> Impact: + """Impact object corresponding to starting exposure, future hazard and starting vulnerability.""" + return self.impact_computation_strategy.compute_impacts( + self.snapshot_start.exposure, + self.snapshot_end.hazard, + self.snapshot_start.impfset, + ) + + @lazy_property + def E1H1V0(self) -> Impact: + """Impact object corresponding to future exposure, future hazard and starting vulnerability.""" + return self.impact_computation_strategy.compute_impacts( + self.snapshot_end.exposure, + 
self.snapshot_end.hazard,
+ self.snapshot_start.impfset,
+ )
+
+ @lazy_property
+ def E0H0V1(self) -> Impact:
+ """Impact object corresponding to starting exposure, starting hazard and future vulnerability."""
+ return self.impact_computation_strategy.compute_impacts(
+ self.snapshot_start.exposure,
+ self.snapshot_start.hazard,
+ self.snapshot_end.impfset,
+ )
+
+ @lazy_property
+ def E1H0V1(self) -> Impact:
+ """Impact object corresponding to future exposure, starting hazard and future vulnerability."""
+ return self.impact_computation_strategy.compute_impacts(
+ self.snapshot_end.exposure,
+ self.snapshot_start.hazard,
+ self.snapshot_end.impfset,
+ )
+
+ @lazy_property
+ def E0H1V1(self) -> Impact:
+ """Impact object corresponding to starting exposure, future hazard and future vulnerability."""
+ return self.impact_computation_strategy.compute_impacts(
+ self.snapshot_start.exposure,
+ self.snapshot_end.hazard,
+ self.snapshot_end.impfset,
+ )
+
+ @lazy_property
+ def E1H1V1(self) -> Impact:
+ """Impact object corresponding to future exposure, future hazard and future vulnerability."""
+ return self.impact_computation_strategy.compute_impacts(
+ self.snapshot_end.exposure,
+ self.snapshot_end.hazard,
+ self.snapshot_end.impfset,
+ )
+
+ ###############################
+
+ ### Impact Matrices arrays ####
+
+ def _interp_mats(self, start_attr, end_attr) -> list:
+ """Helper to reduce repetition in impact matrix interpolation."""
+ start = getattr(self, start_attr).imp_mat
+ end = getattr(self, end_attr).imp_mat
+ return self.interpolation_strategy.interp_over_exposure_dim(
+ start, end, self.time_points
+ )
+
+ @property
+ def imp_mats_H0V0(self) -> list:
+ """List of `time_points` impact matrices with changing exposure, starting hazard and starting vulnerability."""
+ return self._interp_mats("E0H0V0", "E1H0V0")
+
+ @property
+ def imp_mats_H1V0(self) -> list:
+ """List of `time_points` impact matrices with changing exposure, future hazard and starting vulnerability."""
+ return self._interp_mats("E0H1V0", "E1H1V0")
+
+ @property
+ def imp_mats_H0V1(self) -> list:
+ """List of `time_points` impact matrices with changing exposure, starting hazard and future vulnerability."""
+ return self._interp_mats("E0H0V1", "E1H0V1")
+
+ @property
+ def imp_mats_H1V1(self) -> list:
+ """List of `time_points` impact matrices with changing exposure, future hazard and future vulnerability."""
+ return self._interp_mats("E0H1V1", "E1H1V1")
+
+ @property
+ def imp_mats_E0H0V0(self) -> list:
+ """List of `time_points` impact matrices with base exposure, base hazard and base vulnerability."""
+ return self._interp_mats("E0H0V0", "E0H0V0")
+
+ @property
+ def imp_mats_E0H1V0(self) -> list:
+ """List of `time_points` impact matrices with base exposure, future hazard and base vulnerability."""
+ return self._interp_mats("E0H1V0", "E0H1V0")
+
+ @property
+ def imp_mats_E0H0V1(self) -> list:
+ """List of `time_points` impact matrices with base exposure, base hazard and future vulnerability."""
+ return self._interp_mats("E0H0V1", "E0H0V1")
+
+ ###############################
+
+ ########## Core EAI ###########
+
+ @property
+ def per_date_eai_H0V0(self) -> np.ndarray:
+ """Expected annual impacts for changing exposure, starting hazard and starting vulnerability."""
+ return calc_per_date_eais(
+ self.imp_mats_H0V0, self.snapshot_start.hazard.frequency
+ )
+
+ @property
+ def per_date_eai_H1V0(self) -> np.ndarray:
+ """Expected annual impacts for changing exposure, future hazard and starting vulnerability."""
+
return calc_per_date_eais(
+ self.imp_mats_H1V0, self.snapshot_end.hazard.frequency
+ )
+
+ @property
+ def per_date_eai_H0V1(self) -> np.ndarray:
+ """Expected annual impacts for changing exposure, starting hazard and future vulnerability."""
+ return calc_per_date_eais(
+ self.imp_mats_H0V1, self.snapshot_start.hazard.frequency
+ )
+
+ @property
+ def per_date_eai_H1V1(self) -> np.ndarray:
+ """Expected annual impacts for changing exposure, future hazard and future vulnerability."""
+ return calc_per_date_eais(
+ self.imp_mats_H1V1, self.snapshot_end.hazard.frequency
+ )
+
+ @property
+ def per_date_eai_E0H0V0(self) -> np.ndarray:
+ """Expected annual impacts for base exposure, base hazard and base vulnerability."""
+ return calc_per_date_eais(
+ self.imp_mats_E0H0V0, self.snapshot_start.hazard.frequency
+ )
+
+ @property
+ def per_date_eai_E0H1V0(self) -> np.ndarray:
+ """Expected annual impacts for base exposure, future hazard and base vulnerability."""
+ return calc_per_date_eais(
+ self.imp_mats_E0H1V0, self.snapshot_end.hazard.frequency
+ )
+
+ @property
+ def per_date_eai_E0H0V1(self) -> np.ndarray:
+ """Expected annual impacts for base exposure, base hazard and future vulnerability."""
+ return calc_per_date_eais(
+ self.imp_mats_E0H0V1, self.snapshot_start.hazard.frequency
+ )
+
+ ##################################
+
+ ######### Core AAIs ##########
+
+ @property
+ def per_date_aai_H0V0(self) -> np.ndarray:
+ """Average annual impacts for changing exposure, starting hazard and starting vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_H0V0)
+
+ @property
+ def per_date_aai_H1V0(self) -> np.ndarray:
+ """Average annual impacts for changing exposure, future hazard and starting vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_H1V0)
+
+ @property
+ def per_date_aai_H0V1(self) -> np.ndarray:
+ """Average annual impacts for changing exposure, starting hazard and future vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_H0V1)
+
+ @property
+ def per_date_aai_H1V1(self) -> np.ndarray:
+ """Average annual impacts for changing exposure, future hazard and future vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_H1V1)
+
+ @property
+ def per_date_aai_E0H0V0(self) -> np.ndarray:
+ """Average annual impacts for base exposure, base hazard and base vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_E0H0V0)
+
+ @property
+ def per_date_aai_E0H1V0(self) -> np.ndarray:
+ """Average annual impacts for base exposure, future hazard and base vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_E0H1V0)
+
+ @property
+ def per_date_aai_E0H0V1(self) -> np.ndarray:
+ """Average annual impacts for base exposure, base hazard and future vulnerability."""
+ return calc_per_date_aais(self.per_date_eai_E0H0V1)
+
+ #################################
+
+ ######### Core RPs #########
+
+ def per_date_return_periods_H0V0(self, return_periods: list[int]) -> np.ndarray:
+ """Estimated impacts per date for given return periods, with changing exposure, starting hazard and starting vulnerability."""
+ return calc_per_date_rps(
+ self.imp_mats_H0V0,
+ self.snapshot_start.hazard.frequency,
+ self.date_idx.freqstr[0],
+ return_periods,
+ )
+
+ def per_date_return_periods_H1V0(self, return_periods: list[int]) -> np.ndarray:
+ """Estimated impacts per date for given return periods, with changing exposure, future hazard and starting vulnerability."""
+ return calc_per_date_rps(
+ self.imp_mats_H1V0,
+ self.snapshot_end.hazard.frequency,
+
self.date_idx.freqstr[0],
+ return_periods,
+ )
+
+ def per_date_return_periods_H0V1(self, return_periods: list[int]) -> np.ndarray:
+ """Estimated impacts per date for given return periods, with changing exposure, starting hazard and future vulnerability."""
+ return calc_per_date_rps(
+ self.imp_mats_H0V1,
+ self.snapshot_start.hazard.frequency,
+ self.date_idx.freqstr[0],
+ return_periods,
+ )
+
+ def per_date_return_periods_H1V1(self, return_periods: list[int]) -> np.ndarray:
+ """Estimated impacts per date for given return periods, with changing exposure, future hazard and future vulnerability."""
+ return calc_per_date_rps(
+ self.imp_mats_H1V1,
+ self.snapshot_end.hazard.frequency,
+ self.date_idx.freqstr[0],
+ return_periods,
+ )
+
+ ##################################
+
+ ##### Interpolation of metrics #####
+
+ def calc_eai(self) -> np.ndarray:
+ """Compute the EAIs at each date of the risk period (including changes in exposure, hazard and vulnerability)."""
+ per_date_eai_H0V0, per_date_eai_H1V0, per_date_eai_H0V1, per_date_eai_H1V1 = (
+ self.per_date_eai_H0V0,
+ self.per_date_eai_H1V0,
+ self.per_date_eai_H0V1,
+ self.per_date_eai_H1V1,
+ )
+ per_date_eai_V0 = self.interpolation_strategy.interp_over_hazard_dim(
+ per_date_eai_H0V0, per_date_eai_H1V0
+ )
+ per_date_eai_V1 = self.interpolation_strategy.interp_over_hazard_dim(
+ per_date_eai_H0V1, per_date_eai_H1V1
+ )
+ per_date_eai = self.interpolation_strategy.interp_over_vulnerability_dim(
+ per_date_eai_V0, per_date_eai_V1
+ )
+ return per_date_eai
+
+ ### Fully interpolated metrics ###
+
+ @lazy_property
+ def per_date_eai(self) -> np.ndarray:
+ """Expected annual impacts per date with changing exposure, changing hazard and changing vulnerability."""
+ return self.calc_eai()
+
+ @lazy_property
+ def per_date_aai(self) -> np.ndarray:
+ """Average annual impacts per date with changing exposure, changing hazard and changing vulnerability."""
+ return calc_per_date_aais(self.per_date_eai)
+
+ @lazy_property
+ def eai_gdf(self) -> pd.DataFrame:
+ """Convenience property returning a DataFrame (with both date and coordinate ids) built from `per_date_eai`.
+
+ This dataframe can easily be merged with one of the snapshots' exposure GeoDataFrames.
+
+ Notes
+ -----
+
+ The exposure GeoDataFrame of the starting snapshot is used as a basis (notably for `value` and `group_id`).
+
+ """
+ return self.calc_eai_gdf()
+
+ ####################################
+
+ ### Metrics from impact matrices ###
+
+ # These methods might go in a utils file instead, to be reused
+ # for a no interpolation case (and maybe the timeseries?)
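+
+ # Typical (illustrative) use of this class, mirroring the unit tests;
+ # `snap0` and `snap1` are two `Snapshot` objects, and the strategies come
+ # from the interpolation and impact_calc_strat modules:
+ #
+ #     period = CalcRiskMetricsPeriod(
+ #         snap0,
+ #         snap1,
+ #         time_resolution="Y",
+ #         interpolation_strategy=AllLinearStrategy(),
+ #         impact_computation_strategy=ImpactCalcComputation(),
+ #     )
+ #     aai_df = period.calc_aai_metric()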
+
+ ####################################
+
+ def calc_eai_gdf(self) -> pd.DataFrame:
+ """Merge the per date EAIs of the risk period with the exposure GeoDataFrame of the starting snapshot."""
+ df = pd.DataFrame(self.per_date_eai, index=self.date_idx)
+ df = df.reset_index().melt(
+ id_vars=DEFAULT_PERIOD_INDEX_NAME,
+ var_name=COORD_ID_COL_NAME,
+ value_name=RISK_COL_NAME,
+ )
+ if GROUP_ID_COL_NAME in self.snapshot_start.exposure.gdf:
+ eai_gdf = self.snapshot_start.exposure.gdf[[GROUP_ID_COL_NAME]].copy()
+ eai_gdf[COORD_ID_COL_NAME] = eai_gdf.index
+ eai_gdf = eai_gdf.merge(df, on=COORD_ID_COL_NAME)
+ eai_gdf = eai_gdf.rename(columns={GROUP_ID_COL_NAME: GROUP_COL_NAME})
+ else:
+ eai_gdf = df
+ eai_gdf[GROUP_COL_NAME] = pd.NA
+
+ eai_gdf[GROUP_COL_NAME] = pd.Categorical(
+ eai_gdf[GROUP_COL_NAME], categories=self._groups_id
+ )
+ eai_gdf[METRIC_COL_NAME] = EAI_METRIC_NAME
+ eai_gdf[MEASURE_COL_NAME] = (
+ self.measure.name if self.measure else NO_MEASURE_VALUE
+ )
+ eai_gdf[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit
+ return eai_gdf
+
+ def calc_aai_metric(self) -> pd.DataFrame:
+ """Compute a DataFrame of the AAI at each date of the risk period (including changes in exposure, hazard and vulnerability)."""
+ aai_df = pd.DataFrame(
+ index=self.date_idx, columns=[RISK_COL_NAME], data=self.per_date_aai
+ )
+ aai_df[GROUP_COL_NAME] = pd.Categorical(
+ [pd.NA] * len(aai_df), categories=self._groups_id
+ )
+ aai_df[METRIC_COL_NAME] = AAI_METRIC_NAME
+ aai_df[MEASURE_COL_NAME] = (
+ self.measure.name if self.measure else NO_MEASURE_VALUE
+ )
+ aai_df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit
+ aai_df.reset_index(inplace=True)
+ return aai_df
+
+ def calc_aai_per_group_metric(self) -> pd.DataFrame | None:
+ """Compute a DataFrame of the AAI distinguished per group id in the exposures, at each date of the risk period (including changes in exposure, hazard and vulnerability).
+
+ Notes
+ -----
+
+ If group ids change between the starting and ending snapshots of the risk period, the AAIs are linearly interpolated (with a warning for transparency).
+
+ """
+ if len(self._group_id_E0) < 1 or len(self._group_id_E1) < 1:
+ LOGGER.warning(
+ "No group id defined in at least one of the Exposures objects. Per group AAI will be empty."
+ )
+ return None
+
+ eai_pres_groups = self.eai_gdf[
+ [
+ DEFAULT_PERIOD_INDEX_NAME,
+ COORD_ID_COL_NAME,
+ GROUP_COL_NAME,
+ RISK_COL_NAME,
+ ]
+ ].copy()
+ aai_per_group_df = eai_pres_groups.groupby(
+ [DEFAULT_PERIOD_INDEX_NAME, GROUP_COL_NAME], as_index=False, observed=True
+ )[RISK_COL_NAME].sum()
+ if not np.array_equal(self._group_id_E0, self._group_id_E1):
+ LOGGER.warning(
+ "Group ids change between present and future snapshots. Per group AAI will be linearly interpolated."
+ )
+ eai_fut_groups = self.eai_gdf.copy()
+ eai_fut_groups[GROUP_COL_NAME] = pd.Categorical(
+ np.tile(self._group_id_E1, len(self.date_idx)),
+ categories=self._groups_id,
+ )
+ aai_fut_groups = eai_fut_groups.groupby(
+ [DEFAULT_PERIOD_INDEX_NAME, GROUP_COL_NAME], as_index=False
+ )[RISK_COL_NAME].sum()
+ aai_per_group_df[RISK_COL_NAME] = linear_interp_arrays(
+ aai_per_group_df[RISK_COL_NAME].values,
+ aai_fut_groups[RISK_COL_NAME].values,
+ )
+
+ aai_per_group_df[METRIC_COL_NAME] = AAI_METRIC_NAME
+ aai_per_group_df[MEASURE_COL_NAME] = (
+ self.measure.name if self.measure else NO_MEASURE_VALUE
+ )
+ aai_per_group_df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit
+ return aai_per_group_df
+
+ def calc_return_periods_metric(self, return_periods: list[int]) -> pd.DataFrame:
+ """Compute a DataFrame of the estimated impacts for a list of return
+ periods, at each date of the risk period (including changes in exposure,
+ hazard and vulnerability).
+
+ Parameters
+ ----------
+
+ return_periods : list of int
+ The return periods to estimate impacts for.
+
+ """
+
+ # currently mathematically wrong, but approximately correct; to be
+ # reworked when concatenating the impact matrices for the interpolation
+ per_date_rp_H0V0, per_date_rp_H1V0, per_date_rp_H0V1, per_date_rp_H1V1 = (
+ self.per_date_return_periods_H0V0(return_periods),
+ self.per_date_return_periods_H1V0(return_periods),
+ self.per_date_return_periods_H0V1(return_periods),
+ self.per_date_return_periods_H1V1(return_periods),
+ )
+ per_date_rp_V0 = self.interpolation_strategy.interp_over_hazard_dim(
+ per_date_rp_H0V0, per_date_rp_H1V0
+ )
+ per_date_rp_V1 = self.interpolation_strategy.interp_over_hazard_dim(
+ per_date_rp_H0V1, per_date_rp_H1V1
+ )
+ per_date_rp = self.interpolation_strategy.interp_over_vulnerability_dim(
+ per_date_rp_V0, per_date_rp_V1
+ )
+ rp_df = pd.DataFrame(
+ index=self.date_idx, columns=return_periods, data=per_date_rp
+ ).melt(value_name=RISK_COL_NAME, var_name="rp", ignore_index=False)
+ rp_df.reset_index(inplace=True)
+ rp_df[GROUP_COL_NAME] = pd.Categorical(
+ [pd.NA] * len(rp_df), categories=self._groups_id
+ )
+ rp_df[METRIC_COL_NAME] = RP_VALUE_PREFIX + "_" + rp_df["rp"].astype(str)
+ rp_df = rp_df.drop("rp", axis=1)
+ rp_df[MEASURE_COL_NAME] = (
+ self.measure.name if self.measure else NO_MEASURE_VALUE
+ )
+ rp_df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit
+ return rp_df
+
+ def calc_risk_contributions_metric(self) -> pd.DataFrame:
+ """Compute a DataFrame of the individual contributions to risk (impact),
+ at each date of the risk period (including changes in exposure,
+ hazard and vulnerability).
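+
+ Notes
+ -----
+
+ All contributions are computed relative to the baseline AAI (the first
+ date of the period): the exposure contribution is ``aai_H0V0 - aai[0]``,
+ the hazard contribution is ``aai_E0V0 - aai[0]``, the vulnerability
+ contribution is ``aai_E0H0 - aai[0]``, and the interaction term is the
+ residual, so that the base risk and the contributions sum to the total
+ risk.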
+
+ """
+ per_date_aai_E0V0 = self.interpolation_strategy.interp_over_hazard_dim(
+ self.per_date_aai_E0H0V0, self.per_date_aai_E0H1V0
+ )
+ per_date_aai_E0H0 = self.interpolation_strategy.interp_over_vulnerability_dim(
+ self.per_date_aai_E0H0V0, self.per_date_aai_E0H0V1
+ )
+ df = pd.DataFrame(
+ {
+ CONTRIBUTION_TOTAL_RISK_NAME: self.per_date_aai,
+ CONTRIBUTION_BASE_RISK_NAME: self.per_date_aai[0],
+ CONTRIBUTION_EXPOSURE_NAME: self.per_date_aai_H0V0
+ - self.per_date_aai[0],
+ CONTRIBUTION_HAZARD_NAME: per_date_aai_E0V0
+ # - (self.per_date_aai_H0V0 - self.per_date_aai[0])
+ - self.per_date_aai[0],
+ CONTRIBUTION_VULNERABILITY_NAME: per_date_aai_E0H0
+ - self.per_date_aai[0],
+ # - (self.per_date_aai_H0V0 - self.per_date_aai[0]),
+ },
+ index=self.date_idx,
+ )
+ df[CONTRIBUTION_INTERACTION_TERM_NAME] = df[CONTRIBUTION_TOTAL_RISK_NAME] - (
+ df[CONTRIBUTION_BASE_RISK_NAME]
+ + df[CONTRIBUTION_EXPOSURE_NAME]
+ + df[CONTRIBUTION_HAZARD_NAME]
+ + df[CONTRIBUTION_VULNERABILITY_NAME]
+ )
+ df = df.melt(
+ value_vars=[
+ CONTRIBUTION_BASE_RISK_NAME,
+ CONTRIBUTION_EXPOSURE_NAME,
+ CONTRIBUTION_HAZARD_NAME,
+ CONTRIBUTION_VULNERABILITY_NAME,
+ CONTRIBUTION_INTERACTION_TERM_NAME,
+ ],
+ var_name=METRIC_COL_NAME,
+ value_name=RISK_COL_NAME,
+ ignore_index=False,
+ )
+ df.reset_index(inplace=True)
+ df[GROUP_COL_NAME] = pd.Categorical(
+ [pd.NA] * len(df), categories=self._groups_id
+ )
+ df[MEASURE_COL_NAME] = self.measure.name if self.measure else NO_MEASURE_VALUE
+ df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit
+ return df
+
+ def apply_measure(self, measure: Measure) -> "CalcRiskMetricsPeriod":
+ """Create a new `CalcRiskMetricsPeriod` object with a measure applied.
+
+ The given measure is applied to both snapshots of the risk period.
+
+ Parameters
+ ----------
+ measure : Measure
+ The measure to apply.
+
+ Returns
+ -------
+
+ CalcRiskMetricsPeriod
+ The risk period with the given measure applied.
+
+ """
+ snap0 = self.snapshot_start.apply_measure(measure)
+ snap1 = self.snapshot_end.apply_measure(measure)
+
+ risk_period = CalcRiskMetricsPeriod(
+ snap0,
+ snap1,
+ self.time_resolution,
+ self.interpolation_strategy,
+ self.impact_computation_strategy,
+ )
+
+ risk_period.measure = measure
+ return risk_period
+
+
+def calc_per_date_eais(imp_mats: list[csr_matrix], frequency: np.ndarray) -> np.ndarray:
+ """Calculate expected annual impact (EAI) values from a list of impact matrices
+ corresponding to impacts at different dates (with possible changes along
+ exposure, hazard and vulnerability).
+
+ Parameters
+ ----------
+ imp_mats : list of scipy.sparse.csr_matrix
+ List of impact matrices.
+ frequency : np.ndarray
+ Hazard frequency values.
+
+ Returns
+ -------
+ np.ndarray
+ 2D array of EAI (1D) for each date.
+
+ """
+ per_date_eai_exp = np.array(
+ [ImpactCalc.eai_exp_from_mat(imp_mat, frequency) for imp_mat in imp_mats]
+ )
+ return per_date_eai_exp
+
+
+def calc_per_date_aais(per_date_eai_exp: np.ndarray) -> np.ndarray:
+ """Calculate per date aggregated average annual impact (AAI) values
+ resulting from a list of arrays corresponding to EAI at different
+ dates (with possible changes along exposure, hazard and vulnerability).
+
+ Parameters
+ ----------
+ per_date_eai_exp: np.ndarray
+ EAI arrays.
+
+ Returns
+ -------
+ np.ndarray
+ 1D array of AAI (0D) for each date.
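+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> calc_per_date_aais(np.array([[3, 3, 3], [4, 0, 3]]))
+ array([9, 7])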
+ """ + per_date_aai = np.array( + [ImpactCalc.aai_agg_from_eai_exp(eai_exp) for eai_exp in per_date_eai_exp] + ) + return per_date_aai + + +def calc_per_date_rps( + imp_mats: list[csr_matrix], + frequency: np.ndarray, + frequency_unit: str, + return_periods: list[int], +) -> np.ndarray: + """Calculate per date return period impact values from a + list of impact matrices corresponding to impacts at different + dates (with possible changes along exposure, hazard and vulnerability). + + Parameters + ---------- + imp_mats: list of scipy.crs_matrix + List of impact matrices. + frequency: np.ndarray + Frequency values. + return_periods : list of int + Return periods to calculate impact values for. + + Returns + ------- + np.ndarray + 2D array of impacts per return periods (1D) for each dates. + + """ + rp = np.array( + [ + calc_freq_curve(imp_mat, frequency, frequency_unit, return_periods).impact + for imp_mat in imp_mats + ] + ) + return rp + + +def calc_freq_curve( + imp_mat_intrpl, frequency, frequency_unit, return_per=None +) -> ImpactFreqCurve: + """Calculate the estimated impacts for given return periods. + + Parameters + ---------- + + imp_mat_intrpl: scipy.csr_matrix + An impact matrix. + frequency: np.ndarray + The frequency of the hazard. + return_per: np.ndarray + The return periods to compute impacts for. + + Returns + ------- + np.ndarray + The estimated impacts for the different return periods. + + """ + + at_event = np.sum(imp_mat_intrpl, axis=1).A1 + + # Sort descendingly the impacts per events + sort_idxs = np.argsort(at_event)[::-1] + # Calculate exceedence frequency + exceed_freq = np.cumsum(frequency[sort_idxs]) + # Set return period and impact exceeding frequency + ifc_return_per = 1 / exceed_freq[::-1] + ifc_impact = at_event[sort_idxs][::-1] + + if return_per is not None: + interp_imp = np.interp(return_per, ifc_return_per, ifc_impact) + ifc_return_per = return_per + ifc_impact = interp_imp + + return ImpactFreqCurve( + return_per=ifc_return_per, + impact=ifc_impact, + frequency_unit=frequency_unit, + label="Exceedance frequency curve", + ) diff --git a/climada/trajectories/constants.py b/climada/trajectories/constants.py new file mode 100644 index 0000000000..c315f17761 --- /dev/null +++ b/climada/trajectories/constants.py @@ -0,0 +1,55 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . + +--- + +Define constants for trajectories module. 
+""" + +DEFAULT_TIME_RESOLUTION = "Y" +DATE_COL_NAME = "date" +PERIOD_COL_NAME = "period" +GROUP_COL_NAME = "group" +GROUP_ID_COL_NAME = "group_id" +MEASURE_COL_NAME = "measure" +NO_MEASURE_VALUE = "no_measure" +METRIC_COL_NAME = "metric" +UNIT_COL_NAME = "unit" +RISK_COL_NAME = "risk" +COORD_ID_COL_NAME = "coord_id" + +DEFAULT_PERIOD_INDEX_NAME = "date" + +DEFAULT_RP = [20, 50, 100] +"""Default return periods to use when computing return period impact estimates.""" + +DEFAULT_ALLGROUP_NAME = "All" +"""Default string to use to define the exposure subgroup containing all exposure points.""" + +EAI_METRIC_NAME = "eai" +AAI_METRIC_NAME = "aai" +AAI_PER_GROUP_METRIC_NAME = "aai_per_group" +CONTRIBUTIONS_METRIC_NAME = "risk_contributions" +RETURN_PERIOD_METRIC_NAME = "return_periods" +RP_VALUE_PREFIX = "rp" + + +CONTRIBUTION_BASE_RISK_NAME = "base risk" +CONTRIBUTION_TOTAL_RISK_NAME = "total risk" +CONTRIBUTION_EXPOSURE_NAME = "exposure contribution" +CONTRIBUTION_HAZARD_NAME = "hazard contribution" +CONTRIBUTION_VULNERABILITY_NAME = "vulnerability contribution" +CONTRIBUTION_INTERACTION_TERM_NAME = "interaction contribution" diff --git a/climada/trajectories/test/test_riskperiod.py b/climada/trajectories/test/test_riskperiod.py new file mode 100644 index 0000000000..8ae328109d --- /dev/null +++ b/climada/trajectories/test/test_riskperiod.py @@ -0,0 +1,1389 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . + +--- + +This modules implements different sparce matrices interpolation approaches. 
+ +""" + +import types +import unittest +from unittest.mock import MagicMock, call, patch + +import geopandas as gpd +import numpy as np +import pandas as pd +from scipy.sparse import csr_matrix, issparse +from shapely import Point + +# Assuming these are the necessary imports from climada +from climada.entity.exposures import Exposures +from climada.entity.impact_funcs import ImpactFuncSet +from climada.entity.impact_funcs.trop_cyclone import ImpfTropCyclone +from climada.entity.measures.base import Measure +from climada.hazard import Hazard +from climada.trajectories.constants import ( + AAI_METRIC_NAME, + CONTRIBUTION_BASE_RISK_NAME, + CONTRIBUTION_EXPOSURE_NAME, + CONTRIBUTION_HAZARD_NAME, + CONTRIBUTION_INTERACTION_TERM_NAME, + CONTRIBUTION_VULNERABILITY_NAME, + COORD_ID_COL_NAME, + DATE_COL_NAME, + EAI_METRIC_NAME, + GROUP_COL_NAME, + GROUP_ID_COL_NAME, + MEASURE_COL_NAME, + METRIC_COL_NAME, + NO_MEASURE_VALUE, + RISK_COL_NAME, + UNIT_COL_NAME, +) + +# Import the CalcRiskPeriod class and other necessary classes/functions +from climada.trajectories.impact_calc_strat import ( + ImpactCalcComputation, + ImpactComputationStrategy, +) +from climada.trajectories.interpolation import ( + AllLinearStrategy, + InterpolationStrategyBase, +) +from climada.trajectories.riskperiod import ( + CalcRiskMetricsPeriod, + CalcRiskMetricsPoints, + calc_freq_curve, + calc_per_date_aais, + calc_per_date_eais, + calc_per_date_rps, +) +from climada.trajectories.snapshot import Snapshot +from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 + + +class TestCalcRiskMetricsPoints(unittest.TestCase): + def setUp(self): + # Create mock objects for testing + self.present_date = 2020 + self.future_date = 2025 + self.exposure_present = Exposures.from_hdf5(EXP_DEMO_H5) + self.exposure_present.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) + self.exposure_present.gdf["impf_TC"] = 1 + self.exposure_present.gdf[GROUP_ID_COL_NAME] = ( + self.exposure_present.gdf["value"] + > self.exposure_present.gdf["value"].mean() + ) * 1 + self.hazard_present = Hazard.from_hdf5(HAZ_DEMO_H5) + self.exposure_present.assign_centroids(self.hazard_present, distance="approx") + self.impfset_present = ImpactFuncSet([ImpfTropCyclone.from_emanuel_usa()]) + + self.exposure_future = Exposures.from_hdf5(EXP_DEMO_H5) + n_years = self.future_date - self.present_date + 1 + growth_rate = 1.02 + growth = growth_rate**n_years + self.exposure_future.gdf["value"] = self.exposure_future.gdf["value"] * growth + self.exposure_future.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) + self.exposure_future.gdf["impf_TC"] = 1 + self.exposure_future.gdf[GROUP_ID_COL_NAME] = ( + self.exposure_future.gdf["value"] > self.exposure_future.gdf["value"].mean() + ) * 1 + self.hazard_future = Hazard.from_hdf5(HAZ_DEMO_H5) + self.hazard_future.intensity *= 1.1 + self.exposure_future.assign_centroids(self.hazard_future, distance="approx") + self.impfset_future = ImpactFuncSet( + [ + ImpfTropCyclone.from_emanuel_usa(impf_id=1, v_half=60.0), + ] + ) + + self.measure = MagicMock(spec=Measure) + self.measure.name = "Test Measure" + + # Setup mock return values for measure.apply + self.measure_exposure = MagicMock(spec=Exposures) + self.measure_hazard = MagicMock(spec=Hazard) + self.measure_impfset = MagicMock(spec=ImpactFuncSet) + self.measure.apply.return_value = ( + self.measure_exposure, + self.measure_impfset, + self.measure_hazard, + ) + + # Create mock snapshots + self.mock_snapshot_start = Snapshot( + exposure=self.exposure_present, + 
hazard=self.hazard_present, + impfset=self.impfset_present, + date=self.present_date, + ) + self.mock_snapshot_end = Snapshot( + exposure=self.exposure_future, + hazard=self.hazard_future, + impfset=self.impfset_future, + date=self.future_date, + ) + + # Create an instance of CalcRiskPeriod + self.calc_risk_metrics_points = CalcRiskMetricsPoints( + [self.mock_snapshot_start, self.mock_snapshot_end], + impact_computation_strategy=ImpactCalcComputation(), + ) + + self.expected_eai = np.array( + [ + [ + 8702904.63375606, + 7870925.19290905, + 1805021.12653289, + 3827196.02428828, + 5815346.97427834, + 7870925.19290905, + 7871847.53906951, + 7870925.19290905, + 7886487.76136572, + 7870925.19290905, + 7876058.84500811, + 3858228.67061225, + 8401461.85304853, + 9210350.19520265, + 1806363.23553602, + 6922250.59852326, + 6711006.70101515, + 6886568.00391817, + 6703749.80009753, + 6704689.17531993, + 6703401.93516038, + 6818839.81873556, + 6716262.5286998, + 6703369.87656195, + 6703952.06070945, + 5678897.05935781, + 4984034.77073219, + 6708908.84462217, + 6702586.9472999, + 4961843.43826371, + 5139913.92380089, + 5255310.96072403, + 4981705.85074492, + 4926529.74583162, + 4973726.6063121, + 4926015.68274236, + 4937618.79350358, + 4926144.19851468, + 4926015.68274236, + 9575288.06765627, + 5100904.22956578, + 3501325.10900064, + 5093920.89144773, + 3505527.05928994, + 4002552.92232482, + 3512012.80001039, + 3514993.26161994, + 3562009.79687436, + 3869298.39771648, + 3509317.94922485, + ], + [ + 46651387.10647343, + 42191612.28496882, + 14767621.68800634, + 24849532.38841432, + 32260334.11128166, + 42191612.28496882, + 42196556.46505447, + 42191612.28496882, + 42275034.47974126, + 42191612.28496882, + 42219130.91253302, + 24227735.90988531, + 45035521.54835925, + 49371517.94999501, + 14778602.03484606, + 39909758.65668079, + 38691846.52720026, + 39834520.43061425, + 38650007.36519716, + 38655423.2682883, + 38648001.77388126, + 39313550.93419428, + 38722148.63941796, + 38647816.9422419, + 38651173.48481285, + 33700748.42359267, + 30195870.8789255, + 38679751.48077733, + 38643303.01755095, + 30061424.26274527, + 31140267.73715352, + 31839402.91317674, + 30181761.07222111, + 29847475.57538872, + 30133418.66577969, + 29844361.11423809, + 29914658.78479145, + 29845139.72952577, + 29844361.11423809, + 58012067.61585025, + 30903926.75151934, + 23061159.87895984, + 33550647.3781805, + 23088835.64296583, + 26362451.35547444, + 23131553.38525813, + 23151183.92499699, + 23460854.06493051, + 24271571.95828693, + 23113803.99527559, + ], + ] + ) + + self.expected_aai = np.array([2.88895461e08, 1.69310367e09]) + self.expected_aai_per_group = np.array( + [2.33513758e08, 5.53817034e07, 1.37114041e09, 3.21963264e08] + ) + self.expected_return_period_metric = np.array( + [ + 0.00000000e00, + 0.00000000e00, + 7.10925472e09, + 4.53975437e10, + 1.36547014e10, + 7.69981714e10, + ] + ) + + def test_reset_impact_data(self): + self.calc_risk_metrics_points._impacts = "A" # type:ignore + self.calc_risk_metrics_points._eai_gdf = "B" # type:ignore + self.calc_risk_metrics_points._per_date_eai = "C" # type:ignore + self.calc_risk_metrics_points._per_date_aai = "D" # type:ignore + self.calc_risk_metrics_points._reset_impact_data() + self.assertIsNone(self.calc_risk_metrics_points._impacts) + self.assertIsNone(self.calc_risk_metrics_points._eai_gdf) + self.assertIsNone(self.calc_risk_metrics_points._per_date_aai) + self.assertIsNone(self.calc_risk_metrics_points._per_date_eai) + + def test_set_impact_computation_strategy(self): + 
new_impact_computation_strategy = MagicMock(spec=ImpactComputationStrategy) + self.calc_risk_metrics_points.impact_computation_strategy = ( + new_impact_computation_strategy + ) + self.assertEqual( + self.calc_risk_metrics_points.impact_computation_strategy, + new_impact_computation_strategy, + ) + + def test_set_impact_computation_strategy_wtype(self): + with self.assertRaises(ValueError): + self.calc_risk_metrics_points.impact_computation_strategy = "A" + + @patch.object(CalcRiskMetricsPoints, "impact_computation_strategy") + def test_impacts_arrays(self, mock_impact_compute): + mock_impact_compute.compute_impacts.side_effect = ["A", "B"] + results = self.calc_risk_metrics_points.impacts + mock_impact_compute.compute_impacts.assert_has_calls( + [ + call( + self.mock_snapshot_start.exposure, + self.mock_snapshot_start.hazard, + self.mock_snapshot_start.impfset, + ), + call( + self.mock_snapshot_end.exposure, + self.mock_snapshot_end.hazard, + self.mock_snapshot_end.impfset, + ), + ] + ) + self.assertEqual(results, ["A", "B"]) + + def test_per_date_eai(self): + np.testing.assert_allclose( + self.calc_risk_metrics_points.per_date_eai, self.expected_eai + ) + + def test_per_date_aai(self): + np.testing.assert_allclose( + self.calc_risk_metrics_points.per_date_aai, + self.expected_aai, + ) + + def test_eai_gdf(self): + result_gdf = self.calc_risk_metrics_points.calc_eai_gdf() + self.assertIsInstance(result_gdf, pd.DataFrame) + self.assertEqual( + result_gdf.shape[0], + len(self.mock_snapshot_start.exposure.gdf) + + len(self.mock_snapshot_end.exposure.gdf), + ) + expected_columns = [ + DATE_COL_NAME, + COORD_ID_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue( + all(col in list(result_gdf.columns) for col in expected_columns) + ) + np.testing.assert_allclose( + np.array(result_gdf[RISK_COL_NAME].values), self.expected_eai.flatten() + ) + # Check constants and column transformations + self.assertEqual(result_gdf[METRIC_COL_NAME].unique(), EAI_METRIC_NAME) + self.assertEqual(result_gdf[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_gdf[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_gdf[GROUP_COL_NAME].dtype.name, "category") + self.assertListEqual( + list(result_gdf[GROUP_COL_NAME].cat.categories), + list(self.calc_risk_metrics_points._group_id), + ) + + def test_calc_aai_metric(self): + result_df = self.calc_risk_metrics_points.calc_aai_metric() + self.assertIsInstance(result_df, pd.DataFrame) + self.assertEqual( + result_df.shape[0], len(self.calc_risk_metrics_points.snapshots) + ) + expected_columns = [ + DATE_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + np.testing.assert_allclose( + np.array(result_df[RISK_COL_NAME].values), self.expected_aai + ) + # Check constants and column transformations + self.assertEqual(result_df[METRIC_COL_NAME].unique(), AAI_METRIC_NAME) + self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_df[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") + + def test_calc_aai_per_group_metric(self): + result_df = self.calc_risk_metrics_points.calc_aai_per_group_metric() + self.assertIsInstance(result_df, pd.DataFrame) + self.assertEqual( + 
result_df.shape[0], + len(self.calc_risk_metrics_points.snapshots) + * len(self.calc_risk_metrics_points._group_id), + ) + expected_columns = [ + DATE_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + np.testing.assert_allclose( + np.array(result_df[RISK_COL_NAME].values), self.expected_aai_per_group + ) + # Check constants and column transformations + self.assertEqual(result_df[METRIC_COL_NAME].unique(), AAI_METRIC_NAME) + self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_df[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") + self.assertListEqual(list(result_df[GROUP_COL_NAME].unique()), [0, 1]) + + def test_calc_return_periods_metric(self): + result_df = self.calc_risk_metrics_points.calc_return_periods_metric( + [20, 50, 100] + ) + self.assertIsInstance(result_df, pd.DataFrame) + self.assertEqual( + result_df.shape[0], len(self.calc_risk_metrics_points.snapshots) * 3 + ) + expected_columns = [ + DATE_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + np.testing.assert_allclose( + np.array(result_df[RISK_COL_NAME].values), + self.expected_return_period_metric, + ) + # Check constants and column transformations + self.assertListEqual( + list(result_df[METRIC_COL_NAME].unique()), ["rp_20", "rp_50", "rp_100"] + ) + self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_df[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") + + @patch.object(Snapshot, "apply_measure") + @patch("climada.trajectories.riskperiod.CalcRiskMetricsPoints") + def test_apply_measure(self, mock_CalcRiskMetricPoints, mock_snap_apply_measure): + mock_CalcRiskMetricPoints.return_value = MagicMock(spec=CalcRiskMetricsPeriod) + mock_snap_apply_measure.return_value = 42 + result = self.calc_risk_metrics_points.apply_measure(self.measure) + mock_snap_apply_measure.assert_called_with(self.measure) + mock_CalcRiskMetricPoints.assert_called_with( + [42, 42], + self.calc_risk_metrics_points.impact_computation_strategy, + ) + self.assertEqual(result.measure, self.measure) + + +class TestCalcRiskMetricsPeriod_TopLevel(unittest.TestCase): + def setUp(self): + # Create mock objects for testing + self.present_date = 2020 + self.future_date = 2025 + self.exposure_present = Exposures.from_hdf5(EXP_DEMO_H5) + self.exposure_present.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) + self.exposure_present.gdf["impf_TC"] = 1 + self.exposure_present.gdf[GROUP_ID_COL_NAME] = ( + self.exposure_present.gdf["value"] > 500000 + ) * 1 + self.hazard_present = Hazard.from_hdf5(HAZ_DEMO_H5) + self.exposure_present.assign_centroids(self.hazard_present, distance="approx") + self.impfset_present = ImpactFuncSet([ImpfTropCyclone.from_emanuel_usa()]) + + self.exposure_future = Exposures.from_hdf5(EXP_DEMO_H5) + n_years = self.future_date - self.present_date + 1 + growth_rate = 1.02 + growth = growth_rate**n_years + self.exposure_future.gdf["value"] = self.exposure_future.gdf["value"] * growth + self.exposure_future.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) + self.exposure_future.gdf["impf_TC"] = 
1 + self.exposure_future.gdf[GROUP_ID_COL_NAME] = ( + self.exposure_future.gdf["value"] > 500000 + ) * 1 + self.hazard_future = Hazard.from_hdf5(HAZ_DEMO_H5) + self.hazard_future.intensity *= 1.1 + self.exposure_future.assign_centroids(self.hazard_future, distance="approx") + self.impfset_future = ImpactFuncSet( + [ + ImpfTropCyclone.from_emanuel_usa(impf_id=1, v_half=60.0), + ] + ) + + self.measure = MagicMock(spec=Measure) + self.measure.name = "Test Measure" + + # Setup mock return values for measure.apply + self.measure_exposure = MagicMock(spec=Exposures) + self.measure_hazard = MagicMock(spec=Hazard) + self.measure_impfset = MagicMock(spec=ImpactFuncSet) + self.measure.apply.return_value = ( + self.measure_exposure, + self.measure_impfset, + self.measure_hazard, + ) + + # Create mock snapshots + self.mock_snapshot_start = Snapshot( + exposure=self.exposure_present, + hazard=self.hazard_present, + impfset=self.impfset_present, + date=self.present_date, + ) + self.mock_snapshot_end = Snapshot( + exposure=self.exposure_future, + hazard=self.hazard_future, + impfset=self.impfset_future, + date=self.future_date, + ) + + # Create an instance of CalcRiskPeriod + self.calc_risk_period = CalcRiskMetricsPeriod( + self.mock_snapshot_start, + self.mock_snapshot_end, + time_resolution="Y", + interpolation_strategy=AllLinearStrategy(), + impact_computation_strategy=ImpactCalcComputation(), + # These will have to be tested when implemented + # risk_transf_attach=0.1, + # risk_transf_cover=0.9, + # calc_residual=False + ) + + def test_init(self): + self.assertEqual(self.calc_risk_period.snapshot_start, self.mock_snapshot_start) + self.assertEqual(self.calc_risk_period.snapshot_end, self.mock_snapshot_end) + self.assertEqual(self.calc_risk_period.time_resolution, "Y") + self.assertEqual( + self.calc_risk_period.time_points, self.future_date - self.present_date + 1 + ) + self.assertIsInstance( + self.calc_risk_period.interpolation_strategy, AllLinearStrategy + ) + self.assertIsInstance( + self.calc_risk_period.impact_computation_strategy, ImpactCalcComputation + ) + np.testing.assert_array_equal( + self.calc_risk_period._group_id_E0, + self.mock_snapshot_start.exposure.gdf[GROUP_ID_COL_NAME].values, + ) + np.testing.assert_array_equal( + self.calc_risk_period._group_id_E1, + self.mock_snapshot_end.exposure.gdf[GROUP_ID_COL_NAME].values, + ) + self.assertIsInstance(self.calc_risk_period.date_idx, pd.PeriodIndex) + self.assertEqual( + len(self.calc_risk_period.date_idx), + self.future_date - self.present_date + 1, + ) + + def test_set_date_idx_wrong_type(self): + with self.assertRaises(ValueError): + self.calc_risk_period.date_idx = "A" + + def test_set_date_idx_periods(self): + new_date_idx = pd.period_range("2023-01-01", periods=24) + self.calc_risk_period.date_idx = new_date_idx + self.assertEqual(len(self.calc_risk_period.date_idx), 24) + + def test_set_date_idx_freq(self): + new_date_idx = pd.period_range("2023-01-01", "2023-12-01", freq="M") + self.calc_risk_period.date_idx = new_date_idx + self.assertEqual(len(self.calc_risk_period.date_idx), 12) + pd.testing.assert_index_equal( + self.calc_risk_period.date_idx, + pd.period_range("2023-01-01", "2023-12-01", freq="M"), + ) + + def test_set_time_resolution(self): + self.calc_risk_period.time_resolution = "M" + self.assertEqual(self.calc_risk_period.time_resolution, "M") + pd.testing.assert_index_equal( + self.calc_risk_period.date_idx, + pd.PeriodIndex( + [ + "2020-01-01", + "2020-02-01", + "2020-03-01", + "2020-04-01", + "2020-05-01", + 
"2020-06-01", + "2020-07-01", + "2020-08-01", + "2020-09-01", + "2020-10-01", + "2020-11-01", + "2020-12-01", + "2021-01-01", + "2021-02-01", + "2021-03-01", + "2021-04-01", + "2021-05-01", + "2021-06-01", + "2021-07-01", + "2021-08-01", + "2021-09-01", + "2021-10-01", + "2021-11-01", + "2021-12-01", + "2022-01-01", + "2022-02-01", + "2022-03-01", + "2022-04-01", + "2022-05-01", + "2022-06-01", + "2022-07-01", + "2022-08-01", + "2022-09-01", + "2022-10-01", + "2022-11-01", + "2022-12-01", + "2023-01-01", + "2023-02-01", + "2023-03-01", + "2023-04-01", + "2023-05-01", + "2023-06-01", + "2023-07-01", + "2023-08-01", + "2023-09-01", + "2023-10-01", + "2023-11-01", + "2023-12-01", + "2024-01-01", + "2024-02-01", + "2024-03-01", + "2024-04-01", + "2024-05-01", + "2024-06-01", + "2024-07-01", + "2024-08-01", + "2024-09-01", + "2024-10-01", + "2024-11-01", + "2024-12-01", + "2025-01-01", + ], + name=DATE_COL_NAME, + freq="M", + ), + ) + + def test_set_interpolation_strategy(self): + new_interpolation_strategy = MagicMock(spec=InterpolationStrategyBase) + self.calc_risk_period.interpolation_strategy = new_interpolation_strategy + self.assertEqual( + self.calc_risk_period.interpolation_strategy, new_interpolation_strategy + ) + + def test_set_interpolation_strategy_wtype(self): + with self.assertRaises(ValueError): + self.calc_risk_period.interpolation_strategy = "A" + + def test_set_impact_computation_strategy(self): + new_impact_computation_strategy = MagicMock(spec=ImpactComputationStrategy) + self.calc_risk_period.impact_computation_strategy = ( + new_impact_computation_strategy + ) + self.assertEqual( + self.calc_risk_period.impact_computation_strategy, + new_impact_computation_strategy, + ) + + def test_set_impact_computation_strategy_wtype(self): + with self.assertRaises(ValueError): + self.calc_risk_period.impact_computation_strategy = "A" + + # The computation are tested in the CalcImpactStrategy / InterpolationStrategyBase tests + # Here we just make sure that the calling works + @patch.object(CalcRiskMetricsPeriod, "impact_computation_strategy") + def test_impacts_arrays(self, mock_impact_compute): + mock_impact_compute.compute_impacts.side_effect = [1, 2, 3, 4, 5, 6, 7, 8] + self.assertEqual(self.calc_risk_period.E0H0V0, 1) + self.assertEqual(self.calc_risk_period.E1H0V0, 2) + self.assertEqual(self.calc_risk_period.E0H1V0, 3) + self.assertEqual(self.calc_risk_period.E1H1V0, 4) + self.assertEqual(self.calc_risk_period.E0H0V1, 5) + self.assertEqual(self.calc_risk_period.E1H0V1, 6) + self.assertEqual(self.calc_risk_period.E0H1V1, 7) + self.assertEqual(self.calc_risk_period.E1H1V1, 8) + mock_impact_compute.compute_impacts.assert_has_calls( + [ + call( + exp, + haz, + impf, + ) + for exp, haz, impf in [ + ( + self.mock_snapshot_start.exposure, + self.mock_snapshot_start.hazard, + self.mock_snapshot_start.impfset, + ), + ( + self.mock_snapshot_end.exposure, + self.mock_snapshot_start.hazard, + self.mock_snapshot_start.impfset, + ), + ( + self.mock_snapshot_start.exposure, + self.mock_snapshot_end.hazard, + self.mock_snapshot_start.impfset, + ), + ( + self.mock_snapshot_end.exposure, + self.mock_snapshot_end.hazard, + self.mock_snapshot_start.impfset, + ), + ( + self.mock_snapshot_start.exposure, + self.mock_snapshot_start.hazard, + self.mock_snapshot_end.impfset, + ), + ( + self.mock_snapshot_end.exposure, + self.mock_snapshot_start.hazard, + self.mock_snapshot_end.impfset, + ), + ( + self.mock_snapshot_start.exposure, + self.mock_snapshot_end.hazard, + self.mock_snapshot_end.impfset, + ), + 
(
+ self.mock_snapshot_end.exposure,
+ self.mock_snapshot_end.hazard,
+ self.mock_snapshot_end.impfset,
+ ),
+ ]
+ ]
+ )
+
+ @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy")
+ def test_imp_mats_H0V0(self, mock_interpolate):
+ mock_interpolate.interp_over_exposure_dim.return_value = 1
+ result = self.calc_risk_period.imp_mats_H0V0
+ self.assertEqual(result, 1)
+ mock_interpolate.interp_over_exposure_dim.assert_called_with(
+ self.calc_risk_period.E0H0V0.imp_mat,
+ self.calc_risk_period.E1H0V0.imp_mat,
+ self.calc_risk_period.time_points,
+ )
+
+ @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy")
+ def test_imp_mats_H1V0(self, mock_interpolate):
+ mock_interpolate.interp_over_exposure_dim.return_value = 1
+ result = self.calc_risk_period.imp_mats_H1V0
+ self.assertEqual(result, 1)
+ mock_interpolate.interp_over_exposure_dim.assert_called_with(
+ self.calc_risk_period.E0H1V0.imp_mat,
+ self.calc_risk_period.E1H1V0.imp_mat,
+ self.calc_risk_period.time_points,
+ )
+
+ @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy")
+ def test_imp_mats_H0V1(self, mock_interpolate):
+ mock_interpolate.interp_over_exposure_dim.return_value = 1
+ result = self.calc_risk_period.imp_mats_H0V1
+ self.assertEqual(result, 1)
+ mock_interpolate.interp_over_exposure_dim.assert_called_with(
+ self.calc_risk_period.E0H0V1.imp_mat,
+ self.calc_risk_period.E1H0V1.imp_mat,
+ self.calc_risk_period.time_points,
+ )
+
+ @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy")
+ def test_imp_mats_H1V1(self, mock_interpolate):
+ mock_interpolate.interp_over_exposure_dim.return_value = 1
+ result = self.calc_risk_period.imp_mats_H1V1
+ self.assertEqual(result, 1)
+ mock_interpolate.interp_over_exposure_dim.assert_called_with(
+ self.calc_risk_period.E0H1V1.imp_mat,
+ self.calc_risk_period.E1H1V1.imp_mat,
+ self.calc_risk_period.time_points,
+ )
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_eais")
+ def test_per_date_eai_H0V0(self, mock_calc_per_date_eais):
+ mock_calc_per_date_eais.return_value = 1
+ result = self.calc_risk_period.per_date_eai_H0V0
+
+ actual_arg0 = mock_calc_per_date_eais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H0V0
+
+ actual_arg1 = mock_calc_per_date_eais.call_args[0][1]
+ expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_eais")
+ def test_per_date_eai_H1V0(self, mock_calc_per_date_eais):
+ mock_calc_per_date_eais.return_value = 1
+ result = self.calc_risk_period.per_date_eai_H1V0
+ actual_arg0 = mock_calc_per_date_eais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H1V0
+
+ actual_arg1 = mock_calc_per_date_eais.call_args[0][1]
+ # per_date_eai_H1V0 passes the future hazard's frequency
+ expected_arg1 = self.calc_risk_period.snapshot_end.hazard.frequency
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_eais")
+ def test_per_date_eai_H0V1(self, mock_calc_per_date_eais):
+ mock_calc_per_date_eais.return_value = 1
+ result = self.calc_risk_period.per_date_eai_H0V1
+
+ actual_arg0 = mock_calc_per_date_eais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H0V1
+
+ actual_arg1 = mock_calc_per_date_eais.call_args[0][1]
+ expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_eais")
+ def test_per_date_eai_H1V1(self, mock_calc_per_date_eais):
+ mock_calc_per_date_eais.return_value = 1
+ result = self.calc_risk_period.per_date_eai_H1V1
+ actual_arg0 = mock_calc_per_date_eais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H1V1
+
+ actual_arg1 = mock_calc_per_date_eais.call_args[0][1]
+ # per_date_eai_H1V1 passes the future hazard's frequency
+ expected_arg1 = self.calc_risk_period.snapshot_end.hazard.frequency
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_aais")
+ def test_per_date_aai_H0V0(self, mock_calc_per_date_aais):
+ mock_calc_per_date_aais.return_value = 1
+ result = self.calc_risk_period.per_date_aai_H0V0
+
+ actual_arg0 = mock_calc_per_date_aais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.per_date_eai_H0V0
+ self.assertEqual(result, 1)
+ np.testing.assert_array_equal(actual_arg0, expected_arg0)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_aais")
+ def test_per_date_aai_H1V0(self, mock_calc_per_date_aais):
+ mock_calc_per_date_aais.return_value = 1
+ result = self.calc_risk_period.per_date_aai_H1V0
+
+ actual_arg0 = mock_calc_per_date_aais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.per_date_eai_H1V0
+ self.assertEqual(result, 1)
+ np.testing.assert_array_equal(actual_arg0, expected_arg0)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_aais")
+ def test_per_date_aai_H0V1(self, mock_calc_per_date_aais):
+ mock_calc_per_date_aais.return_value = 1
+ result = self.calc_risk_period.per_date_aai_H0V1
+
+ actual_arg0 = mock_calc_per_date_aais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.per_date_eai_H0V1
+ self.assertEqual(result, 1)
+ np.testing.assert_array_equal(actual_arg0, expected_arg0)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_aais")
+ def test_per_date_aai_H1V1(self, mock_calc_per_date_aais):
+ mock_calc_per_date_aais.return_value = 1
+ result = self.calc_risk_period.per_date_aai_H1V1
+
+ actual_arg0 = mock_calc_per_date_aais.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.per_date_eai_H1V1
+ self.assertEqual(result, 1)
+ np.testing.assert_array_equal(actual_arg0, expected_arg0)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_rps")
+ def test_per_date_return_periods_H0V0(self, mock_calc_per_date_rps):
+ mock_calc_per_date_rps.return_value = 1
+ result = self.calc_risk_period.per_date_return_periods_H0V0([10, 50])
+
+ actual_arg0 = mock_calc_per_date_rps.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H0V0
+
+ actual_arg1 = mock_calc_per_date_rps.call_args[0][1]
+ expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency
+
+ # the return periods are the fourth positional argument of
+ # calc_per_date_rps (the third is the frequency unit string)
+ actual_arg2 = mock_calc_per_date_rps.call_args[0][3]
+ expected_arg2 = [10, 50]
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(actual_arg2, expected_arg2)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_rps")
+ def test_per_date_return_periods_H1V0(self, mock_calc_per_date_rps):
+ mock_calc_per_date_rps.return_value = 1
+ result = self.calc_risk_period.per_date_return_periods_H1V0([10, 50])
+
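+ # As in test_per_date_return_periods_H0V0, the return periods are
+ # captured from the fourth positional argument of calc_per_date_rps.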
+
+ actual_arg0 = mock_calc_per_date_rps.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H1V0
+
+ actual_arg1 = mock_calc_per_date_rps.call_args[0][1]
+ expected_arg1 = self.calc_risk_period.snapshot_end.hazard.frequency
+
+ actual_arg2 = mock_calc_per_date_rps.call_args[0][3]
+ expected_arg2 = [10, 50]
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(actual_arg2, expected_arg2)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_rps")
+ def test_per_date_return_periods_H0V1(self, mock_calc_per_date_rps):
+ mock_calc_per_date_rps.return_value = 1
+ result = self.calc_risk_period.per_date_return_periods_H0V1([10, 50])
+
+ actual_arg0 = mock_calc_per_date_rps.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H0V1
+
+ actual_arg1 = mock_calc_per_date_rps.call_args[0][1]
+ expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency
+
+ actual_arg2 = mock_calc_per_date_rps.call_args[0][3]
+ expected_arg2 = [10, 50]
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(actual_arg2, expected_arg2)
+ self.assertEqual(result, 1)
+
+ @patch("climada.trajectories.riskperiod.calc_per_date_rps")
+ def test_per_date_return_periods_H1V1(self, mock_calc_per_date_rps):
+ mock_calc_per_date_rps.return_value = 1
+ result = self.calc_risk_period.per_date_return_periods_H1V1([10, 50])
+
+ actual_arg0 = mock_calc_per_date_rps.call_args[0][0]
+ expected_arg0 = self.calc_risk_period.imp_mats_H1V1
+
+ actual_arg1 = mock_calc_per_date_rps.call_args[0][1]
+ expected_arg1 = self.calc_risk_period.snapshot_end.hazard.frequency
+
+ actual_arg2 = mock_calc_per_date_rps.call_args[0][3]
+ expected_arg2 = [10, 50]
+
+ assert_sparse_matrix_array_equal(actual_arg0, expected_arg0)
+ np.testing.assert_array_equal(actual_arg1, expected_arg1)
+ self.assertEqual(actual_arg2, expected_arg2)
+ self.assertEqual(result, 1)
+
+ @patch.object(CalcRiskMetricsPeriod, "calc_eai_gdf", return_value=1)
+ def test_eai_gdf(self, mock_calc_eai_gdf):
+ result = self.calc_risk_period.eai_gdf
+ mock_calc_eai_gdf.assert_called_once()
+ self.assertEqual(result, 1)
+
+ # The following tests exercise the module-level helper functions directly
+ def test_calc_per_date_eais(self):
+ results = calc_per_date_eais(
+ imp_mats=[
+ csr_matrix(
+ [
+ [1, 1, 1],
+ [2, 2, 2],
+ ]
+ ),
+ csr_matrix(
+ [
+ [2, 0, 1],
+ [2, 0, 2],
+ ]
+ ),
+ ],
+ frequency=np.array([1, 1]),
+ )
+ np.testing.assert_array_equal(results, np.array([[3, 3, 3], [4, 0, 3]]))
+
+ def test_calc_per_date_aais(self):
+ results = calc_per_date_aais(np.array([[3, 3, 3], [4, 0, 3]]))
+ np.testing.assert_array_equal(results, np.array([9, 7]))
+
+ def test_calc_freq_curve(self):
+ results = calc_freq_curve(
+ imp_mat_intrpl=csr_matrix(
+ [
+ [0.1, 0, 0],
+ [1, 0, 0],
+ [10, 0, 0],
+ ]
+ ),
+ frequency=np.array([0.5, 0.05, 0.005]),
+ frequency_unit="1/year",  # required by the signature; label only
+ return_per=[10, 50, 100],
+ )
+ # calc_freq_curve returns an ImpactFreqCurve; compare its impact values
+ # (the expected values are rounded, hence the tolerance)
+ np.testing.assert_allclose(
+ results.impact, np.array([0.55045, 2.575, 5.05]), rtol=1e-5
+ )
+
+ def test_calc_per_date_rps(self):
+ base_imp = csr_matrix(
+ [
+ [0.1, 0, 0],
+ [1, 0, 0],
+ [10, 0, 0],
+ ]
+ )
+ results = calc_per_date_rps(
+ [base_imp, base_imp * 2, base_imp * 4],
+ frequency=np.array([0.5, 0.05, 0.005]),
+ frequency_unit="1/year",  # required by the signature; label only
+ return_periods=[10, 50, 100],
+ )
+ np.testing.assert_allclose(
+ results,
+ np.array(
+ [[0.55045, 2.575, 5.05], [1.1009, 5.15, 10.1], [2.2018, 10.3, 20.2]]
+ ),
+ rtol=1e-5,
+ )
+
+
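+# The sparse-matrix assertions above rely on `assert_sparse_matrix_array_equal`.
+# A minimal sketch of such a helper (an assumption: the actual definition may
+# live elsewhere in this module):
+#
+# def assert_sparse_matrix_array_equal(actual_mats, expected_mats):
+#     """Compare two iterables of scipy sparse matrices element-wise."""
+#     for act, exp in zip(actual_mats, expected_mats):
+#         np.testing.assert_array_equal(act.toarray(), exp.toarray())
+
+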
+class TestCalcRiskPeriod_LowLevel(unittest.TestCase): + def setUp(self): + # Create mock objects for testing + self.calc_risk_period = MagicMock(spec=CalcRiskMetricsPeriod) + + # Little trick to bind the mocked object method to the real one + self.calc_risk_period.calc_eai = types.MethodType( + CalcRiskMetricsPeriod.calc_eai, self.calc_risk_period + ) + + self.calc_risk_period.calc_eai_gdf = types.MethodType( + CalcRiskMetricsPeriod.calc_eai_gdf, self.calc_risk_period + ) + self.calc_risk_period.calc_aai_metric = types.MethodType( + CalcRiskMetricsPeriod.calc_aai_metric, self.calc_risk_period + ) + + self.calc_risk_period.calc_aai_per_group_metric = types.MethodType( + CalcRiskMetricsPeriod.calc_aai_per_group_metric, self.calc_risk_period + ) + self.calc_risk_period.calc_return_periods_metric = types.MethodType( + CalcRiskMetricsPeriod.calc_return_periods_metric, self.calc_risk_period + ) + self.calc_risk_period.calc_risk_components_metric = types.MethodType( + CalcRiskMetricsPeriod.calc_risk_contributions_metric, self.calc_risk_period + ) + self.calc_risk_period.apply_measure = types.MethodType( + CalcRiskMetricsPeriod.apply_measure, self.calc_risk_period + ) + + self.calc_risk_period.per_date_eai_H0V0 = np.array( + [[1, 0, 1], [1, 2, 0], [3, 3, 3]] + ) + self.calc_risk_period.per_date_eai_H1V0 = np.array( + [[2, 0, 2], [2, 4, 0], [12, 6, 6]] + ) + self.calc_risk_period.per_date_aai_H0V0 = np.array([2, 3, 9]) + self.calc_risk_period.per_date_aai_H1V0 = np.array([4, 6, 24]) + + self.calc_risk_period.per_date_eai_H0V1 = np.array( + [[1, 0, 1], [1, 2, 0], [3, 3, 3]] + ) + self.calc_risk_period.per_date_eai_H1V1 = np.array( + [[2, 0, 2], [2, 4, 0], [12, 6, 6]] + ) + self.calc_risk_period.per_date_aai_H0V1 = np.array([2, 3, 9]) + self.calc_risk_period.per_date_aai_H1V1 = np.array([4, 6, 24]) + + self.calc_risk_period.date_idx = pd.PeriodIndex( + ["2020-01-01", "2025-01-01", "2030-01-01"], name=DATE_COL_NAME, freq="5Y" + ) + self.calc_risk_period.snapshot_start.exposure.gdf = gpd.GeoDataFrame( + { + GROUP_ID_COL_NAME: [1, 2, 2], + "geometry": [Point(0, 0), Point(1, 1), Point(2, 2)], + "value": [10, 10, 20], + } + ) + self.calc_risk_period.snapshot_end.exposure.gdf = gpd.GeoDataFrame( + { + GROUP_ID_COL_NAME: [1, 2, 2], + "geometry": [Point(0, 0), Point(1, 1), Point(2, 2)], + "value": [10, 10, 20], + } + ) + self.calc_risk_period.measure = MagicMock(spec=Measure) + self.calc_risk_period.measure.name = "dummy_measure" + + def test_calc_eai(self): + # Mock the return values of interp_over_hazard_dim + self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.side_effect = [ + "V0_interpolated_data", # First call (for per_date_eai_V0) + "V1_interpolated_data", # Second call (for per_date_eai_V1) + ] + # Mock the return value of interp_over_vulnerability_dim + self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.return_value = ( + "final_eai_result" + ) + + result = self.calc_risk_period.calc_eai() + + # Assert that interp_over_hazard_dim was called with the correct arguments + self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.assert_has_calls( + [ + call( + self.calc_risk_period.per_date_eai_H0V0, + self.calc_risk_period.per_date_eai_H1V0, + ), + call( + self.calc_risk_period.per_date_eai_H0V1, + self.calc_risk_period.per_date_eai_H1V1, + ), + ] + ) + + # Assert that interp_over_vulnerability_dim was called with the results of interp_over_hazard_dim + 
self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.assert_called_once_with( + "V0_interpolated_data", "V1_interpolated_data" + ) + + # Assert the final returned value + self.assertEqual(result, "final_eai_result") + + def test_calc_eai_gdf(self): + self.calc_risk_period._groups_id = np.array([0]) + expected_risk = np.array([[1.0, 1.5, 12], [0, 3, 6], [1, 0, 6]]) + self.calc_risk_period.per_date_eai = expected_risk + result = self.calc_risk_period.calc_eai_gdf() + expected_columns = { + GROUP_COL_NAME, + COORD_ID_COL_NAME, + DATE_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + } + self.assertTrue(expected_columns.issubset(set(result.columns))) + self.assertTrue((result[METRIC_COL_NAME] == EAI_METRIC_NAME).all()) + self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) + # Check calculated risk values by coord_id, date + actual_risk = result[RISK_COL_NAME].values + np.testing.assert_allclose(expected_risk.T.flatten(), actual_risk) + + def test_calc_aai_metric(self): + expected_aai = np.array([2, 4.5, 24]) + self.calc_risk_period.per_date_aai = expected_aai + self.calc_risk_period._groups_id = np.array([0]) + result = self.calc_risk_period.calc_aai_metric() + expected_columns = { + GROUP_COL_NAME, + DATE_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + } + self.assertTrue(expected_columns.issubset(set(result.columns))) + self.assertTrue((result[METRIC_COL_NAME] == AAI_METRIC_NAME).all()) + self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) + + # Check calculated risk values by coord_id, date + actual_risk = result[RISK_COL_NAME].values + np.testing.assert_allclose(expected_aai, actual_risk) + + def test_calc_aai_per_group_metric(self): + self.calc_risk_period._group_id_E0 = np.array([1, 1, 2]) + self.calc_risk_period._group_id_E1 = np.array([2, 2, 2]) + self.calc_risk_period._groups_id = np.array([1, 2]) + self.calc_risk_period.eai_gdf = pd.DataFrame( + { + DATE_COL_NAME: pd.PeriodIndex( + ["2020-01-01"] * 3 + ["2025-01-01"] * 3 + ["2030-01-01"] * 3, + name=DATE_COL_NAME, + freq="5Y", + ), + COORD_ID_COL_NAME: [0, 1, 2, 0, 1, 2, 0, 1, 2], + GROUP_COL_NAME: [1, 1, 2, 1, 1, 2, 1, 1, 2], + RISK_COL_NAME: [2, 3, 4, 5, 6, 7, 8, 9, 10], + METRIC_COL_NAME: [EAI_METRIC_NAME, EAI_METRIC_NAME, EAI_METRIC_NAME] + * 3, + MEASURE_COL_NAME: ["dummy_measure", "dummy_measure", "dummy_measure"] + * 3, + } + ) + self.calc_risk_period.eai_gdf[GROUP_COL_NAME] = self.calc_risk_period.eai_gdf[ + GROUP_COL_NAME + ].astype("category") + result = self.calc_risk_period.calc_aai_per_group_metric() + expected_columns = { + GROUP_COL_NAME, + DATE_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + } + self.assertTrue(expected_columns.issubset(set(result.columns))) + self.assertTrue((result[METRIC_COL_NAME] == AAI_METRIC_NAME).all()) + self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) + # Check calculated risk values by coord_id, date + expected_risk = np.array([5, 5, 6.6, 13.6, 3.4, 27]) + actual_risk = result[RISK_COL_NAME].values + np.testing.assert_allclose(expected_risk, actual_risk) + + def test_calc_return_periods_metric(self): + self.calc_risk_period._groups_id = np.array([0]) + self.calc_risk_period.per_date_return_periods_H0V0.return_value = "H0V0" + self.calc_risk_period.per_date_return_periods_H1V0.return_value = "H1V0" + self.calc_risk_period.per_date_return_periods_H0V1.return_value = "H0V1" + self.calc_risk_period.per_date_return_periods_H1V1.return_value = "H1V1" + # Mock the 
return values of interp_over_hazard_dim + self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.side_effect = [ + "V0_interpolated_data", # First call (for per_date_rp_V0) + "V1_interpolated_data", # Second call (for per_date_rp_V1) + ] + # Mock the return value of interp_over_vulnerability_dim + self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.return_value = np.array( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + ) + + result = self.calc_risk_period.calc_return_periods_metric([10, 20, 30]) + + # Assert that interp_over_hazard_dim was called with the correct arguments + self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.assert_has_calls( + [call("H0V0", "H1V0"), call("H0V1", "H1V1")] + ) + + # Assert that interp_over_vulnerability_dim was called with the results of interp_over_hazard_dim + self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.assert_called_once_with( + "V0_interpolated_data", "V1_interpolated_data" + ) + + # Assert the final returned value + + expected_columns = { + GROUP_COL_NAME, + DATE_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + } + self.assertTrue(expected_columns.issubset(set(result.columns))) + self.assertTrue( + all(result[METRIC_COL_NAME].unique() == ["rp_10", "rp_20", "rp_30"]) + ) + self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) + + # Check calculated risk values by rp, date + np.testing.assert_allclose( + result[RISK_COL_NAME].values, np.array([1, 4, 7, 2, 5, 8, 3, 6, 9]) + ) + + def test_calc_risk_components_metric(self): + self.calc_risk_period._groups_id = np.array([0]) + self.calc_risk_period.per_date_aai_H0V0 = np.array([1, 3, 5]) + self.calc_risk_period.per_date_aai_E0H0V0 = np.array([1, 1, 1]) + self.calc_risk_period.per_date_aai_E0H1V0 = np.array( + [2, 2, 2] + ) # Haz change doubles damages in fut + self.calc_risk_period.per_date_aai_E0H0V1 = np.array( + [3, 3, 3] + ) # Vul change triples damages in fut + self.calc_risk_period.per_date_aai = np.array([1, 6, 10]) + + # Mock the return values of interp_over_hazard_dim + self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.return_value = np.array( + [1, 1.5, 2] + ) + + # Mock the return value of interp_over_vulnerability_dim + self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.return_value = np.array( + [1, 2, 3] + ) + + result = self.calc_risk_period.calc_risk_components_metric() + + # Assert that interp_over_hazard_dim was called with the correct arguments + self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.assert_called_once_with( + self.calc_risk_period.per_date_aai_E0H0V0, + self.calc_risk_period.per_date_aai_E0H1V0, + ) + + # Assert that interp_over_vulnerability_dim was called with the results of interp_over_hazard_dim + self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.assert_called_once_with( + self.calc_risk_period.per_date_aai_E0H0V0, + self.calc_risk_period.per_date_aai_E0H0V1, + ) + + # Assert the final returned value + expected_columns = { + GROUP_COL_NAME, + DATE_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + } + self.assertTrue(expected_columns.issubset(set(result.columns))) + self.assertTrue( + all( + result[METRIC_COL_NAME].unique() + == [ + CONTRIBUTION_BASE_RISK_NAME, + CONTRIBUTION_EXPOSURE_NAME, + CONTRIBUTION_HAZARD_NAME, + CONTRIBUTION_VULNERABILITY_NAME, + CONTRIBUTION_INTERACTION_TERM_NAME, + ] + ) + ) + self.assertTrue((result[MEASURE_COL_NAME] == 
"dummy_measure").all()) + + np.testing.assert_allclose( + result[RISK_COL_NAME].values, + np.array([1.0, 1.0, 1.0, 0, 2.0, 4.0, 0, 0.5, 1.0, 0, 1, 2, 0, 1.5, 2.0]), + ) + + @patch("climada.trajectories.riskperiod.CalcRiskMetricsPeriod") + def test_apply_measure(self, mock_CalcRiskPeriod): + mock_CalcRiskPeriod.return_value = MagicMock(spec=CalcRiskMetricsPeriod) + self.calc_risk_period.snapshot_start.apply_measure.return_value = 2 + self.calc_risk_period.snapshot_end.apply_measure.return_value = 3 + result = self.calc_risk_period.apply_measure(self.calc_risk_period.measure) + self.assertEqual(result.measure, self.calc_risk_period.measure) + mock_CalcRiskPeriod.assert_called_with( + 2, + 3, + self.calc_risk_period.time_resolution, + self.calc_risk_period.interpolation_strategy, + self.calc_risk_period.impact_computation_strategy, + ) + + +def assert_sparse_matrix_array_equal(expected_array, actual_array): + """ + Compares two numpy arrays where elements are sparse matrices. + Uses numpy testing for robust comparison of the sparse matrix internals. + """ + if len(expected_array) != len(actual_array): + raise AssertionError( + f"Expected array length {len(expected_array)} but got {len(actual_array)}" + ) + + for i, (expected_mat, actual_mat) in enumerate(zip(expected_array, actual_array)): + if not (issparse(expected_mat) and issparse(actual_mat)): + raise TypeError(f"Element at index {i} is not a sparse matrix.") + + # Robustly compare the underlying data + np.testing.assert_array_equal( + expected_mat.data, + actual_mat.data, + err_msg=f"Data differs at matrix index {i}", + ) + np.testing.assert_array_equal( + expected_mat.indices, + actual_mat.indices, + err_msg=f"Indices differ at matrix index {i}", + ) + np.testing.assert_array_equal( + expected_mat.indptr, + actual_mat.indptr, + err_msg=f"Indptr differs at matrix index {i}", + ) + # You may also want to assert equal shapes: + assert ( + expected_mat.shape == actual_mat.shape + ), f"Shape differs at matrix index {i}" + + +if __name__ == "__main__": + TESTS = unittest.TestLoader().loadTestsFromTestCase( + TestCalcRiskMetricsPeriod_TopLevel + ) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestCalcRiskMetricsPoints) + ) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestCalcRiskPeriod_LowLevel) + ) + unittest.TextTestRunner(verbosity=2).run(TESTS) From 40f37b07615bce14462e9fcd60465f3ddb32f790 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 11:05:52 +0100 Subject: [PATCH 05/37] cherry pick, renaming --- climada/trajectories/calc_risk_metrics.py | 895 +---------- climada/trajectories/static_trajectory.py | 316 ++++ .../test/test_calc_risk_metrics.py | 448 ++++++ climada/trajectories/test/test_riskperiod.py | 1389 ----------------- climada/trajectories/test/test_trajectory.py | 326 ++++ climada/trajectories/trajectory.py | 268 ++++ 6 files changed, 1361 insertions(+), 2281 deletions(-) create mode 100644 climada/trajectories/static_trajectory.py create mode 100644 climada/trajectories/test/test_calc_risk_metrics.py delete mode 100644 climada/trajectories/test/test_riskperiod.py create mode 100644 climada/trajectories/test/test_trajectory.py create mode 100644 climada/trajectories/trajectory.py diff --git a/climada/trajectories/calc_risk_metrics.py b/climada/trajectories/calc_risk_metrics.py index 04846d18d3..2d325fb495 100644 --- a/climada/trajectories/calc_risk_metrics.py +++ b/climada/trajectories/calc_risk_metrics.py @@ -16,9 +16,9 @@ --- -This modules implements the CalcRiskPeriod class. 
+This module implements the CalcRiskMetrics classes.
 
-CalcRiskPeriod are used to compute risk metrics (and intermediate requirements)
+The CalcRiskMetrics classes are used to compute risk metrics (and intermediate requirements)
 in between two snapshots.
 
 As these computations are not always required and can become "heavy", a so called "lazy"
@@ -26,28 +26,17 @@
 
 """
 
-import datetime
-import itertools
 import logging
 
 import numpy as np
 import pandas as pd
-from scipy.sparse import csr_matrix
 
-from climada.engine.impact import Impact, ImpactFreqCurve
-from climada.engine.impact_calc import ImpactCalc
+from climada.engine.impact import Impact
 from climada.entity.measures.base import Measure
 from climada.trajectories.constants import (
     AAI_METRIC_NAME,
-    CONTRIBUTION_BASE_RISK_NAME,
-    CONTRIBUTION_EXPOSURE_NAME,
-    CONTRIBUTION_HAZARD_NAME,
-    CONTRIBUTION_INTERACTION_TERM_NAME,
-    CONTRIBUTION_TOTAL_RISK_NAME,
-    CONTRIBUTION_VULNERABILITY_NAME,
     COORD_ID_COL_NAME,
     DATE_COL_NAME,
-    DEFAULT_PERIOD_INDEX_NAME,
    EAI_METRIC_NAME,
     GROUP_COL_NAME,
     GROUP_ID_COL_NAME,
@@ -59,21 +48,12 @@
     UNIT_COL_NAME,
 )
 from climada.trajectories.impact_calc_strat import ImpactComputationStrategy
-from climada.trajectories.interpolation import (
-    InterpolationStrategyBase,
-    linear_interp_arrays,
-)
 from climada.trajectories.snapshot import Snapshot
 
 LOGGER = logging.getLogger(__name__)
 
 __all__ = [
     "CalcRiskMetricsPoints",
-    "CalcRiskMetricsPeriod",
-    "calc_per_date_aais",
-    "calc_per_date_eais",
-    "calc_per_date_rps",
-    "calc_freq_curve",
 ]
 
@@ -343,872 +323,3 @@ def apply_measure(self, measure: Measure) -> "CalcRiskMetricsPoints":
         risk_period.measure = measure
 
         return risk_period
-
-
-class CalcRiskMetricsPeriod:
-    """This class handles the computation of impacts for a risk period.
-
-    This object handles the interpolations and computations of risk metrics in
-    between two given snapshots, along a DateTimeIndex build from either a
-    `time_resolution` (which must be a valid "freq" string to build a DateTimeIndex)
-    and defaults to "Y" (start of the year) or `time_points` integer argument, in which case
-    the DateTimeIndex will have that many periods.
-
-    Note that most attribute like members are properties with their own docstring.
-
-    Attributes
-    ----------
-
-    date_idx: pd.PeriodIndex
-        The date index for the different interpolated points between the two snapshots
-    interpolation_strategy: InterpolationStrategy, optional
-        The approach used to interpolate impact matrices in between the two snapshots, linear by default.
-    impact_computation_strategy: ImpactComputationStrategy, optional
-        The method used to calculate the impact from the (Haz,Exp,Vul) of the two snapshots.
-        Defaults to ImpactCalc
-    measure: Measure, optional
-        The measure to apply to both snapshots. Defaults to None.
-
-    Notes
-    -----
-
-    This class is intended for internal computation.
-    """
-
-    def __init__(
-        self,
-        snapshot0: Snapshot,
-        snapshot1: Snapshot,
-        time_resolution: str,
-        interpolation_strategy: InterpolationStrategyBase,
-        impact_computation_strategy: ImpactComputationStrategy,
-    ):
-        """Initialize a new `CalcRiskMetricsPeriod`
-
-        This initializes and instantiate a new `CalcRiskMetricsPeriod` object.
-        No computation is done at initialisation and only done "just in time".
-
-        Parameters
-        ----------
-        snapshot0 : Snapshot
-            The `Snapshot` at the start of the risk period.
-        snapshot1 : Snapshot
-            The `Snapshot` at the end of the risk period.
-        time_resolution : str, optional
-            One of pandas date offset strings or corresponding objects.
See :func:`pandas.period_range`. - time_points : int, optional - Number of periods to generate for the PeriodIndex. - interpolation_strategy: InterpolationStrategy, optional - The approach used to interpolate impact matrices in between the two snapshots, linear by default. - impact_computation_strategy: ImpactComputationStrategy, optional - The method used to calculate the impact from the (Haz,Exp,Vul) of the two snapshots. - Defaults to ImpactCalc - - """ - - LOGGER.debug("Instantiating new CalcRiskPeriod.") - self._snapshot0 = snapshot0 - self._snapshot1 = snapshot1 - self.date_idx = self._set_date_idx( - date1=snapshot0.date, - date2=snapshot1.date, - freq=time_resolution, - name=DEFAULT_PERIOD_INDEX_NAME, - ) - self.interpolation_strategy = interpolation_strategy - self.impact_computation_strategy = impact_computation_strategy - self.measure = None # Only possible to set with apply_measure to make sure snapshots are consistent - - self._group_id_E0 = ( - np.array(self.snapshot_start.exposure.gdf[GROUP_ID_COL_NAME].values) - if GROUP_ID_COL_NAME in self.snapshot_start.exposure.gdf.columns - else np.array([]) - ) - self._group_id_E1 = ( - np.array(self.snapshot_end.exposure.gdf[GROUP_ID_COL_NAME].values) - if GROUP_ID_COL_NAME in self.snapshot_end.exposure.gdf.columns - else np.array([]) - ) - self._groups_id = np.unique( - np.concatenate([self._group_id_E0, self._group_id_E1]) - ) - - def _reset_impact_data(self): - """Util method that resets computed data, for instance when changing the time resolution.""" - for fut in list(itertools.product([0, 1], repeat=3)): - setattr(self, f"_E{fut[0]}H{fut[1]}V{fut[2]}", None) - - for fut in list(itertools.product([0, 1], repeat=2)): - setattr(self, f"_imp_mats_H{fut[0]}V{fut[1]}", None) - setattr(self, f"_per_date_eai_H{fut[0]}V{fut[1]}", None) - setattr(self, f"_per_date_aai_H{fut[0]}V{fut[1]}", None) - - self._eai_gdf = None - self._per_date_eai = None - self._per_date_aai = None - self._per_date_return_periods_H0, self._per_date_return_periods_H1 = None, None - - @staticmethod - def _set_date_idx( - date1: str | pd.Timestamp | datetime.date, - date2: str | pd.Timestamp | datetime.date, - freq: str | None = None, - name: str | None = None, - ) -> pd.PeriodIndex: - """Generate a date range index based on the provided parameters. - - Parameters - ---------- - date1 : str or pd.Timestamp or datetime.date - The start date of the period range. - date2 : str or pd.Timestamp or datetime.date - The end date of the period range. - freq : str, optional - Frequency string for the period range. - See `here `_. - name : str, optional - Name of the resulting period range index. - - Returns - ------- - pd.PeriodIndex - A PeriodIndex representing the date range. - - Raises - ------ - ValueError - If the number of periods and frequency given to period_range are inconsistent. 
- """ - ret = pd.period_range( - date1, - date2, - freq=freq, # type: ignore - name=name, - ) - return ret - - @property - def snapshot_start(self) -> Snapshot: - """The `Snapshot` at the start of the risk period.""" - return self._snapshot0 - - @property - def snapshot_end(self) -> Snapshot: - """The `Snapshot` at the end of the risk period.""" - return self._snapshot1 - - @property - def date_idx(self) -> pd.PeriodIndex: - """The pandas PeriodIndex representing the time dimension of the risk period.""" - return self._date_idx - - @date_idx.setter - def date_idx(self, value, /): - if not isinstance(value, pd.PeriodIndex): - raise ValueError("Not a PeriodIndex") - - self._date_idx = value # Avoids weird hourly data - self._time_points = len(self.date_idx) - self._time_resolution = self.date_idx.freq - self._reset_impact_data() - - @property - def time_points(self) -> int: - """The numbers of different time points (periods) in the risk period.""" - return self._time_points - - @property - def time_resolution(self) -> str: - """The time resolution of the risk periods, expressed as a pandas period frequency string.""" - return self._time_resolution # type: ignore - - @time_resolution.setter - def time_resolution(self, value, /): - self.date_idx = pd.period_range( - self.snapshot_start.date, - self.snapshot_end.date, - freq=value, - name=DEFAULT_PERIOD_INDEX_NAME, - ) - - @property - def interpolation_strategy(self) -> InterpolationStrategyBase: - """The approach used to interpolate impact matrices in between the two snapshots.""" - return self._interpolation_strategy - - @interpolation_strategy.setter - def interpolation_strategy(self, value, /): - if not isinstance(value, InterpolationStrategyBase): - raise ValueError("Not an interpolation strategy") - - self._interpolation_strategy = value - self._reset_impact_data() - - @property - def impact_computation_strategy(self) -> ImpactComputationStrategy: - """The method used to calculate the impact from the (Haz,Exp,Vul) of the two snapshots.""" - return self._impact_computation_strategy - - @impact_computation_strategy.setter - def impact_computation_strategy(self, value, /): - if not isinstance(value, ImpactComputationStrategy): - raise ValueError("Not an impact computation strategy") - - self._impact_computation_strategy = value - self._reset_impact_data() - - ##### Impact objects cube / Risk Cube ##### - - @lazy_property - def E0H0V0(self) -> Impact: - """Impact object corresponding to starting exposure, starting hazard and starting vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_start.exposure, - self.snapshot_start.hazard, - self.snapshot_start.impfset, - ) - - @lazy_property - def E1H0V0(self) -> Impact: - """Impact object corresponding to future exposure, starting hazard and starting vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_end.exposure, - self.snapshot_start.hazard, - self.snapshot_start.impfset, - ) - - @lazy_property - def E0H1V0(self) -> Impact: - """Impact object corresponding to starting exposure, future hazard and starting vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_start.exposure, - self.snapshot_end.hazard, - self.snapshot_start.impfset, - ) - - @lazy_property - def E1H1V0(self) -> Impact: - """Impact object corresponding to future exposure, future hazard and starting vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_end.exposure, - 
self.snapshot_end.hazard, - self.snapshot_start.impfset, - ) - - @lazy_property - def E0H0V1(self) -> Impact: - """Impact object corresponding to starting exposure, starting hazard and future vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_start.exposure, - self.snapshot_start.hazard, - self.snapshot_end.impfset, - ) - - @lazy_property - def E1H0V1(self) -> Impact: - """Impact object corresponding to future exposure, starting hazard and future vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_end.exposure, - self.snapshot_start.hazard, - self.snapshot_end.impfset, - ) - - @lazy_property - def E0H1V1(self) -> Impact: - """Impact object corresponding to starting exposure, future hazard and future vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_start.exposure, - self.snapshot_end.hazard, - self.snapshot_end.impfset, - ) - - @lazy_property - def E1H1V1(self) -> Impact: - """Impact object corresponding to future exposure, future hazard and future vulnerability.""" - return self.impact_computation_strategy.compute_impacts( - self.snapshot_end.exposure, - self.snapshot_end.hazard, - self.snapshot_end.impfset, - ) - - ############################### - - ### Impact Matrices arrays #### - - def _interp_mats(self, start_attr, end_attr) -> list: - """Helper to reduce repetition in impact matrix interpolation.""" - start = getattr(self, start_attr).imp_mat - end = getattr(self, end_attr).imp_mat - return self.interpolation_strategy.interp_over_exposure_dim( - start, end, self.time_points - ) - - @property - def imp_mats_H0V0(self) -> list: - """List of `time_points` impact matrices with changing exposure, starting hazard and starting vulnerability.""" - return self._interp_mats("E0H0V0", "E1H0V0") - - @property - def imp_mats_H1V0(self) -> list: - """List of `time_points` impact matrices with changing exposure, future hazard and starting vulnerability.""" - return self._interp_mats("E0H1V0", "E1H1V0") - - @property - def imp_mats_H0V1(self) -> list: - """List of `time_points` impact matrices with changing exposure, starting hazard and future vulnerability.""" - return self._interp_mats("E0H0V1", "E1H0V1") - - @property - def imp_mats_H1V1(self) -> list: - """List of `time_points` impact matrices with changing exposure, future hazard and future vulnerability.""" - return self._interp_mats("E0H1V1", "E1H1V1") - - @property - def imp_mats_E0H0V0(self) -> list: - """List of `time_points` impact matrices with base exposure, base hazard and base vulnerability.""" - return self._interp_mats("E0H0V0", "E0H0V0") - - @property - def imp_mats_E0H1V0(self) -> list: - """List of `time_points` impact matrices with base exposure, future hazard and base vulnerability.""" - return self._interp_mats("E0H1V0", "E0H1V0") - - @property - def imp_mats_E0H0V1(self) -> list: - """List of `time_points` impact matrices with base exposure, base hazard and base vulnerability.""" - return self._interp_mats("E0H0V1", "E0H0V1") - - ############################### - - ########## Core EAI ########### - - @property - def per_date_eai_H0V0(self) -> np.ndarray: - """Expected annual impacts for changing exposure, starting hazard and starting vulnerability.""" - return calc_per_date_eais( - self.imp_mats_H0V0, self.snapshot_start.hazard.frequency - ) - - @property - def per_date_eai_H1V0(self) -> np.ndarray: - """Expected annual impacts for changing exposure, future hazard and starting vulnerability.""" - 
return calc_per_date_eais( - self.imp_mats_H1V0, self.snapshot_end.hazard.frequency - ) - - @property - def per_date_eai_H0V1(self) -> np.ndarray: - """Expected annual impacts for changing exposure, starting hazard and future vulnerability.""" - return calc_per_date_eais( - self.imp_mats_H0V1, self.snapshot_start.hazard.frequency - ) - - @property - def per_date_eai_H1V1(self) -> np.ndarray: - """Expected annual impacts for changing exposure, future hazard and future vulnerability.""" - return calc_per_date_eais( - self.imp_mats_H1V1, self.snapshot_end.hazard.frequency - ) - - @property - def per_date_eai_E0H0V0(self) -> np.ndarray: - """Expected annual impacts for base exposure, base hazard and base vulnerability.""" - return calc_per_date_eais( - self.imp_mats_E0H0V0, self.snapshot_start.hazard.frequency - ) - - @property - def per_date_eai_E0H1V0(self) -> np.ndarray: - """Expected annual impacts for base exposure, future hazard and base vulnerability.""" - return calc_per_date_eais( - self.imp_mats_E0H1V0, self.snapshot_end.hazard.frequency - ) - - @property - def per_date_eai_E0H0V1(self) -> np.ndarray: - """Expected annual impacts for base exposure, future hazard and base vulnerability.""" - return calc_per_date_eais( - self.imp_mats_E0H0V1, self.snapshot_start.hazard.frequency - ) - - ################################## - - ######### Core AAIs ########## - - @property - def per_date_aai_H0V0(self) -> np.ndarray: - """Average annual impacts for changing exposure, starting hazard and starting vulnerability.""" - return calc_per_date_aais(self.per_date_eai_H0V0) - - @property - def per_date_aai_H1V0(self) -> np.ndarray: - """Average annual impacts for changing exposure, future hazard and starting vulnerability.""" - return calc_per_date_aais(self.per_date_eai_H1V0) - - @property - def per_date_aai_H0V1(self) -> np.ndarray: - """Average annual impacts for changing exposure, starting hazard and future vulnerability.""" - return calc_per_date_aais(self.per_date_eai_H0V1) - - @property - def per_date_aai_H1V1(self) -> np.ndarray: - """Average annual impacts for changing exposure, future hazard and future vulnerability.""" - return calc_per_date_aais(self.per_date_eai_H1V1) - - @property - def per_date_aai_E0H0V0(self) -> np.ndarray: - """Average annual impacts for base exposure, base hazard and base vulnerability.""" - return calc_per_date_aais(self.per_date_eai_E0H0V0) - - @property - def per_date_aai_E0H1V0(self) -> np.ndarray: - """Average annual impacts for base exposure, base hazard and base vulnerability.""" - return calc_per_date_aais(self.per_date_eai_E0H1V0) - - @property - def per_date_aai_E0H0V1(self) -> np.ndarray: - """Average annual impacts for base exposure, base hazard and base vulnerability.""" - return calc_per_date_aais(self.per_date_eai_E0H0V1) - - ################################# - - ######### Core RPs ######### - - def per_date_return_periods_H0V0(self, return_periods: list[int]) -> np.ndarray: - """Estimated impacts per dates for given return periods, with changing exposure, starting hazard and starting vulnerability.""" - return calc_per_date_rps( - self.imp_mats_H0V0, - self.snapshot_start.hazard.frequency, - self.date_idx.freqstr[0], - return_periods, - ) - - def per_date_return_periods_H1V0(self, return_periods: list[int]) -> np.ndarray: - """Estimated impacts per dates for given return periods, with changing exposure, future hazard and starting vulnerability.""" - return calc_per_date_rps( - self.imp_mats_H1V0, - self.snapshot_end.hazard.frequency, - 
self.date_idx.freqstr[0], - return_periods, - ) - - def per_date_return_periods_H0V1(self, return_periods: list[int]) -> np.ndarray: - """Estimated impacts per dates for given return periods, with changing exposure, starting hazard and future vulnerability.""" - return calc_per_date_rps( - self.imp_mats_H0V1, - self.snapshot_start.hazard.frequency, - self.date_idx.freqstr[0], - return_periods, - ) - - def per_date_return_periods_H1V1(self, return_periods: list[int]) -> np.ndarray: - """Estimated impacts per dates for given return periods, with changing exposure, future hazard and future vulnerability.""" - return calc_per_date_rps( - self.imp_mats_H1V1, - self.snapshot_end.hazard.frequency, - self.date_idx.freqstr[0], - return_periods, - ) - - ################################## - - ##### Interpolation of metrics ##### - - def calc_eai(self) -> np.ndarray: - """Compute the EAIs at each date of the risk period (including changes in exposure, hazard and vulnerability).""" - per_date_eai_H0V0, per_date_eai_H1V0, per_date_eai_H0V1, per_date_eai_H1V1 = ( - self.per_date_eai_H0V0, - self.per_date_eai_H1V0, - self.per_date_eai_H0V1, - self.per_date_eai_H1V1, - ) - per_date_eai_V0 = self.interpolation_strategy.interp_over_hazard_dim( - per_date_eai_H0V0, per_date_eai_H1V0 - ) - per_date_eai_V1 = self.interpolation_strategy.interp_over_hazard_dim( - per_date_eai_H0V1, per_date_eai_H1V1 - ) - per_date_eai = self.interpolation_strategy.interp_over_vulnerability_dim( - per_date_eai_V0, per_date_eai_V1 - ) - return per_date_eai - - ### Fully interpolated metrics ### - - @lazy_property - def per_date_eai(self) -> np.ndarray: - """Expected annual impacts per date with changing exposure, changing hazard and changing vulnerability""" - return self.calc_eai() - - @lazy_property - def per_date_aai(self) -> np.ndarray: - """Average annual impacts per date with changing exposure, changing hazard and changing vulnerability.""" - return calc_per_date_aais(self.per_date_eai) - - @lazy_property - def eai_gdf(self) -> pd.DataFrame: - """Convenience function returning a DataFrame (with both datetime and coordinates ids) from `per_date_eai`. - - This dataframe can easily be merged with one of the snapshot exposure geodataframe. - - Notes - ----- - - The DataFrame from the starting snapshot is used as a basis (notably for `value` and `group_id`). - - """ - return self.calc_eai_gdf() - - #################################### - - ### Metrics from impact matrices ### - - # These methods might go in a utils file instead, to be reused - # for a no interpolation case (and maybe the timeseries?) 
- - #################################### - - def calc_eai_gdf(self) -> pd.DataFrame: - """Merge the per date EAIs of the risk period with the GeoDataframe of the exposure of the starting snapshot.""" - df = pd.DataFrame(self.per_date_eai, index=self.date_idx) - df = df.reset_index().melt( - id_vars=DEFAULT_PERIOD_INDEX_NAME, - var_name=COORD_ID_COL_NAME, - value_name=RISK_COL_NAME, - ) - if GROUP_ID_COL_NAME in self.snapshot_start.exposure.gdf: - eai_gdf = self.snapshot_start.exposure.gdf[[GROUP_ID_COL_NAME]] - eai_gdf[COORD_ID_COL_NAME] = eai_gdf.index - eai_gdf = eai_gdf.merge(df, on=COORD_ID_COL_NAME) - eai_gdf = eai_gdf.rename(columns={GROUP_ID_COL_NAME: GROUP_COL_NAME}) - else: - eai_gdf = df - eai_gdf[GROUP_COL_NAME] = pd.NA - - eai_gdf[GROUP_COL_NAME] = pd.Categorical( - eai_gdf[GROUP_COL_NAME], categories=self._groups_id - ) - eai_gdf[METRIC_COL_NAME] = EAI_METRIC_NAME - eai_gdf[MEASURE_COL_NAME] = ( - self.measure.name if self.measure else NO_MEASURE_VALUE - ) - eai_gdf[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit - return eai_gdf - - def calc_aai_metric(self) -> pd.DataFrame: - """Compute a DataFrame of the AAI at each dates of the risk period (including changes in exposure, hazard and vulnerability).""" - aai_df = pd.DataFrame( - index=self.date_idx, columns=[RISK_COL_NAME], data=self.per_date_aai - ) - aai_df[GROUP_COL_NAME] = pd.Categorical( - [pd.NA] * len(aai_df), categories=self._groups_id - ) - aai_df[METRIC_COL_NAME] = AAI_METRIC_NAME - aai_df[MEASURE_COL_NAME] = ( - self.measure.name if self.measure else NO_MEASURE_VALUE - ) - aai_df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit - aai_df.reset_index(inplace=True) - return aai_df - - def calc_aai_per_group_metric(self) -> pd.DataFrame | None: - """Compute a DataFrame of the AAI distinguised per group id in the exposures, at each dates of the risk period (including changes in exposure, hazard and vulnerability). - - Notes - ----- - - If group ids changes between starting and ending snapshots of the risk period, the AAIs are linearly interpolated (with a warning for transparency). - - """ - if len(self._group_id_E0) < 1 or len(self._group_id_E1) < 1: - LOGGER.warning( - "No group id defined in at least one of the Exposures object. Per group aai will be empty." - ) - return None - - eai_pres_groups = self.eai_gdf[ - [ - DEFAULT_PERIOD_INDEX_NAME, - COORD_ID_COL_NAME, - GROUP_COL_NAME, - RISK_COL_NAME, - ] - ].copy() - aai_per_group_df = eai_pres_groups.groupby( - [DEFAULT_PERIOD_INDEX_NAME, GROUP_COL_NAME], as_index=False, observed=True - )[RISK_COL_NAME].sum() - if not np.array_equal(self._group_id_E0, self._group_id_E1): - LOGGER.warning( - "Group id are changing between present and future snapshot. Per group AAI will be linearly interpolated." 
- ) - eai_fut_groups = self.eai_gdf.copy() - eai_fut_groups[GROUP_COL_NAME] = pd.Categorical( - np.tile(self._group_id_E1, len(self.date_idx)), - categories=self._groups_id, - ) - aai_fut_groups = eai_fut_groups.groupby( - [DEFAULT_PERIOD_INDEX_NAME, GROUP_COL_NAME], as_index=False - )[RISK_COL_NAME].sum() - aai_per_group_df[RISK_COL_NAME] = linear_interp_arrays( - aai_per_group_df[RISK_COL_NAME].values, - aai_fut_groups[RISK_COL_NAME].values, - ) - - aai_per_group_df[METRIC_COL_NAME] = AAI_METRIC_NAME - aai_per_group_df[MEASURE_COL_NAME] = ( - self.measure.name if self.measure else NO_MEASURE_VALUE - ) - aai_per_group_df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit - return aai_per_group_df - - def calc_return_periods_metric(self, return_periods: list[int]) -> pd.DataFrame: - """Compute a DataFrame of the estimated impacts for a list of return - periods, at each dates of the risk period (including changes in exposure, - hazard and vulnerability). - - Parameters - ---------- - - return_periods : list of int - The return periods to estimate impacts for. - - """ - - # currently mathematicaly wrong, but approximatively correct, to be reworked when concatenating the impact matrices for the interpolation - per_date_rp_H0V0, per_date_rp_H1V0, per_date_rp_H0V1, per_date_rp_H1V1 = ( - self.per_date_return_periods_H0V0(return_periods), - self.per_date_return_periods_H1V0(return_periods), - self.per_date_return_periods_H0V1(return_periods), - self.per_date_return_periods_H1V1(return_periods), - ) - per_date_rp_V0 = self.interpolation_strategy.interp_over_hazard_dim( - per_date_rp_H0V0, per_date_rp_H1V0 - ) - per_date_rp_V1 = self.interpolation_strategy.interp_over_hazard_dim( - per_date_rp_H0V1, per_date_rp_H1V1 - ) - per_date_rp = self.interpolation_strategy.interp_over_vulnerability_dim( - per_date_rp_V0, per_date_rp_V1 - ) - rp_df = pd.DataFrame( - index=self.date_idx, columns=return_periods, data=per_date_rp - ).melt(value_name=RISK_COL_NAME, var_name="rp", ignore_index=False) - rp_df.reset_index(inplace=True) - rp_df[GROUP_COL_NAME] = pd.Categorical( - [pd.NA] * len(rp_df), categories=self._groups_id - ) - rp_df[METRIC_COL_NAME] = RP_VALUE_PREFIX + "_" + rp_df["rp"].astype(str) - rp_df = rp_df.drop("rp", axis=1) - rp_df[MEASURE_COL_NAME] = ( - self.measure.name if self.measure else NO_MEASURE_VALUE - ) - rp_df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit - return rp_df - - def calc_risk_contributions_metric(self) -> pd.DataFrame: - """Compute a DataFrame of the individual contributions of risk (impact), - at each dates of the risk period (including changes in exposure, - hazard and vulnerability). 
- - """ - per_date_aai_E0V0 = self.interpolation_strategy.interp_over_hazard_dim( - self.per_date_aai_E0H0V0, self.per_date_aai_E0H1V0 - ) - per_date_aai_E0H0 = self.interpolation_strategy.interp_over_vulnerability_dim( - self.per_date_aai_E0H0V0, self.per_date_aai_E0H0V1 - ) - df = pd.DataFrame( - { - CONTRIBUTION_TOTAL_RISK_NAME: self.per_date_aai, - CONTRIBUTION_BASE_RISK_NAME: self.per_date_aai[0], - CONTRIBUTION_EXPOSURE_NAME: self.per_date_aai_H0V0 - - self.per_date_aai[0], - CONTRIBUTION_HAZARD_NAME: per_date_aai_E0V0 - # - (self.per_date_aai_H0V0 - self.per_date_aai[0]) - - self.per_date_aai[0], - CONTRIBUTION_VULNERABILITY_NAME: per_date_aai_E0H0 - - self.per_date_aai[0], - # - (self.per_date_aai_H0V0 - self.per_date_aai[0]), - }, - index=self.date_idx, - ) - df[CONTRIBUTION_INTERACTION_TERM_NAME] = df[CONTRIBUTION_TOTAL_RISK_NAME] - ( - df[CONTRIBUTION_BASE_RISK_NAME] - + df[CONTRIBUTION_EXPOSURE_NAME] - + df[CONTRIBUTION_HAZARD_NAME] - + df[CONTRIBUTION_VULNERABILITY_NAME] - ) - df = df.melt( - value_vars=[ - CONTRIBUTION_BASE_RISK_NAME, - CONTRIBUTION_EXPOSURE_NAME, - CONTRIBUTION_HAZARD_NAME, - CONTRIBUTION_VULNERABILITY_NAME, - CONTRIBUTION_INTERACTION_TERM_NAME, - ], - var_name=METRIC_COL_NAME, - value_name=RISK_COL_NAME, - ignore_index=False, - ) - df.reset_index(inplace=True) - df[GROUP_COL_NAME] = pd.Categorical( - [pd.NA] * len(df), categories=self._groups_id - ) - df[MEASURE_COL_NAME] = self.measure.name if self.measure else NO_MEASURE_VALUE - df[UNIT_COL_NAME] = self.snapshot_start.exposure.value_unit - return df - - def apply_measure(self, measure: Measure) -> "CalcRiskMetricsPeriod": - """Creates a new `CalcRiskMetricsPeriod` object with a measure. - - The given measure is applied to both snapshot of the risk period. - - Parameters - ---------- - measure : Measure - The measure to apply. - - Returns - ------- - - CalcRiskPeriod - The risk period with given measure applied. - - """ - snap0 = self.snapshot_start.apply_measure(measure) - snap1 = self.snapshot_end.apply_measure(measure) - - risk_period = CalcRiskMetricsPeriod( - snap0, - snap1, - self.time_resolution, - self.interpolation_strategy, - self.impact_computation_strategy, - ) - - risk_period.measure = measure - return risk_period - - -def calc_per_date_eais(imp_mats: list[csr_matrix], frequency: np.ndarray) -> np.ndarray: - """Calculate expected average impact (EAI) values from a list of impact matrices - corresponding to impacts at different dates (with possible changes along - exposure, hazard and vulnerability). - - Parameters - ---------- - imp_mats : list of np.ndarray - List of impact matrices. - frequency : np.ndarray - Hazard frequency values. - - Returns - ------- - np.ndarray - 2D array of EAI (1D) for each dates. - - """ - per_date_eai_exp = np.array( - [ImpactCalc.eai_exp_from_mat(imp_mat, frequency) for imp_mat in imp_mats] - ) - return per_date_eai_exp - - -def calc_per_date_aais(per_date_eai_exp: np.ndarray) -> np.ndarray: - """Calculate per_date aggregate annual impact (AAI) values - resulting from a list arrays corresponding to EAI at different - dates (with possible changes along exposure, hazard and vulnerability). - - Parameters - ---------- - per_date_eai_exp: np.ndarray - EAIs arrays. - - Returns - ------- - np.ndarray - 1D array of AAI (0D) for each dates. 
- """ - per_date_aai = np.array( - [ImpactCalc.aai_agg_from_eai_exp(eai_exp) for eai_exp in per_date_eai_exp] - ) - return per_date_aai - - -def calc_per_date_rps( - imp_mats: list[csr_matrix], - frequency: np.ndarray, - frequency_unit: str, - return_periods: list[int], -) -> np.ndarray: - """Calculate per date return period impact values from a - list of impact matrices corresponding to impacts at different - dates (with possible changes along exposure, hazard and vulnerability). - - Parameters - ---------- - imp_mats: list of scipy.crs_matrix - List of impact matrices. - frequency: np.ndarray - Frequency values. - return_periods : list of int - Return periods to calculate impact values for. - - Returns - ------- - np.ndarray - 2D array of impacts per return periods (1D) for each dates. - - """ - rp = np.array( - [ - calc_freq_curve(imp_mat, frequency, frequency_unit, return_periods).impact - for imp_mat in imp_mats - ] - ) - return rp - - -def calc_freq_curve( - imp_mat_intrpl, frequency, frequency_unit, return_per=None -) -> ImpactFreqCurve: - """Calculate the estimated impacts for given return periods. - - Parameters - ---------- - - imp_mat_intrpl: scipy.csr_matrix - An impact matrix. - frequency: np.ndarray - The frequency of the hazard. - return_per: np.ndarray - The return periods to compute impacts for. - - Returns - ------- - np.ndarray - The estimated impacts for the different return periods. - - """ - - at_event = np.sum(imp_mat_intrpl, axis=1).A1 - - # Sort descendingly the impacts per events - sort_idxs = np.argsort(at_event)[::-1] - # Calculate exceedence frequency - exceed_freq = np.cumsum(frequency[sort_idxs]) - # Set return period and impact exceeding frequency - ifc_return_per = 1 / exceed_freq[::-1] - ifc_impact = at_event[sort_idxs][::-1] - - if return_per is not None: - interp_imp = np.interp(return_per, ifc_return_per, ifc_impact) - ifc_return_per = return_per - ifc_impact = interp_imp - - return ImpactFreqCurve( - return_per=ifc_return_per, - impact=ifc_impact, - frequency_unit=frequency_unit, - label="Exceedance frequency curve", - ) diff --git a/climada/trajectories/static_trajectory.py b/climada/trajectories/static_trajectory.py new file mode 100644 index 0000000000..73944b6639 --- /dev/null +++ b/climada/trajectories/static_trajectory.py @@ -0,0 +1,316 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . + +--- + +This file implements \"static\" risk trajectory objects, for an easier evaluation +of risk at multiple points in time (snapshots). 
+
+"""
+
+import logging
+
+import pandas as pd
+
+from climada.entity.disc_rates.base import DiscRates
+from climada.trajectories.constants import (
+    AAI_METRIC_NAME,
+    AAI_PER_GROUP_METRIC_NAME,
+    COORD_ID_COL_NAME,
+    DATE_COL_NAME,
+    EAI_METRIC_NAME,
+    GROUP_COL_NAME,
+    MEASURE_COL_NAME,
+    METRIC_COL_NAME,
+    RETURN_PERIOD_METRIC_NAME,
+    RISK_COL_NAME,
+    RP_VALUE_PREFIX,
+)
+from climada.trajectories.impact_calc_strat import (
+    ImpactCalcComputation,
+    ImpactComputationStrategy,
+)
+from climada.trajectories.riskperiod import CalcRiskMetricsPoints
+from climada.trajectories.snapshot import Snapshot
+from climada.trajectories.trajectory import (
+    DEFAULT_ALLGROUP_NAME,
+    DEFAULT_DF_COLUMN_PRIORITY,
+    DEFAULT_RP,
+    RiskTrajectory,
+)
+from climada.util import log_level
+from climada.util.dataframe_handling import reorder_dataframe_columns
+
+LOGGER = logging.getLogger(__name__)
+
+__all__ = ["StaticRiskTrajectory"]
+
+
+class StaticRiskTrajectory(RiskTrajectory):
+    """This class implements static risk trajectories, objects that
+    gather impact computations for multiple dates.
+
+    This class computes risk metrics over a series of snapshots,
+    optionally applying risk discounting. It does not interpolate risk
+    between snapshots and only provides results at each snapshot.
+
+    """
+
+    POSSIBLE_METRICS = [
+        EAI_METRIC_NAME,
+        AAI_METRIC_NAME,
+        RETURN_PERIOD_METRIC_NAME,
+        AAI_PER_GROUP_METRIC_NAME,
+    ]
+    """Class variable listing the risk metrics that can be computed.
+
+    Currently:
+
+    - eai, expected impact (per exposure point within a period of 1/frequency unit of the hazard object)
+    - aai, average annual impact (aggregated eai over the whole exposure)
+    - aai_per_group, average annual impact per exposure subgroup (defined from the exposure geodataframe)
+    - return_periods, estimated impacts aggregated over the whole exposure for different return periods
+    """
+
+    _DEFAULT_ALL_METRICS = [
+        AAI_METRIC_NAME,
+        RETURN_PERIOD_METRIC_NAME,
+        AAI_PER_GROUP_METRIC_NAME,
+    ]
+
+    def __init__(
+        self,
+        snapshots_list: list[Snapshot],
+        *,
+        return_periods: list[int] = DEFAULT_RP,
+        all_groups_name: str = DEFAULT_ALLGROUP_NAME,
+        risk_disc_rates: DiscRates | None = None,
+        impact_computation_strategy: ImpactComputationStrategy | None = None,
+    ):
+        """Initialize a new `StaticRiskTrajectory`.
+
+        Parameters
+        ----------
+        snapshots_list : list[Snapshot]
+            The list of `Snapshot` objects to compute risk from.
+        return_periods: list[int], optional
+            The return periods to use when computing the `return_periods_metric`.
+            Defaults to `DEFAULT_RP` ([20, 50, 100]).
+        all_groups_name: str, optional
+            The name used for the subgroup gathering all exposure points.
+            Defaults to `DEFAULT_ALLGROUP_NAME` ("All").
+        risk_disc_rates: DiscRates, optional
+            The discount rate to apply to future risk. Defaults to None.
+        impact_computation_strategy: ImpactComputationStrategy, optional
+            The method used to calculate the impact from the (Haz,Exp,Vul)
+            of each snapshot. Defaults to :class:`ImpactCalcComputation`.
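+
+        Examples
+        --------
+        A minimal usage sketch; ``snap_2020`` and ``snap_2050`` stand for
+        two hypothetical, already constructed `Snapshot` objects:
+
+        >>> traj = StaticRiskTrajectory(
+        ...     [snap_2020, snap_2050],
+        ...     return_periods=[20, 50, 100],
+        ... )
+        >>> metrics_df = traj.per_date_risk_metrics()  # tidy frame, one row per metric and date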
+
+        """
+        super().__init__(
+            snapshots_list,
+            return_periods=return_periods,
+            all_groups_name=all_groups_name,
+            risk_disc_rates=risk_disc_rates,
+        )
+        self._risk_metrics_calculators = CalcRiskMetricsPoints(
+            self._snapshots,
+            impact_computation_strategy=impact_computation_strategy
+            or ImpactCalcComputation(),
+        )
+
+    @property
+    def impact_computation_strategy(self) -> ImpactComputationStrategy:
+        """The approach or strategy used to calculate the impact from the snapshots."""
+        return self._risk_metrics_calculators.impact_computation_strategy
+
+    @impact_computation_strategy.setter
+    def impact_computation_strategy(self, value, /):
+        if not isinstance(value, ImpactComputationStrategy):
+            raise ValueError("Not an impact computation strategy")
+
+        self._reset_metrics()
+        self._risk_metrics_calculators.impact_computation_strategy = value
+
+    def _generic_metrics(
+        self,
+        metric_name: str | None = None,
+        metric_meth: str | None = None,
+        **kwargs,
+    ) -> pd.DataFrame:
+        """Generic method to compute metrics based on the provided metric name and method.
+
+        This method calls the appropriate method from the calculator to return
+        the results for the given metric, in a tidy formatted dataframe.
+
+        It first checks whether the requested metric is a valid one, then
+        looks for a possible cached value and otherwise asks the calculator
+        (`self._risk_metrics_calculators`) to run the computation.
+        The results are then gathered in a tidy DataFrame.
+        If `risk_disc_rates` was set, values are converted to net present values.
+        Results are then cached in the corresponding `_<metric_name>_metrics`
+        attribute and returned.
+
+        Parameters
+        ----------
+        metric_name : str, optional
+            The name of the metric to return results for.
+        metric_meth : str, optional
+            The name of the specific method of the calculator to call.
+
+        Returns
+        -------
+        pd.DataFrame
+            A tidy formatted dataframe of the risk metric computed for the
+            different snapshots.
+
+        Raises
+        ------
+        NotImplementedError
+            If the requested metric is not part of `POSSIBLE_METRICS`.
+        ValueError
+            If either argument is not provided.
+
+        """
+        if metric_name is None or metric_meth is None:
+            raise ValueError("Both metric_name and metric_meth must be provided.")
+
+        if metric_name not in self.POSSIBLE_METRICS:
+            raise NotImplementedError(
+                f"{metric_name} not implemented ({self.POSSIBLE_METRICS})."
+            )
+
+        # Construct the attribute name for storing the metric results
+        attr_name = f"_{metric_name}_metrics"
+
+        if getattr(self, attr_name) is not None:
+            LOGGER.debug(f"Returning cached {attr_name}")
+            return getattr(self, attr_name)
+
+        with log_level(level="WARNING", name_prefix="climada"):
+            tmp = getattr(self._risk_metrics_calculators, metric_meth)(**kwargs)
+            if tmp is None:
+                return tmp
+
+        tmp = tmp.set_index(
+            [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME]
+        )
+        if COORD_ID_COL_NAME in tmp.columns:
+            tmp = tmp.set_index([COORD_ID_COL_NAME], append=True)
+
+        # With more than two snapshots there might be duplicated rows, which
+        # need to be removed. This should not happen in a static trajectory,
+        # but duplicated rows would mess up some dataframe manipulations down
+        # the road, so we drop them defensively.
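+        # Index.duplicated(keep="first") marks every repetition of an index
+        # entry after its first occurrence, so the ~ mask below keeps exactly
+        # one row per (date, group, measure, metric[, coord_id]) combination.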
+        tmp = tmp[~tmp.index.duplicated(keep="first")]
+        tmp = tmp.reset_index()
+        if self._all_groups_name not in tmp[GROUP_COL_NAME].cat.categories:
+            tmp[GROUP_COL_NAME] = tmp[GROUP_COL_NAME].cat.add_categories(
+                [self._all_groups_name]
+            )
+        tmp[GROUP_COL_NAME] = tmp[GROUP_COL_NAME].fillna(self._all_groups_name)
+
+        if self._risk_disc_rates:
+            tmp = self.npv_transform(tmp, self._risk_disc_rates)
+
+        tmp = reorder_dataframe_columns(tmp, DEFAULT_DF_COLUMN_PRIORITY)
+
+        setattr(self, attr_name, tmp)
+        return getattr(self, attr_name)
+
+    def eai_metrics(self, **kwargs) -> pd.DataFrame:
+        """Return the estimated annual impacts at each exposure point for each date.
+
+        This method computes and returns a `DataFrame` with the eai metric
+        (for each exposure point) for each date.
+
+        Notes
+        -----
+
+        This computation may become quite expensive for large areas at high resolution.
+
+        """
+        df = self._compute_metrics(
+            metric_name=EAI_METRIC_NAME, metric_meth="calc_eai_gdf", **kwargs
+        )
+        return df
+
+    def aai_metrics(self, **kwargs) -> pd.DataFrame:
+        """Return the average annual impacts for each date.
+
+        This method computes and returns a `DataFrame` with the aai metric for each date.
+
+        """
+
+        return self._compute_metrics(
+            metric_name=AAI_METRIC_NAME, metric_meth="calc_aai_metric", **kwargs
+        )
+
+    def return_periods_metrics(self, **kwargs) -> pd.DataFrame:
+        """Return the estimated impacts for different return periods.
+
+        Return periods to estimate impacts for are defined by `self.return_periods`.
+
+        """
+        return self._compute_metrics(
+            metric_name=RETURN_PERIOD_METRIC_NAME,
+            metric_meth="calc_return_periods_metric",
+            return_periods=self.return_periods,
+            **kwargs,
+        )
+
+    def aai_per_group_metrics(self, **kwargs) -> pd.DataFrame:
+        """Return the average annual impacts for each exposure group ID.
+
+        This method computes and returns a `DataFrame` with the aai metric for
+        each exposure group (defined by a group id), for each date.
+
+        """
+
+        return self._compute_metrics(
+            metric_name=AAI_PER_GROUP_METRIC_NAME,
+            metric_meth="calc_aai_per_group_metric",
+            **kwargs,
+        )
+
+    def per_date_risk_metrics(
+        self,
+        metrics: list[str] | None = None,
+    ) -> pd.DataFrame | pd.Series:
+        """Returns a DataFrame of risk metrics for each date.
+
+        This method collects (and if needed computes) the `metrics`
+        (defaulting to AAI_METRIC_NAME, RETURN_PERIOD_METRIC_NAME and AAI_PER_GROUP_METRIC_NAME).
+
+        Parameters
+        ----------
+        metrics : list[str], optional
+            The list of metrics to return (defaults to
+            [AAI_METRIC_NAME,RETURN_PERIOD_METRIC_NAME,AAI_PER_GROUP_METRIC_NAME])
+
+        Returns
+        -------
+        pd.DataFrame | pd.Series
+            A tidy DataFrame with metric values for all possible dates.
+
+        """
+
+        metrics = (
+            [AAI_METRIC_NAME, RETURN_PERIOD_METRIC_NAME, AAI_PER_GROUP_METRIC_NAME]
+            if metrics is None
+            else metrics
+        )
+        return pd.concat(
+            [getattr(self, f"{metric}_metrics")() for metric in metrics],
+            ignore_index=True,
+        )
diff --git a/climada/trajectories/test/test_calc_risk_metrics.py b/climada/trajectories/test/test_calc_risk_metrics.py
new file mode 100644
index 0000000000..493736d350
--- /dev/null
+++ b/climada/trajectories/test/test_calc_risk_metrics.py
@@ -0,0 +1,448 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see <https://www.gnu.org/licenses/>.
+
+---
+
+This module implements tests for the CalcRiskMetricsPoints class.
+
+"""
+
+import unittest
+from unittest.mock import MagicMock, call, patch
+
+import numpy as np
+import pandas as pd
+
+# Exposure, hazard and vulnerability classes from climada
+from climada.entity.exposures import Exposures
+from climada.entity.impact_funcs import ImpactFuncSet
+from climada.entity.impact_funcs.trop_cyclone import ImpfTropCyclone
+from climada.entity.measures.base import Measure
+from climada.hazard import Hazard
+from climada.trajectories.calc_risk_metrics import CalcRiskMetricsPoints
+from climada.trajectories.constants import (
+    AAI_METRIC_NAME,
+    COORD_ID_COL_NAME,
+    DATE_COL_NAME,
+    EAI_METRIC_NAME,
+    GROUP_COL_NAME,
+    GROUP_ID_COL_NAME,
+    MEASURE_COL_NAME,
+    METRIC_COL_NAME,
+    NO_MEASURE_VALUE,
+    RISK_COL_NAME,
+    UNIT_COL_NAME,
+)
+
+# Import the impact computation strategies and the Snapshot class
+from climada.trajectories.impact_calc_strat import (
+    ImpactCalcComputation,
+    ImpactComputationStrategy,
+)
+from climada.trajectories.snapshot import Snapshot
+from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5
+
+
+class TestCalcRiskMetricsPoints(unittest.TestCase):
+    def setUp(self):
+        # Build the test data (demo exposures, hazards and impact functions)
+        self.present_date = 2020
+        self.future_date = 2025
+        self.exposure_present = Exposures.from_hdf5(EXP_DEMO_H5)
+        self.exposure_present.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True)
+        self.exposure_present.gdf["impf_TC"] = 1
+        self.exposure_present.gdf[GROUP_ID_COL_NAME] = (
+            self.exposure_present.gdf["value"]
+            > self.exposure_present.gdf["value"].mean()
+        ) * 1
+        self.hazard_present = Hazard.from_hdf5(HAZ_DEMO_H5)
+        self.exposure_present.assign_centroids(self.hazard_present, distance="approx")
+        self.impfset_present = ImpactFuncSet([ImpfTropCyclone.from_emanuel_usa()])
+
+        self.exposure_future = Exposures.from_hdf5(EXP_DEMO_H5)
+        n_years = self.future_date - self.present_date + 1
+        growth_rate = 1.02
+        growth = growth_rate**n_years
+        self.exposure_future.gdf["value"] = self.exposure_future.gdf["value"] * growth
+        self.exposure_future.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True)
+        self.exposure_future.gdf["impf_TC"] = 1
+        self.exposure_future.gdf[GROUP_ID_COL_NAME] = (
+            self.exposure_future.gdf["value"] > self.exposure_future.gdf["value"].mean()
+        ) * 1
+        self.hazard_future = Hazard.from_hdf5(HAZ_DEMO_H5)
+        self.hazard_future.intensity *= 1.1
+        self.exposure_future.assign_centroids(self.hazard_future, distance="approx")
+        self.impfset_future = ImpactFuncSet(
+            [
+                ImpfTropCyclone.from_emanuel_usa(impf_id=1, v_half=60.0),
+            ]
+        )
+
+        self.measure = MagicMock(spec=Measure)
+        self.measure.name = "Test Measure"
+
+        # Setup mock return values for measure.apply
+        self.measure_exposure = MagicMock(spec=Exposures)
+        self.measure_hazard = MagicMock(spec=Hazard)
+        self.measure_impfset = MagicMock(spec=ImpactFuncSet)
+        self.measure.apply.return_value = (
+            self.measure_exposure,
+            self.measure_impfset,
+            self.measure_hazard,
+        )
+
+        # Create the start and end snapshots
+        self.mock_snapshot_start = Snapshot(
+            exposure=self.exposure_present,
+            hazard=self.hazard_present,
+            impfset=self.impfset_present,
date=self.present_date, + ) + self.mock_snapshot_end = Snapshot( + exposure=self.exposure_future, + hazard=self.hazard_future, + impfset=self.impfset_future, + date=self.future_date, + ) + + # Create an instance of CalcRiskPeriod + self.calc_risk_metrics_points = CalcRiskMetricsPoints( + [self.mock_snapshot_start, self.mock_snapshot_end], + impact_computation_strategy=ImpactCalcComputation(), + ) + + self.expected_eai = np.array( + [ + [ + 8702904.63375606, + 7870925.19290905, + 1805021.12653289, + 3827196.02428828, + 5815346.97427834, + 7870925.19290905, + 7871847.53906951, + 7870925.19290905, + 7886487.76136572, + 7870925.19290905, + 7876058.84500811, + 3858228.67061225, + 8401461.85304853, + 9210350.19520265, + 1806363.23553602, + 6922250.59852326, + 6711006.70101515, + 6886568.00391817, + 6703749.80009753, + 6704689.17531993, + 6703401.93516038, + 6818839.81873556, + 6716262.5286998, + 6703369.87656195, + 6703952.06070945, + 5678897.05935781, + 4984034.77073219, + 6708908.84462217, + 6702586.9472999, + 4961843.43826371, + 5139913.92380089, + 5255310.96072403, + 4981705.85074492, + 4926529.74583162, + 4973726.6063121, + 4926015.68274236, + 4937618.79350358, + 4926144.19851468, + 4926015.68274236, + 9575288.06765627, + 5100904.22956578, + 3501325.10900064, + 5093920.89144773, + 3505527.05928994, + 4002552.92232482, + 3512012.80001039, + 3514993.26161994, + 3562009.79687436, + 3869298.39771648, + 3509317.94922485, + ], + [ + 46651387.10647343, + 42191612.28496882, + 14767621.68800634, + 24849532.38841432, + 32260334.11128166, + 42191612.28496882, + 42196556.46505447, + 42191612.28496882, + 42275034.47974126, + 42191612.28496882, + 42219130.91253302, + 24227735.90988531, + 45035521.54835925, + 49371517.94999501, + 14778602.03484606, + 39909758.65668079, + 38691846.52720026, + 39834520.43061425, + 38650007.36519716, + 38655423.2682883, + 38648001.77388126, + 39313550.93419428, + 38722148.63941796, + 38647816.9422419, + 38651173.48481285, + 33700748.42359267, + 30195870.8789255, + 38679751.48077733, + 38643303.01755095, + 30061424.26274527, + 31140267.73715352, + 31839402.91317674, + 30181761.07222111, + 29847475.57538872, + 30133418.66577969, + 29844361.11423809, + 29914658.78479145, + 29845139.72952577, + 29844361.11423809, + 58012067.61585025, + 30903926.75151934, + 23061159.87895984, + 33550647.3781805, + 23088835.64296583, + 26362451.35547444, + 23131553.38525813, + 23151183.92499699, + 23460854.06493051, + 24271571.95828693, + 23113803.99527559, + ], + ] + ) + + self.expected_aai = np.array([2.88895461e08, 1.69310367e09]) + self.expected_aai_per_group = np.array( + [2.33513758e08, 5.53817034e07, 1.37114041e09, 3.21963264e08] + ) + self.expected_return_period_metric = np.array( + [ + 0.00000000e00, + 0.00000000e00, + 7.10925472e09, + 4.53975437e10, + 1.36547014e10, + 7.69981714e10, + ] + ) + + def test_reset_impact_data(self): + self.calc_risk_metrics_points._impacts = "A" # type:ignore + self.calc_risk_metrics_points._eai_gdf = "B" # type:ignore + self.calc_risk_metrics_points._per_date_eai = "C" # type:ignore + self.calc_risk_metrics_points._per_date_aai = "D" # type:ignore + self.calc_risk_metrics_points._reset_impact_data() + self.assertIsNone(self.calc_risk_metrics_points._impacts) + self.assertIsNone(self.calc_risk_metrics_points._eai_gdf) + self.assertIsNone(self.calc_risk_metrics_points._per_date_aai) + self.assertIsNone(self.calc_risk_metrics_points._per_date_eai) + + def test_set_impact_computation_strategy(self): + new_impact_computation_strategy = 
MagicMock(spec=ImpactComputationStrategy) + self.calc_risk_metrics_points.impact_computation_strategy = ( + new_impact_computation_strategy + ) + self.assertEqual( + self.calc_risk_metrics_points.impact_computation_strategy, + new_impact_computation_strategy, + ) + + def test_set_impact_computation_strategy_wtype(self): + with self.assertRaises(ValueError): + self.calc_risk_metrics_points.impact_computation_strategy = "A" + + @patch.object(CalcRiskMetricsPoints, "impact_computation_strategy") + def test_impacts_arrays(self, mock_impact_compute): + mock_impact_compute.compute_impacts.side_effect = ["A", "B"] + results = self.calc_risk_metrics_points.impacts + mock_impact_compute.compute_impacts.assert_has_calls( + [ + call( + self.mock_snapshot_start.exposure, + self.mock_snapshot_start.hazard, + self.mock_snapshot_start.impfset, + ), + call( + self.mock_snapshot_end.exposure, + self.mock_snapshot_end.hazard, + self.mock_snapshot_end.impfset, + ), + ] + ) + self.assertEqual(results, ["A", "B"]) + + def test_per_date_eai(self): + np.testing.assert_allclose( + self.calc_risk_metrics_points.per_date_eai, self.expected_eai + ) + + def test_per_date_aai(self): + np.testing.assert_allclose( + self.calc_risk_metrics_points.per_date_aai, + self.expected_aai, + ) + + def test_eai_gdf(self): + result_gdf = self.calc_risk_metrics_points.calc_eai_gdf() + self.assertIsInstance(result_gdf, pd.DataFrame) + self.assertEqual( + result_gdf.shape[0], + len(self.mock_snapshot_start.exposure.gdf) + + len(self.mock_snapshot_end.exposure.gdf), + ) + expected_columns = [ + DATE_COL_NAME, + COORD_ID_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue( + all(col in list(result_gdf.columns) for col in expected_columns) + ) + np.testing.assert_allclose( + np.array(result_gdf[RISK_COL_NAME].values), self.expected_eai.flatten() + ) + # Check constants and column transformations + self.assertEqual(result_gdf[METRIC_COL_NAME].unique(), EAI_METRIC_NAME) + self.assertEqual(result_gdf[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_gdf[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_gdf[GROUP_COL_NAME].dtype.name, "category") + self.assertListEqual( + list(result_gdf[GROUP_COL_NAME].cat.categories), + list(self.calc_risk_metrics_points._group_id), + ) + + def test_calc_aai_metric(self): + result_df = self.calc_risk_metrics_points.calc_aai_metric() + self.assertIsInstance(result_df, pd.DataFrame) + self.assertEqual( + result_df.shape[0], len(self.calc_risk_metrics_points.snapshots) + ) + expected_columns = [ + DATE_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + np.testing.assert_allclose( + np.array(result_df[RISK_COL_NAME].values), self.expected_aai + ) + # Check constants and column transformations + self.assertEqual(result_df[METRIC_COL_NAME].unique(), AAI_METRIC_NAME) + self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_df[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") + + def test_calc_aai_per_group_metric(self): + result_df = self.calc_risk_metrics_points.calc_aai_per_group_metric() + self.assertIsInstance(result_df, pd.DataFrame) + self.assertEqual( + result_df.shape[0], + 
len(self.calc_risk_metrics_points.snapshots) + * len(self.calc_risk_metrics_points._group_id), + ) + expected_columns = [ + DATE_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + np.testing.assert_allclose( + np.array(result_df[RISK_COL_NAME].values), self.expected_aai_per_group + ) + # Check constants and column transformations + self.assertEqual(result_df[METRIC_COL_NAME].unique(), AAI_METRIC_NAME) + self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_df[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") + self.assertListEqual(list(result_df[GROUP_COL_NAME].unique()), [0, 1]) + + def test_calc_return_periods_metric(self): + result_df = self.calc_risk_metrics_points.calc_return_periods_metric( + [20, 50, 100] + ) + self.assertIsInstance(result_df, pd.DataFrame) + self.assertEqual( + result_df.shape[0], len(self.calc_risk_metrics_points.snapshots) * 3 + ) + expected_columns = [ + DATE_COL_NAME, + GROUP_COL_NAME, + RISK_COL_NAME, + METRIC_COL_NAME, + MEASURE_COL_NAME, + UNIT_COL_NAME, + ] + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + np.testing.assert_allclose( + np.array(result_df[RISK_COL_NAME].values), + self.expected_return_period_metric, + ) + # Check constants and column transformations + self.assertListEqual( + list(result_df[METRIC_COL_NAME].unique()), ["rp_20", "rp_50", "rp_100"] + ) + self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) + self.assertEqual( + result_df[UNIT_COL_NAME].iloc[0], + self.mock_snapshot_start.exposure.value_unit, + ) + self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") + + @patch.object(Snapshot, "apply_measure") + @patch("climada.trajectories.riskperiod.CalcRiskMetricsPoints") + def test_apply_measure(self, mock_CalcRiskMetricPoints, mock_snap_apply_measure): + mock_CalcRiskMetricPoints.return_value = MagicMock(spec=CalcRiskMetricsPoints) + mock_snap_apply_measure.return_value = 42 + result = self.calc_risk_metrics_points.apply_measure(self.measure) + mock_snap_apply_measure.assert_called_with(self.measure) + mock_CalcRiskMetricPoints.assert_called_with( + [42, 42], + self.calc_risk_metrics_points.impact_computation_strategy, + ) + self.assertEqual(result.measure, self.measure) + + +if __name__ == "__main__": + TESTS = unittest.TestLoader().loadTestsFromTestCase(TestCalcRiskMetricsPoints) + unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/trajectories/test/test_riskperiod.py b/climada/trajectories/test/test_riskperiod.py deleted file mode 100644 index 8ae328109d..0000000000 --- a/climada/trajectories/test/test_riskperiod.py +++ /dev/null @@ -1,1389 +0,0 @@ -""" -This file is part of CLIMADA. - -Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. - -CLIMADA is free software: you can redistribute it and/or modify it under the -terms of the GNU General Public License as published by the Free -Software Foundation, version 3. - -CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY -WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -PARTICULAR PURPOSE. See the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along -with CLIMADA. If not, see . 
- ---- - -This modules implements different sparce matrices interpolation approaches. - -""" - -import types -import unittest -from unittest.mock import MagicMock, call, patch - -import geopandas as gpd -import numpy as np -import pandas as pd -from scipy.sparse import csr_matrix, issparse -from shapely import Point - -# Assuming these are the necessary imports from climada -from climada.entity.exposures import Exposures -from climada.entity.impact_funcs import ImpactFuncSet -from climada.entity.impact_funcs.trop_cyclone import ImpfTropCyclone -from climada.entity.measures.base import Measure -from climada.hazard import Hazard -from climada.trajectories.constants import ( - AAI_METRIC_NAME, - CONTRIBUTION_BASE_RISK_NAME, - CONTRIBUTION_EXPOSURE_NAME, - CONTRIBUTION_HAZARD_NAME, - CONTRIBUTION_INTERACTION_TERM_NAME, - CONTRIBUTION_VULNERABILITY_NAME, - COORD_ID_COL_NAME, - DATE_COL_NAME, - EAI_METRIC_NAME, - GROUP_COL_NAME, - GROUP_ID_COL_NAME, - MEASURE_COL_NAME, - METRIC_COL_NAME, - NO_MEASURE_VALUE, - RISK_COL_NAME, - UNIT_COL_NAME, -) - -# Import the CalcRiskPeriod class and other necessary classes/functions -from climada.trajectories.impact_calc_strat import ( - ImpactCalcComputation, - ImpactComputationStrategy, -) -from climada.trajectories.interpolation import ( - AllLinearStrategy, - InterpolationStrategyBase, -) -from climada.trajectories.riskperiod import ( - CalcRiskMetricsPeriod, - CalcRiskMetricsPoints, - calc_freq_curve, - calc_per_date_aais, - calc_per_date_eais, - calc_per_date_rps, -) -from climada.trajectories.snapshot import Snapshot -from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 - - -class TestCalcRiskMetricsPoints(unittest.TestCase): - def setUp(self): - # Create mock objects for testing - self.present_date = 2020 - self.future_date = 2025 - self.exposure_present = Exposures.from_hdf5(EXP_DEMO_H5) - self.exposure_present.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) - self.exposure_present.gdf["impf_TC"] = 1 - self.exposure_present.gdf[GROUP_ID_COL_NAME] = ( - self.exposure_present.gdf["value"] - > self.exposure_present.gdf["value"].mean() - ) * 1 - self.hazard_present = Hazard.from_hdf5(HAZ_DEMO_H5) - self.exposure_present.assign_centroids(self.hazard_present, distance="approx") - self.impfset_present = ImpactFuncSet([ImpfTropCyclone.from_emanuel_usa()]) - - self.exposure_future = Exposures.from_hdf5(EXP_DEMO_H5) - n_years = self.future_date - self.present_date + 1 - growth_rate = 1.02 - growth = growth_rate**n_years - self.exposure_future.gdf["value"] = self.exposure_future.gdf["value"] * growth - self.exposure_future.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) - self.exposure_future.gdf["impf_TC"] = 1 - self.exposure_future.gdf[GROUP_ID_COL_NAME] = ( - self.exposure_future.gdf["value"] > self.exposure_future.gdf["value"].mean() - ) * 1 - self.hazard_future = Hazard.from_hdf5(HAZ_DEMO_H5) - self.hazard_future.intensity *= 1.1 - self.exposure_future.assign_centroids(self.hazard_future, distance="approx") - self.impfset_future = ImpactFuncSet( - [ - ImpfTropCyclone.from_emanuel_usa(impf_id=1, v_half=60.0), - ] - ) - - self.measure = MagicMock(spec=Measure) - self.measure.name = "Test Measure" - - # Setup mock return values for measure.apply - self.measure_exposure = MagicMock(spec=Exposures) - self.measure_hazard = MagicMock(spec=Hazard) - self.measure_impfset = MagicMock(spec=ImpactFuncSet) - self.measure.apply.return_value = ( - self.measure_exposure, - self.measure_impfset, - self.measure_hazard, - ) - - # Create mock snapshots - 
self.mock_snapshot_start = Snapshot( - exposure=self.exposure_present, - hazard=self.hazard_present, - impfset=self.impfset_present, - date=self.present_date, - ) - self.mock_snapshot_end = Snapshot( - exposure=self.exposure_future, - hazard=self.hazard_future, - impfset=self.impfset_future, - date=self.future_date, - ) - - # Create an instance of CalcRiskPeriod - self.calc_risk_metrics_points = CalcRiskMetricsPoints( - [self.mock_snapshot_start, self.mock_snapshot_end], - impact_computation_strategy=ImpactCalcComputation(), - ) - - self.expected_eai = np.array( - [ - [ - 8702904.63375606, - 7870925.19290905, - 1805021.12653289, - 3827196.02428828, - 5815346.97427834, - 7870925.19290905, - 7871847.53906951, - 7870925.19290905, - 7886487.76136572, - 7870925.19290905, - 7876058.84500811, - 3858228.67061225, - 8401461.85304853, - 9210350.19520265, - 1806363.23553602, - 6922250.59852326, - 6711006.70101515, - 6886568.00391817, - 6703749.80009753, - 6704689.17531993, - 6703401.93516038, - 6818839.81873556, - 6716262.5286998, - 6703369.87656195, - 6703952.06070945, - 5678897.05935781, - 4984034.77073219, - 6708908.84462217, - 6702586.9472999, - 4961843.43826371, - 5139913.92380089, - 5255310.96072403, - 4981705.85074492, - 4926529.74583162, - 4973726.6063121, - 4926015.68274236, - 4937618.79350358, - 4926144.19851468, - 4926015.68274236, - 9575288.06765627, - 5100904.22956578, - 3501325.10900064, - 5093920.89144773, - 3505527.05928994, - 4002552.92232482, - 3512012.80001039, - 3514993.26161994, - 3562009.79687436, - 3869298.39771648, - 3509317.94922485, - ], - [ - 46651387.10647343, - 42191612.28496882, - 14767621.68800634, - 24849532.38841432, - 32260334.11128166, - 42191612.28496882, - 42196556.46505447, - 42191612.28496882, - 42275034.47974126, - 42191612.28496882, - 42219130.91253302, - 24227735.90988531, - 45035521.54835925, - 49371517.94999501, - 14778602.03484606, - 39909758.65668079, - 38691846.52720026, - 39834520.43061425, - 38650007.36519716, - 38655423.2682883, - 38648001.77388126, - 39313550.93419428, - 38722148.63941796, - 38647816.9422419, - 38651173.48481285, - 33700748.42359267, - 30195870.8789255, - 38679751.48077733, - 38643303.01755095, - 30061424.26274527, - 31140267.73715352, - 31839402.91317674, - 30181761.07222111, - 29847475.57538872, - 30133418.66577969, - 29844361.11423809, - 29914658.78479145, - 29845139.72952577, - 29844361.11423809, - 58012067.61585025, - 30903926.75151934, - 23061159.87895984, - 33550647.3781805, - 23088835.64296583, - 26362451.35547444, - 23131553.38525813, - 23151183.92499699, - 23460854.06493051, - 24271571.95828693, - 23113803.99527559, - ], - ] - ) - - self.expected_aai = np.array([2.88895461e08, 1.69310367e09]) - self.expected_aai_per_group = np.array( - [2.33513758e08, 5.53817034e07, 1.37114041e09, 3.21963264e08] - ) - self.expected_return_period_metric = np.array( - [ - 0.00000000e00, - 0.00000000e00, - 7.10925472e09, - 4.53975437e10, - 1.36547014e10, - 7.69981714e10, - ] - ) - - def test_reset_impact_data(self): - self.calc_risk_metrics_points._impacts = "A" # type:ignore - self.calc_risk_metrics_points._eai_gdf = "B" # type:ignore - self.calc_risk_metrics_points._per_date_eai = "C" # type:ignore - self.calc_risk_metrics_points._per_date_aai = "D" # type:ignore - self.calc_risk_metrics_points._reset_impact_data() - self.assertIsNone(self.calc_risk_metrics_points._impacts) - self.assertIsNone(self.calc_risk_metrics_points._eai_gdf) - self.assertIsNone(self.calc_risk_metrics_points._per_date_aai) - 
self.assertIsNone(self.calc_risk_metrics_points._per_date_eai) - - def test_set_impact_computation_strategy(self): - new_impact_computation_strategy = MagicMock(spec=ImpactComputationStrategy) - self.calc_risk_metrics_points.impact_computation_strategy = ( - new_impact_computation_strategy - ) - self.assertEqual( - self.calc_risk_metrics_points.impact_computation_strategy, - new_impact_computation_strategy, - ) - - def test_set_impact_computation_strategy_wtype(self): - with self.assertRaises(ValueError): - self.calc_risk_metrics_points.impact_computation_strategy = "A" - - @patch.object(CalcRiskMetricsPoints, "impact_computation_strategy") - def test_impacts_arrays(self, mock_impact_compute): - mock_impact_compute.compute_impacts.side_effect = ["A", "B"] - results = self.calc_risk_metrics_points.impacts - mock_impact_compute.compute_impacts.assert_has_calls( - [ - call( - self.mock_snapshot_start.exposure, - self.mock_snapshot_start.hazard, - self.mock_snapshot_start.impfset, - ), - call( - self.mock_snapshot_end.exposure, - self.mock_snapshot_end.hazard, - self.mock_snapshot_end.impfset, - ), - ] - ) - self.assertEqual(results, ["A", "B"]) - - def test_per_date_eai(self): - np.testing.assert_allclose( - self.calc_risk_metrics_points.per_date_eai, self.expected_eai - ) - - def test_per_date_aai(self): - np.testing.assert_allclose( - self.calc_risk_metrics_points.per_date_aai, - self.expected_aai, - ) - - def test_eai_gdf(self): - result_gdf = self.calc_risk_metrics_points.calc_eai_gdf() - self.assertIsInstance(result_gdf, pd.DataFrame) - self.assertEqual( - result_gdf.shape[0], - len(self.mock_snapshot_start.exposure.gdf) - + len(self.mock_snapshot_end.exposure.gdf), - ) - expected_columns = [ - DATE_COL_NAME, - COORD_ID_COL_NAME, - GROUP_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - UNIT_COL_NAME, - ] - self.assertTrue( - all(col in list(result_gdf.columns) for col in expected_columns) - ) - np.testing.assert_allclose( - np.array(result_gdf[RISK_COL_NAME].values), self.expected_eai.flatten() - ) - # Check constants and column transformations - self.assertEqual(result_gdf[METRIC_COL_NAME].unique(), EAI_METRIC_NAME) - self.assertEqual(result_gdf[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) - self.assertEqual( - result_gdf[UNIT_COL_NAME].iloc[0], - self.mock_snapshot_start.exposure.value_unit, - ) - self.assertEqual(result_gdf[GROUP_COL_NAME].dtype.name, "category") - self.assertListEqual( - list(result_gdf[GROUP_COL_NAME].cat.categories), - list(self.calc_risk_metrics_points._group_id), - ) - - def test_calc_aai_metric(self): - result_df = self.calc_risk_metrics_points.calc_aai_metric() - self.assertIsInstance(result_df, pd.DataFrame) - self.assertEqual( - result_df.shape[0], len(self.calc_risk_metrics_points.snapshots) - ) - expected_columns = [ - DATE_COL_NAME, - GROUP_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - UNIT_COL_NAME, - ] - self.assertTrue(all(col in result_df.columns for col in expected_columns)) - np.testing.assert_allclose( - np.array(result_df[RISK_COL_NAME].values), self.expected_aai - ) - # Check constants and column transformations - self.assertEqual(result_df[METRIC_COL_NAME].unique(), AAI_METRIC_NAME) - self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) - self.assertEqual( - result_df[UNIT_COL_NAME].iloc[0], - self.mock_snapshot_start.exposure.value_unit, - ) - self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") - - def test_calc_aai_per_group_metric(self): - result_df = 
self.calc_risk_metrics_points.calc_aai_per_group_metric() - self.assertIsInstance(result_df, pd.DataFrame) - self.assertEqual( - result_df.shape[0], - len(self.calc_risk_metrics_points.snapshots) - * len(self.calc_risk_metrics_points._group_id), - ) - expected_columns = [ - DATE_COL_NAME, - GROUP_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - UNIT_COL_NAME, - ] - self.assertTrue(all(col in result_df.columns for col in expected_columns)) - np.testing.assert_allclose( - np.array(result_df[RISK_COL_NAME].values), self.expected_aai_per_group - ) - # Check constants and column transformations - self.assertEqual(result_df[METRIC_COL_NAME].unique(), AAI_METRIC_NAME) - self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) - self.assertEqual( - result_df[UNIT_COL_NAME].iloc[0], - self.mock_snapshot_start.exposure.value_unit, - ) - self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") - self.assertListEqual(list(result_df[GROUP_COL_NAME].unique()), [0, 1]) - - def test_calc_return_periods_metric(self): - result_df = self.calc_risk_metrics_points.calc_return_periods_metric( - [20, 50, 100] - ) - self.assertIsInstance(result_df, pd.DataFrame) - self.assertEqual( - result_df.shape[0], len(self.calc_risk_metrics_points.snapshots) * 3 - ) - expected_columns = [ - DATE_COL_NAME, - GROUP_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - UNIT_COL_NAME, - ] - self.assertTrue(all(col in result_df.columns for col in expected_columns)) - np.testing.assert_allclose( - np.array(result_df[RISK_COL_NAME].values), - self.expected_return_period_metric, - ) - # Check constants and column transformations - self.assertListEqual( - list(result_df[METRIC_COL_NAME].unique()), ["rp_20", "rp_50", "rp_100"] - ) - self.assertEqual(result_df[MEASURE_COL_NAME].iloc[0], NO_MEASURE_VALUE) - self.assertEqual( - result_df[UNIT_COL_NAME].iloc[0], - self.mock_snapshot_start.exposure.value_unit, - ) - self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") - - @patch.object(Snapshot, "apply_measure") - @patch("climada.trajectories.riskperiod.CalcRiskMetricsPoints") - def test_apply_measure(self, mock_CalcRiskMetricPoints, mock_snap_apply_measure): - mock_CalcRiskMetricPoints.return_value = MagicMock(spec=CalcRiskMetricsPeriod) - mock_snap_apply_measure.return_value = 42 - result = self.calc_risk_metrics_points.apply_measure(self.measure) - mock_snap_apply_measure.assert_called_with(self.measure) - mock_CalcRiskMetricPoints.assert_called_with( - [42, 42], - self.calc_risk_metrics_points.impact_computation_strategy, - ) - self.assertEqual(result.measure, self.measure) - - -class TestCalcRiskMetricsPeriod_TopLevel(unittest.TestCase): - def setUp(self): - # Create mock objects for testing - self.present_date = 2020 - self.future_date = 2025 - self.exposure_present = Exposures.from_hdf5(EXP_DEMO_H5) - self.exposure_present.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) - self.exposure_present.gdf["impf_TC"] = 1 - self.exposure_present.gdf[GROUP_ID_COL_NAME] = ( - self.exposure_present.gdf["value"] > 500000 - ) * 1 - self.hazard_present = Hazard.from_hdf5(HAZ_DEMO_H5) - self.exposure_present.assign_centroids(self.hazard_present, distance="approx") - self.impfset_present = ImpactFuncSet([ImpfTropCyclone.from_emanuel_usa()]) - - self.exposure_future = Exposures.from_hdf5(EXP_DEMO_H5) - n_years = self.future_date - self.present_date + 1 - growth_rate = 1.02 - growth = growth_rate**n_years - self.exposure_future.gdf["value"] = 
self.exposure_future.gdf["value"] * growth - self.exposure_future.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) - self.exposure_future.gdf["impf_TC"] = 1 - self.exposure_future.gdf[GROUP_ID_COL_NAME] = ( - self.exposure_future.gdf["value"] > 500000 - ) * 1 - self.hazard_future = Hazard.from_hdf5(HAZ_DEMO_H5) - self.hazard_future.intensity *= 1.1 - self.exposure_future.assign_centroids(self.hazard_future, distance="approx") - self.impfset_future = ImpactFuncSet( - [ - ImpfTropCyclone.from_emanuel_usa(impf_id=1, v_half=60.0), - ] - ) - - self.measure = MagicMock(spec=Measure) - self.measure.name = "Test Measure" - - # Setup mock return values for measure.apply - self.measure_exposure = MagicMock(spec=Exposures) - self.measure_hazard = MagicMock(spec=Hazard) - self.measure_impfset = MagicMock(spec=ImpactFuncSet) - self.measure.apply.return_value = ( - self.measure_exposure, - self.measure_impfset, - self.measure_hazard, - ) - - # Create mock snapshots - self.mock_snapshot_start = Snapshot( - exposure=self.exposure_present, - hazard=self.hazard_present, - impfset=self.impfset_present, - date=self.present_date, - ) - self.mock_snapshot_end = Snapshot( - exposure=self.exposure_future, - hazard=self.hazard_future, - impfset=self.impfset_future, - date=self.future_date, - ) - - # Create an instance of CalcRiskPeriod - self.calc_risk_period = CalcRiskMetricsPeriod( - self.mock_snapshot_start, - self.mock_snapshot_end, - time_resolution="Y", - interpolation_strategy=AllLinearStrategy(), - impact_computation_strategy=ImpactCalcComputation(), - # These will have to be tested when implemented - # risk_transf_attach=0.1, - # risk_transf_cover=0.9, - # calc_residual=False - ) - - def test_init(self): - self.assertEqual(self.calc_risk_period.snapshot_start, self.mock_snapshot_start) - self.assertEqual(self.calc_risk_period.snapshot_end, self.mock_snapshot_end) - self.assertEqual(self.calc_risk_period.time_resolution, "Y") - self.assertEqual( - self.calc_risk_period.time_points, self.future_date - self.present_date + 1 - ) - self.assertIsInstance( - self.calc_risk_period.interpolation_strategy, AllLinearStrategy - ) - self.assertIsInstance( - self.calc_risk_period.impact_computation_strategy, ImpactCalcComputation - ) - np.testing.assert_array_equal( - self.calc_risk_period._group_id_E0, - self.mock_snapshot_start.exposure.gdf[GROUP_ID_COL_NAME].values, - ) - np.testing.assert_array_equal( - self.calc_risk_period._group_id_E1, - self.mock_snapshot_end.exposure.gdf[GROUP_ID_COL_NAME].values, - ) - self.assertIsInstance(self.calc_risk_period.date_idx, pd.PeriodIndex) - self.assertEqual( - len(self.calc_risk_period.date_idx), - self.future_date - self.present_date + 1, - ) - - def test_set_date_idx_wrong_type(self): - with self.assertRaises(ValueError): - self.calc_risk_period.date_idx = "A" - - def test_set_date_idx_periods(self): - new_date_idx = pd.period_range("2023-01-01", periods=24) - self.calc_risk_period.date_idx = new_date_idx - self.assertEqual(len(self.calc_risk_period.date_idx), 24) - - def test_set_date_idx_freq(self): - new_date_idx = pd.period_range("2023-01-01", "2023-12-01", freq="M") - self.calc_risk_period.date_idx = new_date_idx - self.assertEqual(len(self.calc_risk_period.date_idx), 12) - pd.testing.assert_index_equal( - self.calc_risk_period.date_idx, - pd.period_range("2023-01-01", "2023-12-01", freq="M"), - ) - - def test_set_time_resolution(self): - self.calc_risk_period.time_resolution = "M" - self.assertEqual(self.calc_risk_period.time_resolution, "M") - 
pd.testing.assert_index_equal( - self.calc_risk_period.date_idx, - pd.PeriodIndex( - [ - "2020-01-01", - "2020-02-01", - "2020-03-01", - "2020-04-01", - "2020-05-01", - "2020-06-01", - "2020-07-01", - "2020-08-01", - "2020-09-01", - "2020-10-01", - "2020-11-01", - "2020-12-01", - "2021-01-01", - "2021-02-01", - "2021-03-01", - "2021-04-01", - "2021-05-01", - "2021-06-01", - "2021-07-01", - "2021-08-01", - "2021-09-01", - "2021-10-01", - "2021-11-01", - "2021-12-01", - "2022-01-01", - "2022-02-01", - "2022-03-01", - "2022-04-01", - "2022-05-01", - "2022-06-01", - "2022-07-01", - "2022-08-01", - "2022-09-01", - "2022-10-01", - "2022-11-01", - "2022-12-01", - "2023-01-01", - "2023-02-01", - "2023-03-01", - "2023-04-01", - "2023-05-01", - "2023-06-01", - "2023-07-01", - "2023-08-01", - "2023-09-01", - "2023-10-01", - "2023-11-01", - "2023-12-01", - "2024-01-01", - "2024-02-01", - "2024-03-01", - "2024-04-01", - "2024-05-01", - "2024-06-01", - "2024-07-01", - "2024-08-01", - "2024-09-01", - "2024-10-01", - "2024-11-01", - "2024-12-01", - "2025-01-01", - ], - name=DATE_COL_NAME, - freq="M", - ), - ) - - def test_set_interpolation_strategy(self): - new_interpolation_strategy = MagicMock(spec=InterpolationStrategyBase) - self.calc_risk_period.interpolation_strategy = new_interpolation_strategy - self.assertEqual( - self.calc_risk_period.interpolation_strategy, new_interpolation_strategy - ) - - def test_set_interpolation_strategy_wtype(self): - with self.assertRaises(ValueError): - self.calc_risk_period.interpolation_strategy = "A" - - def test_set_impact_computation_strategy(self): - new_impact_computation_strategy = MagicMock(spec=ImpactComputationStrategy) - self.calc_risk_period.impact_computation_strategy = ( - new_impact_computation_strategy - ) - self.assertEqual( - self.calc_risk_period.impact_computation_strategy, - new_impact_computation_strategy, - ) - - def test_set_impact_computation_strategy_wtype(self): - with self.assertRaises(ValueError): - self.calc_risk_period.impact_computation_strategy = "A" - - # The computation are tested in the CalcImpactStrategy / InterpolationStrategyBase tests - # Here we just make sure that the calling works - @patch.object(CalcRiskMetricsPeriod, "impact_computation_strategy") - def test_impacts_arrays(self, mock_impact_compute): - mock_impact_compute.compute_impacts.side_effect = [1, 2, 3, 4, 5, 6, 7, 8] - self.assertEqual(self.calc_risk_period.E0H0V0, 1) - self.assertEqual(self.calc_risk_period.E1H0V0, 2) - self.assertEqual(self.calc_risk_period.E0H1V0, 3) - self.assertEqual(self.calc_risk_period.E1H1V0, 4) - self.assertEqual(self.calc_risk_period.E0H0V1, 5) - self.assertEqual(self.calc_risk_period.E1H0V1, 6) - self.assertEqual(self.calc_risk_period.E0H1V1, 7) - self.assertEqual(self.calc_risk_period.E1H1V1, 8) - mock_impact_compute.compute_impacts.assert_has_calls( - [ - call( - exp, - haz, - impf, - ) - for exp, haz, impf in [ - ( - self.mock_snapshot_start.exposure, - self.mock_snapshot_start.hazard, - self.mock_snapshot_start.impfset, - ), - ( - self.mock_snapshot_end.exposure, - self.mock_snapshot_start.hazard, - self.mock_snapshot_start.impfset, - ), - ( - self.mock_snapshot_start.exposure, - self.mock_snapshot_end.hazard, - self.mock_snapshot_start.impfset, - ), - ( - self.mock_snapshot_end.exposure, - self.mock_snapshot_end.hazard, - self.mock_snapshot_start.impfset, - ), - ( - self.mock_snapshot_start.exposure, - self.mock_snapshot_start.hazard, - self.mock_snapshot_end.impfset, - ), - ( - self.mock_snapshot_end.exposure, - 
self.mock_snapshot_start.hazard, - self.mock_snapshot_end.impfset, - ), - ( - self.mock_snapshot_start.exposure, - self.mock_snapshot_end.hazard, - self.mock_snapshot_end.impfset, - ), - ( - self.mock_snapshot_end.exposure, - self.mock_snapshot_end.hazard, - self.mock_snapshot_end.impfset, - ), - ] - ] - ) - - @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy") - def test_imp_mats_H0V0(self, mock_interpolate): - mock_interpolate.interp_over_exposure_dim.return_value = 1 - result = self.calc_risk_period.imp_mats_H0V0 - self.assertEqual(result, 1) - mock_interpolate.interp_over_exposure_dim.assert_called_with( - self.calc_risk_period.E0H0V0.imp_mat, - self.calc_risk_period.E1H0V0.imp_mat, - self.calc_risk_period.time_points, - ) - - @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy") - def test_imp_mats_H1V0(self, mock_interpolate): - mock_interpolate.interp_over_exposure_dim.return_value = 1 - result = self.calc_risk_period.imp_mats_H1V0 - self.assertEqual(result, 1) - mock_interpolate.interp_over_exposure_dim.assert_called_with( - self.calc_risk_period.E0H1V0.imp_mat, - self.calc_risk_period.E1H1V0.imp_mat, - self.calc_risk_period.time_points, - ) - - @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy") - def test_imp_mats_H0V1(self, mock_interpolate): - mock_interpolate.interp_over_exposure_dim.return_value = 1 - result = self.calc_risk_period.imp_mats_H0V1 - self.assertEqual(result, 1) - mock_interpolate.interp_over_exposure_dim.assert_called_with( - self.calc_risk_period.E0H0V1.imp_mat, - self.calc_risk_period.E1H0V1.imp_mat, - self.calc_risk_period.time_points, - ) - - @patch.object(CalcRiskMetricsPeriod, "interpolation_strategy") - def test_imp_mats_H1V1(self, mock_interpolate): - mock_interpolate.interp_over_exposure_dim.return_value = 1 - result = self.calc_risk_period.imp_mats_H1V1 - self.assertEqual(result, 1) - mock_interpolate.interp_over_exposure_dim.assert_called_with( - self.calc_risk_period.E0H1V1.imp_mat, - self.calc_risk_period.E1H1V1.imp_mat, - self.calc_risk_period.time_points, - ) - - @patch("climada.trajectories.riskperiod.calc_per_date_eais") - def test_per_date_eai_H0V0(self, mock_calc_per_date_eais): - mock_calc_per_date_eais.return_value = 1 - result = self.calc_risk_period.per_date_eai_H0V0 - - actual_arg0 = mock_calc_per_date_eais.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H0V0 - - actual_arg1 = mock_calc_per_date_eais.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_eais") - def test_per_date_eai_H1V0(self, mock_calc_per_date_eais): - mock_calc_per_date_eais.return_value = 1 - result = self.calc_risk_period.per_date_eai_H1V0 - actual_arg0 = mock_calc_per_date_eais.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H1V0 - - actual_arg1 = mock_calc_per_date_eais.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_eais") - def test_per_date_eai_H0V1(self, mock_calc_per_date_eais): - mock_calc_per_date_eais.return_value = 1 - result = self.calc_risk_period.per_date_eai_H0V1 - - actual_arg0 = 
mock_calc_per_date_eais.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H0V1 - - actual_arg1 = mock_calc_per_date_eais.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_eais") - def test_per_date_eai_H1V1(self, mock_calc_per_date_eais): - mock_calc_per_date_eais.return_value = 1 - result = self.calc_risk_period.per_date_eai_H1V1 - actual_arg0 = mock_calc_per_date_eais.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H1V1 - - actual_arg1 = mock_calc_per_date_eais.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_aais") - def test_per_date_aai_H0V0(self, mock_calc_per_date_aais): - mock_calc_per_date_aais.return_value = 1 - result = self.calc_risk_period.per_date_aai_H0V0 - - actual_arg0 = mock_calc_per_date_aais.call_args[0][0] - expected_arg0 = self.calc_risk_period.per_date_eai_H0V0 - self.assertEqual(result, 1) - np.testing.assert_array_equal(actual_arg0, expected_arg0) - - @patch("climada.trajectories.riskperiod.calc_per_date_aais") - def test_per_date_aai_H1V0(self, mock_calc_per_date_aais): - mock_calc_per_date_aais.return_value = 1 - result = self.calc_risk_period.per_date_aai_H1V0 - - actual_arg0 = mock_calc_per_date_aais.call_args[0][0] - expected_arg0 = self.calc_risk_period.per_date_eai_H1V0 - self.assertEqual(result, 1) - np.testing.assert_array_equal(actual_arg0, expected_arg0) - - @patch("climada.trajectories.riskperiod.calc_per_date_aais") - def test_per_date_aai_H0V1(self, mock_calc_per_date_aais): - mock_calc_per_date_aais.return_value = 1 - result = self.calc_risk_period.per_date_aai_H0V1 - - actual_arg0 = mock_calc_per_date_aais.call_args[0][0] - expected_arg0 = self.calc_risk_period.per_date_eai_H0V1 - self.assertEqual(result, 1) - np.testing.assert_array_equal(actual_arg0, expected_arg0) - - @patch("climada.trajectories.riskperiod.calc_per_date_aais") - def test_per_date_aai_H1V1(self, mock_calc_per_date_aais): - mock_calc_per_date_aais.return_value = 1 - result = self.calc_risk_period.per_date_aai_H1V1 - - actual_arg0 = mock_calc_per_date_aais.call_args[0][0] - expected_arg0 = self.calc_risk_period.per_date_eai_H1V1 - self.assertEqual(result, 1) - np.testing.assert_array_equal(actual_arg0, expected_arg0) - - @patch("climada.trajectories.riskperiod.calc_per_date_rps") - def test_per_date_return_periods_H0V0(self, mock_calc_per_date_rps): - mock_calc_per_date_rps.return_value = 1 - result = self.calc_risk_period.per_date_return_periods_H0V0([10, 50]) - - actual_arg0 = mock_calc_per_date_rps.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H0V0 - - actual_arg1 = mock_calc_per_date_rps.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency - - actual_arg2 = mock_calc_per_date_rps.call_args[0][2] - expected_arg2 = [10, 50] - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(actual_arg2, expected_arg2) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_rps") - def 
test_per_date_return_periods_H1V0(self, mock_calc_per_date_rps): - mock_calc_per_date_rps.return_value = 1 - result = self.calc_risk_period.per_date_return_periods_H1V0([10, 50]) - - actual_arg0 = mock_calc_per_date_rps.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H1V0 - - actual_arg1 = mock_calc_per_date_rps.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_end.hazard.frequency - - actual_arg2 = mock_calc_per_date_rps.call_args[0][2] - expected_arg2 = [10, 50] - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(actual_arg2, expected_arg2) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_rps") - def test_per_date_return_periods_H0V1(self, mock_calc_per_date_rps): - mock_calc_per_date_rps.return_value = 1 - result = self.calc_risk_period.per_date_return_periods_H0V1([10, 50]) - - actual_arg0 = mock_calc_per_date_rps.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H0V1 - - actual_arg1 = mock_calc_per_date_rps.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_start.hazard.frequency - - actual_arg2 = mock_calc_per_date_rps.call_args[0][2] - expected_arg2 = [10, 50] - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(actual_arg2, expected_arg2) - self.assertEqual(result, 1) - - @patch("climada.trajectories.riskperiod.calc_per_date_rps") - def test_per_date_return_periods_H1V1(self, mock_calc_per_date_rps): - mock_calc_per_date_rps.return_value = 1 - result = self.calc_risk_period.per_date_return_periods_H1V1([10, 50]) - - actual_arg0 = mock_calc_per_date_rps.call_args[0][0] - expected_arg0 = self.calc_risk_period.imp_mats_H1V1 - - actual_arg1 = mock_calc_per_date_rps.call_args[0][1] - expected_arg1 = self.calc_risk_period.snapshot_end.hazard.frequency - - actual_arg2 = mock_calc_per_date_rps.call_args[0][2] - expected_arg2 = [10, 50] - - assert_sparse_matrix_array_equal(actual_arg0, expected_arg0) - np.testing.assert_array_equal(actual_arg1, expected_arg1) - self.assertEqual(actual_arg2, expected_arg2) - self.assertEqual(result, 1) - - @patch.object(CalcRiskMetricsPeriod, "calc_eai_gdf", return_value=1) - def test_eai_gdf(self, mock_calc_eai_gdf): - result = self.calc_risk_period.eai_gdf - mock_calc_eai_gdf.assert_called_once() - self.assertEqual(result, 1) - - # Here we mock the impact calc method just to make sure it is rightfully called - def test_calc_per_date_eais(self): - results = calc_per_date_eais( - imp_mats=[ - csr_matrix( - [ - [1, 1, 1], - [2, 2, 2], - ] - ), - csr_matrix( - [ - [2, 0, 1], - [2, 0, 2], - ] - ), - ], - frequency=np.array([1, 1]), - ) - np.testing.assert_array_equal(results, np.array([[3, 3, 3], [4, 0, 3]])) - - def test_calc_per_date_aais(self): - results = calc_per_date_aais(np.array([[3, 3, 3], [4, 0, 3]])) - np.testing.assert_array_equal(results, np.array([9, 7])) - - def test_calc_freq_curve(self): - results = calc_freq_curve( - imp_mat_intrpl=csr_matrix( - [ - [0.1, 0, 0], - [1, 0, 0], - [10, 0, 0], - ] - ), - frequency=np.array([0.5, 0.05, 0.005]), - return_per=[10, 50, 100], - ) - np.testing.assert_array_equal(results, np.array([0.55045, 2.575, 5.05])) - - def test_calc_per_date_rps(self): - base_imp = csr_matrix( - [ - [0.1, 0, 0], - [1, 0, 0], - [10, 0, 0], - ] - ) - results = calc_per_date_rps( - [base_imp, base_imp * 2, base_imp * 4], - frequency=np.array([0.5, 0.05, 
0.005]), - return_periods=[10, 50, 100], - ) - np.testing.assert_array_equal( - results, - np.array( - [[0.55045, 2.575, 5.05], [1.1009, 5.15, 10.1], [2.2018, 10.3, 20.2]] - ), - ) - - -class TestCalcRiskPeriod_LowLevel(unittest.TestCase): - def setUp(self): - # Create mock objects for testing - self.calc_risk_period = MagicMock(spec=CalcRiskMetricsPeriod) - - # Little trick to bind the mocked object method to the real one - self.calc_risk_period.calc_eai = types.MethodType( - CalcRiskMetricsPeriod.calc_eai, self.calc_risk_period - ) - - self.calc_risk_period.calc_eai_gdf = types.MethodType( - CalcRiskMetricsPeriod.calc_eai_gdf, self.calc_risk_period - ) - self.calc_risk_period.calc_aai_metric = types.MethodType( - CalcRiskMetricsPeriod.calc_aai_metric, self.calc_risk_period - ) - - self.calc_risk_period.calc_aai_per_group_metric = types.MethodType( - CalcRiskMetricsPeriod.calc_aai_per_group_metric, self.calc_risk_period - ) - self.calc_risk_period.calc_return_periods_metric = types.MethodType( - CalcRiskMetricsPeriod.calc_return_periods_metric, self.calc_risk_period - ) - self.calc_risk_period.calc_risk_components_metric = types.MethodType( - CalcRiskMetricsPeriod.calc_risk_contributions_metric, self.calc_risk_period - ) - self.calc_risk_period.apply_measure = types.MethodType( - CalcRiskMetricsPeriod.apply_measure, self.calc_risk_period - ) - - self.calc_risk_period.per_date_eai_H0V0 = np.array( - [[1, 0, 1], [1, 2, 0], [3, 3, 3]] - ) - self.calc_risk_period.per_date_eai_H1V0 = np.array( - [[2, 0, 2], [2, 4, 0], [12, 6, 6]] - ) - self.calc_risk_period.per_date_aai_H0V0 = np.array([2, 3, 9]) - self.calc_risk_period.per_date_aai_H1V0 = np.array([4, 6, 24]) - - self.calc_risk_period.per_date_eai_H0V1 = np.array( - [[1, 0, 1], [1, 2, 0], [3, 3, 3]] - ) - self.calc_risk_period.per_date_eai_H1V1 = np.array( - [[2, 0, 2], [2, 4, 0], [12, 6, 6]] - ) - self.calc_risk_period.per_date_aai_H0V1 = np.array([2, 3, 9]) - self.calc_risk_period.per_date_aai_H1V1 = np.array([4, 6, 24]) - - self.calc_risk_period.date_idx = pd.PeriodIndex( - ["2020-01-01", "2025-01-01", "2030-01-01"], name=DATE_COL_NAME, freq="5Y" - ) - self.calc_risk_period.snapshot_start.exposure.gdf = gpd.GeoDataFrame( - { - GROUP_ID_COL_NAME: [1, 2, 2], - "geometry": [Point(0, 0), Point(1, 1), Point(2, 2)], - "value": [10, 10, 20], - } - ) - self.calc_risk_period.snapshot_end.exposure.gdf = gpd.GeoDataFrame( - { - GROUP_ID_COL_NAME: [1, 2, 2], - "geometry": [Point(0, 0), Point(1, 1), Point(2, 2)], - "value": [10, 10, 20], - } - ) - self.calc_risk_period.measure = MagicMock(spec=Measure) - self.calc_risk_period.measure.name = "dummy_measure" - - def test_calc_eai(self): - # Mock the return values of interp_over_hazard_dim - self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.side_effect = [ - "V0_interpolated_data", # First call (for per_date_eai_V0) - "V1_interpolated_data", # Second call (for per_date_eai_V1) - ] - # Mock the return value of interp_over_vulnerability_dim - self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.return_value = ( - "final_eai_result" - ) - - result = self.calc_risk_period.calc_eai() - - # Assert that interp_over_hazard_dim was called with the correct arguments - self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.assert_has_calls( - [ - call( - self.calc_risk_period.per_date_eai_H0V0, - self.calc_risk_period.per_date_eai_H1V0, - ), - call( - self.calc_risk_period.per_date_eai_H0V1, - self.calc_risk_period.per_date_eai_H1V1, - ), - ] - ) - - # Assert that 
interp_over_vulnerability_dim was called with the results of interp_over_hazard_dim - self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.assert_called_once_with( - "V0_interpolated_data", "V1_interpolated_data" - ) - - # Assert the final returned value - self.assertEqual(result, "final_eai_result") - - def test_calc_eai_gdf(self): - self.calc_risk_period._groups_id = np.array([0]) - expected_risk = np.array([[1.0, 1.5, 12], [0, 3, 6], [1, 0, 6]]) - self.calc_risk_period.per_date_eai = expected_risk - result = self.calc_risk_period.calc_eai_gdf() - expected_columns = { - GROUP_COL_NAME, - COORD_ID_COL_NAME, - DATE_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - } - self.assertTrue(expected_columns.issubset(set(result.columns))) - self.assertTrue((result[METRIC_COL_NAME] == EAI_METRIC_NAME).all()) - self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) - # Check calculated risk values by coord_id, date - actual_risk = result[RISK_COL_NAME].values - np.testing.assert_allclose(expected_risk.T.flatten(), actual_risk) - - def test_calc_aai_metric(self): - expected_aai = np.array([2, 4.5, 24]) - self.calc_risk_period.per_date_aai = expected_aai - self.calc_risk_period._groups_id = np.array([0]) - result = self.calc_risk_period.calc_aai_metric() - expected_columns = { - GROUP_COL_NAME, - DATE_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - } - self.assertTrue(expected_columns.issubset(set(result.columns))) - self.assertTrue((result[METRIC_COL_NAME] == AAI_METRIC_NAME).all()) - self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) - - # Check calculated risk values by coord_id, date - actual_risk = result[RISK_COL_NAME].values - np.testing.assert_allclose(expected_aai, actual_risk) - - def test_calc_aai_per_group_metric(self): - self.calc_risk_period._group_id_E0 = np.array([1, 1, 2]) - self.calc_risk_period._group_id_E1 = np.array([2, 2, 2]) - self.calc_risk_period._groups_id = np.array([1, 2]) - self.calc_risk_period.eai_gdf = pd.DataFrame( - { - DATE_COL_NAME: pd.PeriodIndex( - ["2020-01-01"] * 3 + ["2025-01-01"] * 3 + ["2030-01-01"] * 3, - name=DATE_COL_NAME, - freq="5Y", - ), - COORD_ID_COL_NAME: [0, 1, 2, 0, 1, 2, 0, 1, 2], - GROUP_COL_NAME: [1, 1, 2, 1, 1, 2, 1, 1, 2], - RISK_COL_NAME: [2, 3, 4, 5, 6, 7, 8, 9, 10], - METRIC_COL_NAME: [EAI_METRIC_NAME, EAI_METRIC_NAME, EAI_METRIC_NAME] - * 3, - MEASURE_COL_NAME: ["dummy_measure", "dummy_measure", "dummy_measure"] - * 3, - } - ) - self.calc_risk_period.eai_gdf[GROUP_COL_NAME] = self.calc_risk_period.eai_gdf[ - GROUP_COL_NAME - ].astype("category") - result = self.calc_risk_period.calc_aai_per_group_metric() - expected_columns = { - GROUP_COL_NAME, - DATE_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - } - self.assertTrue(expected_columns.issubset(set(result.columns))) - self.assertTrue((result[METRIC_COL_NAME] == AAI_METRIC_NAME).all()) - self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) - # Check calculated risk values by coord_id, date - expected_risk = np.array([5, 5, 6.6, 13.6, 3.4, 27]) - actual_risk = result[RISK_COL_NAME].values - np.testing.assert_allclose(expected_risk, actual_risk) - - def test_calc_return_periods_metric(self): - self.calc_risk_period._groups_id = np.array([0]) - self.calc_risk_period.per_date_return_periods_H0V0.return_value = "H0V0" - self.calc_risk_period.per_date_return_periods_H1V0.return_value = "H1V0" - self.calc_risk_period.per_date_return_periods_H0V1.return_value = "H0V1" - 
self.calc_risk_period.per_date_return_periods_H1V1.return_value = "H1V1" - # Mock the return values of interp_over_hazard_dim - self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.side_effect = [ - "V0_interpolated_data", # First call (for per_date_rp_V0) - "V1_interpolated_data", # Second call (for per_date_rp_V1) - ] - # Mock the return value of interp_over_vulnerability_dim - self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.return_value = np.array( - [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - ) - - result = self.calc_risk_period.calc_return_periods_metric([10, 20, 30]) - - # Assert that interp_over_hazard_dim was called with the correct arguments - self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.assert_has_calls( - [call("H0V0", "H1V0"), call("H0V1", "H1V1")] - ) - - # Assert that interp_over_vulnerability_dim was called with the results of interp_over_hazard_dim - self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.assert_called_once_with( - "V0_interpolated_data", "V1_interpolated_data" - ) - - # Assert the final returned value - - expected_columns = { - GROUP_COL_NAME, - DATE_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - } - self.assertTrue(expected_columns.issubset(set(result.columns))) - self.assertTrue( - all(result[METRIC_COL_NAME].unique() == ["rp_10", "rp_20", "rp_30"]) - ) - self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) - - # Check calculated risk values by rp, date - np.testing.assert_allclose( - result[RISK_COL_NAME].values, np.array([1, 4, 7, 2, 5, 8, 3, 6, 9]) - ) - - def test_calc_risk_components_metric(self): - self.calc_risk_period._groups_id = np.array([0]) - self.calc_risk_period.per_date_aai_H0V0 = np.array([1, 3, 5]) - self.calc_risk_period.per_date_aai_E0H0V0 = np.array([1, 1, 1]) - self.calc_risk_period.per_date_aai_E0H1V0 = np.array( - [2, 2, 2] - ) # Haz change doubles damages in fut - self.calc_risk_period.per_date_aai_E0H0V1 = np.array( - [3, 3, 3] - ) # Vul change triples damages in fut - self.calc_risk_period.per_date_aai = np.array([1, 6, 10]) - - # Mock the return values of interp_over_hazard_dim - self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.return_value = np.array( - [1, 1.5, 2] - ) - - # Mock the return value of interp_over_vulnerability_dim - self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.return_value = np.array( - [1, 2, 3] - ) - - result = self.calc_risk_period.calc_risk_components_metric() - - # Assert that interp_over_hazard_dim was called with the correct arguments - self.calc_risk_period.interpolation_strategy.interp_over_hazard_dim.assert_called_once_with( - self.calc_risk_period.per_date_aai_E0H0V0, - self.calc_risk_period.per_date_aai_E0H1V0, - ) - - # Assert that interp_over_vulnerability_dim was called with the results of interp_over_hazard_dim - self.calc_risk_period.interpolation_strategy.interp_over_vulnerability_dim.assert_called_once_with( - self.calc_risk_period.per_date_aai_E0H0V0, - self.calc_risk_period.per_date_aai_E0H0V1, - ) - - # Assert the final returned value - expected_columns = { - GROUP_COL_NAME, - DATE_COL_NAME, - RISK_COL_NAME, - METRIC_COL_NAME, - MEASURE_COL_NAME, - } - self.assertTrue(expected_columns.issubset(set(result.columns))) - self.assertTrue( - all( - result[METRIC_COL_NAME].unique() - == [ - CONTRIBUTION_BASE_RISK_NAME, - CONTRIBUTION_EXPOSURE_NAME, - CONTRIBUTION_HAZARD_NAME, - CONTRIBUTION_VULNERABILITY_NAME, - 
CONTRIBUTION_INTERACTION_TERM_NAME, - ] - ) - ) - self.assertTrue((result[MEASURE_COL_NAME] == "dummy_measure").all()) - - np.testing.assert_allclose( - result[RISK_COL_NAME].values, - np.array([1.0, 1.0, 1.0, 0, 2.0, 4.0, 0, 0.5, 1.0, 0, 1, 2, 0, 1.5, 2.0]), - ) - - @patch("climada.trajectories.riskperiod.CalcRiskMetricsPeriod") - def test_apply_measure(self, mock_CalcRiskPeriod): - mock_CalcRiskPeriod.return_value = MagicMock(spec=CalcRiskMetricsPeriod) - self.calc_risk_period.snapshot_start.apply_measure.return_value = 2 - self.calc_risk_period.snapshot_end.apply_measure.return_value = 3 - result = self.calc_risk_period.apply_measure(self.calc_risk_period.measure) - self.assertEqual(result.measure, self.calc_risk_period.measure) - mock_CalcRiskPeriod.assert_called_with( - 2, - 3, - self.calc_risk_period.time_resolution, - self.calc_risk_period.interpolation_strategy, - self.calc_risk_period.impact_computation_strategy, - ) - - -def assert_sparse_matrix_array_equal(expected_array, actual_array): - """ - Compares two numpy arrays where elements are sparse matrices. - Uses numpy testing for robust comparison of the sparse matrix internals. - """ - if len(expected_array) != len(actual_array): - raise AssertionError( - f"Expected array length {len(expected_array)} but got {len(actual_array)}" - ) - - for i, (expected_mat, actual_mat) in enumerate(zip(expected_array, actual_array)): - if not (issparse(expected_mat) and issparse(actual_mat)): - raise TypeError(f"Element at index {i} is not a sparse matrix.") - - # Robustly compare the underlying data - np.testing.assert_array_equal( - expected_mat.data, - actual_mat.data, - err_msg=f"Data differs at matrix index {i}", - ) - np.testing.assert_array_equal( - expected_mat.indices, - actual_mat.indices, - err_msg=f"Indices differ at matrix index {i}", - ) - np.testing.assert_array_equal( - expected_mat.indptr, - actual_mat.indptr, - err_msg=f"Indptr differs at matrix index {i}", - ) - # You may also want to assert equal shapes: - assert ( - expected_mat.shape == actual_mat.shape - ), f"Shape differs at matrix index {i}" - - -if __name__ == "__main__": - TESTS = unittest.TestLoader().loadTestsFromTestCase( - TestCalcRiskMetricsPeriod_TopLevel - ) - TESTS.addTests( - unittest.TestLoader().loadTestsFromTestCase(TestCalcRiskMetricsPoints) - ) - TESTS.addTests( - unittest.TestLoader().loadTestsFromTestCase(TestCalcRiskPeriod_LowLevel) - ) - unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/trajectories/test/test_trajectory.py b/climada/trajectories/test/test_trajectory.py new file mode 100644 index 0000000000..c39d6c9aac --- /dev/null +++ b/climada/trajectories/test/test_trajectory.py @@ -0,0 +1,326 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . 
+ +--- + +unit tests for risk_trajectory + +""" + +import datetime +import unittest +from unittest.mock import MagicMock, Mock, call, patch + +import pandas as pd + +from climada.entity.disc_rates.base import DiscRates +from climada.trajectories.constants import AAI_METRIC_NAME +from climada.trajectories.snapshot import Snapshot +from climada.trajectories.trajectory import ( + DEFAULT_ALLGROUP_NAME, + DEFAULT_RP, + RiskTrajectory, +) + + +class TestRiskTrajectory(unittest.TestCase): + def setUp(self) -> None: + self.mock_snapshot1 = MagicMock(spec=Snapshot) + self.mock_snapshot1.date = datetime.date(2023, 1, 1) + + self.mock_snapshot2 = MagicMock(spec=Snapshot) + self.mock_snapshot2.date = datetime.date(2024, 1, 1) + + self.mock_snapshot3 = MagicMock(spec=Snapshot) + self.mock_snapshot3.date = datetime.date(2025, 1, 1) + + self.risk_disc_rates = MagicMock(spec=DiscRates) + self.risk_disc_rates.years = [2023, 2024, 2025] + self.risk_disc_rates.rates = [0.01, 0.02, 0.03] # Example rates + + self.snapshots_list: list[Snapshot] = [ + self.mock_snapshot1, + self.mock_snapshot2, + self.mock_snapshot3, + ] + + self.custom_all_groups_name = "custom" + self.custom_return_periods = [10, 20] + + def test_init_basic(self): + rt = RiskTrajectory(self.snapshots_list) + self.assertEqual(rt.start_date, self.mock_snapshot1.date) + self.assertEqual(rt.end_date, self.mock_snapshot3.date) + self.assertIsNone(rt._risk_disc_rates) + self.assertEqual(rt._all_groups_name, DEFAULT_ALLGROUP_NAME) + self.assertEqual(rt._return_periods, DEFAULT_RP) + # Check that metrics are reset (initially None) + for metric in RiskTrajectory.POSSIBLE_METRICS: + self.assertIsNone(getattr(rt, "_" + metric + "_metrics")) + + def test_init_args(self): + rt = RiskTrajectory( + self.snapshots_list, + return_periods=self.custom_return_periods, + all_groups_name=self.custom_all_groups_name, + risk_disc_rates=self.risk_disc_rates, + ) + self.assertEqual(rt.start_date, self.mock_snapshot1.date) + self.assertEqual(rt.end_date, self.mock_snapshot3.date) + self.assertEqual(rt._risk_disc_rates, self.risk_disc_rates) + self.assertEqual(rt._all_groups_name, self.custom_all_groups_name) + self.assertEqual(rt._return_periods, self.custom_return_periods) + self.assertEqual(rt.return_periods, self.custom_return_periods) + # Check that metrics are reset (initially None) + for metric in RiskTrajectory.POSSIBLE_METRICS: + self.assertIsNone(getattr(rt, "_" + metric + "_metrics")) + + @patch.object(RiskTrajectory, "_generic_metrics", new_callable=Mock) + def test_compute_metrics(self, mock_generic_metrics): + mock_generic_metrics.return_value = "42" + rt = RiskTrajectory(self.snapshots_list) + result = rt._compute_metrics( + metric_name="dummy_name", + metric_meth="dummy_meth", + dummy_kwarg1="A", + dummy_kwarg2=12, + ) + mock_generic_metrics.assert_called_once_with( + metric_name="dummy_name", + metric_meth="dummy_meth", + dummy_kwarg1="A", + dummy_kwarg2=12, + ) + self.assertEqual(result, "42") + + def test_set_return_periods(self): + rt = RiskTrajectory(self.snapshots_list) + with self.assertRaises(ValueError): + rt.return_periods = "A" + with self.assertRaises(ValueError): + rt.return_periods = ["A"] + + rt.return_periods = [1, 2] + self.assertEqual(rt._return_periods, [1, 2]) + self.assertEqual(rt.return_periods, [1, 2]) + + @patch.object(RiskTrajectory, "_reset_metrics", new_callable=Mock) + def test_set_disc_rates(self, mock_reset_metrics): + rt = RiskTrajectory(self.snapshots_list) + mock_reset_metrics.assert_called_once() # Called during init + 
with self.assertRaises(ValueError): + rt.risk_disc_rates = "A" + + rt.risk_disc_rates = self.risk_disc_rates + mock_reset_metrics.assert_has_calls([call(), call()]) + self.assertEqual(rt._risk_disc_rates, self.risk_disc_rates) + self.assertEqual(rt.risk_disc_rates, self.risk_disc_rates) + + def test_npv_transform_no_group_col(self): + df_input = pd.DataFrame( + { + "date": pd.to_datetime(["2023-01-01", "2024-01-01"] * 2), + "measure": ["m1", "m1", "m2", "m2"], + "metric": [ + AAI_METRIC_NAME, + AAI_METRIC_NAME, + AAI_METRIC_NAME, + AAI_METRIC_NAME, + ], + "risk": [100.0, 200.0, 80.0, 180.0], + } + ) + # Mock the internal calc_npv_cash_flows + with patch( + "climada.trajectories.trajectory.RiskTrajectory._calc_npv_cash_flows" + ) as mock_calc_npv: + # For each group, it will be called + mock_calc_npv.side_effect = [ + pd.Series( + [100.0 * (1 / (1 + 0.01)) ** 0, 200.0 * (1 / (1 + 0.02)) ** 1], + index=[pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], + ), + pd.Series( + [80.0 * (1 / (1 + 0.01)) ** 0, 180.0 * (1 / (1 + 0.02)) ** 1], + index=[pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], + ), + ] + result_df = RiskTrajectory.npv_transform( + df_input.copy(), self.risk_disc_rates + ) + # Assertions for mock calls + # Grouping by 'measure', 'metric' (default _grouper) + pd.testing.assert_series_equal( + mock_calc_npv.mock_calls[0].args[0], + pd.Series( + [100.0, 200.0], + index=pd.Index( + [ + pd.Timestamp("2023-01-01"), + pd.Timestamp("2024-01-01"), + ], + name="date", + ), + name=("m1", AAI_METRIC_NAME), + ), + ) + assert mock_calc_npv.mock_calls[0].args[1] == pd.Timestamp("2023-01-01") + assert mock_calc_npv.mock_calls[0].args[2] == self.risk_disc_rates + pd.testing.assert_series_equal( + mock_calc_npv.mock_calls[1].args[0], + pd.Series( + [80.0, 180.0], + index=pd.Index( + [ + pd.Timestamp("2023-01-01"), + pd.Timestamp("2024-01-01"), + ], + name="date", + ), + name=("m2", AAI_METRIC_NAME), + ), + ) + assert mock_calc_npv.mock_calls[1].args[1] == pd.Timestamp("2023-01-01") + assert mock_calc_npv.mock_calls[1].args[2] == self.risk_disc_rates + + expected_df = pd.DataFrame( + { + "date": pd.to_datetime(["2023-01-01", "2024-01-01"] * 2), + "measure": ["m1", "m1", "m2", "m2"], + "metric": [ + AAI_METRIC_NAME, + AAI_METRIC_NAME, + AAI_METRIC_NAME, + AAI_METRIC_NAME, + ], + "risk": [ + 100.0 * (1 / (1 + 0.01)) ** 0, + 200.0 * (1 / (1 + 0.02)) ** 1, + 80.0 * (1 / (1 + 0.01)) ** 0, + 180.0 * (1 / (1 + 0.02)) ** 1, + ], + } + ) + pd.testing.assert_frame_equal( + result_df.sort_values("date").reset_index(drop=True), + expected_df.sort_values("date").reset_index(drop=True), + rtol=1e-6, + ) + + def test_npv_transform_with_group_col(self): + df_input = pd.DataFrame( + { + "date": pd.to_datetime(["2023-01-01", "2024-01-01", "2023-01-01"]), + "group": ["G1", "G1", "G2"], + "measure": ["m1", "m1", "m1"], + "metric": [AAI_METRIC_NAME, AAI_METRIC_NAME, AAI_METRIC_NAME], + "risk": [100.0, 200.0, 150.0], + } + ) + with patch( + "climada.trajectories.trajectory.RiskTrajectory._calc_npv_cash_flows" + ) as mock_calc_npv: + mock_calc_npv.side_effect = [ + # First group G1, m1, aai + pd.Series( + [100.0 * (1 / (1 + 0.01)) ** 0, 200.0 * (1 / (1 + 0.02)) ** 1], + index=[pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], + ), + # Second group G2, m1, aai + pd.Series( + [150.0 * (1 / (1 + 0.01)) ** 0], index=[pd.Timestamp("2023-01-01")] + ), + ] + result_df = RiskTrajectory.npv_transform( + df_input.copy(), self.risk_disc_rates + ) + + expected_df = pd.DataFrame( + { + "date": 
pd.to_datetime(["2023-01-01", "2024-01-01", "2023-01-01"]), + "group": ["G1", "G1", "G2"], + "measure": ["m1", "m1", "m1"], + "metric": [AAI_METRIC_NAME, AAI_METRIC_NAME, AAI_METRIC_NAME], + "risk": [ + 100.0 * (1 / (1 + 0.01)) ** 0, + 200.0 * (1 / (1 + 0.02)) ** 1, + 150.0 * (1 / (1 + 0.01)) ** 0, + ], + } + ) + pd.testing.assert_frame_equal( + result_df.sort_values(["group", "date"]).reset_index(drop=True), + expected_df.sort_values(["group", "date"]).reset_index(drop=True), + rtol=1e-6, + ) + + # --- Test NPV Transformation (`npv_transform` and `calc_npv_cash_flows`) --- + + ## Test `calc_npv_cash_flows` (standalone function) + def test_calc_npv_cash_flows_no_disc(self): + cash_flows = pd.Series( + [100, 200, 300], + index=pd.to_datetime(["2023-01-01", "2024-01-01", "2025-01-01"]), + ) + start_date = datetime.date(2023, 1, 1) + result = RiskTrajectory._calc_npv_cash_flows( + cash_flows, start_date, disc_rates=None + ) + # If no disc, it should return the original cash_flows Series + pd.testing.assert_series_equal(result, cash_flows) + + def test_calc_npv_cash_flows_with_disc(self): + cash_flows = pd.Series( + [100, 200, 300], + index=pd.period_range(start="2023-01-01", end="2025-01-01", freq="Y"), + ) + start_date = datetime.date(2023, 1, 1) + # Using the risk_disc_rates from SetUp + + # year 2023: (2023-01-01 - 2023-01-01) days // 365 = 0, factor = (1/(1+0.01))^0 = 1 + # year 2024: (2024-01-01 - 2023-01-01) days // 365 = 1, factor = (1/(1+0.02))^1 = 0.98039215... + # year 2025: (2025-01-01 - 2023-01-01) days // 365 = 2, factor = (1/(1+0.03))^2 = 0.9425959... + expected_cash_flows = pd.Series( + [ + 100 * (1 / (1 + 0.01)) ** 0, + 200 * (1 / (1 + 0.02)) ** 1, + 300 * (1 / (1 + 0.03)) ** 2, + ], + index=pd.period_range(start="2023-01-01", end="2025-01-01", freq="Y"), + name="npv_cash_flow", + ) + + result = RiskTrajectory._calc_npv_cash_flows( + cash_flows, start_date, disc_rates=self.risk_disc_rates + ) + pd.testing.assert_series_equal( + result, expected_cash_flows, check_dtype=False, rtol=1e-6 + ) + + def test_calc_npv_cash_flows_invalid_index(self): + cash_flows = pd.Series([100, 200, 300]) # No datetime index + start_date = datetime.date(2023, 1, 1) + with self.assertRaises( + ValueError, msg="cash_flows must be a pandas Series with a datetime index" + ): + RiskTrajectory._calc_npv_cash_flows( + cash_flows, start_date, disc_rates=self.risk_disc_rates + ) + + +if __name__ == "__main__": + TESTS = unittest.TestLoader().loadTestsFromTestCase(TestRiskTrajectory) + unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/trajectories/trajectory.py b/climada/trajectories/trajectory.py new file mode 100644 index 0000000000..5675521710 --- /dev/null +++ b/climada/trajectories/trajectory.py @@ -0,0 +1,268 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . + +--- + +This file implements abstract trajectory objects, to factorise the code common to +interpolated and static trajectories. 
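+
+A minimal usage sketch (assuming two existing :class:`Snapshot` objects
+``snap0`` and ``snap1``; ``StaticRiskTrajectory`` is one concrete subclass)::
+
+    traj = StaticRiskTrajectory([snap0, snap1])
+    metrics = traj.per_date_risk_metrics()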
+
+"""
+
+import datetime
+import logging
+from abc import ABC
+
+import pandas as pd
+
+from climada.entity.disc_rates.base import DiscRates
+from climada.trajectories.constants import (
+    DATE_COL_NAME,
+    DEFAULT_ALLGROUP_NAME,
+    DEFAULT_RP,
+    GROUP_COL_NAME,
+    MEASURE_COL_NAME,
+    METRIC_COL_NAME,
+    PERIOD_COL_NAME,
+    RISK_COL_NAME,
+    UNIT_COL_NAME,
+)
+from climada.trajectories.snapshot import Snapshot
+
+LOGGER = logging.getLogger(__name__)
+
+__all__ = ["RiskTrajectory"]
+
+DEFAULT_DF_COLUMN_PRIORITY = [
+    DATE_COL_NAME,
+    PERIOD_COL_NAME,
+    GROUP_COL_NAME,
+    MEASURE_COL_NAME,
+    METRIC_COL_NAME,
+    UNIT_COL_NAME,
+]
+INDEXING_COLUMNS = [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME]
+
+
+class RiskTrajectory(ABC):
+    _grouper = [MEASURE_COL_NAME, METRIC_COL_NAME]
+    """Results dataframe grouper used in most `groupby()` calls."""
+
+    POSSIBLE_METRICS = []
+    """Class variable listing the risk metrics that can be computed."""
+
+    def __init__(
+        self,
+        snapshots_list: list[Snapshot],
+        *,
+        return_periods: list[int] = DEFAULT_RP,
+        all_groups_name: str = DEFAULT_ALLGROUP_NAME,
+        risk_disc_rates: DiscRates | None = None,
+    ):
+        """Base abstract class for risk trajectory objects.
+
+        See the concrete implementations :class:`StaticRiskTrajectory` and
+        :class:`InterpolatedRiskTrajectory` for more details.
+
+        """
+
+        self._reset_metrics()
+        self._snapshots = sorted(snapshots_list, key=lambda snap: snap.date)
+        self._all_groups_name = all_groups_name
+        self._return_periods = return_periods
+        self.start_date = min([snapshot.date for snapshot in snapshots_list])
+        self.end_date = max([snapshot.date for snapshot in snapshots_list])
+        self._risk_disc_rates = risk_disc_rates
+
+    def _reset_metrics(self) -> None:
+        """Resets the computed metrics to None.
+
+        This method is called during initialisation to set the attribute
+        holding each metric listed in `POSSIBLE_METRICS` to `None`.
+
+        It is also called whenever a property that would change the results of
+        already computed metrics is modified (for instance, changing the time
+        resolution in :class:`InterpolatedRiskMetrics`).
+
+        """
+        for metric in self.POSSIBLE_METRICS:
+            setattr(self, "_" + metric + "_metrics", None)
+
+    def _generic_metrics(
+        self, /, metric_name: str, metric_meth: str, **kwargs
+    ) -> pd.DataFrame:
+        """Main method returning the results of a specific metric.
+
+        Child classes of :class:`RiskTrajectory` override this method, calling
+        the `_generic_metrics()` of their parent and adding the parts of the
+        computation and treatment that are specific to them.
+
+        See also
+        --------
+
+        - :meth:`_compute_metrics`
+
+        """
+        ...
+
+    def _compute_metrics(
+        self, /, metric_name: str, metric_meth: str, **kwargs
+    ) -> pd.DataFrame:
+        """Helper method to compute metrics.
+
+        Notes
+        -----
+
+        This method exists for the sake of child classes (e.g. for option
+        appraisal), whose `_generic_metrics` can have a different signature
+        and extend the parent method, while this method keeps the same
+        signature for all classes.
+        """
+        return self._generic_metrics(
+            metric_name=metric_name, metric_meth=metric_meth, **kwargs
+        )
+
+    @property
+    def return_periods(self) -> list[int]:
+        """The return period values to use when computing risk period metrics.
+
+        Notes
+        -----
+
+        Changing its value resets the corresponding metric.
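+
+        For example (a sketch; ``traj`` stands for any concrete
+        :class:`RiskTrajectory` instance)::
+
+            traj.return_periods = [10, 50]  # valid, resets the stored metric
+            traj.return_periods = "10"      # raises ValueError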
+
+        """
+        return self._return_periods
+
+    @return_periods.setter
+    def return_periods(self, value, /):
+        if not isinstance(value, list):
+            raise ValueError("Return periods need to be a list of int.")
+        if any(not isinstance(i, int) for i in value):
+            raise ValueError("Return periods need to be a list of int.")
+        self._return_periods_metrics = None
+        self._return_periods = value
+
+    @property
+    def risk_disc_rates(self) -> DiscRates | None:
+        """The discount rate applied to compute net present values.
+        None means no discount rate.
+
+        Notes
+        -----
+
+        Changing its value resets all the metrics.
+        """
+        return self._risk_disc_rates
+
+    @risk_disc_rates.setter
+    def risk_disc_rates(self, value, /):
+        if value is not None and not isinstance(value, DiscRates):
+            raise ValueError("Risk discount needs to be a `DiscRates` object.")
+
+        self._reset_metrics()
+        self._risk_disc_rates = value
+
+    @classmethod
+    def npv_transform(
+        cls, df: pd.DataFrame, risk_disc_rates: DiscRates
+    ) -> pd.DataFrame:
+        """Apply the provided discount rate to the provided metric `DataFrame`.
+
+        Parameters
+        ----------
+        df : pd.DataFrame
+            The `DataFrame` of the metric to discount.
+        risk_disc_rates : DiscRates
+            The discount rate to apply.
+
+        Returns
+        -------
+        pd.DataFrame
+            The discounted risk metric.
+
+        """
+
+        def _npv_group(group, disc):
+            start_date = group.index.get_level_values(DATE_COL_NAME).min()
+            return cls._calc_npv_cash_flows(group, start_date, disc)
+
+        df = df.set_index(DATE_COL_NAME)
+        grouper = cls._grouper
+        if GROUP_COL_NAME in df.columns:
+            grouper = [GROUP_COL_NAME] + grouper
+
+        df[RISK_COL_NAME] = df.groupby(
+            grouper,
+            dropna=False,
+            as_index=False,
+            group_keys=False,
+            observed=True,
+        )[RISK_COL_NAME].transform(_npv_group, risk_disc_rates)
+        df = df.reset_index()
+        return df
+
+    @staticmethod
+    def _calc_npv_cash_flows(
+        cash_flows: pd.Series,
+        start_date: datetime.date,
+        disc_rates: DiscRates | None = None,
+    ) -> pd.Series:
+        """Apply a discount rate to cash flows.
+
+        If `disc_rates` is defined, applies it to the given cash flows,
+        assuming the present year corresponds to `start_date`.
+
+        Parameters
+        ----------
+        cash_flows : pd.Series
+            The cash flows to apply the discount rate to, indexed by a
+            `PeriodIndex` or `DatetimeIndex`.
+        start_date : datetime.date
+            The date representing the present.
+        disc_rates : DiscRates, optional
+            The discount rate to apply.
+
+        Returns
+        -------
+        pd.Series
+            A copy of `cash_flows` where values are discounted according to
+            `disc_rates` (or `cash_flows` unchanged if `disc_rates` is None).
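+
+        Examples
+        --------
+        A sketch of the underlying arithmetic (the values mirror the unit
+        tests): a cash flow of 200 occurring one year after `start_date`,
+        with a 2% rate for that year, is discounted as::
+
+            200 * (1 / (1 + 0.02)) ** 1  # ~196.08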
+
+        """
+
+        if not disc_rates:
+            return cash_flows
+
+        if not isinstance(cash_flows.index, (pd.PeriodIndex, pd.DatetimeIndex)):
+            raise ValueError(
+                "cash_flows must be a pandas Series with a PeriodIndex or DatetimeIndex"
+            )
+
+        df = cash_flows.to_frame(name="cash_flow")  # type: ignore
+        df["year"] = df.index.year
+
+        # Merge with the discount rates based on the year
+        tmp = df.merge(
+            pd.DataFrame({"year": disc_rates.years, "rate": disc_rates.rates}),
+            on="year",
+            how="left",
+        )
+        tmp.index = df.index
+        df = tmp.copy()
+        df["discount_factor"] = (1 / (1 + df["rate"])) ** (
+            df.index.year - start_date.year
+        )
+
+        # Apply the discount factors to the cash flows
+        df["npv_cash_flow"] = df["cash_flow"] * df["discount_factor"]
+        return df["npv_cash_flow"]

From bf0026264b5072212ed008637cc3704b5df20aa9 Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Thu, 18 Dec 2025 11:07:14 +0100
Subject: [PATCH 06/37] adds __init__

---
 climada/trajectories/__init__.py | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 climada/trajectories/__init__.py

diff --git a/climada/trajectories/__init__.py b/climada/trajectories/__init__.py
new file mode 100644
index 0000000000..91aca62d1c
--- /dev/null
+++ b/climada/trajectories/__init__.py
@@ -0,0 +1,28 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see .
+
+---
+
+This module implements risk trajectory objects which enable computation and
+possibly interpolation of risk metrics over multiple dates.
+
+"""
+
+from .snapshot import Snapshot
+
+__all__ = [
+    "Snapshot",
+]

From 50ab78bf8abcf465d53c684e11fb4b6d417fc7d4 Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Thu, 18 Dec 2025 11:10:45 +0100
Subject: [PATCH 07/37] cherry picks __init__

---
 climada/trajectories/__init__.py | 33 ++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 climada/trajectories/__init__.py

diff --git a/climada/trajectories/__init__.py b/climada/trajectories/__init__.py
new file mode 100644
index 0000000000..575b993969
--- /dev/null
+++ b/climada/trajectories/__init__.py
@@ -0,0 +1,33 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see .
+
+---
+
+This module implements risk trajectory objects which enable computation and
+possibly interpolation of risk metrics over multiple dates.
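+
+For example (a sketch of the package-level imports re-exported below)::
+
+    from climada.trajectories import Snapshot, StaticRiskTrajectory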
+ +""" + +from .interpolation import AllLinearStrategy, ExponentialExposureStrategy +from .snapshot import Snapshot +from .static_trajectory import StaticRiskTrajectory + +__all__ = [ + "AllLinearStrategy", + "ExponentialExposureStrategy", + "Snapshot", + "StaticRiskTrajectory", +] From 6be5e6cfd5592c46ff28541d20b7e0172788d5fe Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 11:17:14 +0100 Subject: [PATCH 08/37] namespace fixes --- climada/trajectories/static_trajectory.py | 2 +- climada/trajectories/test/test_calc_risk_metrics.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/climada/trajectories/static_trajectory.py b/climada/trajectories/static_trajectory.py index 73944b6639..c9da1949a6 100644 --- a/climada/trajectories/static_trajectory.py +++ b/climada/trajectories/static_trajectory.py @@ -26,6 +26,7 @@ import pandas as pd from climada.entity.disc_rates.base import DiscRates +from climada.trajectories.calc_risk_metrics import CalcRiskMetricsPoints from climada.trajectories.constants import ( AAI_METRIC_NAME, AAI_PER_GROUP_METRIC_NAME, @@ -43,7 +44,6 @@ ImpactCalcComputation, ImpactComputationStrategy, ) -from climada.trajectories.riskperiod import CalcRiskMetricsPoints from climada.trajectories.snapshot import Snapshot from climada.trajectories.trajectory import ( DEFAULT_ALLGROUP_NAME, diff --git a/climada/trajectories/test/test_calc_risk_metrics.py b/climada/trajectories/test/test_calc_risk_metrics.py index 493736d350..7485f3cd3f 100644 --- a/climada/trajectories/test/test_calc_risk_metrics.py +++ b/climada/trajectories/test/test_calc_risk_metrics.py @@ -430,7 +430,7 @@ def test_calc_return_periods_metric(self): self.assertEqual(result_df[GROUP_COL_NAME].dtype.name, "category") @patch.object(Snapshot, "apply_measure") - @patch("climada.trajectories.riskperiod.CalcRiskMetricsPoints") + @patch("climada.trajectories.calc_risk_metrics.CalcRiskMetricsPoints") def test_apply_measure(self, mock_CalcRiskMetricPoints, mock_snap_apply_measure): mock_CalcRiskMetricPoints.return_value = MagicMock(spec=CalcRiskMetricsPoints) mock_snap_apply_measure.return_value = 42 From 0e99117c6f1bd4007063c7a3fa7b351c6f50ca86 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 11:17:26 +0100 Subject: [PATCH 09/37] cherry picks dataframe handling --- climada/util/dataframe_handling.py | 63 ++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 climada/util/dataframe_handling.py diff --git a/climada/util/dataframe_handling.py b/climada/util/dataframe_handling.py new file mode 100644 index 0000000000..b5ac6bef97 --- /dev/null +++ b/climada/util/dataframe_handling.py @@ -0,0 +1,63 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . 
+
+---
+
+Define utility functions for handling DataFrames.
+"""
+
+import pandas as pd
+
+
+def reorder_dataframe_columns(
+    df: pd.DataFrame, priority_order: list[str], keep_remaining: bool = True
+) -> pd.DataFrame:
+    """
+    Applies a column priority list to a DataFrame to reorder its columns.
+
+    This function is robust to cases where:
+    1. Columns in 'priority_order' are not in the DataFrame (they are ignored).
+    2. Columns in the DataFrame are not in 'priority_order' (they are kept at
+       the end or dropped, depending on 'keep_remaining').
+
+    Parameters
+    ----------
+    df : pd.DataFrame
+        The input DataFrame.
+    priority_order : list[str]
+        A list of strings defining the desired column
+        order. Columns listed first have higher priority.
+    keep_remaining : bool
+        If True, any columns in the DataFrame but NOT in
+        'priority_order' will be appended to the end in their
+        original relative order. If False, these columns
+        are dropped.
+
+    Returns
+    -------
+    pd.DataFrame
+        The DataFrame with columns reordered according to the priority list.
+    """
+
+    present_priority_columns = [col for col in priority_order if col in df.columns]
+
+    new_column_order = present_priority_columns
+
+    if keep_remaining:
+        remaining_columns = [
+            col for col in df.columns if col not in present_priority_columns
+        ]
+
+        new_column_order.extend(remaining_columns)
+
+    return df[new_column_order]

From dd63bf497c9310b0721ca038ae38767a9d2ac4b0 Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Thu, 18 Dec 2025 14:44:23 +0100
Subject: [PATCH 10/37] Introduces on/off option for caching

---
 climada/trajectories/calc_risk_metrics.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/climada/trajectories/calc_risk_metrics.py b/climada/trajectories/calc_risk_metrics.py
index 2d325fb495..ce3818593c 100644
--- a/climada/trajectories/calc_risk_metrics.py
+++ b/climada/trajectories/calc_risk_metrics.py
@@ -56,6 +56,8 @@
     "CalcRiskMetricsPoints",
 ]
 
+_CACHE_SETTINGS = {"ENABLE_LAZY_CACHE": False}
+
 
 def lazy_property(method):
     # This function is used as a decorator for properties
@@ -67,11 +69,12 @@ def lazy_property(method):
 
     @property
     def _lazy(self):
+        if not _CACHE_SETTINGS.get("ENABLE_LAZY_CACHE", True):
+            return method(self)
+
         if getattr(self, attr_name) is None:
-            # LOGGER.debug(
-            #     f"Computing {method.__name__} for {self._snapshot0.date}-{self._snapshot1.date} with {meas_n}."
-            # )
             setattr(self, attr_name, method(self))
+
         return getattr(self, attr_name)
 
     return _lazy

From a6932e810ec3a9cb749d246af5a963d9ec7635ef Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Thu, 18 Dec 2025 14:44:55 +0100
Subject: [PATCH 11/37] removes redundant code

---
 climada/trajectories/calc_risk_metrics.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/climada/trajectories/calc_risk_metrics.py b/climada/trajectories/calc_risk_metrics.py
index ce3818593c..e7ba88c143 100644
--- a/climada/trajectories/calc_risk_metrics.py
+++ b/climada/trajectories/calc_risk_metrics.py
@@ -185,8 +185,7 @@ def per_date_aai(self) -> np.ndarray:
 
         return np.array([imp.aai_agg for imp in self.impacts])
 
-    @lazy_property
-    def eai_gdf(self) -> pd.DataFrame:
+    def calc_eai_gdf(self) -> pd.DataFrame:
         """Convenience function returning a DataFrame (with both datetime and
         coordinates) from `per_date_eai`. This can easily be merged with the
         GeoDataFrame of the exposure object of one of the `Snapshot`.
 
         Returns
         -------
         pd.DataFrame
             The DataFrame from the first snapshot of the list is used as a basis
             (notably for `value` and `group_id`).
""" - return self.calc_eai_gdf() - - def calc_eai_gdf(self) -> pd.DataFrame: - """Merge the per date EAIs of the risk period with the Dataframe of the exposure of the starting snapshot.""" df = pd.DataFrame(self.per_date_eai, index=self._date_idx) df = df.reset_index().melt( @@ -257,7 +252,7 @@ def calc_aai_per_group_metric(self) -> pd.DataFrame | None: ) return None - eai_pres_groups = self.eai_gdf[ + eai_pres_groups = self.calc_eai_gdf()[ [DATE_COL_NAME, COORD_ID_COL_NAME, GROUP_COL_NAME, RISK_COL_NAME] ].copy() aai_per_group_df = eai_pres_groups.groupby( From 49e1cad666e740602b29016434ba2f282e255cf2 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 14:55:42 +0100 Subject: [PATCH 12/37] Clarifies docstring --- climada/trajectories/static_trajectory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/climada/trajectories/static_trajectory.py b/climada/trajectories/static_trajectory.py index c9da1949a6..281887f347 100644 --- a/climada/trajectories/static_trajectory.py +++ b/climada/trajectories/static_trajectory.py @@ -110,7 +110,7 @@ def __init__( The return periods to use when computing the `return_periods_metric`. Defaults to `DEFAULT_RP` ([20, 50, 100]). all_groups_name: str, optional - The string to use to define all exposure points subgroup. + The string that should be used to define "all exposure points" subgroup. Defaults to `DEFAULT_ALLGROUP_NAME` ("All"). risk_disc_rates: DiscRates, optional The discount rate to apply to future risk. Defaults to None. From cc74d4abd28d96f851c38dbc13d8135e06ae259e Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 14:58:52 +0100 Subject: [PATCH 13/37] Cherry picks tests --- climada/test/test_trajectories.py | 289 +++++++++++++ .../test/test_static_risk_trajectory.py | 379 ++++++++++++++++++ 2 files changed, 668 insertions(+) create mode 100644 climada/test/test_trajectories.py create mode 100644 climada/trajectories/test/test_static_risk_trajectory.py diff --git a/climada/test/test_trajectories.py b/climada/test/test_trajectories.py new file mode 100644 index 0000000000..bc47ff531e --- /dev/null +++ b/climada/test/test_trajectories.py @@ -0,0 +1,289 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . +--- + +Test trajectories. 
+ +""" + +import copy +from itertools import groupby +from unittest import TestCase + +import geopandas as gpd +import numpy as np +import pandas as pd + +from climada.engine.impact_calc import ImpactCalc +from climada.entity.disc_rates.base import DiscRates +from climada.entity.impact_funcs.base import ImpactFunc +from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.test.reusable import ( + CATEGORIES, + reusable_minimal_exposures, + reusable_minimal_hazard, + reusable_minimal_impfset, + reusable_snapshot, +) +from climada.trajectories import StaticRiskTrajectory +from climada.trajectories.constants import ( + AAI_METRIC_NAME, + CONTRIBUTION_BASE_RISK_NAME, + CONTRIBUTION_EXPOSURE_NAME, + CONTRIBUTION_HAZARD_NAME, + CONTRIBUTION_INTERACTION_TERM_NAME, + CONTRIBUTION_VULNERABILITY_NAME, + DATE_COL_NAME, + GROUP_COL_NAME, + MEASURE_COL_NAME, + METRIC_COL_NAME, + NO_MEASURE_VALUE, + PERIOD_COL_NAME, + RISK_COL_NAME, + UNIT_COL_NAME, +) +from climada.trajectories.snapshot import Snapshot +from climada.trajectories.trajectory import DEFAULT_RP + + +class TestStaticTrajectory(TestCase): + PRESENT_DATE = 2020 + HAZ_INCREASE_INTENSITY_FACTOR = 2 + EXP_INCREASE_VALUE_FACTOR = 10 + FUTURE_DATE = 2040 + + def setUp(self) -> None: + self.base_snapshot = reusable_snapshot(date=self.PRESENT_DATE) + self.future_snapshot = reusable_snapshot( + hazard_intensity_increase_factor=self.HAZ_INCREASE_INTENSITY_FACTOR, + exposure_value_increase_factor=self.EXP_INCREASE_VALUE_FACTOR, + date=self.FUTURE_DATE, + ) + + self.expected_base_imp = ImpactCalc( + **self.base_snapshot.impact_calc_data + ).impact() + self.expected_future_imp = ImpactCalc( + **self.future_snapshot.impact_calc_data + ).impact() + # self.group_vector = self.base_snapshot.exposure.gdf[GROUP_ID_COL_NAME] + self.expected_base_return_period_impacts = { + rp: imp + for rp, imp in zip( + self.expected_base_imp.calc_freq_curve(DEFAULT_RP).return_per, + self.expected_base_imp.calc_freq_curve(DEFAULT_RP).impact, + ) + } + self.expected_future_return_period_impacts = { + rp: imp + for rp, imp in zip( + self.expected_future_imp.calc_freq_curve(DEFAULT_RP).return_per, + self.expected_future_imp.calc_freq_curve(DEFAULT_RP).impact, + ) + } + + # fmt: off + self.expected_static_metrics = pd.DataFrame.from_dict( + {'index': [0, 1, 2, 3, 4, 5, 6, 7], + 'columns': [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME, UNIT_COL_NAME, RISK_COL_NAME], + 'data': [ + [pd.Timestamp(str(self.PRESENT_DATE)), 'All', NO_MEASURE_VALUE, 'aai', 'USD', self.expected_base_imp.aai_agg], + [pd.Timestamp(str(self.FUTURE_DATE)), 'All', NO_MEASURE_VALUE, 'aai', 'USD', self.expected_future_imp.aai_agg], + [pd.Timestamp(str(self.PRESENT_DATE)), 'All', NO_MEASURE_VALUE, f'rp_{DEFAULT_RP[0]}', 'USD', self.expected_base_return_period_impacts[DEFAULT_RP[0]]], + [pd.Timestamp(str(self.FUTURE_DATE)), 'All', NO_MEASURE_VALUE, f'rp_{DEFAULT_RP[0]}', 'USD', self.expected_future_return_period_impacts[DEFAULT_RP[0]]], + [pd.Timestamp(str(self.PRESENT_DATE)), 'All', NO_MEASURE_VALUE, f'rp_{DEFAULT_RP[1]}', 'USD', self.expected_base_return_period_impacts[DEFAULT_RP[1]]], + [pd.Timestamp(str(self.FUTURE_DATE)), 'All', NO_MEASURE_VALUE, f'rp_{DEFAULT_RP[1]}', 'USD', self.expected_future_return_period_impacts[DEFAULT_RP[1]]], + [pd.Timestamp(str(self.PRESENT_DATE)), 'All', NO_MEASURE_VALUE, f'rp_{DEFAULT_RP[2]}', 'USD', self.expected_base_return_period_impacts[DEFAULT_RP[2]]], + [pd.Timestamp(str(self.FUTURE_DATE)), 'All', NO_MEASURE_VALUE, 
f'rp_{DEFAULT_RP[2]}', 'USD', self.expected_future_return_period_impacts[DEFAULT_RP[2]]], + ], + 'index_names': [None], + 'column_names': [None]}, + orient="tight" + ) + # fmt: on + + def test_static_trajectory(self): + static_traj = StaticRiskTrajectory([self.base_snapshot, self.future_snapshot]) + print(static_traj.per_date_risk_metrics()) + pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + self.expected_static_metrics, + check_dtype=False, + check_categorical=False, + ) + + def test_static_trajectory_one_snap(self): + static_traj = StaticRiskTrajectory([self.base_snapshot]) + expected = pd.DataFrame.from_dict( + # fmt: off + { + "index": [0, 1, 2, 3], + "columns": [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME, UNIT_COL_NAME, RISK_COL_NAME,], + "data": [ + [pd.Timestamp(str(self.PRESENT_DATE)), "All", NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_base_imp.aai_agg,], + [pd.Timestamp(str(self.PRESENT_DATE)), "All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[0]}", "USD", self.expected_base_return_period_impacts[DEFAULT_RP[0]],], + [pd.Timestamp(str(self.PRESENT_DATE)), "All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[1]}", "USD", self.expected_base_return_period_impacts[DEFAULT_RP[1]],], + [pd.Timestamp(str(self.PRESENT_DATE)), "All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[2]}", "USD", self.expected_base_return_period_impacts[DEFAULT_RP[2]],], + ], + "index_names": [None], + "column_names": [None], + }, + # fmt: on + orient="tight", + ) + + pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + expected, + check_dtype=False, + check_categorical=False, + ) + + def test_static_trajectory_with_group(self): + exp0 = reusable_minimal_exposures(group_id=CATEGORIES) + exp1 = reusable_minimal_exposures( + group_id=CATEGORIES, increase_value_factor=self.EXP_INCREASE_VALUE_FACTOR + ) + snap0 = Snapshot( + exposure=exp0, + hazard=reusable_minimal_hazard(), + impfset=reusable_minimal_impfset(), + date=self.PRESENT_DATE, + ) + snap1 = Snapshot( + exposure=exp1, + hazard=reusable_minimal_hazard( + intensity_factor=self.HAZ_INCREASE_INTENSITY_FACTOR + ), + impfset=reusable_minimal_impfset(), + date=self.FUTURE_DATE, + ) + + expected_static_metrics = pd.concat( + [ + self.expected_static_metrics, + pd.DataFrame.from_dict( + # fmt: off + { + "index": [8, 9, 10, 11], + "columns": [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME, UNIT_COL_NAME, RISK_COL_NAME,], + "data": [ + [pd.Timestamp(str(self.PRESENT_DATE)), 1, NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_base_imp.eai_exp[CATEGORIES == 1].sum(),], + [pd.Timestamp(str(self.PRESENT_DATE)), 2, NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_base_imp.eai_exp[CATEGORIES == 2].sum(),], + [pd.Timestamp(str(self.FUTURE_DATE)), 1, NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_future_imp.eai_exp[CATEGORIES == 1].sum(),], + [pd.Timestamp(str(self.FUTURE_DATE)), 2, NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_future_imp.eai_exp[CATEGORIES == 2].sum(),], + ], + "index_names": [None], + "column_names": [None], + }, + # fmt: on + orient="tight", + ), + ] + ) + + static_traj = StaticRiskTrajectory([snap0, snap1]) + pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + expected_static_metrics, + check_dtype=False, + check_categorical=False, + ) + + def test_static_trajectory_change_rp(self): + static_traj = StaticRiskTrajectory( + [self.base_snapshot, self.future_snapshot], return_periods=[10, 60, 1000] + ) + expected = pd.DataFrame.from_dict( + # 
fmt: off + { + "index": [0, 1, 2, 3, 4, 5, 6, 7], + "columns": [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME, UNIT_COL_NAME, RISK_COL_NAME,], + "data": [ + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_base_imp.aai_agg,], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_future_imp.aai_agg,], + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, "rp_10", "USD", 0.0,], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, "rp_10", "USD", 0.0,], + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, "rp_60", "USD", 700.0,], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, "rp_60", "USD", 14000.0,], + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, "rp_1000", "USD", 1500.0,], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, "rp_1000", "USD", 30000.0,], + ], + "index_names": [None], + "column_names": [None], + }, + # fmt: on + orient="tight", + ) + pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + expected, + check_dtype=False, + check_categorical=False, + ) + + # Also check change to other return period + static_traj.return_periods = DEFAULT_RP + pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + self.expected_static_metrics, + check_dtype=False, + check_categorical=False, + ) + + def test_static_trajectory_risk_disc_rate(self): + risk_disc_rate = DiscRates( + years=np.array(range(self.PRESENT_DATE, 2041)), rates=np.ones(21) * 0.01 + ) + static_traj = StaticRiskTrajectory( + [self.base_snapshot, self.future_snapshot], risk_disc_rates=risk_disc_rate + ) + expected = pd.DataFrame.from_dict( + # fmt: off + { + "index": [0, 1, 2, 3, 4, 5, 6, 7], + "columns": [DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME, UNIT_COL_NAME, RISK_COL_NAME,], + "data": [ + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_base_imp.aai_agg,], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, AAI_METRIC_NAME, "USD", self.expected_future_imp.aai_agg * ((1 / (1 + 0.01)) ** 20),], + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[0]}", "USD", self.expected_base_return_period_impacts[DEFAULT_RP[0]],], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[0]}", "USD", self.expected_future_return_period_impacts[DEFAULT_RP[0]] * ((1 / (1 + 0.01)) ** 20),], + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[1]}", "USD", self.expected_base_return_period_impacts[DEFAULT_RP[1]],], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[1]}", "USD", self.expected_future_return_period_impacts[DEFAULT_RP[1]] * ((1 / (1 + 0.01)) ** 20),], + [pd.Timestamp(str(self.PRESENT_DATE)),"All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[2]}", "USD", self.expected_base_return_period_impacts[DEFAULT_RP[2]],], + [pd.Timestamp(str(self.FUTURE_DATE)), "All", NO_MEASURE_VALUE, f"rp_{DEFAULT_RP[2]}", "USD", self.expected_future_return_period_impacts[DEFAULT_RP[2]] * ((1 / (1 + 0.01)) ** 20),], + ], + "index_names": [None], + "column_names": [None], + }, + # fmt: on + orient="tight", + ) + pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + expected, + check_dtype=False, + check_categorical=False, + ) + + # Also check change to other return period + static_traj.risk_disc_rates = None + 
pd.testing.assert_frame_equal( + static_traj.per_date_risk_metrics(), + self.expected_static_metrics, + check_dtype=False, + check_categorical=False, + ) diff --git a/climada/trajectories/test/test_static_risk_trajectory.py b/climada/trajectories/test/test_static_risk_trajectory.py new file mode 100644 index 0000000000..7576c957f9 --- /dev/null +++ b/climada/trajectories/test/test_static_risk_trajectory.py @@ -0,0 +1,379 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see . + +--- + +unit tests for static_risk_trajectory + +""" + +import datetime +import types +import unittest +from itertools import product +from unittest.mock import MagicMock, Mock, call, patch + +import numpy as np # For potential NaN/NA comparisons +import pandas as pd + +from climada.entity.disc_rates.base import DiscRates +from climada.trajectories.calc_risk_metrics import ( # ImpactComputationStrategy, # If needed to mock its base class directly + CalcRiskMetricsPoints, +) +from climada.trajectories.constants import ( + AAI_METRIC_NAME, + AAI_PER_GROUP_METRIC_NAME, + DATE_COL_NAME, + EAI_METRIC_NAME, + GROUP_COL_NAME, + MEASURE_COL_NAME, + METRIC_COL_NAME, + RETURN_PERIOD_METRIC_NAME, + RISK_COL_NAME, +) +from climada.trajectories.impact_calc_strat import ImpactCalcComputation +from climada.trajectories.snapshot import Snapshot +from climada.trajectories.static_trajectory import ( + DEFAULT_ALLGROUP_NAME, + DEFAULT_RP, + StaticRiskTrajectory, +) + + +class TestStaticRiskTrajectory(unittest.TestCase): + def setUp(self) -> None: + self.dates1 = [pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")] + self.dates2 = [pd.Timestamp("2026-01-01")] + self.groups = ["GroupA", "GroupB", pd.NA] + self.measures = ["MEAS1", "MEAS2"] + self.metrics = [AAI_METRIC_NAME] + self.aai_dates1 = pd.DataFrame( + product(self.groups, self.dates1, self.measures, self.metrics), + columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], + ) + self.aai_dates1[RISK_COL_NAME] = np.arange(12) * 100 + self.aai_dates1[GROUP_COL_NAME] = self.aai_dates1[GROUP_COL_NAME].astype( + "category" + ) + + self.aai_dates2 = pd.DataFrame( + product(self.groups, self.dates2, self.measures, self.metrics), + columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], + ) + self.aai_dates2[RISK_COL_NAME] = np.arange(6) * 100 + 1200 + self.aai_dates2[GROUP_COL_NAME] = self.aai_dates2[GROUP_COL_NAME].astype( + "category" + ) + + self.aai_alldates = pd.DataFrame( + product( + self.groups, self.dates1 + self.dates2, self.measures, self.metrics + ), + columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], + ) + self.aai_alldates[RISK_COL_NAME] = np.arange(18) * 100 + self.aai_alldates[GROUP_COL_NAME] = self.aai_alldates[GROUP_COL_NAME].astype( + "category" + ) + self.aai_alldates[GROUP_COL_NAME] = self.aai_alldates[ + GROUP_COL_NAME + ].cat.add_categories([DEFAULT_ALLGROUP_NAME]) + self.aai_alldates[GROUP_COL_NAME] = 
self.aai_alldates[GROUP_COL_NAME].fillna( + DEFAULT_ALLGROUP_NAME + ) + self.expected_pre_npv_aai = self.aai_alldates + self.expected_pre_npv_aai = self.expected_pre_npv_aai[ + [ + DATE_COL_NAME, + GROUP_COL_NAME, + MEASURE_COL_NAME, + METRIC_COL_NAME, + RISK_COL_NAME, + ] + ] + + self.expected_npv_aai = pd.DataFrame( + product( + self.dates1 + self.dates2, self.groups, self.measures, self.metrics + ), + columns=[DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], + ) + self.expected_npv_aai[RISK_COL_NAME] = np.arange(18) * 90 + self.expected_npv_aai[GROUP_COL_NAME] = self.expected_npv_aai[ + GROUP_COL_NAME + ].astype("category") + self.expected_npv_aai[GROUP_COL_NAME] = self.expected_npv_aai[ + GROUP_COL_NAME + ].cat.add_categories(["All"]) + self.expected_npv_aai[GROUP_COL_NAME] = self.expected_npv_aai[ + GROUP_COL_NAME + ].fillna(DEFAULT_ALLGROUP_NAME) + expected_npv_df = self.expected_npv_aai + expected_npv_df = expected_npv_df[ + [ + DATE_COL_NAME, + GROUP_COL_NAME, + MEASURE_COL_NAME, + METRIC_COL_NAME, + RISK_COL_NAME, + ] + ] + + self.mock_snapshot1 = MagicMock(spec=Snapshot) + self.mock_snapshot1.date = datetime.date(2023, 1, 1) + + self.mock_snapshot2 = MagicMock(spec=Snapshot) + self.mock_snapshot2.date = datetime.date(2024, 1, 1) + + self.mock_snapshot3 = MagicMock(spec=Snapshot) + self.mock_snapshot3.date = datetime.date(2026, 1, 1) + + self.snapshots_list: list[Snapshot] = [ + self.mock_snapshot1, + self.mock_snapshot2, + self.mock_snapshot3, + ] + + self.risk_disc_rates = MagicMock(spec=DiscRates) + self.risk_disc_rates.years = [2023, 2024, 2025, 2026] + self.risk_disc_rates.rates = [0.01, 0.02, 0.03, 0.04] # Example rates + + self.mock_impact_computation_strategy = MagicMock(spec=ImpactCalcComputation) + + self.custom_all_groups_name = "custom" + self.custom_return_periods = [10, 20] + + self.mock_static_traj = MagicMock(spec=StaticRiskTrajectory) + self.mock_static_traj._all_groups_name = DEFAULT_ALLGROUP_NAME + self.mock_static_traj._risk_disc_rates = None + self.mock_static_traj._risk_metrics_calculators = MagicMock( + spec=CalcRiskMetricsPoints + ) + + @patch( + "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", + autospec=True, + ) + def test_init_basic(self, MockCalcRiskPoints): + mock_calculator = MagicMock(spec=CalcRiskMetricsPoints) + mock_calculator.impact_computation_strategy = ( + self.mock_impact_computation_strategy + ) + MockCalcRiskPoints.return_value = mock_calculator + rt = StaticRiskTrajectory( + self.snapshots_list, + impact_computation_strategy=self.mock_impact_computation_strategy, + ) + MockCalcRiskPoints.assert_has_calls( + [ + call( + self.snapshots_list, + impact_computation_strategy=self.mock_impact_computation_strategy, + ), + ] + ) + self.assertEqual(rt.start_date, self.mock_snapshot1.date) + self.assertEqual(rt.end_date, self.mock_snapshot3.date) + self.assertIsNone(rt._risk_disc_rates) + self.assertEqual(rt._all_groups_name, DEFAULT_ALLGROUP_NAME) + self.assertEqual(rt._return_periods, DEFAULT_RP) + self.assertEqual( + rt.impact_computation_strategy, self.mock_impact_computation_strategy + ) + # Check that metrics are reset (initially None) + for metric in StaticRiskTrajectory.POSSIBLE_METRICS: + self.assertIsNone(getattr(rt, "_" + metric + "_metrics")) + + @patch( + "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", + autospec=True, + ) + def test_init_args(self, mock_calc_risk_metrics_points): + rt = StaticRiskTrajectory( + self.snapshots_list, + return_periods=self.custom_return_periods, + 
all_groups_name=self.custom_all_groups_name, + risk_disc_rates=self.risk_disc_rates, + impact_computation_strategy=self.mock_impact_computation_strategy, + ) + self.assertEqual(rt.start_date, self.mock_snapshot1.date) + self.assertEqual(rt.end_date, self.mock_snapshot3.date) + self.assertEqual(rt._risk_disc_rates, self.risk_disc_rates) + self.assertEqual(rt._all_groups_name, self.custom_all_groups_name) + self.assertEqual(rt._return_periods, self.custom_return_periods) + self.assertEqual(rt.return_periods, self.custom_return_periods) + # Check that metrics are reset (initially None) + for metric in StaticRiskTrajectory.POSSIBLE_METRICS: + self.assertIsNone(getattr(rt, "_" + metric + "_metrics")) + self.assertIsInstance(rt._risk_metrics_calculators, CalcRiskMetricsPoints) + mock_calc_risk_metrics_points.assert_called_with( + self.snapshots_list, + impact_computation_strategy=self.mock_impact_computation_strategy, + ) + + @patch.object(StaticRiskTrajectory, "_reset_metrics", new_callable=Mock) + @patch( + "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", + autospec=True, + ) + def test_set_impact_computation_strategy( + self, mock_calc_risk_metrics_points, mock_reset_metrics + ): + rt = StaticRiskTrajectory( + self.snapshots_list, + impact_computation_strategy=self.mock_impact_computation_strategy, + ) + mock_reset_metrics.assert_called_once() # Called during init + with self.assertRaises(ValueError): + rt.impact_computation_strategy = "A" + + # There is only one possibility at the moment so we just check against a new object + new_impact_calc = ImpactCalcComputation() + rt.impact_computation_strategy = new_impact_calc + self.assertEqual(rt.impact_computation_strategy, new_impact_calc) + mock_reset_metrics.assert_has_calls([call(), call()]) + + def test_generic_metrics(self): + self.mock_static_traj.POSSIBLE_METRICS = StaticRiskTrajectory.POSSIBLE_METRICS + self.mock_static_traj._generic_metrics = types.MethodType( + StaticRiskTrajectory._generic_metrics, self.mock_static_traj + ) + self.mock_static_traj._risk_disc_rates = self.risk_disc_rates + self.mock_static_traj._aai_metrics = None + with self.assertRaises(ValueError): + self.mock_static_traj._generic_metrics(None, "dummy_meth") + + with self.assertRaises(NotImplementedError): + self.mock_static_traj._generic_metrics("dummy_name", "dummy_meth") + + self.mock_static_traj._risk_metrics_calculators.calc_aai_metric.return_value = ( + self.aai_alldates + ) + self.mock_static_traj.npv_transform.return_value = self.expected_npv_aai + result = self.mock_static_traj._generic_metrics( + AAI_METRIC_NAME, "calc_aai_metric" + ) + + self.mock_static_traj._risk_metrics_calculators.calc_aai_metric.assert_called_once_with() + self.mock_static_traj.npv_transform.assert_called_once() + pd.testing.assert_frame_equal( + self.mock_static_traj.npv_transform.call_args[0][0].reset_index(drop=True), + self.expected_pre_npv_aai.reset_index(drop=True), + ) + self.assertEqual( + self.mock_static_traj.npv_transform.call_args[0][1], self.risk_disc_rates + ) + pd.testing.assert_frame_equal( + result, self.expected_npv_aai + ) # Final result is from NPV transform + + # Check internal storage + stored_df = getattr(self.mock_static_traj, "_aai_metrics") + # Assert that the stored DF is the one *before* NPV transformation + pd.testing.assert_frame_equal( + stored_df.reset_index(drop=True), + self.expected_npv_aai.reset_index(drop=True), + ) + + result2 = self.mock_static_traj._generic_metrics( + AAI_METRIC_NAME, "calc_aai_metric" + ) + # Check no new call + 
self.mock_static_traj._risk_metrics_calculators.calc_aai_metric.assert_called_once_with() + pd.testing.assert_frame_equal( + result2, + self.expected_npv_aai.reset_index(drop=True), + ) + + def test_eai_metrics(self): + self.mock_static_traj.eai_metrics = types.MethodType( + StaticRiskTrajectory.eai_metrics, self.mock_static_traj + ) + self.mock_static_traj.eai_metrics(some_arg="test") + self.mock_static_traj._compute_metrics.assert_called_once_with( + metric_name=EAI_METRIC_NAME, metric_meth="calc_eai_gdf", some_arg="test" + ) + + def test_aai_metrics(self): + self.mock_static_traj.aai_metrics = types.MethodType( + StaticRiskTrajectory.aai_metrics, self.mock_static_traj + ) + self.mock_static_traj.aai_metrics(some_arg="test") + self.mock_static_traj._compute_metrics.assert_called_once_with( + metric_name=AAI_METRIC_NAME, metric_meth="calc_aai_metric", some_arg="test" + ) + + def test_return_periods_metrics(self): + self.mock_static_traj.return_periods = [1, 2] + self.mock_static_traj.return_periods_metrics = types.MethodType( + StaticRiskTrajectory.return_periods_metrics, self.mock_static_traj + ) + self.mock_static_traj.return_periods_metrics(some_arg="test") + self.mock_static_traj._compute_metrics.assert_called_once_with( + metric_name=RETURN_PERIOD_METRIC_NAME, + metric_meth="calc_return_periods_metric", + return_periods=[1, 2], + some_arg="test", + ) + + def test_aai_per_group_metrics(self): + self.mock_static_traj.aai_per_group_metrics = types.MethodType( + StaticRiskTrajectory.aai_per_group_metrics, self.mock_static_traj + ) + self.mock_static_traj.aai_per_group_metrics(some_arg="test") + self.mock_static_traj._compute_metrics.assert_called_once_with( + metric_name=AAI_PER_GROUP_METRIC_NAME, + metric_meth="calc_aai_per_group_metric", + some_arg="test", + ) + + def test_per_date_risk_metrics_defaults(self): + self.mock_static_traj.per_date_risk_metrics = types.MethodType( + StaticRiskTrajectory.per_date_risk_metrics, self.mock_static_traj + ) + # Set up mock return values for each method + self.mock_static_traj.aai_metrics.return_value = pd.DataFrame( + {METRIC_COL_NAME: [AAI_METRIC_NAME], RISK_COL_NAME: [100]} + ) + self.mock_static_traj.return_periods_metrics.return_value = pd.DataFrame( + {METRIC_COL_NAME: ["rp"], RISK_COL_NAME: [50]} + ) + self.mock_static_traj.aai_per_group_metrics.return_value = pd.DataFrame( + {METRIC_COL_NAME: ["aai_grp"], RISK_COL_NAME: [10]} + ) + result = self.mock_static_traj.per_date_risk_metrics() + + # Assert calls with default arguments + self.mock_static_traj.aai_metrics.assert_called_once_with() + self.mock_static_traj.return_periods_metrics.assert_called_once_with() + self.mock_static_traj.aai_per_group_metrics.assert_called_once_with() + + # Assert concatenation + expected_df = pd.concat( + [ + self.mock_static_traj.aai_metrics.return_value, + self.mock_static_traj.return_periods_metrics.return_value, + self.mock_static_traj.aai_per_group_metrics.return_value, + ] + ) + pd.testing.assert_frame_equal( + result.reset_index(drop=True), expected_df.reset_index(drop=True) + ) + + +if __name__ == "__main__": + TESTS = unittest.TestLoader().loadTestsFromTestCase(TestStaticRiskTrajectory) + unittest.TextTestRunner(verbosity=2).run(TESTS) From 6bf3416a313685804c7d95b9903f27b44673824e Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 15:01:12 +0100 Subject: [PATCH 14/37] Initial data --- climada/test/common_test_fixtures.py | 210 +++++++++++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 
climada/test/common_test_fixtures.py

diff --git a/climada/test/common_test_fixtures.py b/climada/test/common_test_fixtures.py
new file mode 100644
index 0000000000..5847b42e3e
--- /dev/null
+++ b/climada/test/common_test_fixtures.py
@@ -0,0 +1,210 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see .
+---
+
+A set of reusable objects for testing purposes.
+
+The objective of this file is to provide minimalistic, understandable and consistent
+default objects for unit and integration testing.
+
+"""
+
+import copy
+from unittest import TestCase
+
+import geopandas as gpd
+import numpy as np
+import pandas as pd
+from scipy.sparse import csr_matrix
+from shapely.geometry import Point
+
+from climada.entity import Exposures, ImpactFunc, ImpactFuncSet, ImpfTropCyclone
+from climada.hazard import Centroids, Hazard
+from climada.trajectories import InterpolatedRiskTrajectory, StaticRiskTrajectory
+from climada.trajectories.snapshot import Snapshot
+
+# ---------------------------------------------------------------------------
+# Coordinate system and metadata
+# ---------------------------------------------------------------------------
+CRS_WGS84 = "EPSG:4326"
+
+# ---------------------------------------------------------------------------
+# Exposure attributes
+# ---------------------------------------------------------------------------
+EXP_DESC = "Test exposure dataset"
+EXP_DESC_LATLON = "Test exposure dataset (lat/lon)"
+EXPOSURE_REF_YEAR = 2020
+EXPOSURE_VALUE_UNIT = "USD"
+VALUES = np.array([0, 1000, 2000, 3000])
+REGIONS = np.array(["A", "A", "B", "B"])
+CATEGORIES = np.array([1, 1, 2, 1])
+
+# Exposure coordinates
+EXP_LONS = np.array([4, 4.5, 4, 4.5])
+EXP_LATS = np.array([45, 45, 45.5, 45.5])
+
+# ---------------------------------------------------------------------------
+# Hazard definition
+# ---------------------------------------------------------------------------
+HAZARD_TYPE = "TEST_HAZARD_TYPE"
+HAZARD_UNIT = "TEST_HAZARD_UNIT"
+
+# Hazard centroid positions
+HAZ_JITTER = 0.1  # To test centroid matching
+HAZ_LONS = EXP_LONS + HAZ_JITTER
+HAZ_LATS = EXP_LATS + HAZ_JITTER
+
+# Hazard events
+EVENT_IDS = np.array([1, 2, 3, 4])
+EVENT_NAMES = ["ev1", "ev2", "ev3", "ev4"]
+DATES = np.array([1, 2, 3, 4])
+
+# Frequencies are chosen so that their cumulative sums correspond
+# to 100, 50, and 20-year return periods (for impacts)
+FREQUENCY = np.array([0.1, 0.03, 0.01, 0.01])
+FREQUENCY_UNIT = "1/year"
+
+# Hazard maximum intensity
+# 100 to match the 0 to 100% idea
+# also in line with the linear 1:1 impact function
+# for easy mental arithmetic
+HAZARD_MAX_INTENSITY = 100
+
+# ---------------------------------------------------------------------------
+# Impact function
+# ---------------------------------------------------------------------------
+IMPF_ID = 1
+IMPF_NAME = "IMPF_1"
+
+# ---------------------------------------------------------------------------
+# Future years
+# ---------------------------------------------------------------------------
+EXPOSURE_FUTURE_YEAR = 2040
+
+
+def reusable_minimal_exposures(
+    values=VALUES,
+    regions=REGIONS,
+    group_id=None,
+    lon=EXP_LONS,
+    lat=EXP_LATS,
+    crs=CRS_WGS84,
+    desc=EXP_DESC,
+    ref_year=EXPOSURE_REF_YEAR,
+    value_unit=EXPOSURE_VALUE_UNIT,
+    assign_impf=IMPF_ID,
+    increase_value_factor=1,
+) -> Exposures:
+    data = gpd.GeoDataFrame(
+        {
+            "value": values * increase_value_factor,
+            "region_id": regions,
+            f"impf_{HAZARD_TYPE}": assign_impf,
+            "geometry": [Point(lon, lat) for lon, lat in zip(lon, lat)],
+        },
+        crs=crs,
+    )
+    if group_id is not None:
+        data["group_id"] = group_id
+    return Exposures(
+        data=data,
+        description=desc,
+        ref_year=ref_year,
+        value_unit=value_unit,
+    )
+
+
+def reusable_intensity_mat(max_intensity=HAZARD_MAX_INTENSITY):
+    # Chosen such that:
+    # - 1st event has 0 intensity
+    # - 2nd event has max intensity in first exposure point (defaulting to 0 value)
+    # - 3rd event has 1/2* of max intensity in second centroid
+    # - 4th event has 1/4* of max intensity everywhere
+    # *: so that doubling the hazard intensity doubles the expected impacts
+    return csr_matrix(
+        [
+            [0, 0, 0, 0],
+            [max_intensity, 0, 0, 0],
+            [0, max_intensity / 2, 0, 0],
+            [
+                max_intensity / 4,
+                max_intensity / 4,
+                max_intensity / 4,
+                max_intensity / 4,
+            ],
+        ]
+    )
+
+
+def reusable_minimal_hazard(
+    haz_type=HAZARD_TYPE,
+    units=HAZARD_UNIT,
+    lat=HAZ_LATS,
+    lon=HAZ_LONS,
+    crs=CRS_WGS84,
+    event_id=EVENT_IDS,
+    event_name=EVENT_NAMES,
+    date=DATES,
+    frequency=FREQUENCY,
+    frequency_unit=FREQUENCY_UNIT,
+    intensity=None,
+    intensity_factor=1,
+) -> Hazard:
+    intensity = reusable_intensity_mat() if intensity is None else intensity
+    # Do not mutate a caller-provided matrix in place
+    intensity = intensity * intensity_factor
+    return Hazard(
+        haz_type=haz_type,
+        units=units,
+        centroids=Centroids(lat=lat, lon=lon, crs=crs),
+        event_id=event_id,
+        event_name=event_name,
+        date=date,
+        frequency=frequency,
+        frequency_unit=frequency_unit,
+        intensity=intensity,
+    )
+
+
+def reusable_minimal_impfset(
+    hazard=None, name=IMPF_NAME, impf_id=IMPF_ID, max_intensity=HAZARD_MAX_INTENSITY
+):
+    hazard = reusable_minimal_hazard() if hazard is None else hazard
+    return ImpactFuncSet(
+        [
+            ImpactFunc(
+                haz_type=hazard.haz_type,
+                intensity_unit=hazard.units,
+                name=name,
+                intensity=np.array([0, max_intensity / 2, max_intensity]),
+                mdd=np.array([0, 0.5, 1]),
+                paa=np.array([1, 1, 1]),
+                id=impf_id,
+            )
+        ]
+    )
+
+
+def reusable_snapshot(
+    hazard_intensity_increase_factor=1,
+    exposure_value_increase_factor=1,
+    date=EXPOSURE_REF_YEAR,
+):
+    exposures = reusable_minimal_exposures(
+        increase_value_factor=exposure_value_increase_factor
+    )
+    hazard = reusable_minimal_hazard(intensity_factor=hazard_intensity_increase_factor)
+    impfset = reusable_minimal_impfset()
+    return Snapshot(exposure=exposures, hazard=hazard, impfset=impfset, date=date)

From 7ec7db12187894bec8f57696af583703f61f89ee Mon Sep 17 00:00:00 2001
From: spjuhel
Date: Thu, 18 Dec 2025 15:04:12 +0100
Subject: [PATCH 15/37] cleans up tests

---
 climada/test/test_trajectories.py | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/climada/test/test_trajectories.py b/climada/test/test_trajectories.py
index bc47ff531e..5df15e2651 100644
--- a/climada/test/test_trajectories.py
+++ b/climada/test/test_trajectories.py
@@ -19,19 +19,14 @@
 
 """
 
-import copy
-from itertools import groupby
 from unittest import TestCase
 
-import geopandas as gpd
 import numpy as np
 import pandas as pd
 
 from
climada.engine.impact_calc import ImpactCalc from climada.entity.disc_rates.base import DiscRates -from climada.entity.impact_funcs.base import ImpactFunc -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet -from climada.test.reusable import ( +from climada.test.common_test_fixtures import ( CATEGORIES, reusable_minimal_exposures, reusable_minimal_hazard, @@ -41,17 +36,11 @@ from climada.trajectories import StaticRiskTrajectory from climada.trajectories.constants import ( AAI_METRIC_NAME, - CONTRIBUTION_BASE_RISK_NAME, - CONTRIBUTION_EXPOSURE_NAME, - CONTRIBUTION_HAZARD_NAME, - CONTRIBUTION_INTERACTION_TERM_NAME, - CONTRIBUTION_VULNERABILITY_NAME, DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME, NO_MEASURE_VALUE, - PERIOD_COL_NAME, RISK_COL_NAME, UNIT_COL_NAME, ) From 7349bc7392e9f4cc55bb5e0121d390266b5a5766 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Thu, 18 Dec 2025 15:04:40 +0100 Subject: [PATCH 16/37] cleansup --- climada/test/common_test_fixtures.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/climada/test/common_test_fixtures.py b/climada/test/common_test_fixtures.py index 5847b42e3e..64ed519fe4 100644 --- a/climada/test/common_test_fixtures.py +++ b/climada/test/common_test_fixtures.py @@ -22,18 +22,13 @@ """ -import copy -from unittest import TestCase - import geopandas as gpd import numpy as np -import pandas as pd from scipy.sparse import csr_matrix from shapely.geometry import Point -from climada.entity import Exposures, ImpactFunc, ImpactFuncSet, ImpfTropCyclone +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet from climada.hazard import Centroids, Hazard -from climada.trajectories import InterpolatedRiskTrajectory, StaticRiskTrajectory from climada.trajectories.snapshot import Snapshot # --------------------------------------------------------------------------- From e12e01461b005550854f833ca2b29b64f2d57fdf Mon Sep 17 00:00:00 2001 From: spjuhel Date: Fri, 19 Dec 2025 15:23:16 +0100 Subject: [PATCH 17/37] Adds option to have references instead of deep copies of members --- climada/trajectories/snapshot.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py index d8c78c0c20..cc4a26f871 100644 --- a/climada/trajectories/snapshot.py +++ b/climada/trajectories/snapshot.py @@ -52,6 +52,9 @@ class Snapshot: The date of the Snapshot, it can be an integer representing a year, a datetime object or a string representation of a datetime object with format "YYYY-MM-DD". + ref_only : bool, default False + Should the `Snapshot` contain deep copies of the Exposures, Hazard and Impfset (False) + or references only (True). 
 
     Attributes
     ----------
@@ -80,10 +83,11 @@ def __init__(
         hazard: Hazard,
         impfset: ImpactFuncSet,
         date: int | datetime.date | str,
+        ref_only: bool = False,
     ) -> None:
-        self._exposure = copy.deepcopy(exposure)
-        self._hazard = copy.deepcopy(hazard)
-        self._impfset = copy.deepcopy(impfset)
+        self._exposure = exposure if ref_only else copy.deepcopy(exposure)
+        self._hazard = hazard if ref_only else copy.deepcopy(hazard)
+        self._impfset = impfset if ref_only else copy.deepcopy(impfset)
         self._measure = None
         self._date = self._convert_to_date(date)

From b4f05e1f5986248551d29f121098b664157988ad Mon Sep 17 00:00:00 2001
From: spjuhel 
Date: Mon, 5 Jan 2026 14:24:46 +0100
Subject: [PATCH 18/37] Pylint fix

---
 climada/trajectories/snapshot.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py
index cc4a26f871..ae844305fb 100644
--- a/climada/trajectories/snapshot.py
+++ b/climada/trajectories/snapshot.py
@@ -27,8 +27,6 @@
 import datetime
 import logging
 
-import pandas as pd
-
 from climada.entity.exposures import Exposures
 from climada.entity.impact_funcs import ImpactFuncSet
 from climada.entity.measures.base import Measure

From 4a8c770fe0f1ad0d7832566d5dd4f911556551da Mon Sep 17 00:00:00 2001
From: spjuhel 
Date: Mon, 5 Jan 2026 15:07:58 +0100
Subject: [PATCH 19/37] Fixes pylint

---
 climada/trajectories/snapshot.py | 71 +++++++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 10 deletions(-)

diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py
index ae844305fb..24a90ca0e1 100644
--- a/climada/trajectories/snapshot.py
+++ b/climada/trajectories/snapshot.py
@@ -80,15 +80,65 @@ def __init__(
         exposure: Exposures,
         hazard: Hazard,
         impfset: ImpactFuncSet,
+        measure: Measure | None,
         date: int | datetime.date | str,
         ref_only: bool = False,
     ) -> None:
         self._exposure = exposure if ref_only else copy.deepcopy(exposure)
         self._hazard = hazard if ref_only else copy.deepcopy(hazard)
         self._impfset = impfset if ref_only else copy.deepcopy(impfset)
-        self._measure = None
+        self._measure = measure if ref_only else copy.deepcopy(impfset)
         self._date = self._convert_to_date(date)
 
+    @classmethod
+    def from_triplet(
+        cls,
+        *,
+        exposure: Exposures,
+        hazard: Hazard,
+        impfset: ImpactFuncSet,
+        date: int | datetime.date | str,
+        ref_only: bool = False,
+    ) -> "Snapshot":
+        """Create a Snapshot from an exposure, hazard and impact function set.
+
+        This method is the main entry point for creating a Snapshot. It
+        creates a new Snapshot object for the given date with copies of the
+        hazard, exposure and impact function set passed as arguments (or
+        references to them if ref_only is True).
+
+        Parameters
+        ----------
+        exposure : Exposures
+        hazard : Hazard
+        impfset : ImpactFuncSet
+        date : int | datetime.date | str
+        ref_only : bool
+            If True, uses references to the exposure, hazard and impact
+            function objects. Note that modifying the original objects after
+            computations using the Snapshot might lead to inconsistencies in
+            results.
+
+        Returns
+        -------
+        Snapshot
+
+        Notes
+        -----
+
+        To create a Snapshot with a measure, first create the Snapshot without
+        the measure using this method, and use `apply_measure(measure)` afterward.
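+
+        A minimal usage sketch (``exp``, ``haz``, ``impfs`` and ``measure``
+        here are placeholders standing in for existing `Exposures`, `Hazard`,
+        `ImpactFuncSet` and `Measure` instances)::
+
+            snap = Snapshot.from_triplet(
+                exposure=exp, hazard=haz, impfset=impfs, date=2030
+            )
+            snap_with_measure = snap.apply_measure(measure)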
+ + """ + return cls( + exposure=exposure, + hazard=hazard, + impfset=impfset, + measure=None, + date=date, + ref_only=ref_only, + ) + @property def exposure(self) -> Exposures: """Exposure data for the snapshot.""" @@ -129,17 +179,17 @@ def _convert_to_date(date_arg) -> datetime.date: if isinstance(date_arg, int): # Assume the integer represents a year return datetime.date(date_arg, 1, 1) - elif isinstance(date_arg, str): + if isinstance(date_arg, str): # Try to parse the string as a date try: return datetime.datetime.strptime(date_arg, "%Y-%m-%d").date() - except ValueError: - raise ValueError("String must be in the format 'YYYY-MM-DD'") - elif isinstance(date_arg, datetime.date): + except ValueError as exc: + raise ValueError("String must be in the format 'YYYY-MM-DD'") from exc + if isinstance(date_arg, datetime.date): # Already a date object return date_arg - else: - raise TypeError("date_arg must be an int, str, or datetime.date") + + raise TypeError("date_arg must be an int, str, or datetime.date") def apply_measure(self, measure: Measure) -> "Snapshot": """Create a new snapshot by applying a Measure object. @@ -158,8 +208,9 @@ def apply_measure(self, measure: Measure) -> "Snapshot": """ - LOGGER.debug(f"Applying measure {measure.name} on snapshot {id(self)}") + LOGGER.debug("Applying measure %s on snapshot %s", measure.name, id(self)) exp, impfset, haz = measure.apply(self.exposure, self.impfset, self.hazard) - snap = Snapshot(exposure=exp, hazard=haz, impfset=impfset, date=self.date) - snap._measure = measure + snap = Snapshot( + exposure=exp, hazard=haz, impfset=impfset, date=self.date, measure=measure + ) return snap From 87332be211ccf32dc660831610c64276d98b05c3 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Mon, 5 Jan 2026 15:22:09 +0100 Subject: [PATCH 20/37] Complies with pylint --- climada/trajectories/impact_calc_strat.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/climada/trajectories/impact_calc_strat.py b/climada/trajectories/impact_calc_strat.py index a58aceeab2..75cf08f545 100644 --- a/climada/trajectories/impact_calc_strat.py +++ b/climada/trajectories/impact_calc_strat.py @@ -32,6 +32,10 @@ __all__ = ["ImpactCalcComputation"] +# The following is acceptable. +# We design a pattern, and currently it requires only to +# define the compute_impacts method. +# pylint: disable=too-few-public-methods class ImpactComputationStrategy(ABC): """ Interface for impact computation strategies. @@ -73,7 +77,6 @@ def compute_impacts( -------- ImpactCalcComputation : The default implementation of this interface. """ - ... class ImpactCalcComputation(ImpactComputationStrategy): From e01c330e42bb336f2b28a590e875c06e002e7b41 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Mon, 5 Jan 2026 16:02:53 +0100 Subject: [PATCH 21/37] Pylint compliance --- climada/trajectories/calc_risk_metrics.py | 68 +++++++++++++++++------ climada/trajectories/constants.py | 2 +- climada/trajectories/static_trajectory.py | 24 ++++---- climada/trajectories/trajectory.py | 59 +++++++++++--------- 4 files changed, 98 insertions(+), 55 deletions(-) diff --git a/climada/trajectories/calc_risk_metrics.py b/climada/trajectories/calc_risk_metrics.py index e7ba88c143..674b3eb599 100644 --- a/climada/trajectories/calc_risk_metrics.py +++ b/climada/trajectories/calc_risk_metrics.py @@ -60,11 +60,31 @@ def lazy_property(method): - # This function is used as a decorator for properties - # that require "heavy" computation and are not always needed. 
- # When requested, if a property is none, it uses the corresponding - # computation method and caches the result in the corresponding - # private attribute + """ + Decorator that converts a method into a cached, lazy-evaluated property. + + This decorator is intended for properties that require heavy computation. + The result is calculated only when first accessed and then stored in a + corresponding private attribute (e.g., a method named `impact` will + cache its result in `_impact`). + + Parameters + ---------- + method : callable + The method to be converted into a lazy property. + + Returns + ------- + property + A property object that handles the caching logic and attribute access. + + Notes + ----- + The caching behavior can be globally toggled via the + `_CACHE_SETTINGS["ENABLE_LAZY_CACHE"]` flag. If disabled, the + method will be re-evaluated on every access. + + """ attr_name = f"_{method.__name__}" @property @@ -137,13 +157,16 @@ def __init__( ] ) ) - except ValueError as e: - error_message = str(e).lower() + except ValueError as exc: + error_message = str(exc).lower() if "need at least one array to concatenate" in error_message: self._group_id = np.array([]) def _reset_impact_data(self): - """Util method that resets computed data, for instance when changing the computation strategy.""" + """Util method that resets computed data, for instance when + changing the computation strategy. + + """ self._impacts = None self._eai_gdf = None self._per_date_eai = None @@ -151,7 +174,10 @@ def _reset_impact_data(self): @property def impact_computation_strategy(self) -> ImpactComputationStrategy: - """The method used to calculate the impact from the (Haz,Exp,Vul) of the snapshots.""" + """The method used to calculate the impact from the (Haz,Exp,Vul) + of the snapshots. + + """ return self._impact_computation_strategy @impact_computation_strategy.setter @@ -186,18 +212,22 @@ def per_date_aai(self) -> np.ndarray: return np.array([imp.aai_agg for imp in self.impacts]) def calc_eai_gdf(self) -> pd.DataFrame: - """Convenience function returning a DataFrame (with both datetime and coordinates) from `per_date_eai`. + """Convenience function returning a DataFrame + from `per_date_eai`. - This can easily be merged with the GeoDataFrame of the exposure object of one of the `Snapshot`. + This can easily be merged with the GeoDataFrame of + the exposure object of one of the `Snapshot`. Notes ----- - The DataFrame from the first snapshot of the list is used as a basis (notably for `value` and `group_id`). + The DataFrame from the first snapshot of the list is used + as a basis (notably for `value` and `group_id`). 
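+
+        A minimal sketch of such a merge (``calc_risk`` stands for an
+        instance of this class and ``snap`` for one of its snapshots;
+        `COORD_ID_COL_NAME` is assumed to be the coordinate id column
+        constant used by this module)::
+
+            eai_gdf = calc_risk.calc_eai_gdf()
+            merged = snap.exposure.gdf.merge(
+                eai_gdf, left_index=True, right_on=COORD_ID_COL_NAME
+            )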
+
+        """
-        df = pd.DataFrame(self.per_date_eai, index=self._date_idx)
-        df = df.reset_index().melt(
+        metric_df = pd.DataFrame(self.per_date_eai, index=self._date_idx)
+        metric_df = metric_df.reset_index().melt(
             id_vars=DATE_COL_NAME, var_name=COORD_ID_COL_NAME, value_name=RISK_COL_NAME
         )
         eai_gdf = pd.concat(
@@ -214,7 +244,7 @@ def calc_eai_gdf(self) -> pd.DataFrame:
             eai_gdf[[GROUP_ID_COL_NAME]] = pd.NA
 
         eai_gdf = eai_gdf[[DATE_COL_NAME, COORD_ID_COL_NAME, GROUP_ID_COL_NAME]]
-        eai_gdf = eai_gdf.merge(df, on=[DATE_COL_NAME, COORD_ID_COL_NAME])
+        eai_gdf = eai_gdf.merge(metric_df, on=[DATE_COL_NAME, COORD_ID_COL_NAME])
         eai_gdf = eai_gdf.rename(columns={GROUP_ID_COL_NAME: GROUP_COL_NAME})
         eai_gdf[GROUP_COL_NAME] = pd.Categorical(
             eai_gdf[GROUP_COL_NAME], categories=self._group_id
@@ -244,7 +274,10 @@ def calc_aai_metric(self) -> pd.DataFrame:
         return aai_df
 
     def calc_aai_per_group_metric(self) -> pd.DataFrame | None:
-        """Compute a DataFrame of the AAI distinguished per group id in the exposures, for each snapshot."""
+        """Compute a DataFrame of the AAI distinguished per group id
+        in the exposures, for each snapshot.
+
+        """
 
         if len(self._group_id) < 1:
             LOGGER.warning(
@@ -266,7 +299,8 @@ def calc_aai_per_group_metric(self) -> pd.DataFrame | None:
         return aai_per_group_df
 
     def calc_return_periods_metric(self, return_periods: list[int]) -> pd.DataFrame:
-        """Compute a DataFrame of the estimated impacts for a list of return periods, for each snapshot.
+        """Compute a DataFrame of the estimated impacts for a list
+        of return periods, for each snapshot.
 
         Parameters
         ----------
diff --git a/climada/trajectories/constants.py b/climada/trajectories/constants.py
index c315f17761..969e585531 100644
--- a/climada/trajectories/constants.py
+++ b/climada/trajectories/constants.py
@@ -33,7 +33,7 @@
 DEFAULT_PERIOD_INDEX_NAME = "date"
 
-DEFAULT_RP = [20, 50, 100]
+DEFAULT_RP = (20, 50, 100)
 """Default return periods to use when computing return period impact estimates."""
 
 DEFAULT_ALLGROUP_NAME = "All"
diff --git a/climada/trajectories/static_trajectory.py b/climada/trajectories/static_trajectory.py
index 281887f347..42a9e8b84a 100644
--- a/climada/trajectories/static_trajectory.py
+++ b/climada/trajectories/static_trajectory.py
@@ -22,6 +22,7 @@
 """
 
 import logging
+from typing import Iterable
 
 import pandas as pd
 
@@ -37,8 +38,6 @@
     MEASURE_COL_NAME,
     METRIC_COL_NAME,
     RETURN_PERIOD_METRIC_NAME,
-    RISK_COL_NAME,
-    RP_VALUE_PREFIX,
 )
 from climada.trajectories.impact_calc_strat import (
     ImpactCalcComputation,
@@ -79,10 +78,14 @@ class StaticRiskTrajectory(RiskTrajectory):
     Currently:
 
-    - eai, expected impact (per exposure point within a period of 1/frequency unit of the hazard object)
+    - eai, expected impact (per exposure point within a period of 1/frequency
+      unit of the hazard object)
     - aai, average annual impact (aggregated eai over the whole exposure)
-    - aai_per_group, average annual impact per exposure subgroup (defined from the exposure geodataframe)
-    - return_periods, estimated impacts aggregated over the whole exposure for different return periods
+    - aai_per_group, average annual impact per exposure subgroup (defined from
+      the exposure geodataframe)
+    - return_periods, estimated impacts aggregated over the whole exposure for
+      different return periods
+
     """
 
     _DEFAULT_ALL_METRICS = [
@@ -93,9 +96,9 @@ class StaticRiskTrajectory(RiskTrajectory):
 
     def __init__(
         self,
-        snapshots_list: list[Snapshot],
+        snapshots_list: Iterable[Snapshot],
         *,
-        return_periods: list[int] = DEFAULT_RP,
+        return_periods: Iterable[int] = 
DEFAULT_RP, all_groups_name: str = DEFAULT_ALLGROUP_NAME, risk_disc_rates: DiscRates | None = None, impact_computation_strategy: ImpactComputationStrategy | None = None, @@ -146,6 +149,7 @@ def impact_computation_strategy(self, value, /): def _generic_metrics( self, + /, metric_name: str | None = None, metric_meth: str | None = None, **kwargs, @@ -195,7 +199,7 @@ def _generic_metrics( attr_name = f"_{metric_name}_metrics" if getattr(self, attr_name) is not None: - LOGGER.debug(f"Returning cached {attr_name}") + LOGGER.debug("Returning cached %s", attr_name) return getattr(self, attr_name) with log_level(level="WARNING", name_prefix="climada"): @@ -240,10 +244,10 @@ def eai_metrics(self, **kwargs) -> pd.DataFrame: This computation may become quite expensive for big areas with high resolution. """ - df = self._compute_metrics( + metric_df = self._compute_metrics( metric_name=EAI_METRIC_NAME, metric_meth="calc_eai_gdf", **kwargs ) - return df + return metric_df def aai_metrics(self, **kwargs) -> pd.DataFrame: """Return the average annual impacts for each date. diff --git a/climada/trajectories/trajectory.py b/climada/trajectories/trajectory.py index 5675521710..06088b3eca 100644 --- a/climada/trajectories/trajectory.py +++ b/climada/trajectories/trajectory.py @@ -23,7 +23,7 @@ import datetime import logging -from abc import ABC +from abc import ABC, abstractmethod import pandas as pd @@ -57,6 +57,13 @@ class RiskTrajectory(ABC): + """Base abstract class for risk trajectory objects. + + See concrete implementation :class:`StaticRiskTrajectory` and + :class:`InterpolatedRiskTrajectory` for more details. + + """ + _grouper = [MEASURE_COL_NAME, METRIC_COL_NAME] """Results dataframe grouper used in most `groupby()` calls.""" @@ -71,19 +78,12 @@ def __init__( all_groups_name: str = DEFAULT_ALLGROUP_NAME, risk_disc_rates: DiscRates | None = None, ): - """Base abstract class for risk trajectory objects. - - See concrete implementation :class:`StaticRiskTrajectory` and - :class:`InterpolatedRiskTrajectory` for more details. - - """ - self._reset_metrics() self._snapshots = sorted(snapshots_list, key=lambda snap: snap.date) self._all_groups_name = all_groups_name self._return_periods = return_periods - self.start_date = min([snapshot.date for snapshot in snapshots_list]) - self.end_date = max([snapshot.date for snapshot in snapshots_list]) + self.start_date = min((snapshot.date for snapshot in snapshots_list)) + self.end_date = max((snapshot.date for snapshot in snapshots_list)) self._risk_disc_rates = risk_disc_rates def _reset_metrics(self) -> None: @@ -100,6 +100,7 @@ def _reset_metrics(self) -> None: for metric in self.POSSIBLE_METRICS: setattr(self, "_" + metric + "_metrics", None) + @abstractmethod def _generic_metrics( self, /, metric_name: str, metric_meth: str, **kwargs ) -> pd.DataFrame: @@ -115,7 +116,9 @@ def _generic_metrics( - :method:`_compute_metrics` """ - ... + raise NotImplementedError( + f"'_generic_metrics' must be implemented by subclasses of {self.__class__.__name__}" + ) def _compute_metrics( self, /, metric_name: str, metric_meth: str, **kwargs @@ -175,13 +178,13 @@ def risk_disc_rates(self, value, /): @classmethod def npv_transform( - cls, df: pd.DataFrame, risk_disc_rates: DiscRates + cls, metric_df: pd.DataFrame, risk_disc_rates: DiscRates ) -> pd.DataFrame: """Apply provided discount rate to the provided metric `DataFrame`. Parameters ---------- - df : pd.DataFrame + metric_df : pd.DataFrame The `DataFrame` of the metric to discount. 
risk_disc_rates : DiscRate The discount rate to apply. @@ -197,20 +200,20 @@ def _npv_group(group, disc): start_date = group.index.get_level_values(DATE_COL_NAME).min() return cls._calc_npv_cash_flows(group, start_date, disc) - df = df.set_index(DATE_COL_NAME) + metric_df = metric_df.set_index(DATE_COL_NAME) grouper = cls._grouper - if GROUP_COL_NAME in df.columns: + if GROUP_COL_NAME in metric_df.columns: grouper = [GROUP_COL_NAME] + grouper - df[RISK_COL_NAME] = df.groupby( + metric_df[RISK_COL_NAME] = metric_df.groupby( grouper, dropna=False, as_index=False, group_keys=False, observed=True, )[RISK_COL_NAME].transform(_npv_group, risk_disc_rates) - df = df.reset_index() - return df + metric_df = metric_df.reset_index() + return metric_df @staticmethod def _calc_npv_cash_flows( @@ -248,21 +251,23 @@ def _calc_npv_cash_flows( "cash_flows must be a pandas Series with a PeriodIndex or DatetimeIndex" ) - df = cash_flows.to_frame(name="cash_flow") # type: ignore - df["year"] = df.index.year + metric_df = cash_flows.to_frame(name="cash_flow") # type: ignore + metric_df["year"] = metric_df.index.year # Merge with the discount rates based on the year - tmp = df.merge( + tmp = metric_df.merge( pd.DataFrame({"year": disc_rates.years, "rate": disc_rates.rates}), on="year", how="left", ) - tmp.index = df.index - df = tmp.copy() - df["discount_factor"] = (1 / (1 + df["rate"])) ** ( - df.index.year - start_date.year + tmp.index = metric_df.index + metric_df = tmp.copy() + metric_df["discount_factor"] = (1 / (1 + metric_df["rate"])) ** ( + metric_df.index.year - start_date.year ) # Apply the discount factors to the cash flows - df["npv_cash_flow"] = df["cash_flow"] * df["discount_factor"] - return df["npv_cash_flow"] + metric_df["npv_cash_flow"] = ( + metric_df["cash_flow"] * metric_df["discount_factor"] + ) + return metric_df["npv_cash_flow"] From 1ec88bf5b4779687402a41fc6af00db24d60e852 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Mon, 5 Jan 2026 16:04:03 +0100 Subject: [PATCH 22/37] Fixes type hints --- climada/trajectories/trajectory.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/climada/trajectories/trajectory.py b/climada/trajectories/trajectory.py index 06088b3eca..75c30b9aa1 100644 --- a/climada/trajectories/trajectory.py +++ b/climada/trajectories/trajectory.py @@ -24,6 +24,7 @@ import datetime import logging from abc import ABC, abstractmethod +from typing import Iterable import pandas as pd @@ -72,9 +73,9 @@ class RiskTrajectory(ABC): def __init__( self, - snapshots_list: list[Snapshot], + snapshots_list: Iterable[Snapshot], *, - return_periods: list[int] = DEFAULT_RP, + return_periods: Iterable[int] = DEFAULT_RP, all_groups_name: str = DEFAULT_ALLGROUP_NAME, risk_disc_rates: DiscRates | None = None, ): @@ -137,7 +138,7 @@ def _compute_metrics( ) @property - def return_periods(self) -> list[int]: + def return_periods(self) -> Iterable[int]: """The return period values to use when computing risk period metrics. 
Notes From ad2e77450b1faea09cad53eb36941fe61eb28f1a Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 10:29:19 +0100 Subject: [PATCH 23/37] ref only for apply measure --- climada/trajectories/snapshot.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py index 24a90ca0e1..8d9a74ef6f 100644 --- a/climada/trajectories/snapshot.py +++ b/climada/trajectories/snapshot.py @@ -191,7 +191,7 @@ def _convert_to_date(date_arg) -> datetime.date: raise TypeError("date_arg must be an int, str, or datetime.date") - def apply_measure(self, measure: Measure) -> "Snapshot": + def apply_measure(self, measure: Measure, ref_only: bool = False) -> "Snapshot": """Create a new snapshot by applying a Measure object. This method creates a new `Snapshot` object by applying a measure on @@ -211,6 +211,11 @@ def apply_measure(self, measure: Measure) -> "Snapshot": LOGGER.debug("Applying measure %s on snapshot %s", measure.name, id(self)) exp, impfset, haz = measure.apply(self.exposure, self.impfset, self.hazard) snap = Snapshot( - exposure=exp, hazard=haz, impfset=impfset, date=self.date, measure=measure + exposure=exp, + hazard=haz, + impfset=impfset, + date=self.date, + measure=measure, + ref_only=ref_only, ) return snap From 25fbcdaed93965ea0d8c44d5fc6bc21be4af3180 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 10:35:22 +0100 Subject: [PATCH 24/37] adds API references rst files --- doc/api/climada/climada.rst | 1 + doc/api/climada/climada.trajectories.rst | 7 +++++++ doc/api/climada/climada.trajectories.snapshot.rst | 7 +++++++ 3 files changed, 15 insertions(+) create mode 100644 doc/api/climada/climada.trajectories.rst create mode 100644 doc/api/climada/climada.trajectories.snapshot.rst diff --git a/doc/api/climada/climada.rst b/doc/api/climada/climada.rst index 557532912f..2e8d053946 100644 --- a/doc/api/climada/climada.rst +++ b/doc/api/climada/climada.rst @@ -7,4 +7,5 @@ Software documentation per package climada.engine climada.entity climada.hazard + climada.trajectories climada.util diff --git a/doc/api/climada/climada.trajectories.rst b/doc/api/climada/climada.trajectories.rst new file mode 100644 index 0000000000..28c035e20e --- /dev/null +++ b/doc/api/climada/climada.trajectories.rst @@ -0,0 +1,7 @@ + +climada\.trajectories module +============================ + +.. toctree:: + + climada.trajectories.snapshot diff --git a/doc/api/climada/climada.trajectories.snapshot.rst b/doc/api/climada/climada.trajectories.snapshot.rst new file mode 100644 index 0000000000..ba0faf57ac --- /dev/null +++ b/doc/api/climada/climada.trajectories.snapshot.rst @@ -0,0 +1,7 @@ +climada\.trajectories\.snapshot module +---------------------------------------- + +.. 
automodule:: climada.trajectories.snapshot
+   :members:
+   :undoc-members:
+   :show-inheritance:

From cf40e8f3e4d085ab354af90b4b2e04d6d59b8e9b Mon Sep 17 00:00:00 2001
From: spjuhel 
Date: Tue, 6 Jan 2026 10:41:10 +0100
Subject: [PATCH 25/37] API references files

---
 doc/api/climada/climada.trajectories.impact_calc_strat.rst | 7 +++++++
 doc/api/climada/climada.trajectories.rst | 1 +
 2 files changed, 8 insertions(+)
 create mode 100644 doc/api/climada/climada.trajectories.impact_calc_strat.rst

diff --git a/doc/api/climada/climada.trajectories.impact_calc_strat.rst b/doc/api/climada/climada.trajectories.impact_calc_strat.rst
new file mode 100644
index 0000000000..1bf211b4c0
--- /dev/null
+++ b/doc/api/climada/climada.trajectories.impact_calc_strat.rst
@@ -0,0 +1,7 @@
+climada\.trajectories\.impact_calc_strat module
+-----------------------------------------------
+
+.. automodule:: climada.trajectories.impact_calc_strat
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/doc/api/climada/climada.trajectories.rst b/doc/api/climada/climada.trajectories.rst
index 28c035e20e..883078074f 100644
--- a/doc/api/climada/climada.trajectories.rst
+++ b/doc/api/climada/climada.trajectories.rst
@@ -5,3 +5,4 @@ climada\.trajectories module
 .. toctree::
 
    climada.trajectories.snapshot
+   climada.trajectories.impact_calc_strat

From 183783f2b432d8f6372f724bc72e8e61eabb3555 Mon Sep 17 00:00:00 2001
From: spjuhel 
Date: Tue, 6 Jan 2026 10:48:29 +0100
Subject: [PATCH 26/37] API references files

---
 doc/api/climada/climada.trajectories.rst | 1 +
 .../climada/climada.trajectories.trajectories.rst | 15 +++++++++++++++
 2 files changed, 16 insertions(+)
 create mode 100644 doc/api/climada/climada.trajectories.trajectories.rst

diff --git a/doc/api/climada/climada.trajectories.rst b/doc/api/climada/climada.trajectories.rst
index 883078074f..67b37809a6 100644
--- a/doc/api/climada/climada.trajectories.rst
+++ b/doc/api/climada/climada.trajectories.rst
@@ -5,4 +5,5 @@ climada\.trajectories module
 .. toctree::
 
    climada.trajectories.snapshot
+   climada.trajectories.trajectories
    climada.trajectories.impact_calc_strat
diff --git a/doc/api/climada/climada.trajectories.trajectories.rst b/doc/api/climada/climada.trajectories.trajectories.rst
new file mode 100644
index 0000000000..4abe8acb15
--- /dev/null
+++ b/doc/api/climada/climada.trajectories.trajectories.rst
@@ -0,0 +1,15 @@
+climada\.trajectories\.static_trajectory module
+-----------------------------------------------
+
+.. automodule:: climada.trajectories.static_trajectory
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+climada\.trajectories\.trajectory module
+----------------------------------------
+
+.. 
automodule:: climada.trajectories.trajectory + :members: + :undoc-members: + :show-inheritance: From b8ef41a42991767ea2fa50f6c9b90037dcd1f8f8 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 10:59:20 +0100 Subject: [PATCH 27/37] On The Dangers of Copy Pasting (Juhel 2026) --- climada/trajectories/snapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py index 8d9a74ef6f..05d948793f 100644 --- a/climada/trajectories/snapshot.py +++ b/climada/trajectories/snapshot.py @@ -87,7 +87,7 @@ def __init__( self._exposure = exposure if ref_only else copy.deepcopy(exposure) self._hazard = hazard if ref_only else copy.deepcopy(hazard) self._impfset = impfset if ref_only else copy.deepcopy(impfset) - self._measure = measure if ref_only else copy.deepcopy(impfset) + self._measure = measure if ref_only else copy.deepcopy(measure) self._date = self._convert_to_date(date) @classmethod From 30e2d0efe49716d32f04b11e30b514210c44c889 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 11:29:20 +0100 Subject: [PATCH 28/37] Adds warnings for direct __init__ call --- climada/trajectories/snapshot.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py index 05d948793f..233cc15696 100644 --- a/climada/trajectories/snapshot.py +++ b/climada/trajectories/snapshot.py @@ -26,6 +26,7 @@ import copy import datetime import logging +import warnings from climada.entity.exposures import Exposures from climada.entity.impact_funcs import ImpactFuncSet @@ -83,7 +84,15 @@ def __init__( measure: Measure | None, date: int | datetime.date | str, ref_only: bool = False, + _from_factory: bool = False, ) -> None: + if not _from_factory: + warnings.warn( + "Direct instantiation of 'Snapshot' is discouraged. " + "Use 'Snapshot.from_triplet()' instead.", + UserWarning, + stacklevel=2, + ) self._exposure = exposure if ref_only else copy.deepcopy(exposure) self._hazard = hazard if ref_only else copy.deepcopy(hazard) self._impfset = impfset if ref_only else copy.deepcopy(impfset) @@ -137,6 +146,7 @@ def from_triplet( measure=None, date=date, ref_only=ref_only, + _from_factory=True, ) @property @@ -191,7 +201,7 @@ def _convert_to_date(date_arg) -> datetime.date: raise TypeError("date_arg must be an int, str, or datetime.date") - def apply_measure(self, measure: Measure, ref_only: bool = False) -> "Snapshot": + def apply_measure(self, measure: Measure) -> "Snapshot": """Create a new snapshot by applying a Measure object. 
This method creates a new `Snapshot` object by applying a measure on @@ -216,6 +226,7 @@ def apply_measure(self, measure: Measure, ref_only: bool = False) -> "Snapshot": impfset=impfset, date=self.date, measure=measure, - ref_only=ref_only, + ref_only=False, + _from_factory=True, ) return snap From ffbf31eca1086d8a0320a30ec09b3f7e33666561 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 11:31:13 +0100 Subject: [PATCH 29/37] Shifts tests to pytest format (and updates them) --- climada/trajectories/test/test_snapshot.py | 247 ++++++++++++--------- 1 file changed, 141 insertions(+), 106 deletions(-) diff --git a/climada/trajectories/test/test_snapshot.py b/climada/trajectories/test/test_snapshot.py index 4e3b465d8e..e3c2eb0e9c 100644 --- a/climada/trajectories/test/test_snapshot.py +++ b/climada/trajectories/test/test_snapshot.py @@ -1,9 +1,9 @@ import datetime -import unittest from unittest.mock import MagicMock import numpy as np import pandas as pd +import pytest from climada.entity.exposures import Exposures from climada.entity.impact_funcs import ImpactFunc, ImpactFuncSet @@ -12,121 +12,156 @@ from climada.trajectories.snapshot import Snapshot from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 +# --- Fixtures --- + + +@pytest.fixture(scope="module") +def shared_data(): + """Load heavy HDF5 data once per module to speed up tests.""" + exposure = Exposures.from_hdf5(EXP_DEMO_H5) + hazard = Hazard.from_hdf5(HAZ_DEMO_H5) + impfset = ImpactFuncSet( + [ + ImpactFunc( + "TC", + 3, + intensity=np.array([0, 20]), + mdd=np.array([0, 0.5]), + paa=np.array([0, 1]), + ) + ] + ) + return exposure, hazard, impfset -class TestSnapshot(unittest.TestCase): - - def setUp(self): - # Create mock objects for testing - self.mock_exposure = Exposures.from_hdf5(EXP_DEMO_H5) - self.mock_hazard = Hazard.from_hdf5(HAZ_DEMO_H5) - self.mock_impfset = ImpactFuncSet( - [ - ImpactFunc( - "TC", - 3, - intensity=np.array([0, 20]), - mdd=np.array([0, 0.5]), - paa=np.array([0, 1]), - ) - ] - ) - self.mock_measure = MagicMock(spec=Measure) - self.mock_measure.name = "Test Measure" - - # Setup mock return values for measure.apply - self.mock_modified_exposure = MagicMock(spec=Exposures) - self.mock_modified_hazard = MagicMock(spec=Hazard) - self.mock_modified_impfset = MagicMock(spec=ImpactFuncSet) - self.mock_measure.apply.return_value = ( - self.mock_modified_exposure, - self.mock_modified_impfset, - self.mock_modified_hazard, - ) - def test_init_with_int_date(self): - snapshot = Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date=2023, - ) - self.assertEqual(snapshot.date, datetime.date(2023, 1, 1)) - - def test_init_with_str_date(self): - snapshot = Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date="2023-01-01", - ) - self.assertEqual(snapshot.date, datetime.date(2023, 1, 1)) - - def test_init_with_date_object(self): - date_obj = datetime.date(2023, 1, 1) - snapshot = Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date=date_obj, - ) - self.assertEqual(snapshot.date, date_obj) - - def test_init_with_invalid_date(self): - with self.assertRaises(ValueError): - Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date="invalid-date", - ) +@pytest.fixture +def mock_context(shared_data): + """Provides the exposure/hazard/impfset and a pre-configured mock measure.""" + exp, haz, impf = shared_data - def 
test_init_with_invalid_type(self): - with self.assertRaises(TypeError): - Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date=2023.5, # type: ignore - ) + # Setup Mock Measure + mock_measure = MagicMock(spec=Measure) + mock_measure.name = "Test Measure" - def test_properties(self): - snapshot = Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date=2023, - ) + modified_exp = MagicMock(spec=Exposures) + modified_haz = MagicMock(spec=Hazard) + modified_imp = MagicMock(spec=ImpactFuncSet) - # We want a new reference - self.assertIsNot(snapshot.exposure, self.mock_exposure) - self.assertIsNot(snapshot.hazard, self.mock_hazard) - self.assertIsNot(snapshot.impfset, self.mock_impfset) + mock_measure.apply.return_value = (modified_exp, modified_imp, modified_haz) - # But we want equality - pd.testing.assert_frame_equal(snapshot.exposure.gdf, self.mock_exposure.gdf) + return { + "exp": exp, + "haz": haz, + "imp": impf, + "measure": mock_measure, + "mod_exp": modified_exp, + "mod_haz": modified_haz, + "mod_imp": modified_imp, + } - self.assertEqual(snapshot.hazard.haz_type, self.mock_hazard.haz_type) - self.assertEqual(snapshot.hazard.intensity.nnz, self.mock_hazard.intensity.nnz) - self.assertEqual(snapshot.hazard.size, self.mock_hazard.size) - self.assertEqual(snapshot.impfset, self.mock_impfset) +# --- Tests --- - def test_apply_measure(self): - snapshot = Snapshot( - exposure=self.mock_exposure, - hazard=self.mock_hazard, - impfset=self.mock_impfset, - date=2023, + +def test_not_from_factory_warning(mock_context): + """Test that direct __init__ call raises a warning""" + with pytest.warns(UserWarning): + Snapshot( + exposure=mock_context["exp"], + hazard=mock_context["haz"], + impfset=mock_context["imp"], + measure=None, + date=2001, ) - new_snapshot = snapshot.apply_measure(self.mock_measure) - self.assertIsNotNone(new_snapshot.measure) - self.assertEqual(new_snapshot.measure.name, "Test Measure") # type: ignore - self.assertEqual(new_snapshot.exposure, self.mock_modified_exposure) - self.assertEqual(new_snapshot.hazard, self.mock_modified_hazard) - self.assertEqual(new_snapshot.impfset, self.mock_modified_impfset) + +@pytest.mark.parametrize( + "input_date,expected", + [ + (2023, datetime.date(2023, 1, 1)), + ("2023-01-01", datetime.date(2023, 1, 1)), + (datetime.date(2023, 1, 1), datetime.date(2023, 1, 1)), + ], +) +def test_init_valid_dates(mock_context, input_date, expected): + """Test various valid date input formats using parametrization.""" + snapshot = Snapshot.from_triplet( + exposure=mock_context["exp"], + hazard=mock_context["haz"], + impfset=mock_context["imp"], + date=input_date, + ) + assert snapshot.date == expected + + +def test_init_invalid_date_format(mock_context): + with pytest.raises(ValueError): + Snapshot.from_triplet( + exposure=mock_context["exp"], + hazard=mock_context["haz"], + impfset=mock_context["imp"], + date="invalid-date", + ) -if __name__ == "__main__": - TESTS = unittest.TestLoader().loadTestsFromTestCase(TestSnapshot) - unittest.TextTestRunner(verbosity=2).run(TESTS) +def test_init_invalid_date_type(mock_context): + with pytest.raises(TypeError): + Snapshot.from_triplet(exposure=mock_context["exp"], hazard=mock_context["haz"], impfset=mock_context["imp"], date=2023.5) # type: ignore + + +def test_properties(mock_context): + snapshot = Snapshot.from_triplet( + exposure=mock_context["exp"], + hazard=mock_context["haz"], + impfset=mock_context["imp"], + 
date=2023, + ) + + # Check that it's a deep copy (new reference) + assert snapshot.exposure is not mock_context["exp"] + assert snapshot.hazard is not mock_context["haz"] + + assert snapshot.measure is None + + # Check data equality + pd.testing.assert_frame_equal(snapshot.exposure.gdf, mock_context["exp"].gdf) + assert snapshot.hazard.haz_type == mock_context["haz"].haz_type + assert snapshot.impfset == mock_context["imp"] + + +def test_reference(mock_context): + snapshot = Snapshot.from_triplet( + exposure=mock_context["exp"], + hazard=mock_context["haz"], + impfset=mock_context["imp"], + date=2023, + ref_only=True, + ) + + # Check that it is a reference + assert snapshot.exposure is mock_context["exp"] + assert snapshot.hazard is mock_context["haz"] + assert snapshot.impfset is mock_context["imp"] + assert snapshot.measure is None + + # Check data equality + pd.testing.assert_frame_equal(snapshot.exposure.gdf, mock_context["exp"].gdf) + assert snapshot.hazard.haz_type == mock_context["haz"].haz_type + assert snapshot.impfset == mock_context["imp"] + + +def test_apply_measure(mock_context): + snapshot = Snapshot( + exposure=mock_context["exp"], + hazard=mock_context["haz"], + impfset=mock_context["imp"], + measure=None, + date=2023, + ) + new_snapshot = snapshot.apply_measure(mock_context["measure"]) + + assert new_snapshot.measure is not None + assert new_snapshot.measure.name == "Test Measure" + assert new_snapshot.exposure == mock_context["mod_exp"] + assert new_snapshot.hazard == mock_context["mod_haz"] + assert new_snapshot.impfset == mock_context["mod_imp"] From 59ba2918e7c7c8d5616cc19fc1c7901b5f583976 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 12:00:10 +0100 Subject: [PATCH 30/37] updates tests --- climada/trajectories/test/test_snapshot.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/climada/trajectories/test/test_snapshot.py b/climada/trajectories/test/test_snapshot.py index e3c2eb0e9c..cecfa39395 100644 --- a/climada/trajectories/test/test_snapshot.py +++ b/climada/trajectories/test/test_snapshot.py @@ -95,7 +95,7 @@ def test_init_valid_dates(mock_context, input_date, expected): def test_init_invalid_date_format(mock_context): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="String must be in the format"): Snapshot.from_triplet( exposure=mock_context["exp"], hazard=mock_context["haz"], @@ -105,7 +105,9 @@ def test_init_invalid_date_format(mock_context): def test_init_invalid_date_type(mock_context): - with pytest.raises(TypeError): + with pytest.raises( + TypeError, match=r"date_arg must be an int, str, or datetime.date" + ): Snapshot.from_triplet(exposure=mock_context["exp"], hazard=mock_context["haz"], impfset=mock_context["imp"], date=2023.5) # type: ignore @@ -151,11 +153,10 @@ def test_reference(mock_context): def test_apply_measure(mock_context): - snapshot = Snapshot( + snapshot = Snapshot.from_triplet( exposure=mock_context["exp"], hazard=mock_context["haz"], impfset=mock_context["imp"], - measure=None, date=2023, ) new_snapshot = snapshot.apply_measure(mock_context["measure"]) From 77b76c44b5f8fb39ed925e5df3ba3a5b222df472 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 12:00:21 +0100 Subject: [PATCH 31/37] apply_measure already makes copies --- climada/trajectories/snapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/climada/trajectories/snapshot.py b/climada/trajectories/snapshot.py index 233cc15696..cf93171e0e 100644 --- a/climada/trajectories/snapshot.py 
+++ b/climada/trajectories/snapshot.py @@ -226,7 +226,7 @@ def apply_measure(self, measure: Measure) -> "Snapshot": impfset=impfset, date=self.date, measure=measure, - ref_only=False, + ref_only=True, # Avoid unecessary copies of new objects _from_factory=True, ) return snap From 2da5952fa12d95bee4a014bf8ff993e696526eb0 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 16:00:06 +0100 Subject: [PATCH 32/37] Removes remaining global risk transfer code --- climada/trajectories/impact_calc_strat.py | 28 +---------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/climada/trajectories/impact_calc_strat.py b/climada/trajectories/impact_calc_strat.py index 75cf08f545..b1bb6eebd3 100644 --- a/climada/trajectories/impact_calc_strat.py +++ b/climada/trajectories/impact_calc_strat.py @@ -95,7 +95,7 @@ def compute_impacts( vul: ImpactFuncSet, ) -> Impact: """ - Calculates the impact and applies the "global" risk transfer mechanism. + Calculates the impact. Parameters ---------- @@ -111,30 +111,4 @@ def compute_impacts( Impact The final impact object. """ - impact = self.compute_impacts_pre_transfer(exp, haz, vul) - return impact - - def compute_impacts_pre_transfer( - self, - exp: Exposures, - haz: Hazard, - vul: ImpactFuncSet, - ) -> Impact: - """ - Calculates the raw impact matrix before any risk transfer is applied. - - Parameters - ---------- - exp : Exposures - The exposure data. - haz : Hazard - The hazard data. - vul : ImpactFuncSet - The set of vulnerability functions. - - Returns - ------- - Impact - An Impact object containing the raw, pre-transfer impact matrix. - """ return ImpactCalc(exposures=exp, impfset=vul, hazard=haz).impact() From a4e0e0c8232964436536709d5d449cb3770d3ca2 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 16:00:32 +0100 Subject: [PATCH 33/37] Shifts test to pytest --- .../test/test_impact_calc_strat.py | 109 ++++++++++-------- 1 file changed, 61 insertions(+), 48 deletions(-) diff --git a/climada/trajectories/test/test_impact_calc_strat.py b/climada/trajectories/test/test_impact_calc_strat.py index a828ec51e6..eb5a53a2c0 100644 --- a/climada/trajectories/test/test_impact_calc_strat.py +++ b/climada/trajectories/test/test_impact_calc_strat.py @@ -20,65 +20,78 @@ """ -import unittest from unittest.mock import MagicMock, patch +import pytest + from climada.engine import Impact from climada.entity import ImpactFuncSet from climada.entity.exposures import Exposures from climada.hazard import Hazard from climada.trajectories import Snapshot -from climada.trajectories.impact_calc_strat import ImpactCalcComputation - - -class TestImpactCalcComputation(unittest.TestCase): - def setUp(self): - self.mock_snapshot0 = MagicMock(spec=Snapshot) - self.mock_snapshot0.exposure = MagicMock(spec=Exposures) - self.mock_snapshot0.hazard = MagicMock(spec=Hazard) - self.mock_snapshot0.impfset = MagicMock(spec=ImpactFuncSet) - self.mock_snapshot1 = MagicMock(spec=Snapshot) - self.mock_snapshot1.exposure = MagicMock(spec=Exposures) - self.mock_snapshot1.hazard = MagicMock(spec=Hazard) - self.mock_snapshot1.impfset = MagicMock(spec=ImpactFuncSet) - - self.impact_calc_computation = ImpactCalcComputation() - - @patch.object(ImpactCalcComputation, "compute_impacts_pre_transfer") - def test_compute_impacts(self, mock_calculate_impacts_for_snapshots): - mock_impacts = MagicMock(spec=Impact) - mock_calculate_impacts_for_snapshots.return_value = mock_impacts - - result = self.impact_calc_computation.compute_impacts( - exp=self.mock_snapshot0.exposure, - 
haz=self.mock_snapshot0.hazard, - vul=self.mock_snapshot0.impfset, - ) +from climada.trajectories.impact_calc_strat import ( + ImpactCalcComputation, + ImpactComputationStrategy, +) + +# --- Fixtures --- + + +@pytest.fixture +def mock_snapshot(): + """Provides a snapshot with mocked exposure, hazard, and impact functions.""" + snap = MagicMock(spec=Snapshot) + snap.exposure = MagicMock(spec=Exposures) + snap.hazard = MagicMock(spec=Hazard) + snap.impfset = MagicMock(spec=ImpactFuncSet) + return snap - self.assertEqual(result, mock_impacts) - mock_calculate_impacts_for_snapshots.assert_called_once_with( - self.mock_snapshot0.exposure, - self.mock_snapshot0.hazard, - self.mock_snapshot0.impfset, - ) - def test_calculate_impacts_for_snapshots(self): - mock_imp_E0H0 = MagicMock(spec=Impact) +@pytest.fixture +def strategy(): + """Provides an instance of the ImpactCalcComputation strategy.""" + return ImpactCalcComputation() - with patch( - "climada.trajectories.impact_calc_strat.ImpactCalc" - ) as mock_impact_calc: - mock_impact_calc.return_value.impact.side_effect = [mock_imp_E0H0] - result = self.impact_calc_computation.compute_impacts_pre_transfer( - exp=self.mock_snapshot0.exposure, - haz=self.mock_snapshot0.hazard, - vul=self.mock_snapshot0.impfset, - ) +# --- Tests --- +def test_interface_compliance(strategy): + """Ensure the class correctly inherits from the Abstract Base Class.""" + assert isinstance(strategy, ImpactComputationStrategy) + assert isinstance(strategy, ImpactCalcComputation) + + +def test_compute_impacts(strategy, mock_snapshot): + """Test that compute_impacts calls the pre-transfer method correctly.""" + mock_impacts = MagicMock(spec=Impact) + + # We patch the ImpactCalc within trajectories + with patch("climada.trajectories.impact_calc_strat.ImpactCalc") as mock_ImpactCalc: + mock_ImpactCalc.return_value.impact.return_value = mock_impacts + result = strategy.compute_impacts( + exp=mock_snapshot.exposure, + haz=mock_snapshot.hazard, + vul=mock_snapshot.impfset, + ) + mock_ImpactCalc.assert_called_once_with( + exposures=mock_snapshot.exposure, + impfset=mock_snapshot.impfset, + hazard=mock_snapshot.hazard, + ) + mock_ImpactCalc.return_value.impact.assert_called_once() + assert result == mock_impacts + - self.assertEqual(result, mock_imp_E0H0) +def test_cannot_instantiate_abstract_base_class(): + """Ensure ImpactComputationStrategy cannot be instantiated directly.""" + with pytest.raises(TypeError, match="Can't instantiate abstract class"): + ImpactComputationStrategy() # type: ignore -if __name__ == "__main__": - TESTS = unittest.TestLoader().loadTestsFromTestCase(TestImpactCalcComputation) - unittest.TextTestRunner(verbosity=2).run(TESTS) +@pytest.mark.parametrize("invalid_input", [None, 123, "string"]) +def test_compute_impacts_type_errors(strategy, invalid_input): + """ + Smoke test: Ensure that if ImpactCalc raises errors due to bad input, + the strategy correctly propagates them. 
+ """ + with pytest.raises(AttributeError): + strategy.compute_impacts(invalid_input, invalid_input, invalid_input) From ca8ca6ec80814797ed4d444000132cce650e4072 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 16:03:18 +0100 Subject: [PATCH 34/37] Wrong merge somewhere pbly --- climada/trajectories/interpolation.py | 439 ------------------ .../trajectories/test/test_interpolation.py | 352 -------------- 2 files changed, 791 deletions(-) delete mode 100644 climada/trajectories/interpolation.py delete mode 100644 climada/trajectories/test/test_interpolation.py diff --git a/climada/trajectories/interpolation.py b/climada/trajectories/interpolation.py deleted file mode 100644 index 9f6687e449..0000000000 --- a/climada/trajectories/interpolation.py +++ /dev/null @@ -1,439 +0,0 @@ -""" -This file is part of CLIMADA. - -Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. - -CLIMADA is free software: you can redistribute it and/or modify it under the -terms of the GNU General Public License as published by the Free -Software Foundation, version 3. - -CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY -WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -PARTICULAR PURPOSE. See the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along -with CLIMADA. If not, see . - ---- - -This modules implements different sparce matrices and numpy arrays -interpolation approaches. - -""" - -import logging -from abc import ABC -from collections.abc import Callable -from typing import Any, Dict, List, Optional - -import numpy as np -from scipy import sparse - -LOGGER = logging.getLogger(__name__) - -__all__ = [ - "AllLinearStrategy", - "ExponentialExposureStrategy", - "linear_interp_arrays", - "linear_interp_imp_mat", - "exponential_interp_arrays", - "exponential_interp_imp_mat", -] - - -def linear_interp_imp_mat( - mat_start: sparse.csr_matrix, - mat_end: sparse.csr_matrix, - number_of_interpolation_points: int, -) -> List[sparse.csr_matrix]: - r""" - Linearly interpolates between two sparse impact matrices. - - Creates a sequence of matrices representing a linear transition from a starting - matrix to an ending matrix. The interpolation includes both the start and end - points. - - Parameters - ---------- - mat_start : scipy.sparse.csr_matrix - The starting impact matrix. Must have a shape compatible with `mat_end` - for arithmetic operations. - mat_end : scipy.sparse.csr_matrix - The ending impact matrix. Must have a shape compatible with `mat_start` - for arithmetic operations. - number_of_interpolation_points : int - The total number of matrices to return, including the start and end points. - Must be $\ge 2$. - - Returns - ------- - list of scipy.sparse.csr_matrix - A list of matrices, where the first element is `mat_start` and the last - element is `mat_end`. The total length of the list is - `number_of_interpolation_points`. - - Notes - ----- - The formula used for interpolation at proportion $p$ is: - $$M_p = M_{start} \cdot (1 - p) + M_{end} \cdot p$$ - The proportions $p$ range from 0 to 1, inclusive. 
-    """
-
-    return [
-        mat_start + prop * (mat_end - mat_start)
-        for prop in np.linspace(0, 1, number_of_interpolation_points)
-    ]
-
-
-def exponential_interp_imp_mat(
-    mat_start: sparse.csr_matrix,
-    mat_end: sparse.csr_matrix,
-    number_of_interpolation_points: int,
-) -> List[sparse.csr_matrix]:
-    r"""
-    Exponentially interpolates between two "impact matrices".
-
-    This function performs interpolation in a logarithmic space, effectively
-    achieving an exponential-like transition between `mat_start` and `mat_end`.
-    It is designed for objects that wrap NumPy arrays and expose them via a
-    `.data` attribute.
-
-    Parameters
-    ----------
-    mat_start : scipy.sparse.csr_matrix
-        The starting matrix object. Must have a `.data` attribute that is a
-        NumPy array of positive values.
-    mat_end : scipy.sparse.csr_matrix
-        The ending matrix object. Must have a `.data` attribute that is a
-        NumPy array of positive values, and a shape compatible with `mat_start`.
-    number_of_interpolation_points : int
-        The total number of matrix objects to return, including the start and
-        end points. Must be $\ge 2$.
-
-    Returns
-    -------
-    list of scipy.sparse.csr_matrix
-        A list of interpolated matrix objects. The first element corresponds to
-        `mat_start` and the last to `mat_end` (up to the log/exp round trip).
-        The list length is `number_of_interpolation_points`.
-
-    Notes
-    -----
-    The interpolation is achieved by:
-
-    1. Mapping the matrix data to a transformed logarithmic space:
-       $$M'_{i} = \ln(M_{i} + \epsilon)$$
-       (where $\ln$ is the natural logarithm, and $\epsilon$ is added to $M_{i}$
-       to prevent $\ln(0)$).
-    2. Performing standard linear interpolation on the transformed matrices
-       $M'_{start}$ and $M'_{end}$ to get $M'_{interp}$:
-       $$M'_{interp} = M'_{start} \cdot (1 - \text{ratio}) + M'_{end} \cdot \text{ratio}$$
-    3. Mapping the result back to the original domain:
-       $$M_{interp} = \exp(M'_{interp})$$
-    """
-
-    mat_start = mat_start.copy()
-    mat_end = mat_end.copy()
-    mat_start.data = np.log(mat_start.data + np.finfo(float).eps)
-    mat_end.data = np.log(mat_end.data + np.finfo(float).eps)
-
-    # Perform linear interpolation in the logarithmic domain
-    res = []
-    num_points = number_of_interpolation_points
-    for point in range(num_points):
-        ratio = point / (num_points - 1)
-        mat_interpolated = mat_start * (1 - ratio) + ratio * mat_end
-        mat_interpolated.data = np.exp(mat_interpolated.data)
-        res.append(mat_interpolated)
-    return res
-
-
-def linear_interp_arrays(arr_start: np.ndarray, arr_end: np.ndarray) -> np.ndarray:
-    r"""
-    Performs linear interpolation between two NumPy arrays over their first dimension.
-
-    This function interpolates each metric (column) linearly across the time steps
-    (rows), including both the start and end states.
-
-    Parameters
-    ----------
-    arr_start : numpy.ndarray
-        The starting array of metrics. The first dimension (rows) is assumed to
-        represent the interpolation steps (e.g., dates/time points).
-    arr_end : numpy.ndarray
-        The ending array of metrics. Must have the exact same shape as `arr_start`.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array with the same shape as `arr_start` and `arr_end`. The values
-        in the first dimension transition linearly from those in `arr_start`
-        to those in `arr_end`.
-
-    Raises
-    ------
-    ValueError
-        If `arr_start` and `arr_end` do not have the same shape.
-
-    Notes
-    -----
-    The interpolation is performed element-wise along the first dimension
-    (axis 0).
-    For each row $i$ and proportion $p_i$, the result $R_i$ is calculated as:
-
-    $$R_i = arr\_start_i \cdot (1 - p_i) + arr\_end_i \cdot p_i$$
-
-    where $p_i$ is generated by $\text{np.linspace}(0, 1, n)$ and $n$ is the
-    size of the first dimension ($\text{arr\_start.shape}[0]$).
-    """
-    if arr_start.shape != arr_end.shape:
-        raise ValueError(
-            f"Cannot interpolate arrays of different shapes: {arr_start.shape} and {arr_end.shape}."
-        )
-    interpolation_range = arr_start.shape[0]
-    prop1 = np.linspace(0, 1, interpolation_range)
-    prop0 = 1 - prop1
-    if arr_start.ndim > 1:
-        prop0, prop1 = prop0.reshape(-1, 1), prop1.reshape(-1, 1)
-
-    return np.multiply(arr_start, prop0) + np.multiply(arr_end, prop1)
-
-
-def exponential_interp_arrays(arr_start: np.ndarray, arr_end: np.ndarray) -> np.ndarray:
-    r"""
-    Performs exponential interpolation between two NumPy arrays over their first dimension.
-
-    This function achieves an exponential-like transition by performing linear
-    interpolation in logarithmic space, which makes it suitable for quantities
-    that evolve with a (roughly constant) growth factor.
-
-    Parameters
-    ----------
-    arr_start : numpy.ndarray
-        The starting array of metrics. Values must be positive.
-    arr_end : numpy.ndarray
-        The ending array of metrics. Must have the exact same shape as `arr_start`.
-
-    Returns
-    -------
-    numpy.ndarray
-        An array with the same shape as `arr_start` and `arr_end`. The values
-        in the first dimension transition exponentially from those in `arr_start`
-        to those in `arr_end`.
-
-    Raises
-    ------
-    ValueError
-        If `arr_start` and `arr_end` do not have the same shape.
-
-    Notes
-    -----
-    The interpolation is performed by transforming the arrays to a logarithmic
-    domain, linearly interpolating, and then transforming back.
-
-    The formula for the interpolated result $R$ at proportion $\text{prop}$ is:
-    $$
-    R = \exp \left(
-        \ln(A_{start}) \cdot (1 - \text{prop}) +
-        \ln(A_{end}) \cdot \text{prop}
-        \right)
-    $$
-    where $A_{start}$ and $A_{end}$ are the input arrays (with $\epsilon$ added
-    to prevent $\ln(0)$) and $\text{prop}$ ranges from 0 to 1.
-    """
-    if arr_start.shape != arr_end.shape:
-        raise ValueError(
-            f"Cannot interpolate arrays of different shapes: {arr_start.shape} and {arr_end.shape}."
-        )
-    interpolation_range = arr_start.shape[0]
-
-    prop1 = np.linspace(0, 1, interpolation_range)
-    prop0 = 1 - prop1
-    if arr_start.ndim > 1:
-        prop0, prop1 = prop0.reshape(-1, 1), prop1.reshape(-1, 1)
-
-    # Perform log transformation, linear interpolation, and exponential back-transformation
-    log_arr_start = np.log(arr_start + np.finfo(float).eps)
-    log_arr_end = np.log(arr_end + np.finfo(float).eps)
-
-    interpolated_log_arr = np.multiply(log_arr_start, prop0) + np.multiply(
-        log_arr_end, prop1
-    )
-
-    return np.exp(interpolated_log_arr)
-
-
-class InterpolationStrategyBase(ABC):
-    r"""
-    Base abstract class for defining a set of interpolation strategies.
-
-    This class serves as a blueprint for implementing specific interpolation
-    methods (e.g., 'Linear', 'Exponential') across different impact dimensions:
-    Exposure (matrices), Hazard, and Vulnerability (arrays/metrics).
-
-    Attributes
-    ----------
-    exposure_interp : Callable
-        The function used to interpolate sparse impact matrices over the
-        exposure dimension.
-        Signature: (mat_start, mat_end, num_points, **kwargs) -> list[sparse.csr_matrix].
-    hazard_interp : Callable
-        The function used to interpolate NumPy arrays of metrics over the
-        hazard dimension.
-        Signature: (arr_start, arr_end, **kwargs) -> np.ndarray.
-    vulnerability_interp : Callable
-        The function used to interpolate NumPy arrays of metrics over the
-        vulnerability dimension.
-        Signature: (arr_start, arr_end, **kwargs) -> np.ndarray.
-    """
-
-    exposure_interp: Callable
-    hazard_interp: Callable
-    vulnerability_interp: Callable
-
-    def interp_over_exposure_dim(
-        self,
-        imp_E0: sparse.csr_matrix,
-        imp_E1: sparse.csr_matrix,
-        interpolation_range: int,
-        /,
-        **kwargs: Optional[Dict[str, Any]],
-    ) -> List[sparse.csr_matrix]:
-        """
-        Interpolates between two impact matrices using the defined exposure strategy.
-
-        This method calls the function assigned to :attr:`exposure_interp` to generate
-        a sequence of matrices.
-
-        Parameters
-        ----------
-        imp_E0 : scipy.sparse.csr_matrix
-            A sparse matrix of the impacts at the start of the range.
-        imp_E1 : scipy.sparse.csr_matrix
-            A sparse matrix of the impacts at the end of the range.
-        interpolation_range : int
-            The total number of time points to interpolate, including the start and end.
-        **kwargs : Optional[Dict[str, Any]]
-            Keyword arguments to pass to the underlying :attr:`exposure_interp` function.
-
-        Returns
-        -------
-        list of scipy.sparse.csr_matrix
-            A list of ``interpolation_range`` interpolated impact matrices.
-
-        Raises
-        ------
-        ValueError
-            If the underlying interpolation function raises a ``ValueError``
-            indicating incompatible matrix shapes.
-        """
-        try:
-            res = self.exposure_interp(imp_E0, imp_E1, interpolation_range, **kwargs)
-        except ValueError as err:
-            if str(err) == "inconsistent shapes":
-                raise ValueError(
-                    "Tried to interpolate impact matrices of different shapes. "
-                    "A possible reason could be Exposures of different shapes."
-                ) from err
-
-            raise err
-
-        return res
-
-    def interp_over_hazard_dim(
-        self,
-        metric_0: np.ndarray,
-        metric_1: np.ndarray,
-        /,
-        **kwargs: Optional[Dict[str, Any]],
-    ) -> np.ndarray:
-        """
-        Interpolates between two metric arrays using the defined hazard strategy.
-
-        This method calls the function assigned to :attr:`hazard_interp`.
-
-        Parameters
-        ----------
-        metric_0 : numpy.ndarray
-            The starting array of metrics.
-        metric_1 : numpy.ndarray
-            The ending array of metrics. Must have the same shape as ``metric_0``.
-        **kwargs : Optional[Dict[str, Any]]
-            Keyword arguments to pass to the underlying :attr:`hazard_interp` function.
-
-        Returns
-        -------
-        numpy.ndarray
-            The resulting interpolated array.
-        """
-        return self.hazard_interp(metric_0, metric_1, **kwargs)
-
-    def interp_over_vulnerability_dim(
-        self,
-        metric_0: np.ndarray,
-        metric_1: np.ndarray,
-        /,
-        **kwargs: Optional[Dict[str, Any]],
-    ) -> np.ndarray:
-        """
-        Interpolates between two metric arrays using the defined vulnerability strategy.
-
-        This method calls the function assigned to :attr:`vulnerability_interp`.
-
-        Parameters
-        ----------
-        metric_0 : numpy.ndarray
-            The starting array of metrics.
-        metric_1 : numpy.ndarray
-            The ending array of metrics. Must have the same shape as ``metric_0``.
-        **kwargs : Optional[Dict[str, Any]]
-            Keyword arguments to pass to the underlying :attr:`vulnerability_interp` function.
-
-        Returns
-        -------
-        numpy.ndarray
-            The resulting interpolated array.
-        """
-        # Note: the Callable is assumed to take exactly these positional arguments
-        return self.vulnerability_interp(metric_0, metric_1, **kwargs)
-
-
-class InterpolationStrategy(InterpolationStrategyBase):
-    r"""Interface for interpolation strategies.
-
-    This is the class to use to define your own custom interpolation strategy.
- """ - - def __init__( - self, - exposure_interp: Callable, - hazard_interp: Callable, - vulnerability_interp: Callable, - ) -> None: - super().__init__() - self.exposure_interp = exposure_interp - self.hazard_interp = hazard_interp - self.vulnerability_interp = vulnerability_interp - - -class AllLinearStrategy(InterpolationStrategyBase): - r"""Linear interpolation strategy over all dimensions.""" - - def __init__(self) -> None: - super().__init__() - self.exposure_interp = linear_interp_imp_mat - self.hazard_interp = linear_interp_arrays - self.vulnerability_interp = linear_interp_arrays - - -class ExponentialExposureStrategy(InterpolationStrategyBase): - r"""Exponential interpolation strategy for exposure and linear for Hazard and Vulnerability.""" - - def __init__(self) -> None: - super().__init__() - self.exposure_interp = ( - lambda mat_start, mat_end, points: exponential_interp_imp_mat( - mat_start, mat_end, points - ) - ) - self.hazard_interp = linear_interp_arrays - self.vulnerability_interp = linear_interp_arrays diff --git a/climada/trajectories/test/test_interpolation.py b/climada/trajectories/test/test_interpolation.py deleted file mode 100644 index 693c9b9c33..0000000000 --- a/climada/trajectories/test/test_interpolation.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -This file is part of CLIMADA. - -Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. - -CLIMADA is free software: you can redistribute it and/or modify it under the -terms of the GNU General Public License as published by the Free -Software Foundation, version 3. - -CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY -WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -PARTICULAR PURPOSE. See the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along -with CLIMADA. If not, see . 
- ---- - -Tests for interpolation - -""" - -import unittest -from unittest.mock import MagicMock - -import numpy as np -from scipy.sparse import csr_matrix - -from climada.trajectories.interpolation import ( - AllLinearStrategy, - ExponentialExposureStrategy, - InterpolationStrategy, - exponential_interp_arrays, - exponential_interp_imp_mat, - linear_interp_arrays, - linear_interp_imp_mat, -) - - -class TestInterpolationFuncs(unittest.TestCase): - def setUp(self): - # Create mock impact matrices for testing - self.imp_mat0 = csr_matrix(np.array([[1, 2], [3, 4]])) - self.imp_mat1 = csr_matrix(np.array([[5, 6], [7, 8]])) - self.imp_mat2 = csr_matrix(np.array([[5, 6, 7], [8, 9, 10]])) # Different shape - self.time_points = 5 - self.interpolation_range_5 = 5 - self.interpolation_range_1 = 1 - self.interpolation_range_2 = 2 - self.rtol = 1e-5 - self.atol = 1e-8 - - def test_linear_interp_arrays(self): - arr_start = np.array([10, 100]) - arr_end = np.array([20, 200]) - expected = np.array([10.0, 200.0]) - result = linear_interp_arrays(arr_start, arr_end) - np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) - - def test_linear_interp_arrays2D(self): - arr_start = np.array([[10, 100], [10, 100]]) - arr_end = np.array([[20, 200], [20, 200]]) - expected = np.array([[10.0, 100.0], [20, 200]]) - result = linear_interp_arrays(arr_start, arr_end) - np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) - - def test_linear_interp_arrays_shape(self): - arr_start = np.array([10, 100, 5]) - arr_end = np.array([20, 200]) - with self.assertRaises(ValueError): - linear_interp_arrays(arr_start, arr_end) - - def test_linear_interp_arrays_start_equals_end(self): - arr_start = np.array([5, 5]) - arr_end = np.array([5, 5]) - expected = np.array([5.0, 5.0]) - result = linear_interp_arrays(arr_start, arr_end) - np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) - - def test_exponential_interp_arrays_1d(self): - arr_start = np.array([1, 10, 100]) - arr_end = np.array([2, 20, 200]) - expected = np.array([1.0, 14.142136, 200.0]) - result = exponential_interp_arrays(arr_start, arr_end) - np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) - - def test_exponential_interp_arrays_shape(self): - arr_start = np.array([10, 100, 5]) - arr_end = np.array([20, 200]) - with self.assertRaises(ValueError): - exponential_interp_arrays(arr_start, arr_end) - - def test_exponential_interp_arrays_2d(self): - arr_start = np.array( - [ - [1, 10, 100], # date 1 metric a,b,c - [1, 10, 100], # date 2 metric a,b,c - [1, 10, 100], - ] - ) # date 3 metric a,b,c - arr_end = np.array([[2, 20, 200], [2, 20, 200], [2, 20, 200]]) - expected = np.array( - [[1.0, 10.0, 100.0], [1.4142136, 14.142136, 141.42136], [2, 20, 200]] - ) - result = exponential_interp_arrays(arr_start, arr_end) - np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) - - def test_exponential_interp_arrays_start_equals_end(self): - arr_start = np.array([5, 5]) - arr_end = np.array([5, 5]) - expected = np.array([5.0, 5.0]) - result = exponential_interp_arrays(arr_start, arr_end) - np.testing.assert_allclose(result, expected, rtol=self.rtol, atol=self.atol) - - def test_linear_impmat_interpolate(self): - result = linear_interp_imp_mat(self.imp_mat0, self.imp_mat1, self.time_points) - self.assertEqual(len(result), self.time_points) - for mat in result: - self.assertIsInstance(mat, csr_matrix) - - dense = np.array([r.todense() for r in result]) - expected = 
np.array( - [ - [[1.0, 2.0], [3.0, 4.0]], - [[2.0, 3.0], [4.0, 5.0]], - [[3.0, 4.0], [5.0, 6.0]], - [[4.0, 5.0], [6.0, 7.0]], - [[5.0, 6.0], [7.0, 8.0]], - ] - ) - np.testing.assert_array_equal(dense, expected) - - def test_linear_impmat_interpolate_inconsistent_shape(self): - with self.assertRaises(ValueError): - linear_interp_imp_mat(self.imp_mat0, self.imp_mat2, self.time_points) - - def test_exp_impmat_interpolate(self): - result = exponential_interp_imp_mat( - self.imp_mat0, self.imp_mat1, self.time_points - ) - self.assertEqual(len(result), self.time_points) - for mat in result: - self.assertIsInstance(mat, csr_matrix) - - dense = np.array([r.todense() for r in result]) - expected = np.array( - [ - [[1.0, 2.0], [3.0, 4.0]], - [[1.49534878, 2.63214803], [3.70779275, 4.75682846]], - [[2.23606798, 3.46410162], [4.58257569, 5.65685425]], - [[3.34370152, 4.55901411], [5.66374698, 6.72717132]], - [[5.0, 6.0], [7.0, 8.0]], - ] - ) - np.testing.assert_array_almost_equal(dense, expected) - - def test_exp_impmat_interpolate_inconsistent_shape(self): - with self.assertRaises(ValueError): - exponential_interp_imp_mat(self.imp_mat0, self.imp_mat2, self.time_points) - - -class TestInterpolationStrategies(unittest.TestCase): - - def setUp(self): - self.interpolation_range = 3 - self.dummy_metric_0 = np.array([10, 20]) - self.dummy_metric_1 = np.array([100, 200]) - self.dummy_matrix_0 = csr_matrix(np.array([[1, 2], [3, 4]])) - self.dummy_matrix_1 = csr_matrix(np.array([[10, 20], [30, 40]])) - - def test_InterpolationStrategy_init(self): - def mock_exposure(a, b, r): - return a + b - - def mock_hazard(a, b, r): - return a * b - - def mock_vulnerability(a, b, r): - return a / b - - strategy = InterpolationStrategy(mock_exposure, mock_hazard, mock_vulnerability) - self.assertEqual(strategy.exposure_interp, mock_exposure) - self.assertEqual(strategy.hazard_interp, mock_hazard) - self.assertEqual(strategy.vulnerability_interp, mock_vulnerability) - - def test_InterpolationStrategy_interp_exposure_dim(self): - mock_exposure = MagicMock(return_value=["mock_result"]) - strategy = InterpolationStrategy( - mock_exposure, linear_interp_arrays, linear_interp_arrays - ) - - result = strategy.interp_over_exposure_dim( - self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range - ) - mock_exposure.assert_called_once_with( - self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range - ) - self.assertEqual(result, ["mock_result"]) - - def test_InterpolationStrategy_interp_exposure_dim_inconsistent_shapes(self): - mock_exposure = MagicMock(side_effect=ValueError("inconsistent shapes")) - strategy = InterpolationStrategy( - mock_exposure, linear_interp_arrays, linear_interp_arrays - ) - - with self.assertRaisesRegex( - ValueError, "Tried to interpolate impact matrices of different shape" - ): - strategy.interp_over_exposure_dim( - self.dummy_matrix_0, - csr_matrix(np.array([[1]])), - self.interpolation_range, - ) - mock_exposure.assert_called_once() # Ensure it was called - - def test_InterpolationStrategy_interp_hazard_dim(self): - mock_hazard = MagicMock(return_value=np.array([1, 2, 3])) - strategy = InterpolationStrategy( - linear_interp_imp_mat, mock_hazard, linear_interp_arrays - ) - - result = strategy.interp_over_hazard_dim( - self.dummy_metric_0, self.dummy_metric_1 - ) - mock_hazard.assert_called_once_with(self.dummy_metric_0, self.dummy_metric_1) - np.testing.assert_array_equal(result, np.array([1, 2, 3])) - - def test_InterpolationStrategy_interp_vulnerability_dim(self): - 
mock_vulnerability = MagicMock(return_value=np.array([4, 5, 6])) - strategy = InterpolationStrategy( - linear_interp_imp_mat, linear_interp_arrays, mock_vulnerability - ) - - result = strategy.interp_over_vulnerability_dim( - self.dummy_metric_0, self.dummy_metric_1 - ) - mock_vulnerability.assert_called_once_with( - self.dummy_metric_0, self.dummy_metric_1 - ) - np.testing.assert_array_equal(result, np.array([4, 5, 6])) - - -class TestConcreteInterpolationStrategies(unittest.TestCase): - - def setUp(self): - self.interpolation_range = 3 - self.dummy_metric_0 = np.array([10, 20, 30]) - self.dummy_metric_1 = np.array([100, 200, 300]) - self.dummy_matrix_0 = csr_matrix([[1, 2], [3, 4]]) - self.dummy_matrix_1 = csr_matrix([[10, 20], [30, 40]]) - self.dummy_matrix_0_1_lin = csr_matrix([[5.5, 11], [16.5, 22]]) - self.dummy_matrix_0_1_exp = csr_matrix( - [[3.162278, 6.324555], [9.486833, 12.649111]] - ) - self.rtol = 1e-5 - self.atol = 1e-8 - - def test_AllLinearStrategy_init_and_methods(self): - strategy = AllLinearStrategy() - self.assertEqual(strategy.exposure_interp, linear_interp_imp_mat) - self.assertEqual(strategy.hazard_interp, linear_interp_arrays) - self.assertEqual(strategy.vulnerability_interp, linear_interp_arrays) - - # Test hazard interpolation - expected_hazard_interp = linear_interp_arrays( - self.dummy_metric_0, self.dummy_metric_1 - ) - result_hazard = strategy.interp_over_hazard_dim( - self.dummy_metric_0, self.dummy_metric_1 - ) - np.testing.assert_allclose( - result_hazard, expected_hazard_interp, rtol=self.rtol, atol=self.atol - ) - - # Test vulnerability interpolation - expected_vulnerability_interp = linear_interp_arrays( - self.dummy_metric_0, self.dummy_metric_1 - ) - result_vulnerability = strategy.interp_over_vulnerability_dim( - self.dummy_metric_0, self.dummy_metric_1 - ) - np.testing.assert_allclose( - result_vulnerability, - expected_vulnerability_interp, - rtol=self.rtol, - atol=self.atol, - ) - - # Test exposure interpolation (using mock for linear_interp_imp_mat) - result_exposure = strategy.interp_over_exposure_dim( - self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range - ) - # Verify the structure/first/last elements of the mock output - self.assertEqual(len(result_exposure), self.interpolation_range) - np.testing.assert_allclose(result_exposure[0].data, self.dummy_matrix_0.data) - np.testing.assert_allclose( - result_exposure[1].data, self.dummy_matrix_0_1_lin.data - ) - np.testing.assert_allclose(result_exposure[2].data, self.dummy_matrix_1.data) - - def test_ExponentialExposureInterpolation_init_and_methods(self): - strategy = ExponentialExposureStrategy() - # Test hazard interpolation (should be linear) - expected_hazard_interp = linear_interp_arrays( - self.dummy_metric_0, self.dummy_metric_1 - ) - result_hazard = strategy.interp_over_hazard_dim( - self.dummy_metric_0, self.dummy_metric_1 - ) - np.testing.assert_allclose( - result_hazard, expected_hazard_interp, rtol=self.rtol, atol=self.atol - ) - - # Test vulnerability interpolation (should be linear) - expected_vulnerability_interp = linear_interp_arrays( - self.dummy_metric_0, self.dummy_metric_1 - ) - result_vulnerability = strategy.interp_over_vulnerability_dim( - self.dummy_metric_0, self.dummy_metric_1 - ) - np.testing.assert_allclose( - result_vulnerability, - expected_vulnerability_interp, - rtol=self.rtol, - atol=self.atol, - ) - - # Test exposure interpolation (using mock for exponential_interp_imp_mat) - result_exposure = strategy.interp_over_exposure_dim( - 
self.dummy_matrix_0, self.dummy_matrix_1, self.interpolation_range - ) - # Verify the structure/first/last elements of the mock output - self.assertEqual(len(result_exposure), self.interpolation_range) - np.testing.assert_allclose(result_exposure[0].data, self.dummy_matrix_0.data) - np.testing.assert_allclose( - result_exposure[1].data, - self.dummy_matrix_0_1_exp.data, - rtol=self.rtol, - atol=self.atol, - ) - np.testing.assert_allclose(result_exposure[-1].data, self.dummy_matrix_1.data) - - -if __name__ == "__main__": - TESTS = unittest.TestLoader().loadTestsFromTestCase( - TestConcreteInterpolationStrategies - ) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestInterpolationFuncs)) - TESTS.addTests( - unittest.TestLoader().loadTestsFromTestCase(TestInterpolationStrategies) - ) - unittest.TextTestRunner(verbosity=2).run(TESTS) From 46e089ac3e447440198b7384da5315c1483fd856 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 17:00:25 +0100 Subject: [PATCH 35/37] cleanup from wrong merge --- climada/trajectories/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/climada/trajectories/__init__.py b/climada/trajectories/__init__.py index 575b993969..716081568e 100644 --- a/climada/trajectories/__init__.py +++ b/climada/trajectories/__init__.py @@ -21,13 +21,10 @@ """ -from .interpolation import AllLinearStrategy, ExponentialExposureStrategy from .snapshot import Snapshot from .static_trajectory import StaticRiskTrajectory __all__ = [ - "AllLinearStrategy", - "ExponentialExposureStrategy", "Snapshot", "StaticRiskTrajectory", ] From a9ebdb557c5a89c276c4c256cd861ac8745ce0f2 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 17:00:48 +0100 Subject: [PATCH 36/37] fixes type hint annoyance --- climada/trajectories/trajectory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/climada/trajectories/trajectory.py b/climada/trajectories/trajectory.py index 75c30b9aa1..c64123be92 100644 --- a/climada/trajectories/trajectory.py +++ b/climada/trajectories/trajectory.py @@ -218,7 +218,7 @@ def _npv_group(group, disc): @staticmethod def _calc_npv_cash_flows( - cash_flows: pd.DataFrame, + cash_flows: pd.DataFrame | pd.Series, start_date: datetime.date, disc_rates: DiscRates | None = None, ): From e69794f43ae0e2e2829d7eebd065436343e78e60 Mon Sep 17 00:00:00 2001 From: spjuhel Date: Tue, 6 Jan 2026 17:01:10 +0100 Subject: [PATCH 37/37] Shifts tests to pytest --- .../test/test_static_risk_trajectory.py | 591 +++++++++--------- climada/trajectories/test/test_trajectory.py | 304 +-------- 2 files changed, 309 insertions(+), 586 deletions(-) diff --git a/climada/trajectories/test/test_static_risk_trajectory.py b/climada/trajectories/test/test_static_risk_trajectory.py index 7576c957f9..ec4016a021 100644 --- a/climada/trajectories/test/test_static_risk_trajectory.py +++ b/climada/trajectories/test/test_static_risk_trajectory.py @@ -21,18 +21,15 @@ """ import datetime -import types -import unittest from itertools import product -from unittest.mock import MagicMock, Mock, call, patch +from unittest.mock import MagicMock, call, patch -import numpy as np # For potential NaN/NA comparisons +import numpy as np import pandas as pd +import pytest from climada.entity.disc_rates.base import DiscRates -from climada.trajectories.calc_risk_metrics import ( # ImpactComputationStrategy, # If needed to mock its base class directly - CalcRiskMetricsPoints, -) +from climada.trajectories.calc_risk_metrics import CalcRiskMetricsPoints from climada.trajectories.constants 
import ( AAI_METRIC_NAME, AAI_PER_GROUP_METRIC_NAME, @@ -51,329 +48,329 @@ DEFAULT_RP, StaticRiskTrajectory, ) +from climada.trajectories.trajectory import RiskTrajectory + +# --- Fixtures --- + + +@pytest.fixture +def mock_snapshots(): + """Provides a list of mock Snapshot objects with sequential dates.""" + snaps = [] + for year in [2023, 2024, 2025]: + m = MagicMock(spec=Snapshot) + m.date = datetime.date(year, 1, 1) + snaps.append(m) + return snaps + + +@pytest.fixture +def mock_disc_rates(): + """Provides a mock DiscRates object.""" + dr = MagicMock(spec=DiscRates) + dr.years = [2023, 2024, 2025] + dr.rates = [0.01, 0.02, 0.03] + return dr + + +@pytest.fixture +def rt_basic(mock_snapshots): + """A basic StaticRiskTrajectory instance.""" + return StaticRiskTrajectory(mock_snapshots) + + +@pytest.fixture +def trajectory_metadata(): + """Common metadata for DataFrame generation.""" + return { + "dates1": [pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], + "dates2": [pd.Timestamp("2026-01-01")], + "groups": ["GroupA", "GroupB", pd.NA], + "measures": ["MEAS1", "MEAS2"], + "metrics": [AAI_METRIC_NAME], + } + + +@pytest.fixture +def expected_aai_data(trajectory_metadata): + """Generates the expected AAI DataFrames used for comparison.""" + meta = trajectory_metadata + all_dates = meta["dates1"] + meta["dates2"] + + df = pd.DataFrame( + product(meta["groups"], all_dates, meta["measures"], meta["metrics"]), + columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], + ) + df[RISK_COL_NAME] = np.arange(len(df)) * 100.0 + + # Handle Categories and Nulls + df[GROUP_COL_NAME] = df[GROUP_COL_NAME].astype("category") + df[GROUP_COL_NAME] = df[GROUP_COL_NAME].cat.add_categories([DEFAULT_ALLGROUP_NAME]) + df[GROUP_COL_NAME] = df[GROUP_COL_NAME].fillna(DEFAULT_ALLGROUP_NAME) + + cols = [ + DATE_COL_NAME, + GROUP_COL_NAME, + MEASURE_COL_NAME, + METRIC_COL_NAME, + RISK_COL_NAME, + ] + return df[cols] + + +@pytest.fixture +def mock_components(): + """Provides standard CLIMADA mock objects.""" + snaps = [ + MagicMock(spec=Snapshot, date=datetime.date(2023 + i, 1, 1)) for i in range(3) + ] + strat = MagicMock(spec=ImpactCalcComputation) + dr = MagicMock( + spec=DiscRates, years=[2023, 2024, 2025, 2026], rates=[0.01, 0.02, 0.03, 0.04] + ) + return {"snaps": snaps, "strat": strat, "disc_rates": dr} -class TestStaticRiskTrajectory(unittest.TestCase): - def setUp(self) -> None: - self.dates1 = [pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")] - self.dates2 = [pd.Timestamp("2026-01-01")] - self.groups = ["GroupA", "GroupB", pd.NA] - self.measures = ["MEAS1", "MEAS2"] - self.metrics = [AAI_METRIC_NAME] - self.aai_dates1 = pd.DataFrame( - product(self.groups, self.dates1, self.measures, self.metrics), - columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], - ) - self.aai_dates1[RISK_COL_NAME] = np.arange(12) * 100 - self.aai_dates1[GROUP_COL_NAME] = self.aai_dates1[GROUP_COL_NAME].astype( - "category" - ) +# --- Pure RiskTrajectory Tests --- - self.aai_dates2 = pd.DataFrame( - product(self.groups, self.dates2, self.measures, self.metrics), - columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], - ) - self.aai_dates2[RISK_COL_NAME] = np.arange(6) * 100 + 1200 - self.aai_dates2[GROUP_COL_NAME] = self.aai_dates2[GROUP_COL_NAME].astype( - "category" - ) - self.aai_alldates = pd.DataFrame( - product( - self.groups, self.dates1 + self.dates2, self.measures, self.metrics - ), - columns=[GROUP_COL_NAME, DATE_COL_NAME, MEASURE_COL_NAME, 
METRIC_COL_NAME], - ) - self.aai_alldates[RISK_COL_NAME] = np.arange(18) * 100 - self.aai_alldates[GROUP_COL_NAME] = self.aai_alldates[GROUP_COL_NAME].astype( - "category" - ) - self.aai_alldates[GROUP_COL_NAME] = self.aai_alldates[ - GROUP_COL_NAME - ].cat.add_categories([DEFAULT_ALLGROUP_NAME]) - self.aai_alldates[GROUP_COL_NAME] = self.aai_alldates[GROUP_COL_NAME].fillna( - DEFAULT_ALLGROUP_NAME - ) - self.expected_pre_npv_aai = self.aai_alldates - self.expected_pre_npv_aai = self.expected_pre_npv_aai[ - [ - DATE_COL_NAME, - GROUP_COL_NAME, - MEASURE_COL_NAME, - METRIC_COL_NAME, - RISK_COL_NAME, - ] - ] +def test_init_basic(rt_basic, mock_snapshots): + assert rt_basic.start_date == mock_snapshots[0].date + assert rt_basic.end_date == mock_snapshots[-1].date + assert rt_basic._risk_disc_rates is None + assert rt_basic._all_groups_name == DEFAULT_ALLGROUP_NAME + assert rt_basic._return_periods == DEFAULT_RP - self.expected_npv_aai = pd.DataFrame( - product( - self.dates1 + self.dates2, self.groups, self.measures, self.metrics - ), - columns=[DATE_COL_NAME, GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], - ) - self.expected_npv_aai[RISK_COL_NAME] = np.arange(18) * 90 - self.expected_npv_aai[GROUP_COL_NAME] = self.expected_npv_aai[ - GROUP_COL_NAME - ].astype("category") - self.expected_npv_aai[GROUP_COL_NAME] = self.expected_npv_aai[ - GROUP_COL_NAME - ].cat.add_categories(["All"]) - self.expected_npv_aai[GROUP_COL_NAME] = self.expected_npv_aai[ - GROUP_COL_NAME - ].fillna(DEFAULT_ALLGROUP_NAME) - expected_npv_df = self.expected_npv_aai - expected_npv_df = expected_npv_df[ - [ - DATE_COL_NAME, - GROUP_COL_NAME, - MEASURE_COL_NAME, - METRIC_COL_NAME, - RISK_COL_NAME, - ] - ] + for metric in StaticRiskTrajectory.POSSIBLE_METRICS: + assert getattr(rt_basic, f"_{metric}_metrics") is None - self.mock_snapshot1 = MagicMock(spec=Snapshot) - self.mock_snapshot1.date = datetime.date(2023, 1, 1) - self.mock_snapshot2 = MagicMock(spec=Snapshot) - self.mock_snapshot2.date = datetime.date(2024, 1, 1) +def test_init_args(mock_snapshots, mock_disc_rates): + custom_rp = [10, 20] + custom_name = "custom" + rt = StaticRiskTrajectory( + mock_snapshots, + return_periods=custom_rp, + all_groups_name=custom_name, + risk_disc_rates=mock_disc_rates, + ) + assert rt._risk_disc_rates == mock_disc_rates + assert rt._all_groups_name == custom_name + assert rt.return_periods == custom_rp - self.mock_snapshot3 = MagicMock(spec=Snapshot) - self.mock_snapshot3.date = datetime.date(2026, 1, 1) - self.snapshots_list: list[Snapshot] = [ - self.mock_snapshot1, - self.mock_snapshot2, - self.mock_snapshot3, - ] +# --- Property & Setter Tests --- - self.risk_disc_rates = MagicMock(spec=DiscRates) - self.risk_disc_rates.years = [2023, 2024, 2025, 2026] - self.risk_disc_rates.rates = [0.01, 0.02, 0.03, 0.04] # Example rates - self.mock_impact_computation_strategy = MagicMock(spec=ImpactCalcComputation) +def test_set_return_periods(rt_basic): + with pytest.raises(ValueError): + rt_basic.return_periods = "A" - self.custom_all_groups_name = "custom" - self.custom_return_periods = [10, 20] + rt_basic.return_periods = [1, 2] + assert rt_basic.return_periods == [1, 2] - self.mock_static_traj = MagicMock(spec=StaticRiskTrajectory) - self.mock_static_traj._all_groups_name = DEFAULT_ALLGROUP_NAME - self.mock_static_traj._risk_disc_rates = None - self.mock_static_traj._risk_metrics_calculators = MagicMock( - spec=CalcRiskMetricsPoints - ) - @patch( - "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", - autospec=True, +def 
test_set_disc_rates(rt_basic, mock_disc_rates): + # Mock the reset_metrics method on the instance + with patch.object(rt_basic, "_reset_metrics", wraps=rt_basic._reset_metrics) as spy: + with pytest.raises(ValueError): + rt_basic.risk_disc_rates = "A" + + rt_basic.risk_disc_rates = mock_disc_rates + # Once in __init__, once in setter + assert spy.call_count == 1 + assert rt_basic.risk_disc_rates == mock_disc_rates + + +# --- NPV Transformation Tests --- + + +def test_npv_transform_no_group_col(mock_disc_rates): + df_input = pd.DataFrame( + { + "date": pd.to_datetime(["2023-01-01", "2024-01-01"] * 2), + "measure": ["m1", "m1", "m2", "m2"], + "metric": [AAI_METRIC_NAME] * 4, + "risk": [100.0, 200.0, 80.0, 180.0], + } ) - def test_init_basic(self, MockCalcRiskPoints): - mock_calculator = MagicMock(spec=CalcRiskMetricsPoints) - mock_calculator.impact_computation_strategy = ( - self.mock_impact_computation_strategy - ) - MockCalcRiskPoints.return_value = mock_calculator - rt = StaticRiskTrajectory( - self.snapshots_list, - impact_computation_strategy=self.mock_impact_computation_strategy, - ) - MockCalcRiskPoints.assert_has_calls( - [ - call( - self.snapshots_list, - impact_computation_strategy=self.mock_impact_computation_strategy, - ), - ] - ) - self.assertEqual(rt.start_date, self.mock_snapshot1.date) - self.assertEqual(rt.end_date, self.mock_snapshot3.date) - self.assertIsNone(rt._risk_disc_rates) - self.assertEqual(rt._all_groups_name, DEFAULT_ALLGROUP_NAME) - self.assertEqual(rt._return_periods, DEFAULT_RP) - self.assertEqual( - rt.impact_computation_strategy, self.mock_impact_computation_strategy - ) - # Check that metrics are reset (initially None) - for metric in StaticRiskTrajectory.POSSIBLE_METRICS: - self.assertIsNone(getattr(rt, "_" + metric + "_metrics")) - @patch( - "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", - autospec=True, + with patch( + "climada.trajectories.trajectory.RiskTrajectory._calc_npv_cash_flows" + ) as mock_calc: + # Side effects to simulate discounted values + mock_calc.side_effect = [ + pd.Series( + [99.0, 196.0], index=pd.to_datetime(["2023-01-01", "2024-01-01"]) + ), + pd.Series( + [79.2, 176.4], index=pd.to_datetime(["2023-01-01", "2024-01-01"]) + ), + ] + + _ = RiskTrajectory.npv_transform(df_input.copy(), mock_disc_rates) + + # Check calls: Grouping should happen by (measure, metric) + assert mock_calc.call_count == 2 + # Verify first group args + args, _ = mock_calc.call_args_list[0] + assert args[1] == pd.Timestamp("2023-01-01") + assert args[2] == mock_disc_rates + + +def test_calc_npv_cash_flows_logic(mock_disc_rates): + """Standalone test for the math inside _calc_npv_cash_flows.""" + cash_flows = pd.Series( + [100, 200, 300], + index=pd.to_datetime(["2023-01-01", "2024-01-01", "2025-01-01"]), ) - def test_init_args(self, mock_calc_risk_metrics_points): - rt = StaticRiskTrajectory( - self.snapshots_list, - return_periods=self.custom_return_periods, - all_groups_name=self.custom_all_groups_name, - risk_disc_rates=self.risk_disc_rates, - impact_computation_strategy=self.mock_impact_computation_strategy, - ) - self.assertEqual(rt.start_date, self.mock_snapshot1.date) - self.assertEqual(rt.end_date, self.mock_snapshot3.date) - self.assertEqual(rt._risk_disc_rates, self.risk_disc_rates) - self.assertEqual(rt._all_groups_name, self.custom_all_groups_name) - self.assertEqual(rt._return_periods, self.custom_return_periods) - self.assertEqual(rt.return_periods, self.custom_return_periods) - # Check that metrics are reset (initially None) - 
for metric in StaticRiskTrajectory.POSSIBLE_METRICS: - self.assertIsNone(getattr(rt, "_" + metric + "_metrics")) - self.assertIsInstance(rt._risk_metrics_calculators, CalcRiskMetricsPoints) - mock_calc_risk_metrics_points.assert_called_with( - self.snapshots_list, - impact_computation_strategy=self.mock_impact_computation_strategy, - ) + start_date = datetime.date(2023, 1, 1) - @patch.object(StaticRiskTrajectory, "_reset_metrics", new_callable=Mock) - @patch( - "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", - autospec=True, + # NPV Factor: (1 / (1 + rate)) ^ year_delta + # 2023: (1/1.01)^0 = 1.0 -> 100 + # 2024: (1/1.02)^1 = 0.98039... -> 196.078... + # 2025: (1/1.03)^2 = 0.94259... -> 282.778... + + result = RiskTrajectory._calc_npv_cash_flows( + cash_flows, start_date, mock_disc_rates ) - def test_set_impact_computation_strategy( - self, mock_calc_risk_metrics_points, mock_reset_metrics - ): - rt = StaticRiskTrajectory( - self.snapshots_list, - impact_computation_strategy=self.mock_impact_computation_strategy, - ) - mock_reset_metrics.assert_called_once() # Called during init - with self.assertRaises(ValueError): - rt.impact_computation_strategy = "A" - - # There is only one possibility at the moment so we just check against a new object - new_impact_calc = ImpactCalcComputation() - rt.impact_computation_strategy = new_impact_calc - self.assertEqual(rt.impact_computation_strategy, new_impact_calc) - mock_reset_metrics.assert_has_calls([call(), call()]) - - def test_generic_metrics(self): - self.mock_static_traj.POSSIBLE_METRICS = StaticRiskTrajectory.POSSIBLE_METRICS - self.mock_static_traj._generic_metrics = types.MethodType( - StaticRiskTrajectory._generic_metrics, self.mock_static_traj - ) - self.mock_static_traj._risk_disc_rates = self.risk_disc_rates - self.mock_static_traj._aai_metrics = None - with self.assertRaises(ValueError): - self.mock_static_traj._generic_metrics(None, "dummy_meth") - with self.assertRaises(NotImplementedError): - self.mock_static_traj._generic_metrics("dummy_name", "dummy_meth") + assert result.iloc[0] == pytest.approx(100.0) + assert result.iloc[1] == pytest.approx(200 / 1.02) + assert result.iloc[2] == pytest.approx(300 / (1.03**2)) - self.mock_static_traj._risk_metrics_calculators.calc_aai_metric.return_value = ( - self.aai_alldates - ) - self.mock_static_traj.npv_transform.return_value = self.expected_npv_aai - result = self.mock_static_traj._generic_metrics( - AAI_METRIC_NAME, "calc_aai_metric" - ) - self.mock_static_traj._risk_metrics_calculators.calc_aai_metric.assert_called_once_with() - self.mock_static_traj.npv_transform.assert_called_once() - pd.testing.assert_frame_equal( - self.mock_static_traj.npv_transform.call_args[0][0].reset_index(drop=True), - self.expected_pre_npv_aai.reset_index(drop=True), - ) - self.assertEqual( - self.mock_static_traj.npv_transform.call_args[0][1], self.risk_disc_rates - ) - pd.testing.assert_frame_equal( - result, self.expected_npv_aai - ) # Final result is from NPV transform - - # Check internal storage - stored_df = getattr(self.mock_static_traj, "_aai_metrics") - # Assert that the stored DF is the one *before* NPV transformation - pd.testing.assert_frame_equal( - stored_df.reset_index(drop=True), - self.expected_npv_aai.reset_index(drop=True), +def test_calc_npv_cash_flows_invalid_index(mock_disc_rates): + cash_flows = pd.Series([100, 200]) # No datetime index + with pytest.raises(ValueError, match="PeriodIndex or DatetimeIndex"): + RiskTrajectory._calc_npv_cash_flows( + cash_flows, 
datetime.date(2023, 1, 1), mock_disc_rates ) - result2 = self.mock_static_traj._generic_metrics( - AAI_METRIC_NAME, "calc_aai_metric" - ) - # Check no new call - self.mock_static_traj._risk_metrics_calculators.calc_aai_metric.assert_called_once_with() - pd.testing.assert_frame_equal( - result2, - self.expected_npv_aai.reset_index(drop=True), - ) - def test_eai_metrics(self): - self.mock_static_traj.eai_metrics = types.MethodType( - StaticRiskTrajectory.eai_metrics, self.mock_static_traj - ) - self.mock_static_traj.eai_metrics(some_arg="test") - self.mock_static_traj._compute_metrics.assert_called_once_with( - metric_name=EAI_METRIC_NAME, metric_meth="calc_eai_gdf", some_arg="test" - ) +# ---- StaticRiskTrajectory tests --- - def test_aai_metrics(self): - self.mock_static_traj.aai_metrics = types.MethodType( - StaticRiskTrajectory.aai_metrics, self.mock_static_traj - ) - self.mock_static_traj.aai_metrics(some_arg="test") - self.mock_static_traj._compute_metrics.assert_called_once_with( - metric_name=AAI_METRIC_NAME, metric_meth="calc_aai_metric", some_arg="test" - ) +# --- Metric Computation Tests --- - def test_return_periods_metrics(self): - self.mock_static_traj.return_periods = [1, 2] - self.mock_static_traj.return_periods_metrics = types.MethodType( - StaticRiskTrajectory.return_periods_metrics, self.mock_static_traj - ) - self.mock_static_traj.return_periods_metrics(some_arg="test") - self.mock_static_traj._compute_metrics.assert_called_once_with( - metric_name=RETURN_PERIOD_METRIC_NAME, - metric_meth="calc_return_periods_metric", - return_periods=[1, 2], - some_arg="test", - ) - def test_aai_per_group_metrics(self): - self.mock_static_traj.aai_per_group_metrics = types.MethodType( - StaticRiskTrajectory.aai_per_group_metrics, self.mock_static_traj - ) - self.mock_static_traj.aai_per_group_metrics(some_arg="test") - self.mock_static_traj._compute_metrics.assert_called_once_with( - metric_name=AAI_PER_GROUP_METRIC_NAME, - metric_meth="calc_aai_per_group_metric", - some_arg="test", +def test_compute_metrics(rt_basic): + with patch.object( + StaticRiskTrajectory, "_generic_metrics", return_value="42" + ) as mock_generic: + result = rt_basic._compute_metrics( + metric_name="dummy", metric_meth="meth", arg1="A", arg2=12 ) - def test_per_date_risk_metrics_defaults(self): - self.mock_static_traj.per_date_risk_metrics = types.MethodType( - StaticRiskTrajectory.per_date_risk_metrics, self.mock_static_traj - ) - # Set up mock return values for each method - self.mock_static_traj.aai_metrics.return_value = pd.DataFrame( - {METRIC_COL_NAME: [AAI_METRIC_NAME], RISK_COL_NAME: [100]} + mock_generic.assert_called_once_with( + metric_name="dummy", metric_meth="meth", arg1="A", arg2=12 ) - self.mock_static_traj.return_periods_metrics.return_value = pd.DataFrame( - {METRIC_COL_NAME: ["rp"], RISK_COL_NAME: [50]} - ) - self.mock_static_traj.aai_per_group_metrics.return_value = pd.DataFrame( - {METRIC_COL_NAME: ["aai_grp"], RISK_COL_NAME: [10]} + assert result == "42" + + +def test_init_basic_static(mock_components): + # Patch the calculator class used inside __init__ + with patch( + "climada.trajectories.static_trajectory.CalcRiskMetricsPoints", autospec=True + ) as mock_calc_cls: + rt = StaticRiskTrajectory( + mock_components["snaps"], + impact_computation_strategy=mock_components["strat"], ) - result = self.mock_static_traj.per_date_risk_metrics() - - # Assert calls with default arguments - self.mock_static_traj.aai_metrics.assert_called_once_with() - 
self.mock_static_traj.return_periods_metrics.assert_called_once_with() - self.mock_static_traj.aai_per_group_metrics.assert_called_once_with() - - # Assert concatenation - expected_df = pd.concat( - [ - self.mock_static_traj.aai_metrics.return_value, - self.mock_static_traj.return_periods_metrics.return_value, - self.mock_static_traj.aai_per_group_metrics.return_value, - ] + + mock_calc_cls.assert_called_once_with( + mock_components["snaps"], + impact_computation_strategy=mock_components["strat"], ) - pd.testing.assert_frame_equal( - result.reset_index(drop=True), expected_df.reset_index(drop=True) + assert rt.start_date == mock_components["snaps"][0].date + + +def test_set_impact_strategy_resets(mock_components): + rt = StaticRiskTrajectory(mock_components["snaps"]) + with patch.object(rt, "_reset_metrics", wraps=rt._reset_metrics) as spy_reset: + new_strat = ImpactCalcComputation() + rt.impact_computation_strategy = new_strat + + assert rt.impact_computation_strategy == new_strat + # Called once in init, once in setter + assert spy_reset.call_count == 1 + + +# --- Logic & Metric Tests --- + + +def test_generic_metrics_caching_and_npv(mock_components, expected_aai_data): + """Tests the complex logic of _generic_metrics including NPV transform and caching.""" + rt = StaticRiskTrajectory( + mock_components["snaps"], risk_disc_rates=mock_components["disc_rates"] + ) + + # Mock the internal calculator's method + mock_calc = MagicMock() + mock_calc.calc_aai_metric.return_value = expected_aai_data + rt._risk_metrics_calculators = mock_calc + + # Mock NPV transform to return a modified version + npv_data = expected_aai_data.copy() + npv_data[RISK_COL_NAME] *= 0.9 + with patch.object(rt, "npv_transform", return_value=npv_data) as mock_npv: + + # First call + result = rt._generic_metrics(AAI_METRIC_NAME, "calc_aai_metric") + + mock_calc.calc_aai_metric.assert_called_once() + mock_npv.assert_called_once() + pd.testing.assert_frame_equal(result, npv_data) + + # Verify Internal Cache + assert rt._aai_metrics is not None + + # Second call (should be cached) + result2 = rt._generic_metrics(AAI_METRIC_NAME, "calc_aai_metric") + assert mock_calc.calc_aai_metric.call_count == 1 # No new call + pd.testing.assert_frame_equal(result2, npv_data) + + +@pytest.mark.parametrize( + "metric_name, method_name, attr_name", + [ + (EAI_METRIC_NAME, "calc_eai_gdf", "eai_metrics"), + (AAI_METRIC_NAME, "calc_aai_metric", "aai_metrics"), + ( + AAI_PER_GROUP_METRIC_NAME, + "calc_aai_per_group_metric", + "aai_per_group_metrics", + ), + ], +) +def test_metric_wrappers(mock_components, metric_name, method_name, attr_name): + """Uses parametrization to test all simple metric wrapper methods at once.""" + rt = StaticRiskTrajectory(mock_components["snaps"]) + with patch.object(rt, "_compute_metrics") as mock_compute: + wrapper_func = getattr(rt, attr_name) + wrapper_func(test_arg="val") + mock_compute.assert_called_once_with( + metric_name=metric_name, metric_meth=method_name, test_arg="val" ) -if __name__ == "__main__": - TESTS = unittest.TestLoader().loadTestsFromTestCase(TestStaticRiskTrajectory) - unittest.TextTestRunner(verbosity=2).run(TESTS) +def test_per_date_risk_metrics_aggregation(mock_components): + rt = StaticRiskTrajectory(mock_components["snaps"]) + + # Setup mock returns for the constituent parts + df_aai = pd.DataFrame({METRIC_COL_NAME: ["aai"], RISK_COL_NAME: [100]}) + df_rp = pd.DataFrame({METRIC_COL_NAME: ["rp"], RISK_COL_NAME: [50]}) + df_grp = pd.DataFrame({METRIC_COL_NAME: ["grp"], RISK_COL_NAME: [10]}) + 
+    with (
+        patch.object(rt, "aai_metrics", return_value=df_aai) as m1,
+        patch.object(rt, "return_periods_metrics", return_value=df_rp) as m2,
+        patch.object(rt, "aai_per_group_metrics", return_value=df_grp) as m3,
+    ):
+
+        result = rt.per_date_risk_metrics()
+        assert len(result) == 3
+        assert list(result[METRIC_COL_NAME]) == ["aai", "rp", "grp"]
+        # Verify it called all three internal methods
+        m1.assert_called_once()
+        m2.assert_called_once()
+        m3.assert_called_once()
diff --git a/climada/trajectories/test/test_trajectory.py b/climada/trajectories/test/test_trajectory.py
index c39d6c9aac..4e0259483b 100644
--- a/climada/trajectories/test/test_trajectory.py
+++ b/climada/trajectories/test/test_trajectory.py
@@ -16,15 +16,15 @@
 
 ---
 
-unit tests for risk_trajectory
+unit tests for RiskTrajectory (an abstract base class)
 
 """
 
 import datetime
-import unittest
-from unittest.mock import MagicMock, Mock, call, patch
+from unittest.mock import MagicMock, call
 
 import pandas as pd
+import pytest
 
 from climada.entity.disc_rates.base import DiscRates
 from climada.trajectories.constants import AAI_METRIC_NAME
@@ -36,291 +36,17 @@
 )
 
 
-class TestRiskTrajectory(unittest.TestCase):
-    def setUp(self) -> None:
-        self.mock_snapshot1 = MagicMock(spec=Snapshot)
-        self.mock_snapshot1.date = datetime.date(2023, 1, 1)
+@pytest.fixture
+def mock_snapshots():
+    """Provides a list of mock Snapshot objects with sequential dates."""
+    snaps = []
+    for year in [2023, 2024, 2025]:
+        m = MagicMock(spec=Snapshot)
+        m.date = datetime.date(year, 1, 1)
+        snaps.append(m)
+    return snaps
 
-        self.mock_snapshot2 = MagicMock(spec=Snapshot)
-        self.mock_snapshot2.date = datetime.date(2024, 1, 1)
-        self.mock_snapshot3 = MagicMock(spec=Snapshot)
-        self.mock_snapshot3.date = datetime.date(2025, 1, 1)
-
-        self.risk_disc_rates = MagicMock(spec=DiscRates)
-        self.risk_disc_rates.years = [2023, 2024, 2025]
-        self.risk_disc_rates.rates = [0.01, 0.02, 0.03]  # Example rates
-
-        self.snapshots_list: list[Snapshot] = [
-            self.mock_snapshot1,
-            self.mock_snapshot2,
-            self.mock_snapshot3,
-        ]
-
-        self.custom_all_groups_name = "custom"
-        self.custom_return_periods = [10, 20]
-
-    def test_init_basic(self):
-        rt = RiskTrajectory(self.snapshots_list)
-        self.assertEqual(rt.start_date, self.mock_snapshot1.date)
-        self.assertEqual(rt.end_date, self.mock_snapshot3.date)
-        self.assertIsNone(rt._risk_disc_rates)
-        self.assertEqual(rt._all_groups_name, DEFAULT_ALLGROUP_NAME)
-        self.assertEqual(rt._return_periods, DEFAULT_RP)
-        # Check that metrics are reset (initially None)
-        for metric in RiskTrajectory.POSSIBLE_METRICS:
-            self.assertIsNone(getattr(rt, "_" + metric + "_metrics"))
-
-    def test_init_args(self):
-        rt = RiskTrajectory(
-            self.snapshots_list,
-            return_periods=self.custom_return_periods,
-            all_groups_name=self.custom_all_groups_name,
-            risk_disc_rates=self.risk_disc_rates,
-        )
-        self.assertEqual(rt.start_date, self.mock_snapshot1.date)
-        self.assertEqual(rt.end_date, self.mock_snapshot3.date)
-        self.assertEqual(rt._risk_disc_rates, self.risk_disc_rates)
-        self.assertEqual(rt._all_groups_name, self.custom_all_groups_name)
-        self.assertEqual(rt._return_periods, self.custom_return_periods)
-        self.assertEqual(rt.return_periods, self.custom_return_periods)
-        # Check that metrics are reset (initially None)
-        for metric in RiskTrajectory.POSSIBLE_METRICS:
-            self.assertIsNone(getattr(rt, "_" + metric + "_metrics"))
-
-    @patch.object(RiskTrajectory, "_generic_metrics", new_callable=Mock)
-    def test_compute_metrics(self, mock_generic_metrics):
mock_generic_metrics.return_value = "42" - rt = RiskTrajectory(self.snapshots_list) - result = rt._compute_metrics( - metric_name="dummy_name", - metric_meth="dummy_meth", - dummy_kwarg1="A", - dummy_kwarg2=12, - ) - mock_generic_metrics.assert_called_once_with( - metric_name="dummy_name", - metric_meth="dummy_meth", - dummy_kwarg1="A", - dummy_kwarg2=12, - ) - self.assertEqual(result, "42") - - def test_set_return_periods(self): - rt = RiskTrajectory(self.snapshots_list) - with self.assertRaises(ValueError): - rt.return_periods = "A" - with self.assertRaises(ValueError): - rt.return_periods = ["A"] - - rt.return_periods = [1, 2] - self.assertEqual(rt._return_periods, [1, 2]) - self.assertEqual(rt.return_periods, [1, 2]) - - @patch.object(RiskTrajectory, "_reset_metrics", new_callable=Mock) - def test_set_disc_rates(self, mock_reset_metrics): - rt = RiskTrajectory(self.snapshots_list) - mock_reset_metrics.assert_called_once() # Called during init - with self.assertRaises(ValueError): - rt.risk_disc_rates = "A" - - rt.risk_disc_rates = self.risk_disc_rates - mock_reset_metrics.assert_has_calls([call(), call()]) - self.assertEqual(rt._risk_disc_rates, self.risk_disc_rates) - self.assertEqual(rt.risk_disc_rates, self.risk_disc_rates) - - def test_npv_transform_no_group_col(self): - df_input = pd.DataFrame( - { - "date": pd.to_datetime(["2023-01-01", "2024-01-01"] * 2), - "measure": ["m1", "m1", "m2", "m2"], - "metric": [ - AAI_METRIC_NAME, - AAI_METRIC_NAME, - AAI_METRIC_NAME, - AAI_METRIC_NAME, - ], - "risk": [100.0, 200.0, 80.0, 180.0], - } - ) - # Mock the internal calc_npv_cash_flows - with patch( - "climada.trajectories.trajectory.RiskTrajectory._calc_npv_cash_flows" - ) as mock_calc_npv: - # For each group, it will be called - mock_calc_npv.side_effect = [ - pd.Series( - [100.0 * (1 / (1 + 0.01)) ** 0, 200.0 * (1 / (1 + 0.02)) ** 1], - index=[pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], - ), - pd.Series( - [80.0 * (1 / (1 + 0.01)) ** 0, 180.0 * (1 / (1 + 0.02)) ** 1], - index=[pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], - ), - ] - result_df = RiskTrajectory.npv_transform( - df_input.copy(), self.risk_disc_rates - ) - # Assertions for mock calls - # Grouping by 'measure', 'metric' (default _grouper) - pd.testing.assert_series_equal( - mock_calc_npv.mock_calls[0].args[0], - pd.Series( - [100.0, 200.0], - index=pd.Index( - [ - pd.Timestamp("2023-01-01"), - pd.Timestamp("2024-01-01"), - ], - name="date", - ), - name=("m1", AAI_METRIC_NAME), - ), - ) - assert mock_calc_npv.mock_calls[0].args[1] == pd.Timestamp("2023-01-01") - assert mock_calc_npv.mock_calls[0].args[2] == self.risk_disc_rates - pd.testing.assert_series_equal( - mock_calc_npv.mock_calls[1].args[0], - pd.Series( - [80.0, 180.0], - index=pd.Index( - [ - pd.Timestamp("2023-01-01"), - pd.Timestamp("2024-01-01"), - ], - name="date", - ), - name=("m2", AAI_METRIC_NAME), - ), - ) - assert mock_calc_npv.mock_calls[1].args[1] == pd.Timestamp("2023-01-01") - assert mock_calc_npv.mock_calls[1].args[2] == self.risk_disc_rates - - expected_df = pd.DataFrame( - { - "date": pd.to_datetime(["2023-01-01", "2024-01-01"] * 2), - "measure": ["m1", "m1", "m2", "m2"], - "metric": [ - AAI_METRIC_NAME, - AAI_METRIC_NAME, - AAI_METRIC_NAME, - AAI_METRIC_NAME, - ], - "risk": [ - 100.0 * (1 / (1 + 0.01)) ** 0, - 200.0 * (1 / (1 + 0.02)) ** 1, - 80.0 * (1 / (1 + 0.01)) ** 0, - 180.0 * (1 / (1 + 0.02)) ** 1, - ], - } - ) - pd.testing.assert_frame_equal( - result_df.sort_values("date").reset_index(drop=True), - 
expected_df.sort_values("date").reset_index(drop=True), - rtol=1e-6, - ) - - def test_npv_transform_with_group_col(self): - df_input = pd.DataFrame( - { - "date": pd.to_datetime(["2023-01-01", "2024-01-01", "2023-01-01"]), - "group": ["G1", "G1", "G2"], - "measure": ["m1", "m1", "m1"], - "metric": [AAI_METRIC_NAME, AAI_METRIC_NAME, AAI_METRIC_NAME], - "risk": [100.0, 200.0, 150.0], - } - ) - with patch( - "climada.trajectories.trajectory.RiskTrajectory._calc_npv_cash_flows" - ) as mock_calc_npv: - mock_calc_npv.side_effect = [ - # First group G1, m1, aai - pd.Series( - [100.0 * (1 / (1 + 0.01)) ** 0, 200.0 * (1 / (1 + 0.02)) ** 1], - index=[pd.Timestamp("2023-01-01"), pd.Timestamp("2024-01-01")], - ), - # Second group G2, m1, aai - pd.Series( - [150.0 * (1 / (1 + 0.01)) ** 0], index=[pd.Timestamp("2023-01-01")] - ), - ] - result_df = RiskTrajectory.npv_transform( - df_input.copy(), self.risk_disc_rates - ) - - expected_df = pd.DataFrame( - { - "date": pd.to_datetime(["2023-01-01", "2024-01-01", "2023-01-01"]), - "group": ["G1", "G1", "G2"], - "measure": ["m1", "m1", "m1"], - "metric": [AAI_METRIC_NAME, AAI_METRIC_NAME, AAI_METRIC_NAME], - "risk": [ - 100.0 * (1 / (1 + 0.01)) ** 0, - 200.0 * (1 / (1 + 0.02)) ** 1, - 150.0 * (1 / (1 + 0.01)) ** 0, - ], - } - ) - pd.testing.assert_frame_equal( - result_df.sort_values(["group", "date"]).reset_index(drop=True), - expected_df.sort_values(["group", "date"]).reset_index(drop=True), - rtol=1e-6, - ) - - # --- Test NPV Transformation (`npv_transform` and `calc_npv_cash_flows`) --- - - ## Test `calc_npv_cash_flows` (standalone function) - def test_calc_npv_cash_flows_no_disc(self): - cash_flows = pd.Series( - [100, 200, 300], - index=pd.to_datetime(["2023-01-01", "2024-01-01", "2025-01-01"]), - ) - start_date = datetime.date(2023, 1, 1) - result = RiskTrajectory._calc_npv_cash_flows( - cash_flows, start_date, disc_rates=None - ) - # If no disc, it should return the original cash_flows Series - pd.testing.assert_series_equal(result, cash_flows) - - def test_calc_npv_cash_flows_with_disc(self): - cash_flows = pd.Series( - [100, 200, 300], - index=pd.period_range(start="2023-01-01", end="2025-01-01", freq="Y"), - ) - start_date = datetime.date(2023, 1, 1) - # Using the risk_disc_rates from SetUp - - # year 2023: (2023-01-01 - 2023-01-01) days // 365 = 0, factor = (1/(1+0.01))^0 = 1 - # year 2024: (2024-01-01 - 2023-01-01) days // 365 = 1, factor = (1/(1+0.02))^1 = 0.98039215... - # year 2025: (2025-01-01 - 2023-01-01) days // 365 = 2, factor = (1/(1+0.03))^2 = 0.9425959... 
-        expected_cash_flows = pd.Series(
-            [
-                100 * (1 / (1 + 0.01)) ** 0,
-                200 * (1 / (1 + 0.02)) ** 1,
-                300 * (1 / (1 + 0.03)) ** 2,
-            ],
-            index=pd.period_range(start="2023-01-01", end="2025-01-01", freq="Y"),
-            name="npv_cash_flow",
-        )
-
-        result = RiskTrajectory._calc_npv_cash_flows(
-            cash_flows, start_date, disc_rates=self.risk_disc_rates
-        )
-        pd.testing.assert_series_equal(
-            result, expected_cash_flows, check_dtype=False, rtol=1e-6
-        )
-
-    def test_calc_npv_cash_flows_invalid_index(self):
-        cash_flows = pd.Series([100, 200, 300])  # No datetime index
-        start_date = datetime.date(2023, 1, 1)
-        with self.assertRaises(
-            ValueError, msg="cash_flows must be a pandas Series with a datetime index"
-        ):
-            RiskTrajectory._calc_npv_cash_flows(
-                cash_flows, start_date, disc_rates=self.risk_disc_rates
-            )
-
-
-if __name__ == "__main__":
-    TESTS = unittest.TestLoader().loadTestsFromTestCase(TestRiskTrajectory)
-    unittest.TextTestRunner(verbosity=2).run(TESTS)
+def test_abstract(mock_snapshots):
+    with pytest.raises(TypeError, match="abstract class"):
+        RiskTrajectory(mock_snapshots)  # type: ignore
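
For readers who want to check the discounting rule that the new `test_calc_npv_cash_flows_logic` and the removed `test_calc_npv_cash_flows_with_disc` both assert, the arithmetic can be reproduced outside CLIMADA. The sketch below is illustrative only: `npv_discount` is a hypothetical stand-in for `RiskTrajectory._calc_npv_cash_flows` (whose real signature and index handling live in climada/trajectories/trajectory.py), the per-year rates mirror the mocked `DiscRates` fixture (2023: 1%, 2024: 2%, 2025: 3%), and the elapsed time is computed as a calendar-year difference, which matches these January-1 examples but may differ from the library's internal day-count.

import datetime

import pandas as pd


def npv_discount(cash_flows, start_date, rates):
    # Hypothetical helper, not CLIMADA API. Applies the rule the tests assert:
    # value_t * (1 / (1 + rate_t)) ** dt, with dt the years elapsed since
    # start_date and one rate per calendar year.
    factors = [
        (1.0 / (1.0 + rates[ts.year])) ** (ts.year - start_date.year)
        for ts in cash_flows.index
    ]
    return cash_flows * pd.Series(factors, index=cash_flows.index)


flows = pd.Series(
    [100.0, 200.0, 300.0],
    index=pd.to_datetime(["2023-01-01", "2024-01-01", "2025-01-01"]),
)
rates = {2023: 0.01, 2024: 0.02, 2025: 0.03}
# Expected, as in the tests: 100.0, 200 / 1.02 ~= 196.08, 300 / 1.03**2 ~= 282.78
print(npv_discount(flows, datetime.date(2023, 1, 1), rates).round(2))

Written out this way, the rule makes explicit why the first cash flow is never discounted (its exponent is zero) and why each year can carry its own rate, which is exactly the behaviour both test suites pin down.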