Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
ad819f2
Forecast templates
peanutfun Nov 21, 2025
abafba4
Clean slate for Forecast
peanutfun Dec 5, 2025
1688641
1127 - Forecast base class init (#1167)
Evelyn-M Dec 8, 2025
d66c8dd
Impact forecast class (#1168)
peanutfun Dec 8, 2025
35f9517
Hazard forecast class (#1171)
luseverin Dec 8, 2025
54bb592
Change leadtime from datetime to timedelta (#1172)
ValentinGebhart Dec 8, 2025
4aaad4e
Hazard forecast test select (#1175)
luseverin Dec 8, 2025
d04fecf
Hazard forecast concat test (#1178)
luseverin Dec 8, 2025
d3cc83a
Check if `Impact.select` works on `ImpactForecast` (#1170)
peanutfun Dec 8, 2025
a386d22
Check if `Impact.concat` works on `ImpactForecast` (#1174)
peanutfun Dec 8, 2025
34612c5
adapt from_hdf5 and write_hdf5
ValentinGebhart Dec 9, 2025
8a63b43
Check sizes of forecast data vs. Hazard and Impact (#1180)
peanutfun Dec 9, 2025
e123b7b
implement code review
ValentinGebhart Dec 9, 2025
9ce7f73
add comment to test
ValentinGebhart Dec 9, 2025
48b6d40
Merge pull request #1181 from CLIMADA-project/adapt_hdf5_write_from
ValentinGebhart Dec 9, 2025
64da484
Forecast/select extended tests (#1182)
chahank Dec 9, 2025
6a156ac
Impact calc return impact forecast (#1179)
luseverin Dec 9, 2025
9d6fef9
add idx boolean selection for member and leadtime (#1183)
elianekobler Dec 9, 2025
b17bcc9
Add select to ImpactForecast (#1188)
ValentinGebhart Dec 10, 2025
302be63
Add select method to HazardForecast (#1185)
ValentinGebhart Dec 10, 2025
52edc45
Add `HazardForecast.concat` (#1184)
peanutfun Dec 10, 2025
2b9b388
Implement mean min max for impact forecast and hazard forecast (#1187)
luseverin Dec 10, 2025
bb99794
Support `ImpactForecast.concat` (#1192)
peanutfun Dec 10, 2025
0e7857f
Add hdf5 IO for ImpactForecast (#1190)
ValentinGebhart Dec 10, 2025
e66a558
Implement quantiles and median for hazard and impact forecasts (#1191)
luseverin Dec 10, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions climada/engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,4 @@
from .cost_benefit import *
from .impact import *
from .impact_calc import *
from .impact_forecast import ImpactForecast
35 changes: 28 additions & 7 deletions climada/engine/impact.py
Original file line number Diff line number Diff line change
Expand Up @@ -1431,6 +1431,8 @@ def write_attribute(group, name, value):

def write_dataset(group, name, value):
    """Write a dataset"""
    data = value
    if name == "lead_time":
        # HDF5 has no native timedelta type: persist as int64 nanosecond counts.
        data = data.astype("timedelta64[ns]").astype("int64")
    group.create_dataset(name, data=data, dtype=_str_type_helper(data))

def write_dict(group, name, value):
Expand Down Expand Up @@ -1618,7 +1620,9 @@ def read_excel(self, *args, **kwargs):
self.__dict__ = Impact.from_excel(*args, **kwargs).__dict__

@classmethod
def from_hdf5(cls, file_path: Union[str, Path]):
def from_hdf5(
cls, file_path: Union[str, Path], *, add_scalar_attrs=None, add_array_attrs=None
):
"""Create an impact object from an H5 file.

This assumes a specific layout of the file. If values are not found in the
Expand Down Expand Up @@ -1663,6 +1667,10 @@ def from_hdf5(cls, file_path: Union[str, Path]):
----------
file_path : str or Path
The file path of the file to read.
add_scalar_attrs : Iterable of str, optional
Scalar attributes to read from file. Defaults to None.
add_array_attrs : Iterable of str, optional
Array attributes to read from file. Defaults to None.

Returns
-------
Expand Down Expand Up @@ -1691,17 +1699,27 @@ def from_hdf5(cls, file_path: Union[str, Path]):
# Scalar attributes
scalar_attrs = set(
("crs", "tot_value", "unit", "aai_agg", "frequency_unit", "haz_type")
).intersection(file.attrs.keys())
)
if add_scalar_attrs is not None:
scalar_attrs = scalar_attrs.union(add_scalar_attrs)
scalar_attrs = scalar_attrs.intersection(file.attrs.keys())
kwargs.update({attr: file.attrs[attr] for attr in scalar_attrs})

# Array attributes
# NOTE: Need [:] to copy array data. Otherwise, it would be a view that is
# invalidated once we close the file.
array_attrs = set(
("event_id", "date", "coord_exp", "eai_exp", "at_event", "frequency")
).intersection(file.keys())
)
if add_array_attrs is not None:
array_attrs = array_attrs.union(add_array_attrs)
array_attrs = array_attrs.intersection(file.keys())
kwargs.update({attr: file[attr][:] for attr in array_attrs})

# convert the lead_time attribute back to timedelta
if "lead_time" in kwargs:
kwargs["lead_time"] = np.array(file["lead_time"][:]).astype(
"timedelta64[ns]"
)
# Special handling for 'event_name' because it should be a list of strings
if "event_name" in file:
# pylint: disable=no-member
Expand Down Expand Up @@ -2208,9 +2226,12 @@ def stack_attribute(attr_name: str) -> np.ndarray:
imp_mat = sparse.vstack(imp_mats)

# Concatenate other attributes
kwargs = {
attr: stack_attribute(attr) for attr in ("date", "frequency", "at_event")
}
concat_attrs = {
name.lstrip("_") # Private attributes with getter/setter
for name, value in first_imp.__dict__.items()
if isinstance(value, np.ndarray)
}.difference(("event_id", "coord_exp", "eai_exp", "aai_agg"))
kwargs = {attr: stack_attribute(attr) for attr in concat_attrs}

# Get remaining attributes from first impact object in list
return cls(
Expand Down
49 changes: 43 additions & 6 deletions climada/engine/impact_calc.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@

from climada import CONFIG
from climada.engine.impact import Impact
from climada.engine.impact_forecast import ImpactForecast
from climada.hazard.forecast import HazardForecast

LOGGER = logging.getLogger(__name__)

Expand Down Expand Up @@ -217,7 +219,7 @@ def _return_impact(self, imp_mat_gen, save_mat):

Returns
-------
Impact
Impact or ImpactForecast
Impact Object initialize from the impact matrix

See Also
Expand All @@ -230,12 +232,31 @@ def _return_impact(self, imp_mat_gen, save_mat):
at_event, eai_exp, aai_agg = self.risk_metrics(
imp_mat, self.hazard.frequency
)
if isinstance(self.hazard, HazardForecast):
eai_exp = np.full_like(eai_exp, np.nan, dtype=eai_exp.dtype)
aai_agg = np.full_like(aai_agg, np.nan, dtype=aai_agg.dtype)
LOGGER.warning(
"eai_exp and aai_agg are undefined with forecasts. "
"Setting them to NaN arrays."
)

else:
if isinstance(self.hazard, HazardForecast):
raise ValueError(
"Saving impact matrix is required when using HazardForecast. "
"Please set save_mat=True."
)
imp_mat = None
at_event, eai_exp, aai_agg = self.stitch_risk_metrics(imp_mat_gen)
return Impact.from_eih(

impact = Impact.from_eih(
self.exposures, self.hazard, at_event, eai_exp, aai_agg, imp_mat
)
if isinstance(self.hazard, HazardForecast):
return ImpactForecast.from_impact(
impact, self.hazard.lead_time, self.hazard.member
)
return impact

def _return_empty(self, save_mat):
"""
Expand All @@ -248,21 +269,37 @@ def _return_empty(self, save_mat):

Returns
-------
Impact
Impact or ImpactForecast
Empty impact object with correct array sizes.
"""
at_event = np.zeros(self.n_events)
eai_exp = np.zeros(self.n_exp_pnt)
aai_agg = 0.0
if isinstance(self.hazard, HazardForecast):
eai_exp = np.full(self.n_exp_pnt, np.nan)
aai_agg = np.nan
else:
eai_exp = np.zeros(self.n_exp_pnt)
aai_agg = 0.0

if save_mat:
imp_mat = sparse.csr_matrix(
(self.n_events, self.n_exp_pnt), dtype=np.float64
)
else:
if isinstance(self.hazard, HazardForecast):
raise ValueError(
"Saving impact matrix is required when using HazardForecast. "
"Please set save_mat=True."
)
imp_mat = None
return Impact.from_eih(

impact = Impact.from_eih(
self.exposures, self.hazard, at_event, eai_exp, aai_agg, imp_mat
)
if isinstance(self.hazard, HazardForecast):
return ImpactForecast.from_impact(
impact, self.hazard.lead_time, self.hazard.member
)
return impact

def minimal_exp_gdf(
self, impf_col, assign_centroids, ignore_cover, ignore_deductible
Expand Down
Loading
Loading