1 change: 1 addition & 0 deletions .github/scripts/make_release.py
@@ -5,6 +5,7 @@
to create the new release.

"""

import glob
import re
import subprocess
1 change: 1 addition & 0 deletions .github/scripts/setup_devbranch.py
@@ -9,6 +9,7 @@
The changes are not committed to the repository. This is dealt with in the bash script
`setup_devbranch.sh` (which is also the caller of this script).
"""

import glob
import json
import re
20 changes: 9 additions & 11 deletions .pre-commit-config.yaml
@@ -5,19 +5,17 @@ default_language_version:
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
rev: v6.0.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace

- repo: https://github.com/pycqa/isort
rev: '5.13.2'
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.5
hooks:
- id: isort
args: ["--profile", "black", "--filter-files"]

# Using this mirror lets us use mypyc-compiled black, which is about 2x faster
- repo: https://github.com/psf/black-pre-commit-mirror
rev: '24.4.2'
hooks:
- id: black-jupyter
# Run the linter, applying safe fixes
# TODO: Enable once all existing issues are fixed
# - id: ruff-check
# args: [ --fix ]
# Run the formatter.
- id: ruff-format
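Note: the Python diffs below are dominated by three mechanical style changes that go along with this hook swap (isort and black replaced by ruff, with the ruff linter hook still commented out): a trailing comma is kept after `**kwargs` in multi-line calls, `import matplotlib.colors as colors` becomes a from-import, and adjacent string literals are joined onto one line. A hedged sketch of the resulting style, using a hypothetical helper rather than CLIMADA code:

```python
# Illustrative only: a hypothetical helper showing the formatting style seen below.
from matplotlib import colors  # was: import matplotlib.colors as colors


def shaded_bar(axis, height, bottom, **kwargs):
    """Stand-in for the axis.bar(...) call sites in cost_benefit.py."""
    kwargs.setdefault("color", colors.to_rgba("steelblue"))
    return axis.bar(
        1,
        height=height,
        bottom=bottom,
        **kwargs,  # trailing comma after **kwargs is kept in multi-line calls
    )
```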
90 changes: 41 additions & 49 deletions climada/engine/calibration_opt.py
@@ -94,33 +94,28 @@ def calib_instance(
for cnt_, year in years_in_common.iteritems():
df_out.loc[df_out["year"] == year, "impact_CLIMADA"] = iys[year]

else: # impact per event
if df_out.empty | df_out.index.shape[0] == 1:
for cnt_, impact in enumerate(impacts.at_event):
if cnt_ > 0:
df_out.loc[cnt_] = df_out.loc[0] # copy info from first row
df_out.loc[cnt_, "impact_CLIMADA"] = impact
df_out.loc[cnt_, "event_id"] = int(impacts.event_id[cnt_])
df_out.loc[cnt_, "event_name"] = impacts.event_name[cnt_]
df_out.loc[cnt_, "year"] = dt.datetime.fromordinal(
impacts.date[cnt_]
).year
df_out.loc[cnt_, "date"] = impacts.date[cnt_]
elif df_out.index.shape[0] == impacts.at_event.shape[0]:
for cnt_, (impact, ind) in enumerate(zip(impacts.at_event, df_out.index)):
df_out.loc[ind, "impact_CLIMADA"] = impact
df_out.loc[ind, "event_id"] = int(impacts.event_id[cnt_])
df_out.loc[ind, "event_name"] = impacts.event_name[cnt_]
df_out.loc[ind, "year"] = dt.datetime.fromordinal(
impacts.date[cnt_]
).year
df_out.loc[ind, "date"] = impacts.date[cnt_]
else:
raise ValueError(
"adding simulated impacts to reported impacts not"
" yet implemented. use yearly_impact=True or run"
" without init_impact_data."
)
elif df_out.empty | df_out.index.shape[0] == 1:
for cnt_, impact in enumerate(impacts.at_event):
if cnt_ > 0:
df_out.loc[cnt_] = df_out.loc[0] # copy info from first row
df_out.loc[cnt_, "impact_CLIMADA"] = impact
df_out.loc[cnt_, "event_id"] = int(impacts.event_id[cnt_])
df_out.loc[cnt_, "event_name"] = impacts.event_name[cnt_]
df_out.loc[cnt_, "year"] = dt.datetime.fromordinal(impacts.date[cnt_]).year
df_out.loc[cnt_, "date"] = impacts.date[cnt_]
elif df_out.index.shape[0] == impacts.at_event.shape[0]:
for cnt_, (impact, ind) in enumerate(zip(impacts.at_event, df_out.index)):
df_out.loc[ind, "impact_CLIMADA"] = impact
df_out.loc[ind, "event_id"] = int(impacts.event_id[cnt_])
df_out.loc[ind, "event_name"] = impacts.event_name[cnt_]
df_out.loc[ind, "year"] = dt.datetime.fromordinal(impacts.date[cnt_]).year
df_out.loc[ind, "date"] = impacts.date[cnt_]
else:
raise ValueError(
"adding simulated impacts to reported impacts not"
" yet implemented. use yearly_impact=True or run"
" without init_impact_data."
)
if return_cost != "False":
df_out = calib_cost_calc(df_out, return_cost)
return df_out
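The restructuring above (and the matching hunks in `calib_all` and `calib_optimize` further down) hoists a branch that was nested under `else:` into an `elif`, removing one indentation level without changing behaviour. A minimal sketch of the pattern, with a hypothetical `load_emdat` standing in for the real `init_impact_data` call:

```python
import pandas as pd


def load_emdat(spec):
    """Hypothetical stand-in for init_impact_data(...)."""
    return pd.DataFrame({"impact": [1.0]}, index=[spec])


def prepare_impact_data(impact_data_source):
    # Previously written as an `else:` followed by a nested if/else block;
    # hoisting the nested test into an elif keeps the branching identical.
    if isinstance(impact_data_source, pd.DataFrame):
        return impact_data_source
    elif list(impact_data_source.keys()) == ["emdat"]:
        return load_emdat(impact_data_source["emdat"])
    else:
        raise ValueError("other impact data sources not yet implemented.")


print(prepare_impact_data({"emdat": "emdat_2020.xlsx"}))
```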
@@ -293,8 +288,7 @@ def init_impact_data(
# em_data = emdat_impact_event(source_file)
else:
raise ValueError(
"init_impact_data not yet implemented for other impact_data_sources "
"than emdat."
"init_impact_data not yet implemented for other impact_data_sources than emdat."
)
return em_data

@@ -378,17 +372,16 @@ def calib_all(
# prepare impact data
if isinstance(impact_data_source, pd.DataFrame):
df_impact_data = impact_data_source
elif list(impact_data_source.keys()) == ["emdat"]:
df_impact_data = init_impact_data(
hazard_type,
region_ids,
year_range,
impact_data_source["emdat"],
year_range[-1],
)
else:
if list(impact_data_source.keys()) == ["emdat"]:
df_impact_data = init_impact_data(
hazard_type,
region_ids,
year_range,
impact_data_source["emdat"],
year_range[-1],
)
else:
raise ValueError("other impact data sources not yet implemented.")
raise ValueError("other impact data sources not yet implemented.")
params_generator = (
dict(zip(param_full_dict, x))
for x in itertools.product(*param_full_dict.values())
@@ -460,17 +453,16 @@ def calib_optimize(
# prepare impact data
if isinstance(impact_data_source, pd.DataFrame):
df_impact_data = impact_data_source
elif list(impact_data_source.keys()) == ["emdat"]:
df_impact_data = init_impact_data(
hazard_type,
region_ids,
year_range,
impact_data_source["emdat"],
year_range[-1],
)
else:
if list(impact_data_source.keys()) == ["emdat"]:
df_impact_data = init_impact_data(
hazard_type,
region_ids,
year_range,
impact_data_source["emdat"],
year_range[-1],
)
else:
raise ValueError("other impact data sources not yet implemented.")
raise ValueError("other impact data sources not yet implemented.")

# define specific function to
def specific_calib(values):
22 changes: 11 additions & 11 deletions climada/engine/cost_benefit.py
@@ -25,9 +25,9 @@
import logging
from typing import Dict, Optional, Tuple, Union

import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
from matplotlib.patches import FancyArrowPatch, Rectangle
from tabulate import tabulate

@@ -661,7 +661,7 @@ def plot_waterfall(
ent_future,
risk_func=risk_aai_agg,
axis=None,
**kwargs
**kwargs,
):
"""Plot waterfall graph at future with given risk metric. Can be called
before and after calc().
@@ -740,7 +740,7 @@ def plot_waterfall(
2,
height=(risk_dev - curr_risk) / norm_fact,
bottom=curr_risk / norm_fact,
**kwargs
**kwargs,
)
axis.text(
2,
@@ -755,7 +755,7 @@
3,
height=(fut_risk - risk_dev) / norm_fact,
bottom=risk_dev / norm_fact,
**kwargs
**kwargs,
)
axis.text(
3,
@@ -799,7 +799,7 @@ def plot_arrow_averted(
risk_func=risk_aai_agg,
disc_rates=None,
imp_time_depen=1,
**kwargs
**kwargs,
):
"""Plot waterfall graph with accumulated values from present to future
year. Call after calc() with save_imp=True.
@@ -877,7 +877,7 @@ def plot_arrow_averted(
tot_benefit,
bars[3].get_height() * norm_fact,
norm_fact,
**kwargs
**kwargs,
)

def plot_waterfall_accumulated(
@@ -888,7 +888,7 @@ def plot_waterfall_accumulated(
risk_func=risk_aai_agg,
imp_time_depen=1,
axis=None,
**kwargs
**kwargs,
):
"""Plot waterfall graph with accumulated values from present to future
year. Call after calc() with save_imp=True. Provide same inputs as in calc.
@@ -976,7 +976,7 @@ def plot_waterfall_accumulated(
2,
height=(risk_dev - risk_curr) / norm_fact,
bottom=risk_curr / norm_fact,
**kwargs
**kwargs,
)
axis.text(
2,
@@ -991,7 +991,7 @@
3,
height=(risk_tot - risk_dev) / norm_fact,
bottom=risk_dev / norm_fact,
**kwargs
**kwargs,
)
axis.text(
3,
@@ -1395,7 +1395,7 @@ def _plot_list_cost_ben(cb_list, axis=None, **kwargs):
cb_res.benefit[meas_n] / norm_fact,
1 / cb_res.cost_ben_ratio[meas_n],
color=cb_res.color_rgb[meas_n],
**kwargs
**kwargs,
)
)

@@ -1489,7 +1489,7 @@ def _plot_averted_arrow(axis, bar_4, tot_benefit, risk_tot, norm_fact, **kwargs)
bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2,
risk_tot / norm_fact - arrow_len,
),
**kwargs
**kwargs,
)
)

4 changes: 2 additions & 2 deletions climada/engine/forecast.py
@@ -476,7 +476,7 @@ def _plot_imp_map(
coord[:, 0],
c=array_im,
transform=ccrs.PlateCarree(),
**kwargs
**kwargs,
)
if shapes:
# add warning regions
@@ -882,7 +882,7 @@ def _plot_exc_prob(
coord[:, 0],
c=array_im,
transform=ccrs.PlateCarree(),
**kwargs
**kwargs,
)
if shapes:
# add warning regions
18 changes: 7 additions & 11 deletions climada/engine/impact.py
@@ -35,12 +35,12 @@
import contextily as ctx
import geopandas as gpd
import h5py
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xlsxwriter
from deprecation import deprecated
from matplotlib import animation
from matplotlib.colors import Normalize
from pandas.api.types import is_string_dtype
from pyproj import CRS as pyprojCRS
@@ -188,8 +187,7 @@ def __init__(
)
if len(self.coord_exp) != len(self.eai_exp):
raise AttributeError(
"Number of exposures points is different from"
"number of eai_exp values"
"Number of exposures points is different fromnumber of eai_exp values"
)
if imp_mat is not None:
self.imp_mat = imp_mat
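Messages like the one above are built from adjacent string literals joined across lines; the separating space has to live inside one of the fragments, otherwise the words fuse silently (the same pattern appears in `read_excel` below and in `calc_delta_climate.py`). A minimal sketch of the pitfall, not project code:

```python
# Adjacent string literals concatenate exactly as written: with no space at the
# seam, the words fuse.
fused = (
    "Number of exposures points is different from"
    "number of eai_exp values"
)
assert fused.endswith("different fromnumber of eai_exp values")

# Keeping the separator inside one fragment (or using a single literal) fixes it.
joined = (
    "Number of exposures points is different from "
    "number of eai_exp values"
)
assert "from number of" in joined
```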
@@ -214,8 +213,7 @@ def calc(
):
"""This function is deprecated, use ``ImpactCalc.impact`` instead."""
LOGGER.warning(
"The use of Impact().calc() is deprecated."
" Use ImpactCalc().impact() instead."
"The use of Impact().calc() is deprecated. Use ImpactCalc().impact() instead."
)
from climada.engine.impact_calc import ( # pylint: disable=import-outside-toplevel
ImpactCalc,
@@ -1238,8 +1236,9 @@ def plot_rp_imp(

impacts_stats_vals = impacts_stats.values[:, 1:].T.astype(float)
if not log10_scale:
min_impact, max_impact = np.nanmin(impacts_stats_vals), np.nanmax(
impacts_stats_vals
min_impact, max_impact = (
np.nanmin(impacts_stats_vals),
np.nanmax(impacts_stats_vals),
)
kwargs.update(
{
@@ -1471,7 +1470,6 @@ def write_csr(group, name, value):

# Open file in write mode
with h5py.File(file_path, "w") as file:

# Now write all attributes
# NOTE: Remove leading underscore to write '_tot_value' as regular attribute
for name, value in self.__dict__.items():
@@ -1612,8 +1610,7 @@ def from_excel(cls, file_name):
def read_excel(self, *args, **kwargs):
"""This function is deprecated, use Impact.from_excel instead."""
LOGGER.warning(
"The use of Impact.read_excel is deprecated."
"Use Impact.from_excel instead."
"The use of Impact.read_excel is deprecated.Use Impact.from_excel instead."
)
self.__dict__ = Impact.from_excel(*args, **kwargs).__dict__

@@ -1671,7 +1668,6 @@ def from_hdf5(cls, file_path: Union[str, Path]):
"""
kwargs = dict()
with h5py.File(file_path, "r") as file:

# Impact matrix
if "imp_mat" in file:
impact_matrix = file["imp_mat"]
3 changes: 1 addition & 2 deletions climada/engine/impact_calc.py
@@ -196,8 +196,7 @@ def impact(
)
if insured:
LOGGER.info(
"cover and/or deductible columns detected,"
" going to calculate insured impact"
"cover and/or deductible columns detected, going to calculate insured impact"
)
# TODO: make a better impact matrix generator for insured impacts when
# the impact matrix is already present
1 change: 0 additions & 1 deletion climada/engine/test/test_cost_benefit.py
@@ -39,7 +39,6 @@
from climada.entity.measures.base import LOGGER as ILOG
from climada.hazard.base import Hazard
from climada.test import get_test_file
from climada.util.api_client import Client
from climada.util.constants import ENT_DEMO_FUTURE, ENT_DEMO_TODAY

ENT_TEST_MAT = get_test_file("demo_today", file_format="MAT-file")
1 change: 0 additions & 1 deletion climada/engine/test/test_impact.py
@@ -1207,7 +1207,6 @@ def test__exp_build_event(self):


class TestMatchCentroids(unittest.TestCase):

def test_match_centroids(self):
"Test that hazard centroids get assigned correctly"
exp = ENT.exposures
1 change: 0 additions & 1 deletion climada/engine/test/test_impact_calc.py
@@ -35,7 +35,6 @@
from climada.entity.entity_def import Entity
from climada.hazard.base import Centroids, Hazard
from climada.test import get_test_file
from climada.util.api_client import Client
from climada.util.config import Config
from climada.util.constants import DEMO_DIR, ENT_DEMO_TODAY

3 changes: 1 addition & 2 deletions climada/engine/unsequa/calc_delta_climate.py
@@ -235,8 +235,7 @@ def uncertainty(

if unc_sample.samples_df.empty:
raise ValueError(
"No sample was found. Please create one first"
"using UncImpact.make_sample(N)"
"No sample was found. Please create one firstusing UncImpact.make_sample(N)"
)

# copy may not be needed, but is kept to prevent potential