diff --git a/pyproject.toml b/pyproject.toml index 596e327..ff7f490 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "Add your description here" readme = "README.md" requires-python = ">=3.12" dependencies = [ - "astropy~=7.0.1", + "astropy~=7.2.0", "cartopy~=0.24.1", "cf-xarray~=0.10", "cftime~=1.6.0", @@ -13,8 +13,9 @@ dependencies = [ "matplotlib~=3.8", "netcdf4==1.7.3", "numcodecs>=0.13.0,<0.17", - "numcodecs-combinators[xarray]~=0.2.10", + "numcodecs-combinators[xarray]~=0.2.13", "numcodecs-observers~=0.1.2", + "numcodecs-safeguards==0.1.0b2", "numcodecs-wasm~=0.2.2", "numcodecs-wasm-bit-round~=0.4.0", "numcodecs-wasm-fixed-offset-scale~=0.4.0", @@ -28,6 +29,7 @@ dependencies = [ "numcodecs-wasm-zfp~=0.6.0", "numcodecs-wasm-zfp-classic~=0.4.0", "numcodecs-wasm-zstd~=0.4.0", + "numcodecs-zero~=0.1.2", "pandas~=2.2", "scipy~=1.14", "seaborn~=0.13.2", diff --git a/scorecards.ipynb b/scorecards.ipynb new file mode 100644 index 0000000..b95a393 --- /dev/null +++ b/scorecards.ipynb @@ -0,0 +1,641 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "e7ab252b", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib as mpl\n", + "import seaborn as sns\n", + "from matplotlib.lines import Line2D\n", + "\n", + "from climatebenchpress.compressor.plotting.plot_metrics import (\n", + " _rename_compressors,\n", + " _get_legend_name,\n", + " _COMPRESSOR_ORDER,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4ee15a6b", + "metadata": {}, + "source": [ + "# Process results" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2d242e7c", + "metadata": {}, + "outputs": [], + "source": [ + "results_file = \"metrics/all_results.csv\"\n", + "df = pd.read_csv(results_file)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ec124c29", + "metadata": {}, + "outputs": [], + "source": [ + "def create_data_matrix(\n", + " df: pd.DataFrame,\n", + " error_bound: str,\n", + " metrics: list[str] = [\n", + " \"DSSIM\",\n", + " \"MAE\",\n", + " \"Max Absolute Error\",\n", + " \"Spectral Error\",\n", + " \"Compression Ratio [raw B / enc B]\",\n", + " \"Satisfies Bound (Value)\",\n", + " ],\n", + "):\n", + " df_filtered = df[df[\"Error Bound Name\"] == error_bound].copy()\n", + " df_filtered[\"Satisfies Bound (Value)\"] = (\n", + " df_filtered[\"Satisfies Bound (Value)\"] * 100\n", + " ) # Convert to percentage\n", + "\n", + " # Get unique variables and compressors\n", + " # dataset_variables = sorted(df_filtered[['Dataset', 'Variable']].drop_duplicates().apply(lambda x: \"/\".join(x), axis=1).unique())\n", + " dataset_variables = sorted(df_filtered[\"Variable\"].unique())\n", + " compressors = sorted(\n", + " df_filtered[\"Compressor\"].unique(),\n", + " key=lambda k: _COMPRESSOR_ORDER.index(_get_legend_name(k)),\n", + " )\n", + "\n", + " column_labels = []\n", + " for metric in metrics:\n", + " for dataset_variable in dataset_variables:\n", + " column_labels.append(f\"{dataset_variable}\\n{metric}\")\n", + "\n", + " # Initialize the data matrix\n", + " data_matrix = np.full((len(compressors), len(column_labels)), np.nan)\n", + "\n", + " # Fill the matrix with data\n", + " for i, compressor in enumerate(compressors):\n", + " for j, metric in enumerate(metrics):\n", + " for k, dataset_variable in enumerate(dataset_variables):\n", + " # Get data for this compressor-variable combination\n", + " # dataset, variable = 
dataset_variable.split('/')\n", + " variable = dataset_variable\n", + " subset = df_filtered[\n", + " (df_filtered[\"Compressor\"] == compressor)\n", + " & (df_filtered[\"Variable\"] == variable) # &\n", + " # (df_filtered['Dataset'] == dataset)\n", + " ]\n", + " if subset.empty:\n", + " print(f\"No data for Compressor: {compressor}, Variable: {variable}\")\n", + " continue\n", + "\n", + " if metric in [\"DSSIM\", \"Spectral Error\"] and variable in [\"ta\", \"tos\"]:\n", + " # These variables have large regions of NaN values which makes the\n", + " # DSSIM and Spectral Error values unreliable.\n", + " continue\n", + "\n", + " col_idx = j * len(dataset_variables) + k\n", + " if metric in subset.columns:\n", + " values = subset[metric]\n", + " if len(values) == 1:\n", + " data_matrix[i, col_idx] = values.iloc[0]\n", + "\n", + " return data_matrix, compressors, dataset_variables" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2a399d30", + "metadata": {}, + "outputs": [], + "source": [ + "df = df[\n", + " ~df[\"Compressor\"].isin(\n", + " [\n", + " \"bitround\",\n", + " \"jpeg2000-conservative-abs\",\n", + " \"stochround-conservative-abs\",\n", + " \"stochround-pco-conservative-abs\",\n", + " \"zfp-conservative-abs\",\n", + " \"bitround-conservative-rel\",\n", + " \"stochround-pco\",\n", + " \"stochround\",\n", + " \"zfp\",\n", + " \"jpeg2000\",\n", + " ]\n", + " )\n", + "]\n", + "df = df[~df[\"Dataset\"].str.contains(\"-tiny\")]\n", + "df = df[~df[\"Dataset\"].str.contains(\"-chunked\")]\n", + "df = _rename_compressors(df)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2d019b8d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: 
pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: sperr, Variable: pr\n", + "No data for Compressor: sperr, Variable: ta\n", + "No data for Compressor: sperr, Variable: tos\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n", + "No data for Compressor: safeguarded-sperr, Variable: pr\n" + ] + } + ], + "source": [ + "metrics = [\n", + " \"DSSIM\",\n", + " \"MAE\",\n", + " \"Max Absolute Error\",\n", + " \"Spectral Error\",\n", + " \"Compression Ratio [raw B / enc B]\",\n", + " \"Satisfies Bound (Value)\",\n", + "]\n", + "scorecard_data = {}\n", + "for error_bound in [\"low\", \"mid\", \"high\"]:\n", + " scorecard_data[error_bound] = create_data_matrix(df, error_bound, metrics)" + ] + }, + { + "cell_type": "markdown", + "id": "ae80d757", + "metadata": {}, + "source": [ + "# Scorecard" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "871ae766", + "metadata": {}, + "outputs": [], + "source": [ + "METRICS2NAME = {\n", + " \"DSSIM\": \"dSSIM\",\n", + " \"MAE\": \"Mean Absolute Error\",\n", + " \"Compression Ratio [raw B / enc B]\": \"Compression Ratio\",\n", + " \"Satisfies Bound (Value)\": r\"% of Data Points Violating the Error Bound\",\n", + "}\n", + "\n", + "VARIABLE2NAME = {\n", + " \"10m_u_component_of_wind\": \"10u\",\n", + " \"10m_v_component_of_wind\": \"10v\",\n", + " \"mean_sea_level_pressure\": \"msl\",\n", + "}\n", + "\n", + "DATASET2PREFIX = {\n", + " \"era5-hurricane\": \"h-\",\n", + "}\n", + "\n", + "\n", + "def get_variable_label(variable):\n", + " dataset, var_name = variable.split(\"/\")\n", + " prefix = DATASET2PREFIX.get(dataset, \"\")\n", + " var_name = VARIABLE2NAME.get(var_name, var_name)\n", + " return f\"{prefix}{var_name}\"\n", + "\n", + "\n", + "def 
create_compression_scorecard(\n",
+    "    data_matrix,\n",
+    "    compressors,\n",
+    "    variables,\n",
+    "    metrics,\n",
+    "    cbar=True,\n",
+    "    ref_compressor=\"sz3\",\n",
+    "    higher_better_metrics=[\"DSSIM\", \"Compression Ratio [raw B / enc B]\"],\n",
+    "    save_fn=None,\n",
+    "    compare_against_0=False,\n",
+    "    highlight_bigger_than_one=False,\n",
+    "):\n",
+    "    \"\"\"\n",
+    "    Create a scorecard plot similar to the weather forecasting example.\n",
+    "\n",
+    "    Parameters:\n",
+    "    - data_matrix: 2D array with compressors as rows, metric-variable combinations as columns\n",
+    "    - compressors: list of compressor names\n",
+    "    - variables: list of variable names\n",
+    "    - metrics: list of metric names\n",
+    "    - cbar: whether to draw the colorbar below the panels\n",
+    "    - ref_compressor: reference compressor for relative calculations\n",
+    "    - higher_better_metrics: metrics for which larger values are better\n",
+    "    - save_fn: filename to save plot (optional)\n",
+    "    - compare_against_0: compute differences against zero instead of the reference compressor\n",
+    "    - highlight_bigger_than_one: colour cells by their sign only (chunked vs. non-chunked mode)\n",
+    "    \"\"\"\n",
+    "\n",
+    "    # Calculate relative differences vs reference compressor\n",
+    "    ref_idx = compressors.index(ref_compressor)\n",
+    "    ref_values = data_matrix[ref_idx, :]\n",
+    "    if compare_against_0:\n",
+    "        ref_values = np.zeros_like(data_matrix[ref_idx, :])\n",
+    "\n",
+    "    relative_matrix = np.full_like(data_matrix, np.nan)\n",
+    "    if highlight_bigger_than_one:\n",
+    "        relative_matrix = np.sign(data_matrix) * 101\n",
+    "        for j in range(data_matrix.shape[1]):\n",
+    "            if metrics[j // len(variables)] == \"Satisfies Bound (Value)\":\n",
+    "                # For bound satisfaction, lower is better (fewer pixels exceeding the error bound).\n",
+    "                relative_matrix[:, j] = -1 * relative_matrix[:, j]\n",
+    "    else:\n",
+    "        for i in range(len(compressors)):\n",
+    "            for j in range(data_matrix.shape[1]):\n",
+    "                if not np.isnan(data_matrix[i, j]) and not np.isnan(ref_values[j]):\n",
+    "                    ref_val = np.abs(ref_values[j])\n",
+    "                    if ref_val == 0.0:\n",
+    "                        ref_val = 1e-10  # Avoid division by zero\n",
+    "                    if metrics[j // len(variables)] in higher_better_metrics:\n",
+    "                        # Higher is better metrics\n",
+    "                        relative_matrix[i, j] = (\n",
+    "                            (ref_values[j] - data_matrix[i, j]) / ref_val * 100\n",
+    "                        )\n",
+    "                    elif metrics[j // len(variables)] == \"Satisfies Bound (Value)\":\n",
+    "                        relative_matrix[i, j] = 100 if data_matrix[i, j] != 0 else 0\n",
+    "                    else:\n",
+    "                        relative_matrix[i, j] = (\n",
+    "                            (data_matrix[i, j] - ref_values[j]) / ref_val * 100\n",
+    "                        )\n",
+    "\n",
+    "    # Set up colormap - similar to original\n",
+    "    reds = sns.color_palette(\"Reds\", 6)\n",
+    "    blues = sns.color_palette(\"Blues_r\", 6)\n",
+    "    cmap = mpl.colors.ListedColormap(blues + [(0.95, 0.95, 0.95)] + reds)\n",
+    "    # cb_levels = [-50, -20, -10, -5, -2, -1, 1, 2, 5, 10, 20, 50]\n",
+    "    # cb_levels = [-75, -50, -25, -10, -5, -1, 1, 5, 10, 25, 50, 75]\n",
+    "    cb_levels = [-100, -75, -50, -25, -10, -1, 1, 10, 25, 50, 75, 100]\n",
+    "\n",
+    "    norm = mpl.colors.BoundaryNorm(cb_levels, cmap.N, extend=\"both\")\n",
+    "\n",
+    "    # Calculate figure dimensions\n",
+    "    ncompressors = len(compressors)\n",
+    "    nvariables = len(variables)\n",
+    "    nmetrics = len(metrics)\n",
+    "\n",
+    "    panel_width = (2.5 / 5) * nvariables\n",
+    "    label_width = 1.5 * panel_width\n",
+    "    padding_right = 0.1\n",
+    "    panel_height = panel_width / nvariables\n",
+    "\n",
+    "    title_height = panel_height * 1.25\n",
+    "    cbar_height = panel_height * 2\n",
+    "    spacing_height = panel_height * 0.1\n",
+    "    spacing_width = panel_height * 0.2\n",
+    "\n",
+    "    total_width = (\n",
+    "        label_width\n",
+    "        + nmetrics * panel_width\n",
+    "        + (nmetrics - 1) * spacing_width\n",
+    "        + padding_right\n",
+    "    )\n",
+    "    total_height = (\n",
+    "        title_height\n",
+    "        + cbar_height\n",
+
" + ncompressors * panel_height\n", + " + (ncompressors - 1) * spacing_height\n", + " )\n", + "\n", + " # Create figure and gridspec\n", + " fig = plt.figure(figsize=(total_width, total_height))\n", + " gs = mpl.gridspec.GridSpec(\n", + " ncompressors,\n", + " nmetrics,\n", + " figure=fig,\n", + " left=label_width / total_width,\n", + " right=1 - padding_right / total_width,\n", + " top=1 - (title_height / total_height),\n", + " bottom=cbar_height / total_height,\n", + " hspace=spacing_height / panel_height,\n", + " wspace=spacing_width / panel_width,\n", + " )\n", + "\n", + " # Plot each panel\n", + " for row, compressor in enumerate(compressors):\n", + " for col, metric in enumerate(metrics):\n", + " ax = fig.add_subplot(gs[row, col])\n", + "\n", + " # Get data for this metric (all variables)\n", + " start_col = col * nvariables\n", + " end_col = start_col + nvariables\n", + "\n", + " rel_values = relative_matrix[row, start_col:end_col].reshape(1, -1)\n", + " abs_values = data_matrix[row, start_col:end_col]\n", + "\n", + " # Create heatmap\n", + " img = ax.imshow(rel_values, aspect=\"auto\", cmap=cmap, norm=norm)\n", + "\n", + " # Customize axes\n", + " ax.set_xticks([])\n", + " ax.set_xticklabels([])\n", + " ax.set_yticks([])\n", + " ax.set_yticklabels([])\n", + "\n", + " # Add white grid lines\n", + " for i in range(1, nvariables):\n", + " rect = mpl.patches.Rectangle(\n", + " (i - 0.5, -0.5),\n", + " 1,\n", + " 1,\n", + " linewidth=1,\n", + " edgecolor=\"lightgrey\"\n", + " if np.isnan(abs_values[i]) and np.isnan(abs_values[i - 1])\n", + " else \"white\",\n", + " facecolor=\"none\",\n", + " )\n", + " ax.add_patch(rect)\n", + "\n", + " # Add absolute values as text\n", + " for i, val in enumerate(abs_values):\n", + " # Ensure we don't have black text on dark background\n", + " color = \"black\" if abs(rel_values[0, i]) < 75 else \"white\"\n", + " fontsize = 10\n", + " # Format numbers appropriately\n", + " if metric in [\"DSSIM\", \"Spectral Error\"] and variables[i] in [\n", + " \"ta\",\n", + " \"tos\",\n", + " ]:\n", + " # These variables have large regions of NaN values which makes the\n", + " # DSSIM and Spectral Error values unreliable.\n", + " text = \"N/A\"\n", + " color = \"black\"\n", + " elif np.isnan(val):\n", + " text = \"Crash\"\n", + " color = \"black\"\n", + " elif abs(val) > 10_000:\n", + " text = f\"{val:.1e}\"\n", + " fontsize = 8\n", + " elif abs(val) > 10:\n", + " text = f\"{val:.0f}\"\n", + " elif abs(val) > 1:\n", + " text = f\"{val:.1f}\"\n", + " elif val == 1 and metric == \"DSSIM\":\n", + " text = \"1\"\n", + " elif val == 0:\n", + " text = \"0\"\n", + " elif abs(val) < 0.01:\n", + " text = f\"{val:.1e}\"\n", + " fontsize = 8\n", + " else:\n", + " text = f\"{val:.2f}\"\n", + " ax.text(\n", + " i, 0, text, ha=\"center\", va=\"center\", fontsize=fontsize, color=color\n", + " )\n", + "\n", + " if (\n", + " row > 0\n", + " and np.isnan(val)\n", + " and np.isnan(data_matrix[row - 1, col * nvariables + i])\n", + " and compressor == f\"safeguarded-{compressors[row - 1]}\"\n", + " and not (\n", + " metric in [\"DSSIM\", \"Spectral Error\"]\n", + " and variables[i]\n", + " in [\n", + " \"ta\",\n", + " \"tos\",\n", + " ]\n", + " )\n", + " ):\n", + " ax.annotate(\n", + " \"\",\n", + " xy=(i, -0.15),\n", + " xytext=(i, -0.9),\n", + " arrowprops=dict(arrowstyle=\"->\", lw=2, color=\"lightgrey\"),\n", + " )\n", + "\n", + " # Add row labels (compressor names)\n", + " if col == 0:\n", + " ax.set_ylabel(\n", + " _get_legend_name(compressor),\n", + " rotation=0,\n", + " 
ha=\"right\",\n", + " va=\"center\",\n", + " labelpad=10,\n", + " fontsize=14,\n", + " )\n", + "\n", + " # Add column titles (variable names)\n", + " if row == 0:\n", + " # ax.set_title(VARIABLE2NAME.get(variable, variable), fontsize=10, pad=10)\n", + " ax.set_title(METRICS2NAME.get(metric, metric), fontsize=16, pad=10)\n", + "\n", + " # Add metric labels at the top on the top row\n", + " if row == 0:\n", + " # ax.tick_params(top=True, labeltop=True, bottom=False, labelbottom=False)\n", + " # ax.set_xticks(range(nmetrics))\n", + " # ax.set_xticklabels(\n", + " # [METRICS2NAME.get(m, m) for m in metrics],\n", + " # rotation=45,\n", + " # ha='left', fontsize=8)\n", + " ax.tick_params(top=True, labeltop=True, bottom=False, labelbottom=False)\n", + " ax.set_xticks(range(nvariables))\n", + " ax.set_xticklabels(\n", + " [VARIABLE2NAME.get(v, v) for v in variables],\n", + " rotation=45,\n", + " ha=\"left\",\n", + " fontsize=12,\n", + " )\n", + "\n", + " # Style spines\n", + " for spine in ax.spines.values():\n", + " spine.set_color(\"0.7\")\n", + "\n", + " # Add colorbar\n", + " if cbar and not highlight_bigger_than_one:\n", + " rel_cbar_height = cbar_height / total_height\n", + " cax = fig.add_axes((0.4, rel_cbar_height * 0.3, 0.5, rel_cbar_height * 0.2))\n", + " cb = fig.colorbar(img, cax=cax, orientation=\"horizontal\")\n", + " cb.ax.set_xticks(cb_levels)\n", + " if highlight_bigger_than_one:\n", + " cb.ax.set_xlabel(\"Better ← |non-chunked - chunked| → Worse\", fontsize=16)\n", + " else:\n", + " cb.ax.set_xlabel(\n", + " f\"Better ← % difference vs {_get_legend_name(ref_compressor)} → Worse\",\n", + " fontsize=16,\n", + " )\n", + "\n", + " if highlight_bigger_than_one:\n", + " chunking_handles = [\n", + " Line2D(\n", + " [],\n", + " [],\n", + " marker=\"s\",\n", + " color=cmap(101),\n", + " linestyle=\"None\",\n", + " markersize=10,\n", + " label=\"Not Chunked Better\",\n", + " ),\n", + " Line2D(\n", + " [],\n", + " [],\n", + " marker=\"s\",\n", + " color=cmap(-101),\n", + " linestyle=\"None\",\n", + " markersize=10,\n", + " label=\"Chunked Better\",\n", + " ),\n", + " ]\n", + "\n", + " ax.legend(\n", + " handles=chunking_handles,\n", + " loc=\"upper left\",\n", + " ncol=2,\n", + " bbox_to_anchor=(-0.5, -0.05),\n", + " fontsize=16,\n", + " )\n", + "\n", + " # plt.tight_layout()\n", + "\n", + " if save_fn:\n", + " plt.savefig(save_fn, dpi=300, bbox_inches=\"tight\")\n", + " plt.close()\n", + " else:\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "678c927b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating scorecard for low bound...\n", + "Creating scorecard for mid bound...\n", + "Creating scorecard for high bound...\n" + ] + } + ], + "source": [ + "for bound_name, (data_matrix, compressors, variables) in scorecard_data.items():\n", + " print(f\"Creating scorecard for {bound_name} bound...\")\n", + " # Split into two rows for better readability.\n", + " create_compression_scorecard(\n", + " data_matrix[:, : 3 * len(variables)],\n", + " compressors,\n", + " variables,\n", + " metrics[:3],\n", + " ref_compressor=\"bitround-pco\",\n", + " cbar=False,\n", + " save_fn=f\"scorecards/{bound_name}_scorecard_row1.pdf\",\n", + " )\n", + "\n", + " create_compression_scorecard(\n", + " data_matrix[:, 3 * len(variables) :],\n", + " compressors,\n", + " variables,\n", + " metrics[3:],\n", + " ref_compressor=\"bitround-pco\",\n", + " save_fn=f\"scorecards/{bound_name}_scorecard_row2.pdf\",\n", + " )" + ] + }, 
+ { + "cell_type": "code", + "execution_count": null, + "id": "c2d8dea1-cd87-48d1-9d5b-8fe106183cbf", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/src/climatebenchpress/compressor/compressors/__init__.py b/src/climatebenchpress/compressor/compressors/__init__.py index b523c75..63da691 100644 --- a/src/climatebenchpress/compressor/compressors/__init__.py +++ b/src/climatebenchpress/compressor/compressors/__init__.py @@ -2,6 +2,12 @@ "BitRound", "BitRoundPco", "Jpeg2000", + "SafeguardedBitRoundPco", + "SafeguardedSperr", + "SafeguardedSz3", + "SafeguardedZero", + "SafeguardedZeroDssim", + "SafeguardedZfpRound", "Sperr", "StochRound", "StochRoundPco", @@ -15,6 +21,14 @@ from .bitround import BitRound from .bitround_pco import BitRoundPco from .jpeg2000 import Jpeg2000 +from .safeguarded import ( + SafeguardedBitRoundPco, + SafeguardedSperr, + SafeguardedSz3, + SafeguardedZero, + SafeguardedZeroDssim, + SafeguardedZfpRound, +) from .sperr import Sperr from .stochround import StochRound from .stochround_pco import StochRoundPco diff --git a/src/climatebenchpress/compressor/compressors/abc.py b/src/climatebenchpress/compressor/compressors/abc.py index c4dacb8..e20429f 100644 --- a/src/climatebenchpress/compressor/compressors/abc.py +++ b/src/climatebenchpress/compressor/compressors/abc.py @@ -88,6 +88,10 @@ def abs_bound_codec( dtype: Optional[np.dtype] = None, data_min: Optional[float] = None, data_max: Optional[float] = None, + data_abs_min: Optional[float] = None, + data_abs_max: Optional[float] = None, + data_min_2d: Optional[np.ndarray] = None, + data_max_2d: Optional[np.ndarray] = None, ) -> Codec: """Create a codec with an absolute error bound.""" pass @@ -100,6 +104,10 @@ def rel_bound_codec( dtype: Optional[np.dtype] = None, data_min: Optional[float] = None, data_max: Optional[float] = None, + data_abs_min: Optional[float] = None, + data_abs_max: Optional[float] = None, + data_min_2d: Optional[np.ndarray] = None, + data_max_2d: Optional[np.ndarray] = None, ) -> Codec: """Create a codec with a relative error bound.""" pass @@ -112,6 +120,8 @@ def build( data_abs_max: dict[VariableName, float], data_min: dict[VariableName, float], data_max: dict[VariableName, float], + data_min_2d: dict[VariableName, np.ndarray], + data_max_2d: dict[VariableName, np.ndarray], error_bounds: list[dict[VariableName, ErrorBound]], ) -> dict[VariantName, list[NamedPerVariableCodec]]: """ @@ -135,6 +145,12 @@ def build( Dict mapping from variable name to minimum value for the variable. data_max : dict[VariableName, float] Dict mapping from variable name to maximum value for the variable. + data_min_2d : dict[VariableName, np.ndarray] + Dict mapping from variable name to per-lat-lon-slice minimum value for the + variable. + data_max_2d : dict[VariableName, np.ndarray] + Dict mapping from variable name to per-lat-lon-slice maximum value for the + variable. error_bounds: list[ErrorBound] List of error bounds to use for the compressor. 
@@ -167,6 +183,10 @@ def build( dtype=dtypes[var], data_min=data_min[var], data_max=data_max[var], + data_abs_min=data_abs_min[var], + data_abs_max=data_abs_max[var], + data_min_2d=data_min_2d[var], + data_max_2d=data_max_2d[var], ) elif eb.rel_error is not None and cls.has_rel_error_impl: new_codecs[var] = partial( @@ -175,6 +195,10 @@ def build( dtype=dtypes[var], data_min=data_min[var], data_max=data_max[var], + data_abs_min=data_abs_min[var], + data_abs_max=data_abs_max[var], + data_min_2d=data_min_2d[var], + data_max_2d=data_max_2d[var], ) else: # This should never happen as we have already transformed the error bounds. diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/__init__.py b/src/climatebenchpress/compressor/compressors/safeguarded/__init__.py new file mode 100644 index 0000000..2fb5669 --- /dev/null +++ b/src/climatebenchpress/compressor/compressors/safeguarded/__init__.py @@ -0,0 +1,15 @@ +__all__ = [ + "SafeguardedBitRoundPco", + "SafeguardedSperr", + "SafeguardedSz3", + "SafeguardedZero", + "SafeguardedZeroDssim", + "SafeguardedZfpRound", +] + +from .bitround_pco import SafeguardedBitRoundPco +from .sperr import SafeguardedSperr +from .sz3 import SafeguardedSz3 +from .zero import SafeguardedZero +from .zero_dssim import SafeguardedZeroDssim +from .zfp_round import SafeguardedZfpRound diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/bitround_pco.py b/src/climatebenchpress/compressor/compressors/safeguarded/bitround_pco.py new file mode 100644 index 0000000..36cd540 --- /dev/null +++ b/src/climatebenchpress/compressor/compressors/safeguarded/bitround_pco.py @@ -0,0 +1,66 @@ +__all__ = ["SafeguardedBitRoundPco"] + + +import numcodecs_safeguards +import numcodecs_wasm_bit_round +import numcodecs_wasm_pco + +from ..abc import Compressor +from ..utils import compute_keepbits + + +class SafeguardedBitRoundPco(Compressor): + """Safeguarded Bit Rounding + PCodec compressor. + + This compressor first applies bit rounding to the data, which reduces the precision of the data + while preserving its overall structure. After that, it uses PCodec for further compression. 
+ """ + + name = "safeguarded-bitround-pco" + description = "Safeguarded(Bit Rounding + PCodec)" + + @staticmethod + def abs_bound_codec(error_bound, *, dtype=None, data_abs_max=None, **kwargs): + assert dtype is not None, "dtype must be provided" + assert data_abs_max is not None, "data_abs_max must be provided" + + # conservative abs->rel error bound transformation, + # same as convert_abs_error_to_rel_error + # so that we can inform the safeguards of the abs bound + keepbits = compute_keepbits(dtype, error_bound / data_abs_max) + + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_wasm_bit_round.BitRound(keepbits=keepbits), + lossless=numcodecs_safeguards.lossless.Lossless( + for_codec=numcodecs_wasm_pco.Pco( + level=8, + mode="auto", + delta="auto", + paging="equal-pages-up-to", + ) + ), + safeguards=[ + dict(kind="eb", type="abs", eb=error_bound, equal_nan=True), + ], + ) + + @staticmethod + def rel_bound_codec(error_bound, *, dtype=None, **kwargs): + assert dtype is not None, "dtype must be provided" + + keepbits = compute_keepbits(dtype, error_bound) + + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_wasm_bit_round.BitRound(keepbits=keepbits), + lossless=numcodecs_safeguards.lossless.Lossless( + for_codec=numcodecs_wasm_pco.Pco( + level=8, + mode="auto", + delta="auto", + paging="equal-pages-up-to", + ) + ), + safeguards=[ + dict(kind="eb", type="rel", eb=error_bound, equal_nan=True), + ], + ) diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/sperr.py b/src/climatebenchpress/compressor/compressors/safeguarded/sperr.py new file mode 100644 index 0000000..5bb2373 --- /dev/null +++ b/src/climatebenchpress/compressor/compressors/safeguarded/sperr.py @@ -0,0 +1,60 @@ +__all__ = ["SafeguardedSperr"] + +import numcodecs +import numcodecs.abc +import numcodecs.compat +import numcodecs_safeguards +import numcodecs_wasm_sperr +import numpy as np +from numcodecs_combinators.stack import CodecStack + +from ..abc import Compressor + + +class SafeguardedSperr(Compressor): + """Safeguarded SPERR compressor.""" + + name = "safeguarded-sperr" + description = "Safeguarded(SPERR)" + + @staticmethod + def abs_bound_codec(error_bound, **kwargs): + return numcodecs_safeguards.SafeguardedCodec( + codec=CodecStack( + NaNToMean(), + numcodecs_wasm_sperr.Sperr(mode="pwe", pwe=error_bound), + ), + safeguards=[ + dict(kind="eb", type="abs", eb=error_bound, equal_nan=True), + ], + ) + + @staticmethod + def rel_bound_codec(error_bound, *, data_abs_min=None, **kwargs): + assert data_abs_min is not None, "data_abs_min must be provided" + + return numcodecs_safeguards.SafeguardedCodec( + codec=CodecStack( + NaNToMean(), + # conservative rel->abs error bound transformation, + # same as convert_rel_error_to_abs_error + # so that we can inform the safeguards of the rel bound + numcodecs_wasm_sperr.Sperr(mode="pwe", pwe=error_bound * data_abs_min), + ), + safeguards=[ + dict(kind="eb", type="rel", eb=error_bound, equal_nan=True), + ], + ) + + +# inspired by H5Z-SPERR's treatment of NaN values: +# https://github.com/NCAR/H5Z-SPERR/blob/72ebcb00e382886c229c5ef5a7e237fe451d5fb8/src/h5z-sperr.c#L464-L473 +# https://github.com/NCAR/H5Z-SPERR/blob/72ebcb00e382886c229c5ef5a7e237fe451d5fb8/src/h5zsperr_helper.cpp#L179-L212 +class NaNToMean(numcodecs.abc.Codec): + codec_id = "nan-to-mean" # type: ignore + + def encode(self, buf): + return np.nan_to_num(buf, nan=np.nanmean(buf), posinf=np.inf, neginf=-np.inf) + + def decode(self, buf, out=None): + return 
numcodecs.compat.ndarray_copy(buf, out) diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/sz3.py b/src/climatebenchpress/compressor/compressors/safeguarded/sz3.py new file mode 100644 index 0000000..609f1de --- /dev/null +++ b/src/climatebenchpress/compressor/compressors/safeguarded/sz3.py @@ -0,0 +1,31 @@ +__all__ = ["SafeguardedSz3"] + +import numcodecs_safeguards +import numcodecs_wasm_sz3 + +from ..abc import Compressor + + +class SafeguardedSz3(Compressor): + """Safeguarded SZ3 compressor.""" + + name = "safeguarded-sz3" + description = "Safeguarded(SZ3)" + + @staticmethod + def abs_bound_codec(error_bound, **kwargs): + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_wasm_sz3.Sz3(eb_mode="abs", eb_abs=error_bound), + safeguards=[ + dict(kind="eb", type="abs", eb=error_bound, equal_nan=True), + ], + ) + + @staticmethod + def rel_bound_codec(error_bound, **kwargs): + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_wasm_sz3.Sz3(eb_mode="rel", eb_rel=error_bound), + safeguards=[ + dict(kind="eb", type="rel", eb=error_bound, equal_nan=True), + ], + ) diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/zero.py b/src/climatebenchpress/compressor/compressors/safeguarded/zero.py new file mode 100644 index 0000000..99e1790 --- /dev/null +++ b/src/climatebenchpress/compressor/compressors/safeguarded/zero.py @@ -0,0 +1,31 @@ +__all__ = ["SafeguardedZero"] + +import numcodecs_safeguards +import numcodecs_zero + +from ..abc import Compressor + + +class SafeguardedZero(Compressor): + """Safeguarded all-zero compressor.""" + + name = "safeguarded-zero" + description = "Safeguarded(0)" + + @staticmethod + def abs_bound_codec(error_bound, **kwargs): + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_zero.ZeroCodec(), + safeguards=[ + dict(kind="eb", type="abs", eb=error_bound, equal_nan=True), + ], + ) + + @staticmethod + def rel_bound_codec(error_bound, **kwargs): + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_zero.ZeroCodec(), + safeguards=[ + dict(kind="eb", type="rel", eb=error_bound, equal_nan=True), + ], + ) diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/zero_dssim.py b/src/climatebenchpress/compressor/compressors/safeguarded/zero_dssim.py new file mode 100644 index 0000000..ea483ba --- /dev/null +++ b/src/climatebenchpress/compressor/compressors/safeguarded/zero_dssim.py @@ -0,0 +1,95 @@ +__all__ = ["SafeguardedZeroDssim"] + +import numcodecs_safeguards +import numcodecs_zero + +from ..abc import Compressor + + +class SafeguardedZeroDssim(Compressor): + """Safeguarded all-zero compressor that also safeguards the dSSIM score.""" + + name = "safeguarded-zero-dssim" + description = "Safeguarded(0, dSSIM)" + + @staticmethod + def abs_bound_codec(error_bound, data_min_2d=None, data_max_2d=None, **kwargs): + assert data_min_2d is not None, "data_min_2d must be provided" + assert data_max_2d is not None, "data_max_2d must be provided" + + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_zero.ZeroCodec(), + safeguards=[ + dict(kind="eb", type="abs", eb=error_bound, equal_nan=True), + # guarantee that the per-latitude-longitude-slice minimum and + # maximum are preserved, which simplifies the rescaling + dict(kind="sign", offset="x_min"), + dict(kind="sign", offset="x_max"), + dict( + kind="qoi_eb_pw", + qoi=""" + # === pointwise dSSIM quantity of interest === # + + # we guarantee that + # min(data) = min(corrected) and + # max(data) = max(corrected) + # with the 
sign safeguards above
+                    v["smin"] = c["x_min"];
+                    v["smax"] = c["x_max"];
+                    v["r"] = v["smax"] - v["smin"];
+
+                    # re-scale to [0-1] and quantize to 256 bins
+                    v["sc_a2"] = round_ties_even(((x - v["smin"]) / v["r"]) * 255) / 255;
+
+                    # force the quantized value to stay the same
+                    return v["sc_a2"];
+                    """,
+                    type="abs",
+                    eb=0,
+                ),
+            ],
+            # use data_min_2d instead of $x_min since we need the minimum per
+            # 2d latitude-longitude slice
+            fixed_constants=dict(x_min=data_min_2d, x_max=data_max_2d),
+        )
+
+    @staticmethod
+    def rel_bound_codec(error_bound, data_min_2d=None, data_max_2d=None, **kwargs):
+        assert data_min_2d is not None, "data_min_2d must be provided"
+        assert data_max_2d is not None, "data_max_2d must be provided"
+
+        return numcodecs_safeguards.SafeguardedCodec(
+            codec=numcodecs_zero.ZeroCodec(),
+            safeguards=[
+                dict(kind="eb", type="rel", eb=error_bound, equal_nan=True),
+                # guarantee that the per-latitude-longitude-slice minimum and
+                # maximum are preserved, which simplifies the rescaling
+                dict(kind="sign", offset="x_min"),
+                dict(kind="sign", offset="x_max"),
+                dict(
+                    kind="qoi_eb_pw",
+                    qoi="""
+                    # === pointwise dSSIM quantity of interest === #
+
+                    # we guarantee that
+                    # min(data) = min(corrected) and
+                    # max(data) = max(corrected)
+                    # with the sign safeguards above
+                    v["smin"] = c["x_min"];
+                    v["smax"] = c["x_max"];
+                    v["r"] = v["smax"] - v["smin"];
+
+                    # re-scale to [0-1] and quantize to 256 bins
+                    v["sc_a2"] = round_ties_even(((x - v["smin"]) / v["r"]) * 255) / 255;
+
+                    # force the quantized value to stay the same
+                    return v["sc_a2"];
+                    """,
+                    type="abs",
+                    eb=0,
+                ),
+            ],
+            # use data_min_2d instead of $x_min since we need the minimum per
+            # 2d latitude-longitude slice
+            fixed_constants=dict(x_min=data_min_2d, x_max=data_max_2d),
+        )
diff --git a/src/climatebenchpress/compressor/compressors/safeguarded/zfp_round.py b/src/climatebenchpress/compressor/compressors/safeguarded/zfp_round.py
new file mode 100644
index 0000000..d98f3d9
--- /dev/null
+++ b/src/climatebenchpress/compressor/compressors/safeguarded/zfp_round.py
@@ -0,0 +1,54 @@
+__all__ = ["SafeguardedZfpRound"]
+
+import numcodecs_safeguards
+import numcodecs_wasm_zfp
+
+from ..abc import Compressor
+
+
+class SafeguardedZfpRound(Compressor):
+    """Safeguarded ZFP-ROUND compressor.
+
+    This is an adjusted version of the ZFP compressor with an improved rounding mechanism
+    for the transform coefficients.
+    """
+
+    name = "safeguarded-zfp-round"
+    description = "Safeguarded(ZFP-ROUND)"
+
+    # NOTE:
+    # ZFP's mechanism for strictly supporting relative error bounds is to
+    # truncate the floating point bit representation and then use ZFP's lossless
+    # mode for compression. This is essentially equivalent to the BitRound
+    # compressors we already implement (the difference being which lossless
+    # compression algorithm is used).
+    # See https://zfp.readthedocs.io/en/release1.0.1/faq.html#q-relerr for more details.
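+    #
+    # As a quick sanity check of the conservative rel->abs conversion used in
+    # rel_bound_codec below (illustrative numbers, not benchmark values): the
+    # fixed-accuracy tolerance eb_rel * data_abs_min satisfies
+    #     |decoded - x| <= eb_rel * min|x| <= eb_rel * |x|   for every x,
+    # so the pointwise relative bound holds everywhere. E.g. eb_rel = 1e-2 and
+    # min|x| = 5.0 give tolerance = 0.05, while the relative bound itself
+    # would allow |error| up to 1.0 at x = 100.0.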
+ + @staticmethod + def abs_bound_codec(error_bound, **kwargs): + return numcodecs_safeguards.SafeguardedCodec( + codec=numcodecs_wasm_zfp.Zfp( + mode="fixed-accuracy", tolerance=error_bound, non_finite="allow-unsafe" + ), + safeguards=[ + dict(kind="eb", type="abs", eb=error_bound, equal_nan=True), + ], + ) + + @staticmethod + def rel_bound_codec(error_bound, *, data_abs_min=None, **kwargs): + assert data_abs_min is not None, "data_abs_min must be provided" + + return numcodecs_safeguards.SafeguardedCodec( + # conservative rel->abs error bound transformation, + # same as convert_rel_error_to_abs_error + # so that we can inform the safeguards of the rel bound + codec=numcodecs_wasm_zfp.Zfp( + mode="fixed-accuracy", + tolerance=error_bound * data_abs_min, + non_finite="allow-unsafe", + ), + safeguards=[ + dict(kind="eb", type="rel", eb=error_bound, equal_nan=True), + ], + ) diff --git a/src/climatebenchpress/compressor/plotting/plot_metrics.py b/src/climatebenchpress/compressor/plotting/plot_metrics.py index b00d92c..1b5b1bb 100644 --- a/src/climatebenchpress/compressor/plotting/plot_metrics.py +++ b/src/climatebenchpress/compressor/plotting/plot_metrics.py @@ -13,15 +13,21 @@ _COMPRESSOR2LINEINFO = [ ("jpeg2000", ("#EE7733", "-")), - ("sperr", ("#117733", ":")), - ("zfp-round", ("#DDAA33", "--")), + ("sperr", ("#117733", "-")), + ("zfp-round", ("#DDAA33", "-")), ("zfp", ("#EE3377", "--")), - ("sz3", ("#CC3311", "-.")), - ("bitround-pco", ("#0077BB", ":")), + ("sz3", ("#CC3311", "-")), + ("bitround-pco", ("#0077BB", "-")), ("bitround", ("#33BBEE", "-")), ("stochround-pco", ("#BBBBBB", "--")), ("stochround", ("#009988", "--")), ("tthresh", ("#882255", "-.")), + ("safeguarded-sperr", ("#117733", ":")), + ("safeguarded-zfp-round", ("#DDAA33", ":")), + ("safeguarded-sz3", ("#CC3311", ":")), + ("safeguarded-zero-dssim", ("#9467BD", "--")), + ("safeguarded-zero", ("#9467BD", ":")), + ("safeguarded-bitround-pco", ("#0077BB", ":")), ] @@ -36,19 +42,38 @@ def _get_lineinfo(compressor: str) -> tuple[str, str]: _COMPRESSOR2LEGEND_NAME = [ ("jpeg2000", "JPEG2000"), ("sperr", "SPERR"), - ("zfp-round", "ZFP-ROUND"), + ("zfp-round", "ZFP"), ("zfp", "ZFP"), - ("sz3", "SZ3"), - ("bitround-pco", "BitRound + PCO"), + ("sz3", "SZ3[v3.2]"), + ("bitround-pco", "BitRound"), ("bitround", "BitRound + Zstd"), ("stochround-pco", "StochRound + PCO"), ("stochround", "StochRound + Zstd"), ("tthresh", "TTHRESH"), + ("safeguarded-sperr", "Safeguarded(SPERR)"), + ("safeguarded-zfp-round", "Safeguarded(ZFP)"), + ("safeguarded-sz3", "Safeguarded(SZ3[v3.2])"), + ("safeguarded-zero-dssim", "Safeguarded(0, dSSIM)"), + ("safeguarded-zero", "Safeguarded(0)"), + ("safeguarded-bitround-pco", "Safeguarded(BitRound)"), +] + +_COMPRESSOR_ORDER = [ + "BitRound", + "Safeguarded(BitRound)", + "ZFP", + "Safeguarded(ZFP)", + "SZ3[v3.2]", + "Safeguarded(SZ3[v3.2])", + "SPERR", + "Safeguarded(SPERR)", + "Safeguarded(0)", + "Safeguarded(0, dSSIM)", ] DISTORTION2LEGEND_NAME = { "Relative MAE": "Mean Absolute Error", - "Relative DSSIM": "DSSIM", + "Relative dSSIM": "dSSIM", "Relative MaxAbsError": "Max Absolute Error", "Spectral Error": "Spectral Error", } @@ -102,6 +127,7 @@ def plot_metrics( df = pd.read_csv(metrics_path / "all_results.csv") # Filter out excluded datasets and compressors + # bitround jpeg2000-conservative-abs stochround-conservative-abs stochround-pco-conservative-abs zfp-conservative-abs bitround-conservative-rel stochround-pco stochround zfp jpeg2000 df = df[~df["Compressor"].isin(exclude_compressor)] df = 
df[~df["Dataset"].isin(exclude_dataset)] is_tiny = df["Dataset"].str.endswith("-tiny") @@ -111,16 +137,16 @@ def plot_metrics( filter_chunked = is_chunked if chunked_datasets else ~is_chunked df = df[filter_chunked] - _plot_per_variable_metrics( - datasets=datasets, - compressed_datasets=compressed_datasets, - plots_path=plots_path, - all_results=df, - rd_curves_metrics=["Max Absolute Error", "MAE", "DSSIM", "Spectral Error"], - ) + # _plot_per_variable_metrics( + # datasets=datasets, + # compressed_datasets=compressed_datasets, + # plots_path=plots_path, + # all_results=df, + # rd_curves_metrics=["Max Absolute Error", "MAE", "DSSIM", "Spectral Error"], + # ) df = _rename_compressors(df) - normalized_df = _normalize(df) + normalized_df, normalized_mean_std = _normalize(df) _plot_bound_violations( normalized_df, bound_names, plots_path / "bound_violations.pdf" ) @@ -129,7 +155,7 @@ def plot_metrics( for metric in [ "Relative MAE", - "Relative DSSIM", + "Relative dSSIM", "Relative MaxAbsError", "Relative SpectralError", ]: @@ -138,6 +164,7 @@ def plot_metrics( normalized_df, compression_metric="Relative CR", distortion_metric=metric, + mean_std=normalized_mean_std[metric], outfile=plots_path / f"rd_curve_{metric.lower().replace(' ', '_')}.pdf", agg="mean", bound_names=bound_names, @@ -147,6 +174,7 @@ def plot_metrics( normalized_df, compression_metric="Relative CR", distortion_metric=metric, + mean_std=normalized_mean_std[metric], outfile=plots_path / f"full_rd_curve_{metric.lower().replace(' ', '_')}.pdf", agg="mean", @@ -188,7 +216,7 @@ def _normalize(data): normalize_vars = [ ("Compression Ratio [raw B / enc B]", "Relative CR"), ("MAE", "Relative MAE"), - ("DSSIM", "Relative DSSIM"), + ("DSSIM", "Relative dSSIM"), ("Max Absolute Error", "Relative MaxAbsError"), ("Spectral Error", "Relative SpectralError"), ] @@ -198,6 +226,7 @@ def _normalize(data): dssim_unreliable = normalized["Variable"].isin(["ta", "tos"]) normalized.loc[dssim_unreliable, "DSSIM"] = np.nan + normalize_mean_std = dict() for col, new_col in normalize_vars: mean_std = dict() for var in variables: @@ -207,12 +236,15 @@ def _normalize(data): # Normalize each variable by its mean and std normalized[new_col] = normalized.apply( - lambda x: (x[col] - mean_std[x["Variable"]][0]) - / mean_std[x["Variable"]][1], + lambda x: ( + (x[col] - mean_std[x["Variable"]][0]) / mean_std[x["Variable"]][1] + ), axis=1, ) - return normalized + normalize_mean_std[new_col] = mean_std + + return normalized, normalize_mean_std def _plot_per_variable_metrics( @@ -407,6 +439,7 @@ def _plot_aggregated_rd_curve( normalized_df, compression_metric, distortion_metric, + mean_std, outfile: None | Path = None, agg="median", bound_names=["low", "mid", "high"], @@ -418,7 +451,10 @@ def _plot_aggregated_rd_curve( # Exclude variables that are not relevant for the distortion metric. normalized_df = normalized_df[~normalized_df["Variable"].isin(exclude_vars)] - compressors = normalized_df["Compressor"].unique() + compressors = sorted( + normalized_df["Compressor"].unique(), + key=lambda k: _COMPRESSOR_ORDER.index(_get_legend_name(k)), + ) agg_distortion = normalized_df.groupby(["Error Bound Name", "Compressor"])[ [compression_metric, distortion_metric] ].agg(agg) @@ -446,7 +482,13 @@ def _plot_aggregated_rd_curve( if remove_outliers: # SZ3 and JPEG2000 often give outlier values and violate the bounds. 
- exclude_compressors = ["sz3", "jpeg2000"] + exclude_compressors = [ + "sz3", + "jpeg2000", + "safeguarded-zero-dssim", + "safeguarded-zero", + "safeguarded-sz3", + ] filtered_agg = agg_distortion[ ~agg_distortion.index.get_level_values("Compressor").isin( exclude_compressors @@ -492,28 +534,45 @@ def _plot_aggregated_rd_curve( right=True, ) plt.xlabel( - r"Mean Normalized Compression Ratio ($\uparrow$)", + r"Mean Normalised Compression Ratio ($\uparrow$)", fontsize=16, ) metric_name = DISTORTION2LEGEND_NAME.get(distortion_metric, distortion_metric) plt.ylabel( - rf"Mean Normalized {metric_name} ($\downarrow$)", + rf"Mean Normalised {metric_name} ($\downarrow$)", fontsize=16, ) plt.legend( title="Compressor", - loc="upper right", - bbox_to_anchor=(0.8, 0.99), + loc="upper left", + # bbox_to_anchor=(0.8, 0.99), fontsize=12, title_fontsize=14, ) arrow_color = "black" - if "DSSIM" in distortion_metric: + if "dSSIM" in distortion_metric: + # Annotate dSSIM = 1, accounting for the normalization + dssim_one = getattr(np, f"nan{agg}")( + [(1 - ms[0]) / ms[1] for ms in mean_std.values()] + ) + plt.axhline(dssim_one, c="k", ls="--") + plt.text( + np.percentile(plt.xlim(), 63), + dssim_one, + "dSSIM = 1", + fontsize=16, + fontweight="bold", + color="black", + ha="center", + va="center", + bbox=dict(edgecolor="none", facecolor="w", alpha=0.85), + ) + # Add an arrow pointing into the top right corner plt.annotate( "", - xy=(0.95, 0.95), + xy=(0.95, 0.875 if remove_outliers else 0.9), xycoords="axes fraction", xytext=(-60, -50), textcoords="offset points", @@ -526,7 +585,7 @@ def _plot_aggregated_rd_curve( # Attach the text to the lower left of the arrow plt.text( 0.83, - 0.92, + 0.845 if remove_outliers else 0.87, "Better", transform=plt.gca().transAxes, fontsize=16, @@ -537,7 +596,7 @@ def _plot_aggregated_rd_curve( ) # Correct the y-label to point upwards plt.ylabel( - rf"Mean Normalized {metric_name} ($\uparrow$)", + rf"Mean Normalised {metric_name} ($\uparrow$)", fontsize=16, ) else: @@ -565,7 +624,7 @@ def _plot_aggregated_rd_curve( ha="center", ) if ( - "DSSIM" in distortion_metric + "dSSIM" in distortion_metric or "MaxAbsError" in distortion_metric or "SpectralError" in distortion_metric ): @@ -578,24 +637,23 @@ def _plot_aggregated_rd_curve( def _plot_throughput(df, outfile: None | Path = None): - # Transform throughput measurements from raw B/s to s/MB for better comparison - # with instruction count measurements. 
encode_col = "Encode Throughput [raw B / s]" decode_col = "Decode Throughput [raw B / s]" new_df = df[["Compressor", "Error Bound Name", encode_col, decode_col]].copy() - transformed_encode_col = "Encode Throughput [s / MB]" - transformed_decode_col = "Decode Throughput [s / MB]" - new_df[transformed_encode_col] = 1e6 / new_df[encode_col] - new_df[transformed_decode_col] = 1e6 / new_df[decode_col] + transformed_encode_col = "Encode Throughput [MiB / s]" + transformed_decode_col = "Decode Throughput [MiB / s]" + new_df[transformed_encode_col] = new_df[encode_col] / (2**20) + new_df[transformed_decode_col] = new_df[decode_col] / (2**20) encode_col, decode_col = transformed_encode_col, transformed_decode_col grouped_df = _get_median_and_quantiles(new_df, encode_col, decode_col) _plot_grouped_df( grouped_df, title="", - ylabel="Throughput [s / MB]", + ylabel="Throughput [MiB / s]", logy=True, outfile=outfile, + up=True, ) @@ -609,42 +667,56 @@ def _plot_instruction_count(df, outfile: None | Path = None): ylabel="Instructions [# / raw B]", logy=True, outfile=outfile, + up=False, ) def _get_median_and_quantiles(df, encode_column, decode_column): - return df.groupby(["Compressor", "Error Bound Name"])[ - [encode_column, decode_column] - ].agg( - encode_median=pd.NamedAgg( - column=encode_column, aggfunc=lambda x: x.quantile(0.5) - ), - encode_lower_quantile=pd.NamedAgg( - column=encode_column, aggfunc=lambda x: x.quantile(0.25) - ), - encode_upper_quantile=pd.NamedAgg( - column=encode_column, aggfunc=lambda x: x.quantile(0.75) - ), - decode_median=pd.NamedAgg( - column=decode_column, aggfunc=lambda x: x.quantile(0.5) - ), - decode_lower_quantile=pd.NamedAgg( - column=decode_column, aggfunc=lambda x: x.quantile(0.25) - ), - decode_upper_quantile=pd.NamedAgg( - column=decode_column, aggfunc=lambda x: x.quantile(0.75) - ), + return ( + df.groupby(["Compressor", "Error Bound Name"])[[encode_column, decode_column]] + .agg( + encode_median=pd.NamedAgg( + column=encode_column, aggfunc=lambda x: x.quantile(0.5) + ), + encode_lower_quantile=pd.NamedAgg( + column=encode_column, aggfunc=lambda x: x.quantile(0.25) + ), + encode_upper_quantile=pd.NamedAgg( + column=encode_column, aggfunc=lambda x: x.quantile(0.75) + ), + decode_median=pd.NamedAgg( + column=decode_column, aggfunc=lambda x: x.quantile(0.5) + ), + decode_lower_quantile=pd.NamedAgg( + column=decode_column, aggfunc=lambda x: x.quantile(0.25) + ), + decode_upper_quantile=pd.NamedAgg( + column=decode_column, aggfunc=lambda x: x.quantile(0.75) + ), + ) + .sort_index( + level=0, + key=lambda ks: [_COMPRESSOR_ORDER.index(_get_legend_name(k)) for k in ks], + ) ) def _plot_grouped_df( - grouped_df, title, ylabel, outfile: None | Path = None, logy=False + grouped_df, + title, + ylabel, + outfile: None | Path = None, + logy=False, + up=False, ): fig, axes = plt.subplots(1, 3, figsize=(18, 6), sharex=True, sharey=True) # Bar width bar_width = 0.35 - compressors = grouped_df.index.levels[0].tolist() + compressors = sorted( + grouped_df.index.levels[0].tolist(), + key=lambda k: _COMPRESSOR_ORDER.index(_get_legend_name(k)), + ) x_labels = [_get_legend_name(c) for c in compressors] x_positions = range(len(x_labels)) @@ -652,7 +724,10 @@ def _plot_grouped_df( for i, error_bound in enumerate(error_bounds): ax = axes[i] - bound_data = grouped_df.xs(error_bound, level="Error Bound Name") + bound_data = grouped_df.xs(error_bound, level="Error Bound Name").sort_index( + level=0, + key=lambda ks: [_COMPRESSOR_ORDER.index(_get_legend_name(k)) for k in ks], + ) # Plot 
encode throughput ax.bar( @@ -663,8 +738,13 @@ def _plot_grouped_df( bound_data["encode_lower_quantile"], bound_data["encode_upper_quantile"], ], - label="Encoding", + label="Compression", + edgecolor="white", + linewidth=0, color=[_get_lineinfo(comp)[0] for comp in compressors], + hatch=[ + "O" if comp.startswith("safeguarded-") else "" for comp in compressors + ], ) # Plot decode throughput @@ -676,10 +756,13 @@ def _plot_grouped_df( bound_data["decode_lower_quantile"], bound_data["decode_upper_quantile"], ], - label="Decoding", + label="Decompression", edgecolor=[_get_lineinfo(comp)[0] for comp in compressors], fill=False, linewidth=4, + hatch=[ + "O" if comp.startswith("safeguarded-") else "" for comp in compressors + ], ) # Add labels and title @@ -689,15 +772,18 @@ def _plot_grouped_df( ax.set_title(f"{error_bound.capitalize()} Error Bound", fontsize=14) ax.grid(axis="y", linestyle="--", alpha=0.7) if i == 0: - ax.legend(fontsize=14) + ax.legend( + fontsize=14, loc="lower left" if up else "upper left", framealpha=0.9 + ) ax.set_ylabel(ylabel, fontsize=14) + if i == 1: ax.annotate( "Better", - xy=(0.1, 0.8), + xy=(0.51, 0.75), xycoords="axes fraction", - xytext=(0.1, 0.95), + xytext=(0.51, 0.92), textcoords="axes fraction", - arrowprops=dict(arrowstyle="->", lw=3, color="black"), + arrowprops=dict(arrowstyle="<-" if up else "->", lw=3, color="black"), fontsize=14, ha="center", va="bottom", @@ -719,11 +805,11 @@ def _plot_bound_violations(df, bound_names, outfile: None | Path = None): df_bound["Compressor"] = df_bound["Compressor"].map(_get_legend_name) pass_fail = df_bound.pivot( index="Compressor", columns="Variable", values="Satisfies Bound (Passed)" - ) + ).sort_index(key=lambda ks: [_COMPRESSOR_ORDER.index(k) for k in ks]) pass_fail = pass_fail.astype(np.float32) fraction_fail = df_bound.pivot( index="Compressor", columns="Variable", values="Satisfies Bound (Value)" - ) + ).sort_index(key=lambda ks: [_COMPRESSOR_ORDER.index(k) for k in ks]) annotations = fraction_fail.map( lambda x: "{:.2f}".format(x * 100) if x * 100 >= 0.01 else "<0.01" ) diff --git a/src/climatebenchpress/compressor/scripts/compress.py b/src/climatebenchpress/compressor/scripts/compress.py index 98b71e4..df7d40f 100644 --- a/src/climatebenchpress/compressor/scripts/compress.py +++ b/src/climatebenchpress/compressor/scripts/compress.py @@ -87,6 +87,8 @@ def compress( ds_abs_maxs: dict[str, float] = dict() ds_mins: dict[str, float] = dict() ds_maxs: dict[str, float] = dict() + ds_min_2ds: dict[str, np.ndarray] = dict() + ds_max_2ds: dict[str, np.ndarray] = dict() for v in ds: vs: str = str(v) abs_vals = xr.ufuncs.abs(ds[v]) @@ -96,6 +98,16 @@ def compress( ds_abs_maxs[vs] = abs_vals.max().values.item() ds_mins[vs] = ds[v].min().values.item() ds_maxs[vs] = ds[v].max().values.item() + ds_min_2ds[vs] = ( + ds[v] + .min(dim=[ds[v].cf["Y"].name, ds[v].cf["X"].name], keepdims=True) + .values + ) + ds_max_2ds[vs] = ( + ds[v] + .max(dim=[ds[v].cf["Y"].name, ds[v].cf["X"].name], keepdims=True) + .values + ) if chunked: for v in ds: @@ -115,7 +127,14 @@ def compress( compressor_variants: dict[str, list[NamedPerVariableCodec]] = ( compressor.build( - ds_dtypes, ds_abs_mins, ds_abs_maxs, ds_mins, ds_maxs, error_bounds + ds_dtypes, + ds_abs_mins, + ds_abs_maxs, + ds_mins, + ds_maxs, + ds_min_2ds, + ds_max_2ds, + error_bounds, ) ) @@ -189,6 +208,15 @@ def compress_decompress( if not isinstance(codec, CodecStack): codec = CodecStack(codec) + # HACK: Safeguarded(0, dSSIM) requires the per-lat-lon-slice minimum + # and maximum + 
# for potentially-chunked data we should really use xarray-safeguards, + # but not using chunks also works (for now) + is_safeguarded_zero_dssim = ( + "# === pointwise dSSIM quantity of interest === #" + in json.dumps(codec.get_config()) + ) + with numcodecs_observers.observe( codec, observers=[ @@ -197,26 +225,42 @@ def compress_decompress( timing, ], ) as codec_: - variables[v] = codec_.encode_decode_data_array(ds[v]).compute() - - measurements[v] = { - "encoded_bytes": sum( - b.post for b in nbytes.encode_sizes[HashableCodec(codec[-1])] - ), - "decoded_bytes": sum( - b.post for b in nbytes.decode_sizes[HashableCodec(codec[0])] - ), - "encode_timing": sum(t for ts in timing.encode_times.values() for t in ts), - "decode_timing": sum(t for ts in timing.decode_times.values() for t in ts), - "encode_instructions": sum( - i for is_ in instructions.encode_instructions.values() for i in is_ - ) - or None, - "decode_instructions": sum( - i for is_ in instructions.decode_instructions.values() for i in is_ - ) - or None, - } + variables[v] = codec_.encode_decode_data_array( + ds[v].compute() if is_safeguarded_zero_dssim else ds[v] + ).compute() + + cs = [c._codec for c in codec_.__iter__()] + + measurements[v] = { + # bytes measurements: only look at the first and last codec in + # the top level stack, which gives the total encoded and + # decoded sizes + "encoded_bytes": sum( + b.post for b in nbytes.encode_sizes[HashableCodec(cs[-1])] + ), + "decoded_bytes": sum( + b.post for b in nbytes.decode_sizes[HashableCodec(cs[0])] + ), + # time measurements: only sum over the top level stack members + # to avoid double counting from nested codec combinators + "encode_timing": sum( + t for c in cs for t in timing.encode_times[HashableCodec(c)] + ), + "decode_timing": sum( + t for c in cs for t in timing.decode_times[HashableCodec(c)] + ), + # encode instructions: sum over all codecs since WASM + # instruction counts are currently not aggregated in codec + # combinators + "encode_instructions": sum( + i for is_ in instructions.encode_instructions.values() for i in is_ + ) + or None, + "decode_instructions": sum( + i for is_ in instructions.decode_instructions.values() for i in is_ + ) + or None, + } return xr.Dataset(variables, coords=ds.coords, attrs=ds.attrs), measurements
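A minimal end-to-end smoke test for the new safeguarded compressors; this is a sketch that assumes `numcodecs_safeguards.SafeguardedCodec` exposes the standard numcodecs `encode`/`decode` interface (it is passed around as a plain `Codec` throughout this patch), and the array and bound below are purely illustrative:

```python
import numpy as np

from climatebenchpress.compressor.compressors import SafeguardedSz3

data = np.random.default_rng(0).normal(size=(16, 32)).astype(np.float32)
error_bound = 1e-2

codec = SafeguardedSz3.abs_bound_codec(error_bound)
decoded = np.frombuffer(
    codec.decode(codec.encode(data)), dtype=data.dtype
).reshape(data.shape)

# the point of the safeguards: the absolute error bound holds pointwise,
# with lossless corrections applied wherever SZ3 alone would violate it
assert np.max(np.abs(decoded - data)) <= error_bound
```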