From 7d83ff919804c9e507c1ab521393e792359b9d02 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Fri, 6 Mar 2026 16:36:23 -0500 Subject: [PATCH 01/31] begin scaffolding --- pyproject.toml | 5 + src/globi/models/surrogate/__init__.py | 1 + src/globi/models/surrogate/dummy.py | 39 + src/globi/models/surrogate/training.py | 1101 ++++++++++++++++++ src/globi/pipelines/__init__.py | 13 + src/globi/{pipelines.py => pipelines/gis.py} | 227 +--- src/globi/pipelines/simulations.py | 235 ++++ src/globi/pipelines/training.py | 323 +++++ src/globi/worker/Dockerfile | 3 +- src/globi/worker/main.py | 23 +- uv.lock | 52 +- 11 files changed, 1792 insertions(+), 230 deletions(-) create mode 100644 src/globi/models/surrogate/__init__.py create mode 100644 src/globi/models/surrogate/dummy.py create mode 100644 src/globi/models/surrogate/training.py create mode 100644 src/globi/pipelines/__init__.py rename src/globi/{pipelines.py => pipelines/gis.py} (58%) create mode 100644 src/globi/pipelines/simulations.py create mode 100644 src/globi/pipelines/training.py diff --git a/pyproject.toml b/pyproject.toml index 44de639..6f33108 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,11 @@ visualization = [ "playwright>=1.40.0", ] +ml = [ + "lightgbm>=4.6.0", + "xgboost>=3.2.0", +] + cli = [ "click>=8.1.7", "xlsxwriter>=3.2.9", diff --git a/src/globi/models/surrogate/__init__.py b/src/globi/models/surrogate/__init__.py new file mode 100644 index 0000000..d5affc9 --- /dev/null +++ b/src/globi/models/surrogate/__init__.py @@ -0,0 +1 @@ +"""Models used for the surrogate pipeline.""" diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py new file mode 100644 index 0000000..0617998 --- /dev/null +++ b/src/globi/models/surrogate/dummy.py @@ -0,0 +1,39 @@ +"""Dummy simulation for testing.""" + +from pathlib import Path + +import pandas as pd +from scythe.base import ExperimentInputSpec, ExperimentOutputSpec +from 
scythe.registry import ExperimentRegistry + + +class DummySimulationInput(ExperimentInputSpec): + """The input for the dummy simulation.""" + + a: int + b: float + + +class DummySimulationOutput(ExperimentOutputSpec): + """The output for the dummy simulation.""" + + c: float + + +@ExperimentRegistry.Register( + description="A dummy simulation.", +) +def dummy_simulation( + input_spec: DummySimulationInput, tempdir: Path +) -> DummySimulationOutput: + """A dummy simulation.""" + df = pd.DataFrame({ + "target_0": [input_spec.a + input_spec.b], + "target_1": [input_spec.a - input_spec.b], + "target_2": [input_spec.a * input_spec.b], + "target_3": [input_spec.a / input_spec.b], + }) + df = df.set_index(input_spec.make_multiindex()) + return DummySimulationOutput( + c=input_spec.a + input_spec.b, dataframes={"main_result": df} + ) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py new file mode 100644 index 0000000..e695715 --- /dev/null +++ b/src/globi/models/surrogate/training.py @@ -0,0 +1,1101 @@ +"""Models used for the surrogate training pipeline.""" + +from functools import cached_property +from pathlib import Path +from typing import TYPE_CHECKING, Literal, cast + +import numpy as np +import pandas as pd +from pydantic import BaseModel, Field, model_validator +from scythe.base import BaseSpec, ExperimentInputSpec +from scythe.scatter_gather import RecursionMap +from scythe.settings import ScytheStorageSettings +from scythe.utils.filesys import FileReference, OptionalFileReference + +if TYPE_CHECKING: + from mypy_boto3_s3.client import S3Client as S3ClientType +else: + S3ClientType = object + + +class ConvergenceThresholds(BaseModel): + """The thresholds for convergence.""" + + mae: float = Field(default=0.5, description="The maximum MAE for convergence.") + rmse: float = Field(default=0.5, description="The maximum RMSE for convergence.") + mape: float = Field(default=0.15, description="The maximum MAPE for convergence.") 
+ r2: float = Field(default=0.95, description="The minimum R2 for convergence.") + cvrmse: float = Field( + default=0.05, description="The maximum CV_RMSE for convergence." + ) + + @property + def thresholds(self) -> pd.Series: + """The thresholds for convergence.""" + return pd.Series(self.model_dump(), name="metric") + + def check_convergence(self, metrics: pd.Series): + """Check if the metrics have converged. + + Note that this requires the metrics data frame to have the following shape: + + """ + thresholds = pd.Series(self.model_dump(), name="metric") + + # first, we will select the appropriate threshold for each metric + comparators = thresholds.loc[metrics.index.get_level_values("metric")] + # we can then copy over the index safely + comparators.index = metrics.index + + # next, we will flip the sign of the r2 metric since it is a maximization metric rather thin min + metrics = metrics * np.where( + metrics.index.get_level_values("metric") == "r2", -1, 1 + ) + comparators = comparators * np.where( + comparators.index.get_level_values("metric") == "r2", -1, 1 + ) + + # run the comparisons + comparison = metrics < comparators + + # now we will groupby the stratum (e.g. features.weather.file) + # and by the target (e.g. Electricity, Gas, etc.) 
+ # we are converged if any of the metrics have converged for that target + # in that stratum + comparison_stratum_and_target = comparison.groupby( + level=[lev for lev in comparison.index.names if lev != "metric"] + ).any() + + # then we will check that all targets have converged for each stratum + comparison_strata = comparison_stratum_and_target.groupby(level="stratum").all() + + # finally, we will check that all strata have converged + comparison_all = comparison_strata.all() + + return ( + comparison_all, + comparison_strata, + comparison_stratum_and_target, + comparison, + ) + + +class XGBHyperparameters(BaseModel): + """The parameters for the xgboost model.""" + + max_depth: int = Field(default=5, description="The maximum depth of the tree.") + eta: float = Field(default=0.1, description="The learning rate.") + min_child_weight: int = Field(default=3, description="The minimum child weight.") + subsample: float = Field(default=0.8, description="The subsample rate.") + colsample_bytree: float = Field( + default=0.8, description="The column sample by tree rate." + ) + alpha: float = Field(default=0.01, description="The alpha parameter.") + lam: float = Field(default=0.01, description="The lambda parameter.") + gamma: float = Field(default=0.01, description="The gamma parameter.") + + +class LGBHyperparameters(BaseModel): + """The parameters for the lightgbm model.""" + + objective: Literal["regression", "binary", "multiclass"] = Field( + default="regression", description="The objective function to use." + ) + metric: Literal["rmse"] = Field( + default="rmse", description="The metric to optimize." + ) + # TODO: add other parameters as needed + + +ModelHPType = XGBHyperparameters | LGBHyperparameters + + +class StratificationSpec(BaseModel): + """A spec for stratifying the data.""" + + field: str = Field( + default="feature.weather.file", description="The field to stratify by." 
+ ) + sampling: Literal["equal", "error-weighted", "proportional"] = Field( + default="equal", + description="The sampling method to use over the strata.", + ) + aliases: list[str] = Field( + default_factory=lambda: ["epwzip_path", "epw_path"], + description="The alias to use for the stratum as a fallback.", + ) + + # TODO: consider allowing the stratification to be a compound with e.g. component_map_uri and semantic_fields_uri and database_uri + + +class CrossValidationSpec(BaseModel): + """The cross validation spec.""" + + n_folds: int = Field( + default=5, description="The number of folds for the entire parent task." + ) + + +class IterationSpec(BaseModel): + """The iteration spec.""" + + n_init: int = Field(default=10000, description="The number of initial samples.") + min_per_stratum: int = Field( + default=100, description="The minimum number of samples per stratum." + ) + n_per_iter: int = Field( + default=10000, + description="The number of samples to add per each iteration of the outer loop.", + ) + max_iters: int = Field( + default=100, + description="The maximum number of outer loop iterations to perform.", + ) + recursion: RecursionMap = Field( + default_factory=lambda: RecursionMap(factor=100, max_depth=1), + description="The recursion spec.", + ) + + +# TODO: should this be a subclass of ExperimentInputSpec? 
+class ProgressiveTrainingSpec(BaseSpec): + """A spec for iteratively training an SBEM regression model.""" + + convergence_criteria: ConvergenceThresholds = Field( + default_factory=ConvergenceThresholds, + description="The convergence criteria.", + ) + model_hyperparameters: ModelHPType = Field( + default_factory=LGBHyperparameters, + description="The hyperparameters for the model.", + ) + stratification: StratificationSpec = Field( + default_factory=StratificationSpec, + description="The stratification spec.", + ) + cross_val: CrossValidationSpec = Field( + default_factory=CrossValidationSpec, + description="The cross validation spec.", + ) + iteration: IterationSpec = Field( + default_factory=IterationSpec, + description="The iteration spec.", + ) + gis_uri: FileReference = Field( + ..., + description="The uri of the gis data to train on.", + ) + storage_settings: ScytheStorageSettings = Field( + default=..., + description="The storage settings to use.", + ) + + @property + def gis_path(self) -> Path: + """The path to the gis data.""" + if isinstance(self.gis_uri, Path): + return self.gis_uri + return self.fetch_uri(self.gis_uri) + + @cached_property + def gis_data(self) -> pd.DataFrame: + """Load the gis data.""" + return pd.read_parquet(self.gis_path) + + # def s3_key_for_iteration(self, iteration_ix: int) -> str: + # """The s3 root key for the iteration.""" + # return f"{self.experiment_id}/iter-{iteration_ix:03d}" + + # def upload_self(self, s3_client: S3ClientType): + # """Upload a dumpout of this spec to the s3 bucket root.""" + # with tempfile.TemporaryDirectory() as tempdir: + # tempdir = Path(tempdir) + # fpath = tempdir / "spec.yml" + # with open(fpath, "w") as f: + # yaml.dump(self.model_dump(mode="json"), f, indent=2) + # s3_client.upload_file( + # fpath.as_posix(), + # self.bucket, + # f"hatchet/{self.experiment_id}/artifacts/experiment-spec.yml", + # ) + + +class StageSpec(BaseModel): + """A spec that is common to both the sample and train stages 
(and possibly others).""" + + progressive_training_spec: ProgressiveTrainingSpec = Field( + ..., + description="The progressive training spec.", + ) + progressive_training_iteration_ix: int = Field( + ..., + description="The index of the current training iteration within the outer loop.", + ) + data_uri: OptionalFileReference = Field( + ..., + description="The uris of the previous simulation results to sample from.", + ) + stage_type: Literal["sample", "train"] = Field( + ..., + description="The type of stage.", + ) + + @cached_property + def random_generator(self) -> np.random.Generator: + """The random generator.""" + return np.random.default_rng(self.progressive_training_iteration_ix) + + # @cached_property + # def experiment_key(self) -> str: + # """The root key for the experiment.""" + # return f"{self.progressive_training_spec.s3_key_for_iteration(self.progressive_training_iteration_ix)}/{self.stage_type}" + + # def load_previous_data(self, s3_client: S3ClientType) -> pd.DataFrame | None: + # """Load the previous data.""" + # if self.data_uri is None: + # return None + # with tempfile.TemporaryDirectory() as tmpdir: + # tmpdir = Path(tmpdir) + # fpath = tmpdir / "previous_data.parquet" + # fetch_uri( + # uri=self.data_uri, + # local_path=fpath, + # use_cache=False, + # s3=s3_client, + # ) + # df = pd.read_parquet(fpath) + # return df + + +# BASE EXPERIMENT/v1.0.0 +# BASE EXPERIMENT/v1.0.0/simulations/v1.0.0/[...] +# BASE EXPERIMENT/v1.0.0/training/v1.0.0/[...] +# BASE EXPERIMENT/v1.0.0/simulations/v2.0.0/[...] +# BASE EXPERIMENT/v1.0.0/training/v2.0.0/[...] +# BASE EXPERIMENT/v1.0.0/simulations/v2.0.0/[...] +# BASE EXPERIMENT/v1.0.0/training/v3.0.0/[...] 
+ + +class SampleSpec(StageSpec): + """A spec for thhe sampling stage of the progressive training.""" + + # TODO: add the ability to receive the last set of error metrics and use them to inform the sampling + + def stratified_selection(self) -> pd.DataFrame: + """Sample the gis data.""" + df = self.progressive_training_spec.gis_data + + stratification_field = self.progressive_training_spec.stratification.field + stratification_aliases = self.progressive_training_spec.stratification.aliases + + if stratification_field not in df.columns and not any( + alias in df.columns for alias in stratification_aliases + ): + msg = f"Stratification field {stratification_field} not found in gis data. Please check the field name and/or the aliases." + raise ValueError(msg) + + if stratification_field not in df.columns: + stratification_field = next( + alias for alias in stratification_aliases if alias in df.columns + ) + + strata = cast(list[str], df[stratification_field].unique().tolist()) + + if self.progressive_training_spec.stratification.sampling == "equal": + return self.sample_equally_by_stratum(df, strata, stratification_field) + elif self.progressive_training_spec.stratification.sampling == "error-weighted": + msg = "Error-weighted sampling is not yet implemented." + raise NotImplementedError(msg) + elif self.progressive_training_spec.stratification.sampling == "proportional": + msg = "Proportional sampling is not yet implemented." + raise NotImplementedError(msg) + else: + msg = f"Invalid sampling method: {self.progressive_training_spec.stratification.sampling}" + raise ValueError(msg) + + def sample_equally_by_stratum( + self, df: pd.DataFrame, strata: list[str], stratification_field: str + ) -> pd.DataFrame: + """Sample equally by stratum. + + This will break the dataframe up into n strata and ensure that each strata ends up with the same number of samples. + + Args: + df (pd.DataFrame): The dataframe to sample from. 
+ strata (list[str]): The unique values of the strata. + stratification_field (str): The field to stratify the data by. + + Returns: + samples (pd.DataFrame): The sampled dataframe. + """ + stratum_dfs = { + stratum: df[df[stratification_field] == stratum] for stratum in strata + } + n_per_iter = ( + self.progressive_training_spec.iteration.n_per_iter + if self.progressive_training_iteration_ix != 0 + else self.progressive_training_spec.iteration.n_init + ) + n_per_stratum = max( + n_per_iter // len(strata), + ( + self.progressive_training_spec.iteration.min_per_stratum + if self.progressive_training_iteration_ix == 0 + else 0 + ), + ) + + # TODO: consider how we want to handle potentially having the same geometry appear in both + # the training and testing sets. + # if any(len(stratum_df) < n_per_stratum for stratum_df in stratum_dfs.values()): + # msg = "There are not enough buildings in some strata to sample the desired number of buildings per stratum." + # # connsider making this a warning? + # raise ValueError(msg) + + sampled_strata = { + stratum: stratum_df.sample( + n=n_per_stratum, random_state=self.random_generator, replace=True + ) + for stratum, stratum_df in stratum_dfs.items() + } + return cast(pd.DataFrame, pd.concat(sampled_strata.values())) + + # def sample_semantic_fields(self, df: pd.DataFrame) -> pd.DataFrame: + # """Sample the semantic fields.""" + # # TODO: consider randomizing the locations? 
+ # semantic_fields = self.progressive_training_spec.semantic_fields_data + # for field in semantic_fields.Fields: + # if isinstance(field, CategoricalFieldSpec): + # options = field.Options + # df[field.Name] = self.random_generator.choice(options, size=len(df)) + # elif isinstance(field, NumericFieldSpec): + # df[field.Name] = self.random_generator.uniform( + # field.Min, field.Max, size=len(df) + # ) + # else: + # msg = f"Invalid field type: {type(field)}" + # raise TypeError(msg) + # return df + + # def sample_basements_and_attics(self, df: pd.DataFrame) -> pd.DataFrame: + # """Add basement/attics to models.""" + # # get the options for the type literal + # options: list[BasementAtticOccupationConditioningStatus] = [ + # "none", + # "occupied_unconditioned", + # "unoccupied_unconditioned", + # "occupied_conditioned", + # "unoccupied_conditioned", + # ] + # weights = [0.5, *([0.5 / 4] * 4)] + # # sample the type literal + # df["basement"] = self.random_generator.choice(options, size=len(df), p=weights) + # df["attic"] = self.random_generator.choice(options, size=len(df), p=weights) + # df["exposed_basement_frac"] = self.random_generator.uniform( + # 0.1, 0.5, size=len(df) + # ) + # return df + + # def sample_wwrs(self, df: pd.DataFrame) -> pd.DataFrame: + # """Sample the wwrs.""" + # wwr_min = 0.05 + # wwr_max = 0.35 + # df["wwr"] = self.random_generator.uniform(wwr_min, wwr_max, size=len(df)) + # return df + + # def sample_f2f_heights(self, df: pd.DataFrame) -> pd.DataFrame: + # """Sample the f2f heights.""" + # f2f_min = 2.3 + # f2f_max = 4.3 + # df["f2f_height"] = self.random_generator.uniform(f2f_min, f2f_max, size=len(df)) + # return df + + def to_sim_specs(self, df: pd.DataFrame): + """Convert the sampled dataframe to a list of simulation specs. + + For now, we are assuming that all the other necessary fields are present and we are just + ensuring that sort_index and experiment_id are set appropriately. 
+ """ + # df["semantic_field_context"] = df.apply( + # lambda row: { + # field.Name: row[field.Name] + # for field in self.progressive_training_spec.semantic_fields_data.Fields + # }, + # axis=1, + # ) + # df["sort_index"] = np.arange(len(df)) + # df["experiment_id"] = self.experiment_key + # # TODO: consider allowing the component map/semantic_fields/database to be inherited from the row + # # e.g. to allow multiple component maps and dbs per run. + # df["component_map_uri"] = str(self.progressive_training_spec.component_map_uri) + # df["semantic_fields_uri"] = str( + # self.progressive_training_spec.semantic_fields_uri + # ) + # df["db_uri"] = str(self.progressive_training_spec.database_uri) + return df + + # def make_payload(self, s3_client: S3ClientType): + # """Make the payload for the scatter gather task, including generating the simulation specs and serializing them to s3.""" + # df = self.stratified_selection() + # # df = self.sample_semantic_fields(df) + # # df = self.sample_basements_and_attics(df) + # # df = self.sample_wwrs(df) + # # df = self.sample_f2f_heights(df) + # df = self.to_sim_specs(df) + # # serialize to a parquet file and upload to s3 + # bucket = self.progressive_training_spec.storage_settings.BUCKET + # with tempfile.TemporaryDirectory() as tmpdir: + # tmpdir = Path(tmpdir) + # fpath = tmpdir / "specs.pq" + # df.to_parquet(fpath) + # key = f"hatchet/{self.experiment_key}/specs.pq" + # specs_uri = f"s3://{bucket}/{key}" + # s3_client.upload_file(fpath.as_posix(), bucket, key) + + # payload = { + # "specs": specs_uri, + # "bucket": bucket, + # "workflow_name": "simulate_sbem_shoebox", + # "experiment_id": self.experiment_key, + # "recursion_map": { + # "factor": self.progressive_training_spec.iteration.recursion_factor, + # "max_depth": self.progressive_training_spec.iteration.recursion_max_depth, + # }, + # } + # return payload + + # def combine_results(self, new_data_uri: URIResponse, s3_client: S3ClientType): + # """Combine the results of 
the previous and new data.""" + # previous_data = self.load_previous_data(s3_client) + # with tempfile.TemporaryDirectory() as tmpdir: + # tmpdir = Path(tmpdir) + # fpath = tmpdir / "new_data.parquet" + # fetch_uri( + # uri=new_data_uri.uri, local_path=fpath, use_cache=False, s3=s3_client + # ) + # # TODO: data frame subsection selection should be a configuration option within the + # # progressive iteration training spec. + # df = cast( + # pd.DataFrame, + # cast(pd.DataFrame, pd.read_hdf(fpath, key="results")), + # ) + # if previous_data is not None: + # df = pd.concat([previous_data, df], axis=0) + + # # strip out any constant columns + # is_all_zeros = (df.max(axis=0) - df.min(axis=0)).abs() < 1e-5 + # df = df.loc[:, ~is_all_zeros] + # # serialize to a parquet file and upload to s3 + # bucket = self.progressive_training_spec.bucket + # with tempfile.TemporaryDirectory() as tmpdir: + # tmpdir = Path(tmpdir) + # fpath = tmpdir / "results.parquet" + # df.to_parquet(fpath) + # key = f"hatchet/{self.experiment_key}/full-dataset.pq" + # specs_uri = f"s3://{bucket}/{key}" + # s3_client.upload_file(fpath.as_posix(), bucket, key) + # return specs_uri + + @model_validator(mode="after") + def check_stage(self): + """The sampling spec must have stage set to 'sample'.""" + if self.stage_type != "sample": + msg = f"Invalid stage: {self.stage_type}" + raise ValueError(msg) + return self + + +class TrainFoldSpec(ExperimentInputSpec): + """Train an sbem model for a specific fold. + + The fold is determined by the sort_index, which does mean we need to know the n_folds. + + We will need to know: + - where the data is + - the desired stratification (e.g. feature.weather.file) + - how to divide the data into training and testing splits given the desired stratification + + The data uri should be assumed to have features in the index and targets in the columns. + + TODO: consider the potential for leakage when a stratum has few buildings! 
+ + First, we will subdivide the data into its strata. + + Then for each stratum, we will create a train/test split according to the fold index. + + We wish to return validation metrics with the following hierarchy for the column index + - train/test ["split_segment"] + - loc1/loc2 ... ["stratum"] + - mae/rmse/r2/... ["metric"] + + Theoretically, we also might want to pass in normalization specifications for features and/or targets. + However, with xgb, this is less imperative. + """ + + n_folds: int = Field( + ..., description="The number of folds for the entire parent task." + ) + data_uri: FileReference = Field(..., description="The uri of the data to train on.") + stratification_field: str = Field( + ..., + description="The field to stratify the data by for monitoring convergence in parent task.", + ) + progressive_training_iter_ix: int = Field( + ..., + description="The index of the current training iteration within the outer loop.", + ) + + @property + def data_path(self) -> Path: + """The path to the data.""" + if isinstance(self.data_uri, Path): + return self.data_uri + return self.fetch_uri(self.data_uri) + + @cached_property + def data(self) -> pd.DataFrame: + """The data.""" + df_all = pd.read_parquet(self.data_path) + df_energy: pd.DataFrame = cast(pd.DataFrame, df_all["Energy"]["Raw"]) + df_energy = cast( + pd.DataFrame, + ( + df_energy.T.groupby( + level=[ + lev for lev in df_energy.columns.names if lev.lower() != "month" + ] + ) + .sum() + .T + ), + ) + df_peaks: pd.DataFrame = cast(pd.DataFrame, df_all["Peak"]["Raw"]) + df_peaks = cast( + pd.DataFrame, + ( + df_peaks.T.groupby( + level=[ + lev for lev in df_peaks.columns.names if lev.lower() != "month" + ] + ) + .max() + .T + ), + ) + df_all_annual = pd.concat( + [df_energy, df_peaks], + axis=1, + keys=["Energy", "Peak"], + names=["Measurement"], + ) + # TODO: should we assume they are shuffled already? 
+ # shuffle the order of the rows + df_all_annual = df_all_annual.sample(frac=1, random_state=42, replace=False) + return df_all_annual + + @cached_property + def dparams(self) -> pd.DataFrame: + """The index of the data.""" + return self.data.index.to_frame() + + @cached_property + def stratum_names(self) -> list[str]: + """The values of the stratification field.""" + return sorted(self.dparams[self.stratification_field].unique().tolist()) + + @cached_property + def data_by_stratum(self) -> dict[str, pd.DataFrame]: + """Subdivide the data by the stratification field. + + We want 1/n_folds data in the test segment for each stratification option, + so we will need to compute train/test splits separately for each stratum. + + This would not be necessary if we knew that the strata always had equal representation, but + since we might use things like adaptive sampling or generating samples proportionally to the number of buildings in that stratum, + e.g. by population, then what *could* happen if we just did a random train/test split is that some strata might end up + entirely in the train set. + """ + return { + val: cast( + pd.DataFrame, self.data[self.dparams[self.stratification_field] == val] + ) + for val in self.stratum_names + } + + @cached_property + def train_test_split_by_fold_and_stratum(self) -> pd.DataFrame: + """Create the folds for the data. + + To do this, we will go to each stratum and use a strided step to + construct each fold, then assign the fold matching the sort_index + to the test split. We also recombine the strata since they are now + safely stratified. 
+ """ + all_strata = [] + for val in self.stratum_names: + folds = [] + for i in range(self.n_folds): + fold = self.data_by_stratum[val].iloc[i :: self.n_folds] + folds.append(fold) + folds_df = pd.concat( + folds, + axis=0, + keys=[ + "test" if i == self.sort_index else "train" + for i in range(self.n_folds) + ], + names=["split_segment"], + ) + all_strata.append(folds_df) + return pd.concat(all_strata) + + @cached_property + def train_segment(self) -> tuple[pd.DataFrame, pd.DataFrame]: + """Get the training segment.""" + train_df = cast( + pd.DataFrame, + self.train_test_split_by_fold_and_stratum.xs( + "train", level="split_segment" + ), + ) + params = train_df.index.to_frame(index=False) + targets = train_df + return params, targets + + @cached_property + def test_segment(self) -> tuple[pd.DataFrame, pd.DataFrame]: + """Get the test segment.""" + test_df = cast( + pd.DataFrame, + self.train_test_split_by_fold_and_stratum.xs("test", level="split_segment"), + ) + params = test_df.index.to_frame(index=False) + targets = test_df + return params, targets + + @cached_property + def non_numeric_options(self) -> dict[str, list[str]]: + """Get the non-numeric options for categorical features. + + We must perform this across the entire dataset not just splits for consistency + and to ensure we get all options. + + TODO: In the future, this should be based off of transform instructions. + """ + fparams = self.dparams[ + [col for col in self.dparams.columns if col.startswith("feature.")] + ] + non_numeric_cols = fparams.select_dtypes(include=["object"]).columns + non_numeric_options = { + col: sorted(cast(pd.Series, fparams[col]).unique().tolist()) + for col in non_numeric_cols + } + return non_numeric_options + + # @cached_property + # def numeric_min_maxs(self) -> dict[str, tuple[float, float]]: + # """Get the min and max for numeric features. + + # We perform this only on the training set to prevent leakage. 
+ + # TODO: In the future, this should be based off of transform instructions. + + # Args: + # params (pd.DataFrame): The parameters to get the min and max for. + + # Returns: + # norm_bounds (dict[str, tuple[float, float]]): The min and max for each numeric feature. + # """ + # params, _ = self.train_segment + # fparams = params[[col for col in params.columns if col.startswith("feature.")]] + # numeric_cols = fparams.select_dtypes(include=["number"]).columns + # numeric_min_maxs = { + # col: (float(fparams[col].min()), float(fparams[col].max())) + # for col in numeric_cols + # } + # for col in numeric_min_maxs: + # low, high = numeric_min_maxs[col] + # # we want to floor the "low" value down to the nearest 0.001 + # # and ceil the "high" value up to the nearest 0.001 + # # e.g. if low is -0.799, we want to set it to -0.800 + # # and if high is 0.799, we want to set it to 0.800 + # numeric_min_maxs[col] = ( + # math.floor(low * 1000) / 1000, + # math.ceil(high * 1000) / 1000, + # ) + # return numeric_min_maxs + + # @cached_property + # def feature_spec(self) -> RegressorInputSpec: + # """Get the feature spec which can be serialized and reloaded.""" + # params, _ = self.train_segment + # features: list[CategoricalFeature | ContinuousFeature] = [] + # for col in params.columns: + # if col in self.numeric_min_maxs: + # low, high = self.numeric_min_maxs[col] + # features.append( + # ContinuousFeature(name=col, min=float(low), max=float(high)) + # ) + # elif col in self.non_numeric_options: + # opts = self.non_numeric_options[col] + # features.append(CategoricalFeature(name=col, values=opts)) + # return RegressorInputSpec(features=features) + + # def normalize_params(self, params: pd.DataFrame) -> pd.DataFrame: + # """Normalize the params.""" + # regressor_spec = self.feature_spec + # fparams = regressor_spec.transform(params, do_check=False) + # return fparams + + # def run( + # self, + # ): + # """Train the model.""" + # train_params, train_targets = 
self.train_segment + # test_params, test_targets = self.test_segment + + # # select/transform the params as necessary + # train_params = self.normalize_params(train_params) + # test_params = self.normalize_params(test_params) + + # # Train the model + # # train_preds, test_preds = self.train_xgboost( + # # train_params, train_targets, test_params, test_targets + # # ) + # s3_client = boto3.client("s3") + # train_preds, test_preds = self.train_lightgbm( + # train_params, train_targets, test_params, test_targets, s3_client + # ) + + # # compute the metrics + # global_train_metrics, stratum_train_metrics = self.compute_metrics( + # train_preds, train_targets + # ) + # global_test_metrics, stratum_test_metrics = self.compute_metrics( + # test_preds, test_targets + # ) + + # global_metrics = pd.concat( + # [global_train_metrics, global_test_metrics], + # axis=1, + # keys=["train", "test"], + # names=["split_segment"], + # ) + # stratum_metrics = pd.concat( + # [stratum_train_metrics, stratum_test_metrics], + # axis=1, + # keys=["train", "test"], + # names=["split_segment"], + # ) + # return { + # "global_metrics": global_metrics, + # "stratum_metrics": stratum_metrics, + # } + + # def compute_frame_metrics( + # self, preds: pd.DataFrame, targets: pd.DataFrame + # ) -> pd.DataFrame: + # """Compute the metrics.""" + # from sklearn.metrics import ( + # mean_absolute_error, + # mean_absolute_percentage_error, + # mean_squared_error, + # r2_score, + # ) + + # mae = mean_absolute_error(targets, preds, multioutput="raw_values") + # mse = mean_squared_error(targets, preds, multioutput="raw_values") + # rmse = np.sqrt(mse) + # r2 = r2_score(targets, preds, multioutput="raw_values") + # cvrmse = rmse / (targets.mean(axis=0) + 1e-5) + # mape = mean_absolute_percentage_error( + # targets + 1e-5, + # preds, + # multioutput="raw_values", + # ) + + # metrics = pd.DataFrame( + # { + # "mae": mae, + # "rmse": rmse, + # "r2": r2, + # "cvrmse": cvrmse, + # "mape": mape, + # }, + # ) + # 
metrics.columns.names = ["metric"] + # metrics.index.names = ["measurement", "target"] + # return metrics + + # def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): + # """Compute the metrics.""" + # global_metrics = self.compute_frame_metrics(preds, targets) + # stratum_metric_dfs = {} + # for stratum_name in self.stratum_names: + # stratum_targets = cast( + # pd.DataFrame, targets.xs(stratum_name, level=self.stratification_field) + # ) + # stratum_preds = cast( + # pd.DataFrame, preds.xs(stratum_name, level=self.stratification_field) + # ) + # metrics = self.compute_frame_metrics(stratum_preds, stratum_targets) + # stratum_metric_dfs[stratum_name] = metrics + + # stratum_metrics = pd.concat( + # stratum_metric_dfs, + # axis=1, + # keys=self.stratum_names, + # names=["stratum"], + # ) + # global_metrics = ( + # global_metrics.set_index( + # pd.Index( + # [self.sort_index] * len(global_metrics), + # name="sort_index", + # ), + # append=True, + # ) + # .set_index( + # pd.Index( + # [self.progressive_training_iter_ix] * len(global_metrics), + # name="progressive_training_iter_ix", + # ), + # append=True, + # ) + # .unstack(level="target") + # ) + + # stratum_metrics = ( + # stratum_metrics.set_index( + # pd.Index( + # [self.sort_index] * len(stratum_metrics), + # name="sort_index", + # ), + # append=True, + # ) + # .set_index( + # pd.Index( + # [self.progressive_training_iter_ix] * len(stratum_metrics), + # name="progressive_training_iter_ix", + # ), + # append=True, + # ) + # .unstack(level="target") + # ) + # return global_metrics, stratum_metrics + + # def train_lightgbm( + # self, + # train_params: pd.DataFrame, + # train_targets: pd.DataFrame, + # test_params: pd.DataFrame, + # test_targets: pd.DataFrame, + # s3_client: S3ClientType | None = None, + # ): + # """Train the lightgbm model.""" + # import lightgbm as lgb + + # lgb_params = { + # "objective": "regression", + # "metric": "rmse", + # } + # test_preds = {} + # train_preds = {} + # for 
col in train_targets.columns: + # lgb_train_data = lgb.Dataset(train_params, label=train_targets[col]) + # lgb_test_data = lgb.Dataset(test_params, label=test_targets[col]) + # model = lgb.train( + # lgb_params, + # lgb_train_data, + # num_boost_round=4000, + # valid_sets=[lgb_test_data], + # valid_names=["eval"], + # callbacks=[lgb.early_stopping(20)], + # ) + # test_preds[col] = pd.Series( + # cast(np.ndarray, model.predict(test_params)), + # index=test_targets.index, + # name=col, + # ) + # train_preds[col] = pd.Series( + # cast(np.ndarray, model.predict(train_params)), + # index=train_targets.index, + # name=col, + # ) + # if s3_client is not None: + # model_name = ( + # f"{col}.lgb" + # if not isinstance(col, tuple) + # else f"{'.'.join(col)}.lgb" + # ) + # model_key = self.format_model_key(model_name) + # model_str = model.model_to_string() + # s3_client.put_object(Bucket=self.bucket, Key=model_key, Body=model_str) + + # if s3_client is not None: + # import yaml + + # space_key = self.format_model_key("space.yml") + # space_str = yaml.dump( + # self.feature_spec.model_dump(mode="json"), indent=2, sort_keys=False + # ) + # s3_client.put_object(Bucket=self.bucket, Key=space_key, Body=space_str) + + # test_preds = pd.concat(test_preds, axis=1) + # train_preds = pd.concat(train_preds, axis=1) + # return train_preds, test_preds + + # @property + # def model_dir_key(self) -> str: + # """Get the key for the model directory.""" + # return f"{self.experiment_id}/{self.sort_index}/models" + + # def format_model_key(self, model_name: str) -> str: + # """Format the model key.""" + # return f"hatchet/{self.model_dir_key}/{model_name}" + + # def train_xgboost( + # self, + # train_params: pd.DataFrame, + # train_targets: pd.DataFrame, + # test_params: pd.DataFrame, + # test_targets: pd.DataFrame, + # ): + # """Train the xgboost model.""" + # import xgboost as xgb + + # hparams = { + # "objective": "reg:squarederror", + # "eval_metric": "rmse", + # "max_depth": 5, # 7 + # 
"eta": 0.1, + # "min_child_weight": 3, + # "subsample": 0.8, + # "colsample_bytree": 0.8, + # # "alpha": 0.01, + # # "lambda": 0.01, + # # "gamma": 0.01, + # } + + # train_dmatrix = xgb.DMatrix(train_params, label=train_targets) + # test_dmatrix = xgb.DMatrix(test_params, label=test_targets) + + # model = xgb.train( + # hparams, + # train_dmatrix, + # num_boost_round=2000, + # early_stopping_rounds=20, + # verbose_eval=True, + # evals=[(test_dmatrix, "test")], + # ) + + # # compute the metrics + # train_preds = model.predict(train_dmatrix) + # test_preds = model.predict(test_dmatrix) + # train_preds = pd.DataFrame( + # train_preds, index=train_targets.index, columns=train_targets.columns + # ) + # test_preds = pd.DataFrame( + # test_preds, index=test_targets.index, columns=test_targets.columns + # ) + + # return train_preds, test_preds + + +class TrainWithCVSpec(StageSpec): + """Train an SBEM model using a scatter gather approach for cross-fold validation.""" + + @model_validator(mode="after") + def check_stage(self): + """The training spec must have stage set to 'train'.""" + if self.stage_type != "train": + msg = f"Invalid stage: {self.stage_type}" + raise ValueError(msg) + return self + + @property + def schedule(self) -> list[TrainFoldSpec]: + """Create the task schedule.""" + schedule = [] + data_uri = self.data_uri + if data_uri is None: + msg = "Data URI is required for training." 
+ raise ValueError(msg) + + for i in range(self.progressive_training_spec.cross_val.n_folds): + schedule.append( + TrainFoldSpec( + # TODO: this should be set in a better manner + experiment_id="placeholder", + sort_index=i, + n_folds=self.progressive_training_spec.cross_val.n_folds, + data_uri=data_uri, + stratification_field=self.progressive_training_spec.stratification.field, + progressive_training_iter_ix=self.progressive_training_iteration_ix, + storage_settings=self.progressive_training_spec.storage_settings, + ) + ) + return schedule + + # def allocate(self, s3_client: S3ClientType): + # """Allocate the task.""" + # # 1. turn the schedule into a parquet dataframe + # df = pd.DataFrame([m.model_dump(mode="json") for m in self.schedule]) + # bucket = self.progressive_training_spec.bucket + # with tempfile.TemporaryDirectory() as tempdir: + # temp_path = Path(tempdir) / "train_specs.parquet" + # df.to_parquet(temp_path) + # key = f"hatchet/{self.experiment_key}/train_specs.parquet" + # specs_uri = f"s3://{bucket}/{key}" + # s3_client.upload_file(temp_path.as_posix(), bucket, key) + + # payload = { + # "specs": specs_uri, + # "bucket": bucket, + # # TODO: this should be selected in a better manner. 
+ # "workflow_name": "train_regressor_with_cv_fold", + # "experiment_id": self.experiment_key, + # } + # return payload + + # def check_convergence(self, uri: URIResponse, s3_client: S3ClientType): + # """Check the convergence of the training.""" + # with tempfile.TemporaryDirectory() as tempdir: + # tempdir = Path(tempdir) + # results_path = tempdir / "results.hdf" + # # download the results from s3 + # fetch_uri(uri.uri, local_path=results_path, use_cache=False, s3=s3_client) + # results = cast( + # pd.DataFrame, pd.read_hdf(results_path, key="stratum_metrics") + # ) + + # fold_averages = cast( + # pd.Series, + # results.xs( + # "test", + # level="split_segment", + # axis=1, + # ) + # .groupby(level="measurement") + # .mean() + # .unstack(level="measurement"), + # ) + # with tempfile.TemporaryDirectory() as tempdir: + # fold_averages_path = Path(tempdir) / "fold-averaged-errors.pq" + # fold_averages.to_frame( + # name=self.progressive_training_iteration_ix + # ).to_parquet(fold_averages_path) + # key = f"hatchet/{self.experiment_key}/fold-averaged-errors.pq" + # bucket = self.progressive_training_spec.bucket + # s3_client.upload_file(fold_averages_path.as_posix(), bucket, key) + + # ( + # convergence_all, + # convergence_monitor_segment, + # convergence_monitor_segment_and_target, + # convergence, + # ) = self.progressive_training_spec.convergence_criteria.check_convergence( + # fold_averages.xs("Energy", level="measurement") + # ) + + # return convergence_all, convergence diff --git a/src/globi/pipelines/__init__.py b/src/globi/pipelines/__init__.py new file mode 100644 index 0000000..9c61f28 --- /dev/null +++ b/src/globi/pipelines/__init__.py @@ -0,0 +1,13 @@ +"""Pipelines for the GloBI project.""" + +from globi.models.surrogate.dummy import dummy_simulation +from globi.pipelines.gis import preprocess_gis_file +from globi.pipelines.simulations import simulate_globi_building +from globi.pipelines.training import iterative_training + +__all__ = [ + 
"dummy_simulation", + "iterative_training", + "preprocess_gis_file", + "simulate_globi_building", +] diff --git a/src/globi/pipelines.py b/src/globi/pipelines/gis.py similarity index 58% rename from src/globi/pipelines.py rename to src/globi/pipelines/gis.py index 24ddd58..c3ad274 100644 --- a/src/globi/pipelines.py +++ b/src/globi/pipelines/gis.py @@ -1,27 +1,12 @@ -"""Experiment configuration for building builder simulations.""" +"""GIS processing pipelines for the GloBI project.""" import logging from pathlib import Path from typing import cast import geopandas as gpd -import numpy as np -import pandas as pd import yaml -from epinterface.geometry import ( - SceneContext, - ShoeboxGeometry, -) -from epinterface.sbem.builder import ( - AtticAssumptions, - BasementAssumptions, - Model, - construct_zone_def, -) from epinterface.sbem.fields.spec import SemanticModelFields -from scythe.registry import ExperimentRegistry -from scythe.utils.filesys import FileReference -from shapely import Polygon, from_wkt from globi.gis.errors import SemanticFieldsFileHasNoBuildingIDColumnError from globi.gis.geometry import ( @@ -53,206 +38,10 @@ FileConfig, GISPreprocessorColumnMap, ) -from globi.models.tasks import GloBIBuildingSpec, GloBIOutputSpec logger = logging.getLogger(__name__) -INDEX_COLS_TO_KEEP: list[str] = [ - "feature.geometry.long_edge", - "feature.geometry.short_edge", - "feature.geometry.orientation", - "feature.geometry.num_floors", - "feature.geometry.energy_model_conditioned_area", - "feature.geometry.energy_model_occupied_area", - "feature.semantic.Typology", - "feature.semantic.Age_bracket", - "feature.semantic.Region", - "feature.weather.file", - "feature.geometry.wwr", - "feature.geometry.f2f_height", - "feature.geometry.attic_height", -] - - -def simulate_globi_building_pipeline( - input_spec: GloBIBuildingSpec, - tempdir: Path, -) -> GloBIOutputSpec: - """Simulate a GlobiSpec building and return energy and peak results. 
- - Args: - input_spec: The input specification containing building parameters and file URIs - tempdir: Temporary directory for intermediate files - Returns: - Output specification containing a DataFrame with MultiIndex: - - Top level: Measurement type (Energy, Peak) - - Feature levels from input specification - """ - spec = input_spec - log = logger.info - zone_def = construct_zone_def( - component_map_path=spec.component_map, - db_path=spec.db_path, - semantic_field_context=spec.semantic_field_context, - ) - model = Model( - Weather=spec.epwzip_path, - Zone=zone_def, - Basement=BasementAssumptions( - Conditioned=spec.basement_is_conditioned, - UseFraction=spec.basement_use_fraction - if spec.basement_is_occupied - else None, - ), - Attic=AtticAssumptions( - Conditioned=spec.attic_is_conditioned, - UseFraction=spec.attic_use_fraction if spec.attic_is_occupied else None, - ), - geometry=ShoeboxGeometry( - x=0, - y=0, - w=spec.long_edge, - d=spec.short_edge, - h=spec.f2f_height, - wwr=spec.wwr, - num_stories=spec.num_floors, - basement=spec.has_basement, - zoning=spec.use_core_perim_zoning, - roof_height=spec.attic_height, - exposed_basement_frac=spec.exposed_basement_frac, - scene_context=SceneContext( - building=cast(Polygon, from_wkt(spec.rotated_rectangle)), - neighbors=[ - cast(Polygon, from_wkt(poly)) for poly in spec.neighbor_polys - ], - neighbor_heights=[ - float(h) if h is not None else 0 for h in spec.neighbor_heights - ], - orientation=spec.long_edge_angle, - ), - ), - ) - - log("Building and running model...") - overheating_config = ( - spec.parent_experiment_spec.overheating_config - if spec.parent_experiment_spec - else None - ) - run_result = model.run( - eplus_parent_dir=tempdir, - overheating_config=overheating_config, - ) - # Validate conditioned area - if not np.allclose( - model.total_conditioned_area, spec.energy_model_conditioned_area - ): - msg = ( - f"Total conditioned area mismatch: " - f"{model.total_conditioned_area} != 
{spec.energy_model_conditioned_area}" - ) - raise ValueError(msg) - - # Results Post-processing - # TODO: consider if we actually want all t he columns we are including. - feature_index = spec.make_multiindex( - n_rows=1, additional_index_data=spec.feature_dict - ) - results = run_result.energy_and_peak.to_frame().T.set_index(feature_index) - - dfs: dict[str, pd.DataFrame] = { - "EnergyAndPeak": results, - } - if run_result.overheating_results is not None: - # TODO: add feature dict to overheating df indices? Or instead of a full feature df, just add a single column with the building id? - edh = run_result.overheating_results.edh - old_ix = edh.index - feature_index = spec.make_multiindex( - n_rows=len(edh), include_sort_subindex=False - ) - edh.index = feature_index - edh = edh.set_index(old_ix, append=True) - dfs["ExceedanceDegreeHours"] = edh - - basic_oh = run_result.overheating_results.basic_oh - old_ix = basic_oh.index - feature_index = spec.make_multiindex( - n_rows=len(basic_oh), include_sort_subindex=False - ) - basic_oh.index = feature_index - basic_oh = basic_oh.set_index(old_ix, append=True) - dfs["BasicOverheating"] = basic_oh - - heat_index_categories = run_result.overheating_results.hi - old_ix = heat_index_categories.index - feature_index = spec.make_multiindex( - n_rows=len(heat_index_categories), include_sort_subindex=False - ) - heat_index_categories.index = feature_index - heat_index_categories = heat_index_categories.set_index(old_ix, append=True) - dfs["HeatIndexCategories"] = heat_index_categories - - consecutive_e_zone = run_result.overheating_results.consecutive_e_zone - # may be zero if no streaks found in any zones - if len(consecutive_e_zone) > 0: - old_ix = consecutive_e_zone.index - feature_index = spec.make_multiindex( - n_rows=len(consecutive_e_zone), include_sort_subindex=False - ) - consecutive_e_zone.index = feature_index - consecutive_e_zone = consecutive_e_zone.set_index(old_ix, append=True) - dfs["ConsecutiveExceedances"] = 
consecutive_e_zone - - hourly_data_outpath: FileReference | None = None - - if spec.parent_experiment_spec and spec.parent_experiment_spec.hourly_data_config: - hourly_df = run_result.sql.timeseries_by_name( - spec.parent_experiment_spec.hourly_data_config.data, - reporting_frequency="Hourly", - ) - hourly_df.index.names = ["Timestep"] - hourly_df.columns.names = ["Trash", "Group", "Meter"] - hourly_df: pd.DataFrame = cast( - pd.DataFrame, - hourly_df.droplevel("Trash", axis=1) - .stack(level="Group", future_stack=True) - .unstack(level="Timestep"), - ) - hourly_multiindex = spec.make_multiindex( - n_rows=len(hourly_df), include_sort_subindex=False - ) - old_ix = hourly_df.index - hourly_df.index = hourly_multiindex - hourly_df = hourly_df.set_index(old_ix, append=True) - - if spec.parent_experiment_spec.hourly_data_config.does_dataframe_output: - for meter_name in hourly_df.columns.get_level_values("Meter").unique(): - variable_df = hourly_df.xs(meter_name, level="Meter", axis=1) - dataframe_key = f"HourlyData.{meter_name.replace(' ', '')}" - dfs[dataframe_key] = variable_df - if spec.parent_experiment_spec.hourly_data_config.does_file_output: - hourly_data_outpath = tempdir / "outputs_hourly_data.pq" - hourly_df.to_parquet(hourly_data_outpath) - - return GloBIOutputSpec( - dataframes=dfs, - hourly_data=hourly_data_outpath, - ) - - -@ExperimentRegistry.Register(retries=2, schedule_timeout="10h", execution_timeout="30m") -def simulate_globi_building( - input_spec: GloBIBuildingSpec, tempdir: Path -) -> GloBIOutputSpec: - """Simulate a GlobiSpec building and return monthly energy and peak results. - - NB: this is separated from the pipeline above so the pipeline can still be used as a - local invocation without *too* much difficulty. 
- """ - return simulate_globi_building_pipeline(input_spec, tempdir) - - def preprocess_gis_file( config: DeterministicGISPreprocessorConfig, file_config: "FileConfig", @@ -522,17 +311,3 @@ def preprocess_gis_file( logger.info(f"saved {len(gdf)} features to {output_path}") return gdf, column_output_map - - -if __name__ == "__main__": - import tempfile - - from globi.models.tasks import MinimalBuildingSpec - - with tempfile.TemporaryDirectory() as tempdir: - with open("inputs/building.yml") as f: - input_spec = MinimalBuildingSpec.model_validate(yaml.safe_load(f)) - o = simulate_globi_building_pipeline( - input_spec=input_spec.globi_spec, - tempdir=Path(tempdir), - ) diff --git a/src/globi/pipelines/simulations.py b/src/globi/pipelines/simulations.py new file mode 100644 index 0000000..dfaff88 --- /dev/null +++ b/src/globi/pipelines/simulations.py @@ -0,0 +1,235 @@ +"""Experiment configuration for building builder simulations.""" + +import logging +from pathlib import Path +from typing import cast + +import numpy as np +import pandas as pd +import yaml +from epinterface.geometry import ( + SceneContext, + ShoeboxGeometry, +) +from epinterface.sbem.builder import ( + AtticAssumptions, + BasementAssumptions, + Model, + construct_zone_def, +) +from scythe.registry import ExperimentRegistry +from scythe.utils.filesys import FileReference +from shapely import Polygon, from_wkt + +from globi.models.tasks import GloBIBuildingSpec, GloBIOutputSpec + +logger = logging.getLogger(__name__) + + +INDEX_COLS_TO_KEEP: list[str] = [ + "feature.geometry.long_edge", + "feature.geometry.short_edge", + "feature.geometry.orientation", + "feature.geometry.num_floors", + "feature.geometry.energy_model_conditioned_area", + "feature.geometry.energy_model_occupied_area", + "feature.semantic.Typology", + "feature.semantic.Age_bracket", + "feature.semantic.Region", + "feature.weather.file", + "feature.geometry.wwr", + "feature.geometry.f2f_height", + "feature.geometry.attic_height", +] + + 
+def simulate_globi_building_pipeline( + input_spec: GloBIBuildingSpec, + tempdir: Path, +) -> GloBIOutputSpec: + """Simulate a GlobiSpec building and return energy and peak results. + + Args: + input_spec: The input specification containing building parameters and file URIs + tempdir: Temporary directory for intermediate files + Returns: + Output specification containing a DataFrame with MultiIndex: + - Top level: Measurement type (Energy, Peak) + - Feature levels from input specification + """ + spec = input_spec + log = logger.info + zone_def = construct_zone_def( + component_map_path=spec.component_map, + db_path=spec.db_path, + semantic_field_context=spec.semantic_field_context, + ) + model = Model( + Weather=spec.epwzip_path, + Zone=zone_def, + Basement=BasementAssumptions( + Conditioned=spec.basement_is_conditioned, + UseFraction=spec.basement_use_fraction + if spec.basement_is_occupied + else None, + ), + Attic=AtticAssumptions( + Conditioned=spec.attic_is_conditioned, + UseFraction=spec.attic_use_fraction if spec.attic_is_occupied else None, + ), + geometry=ShoeboxGeometry( + x=0, + y=0, + w=spec.long_edge, + d=spec.short_edge, + h=spec.f2f_height, + wwr=spec.wwr, + num_stories=spec.num_floors, + basement=spec.has_basement, + zoning=spec.use_core_perim_zoning, + roof_height=spec.attic_height, + exposed_basement_frac=spec.exposed_basement_frac, + scene_context=SceneContext( + building=cast(Polygon, from_wkt(spec.rotated_rectangle)), + neighbors=[ + cast(Polygon, from_wkt(poly)) for poly in spec.neighbor_polys + ], + neighbor_heights=[ + float(h) if h is not None else 0 for h in spec.neighbor_heights + ], + orientation=spec.long_edge_angle, + ), + ), + ) + + log("Building and running model...") + overheating_config = ( + spec.parent_experiment_spec.overheating_config + if spec.parent_experiment_spec + else None + ) + run_result = model.run( + eplus_parent_dir=tempdir, + overheating_config=overheating_config, + ) + # Validate conditioned area + if not 
np.allclose(
+        model.total_conditioned_area, spec.energy_model_conditioned_area
+    ):
+        msg = (
+            f"Total conditioned area mismatch: "
+            f"{model.total_conditioned_area} != {spec.energy_model_conditioned_area}"
+        )
+        raise ValueError(msg)
+
+    # Results Post-processing
+    # TODO: consider if we actually want all the columns we are including.
+    feature_index = spec.make_multiindex(
+        n_rows=1, additional_index_data=spec.feature_dict
+    )
+    results = run_result.energy_and_peak.to_frame().T.set_index(feature_index)
+
+    dfs: dict[str, pd.DataFrame] = {
+        "EnergyAndPeak": results,
+    }
+    if run_result.overheating_results is not None:
+        # TODO: add feature dict to overheating df indices? Or instead of a full feature df, just add a single column with the building id?
+        edh = run_result.overheating_results.edh
+        old_ix = edh.index
+        feature_index = spec.make_multiindex(
+            n_rows=len(edh), include_sort_subindex=False
+        )
+        edh.index = feature_index
+        edh = edh.set_index(old_ix, append=True)
+        dfs["ExceedanceDegreeHours"] = edh
+
+        basic_oh = run_result.overheating_results.basic_oh
+        old_ix = basic_oh.index
+        feature_index = spec.make_multiindex(
+            n_rows=len(basic_oh), include_sort_subindex=False
+        )
+        basic_oh.index = feature_index
+        basic_oh = basic_oh.set_index(old_ix, append=True)
+        dfs["BasicOverheating"] = basic_oh
+
+        heat_index_categories = run_result.overheating_results.hi
+        old_ix = heat_index_categories.index
+        feature_index = spec.make_multiindex(
+            n_rows=len(heat_index_categories), include_sort_subindex=False
+        )
+        heat_index_categories.index = feature_index
+        heat_index_categories = heat_index_categories.set_index(old_ix, append=True)
+        dfs["HeatIndexCategories"] = heat_index_categories
+
+        consecutive_e_zone = run_result.overheating_results.consecutive_e_zone
+        # may be zero if no streaks found in any zones
+        if len(consecutive_e_zone) > 0:
+            old_ix = consecutive_e_zone.index
+            feature_index = spec.make_multiindex(
+                n_rows=len(consecutive_e_zone), 
include_sort_subindex=False + ) + consecutive_e_zone.index = feature_index + consecutive_e_zone = consecutive_e_zone.set_index(old_ix, append=True) + dfs["ConsecutiveExceedances"] = consecutive_e_zone + + hourly_data_outpath: FileReference | None = None + + if spec.parent_experiment_spec and spec.parent_experiment_spec.hourly_data_config: + hourly_df = run_result.sql.timeseries_by_name( + spec.parent_experiment_spec.hourly_data_config.data, + reporting_frequency="Hourly", + ) + hourly_df.index.names = ["Timestep"] + hourly_df.columns.names = ["Trash", "Group", "Meter"] + hourly_df: pd.DataFrame = cast( + pd.DataFrame, + hourly_df.droplevel("Trash", axis=1) + .stack(level="Group", future_stack=True) + .unstack(level="Timestep"), + ) + hourly_multiindex = spec.make_multiindex( + n_rows=len(hourly_df), include_sort_subindex=False + ) + old_ix = hourly_df.index + hourly_df.index = hourly_multiindex + hourly_df = hourly_df.set_index(old_ix, append=True) + + if spec.parent_experiment_spec.hourly_data_config.does_dataframe_output: + for meter_name in hourly_df.columns.get_level_values("Meter").unique(): + variable_df = hourly_df.xs(meter_name, level="Meter", axis=1) + dataframe_key = f"HourlyData.{meter_name.replace(' ', '')}" + dfs[dataframe_key] = variable_df + if spec.parent_experiment_spec.hourly_data_config.does_file_output: + hourly_data_outpath = tempdir / "outputs_hourly_data.pq" + hourly_df.to_parquet(hourly_data_outpath) + + return GloBIOutputSpec( + dataframes=dfs, + hourly_data=hourly_data_outpath, + ) + + +@ExperimentRegistry.Register(retries=2, schedule_timeout="10h", execution_timeout="30m") +def simulate_globi_building( + input_spec: GloBIBuildingSpec, tempdir: Path +) -> GloBIOutputSpec: + """Simulate a GlobiSpec building and return monthly energy and peak results. + + NB: this is separated from the pipeline above so the pipeline can still be used as a + local invocation without *too* much difficulty. 
+ """ + return simulate_globi_building_pipeline(input_spec, tempdir) + + +if __name__ == "__main__": + import tempfile + + from globi.models.tasks import MinimalBuildingSpec + + with tempfile.TemporaryDirectory() as tempdir: + with open("inputs/building.yml") as f: + input_spec = MinimalBuildingSpec.model_validate(yaml.safe_load(f)) + o = simulate_globi_building_pipeline( + input_spec=input_spec.globi_spec, + tempdir=Path(tempdir), + ) diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py new file mode 100644 index 0000000..f5c10f6 --- /dev/null +++ b/src/globi/pipelines/training.py @@ -0,0 +1,323 @@ +"""The training pipeline.""" + +from datetime import datetime, timedelta +from pathlib import Path +from typing import Literal + +from hatchet_sdk import Context +from pydantic import BaseModel, HttpUrl +from scythe.base import ExperimentOutputSpec +from scythe.experiments import ( + BaseExperiment, + ExperimentRun, + SemVer, + VersionedExperiment, +) +from scythe.hatchet import hatchet +from scythe.registry import ExperimentRegistry +from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather + +from globi.models.surrogate.dummy import DummySimulationInput, dummy_simulation +from globi.models.surrogate.training import SampleSpec, TrainFoldSpec, TrainWithCVSpec + + +class FoldResult(ExperimentOutputSpec): + """The output for a fold.""" + + pass + + +@ExperimentRegistry.Register( + description="Train a regressor with cross-fold validation.", +) +def train_regressor_with_cv_fold( + input_spec: TrainFoldSpec, tempdir: Path +) -> FoldResult: + """Train a regressor with cross-fold validation.""" + # DO TRAINING + + return FoldResult() + + +class ExperimentMetadata(BaseModel): + """Metadata about an experiment.""" + + workflow_run_id: str + run_id: str + run_name: str + version: SemVer + datetime: datetime + + +class CombineResultsResult(BaseModel): + """The result of combining the results of the simulations.""" + + 
scatter_gather_result: ScatterGatherResult + combined_scatter_gather_result: ScatterGatherResult + + +iterative_training = hatchet.workflow( + name="iterative_training", + description="Sample a collection of buliding simulations to then simulate and train a surrogate model.", + input_validator=SampleSpec, +) + + +@iterative_training.task( + name="iterative_training.create_simulations", + schedule_timeout=timedelta(minutes=30), + execution_timeout=timedelta(minutes=10), +) +def create_simulations(spec: SampleSpec, context: Context) -> ExperimentMetadata: + """Create the simulations.""" + # STEP 1: Generate the training samples, allocate simulations + specs = [ + DummySimulationInput( + a=i, + b=i, + experiment_id="placeholder", + sort_index=i, + ) + for i in range(10) + ] + + # STEP 2: Simulate the simulations using scythe + root_run_name = spec.progressive_training_spec.experiment_id + run_name = f"{root_run_name}/sample" + + exp = BaseExperiment( + # TODO: replace with simulate_globi_flat_building + experiment=dummy_simulation, # TODO: add configurability to switch between simulations. + run_name=run_name, + storage_settings=spec.progressive_training_spec.storage_settings, + ) + + run, ref = exp.allocate( + specs, + version="bumpmajor", # TODO: bump minor if not the first iteration. + recursion_map=spec.progressive_training_spec.iteration.recursion, + ) + + run_name = run.versioned_experiment.base_experiment.run_name + if not run_name: + msg = "Run name is required." 
+ raise ValueError(msg) + run_id = run.experiment_id + + return ExperimentMetadata( + workflow_run_id=ref.workflow_run_id, + run_id=run_id, + run_name=run_name, + version=run.versioned_experiment.version, + datetime=run.timestamp, + ) + + +@iterative_training.task( + name="iterative_training.await_simulations", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(hours=5), + parents=[create_simulations], +) +async def await_simulations(spec: SampleSpec, context: Context) -> ScatterGatherResult: + """Await the simulations.""" + parent_output = context.task_output(create_simulations) + workflow_run_id = parent_output.workflow_run_id + context.log("Awaiting simulations...") + results = await scatter_gather.aio_get_result(workflow_run_id) + context.log("Simulations completed.") + + return results + + +@iterative_training.task( + name="iterative_training.combine_results", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(hours=1), + parents=[await_simulations, create_simulations], +) +async def combine_results(spec: SampleSpec, context: Context) -> CombineResultsResult: + """Combine the results of the simulations.""" + results = context.task_output(await_simulations) + run_info = context.task_output(create_simulations) + # TODO: kind of annoying have to reconstruct the run object here; necessary because the base experiment is not serializable. + _run = ExperimentRun( + versioned_experiment=VersionedExperiment( + base_experiment=BaseExperiment( + experiment=dummy_simulation, # TODO: replace with simulate_globi_flat_building + run_name=run_info.run_name, + storage_settings=spec.progressive_training_spec.storage_settings, + ), + version=run_info.version, + ), + timestamp=run_info.datetime, + ) + # files = run.list_results_files() + # TODO: configure which files to store/combine via input spec. 
+    return CombineResultsResult(
+        scatter_gather_result=results,
+        combined_scatter_gather_result=results,
+    )
+
+
+class StartTrainingResult(BaseModel):
+    """The result of starting the training."""
+
+    training_spec: TrainWithCVSpec
+    experiment_metadata: ExperimentMetadata
+
+
+@iterative_training.task(
+    name="iterative_training.start_training",
+    schedule_timeout=timedelta(hours=5),
+    execution_timeout=timedelta(hours=1),
+    parents=[combine_results],
+)
+async def start_training(spec: SampleSpec, context: Context) -> StartTrainingResult:
+    """Start the training."""
+    results = context.task_output(combine_results)
+
+    train_spec = TrainWithCVSpec(
+        progressive_training_spec=spec.progressive_training_spec,
+        progressive_training_iteration_ix=spec.progressive_training_iteration_ix,
+        data_uri=results.combined_scatter_gather_result.uris[
+            "main_result"
+        ],  # TODO: should configure which result to use
+        stage_type="train",
+    )
+
+    # TODO: create the training specs and then allocate the experiment
+
+    specs = train_spec.schedule
+
+    root_run_name = spec.progressive_training_spec.experiment_id
+    run_name = f"{root_run_name}/train"
+    exp = BaseExperiment(
+        experiment=train_regressor_with_cv_fold,
+        run_name=run_name,
+        storage_settings=spec.progressive_training_spec.storage_settings,
+    )
+    run, ref = exp.allocate(
+        specs,
+        version="bumpmajor",  # TODO: bump minor if not the first iteration.
+        recursion_map=RecursionMap(
+            factor=2,
+            max_depth=0,
+        ),
+    )
+
+    if not run.versioned_experiment.base_experiment.run_name:
+        msg = "Run name is required."
+ raise ValueError(msg) + + return StartTrainingResult( + training_spec=train_spec, + experiment_metadata=ExperimentMetadata( + workflow_run_id=ref.workflow_run_id, + run_id=run.experiment_id, + run_name=run.versioned_experiment.base_experiment.run_name, + version=run.versioned_experiment.version, + datetime=run.timestamp, + ), + ) + + +@iterative_training.task( + name="iterative_training.await_training", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(hours=5), + parents=[start_training], +) +async def await_training(spec: SampleSpec, context: Context) -> ScatterGatherResult: + """Await the training.""" + parent_output = context.task_output(start_training) + workflow_run_id = parent_output.experiment_metadata.workflow_run_id + context.log("Awaiting training...") + results = await scatter_gather.aio_get_result(workflow_run_id) + context.log("Training completed.") + + return results + + +class TrainingEvaluationResult(BaseModel): + """The result of evaluating the training.""" + + converged: bool + + +class RecursionTransition(BaseModel): + """The transition of the recursion.""" + + reasoning: Literal["max_depth", "converged"] | None + child_workflow_run_id: str | None + + +@iterative_training.task( + name="iterative_training.evaluate_training", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(minutes=5), + parents=[await_training], +) +async def evaluate_training( + spec: SampleSpec, context: Context +) -> TrainingEvaluationResult: + """Evaluate the training.""" + _results = context.task_output(await_training) + return TrainingEvaluationResult(converged=True) + + +@iterative_training.task( + name="iterative_training.transition_recursion", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(minutes=5), + parents=[evaluate_training, start_training], +) +async def transition_recursion( + spec: SampleSpec, context: Context +) -> RecursionTransition: + """Transition the recursion.""" + results = 
context.task_output(evaluate_training) + if results.converged: + # create child + return RecursionTransition(reasoning="converged", child_workflow_run_id=None) + if ( + spec.progressive_training_iteration_ix + 1 + >= spec.progressive_training_spec.iteration.max_iters + ): + return RecursionTransition(reasoning="max_depth", child_workflow_run_id=None) + + start_training_output = context.task_output(start_training) + + sample_spec = SampleSpec( + progressive_training_spec=spec.progressive_training_spec, + progressive_training_iteration_ix=spec.progressive_training_iteration_ix + 1, + data_uri=start_training_output.training_spec.data_uri, + stage_type="sample", + ) + + ref = await iterative_training.aio_run_no_wait( + sample_spec, + ) + return RecursionTransition( + reasoning=None, child_workflow_run_id=ref.workflow_run_id + ) + + +if __name__ == "__main__": + from scythe.settings import ScytheStorageSettings + + from globi.models.surrogate.training import ProgressiveTrainingSpec + + progressive_training_spec = ProgressiveTrainingSpec( + experiment_id="test-experiment", + gis_uri=HttpUrl("https://example.com/gis.parquet"), + storage_settings=ScytheStorageSettings(), + ) + spec = SampleSpec( + progressive_training_spec=progressive_training_spec, + progressive_training_iteration_ix=0, + data_uri=None, + stage_type="sample", + ) + result = iterative_training.run(spec) + print(result) diff --git a/src/globi/worker/Dockerfile b/src/globi/worker/Dockerfile index 321b76b..a5c6bc6 100644 --- a/src/globi/worker/Dockerfile +++ b/src/globi/worker/Dockerfile @@ -93,7 +93,8 @@ RUN EP_VERSION_DASH=$(echo "${EP_VERSION}" | tr '.' 
'-') && \ WORKDIR /code COPY uv.lock pyproject.toml README.md /code/ -RUN uv sync --locked --no-install-project --extra cli +# TODO: only insttall ml for certain containers by passing in a flag to the docker build command +RUN uv sync --locked --no-install-project --extra cli --extra ml RUN uv run epi prisma generate diff --git a/src/globi/worker/main.py b/src/globi/worker/main.py index 4a38a4a..ee7eb38 100644 --- a/src/globi/worker/main.py +++ b/src/globi/worker/main.py @@ -1,16 +1,35 @@ """Worker main script.""" +from scythe.hatchet import hatchet +from scythe.registry import ExperimentRegistry +from scythe.scatter_gather import scatter_gather from scythe.worker import ScytheWorkerConfig from globi.pipelines import * # noqa: F403 +from globi.pipelines import iterative_training conf = ScytheWorkerConfig() def main(): """Main function for the worker.""" - conf.start() + # TODO: this is required since scythe does not allow registering extra tasks/workflows at the moment. + worker = hatchet.worker( + name=conf.computed_name, + slots=conf.computed_slots, + durable_slots=conf.computed_durable_slots, + labels=conf.labels, + ) + workflows = ([scatter_gather] if conf.DOES_FAN else []) + ( + ExperimentRegistry.experiments() if conf.DOES_LEAF else [] + ) + for workflow in workflows: + worker.register_workflow(workflow) + worker.register_workflow(iterative_training) + worker.start() + + # conf.start() if __name__ == "__main__": - conf.start() + main() diff --git a/uv.lock b/uv.lock index 73b2575..2cf86e0 100644 --- a/uv.lock +++ b/uv.lock @@ -1379,6 +1379,10 @@ cli = [ { name = "click" }, { name = "xlsxwriter" }, ] +ml = [ + { name = "lightgbm" }, + { name = "xgboost" }, +] visualization = [ { name = "bokeh" }, { name = "folium" }, @@ -1421,6 +1425,7 @@ requires-dist = [ { name = "folium", marker = "extra == 'visualization'", specifier = ">=0.15.0" }, { name = "geopandas", specifier = ">=0.14.0" }, { name = "ladybug-core", specifier = ">=0.44.29" }, + { name = "lightgbm", 
marker = "extra == 'ml'", specifier = ">=4.6.0" }, { name = "matplotlib", marker = "extra == 'visualization'", specifier = ">=3.8.0" }, { name = "numpy", specifier = ">=1.26.0" }, { name = "pandas", specifier = ">=2.1.0" }, @@ -1435,9 +1440,10 @@ requires-dist = [ { name = "seaborn", marker = "extra == 'visualization'", specifier = ">=0.13.0" }, { name = "shapely", specifier = ">=2.0.0" }, { name = "streamlit", marker = "extra == 'visualization'", specifier = ">=1.28.0" }, + { name = "xgboost", marker = "extra == 'ml'", specifier = ">=3.2.0" }, { name = "xlsxwriter", marker = "extra == 'cli'", specifier = ">=3.2.9" }, ] -provides-extras = ["visualization", "cli"] +provides-extras = ["visualization", "ml", "cli"] [package.metadata.requires-dev] dev = [ @@ -2156,6 +2162,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d4/52/7b8421a8ace22a17ae77dd9a8367e916364ed8be72502cb744805f06d6ac/ladybug_geometry-1.34.14-py3-none-any.whl", hash = "sha256:af91ee9285333ca1ddfaf439530306dff7f0a891cae40d4dc5491f139fcf7d36", size = 198221, upload-time = "2025-11-07T04:16:46.986Z" }, ] +[[package]] +name = "lightgbm" +version = "4.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/0b/a2e9f5c5da7ef047cc60cef37f86185088845e8433e54d2e7ed439cce8a3/lightgbm-4.6.0.tar.gz", hash = "sha256:cb1c59720eb569389c0ba74d14f52351b573af489f230032a1c9f314f8bab7fe", size = 1703705, upload-time = "2025-02-15T04:03:03.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/75/cffc9962cca296bc5536896b7e65b4a7cdeb8db208e71b9c0133c08f8f7e/lightgbm-4.6.0-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:b7a393de8a334d5c8e490df91270f0763f83f959574d504c7ccb9eee4aef70ed", size = 2010151, upload-time = "2025-02-15T04:02:50.961Z" }, + { url = 
"https://files.pythonhosted.org/packages/21/1b/550ee378512b78847930f5d74228ca1fdba2a7fbdeaac9aeccc085b0e257/lightgbm-4.6.0-py3-none-macosx_12_0_arm64.whl", hash = "sha256:2dafd98d4e02b844ceb0b61450a660681076b1ea6c7adb8c566dfd66832aafad", size = 1592172, upload-time = "2025-02-15T04:02:53.937Z" }, + { url = "https://files.pythonhosted.org/packages/64/41/4fbde2c3d29e25ee7c41d87df2f2e5eda65b431ee154d4d462c31041846c/lightgbm-4.6.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4d68712bbd2b57a0b14390cbf9376c1d5ed773fa2e71e099cac588703b590336", size = 3454567, upload-time = "2025-02-15T04:02:56.443Z" }, + { url = "https://files.pythonhosted.org/packages/42/86/dabda8fbcb1b00bcfb0003c3776e8ade1aa7b413dff0a2c08f457dace22f/lightgbm-4.6.0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:cb19b5afea55b5b61cbb2131095f50538bd608a00655f23ad5d25ae3e3bf1c8d", size = 3569831, upload-time = "2025-02-15T04:02:58.925Z" }, + { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, +] + [[package]] name = "littleutils" version = "0.2.4" @@ -2992,6 +3015,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" }, ] +[[package]] +name = "nvidia-nccl-cu12" +version = "2.29.7" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/cc/f48875411d1f176bce58e6343fd5d4131fc1db5420719ff25944bdc006c6/nvidia_nccl_cu12-2.29.7-py3-none-manylinux_2_18_aarch64.whl", hash = "sha256:0cf032ee22b560447daf0456108a75e32bd74a4de6c6b64725637a359fa48cd8", 
size = 293563644, upload-time = "2026-03-03T05:34:46.166Z" }, + { url = "https://files.pythonhosted.org/packages/31/1e/9e366f36efc550f07d6737f199e3f6bffafdf28795d007f10a77dd274f5c/nvidia_nccl_cu12-2.29.7-py3-none-manylinux_2_18_x86_64.whl", hash = "sha256:ecd0a012051abc20c1aa87328841efa8cade3ced65803046e38c2f03c0891fea", size = 293633942, upload-time = "2026-03-03T05:37:05.625Z" }, +] + [[package]] name = "openpyxl" version = "3.1.5" @@ -4786,6 +4818,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3f/0e/fa3b193432cfc60c93b42f3be03365f5f909d2b3ea410295cf36df739e31/widgetsnbextension-4.0.15-py3-none-any.whl", hash = "sha256:8156704e4346a571d9ce73b84bee86a29906c9abfd7223b7228a28899ccf3366", size = 2196503, upload-time = "2025-11-01T21:15:53.565Z" }, ] +[[package]] +name = "xgboost" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "nvidia-nccl-cu12", marker = "sys_platform == 'linux'" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/bb/1eb0242409d22db725d7a88088e6cfd6556829fb0736f9ff69aa9f1e9455/xgboost-3.2.0.tar.gz", hash = "sha256:99b0e9a2a64896cdaf509c5e46372d336c692406646d20f2af505003c0c5d70d", size = 1263936, upload-time = "2026-02-10T11:03:05.542Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/49/6e4cdd877c24adf56cb3586bc96d93d4dcd780b5ea1efb32e1ee0de08bae/xgboost-3.2.0-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:2f661966d3e322536d9c448090a870fcba1e32ee5760c10b7c46bac7a342079a", size = 2507014, upload-time = "2026-02-10T10:50:57.44Z" }, + { url = "https://files.pythonhosted.org/packages/93/f1/c09ef1add609453aa3ba5bafcd0d1c1a805c1263c0b60138ec968f8ec296/xgboost-3.2.0-py3-none-macosx_12_0_arm64.whl", hash = "sha256:eabbd40d474b8dbf6cb3536325f9150b9e6f0db32d18de9914fb3227d0bef5b7", size = 2328527, upload-time = "2026-02-10T10:51:17.502Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/9f/d9914a7b8df842832850b1a18e5f47aaa071c217cdd1da2ae9deb291018b/xgboost-3.2.0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:852eabc6d3b3702a59bf78dbfdcd1cb9c4d3a3b6e5ed1f8781d8b9512354fdd2", size = 131100954, upload-time = "2026-02-10T11:02:42.704Z" }, + { url = "https://files.pythonhosted.org/packages/79/98/679de17c2caa4fd3b0b4386ecf7377301702cb0afb22930a07c142fcb1d8/xgboost-3.2.0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:99b4a6bbcb47212fec5cf5fbe12347215f073c08967431b0122cfbd1ee70312c", size = 131748579, upload-time = "2026-02-10T10:54:40.424Z" }, + { url = "https://files.pythonhosted.org/packages/1f/3d/1661dd114a914a67e3f7ab66fa1382e7599c2a8c340f314ad30a3e2b4d08/xgboost-3.2.0-py3-none-win_amd64.whl", hash = "sha256:0d169736fd836fc13646c7ab787167b3a8110351c2c6bc770c755ee1618f0442", size = 101681668, upload-time = "2026-02-10T10:59:31.202Z" }, +] + [[package]] name = "xlsxwriter" version = "3.2.9" From 8a7d36ec4962dfd1e4f86775902b57517f54b032 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sat, 7 Mar 2026 13:25:08 -0500 Subject: [PATCH 02/31] use scythe to manage outer loop experiment --- pyproject.toml | 2 +- src/globi/models/surrogate/training.py | 99 +++++++-------- src/globi/pipelines/training.py | 168 +++++++++++++------------ uv.lock | 8 +- 4 files changed, 137 insertions(+), 140 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6f33108..8315ab7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -85,7 +85,7 @@ globi = "globi.tools.cli.main:cli" [tool.uv.sources] # scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-optional-filerefs"} -# scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/update-hatchet"} +scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-versioning-workflows"} # scythe-engine = {path = "../scythe", editable = true} # epinterface = {path = 
"../epinterface", editable = true} # epinterface = {path = "epinterface", editable = true} diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index e695715..760bacf 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -6,10 +6,9 @@ import numpy as np import pandas as pd -from pydantic import BaseModel, Field, model_validator -from scythe.base import BaseSpec, ExperimentInputSpec +from pydantic import BaseModel, Field +from scythe.base import ExperimentInputSpec from scythe.scatter_gather import RecursionMap -from scythe.settings import ScytheStorageSettings from scythe.utils.filesys import FileReference, OptionalFileReference if TYPE_CHECKING: @@ -155,12 +154,25 @@ class IterationSpec(BaseModel): default_factory=lambda: RecursionMap(factor=100, max_depth=1), description="The recursion spec.", ) + current_iter: int = Field( + default=0, + description="The index of the current training iteration within the outer loop.", + ) + + @property + def at_max_iters(self) -> bool: + """Whether the current iteration is the maximum number of iterations.""" + return self.current_iter + 1 >= self.max_iters # TODO: should this be a subclass of ExperimentInputSpec? 
-class ProgressiveTrainingSpec(BaseSpec): +class ProgressiveTrainingSpec(ExperimentInputSpec): """A spec for iteratively training an SBEM regression model.""" + base_run_name: str = Field( + ..., + description="The base run name for the experiment.", + ) convergence_criteria: ConvergenceThresholds = Field( default_factory=ConvergenceThresholds, description="The convergence criteria.", @@ -185,9 +197,9 @@ class ProgressiveTrainingSpec(BaseSpec): ..., description="The uri of the gis data to train on.", ) - storage_settings: ScytheStorageSettings = Field( - default=..., - description="The storage settings to use.", + data_uri: OptionalFileReference = Field( + ..., + description="The uris of the previous simulation results to sample from.", ) @property @@ -223,27 +235,15 @@ def gis_data(self) -> pd.DataFrame: class StageSpec(BaseModel): """A spec that is common to both the sample and train stages (and possibly others).""" - progressive_training_spec: ProgressiveTrainingSpec = Field( + parent: ProgressiveTrainingSpec = Field( ..., - description="The progressive training spec.", - ) - progressive_training_iteration_ix: int = Field( - ..., - description="The index of the current training iteration within the outer loop.", - ) - data_uri: OptionalFileReference = Field( - ..., - description="The uris of the previous simulation results to sample from.", - ) - stage_type: Literal["sample", "train"] = Field( - ..., - description="The type of stage.", + description="The parent spec.", ) @cached_property def random_generator(self) -> np.random.Generator: """The random generator.""" - return np.random.default_rng(self.progressive_training_iteration_ix) + return np.random.default_rng(self.parent.iteration.current_iter) # @cached_property # def experiment_key(self) -> str: @@ -277,16 +277,16 @@ def random_generator(self) -> np.random.Generator: class SampleSpec(StageSpec): - """A spec for thhe sampling stage of the progressive training.""" + """A spec for the sampling stage of the 
progressive training.""" # TODO: add the ability to receive the last set of error metrics and use them to inform the sampling def stratified_selection(self) -> pd.DataFrame: """Sample the gis data.""" - df = self.progressive_training_spec.gis_data + df = self.parent.gis_data - stratification_field = self.progressive_training_spec.stratification.field - stratification_aliases = self.progressive_training_spec.stratification.aliases + stratification_field = self.parent.stratification.field + stratification_aliases = self.parent.stratification.aliases if stratification_field not in df.columns and not any( alias in df.columns for alias in stratification_aliases @@ -301,16 +301,16 @@ def stratified_selection(self) -> pd.DataFrame: strata = cast(list[str], df[stratification_field].unique().tolist()) - if self.progressive_training_spec.stratification.sampling == "equal": + if self.parent.stratification.sampling == "equal": return self.sample_equally_by_stratum(df, strata, stratification_field) - elif self.progressive_training_spec.stratification.sampling == "error-weighted": + elif self.parent.stratification.sampling == "error-weighted": msg = "Error-weighted sampling is not yet implemented." raise NotImplementedError(msg) - elif self.progressive_training_spec.stratification.sampling == "proportional": + elif self.parent.stratification.sampling == "proportional": msg = "Proportional sampling is not yet implemented." 
raise NotImplementedError(msg) else: - msg = f"Invalid sampling method: {self.progressive_training_spec.stratification.sampling}" + msg = f"Invalid sampling method: {self.parent.stratification.sampling}" raise ValueError(msg) def sample_equally_by_stratum( @@ -332,15 +332,15 @@ def sample_equally_by_stratum( stratum: df[df[stratification_field] == stratum] for stratum in strata } n_per_iter = ( - self.progressive_training_spec.iteration.n_per_iter - if self.progressive_training_iteration_ix != 0 - else self.progressive_training_spec.iteration.n_init + self.parent.iteration.n_per_iter + if self.parent.iteration.current_iter != 0 + else self.parent.iteration.n_init ) n_per_stratum = max( n_per_iter // len(strata), ( - self.progressive_training_spec.iteration.min_per_stratum - if self.progressive_training_iteration_ix == 0 + self.parent.iteration.min_per_stratum + if self.parent.iteration.current_iter == 0 else 0 ), ) @@ -496,14 +496,6 @@ def to_sim_specs(self, df: pd.DataFrame): # s3_client.upload_file(fpath.as_posix(), bucket, key) # return specs_uri - @model_validator(mode="after") - def check_stage(self): - """The sampling spec must have stage set to 'sample'.""" - if self.stage_type != "sample": - msg = f"Invalid stage: {self.stage_type}" - raise ValueError(msg) - return self - class TrainFoldSpec(ExperimentInputSpec): """Train an sbem model for a specific fold. 
@@ -1005,13 +997,10 @@ def non_numeric_options(self) -> dict[str, list[str]]: class TrainWithCVSpec(StageSpec): """Train an SBEM model using a scatter gather approach for cross-fold validation.""" - @model_validator(mode="after") - def check_stage(self): - """The training spec must have stage set to 'train'.""" - if self.stage_type != "train": - msg = f"Invalid stage: {self.stage_type}" - raise ValueError(msg) - return self + data_uri: FileReference = Field( + ..., + description="The uri of the data to train on.", + ) @property def schedule(self) -> list[TrainFoldSpec]: @@ -1022,17 +1011,17 @@ def schedule(self) -> list[TrainFoldSpec]: msg = "Data URI is required for training." raise ValueError(msg) - for i in range(self.progressive_training_spec.cross_val.n_folds): + for i in range(self.parent.cross_val.n_folds): schedule.append( TrainFoldSpec( # TODO: this should be set in a better manner experiment_id="placeholder", sort_index=i, - n_folds=self.progressive_training_spec.cross_val.n_folds, + n_folds=self.parent.cross_val.n_folds, data_uri=data_uri, - stratification_field=self.progressive_training_spec.stratification.field, - progressive_training_iter_ix=self.progressive_training_iteration_ix, - storage_settings=self.progressive_training_spec.storage_settings, + stratification_field=self.parent.stratification.field, + progressive_training_iter_ix=self.parent.iteration.current_iter, + storage_settings=self.parent.storage_settings, ) ) return schedule diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index f5c10f6..1fba256 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -1,6 +1,6 @@ """The training pipeline.""" -from datetime import datetime, timedelta +from datetime import timedelta from pathlib import Path from typing import Literal @@ -10,15 +10,18 @@ from scythe.experiments import ( BaseExperiment, ExperimentRun, - SemVer, - VersionedExperiment, ) from scythe.hatchet import hatchet from 
scythe.registry import ExperimentRegistry from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather from globi.models.surrogate.dummy import DummySimulationInput, dummy_simulation -from globi.models.surrogate.training import SampleSpec, TrainFoldSpec, TrainWithCVSpec +from globi.models.surrogate.training import ( + IterationSpec, + ProgressiveTrainingSpec, + TrainFoldSpec, + TrainWithCVSpec, +) class FoldResult(ExperimentOutputSpec): @@ -39,16 +42,6 @@ def train_regressor_with_cv_fold( return FoldResult() -class ExperimentMetadata(BaseModel): - """Metadata about an experiment.""" - - workflow_run_id: str - run_id: str - run_name: str - version: SemVer - datetime: datetime - - class CombineResultsResult(BaseModel): """The result of combining the results of the simulations.""" @@ -59,16 +52,25 @@ class CombineResultsResult(BaseModel): iterative_training = hatchet.workflow( name="iterative_training", description="Sample a collection of buliding simulations to then simulate and train a surrogate model.", - input_validator=SampleSpec, + input_validator=ProgressiveTrainingSpec, ) +class ExperimentRunWithRef(BaseModel): + """An experiment run with a workflow run id.""" + + run: ExperimentRun + workflow_run_id: str + + @iterative_training.task( name="iterative_training.create_simulations", schedule_timeout=timedelta(minutes=30), execution_timeout=timedelta(minutes=10), ) -def create_simulations(spec: SampleSpec, context: Context) -> ExperimentMetadata: +def create_simulations( + spec: ProgressiveTrainingSpec, context: Context +) -> ExperimentRunWithRef: """Create the simulations.""" # STEP 1: Generate the training samples, allocate simulations specs = [ @@ -82,34 +84,29 @@ def create_simulations(spec: SampleSpec, context: Context) -> ExperimentMetadata ] # STEP 2: Simulate the simulations using scythe - root_run_name = spec.progressive_training_spec.experiment_id - run_name = f"{root_run_name}/sample" + run_name = f"{spec.experiment_id}/sample" 
exp = BaseExperiment( # TODO: replace with simulate_globi_flat_building experiment=dummy_simulation, # TODO: add configurability to switch between simulations. run_name=run_name, - storage_settings=spec.progressive_training_spec.storage_settings, + storage_settings=spec.storage_settings or ScytheStorageSettings(), ) run, ref = exp.allocate( specs, - version="bumpmajor", # TODO: bump minor if not the first iteration. - recursion_map=spec.progressive_training_spec.iteration.recursion, + version="bumpmajor", # TODO: bump minor if not the first iteration; actually, not necessary since root experiment takes care of this + recursion_map=spec.iteration.recursion, ) run_name = run.versioned_experiment.base_experiment.run_name if not run_name: msg = "Run name is required." raise ValueError(msg) - run_id = run.experiment_id - return ExperimentMetadata( + return ExperimentRunWithRef( + run=run, workflow_run_id=ref.workflow_run_id, - run_id=run_id, - run_name=run_name, - version=run.versioned_experiment.version, - datetime=run.timestamp, ) @@ -119,7 +116,9 @@ def create_simulations(spec: SampleSpec, context: Context) -> ExperimentMetadata execution_timeout=timedelta(hours=5), parents=[create_simulations], ) -async def await_simulations(spec: SampleSpec, context: Context) -> ScatterGatherResult: +async def await_simulations( + spec: ProgressiveTrainingSpec, context: Context +) -> ScatterGatherResult: """Await the simulations.""" parent_output = context.task_output(create_simulations) workflow_run_id = parent_output.workflow_run_id @@ -136,22 +135,14 @@ async def await_simulations(spec: SampleSpec, context: Context) -> ScatterGather execution_timeout=timedelta(hours=1), parents=[await_simulations, create_simulations], ) -async def combine_results(spec: SampleSpec, context: Context) -> CombineResultsResult: +async def combine_results( + spec: ProgressiveTrainingSpec, context: Context +) -> CombineResultsResult: """Combine the results of the simulations.""" results = 
context.task_output(await_simulations) run_info = context.task_output(create_simulations) # TODO: kind of annoying have to reconstruct the run object here; necessary because the base experiment is not serializable. - _run = ExperimentRun( - versioned_experiment=VersionedExperiment( - base_experiment=BaseExperiment( - experiment=dummy_simulation, # TODO: replace with simulate_globi_flat_building - run_name=run_info.run_name, - storage_settings=spec.progressive_training_spec.storage_settings, - ), - version=run_info.version, - ), - timestamp=run_info.datetime, - ) + _run = run_info.run # files = run.list_results_files() # TODO: configure which files to store/combine via input spec. return CombineResultsResult( @@ -164,7 +155,7 @@ class StartTrainingResult(BaseModel): """The result of starting the training.""" training_spec: TrainWithCVSpec - experiment_metadata: ExperimentMetadata + experiment_run_with_ref: ExperimentRunWithRef @iterative_training.task( @@ -173,29 +164,28 @@ class StartTrainingResult(BaseModel): execution_timeout=timedelta(hours=1), parents=[combine_results], ) -async def start_training(spec: SampleSpec, context: Context) -> StartTrainingResult: +async def start_training( + spec: ProgressiveTrainingSpec, context: Context +) -> StartTrainingResult: """Start the training.""" results = context.task_output(combine_results) train_spec = TrainWithCVSpec( - progressive_training_spec=spec.progressive_training_spec, - progressive_training_iteration_ix=spec.progressive_training_iteration_ix, + parent=spec, data_uri=results.combined_scatter_gather_result.uris[ "main_result" ], # TODO: should be configure which result to use - stage_type="train", ) # TODO: create the training specs and then allocate the experiment specs = train_spec.schedule - root_run_name = spec.progressive_training_spec.experiment_id - run_name = f"{root_run_name}/train" + run_name = f"{spec.experiment_id}/train" exp = BaseExperiment( experiment=train_regressor_with_cv_fold, 
run_name=run_name, - storage_settings=spec.progressive_training_spec.storage_settings, + storage_settings=spec.storage_settings or ScytheStorageSettings(), ) run, ref = exp.allocate( specs, @@ -212,12 +202,9 @@ async def start_training(spec: SampleSpec, context: Context) -> StartTrainingRes return StartTrainingResult( training_spec=train_spec, - experiment_metadata=ExperimentMetadata( + experiment_run_with_ref=ExperimentRunWithRef( + run=run, workflow_run_id=ref.workflow_run_id, - run_id=run.experiment_id, - run_name=run.versioned_experiment.base_experiment.run_name, - version=run.versioned_experiment.version, - datetime=run.timestamp, ), ) @@ -228,10 +215,12 @@ async def start_training(spec: SampleSpec, context: Context) -> StartTrainingRes execution_timeout=timedelta(hours=5), parents=[start_training], ) -async def await_training(spec: SampleSpec, context: Context) -> ScatterGatherResult: +async def await_training( + spec: ProgressiveTrainingSpec, context: Context +) -> ScatterGatherResult: """Await the training.""" parent_output = context.task_output(start_training) - workflow_run_id = parent_output.experiment_metadata.workflow_run_id + workflow_run_id = parent_output.experiment_run_with_ref.workflow_run_id context.log("Awaiting training...") results = await scatter_gather.aio_get_result(workflow_run_id) context.log("Training completed.") @@ -259,11 +248,11 @@ class RecursionTransition(BaseModel): parents=[await_training], ) async def evaluate_training( - spec: SampleSpec, context: Context + spec: ProgressiveTrainingSpec, context: Context ) -> TrainingEvaluationResult: """Evaluate the training.""" _results = context.task_output(await_training) - return TrainingEvaluationResult(converged=True) + return TrainingEvaluationResult(converged=False) @iterative_training.task( @@ -273,30 +262,35 @@ async def evaluate_training( parents=[evaluate_training, start_training], ) async def transition_recursion( - spec: SampleSpec, context: Context + spec: 
ProgressiveTrainingSpec, context: Context ) -> RecursionTransition: """Transition the recursion.""" results = context.task_output(evaluate_training) if results.converged: # create child return RecursionTransition(reasoning="converged", child_workflow_run_id=None) - if ( - spec.progressive_training_iteration_ix + 1 - >= spec.progressive_training_spec.iteration.max_iters - ): + if spec.iteration.at_max_iters: return RecursionTransition(reasoning="max_depth", child_workflow_run_id=None) start_training_output = context.task_output(start_training) - sample_spec = SampleSpec( - progressive_training_spec=spec.progressive_training_spec, - progressive_training_iteration_ix=spec.progressive_training_iteration_ix + 1, - data_uri=start_training_output.training_spec.data_uri, - stage_type="sample", + next_spec = spec.model_copy(deep=True) + next_spec.iteration.current_iter += 1 + next_spec.data_uri = ( + start_training_output.training_spec.data_uri + ) # or could be from combined + exp = BaseExperiment( + experiment=iterative_training, + run_name=f"{next_spec.base_run_name}", + storage_settings=spec.storage_settings or ScytheStorageSettings(), ) - - ref = await iterative_training.aio_run_no_wait( - sample_spec, + _run, ref = exp.allocate( + next_spec, + version="bumpminor", + recursion_map=RecursionMap( + factor=2, + max_depth=0, + ), ) return RecursionTransition( reasoning=None, child_workflow_run_id=ref.workflow_run_id @@ -308,16 +302,34 @@ async def transition_recursion( from globi.models.surrogate.training import ProgressiveTrainingSpec + base_run_name = "test-experiment" progressive_training_spec = ProgressiveTrainingSpec( - experiment_id="test-experiment", + sort_index=0, + experiment_id="placeholder", gis_uri=HttpUrl("https://example.com/gis.parquet"), + iteration=IterationSpec( + max_iters=4, + ), storage_settings=ScytheStorageSettings(), - ) - spec = SampleSpec( - progressive_training_spec=progressive_training_spec, - progressive_training_iteration_ix=0, data_uri=None, 
- stage_type="sample", + base_run_name=base_run_name, + ) + + exp = BaseExperiment( + experiment=iterative_training, + run_name="test-experiment", ) - result = iterative_training.run(spec) - print(result) + + run, ref = exp.allocate( + progressive_training_spec, + version="bumpmajor", + recursion_map=RecursionMap( + factor=2, + max_depth=0, + ), + ) + import yaml + + print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) + # result = iterative_training.run(spec) + # print(result) diff --git a/uv.lock b/uv.lock index 2cf86e0..e9ec544 100644 --- a/uv.lock +++ b/uv.lock @@ -1436,7 +1436,7 @@ requires-dist = [ { name = "rasterio", marker = "extra == 'visualization'", specifier = ">=1.3.9" }, { name = "scikit-learn", specifier = ">=1.3.0" }, { name = "scipy", specifier = ">=1.11.0,<1.15" }, - { name = "scythe-engine", specifier = ">=0.1.2" }, + { name = "scythe-engine", git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows" }, { name = "seaborn", marker = "extra == 'visualization'", specifier = ">=0.13.0" }, { name = "shapely", specifier = ">=2.0.0" }, { name = "streamlit", marker = "extra == 'visualization'", specifier = ">=1.28.0" }, @@ -4260,7 +4260,7 @@ wheels = [ [[package]] name = "scythe-engine" version = "0.1.2" -source = { registry = "https://pypi.org/simple" } +source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#a750f3f18525dde2ebb73c9ad0629cba5ede77a1" } dependencies = [ { name = "boto3" }, { name = "fastparquet" }, @@ -4273,10 +4273,6 @@ dependencies = [ { name = "tables" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ac/00/061a3e1301b03b3b1c6817ea5db19cc62c0448c02c504e391d49273451c2/scythe_engine-0.1.2.tar.gz", hash = "sha256:a53c49a8a8700f1dfd7a61f4868898289c1d3751b42ca767369faf7a3c08dc5e", size = 225628, upload-time = "2026-02-12T15:53:02.416Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/d3/69/1cfac0fe0aa049d335f2ff6a3aeef32cc7893551ffe831e4d78ccde50b7b/scythe_engine-0.1.2-py3-none-any.whl", hash = "sha256:b2dd6924c0b26a1dfe9a68e9f6b028b77a944263849c82d41a28e635baf899d8", size = 33195, upload-time = "2026-02-12T15:53:00.827Z" }, -] [[package]] name = "seaborn" From 58058fafa85eca20408806541d580aa45f0d3bf4 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sat, 7 Mar 2026 14:39:32 -0500 Subject: [PATCH 03/31] enable results cache growing --- src/globi/models/surrogate/training.py | 30 ++++-- src/globi/pipelines/training.py | 136 ++++++++++++++----------- 2 files changed, 100 insertions(+), 66 deletions(-) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 760bacf..963e7fa 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -8,8 +8,8 @@ import pandas as pd from pydantic import BaseModel, Field from scythe.base import ExperimentInputSpec -from scythe.scatter_gather import RecursionMap -from scythe.utils.filesys import FileReference, OptionalFileReference +from scythe.scatter_gather import RecursionMap, ScatterGatherResult +from scythe.utils.filesys import FileReference, S3Url if TYPE_CHECKING: from mypy_boto3_s3.client import S3Client as S3ClientType @@ -197,11 +197,24 @@ class ProgressiveTrainingSpec(ExperimentInputSpec): ..., description="The uri of the gis data to train on.", ) - data_uri: OptionalFileReference = Field( - ..., - description="The uris of the previous simulation results to sample from.", + data_uris: ScatterGatherResult | None = Field( + default=None, + description="The uri of the previous simulation results to train on.", ) + def format_combined_output_key(self, key: str) -> str: + """Format the output key for a combined result file.""" + return f"{self.prefix}/combined/{key}.parquet" + + def format_combined_output_uri(self, key: str) -> S3Url: + """Format the 
output uri for a combined result file.""" + if self.storage_settings is None: + msg = "Storage settings are not set, so we can't construct a combined output uri." + raise ValueError(msg) + return S3Url( + f"s3://{self.storage_settings.BUCKET}/{self.format_combined_output_key(key)}" + ) + @property def gis_path(self) -> Path: """The path to the gis data.""" @@ -997,16 +1010,17 @@ def non_numeric_options(self) -> dict[str, list[str]]: class TrainWithCVSpec(StageSpec): """Train an SBEM model using a scatter gather approach for cross-fold validation.""" - data_uri: FileReference = Field( + data_uris: ScatterGatherResult = Field( ..., - description="The uri of the data to train on.", + description="The uris of the data to train on.", ) @property def schedule(self) -> list[TrainFoldSpec]: """Create the task schedule.""" schedule = [] - data_uri = self.data_uri + # TODO: this should be configured/selected/etc + data_uri = self.data_uris.uris["main_result"] if data_uri is None: msg = "Data URI is required for training." 
raise ValueError(msg) diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 1fba256..96e8bfe 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Literal +import pandas as pd from hatchet_sdk import Context from pydantic import BaseModel, HttpUrl from scythe.base import ExperimentOutputSpec @@ -14,6 +15,7 @@ from scythe.hatchet import hatchet from scythe.registry import ExperimentRegistry from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather +from scythe.utils.filesys import S3Url from globi.models.surrogate.dummy import DummySimulationInput, dummy_simulation from globi.models.surrogate.training import ( @@ -30,6 +32,40 @@ class FoldResult(ExperimentOutputSpec): pass +class CombineResultsResult(BaseModel): + """The result of combining the results of the simulations.""" + + incoming: ScatterGatherResult + combined: ScatterGatherResult + + +class ExperimentRunWithRef(BaseModel): + """An experiment run with a workflow run id.""" + + run: ExperimentRun + workflow_run_id: str + + +class StartTrainingResult(BaseModel): + """The result of starting the training.""" + + training_spec: TrainWithCVSpec + experiment_run_with_ref: ExperimentRunWithRef + + +class TrainingEvaluationResult(BaseModel): + """The result of evaluating the training.""" + + converged: bool + + +class RecursionTransition(BaseModel): + """The transition of the recursion.""" + + reasoning: Literal["max_depth", "converged"] | None + child_workflow_run_id: str | None + + @ExperimentRegistry.Register( description="Train a regressor with cross-fold validation.", ) @@ -42,13 +78,6 @@ def train_regressor_with_cv_fold( return FoldResult() -class CombineResultsResult(BaseModel): - """The result of combining the results of the simulations.""" - - scatter_gather_result: ScatterGatherResult - combined_scatter_gather_result: ScatterGatherResult - - iterative_training = 
hatchet.workflow( name="iterative_training", description="Sample a collection of buliding simulations to then simulate and train a surrogate model.", @@ -56,13 +85,6 @@ class CombineResultsResult(BaseModel): ) -class ExperimentRunWithRef(BaseModel): - """An experiment run with a workflow run id.""" - - run: ExperimentRun - workflow_run_id: str - - @iterative_training.task( name="iterative_training.create_simulations", schedule_timeout=timedelta(minutes=30), @@ -133,38 +155,54 @@ async def await_simulations( name="iterative_training.combine_results", schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(hours=1), - parents=[await_simulations, create_simulations], + parents=[await_simulations], ) -async def combine_results( +def combine_results( spec: ProgressiveTrainingSpec, context: Context ) -> CombineResultsResult: """Combine the results of the simulations.""" + # TODO: major consider how we handle beyond-memory scale scenarios. + # i.e. we probably need to refactor to allow lists of files that only the + # main worker is responsible for combining. results = context.task_output(await_simulations) - run_info = context.task_output(create_simulations) - # TODO: kind of annoying have to reconstruct the run object here; necessary because the base experiment is not serializable. - _run = run_info.run - # files = run.list_results_files() - # TODO: configure which files to store/combine via input spec. + combined_results: dict[str, S3Url] = {} + + if spec.data_uris: + shared_keys = set(spec.data_uris.uris.keys()) & set(results.uris.keys()) + old_keys_only = set(spec.data_uris.uris.keys()) - shared_keys + new_keys_only = set(results.uris.keys()) - shared_keys + # TODO: consider copying these over to the `combined` folder anyways. + for key in old_keys_only: + combined_results[key] = spec.data_uris.uris[key] + for key in new_keys_only: + combined_results[key] = results.uris[key] + # TODO: refactor to use a threadpool executor? 
+ # For memory reasons, it might be a good idea to stay single threaded here. + for key in shared_keys: + old_df = pd.read_parquet(str(spec.data_uris.uris[key])) + new_df = pd.read_parquet(str(results.uris[key])) + combined_df = pd.concat([old_df, new_df], axis=0) + uri = spec.format_combined_output_uri(key) + combined_df.to_parquet(str(uri)) + combined_results[key] = uri + + else: + # TODO: consider copying these over to the `combined` folder anyways. + combined_results = results.uris + return CombineResultsResult( - scatter_gather_result=results, - combined_scatter_gather_result=results, + incoming=results, + combined=ScatterGatherResult(uris=combined_results), ) -class StartTrainingResult(BaseModel): - """The result of starting the training.""" - - training_spec: TrainWithCVSpec - experiment_run_with_ref: ExperimentRunWithRef - - @iterative_training.task( name="iterative_training.start_training", schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(hours=1), parents=[combine_results], ) -async def start_training( +def start_training( spec: ProgressiveTrainingSpec, context: Context ) -> StartTrainingResult: """Start the training.""" @@ -172,13 +210,9 @@ async def start_training( train_spec = TrainWithCVSpec( parent=spec, - data_uri=results.combined_scatter_gather_result.uris[ - "main_result" - ], # TODO: should be configure which result to use + data_uris=results.combined, # TODO: should configure which results to use ) - # TODO: create the training specs and then allocate the experiment - specs = train_spec.schedule run_name = f"{spec.experiment_id}/train" @@ -189,7 +223,7 @@ async def start_training( ) run, ref = exp.allocate( specs, - version="bumpmajor", # TODO: bump minor if not the first iteration. 
+ version="bumpmajor", # There is normally only ever one training round per parent minor version, except during replays etc recursion_map=RecursionMap( factor=2, max_depth=0, @@ -228,26 +262,13 @@ async def await_training( return results -class TrainingEvaluationResult(BaseModel): - """The result of evaluating the training.""" - - converged: bool - - -class RecursionTransition(BaseModel): - """The transition of the recursion.""" - - reasoning: Literal["max_depth", "converged"] | None - child_workflow_run_id: str | None - - @iterative_training.task( name="iterative_training.evaluate_training", schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(minutes=5), parents=[await_training], ) -async def evaluate_training( +def evaluate_training( spec: ProgressiveTrainingSpec, context: Context ) -> TrainingEvaluationResult: """Evaluate the training.""" @@ -259,9 +280,9 @@ async def evaluate_training( name="iterative_training.transition_recursion", schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(minutes=5), - parents=[evaluate_training, start_training], + parents=[evaluate_training, combine_results], ) -async def transition_recursion( +def transition_recursion( spec: ProgressiveTrainingSpec, context: Context ) -> RecursionTransition: """Transition the recursion.""" @@ -272,13 +293,12 @@ async def transition_recursion( if spec.iteration.at_max_iters: return RecursionTransition(reasoning="max_depth", child_workflow_run_id=None) - start_training_output = context.task_output(start_training) + # start_training_output = context.task_output(start_training) + combine_results_output = context.task_output(combine_results) next_spec = spec.model_copy(deep=True) next_spec.iteration.current_iter += 1 - next_spec.data_uri = ( - start_training_output.training_spec.data_uri - ) # or could be from combined + next_spec.data_uris = combine_results_output.combined exp = BaseExperiment( experiment=iterative_training, run_name=f"{next_spec.base_run_name}", @@ -311,7 
+331,7 @@ async def transition_recursion( max_iters=4, ), storage_settings=ScytheStorageSettings(), - data_uri=None, + data_uris=None, base_run_name=base_run_name, ) From 3707ecee60ea7a75e858d47f6d8b61e9c64097a6 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sat, 7 Mar 2026 15:20:34 -0500 Subject: [PATCH 04/31] clean up some vestiges and update scythe --- src/globi/models/surrogate/training.py | 79 -------------------------- src/globi/pipelines/training.py | 5 ++ uv.lock | 2 +- 3 files changed, 6 insertions(+), 80 deletions(-) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 963e7fa..65be229 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -227,23 +227,6 @@ def gis_data(self) -> pd.DataFrame: """Load the gis data.""" return pd.read_parquet(self.gis_path) - # def s3_key_for_iteration(self, iteration_ix: int) -> str: - # """The s3 root key for the iteration.""" - # return f"{self.experiment_id}/iter-{iteration_ix:03d}" - - # def upload_self(self, s3_client: S3ClientType): - # """Upload a dumpout of this spec to the s3 bucket root.""" - # with tempfile.TemporaryDirectory() as tempdir: - # tempdir = Path(tempdir) - # fpath = tempdir / "spec.yml" - # with open(fpath, "w") as f: - # yaml.dump(self.model_dump(mode="json"), f, indent=2) - # s3_client.upload_file( - # fpath.as_posix(), - # self.bucket, - # f"hatchet/{self.experiment_id}/artifacts/experiment-spec.yml", - # ) - class StageSpec(BaseModel): """A spec that is common to both the sample and train stages (and possibly others).""" @@ -258,36 +241,6 @@ def random_generator(self) -> np.random.Generator: """The random generator.""" return np.random.default_rng(self.parent.iteration.current_iter) - # @cached_property - # def experiment_key(self) -> str: - # """The root key for the experiment.""" - # return 
f"{self.progressive_training_spec.s3_key_for_iteration(self.progressive_training_iteration_ix)}/{self.stage_type}" - - # def load_previous_data(self, s3_client: S3ClientType) -> pd.DataFrame | None: - # """Load the previous data.""" - # if self.data_uri is None: - # return None - # with tempfile.TemporaryDirectory() as tmpdir: - # tmpdir = Path(tmpdir) - # fpath = tmpdir / "previous_data.parquet" - # fetch_uri( - # uri=self.data_uri, - # local_path=fpath, - # use_cache=False, - # s3=s3_client, - # ) - # df = pd.read_parquet(fpath) - # return df - - -# BASE EXPERIMENT/v1.0.0 -# BASE EXPERIMENT/v1.0.0/simulations/v1.0.0/[...] -# BASE EXPERIMENT/v1.0.0/training/v1.0.0/[...] -# BASE EXPERIMENT/v1.0.0/simulations/v2.0.0/[...] -# BASE EXPERIMENT/v1.0.0/training/v2.0.0/[...] -# BASE EXPERIMENT/v1.0.0/simulations/v2.0.0/[...] -# BASE EXPERIMENT/v1.0.0/training/v3.0.0/[...] - class SampleSpec(StageSpec): """A spec for the sampling stage of the progressive training.""" @@ -477,38 +430,6 @@ def to_sim_specs(self, df: pd.DataFrame): # } # return payload - # def combine_results(self, new_data_uri: URIResponse, s3_client: S3ClientType): - # """Combine the results of the previous and new data.""" - # previous_data = self.load_previous_data(s3_client) - # with tempfile.TemporaryDirectory() as tmpdir: - # tmpdir = Path(tmpdir) - # fpath = tmpdir / "new_data.parquet" - # fetch_uri( - # uri=new_data_uri.uri, local_path=fpath, use_cache=False, s3=s3_client - # ) - # # TODO: data frame subsection selection should be a configuration option within the - # # progressive iteration training spec. 
- # df = cast( - # pd.DataFrame, - # cast(pd.DataFrame, pd.read_hdf(fpath, key="results")), - # ) - # if previous_data is not None: - # df = pd.concat([previous_data, df], axis=0) - - # # strip out any constant columns - # is_all_zeros = (df.max(axis=0) - df.min(axis=0)).abs() < 1e-5 - # df = df.loc[:, ~is_all_zeros] - # # serialize to a parquet file and upload to s3 - # bucket = self.progressive_training_spec.bucket - # with tempfile.TemporaryDirectory() as tmpdir: - # tmpdir = Path(tmpdir) - # fpath = tmpdir / "results.parquet" - # df.to_parquet(fpath) - # key = f"hatchet/{self.experiment_key}/full-dataset.pq" - # specs_uri = f"s3://{bucket}/{key}" - # s3_client.upload_file(fpath.as_posix(), bucket, key) - # return specs_uri - class TrainFoldSpec(ExperimentInputSpec): """Train an sbem model for a specific fold. diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 96e8bfe..e614d53 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -167,6 +167,11 @@ def combine_results( results = context.task_output(await_simulations) combined_results: dict[str, S3Url] = {} + # TODO: in the old version, w removed constant columns from the data, i.e.: + # is_constant = (df.max(axis=0) - df.min(axis=0)).abs() < 1e-5 + # df = df.loc[:, ~is_constant] + # Should this sort of data cleaning be done here, or should it be done in the training task? 
+ if spec.data_uris: shared_keys = set(spec.data_uris.uris.keys()) & set(results.uris.keys()) old_keys_only = set(spec.data_uris.uris.keys()) - shared_keys diff --git a/uv.lock b/uv.lock index e9ec544..e642899 100644 --- a/uv.lock +++ b/uv.lock @@ -4260,7 +4260,7 @@ wheels = [ [[package]] name = "scythe-engine" version = "0.1.2" -source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#a750f3f18525dde2ebb73c9ad0629cba5ede77a1" } +source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#9aad5d97eaa9ca33bc5ac9e21ec31c9b60f677f1" } dependencies = [ { name = "boto3" }, { name = "fastparquet" }, From 79f0d19d513ca4472312c5643ae237fe01439d09 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sat, 7 Mar 2026 18:04:52 -0500 Subject: [PATCH 05/31] improve data management --- src/globi/models/surrogate/training.py | 189 +++++++++---------------- src/globi/pipelines/training.py | 6 +- 2 files changed, 73 insertions(+), 122 deletions(-) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 65be229..37a67db 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -1,5 +1,6 @@ """Models used for the surrogate training pipeline.""" +import math from functools import cached_property from pathlib import Path from typing import TYPE_CHECKING, Literal, cast @@ -458,66 +459,41 @@ class TrainFoldSpec(ExperimentInputSpec): However, with xgb, this is less imperative. """ - n_folds: int = Field( - ..., description="The number of folds for the entire parent task." 
- ) - data_uri: FileReference = Field(..., description="The uri of the data to train on.") - stratification_field: str = Field( - ..., - description="The field to stratify the data by for monitoring convergence in parent task.", - ) - progressive_training_iter_ix: int = Field( - ..., - description="The index of the current training iteration within the outer loop.", + data_uris: dict[str, S3Url] = Field( + ..., description="The uris of the data to train on." ) - - @property - def data_path(self) -> Path: - """The path to the data.""" - if isinstance(self.data_uri, Path): - return self.data_uri - return self.fetch_uri(self.data_uri) + parent: ProgressiveTrainingSpec = Field(..., description="The parent spec.") @cached_property + def combined_data(self) -> pd.DataFrame: + """Combines the data from the data uris into a single dataframe with a flattened column index.""" + dfs: dict[str, pd.DataFrame] = { + key: pd.read_parquet(str(uri)) for key, uri in self.data_uris.items() + } + if not all( + df.index.equals(next(iter(dfs.values())).index) for df in dfs.values() + ): + msg = "The indices of the dataframes are not all equal. " + "This is not supported, since the features must be identical for all outputs.." 
+ raise ValueError(msg) + + for df in dfs.values(): + df.columns = df.columns.to_flat_index() + df.columns = [ + "/".join(col) if isinstance(col, tuple | list) else col + for col in df.columns + ] + + combined_df = pd.concat(dfs, axis=1) + combined_df.columns = combined_df.columns.to_flat_index() + combined_df.columns = ["/".join(col) for col in combined_df.columns] + shuffled_df = combined_df.sample(frac=1, random_state=42, replace=False) + return shuffled_df + + @property def data(self) -> pd.DataFrame: - """The data.""" - df_all = pd.read_parquet(self.data_path) - df_energy: pd.DataFrame = cast(pd.DataFrame, df_all["Energy"]["Raw"]) - df_energy = cast( - pd.DataFrame, - ( - df_energy.T.groupby( - level=[ - lev for lev in df_energy.columns.names if lev.lower() != "month" - ] - ) - .sum() - .T - ), - ) - df_peaks: pd.DataFrame = cast(pd.DataFrame, df_all["Peak"]["Raw"]) - df_peaks = cast( - pd.DataFrame, - ( - df_peaks.T.groupby( - level=[ - lev for lev in df_peaks.columns.names if lev.lower() != "month" - ] - ) - .max() - .T - ), - ) - df_all_annual = pd.concat( - [df_energy, df_peaks], - axis=1, - keys=["Energy", "Peak"], - names=["Measurement"], - ) - # TODO: should we assume they are shuffled already? 
- # shuffle the order of the rows - df_all_annual = df_all_annual.sample(frac=1, random_state=42, replace=False) - return df_all_annual + """The combined data.""" + return self.combined_data @cached_property def dparams(self) -> pd.DataFrame: @@ -527,7 +503,7 @@ def dparams(self) -> pd.DataFrame: @cached_property def stratum_names(self) -> list[str]: """The values of the stratification field.""" - return sorted(self.dparams[self.stratification_field].unique().tolist()) + return sorted(self.dparams[self.parent.stratification.field].unique().tolist()) @cached_property def data_by_stratum(self) -> dict[str, pd.DataFrame]: @@ -543,7 +519,8 @@ def data_by_stratum(self) -> dict[str, pd.DataFrame]: """ return { val: cast( - pd.DataFrame, self.data[self.dparams[self.stratification_field] == val] + pd.DataFrame, + self.data[self.dparams[self.parent.stratification.field] == val], ) for val in self.stratum_names } @@ -560,15 +537,17 @@ def train_test_split_by_fold_and_stratum(self) -> pd.DataFrame: all_strata = [] for val in self.stratum_names: folds = [] - for i in range(self.n_folds): - fold = self.data_by_stratum[val].iloc[i :: self.n_folds] + for i in range(self.parent.cross_val.n_folds): + fold = self.data_by_stratum[val].iloc[ + i :: self.parent.cross_val.n_folds + ] folds.append(fold) folds_df = pd.concat( folds, axis=0, keys=[ "test" if i == self.sort_index else "train" - for i in range(self.n_folds) + for i in range(self.parent.cross_val.n_folds) ], names=["split_segment"], ) @@ -618,38 +597,38 @@ def non_numeric_options(self) -> dict[str, list[str]]: } return non_numeric_options - # @cached_property - # def numeric_min_maxs(self) -> dict[str, tuple[float, float]]: - # """Get the min and max for numeric features. + @cached_property + def numeric_min_maxs(self) -> dict[str, tuple[float, float]]: + """Get the min and max for numeric features. - # We perform this only on the training set to prevent leakage. + We perform this only on the training set to prevent leakage. 
- # TODO: In the future, this should be based off of transform instructions. + TODO: In the future, this should be based off of transform instructions. - # Args: - # params (pd.DataFrame): The parameters to get the min and max for. + Args: + params (pd.DataFrame): The parameters to get the min and max for. - # Returns: - # norm_bounds (dict[str, tuple[float, float]]): The min and max for each numeric feature. - # """ - # params, _ = self.train_segment - # fparams = params[[col for col in params.columns if col.startswith("feature.")]] - # numeric_cols = fparams.select_dtypes(include=["number"]).columns - # numeric_min_maxs = { - # col: (float(fparams[col].min()), float(fparams[col].max())) - # for col in numeric_cols - # } - # for col in numeric_min_maxs: - # low, high = numeric_min_maxs[col] - # # we want to floor the "low" value down to the nearest 0.001 - # # and ceil the "high" value up to the nearest 0.001 - # # e.g. if low is -0.799, we want to set it to -0.800 - # # and if high is 0.799, we want to set it to 0.800 - # numeric_min_maxs[col] = ( - # math.floor(low * 1000) / 1000, - # math.ceil(high * 1000) / 1000, - # ) - # return numeric_min_maxs + Returns: + norm_bounds (dict[str, tuple[float, float]]): The min and max for each numeric feature. + """ + params, _ = self.train_segment + fparams = params[[col for col in params.columns if col.startswith("feature.")]] + numeric_cols = fparams.select_dtypes(include=["number"]).columns + numeric_min_maxs = { + col: (float(fparams[col].min()), float(fparams[col].max())) + for col in numeric_cols + } + for col in numeric_min_maxs: + low, high = numeric_min_maxs[col] + # we want to floor the "low" value down to the nearest 0.001 + # and ceil the "high" value up to the nearest 0.001 + # e.g. 
if low is -0.799, we want to set it to -0.800 + # and if high is 0.799, we want to set it to 0.800 + numeric_min_maxs[col] = ( + math.floor(low * 1000) / 1000, + math.ceil(high * 1000) / 1000, + ) + return numeric_min_maxs # @cached_property # def feature_spec(self) -> RegressorInputSpec: @@ -941,10 +920,6 @@ def schedule(self) -> list[TrainFoldSpec]: """Create the task schedule.""" schedule = [] # TODO: this should be configured/selected/etc - data_uri = self.data_uris.uris["main_result"] - if data_uri is None: - msg = "Data URI is required for training." - raise ValueError(msg) for i in range(self.parent.cross_val.n_folds): schedule.append( @@ -952,36 +927,12 @@ def schedule(self) -> list[TrainFoldSpec]: # TODO: this should be set in a better manner experiment_id="placeholder", sort_index=i, - n_folds=self.parent.cross_val.n_folds, - data_uri=data_uri, - stratification_field=self.parent.stratification.field, - progressive_training_iter_ix=self.parent.iteration.current_iter, - storage_settings=self.parent.storage_settings, + data_uris=self.data_uris.uris, + parent=self.parent, ) ) return schedule - # def allocate(self, s3_client: S3ClientType): - # """Allocate the task.""" - # # 1. turn the schedule into a parquet dataframe - # df = pd.DataFrame([m.model_dump(mode="json") for m in self.schedule]) - # bucket = self.progressive_training_spec.bucket - # with tempfile.TemporaryDirectory() as tempdir: - # temp_path = Path(tempdir) / "train_specs.parquet" - # df.to_parquet(temp_path) - # key = f"hatchet/{self.experiment_key}/train_specs.parquet" - # specs_uri = f"s3://{bucket}/{key}" - # s3_client.upload_file(temp_path.as_posix(), bucket, key) - - # payload = { - # "specs": specs_uri, - # "bucket": bucket, - # # TODO: this should be selected in a better manner. 
- # "workflow_name": "train_regressor_with_cv_fold", - # "experiment_id": self.experiment_key, - # } - # return payload - # def check_convergence(self, uri: URIResponse, s3_client: S3ClientType): # """Check the convergence of the training.""" # with tempfile.TemporaryDirectory() as tempdir: diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index e614d53..5cd58cf 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -29,7 +29,7 @@ class FoldResult(ExperimentOutputSpec): """The output for a fold.""" - pass + columns: list[str] class CombineResultsResult(BaseModel): @@ -74,8 +74,7 @@ def train_regressor_with_cv_fold( ) -> FoldResult: """Train a regressor with cross-fold validation.""" # DO TRAINING - - return FoldResult() + return FoldResult(columns=input_spec.data.columns.tolist()) iterative_training = hatchet.workflow( @@ -218,6 +217,7 @@ def start_training( data_uris=results.combined, # TODO: should configure which results to use ) + # Alternatively, one task per fold-column combination? 
specs = train_spec.schedule run_name = f"{spec.experiment_id}/train" From 44338dec63567e60e42c03849919443920bcff09 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sat, 7 Mar 2026 21:22:43 -0500 Subject: [PATCH 06/31] begin training --- .cursor/rules/hatchet-docs.mdc | 12 + docker-compose.yml | 4 + pyproject.toml | 49 ++- src/globi/models/surrogate/dummy.py | 10 +- src/globi/models/surrogate/training.py | 335 +++++++++++++-- src/globi/pipelines/training.py | 18 +- src/globi/worker/Dockerfile | 4 +- uv.lock | 567 ++++++++++++++++++++++++- 8 files changed, 931 insertions(+), 68 deletions(-) create mode 100644 .cursor/rules/hatchet-docs.mdc diff --git a/.cursor/rules/hatchet-docs.mdc b/.cursor/rules/hatchet-docs.mdc new file mode 100644 index 0000000..7738938 --- /dev/null +++ b/.cursor/rules/hatchet-docs.mdc @@ -0,0 +1,12 @@ +--- +description: Hatchet documentation MCP server +alwaysApply: true +--- + +When working with Hatchet (task queues, workflows, durable execution), use the +Hatchet MCP docs server for accurate, up-to-date API reference and examples. + +MCP server URL: https://docs.hatchet.run/api/mcp + +Use the search_docs tool to find relevant documentation pages, or get_full_docs +for comprehensive context. Documentation covers Python, TypeScript, and Go SDKs. 
diff --git a/docker-compose.yml b/docker-compose.yml index c5a1d8a..9e945da 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -16,6 +16,10 @@ services: deploy: mode: replicated replicas: ${SIMULATIONS_REPLICAS:-4} + resources: + reservations: + devices: + - capabilities: [gpu] # Requests access to all GPUs volumes: - ./inputs:/code/inputs - ./outputs:/code/outputs diff --git a/pyproject.toml b/pyproject.toml index 8315ab7..e9d3431 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,9 +46,26 @@ visualization = [ "playwright>=1.40.0", ] -ml = [ +# ml = [ +# "torch>=2.5.0", +# "lightgbm>=4.6.0", +# "xgboost>=3.2.0", +# "pytorch-tabular>=1.2.0", +# "tensorboard>=2.20.0", +# "wandb>=0.25.0", +# "pytorch-tabular>=1.2.0", +# "torch>=2.5.0", +# ] + +ml-gpu = [ + "torch>=2.5.0", "lightgbm>=4.6.0", "xgboost>=3.2.0", + "pytorch-tabular>=1.2.0", + "tensorboard>=2.20.0", + "wandb>=0.25.0", + "pytorch-tabular>=1.2.0", + "torch>=2.5.0", ] cli = [ @@ -79,11 +96,41 @@ docs = [ "mkdocs-click>=0.9.0", ] +# [tool.uv] +# conflicts = [ +# [ +# { extra = "ml" }, +# { extra = "ml-gpu" }, +# ], +# ] + [project.scripts] worker = "globi.worker.main:main" globi = "globi.tools.cli.main:cli" +[[tool.uv.index]] +name = "pytorch-cu128" +url = "https://download.pytorch.org/whl/cu128" +explicit = true + +[[tool.uv.index]] +name = "pytorch-cpu" +url = "https://download.pytorch.org/whl/cpu" +explicit = true + +[[tool.uv.index]] +name = "pypi" +url = "https://pypi.org/simple" +explicit = true + [tool.uv.sources] +# PyTorch: CUDA 12.8 on Linux/Windows (where builds exist), PyPI (CPU) on macOS +torch = [ + { index = "pytorch-cu128", marker = "sys_platform != 'darwin'", extra = "ml-gpu" }, +# { index = "pytorch-cpu", marker = "sys_platform != 'darwin'", extra = "ml" }, + { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml-gpu" }, +# { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml" }, +] # scythe-engine = {git = "https://github.com/szvsw/scythe", branch = 
"feature/allow-optional-filerefs"} scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-versioning-workflows"} # scythe-engine = {path = "../scythe", editable = true} diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py index 0617998..1ebcf24 100644 --- a/src/globi/models/surrogate/dummy.py +++ b/src/globi/models/surrogate/dummy.py @@ -1,6 +1,8 @@ """Dummy simulation for testing.""" +import math from pathlib import Path +from typing import Literal import pandas as pd from scythe.base import ExperimentInputSpec, ExperimentOutputSpec @@ -10,8 +12,10 @@ class DummySimulationInput(ExperimentInputSpec): """The input for the dummy simulation.""" + weather_file: Literal["some", "other"] a: int b: float + c: int class DummySimulationOutput(ExperimentOutputSpec): @@ -30,9 +34,11 @@ def dummy_simulation( df = pd.DataFrame({ "target_0": [input_spec.a + input_spec.b], "target_1": [input_spec.a - input_spec.b], - "target_2": [input_spec.a * input_spec.b], - "target_3": [input_spec.a / input_spec.b], + "target_2": [input_spec.a * input_spec.b * input_spec.c], + "target_3": [input_spec.a / math.sin(input_spec.b)], }) + df_neg = -df + df = pd.concat([df, df_neg], axis=1, keys=["positive", "negative"], names=["sign"]) df = df.set_index(input_spec.make_multiindex()) return DummySimulationOutput( c=input_spec.a + input_spec.b, dataframes={"main_result": df} diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 37a67db..7d42e06 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -1,6 +1,6 @@ """Models used for the surrogate training pipeline.""" -import math +import warnings from functools import cached_property from pathlib import Path from typing import TYPE_CHECKING, Literal, cast @@ -166,6 +166,56 @@ def at_max_iters(self) -> bool: return self.current_iter + 1 >= self.max_iters +class TargetsConfigSpec(BaseModel): + """The 
targets config spec.""" + + columns: list[str] = Field( + default_factory=list, description="The columns to use as targets." + ) + normalization: Literal["min-max", "standard", "none"] = Field( + default="none", description="The normalization method to use." + ) + + +class FeatureConfigSpec(BaseModel): + """The feature config spec.""" + + continuous_columns: frozenset[str] = Field( + default=frozenset(), description="The continuous columns to use as features." + ) + categorical_columns: frozenset[str] = Field( + default=frozenset(), description="The categorical columns to use as features." + ) + exclude_columns: frozenset[str] = Field( + default=frozenset(), + description="The columns to exclude from the features.", + ) + cont_cat_unicity_transition_threshold: int = Field( + default=10, + description="The threshold for the number of unique values to transition from continuous to categorical variable.", + ) + + +EXCLUDED_COLUMNS = frozenset({ + "experiment_id", + "sort_index", + "workflow_run_id", + "root_workflow_run_id", +}) + + +class RegressionIOConfigSpec(BaseModel): + """The input/output spec for a regression model.""" + + targets: TargetsConfigSpec = Field( + default_factory=TargetsConfigSpec, description="The targets config spec." + ) + features: FeatureConfigSpec = Field( + default_factory=FeatureConfigSpec, + description="The features config spec.", + ) + + # TODO: should this be a subclass of ExperimentInputSpec? 
class ProgressiveTrainingSpec(ExperimentInputSpec): """A spec for iteratively training an SBEM regression model.""" @@ -178,10 +228,14 @@ class ProgressiveTrainingSpec(ExperimentInputSpec): default_factory=ConvergenceThresholds, description="The convergence criteria.", ) - model_hyperparameters: ModelHPType = Field( - default_factory=LGBHyperparameters, - description="The hyperparameters for the model.", + regression_io_config: RegressionIOConfigSpec = Field( + default_factory=RegressionIOConfigSpec, + description="The regression io config spec.", ) + # model_hyperparameters: ModelHPType = Field( + # default_factory=LGBHyperparameters, + # description="The hyperparameters for the model.", + # ) stratification: StratificationSpec = Field( default_factory=StratificationSpec, description="The stratification spec.", @@ -478,7 +532,10 @@ def combined_data(self) -> pd.DataFrame: raise ValueError(msg) for df in dfs.values(): + # TODO: use level names while constructing the sequential name + _level_names = df.columns.names df.columns = df.columns.to_flat_index() + df.columns = [ "/".join(col) if isinstance(col, tuple | list) else col for col in df.columns @@ -500,6 +557,91 @@ def dparams(self) -> pd.DataFrame: """The index of the data.""" return self.data.index.to_frame() + @cached_property + def all_columns(self) -> frozenset[str]: + """The names of all columns.""" + return frozenset(self.data.columns) + + @cached_property + def continuous_columns(self) -> frozenset[str]: + """The continuous columns.""" + feature_conf = self.parent.regression_io_config.features + candidates = self.all_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS + object_dtype_columns = ( + self.data[candidates].select_dtypes(include=["object"]).columns.tolist() + ) + candidates = candidates - frozenset(object_dtype_columns) + nunique_counts = cast(pd.Series, self.data[candidates].nunique()) + thresh = feature_conf.cont_cat_unicity_transition_threshold + passing_candidates = cast( + list[str],
+ cast(pd.Series, nunique_counts[nunique_counts > thresh]).index.tolist(), + ) + non_passing_candidates = cast( + list[str], + cast(pd.Series, nunique_counts[nunique_counts <= thresh]).index.tolist(), + ) + prespecified = feature_conf.continuous_columns + if prespecified: + skipped_candidates = frozenset(passing_candidates) - (prespecified) + possibly_not_continuous_candidates = ( + frozenset(non_passing_candidates) & prespecified + ) + if possibly_not_continuous_candidates: + warnings.warn( + f"The following columns were specified as continuous but have at most {thresh} unique values: {possibly_not_continuous_candidates}", + stacklevel=2, + ) + if skipped_candidates: + warnings.warn( + f"The following columns are likely continuous but are not included in the continuous columns: {skipped_candidates}", + stacklevel=2, + ) + return prespecified + return frozenset(passing_candidates) + + @cached_property + def categorical_columns(self) -> frozenset[str]: + """The categorical columns.""" + feature_conf = self.parent.regression_io_config.features + candidates = self.all_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS + object_dtype_columns = ( + self.data[candidates].select_dtypes(include=["object"]).columns.tolist() + ) + non_obj_dtype_columns = candidates - frozenset(object_dtype_columns) + nunique_counts = cast(pd.Series, self.data[non_obj_dtype_columns].nunique()) + thresh = feature_conf.cont_cat_unicity_transition_threshold + passing_non_obj_dtype_candidates = cast( + list[str], + cast(pd.Series, nunique_counts[nunique_counts <= thresh]).index.tolist(), + ) + non_passing_non_obj_dtype_candidates = cast( + list[str], + cast(pd.Series, nunique_counts[nunique_counts > thresh]).index.tolist(), + ) + prespecified = feature_conf.categorical_columns + if prespecified: + skipped_candidates = frozenset(passing_non_obj_dtype_candidates) - ( + prespecified + ) + possibly_not_categorical_candidates = ( + frozenset(non_passing_non_obj_dtype_candidates) & prespecified + 
) + if possibly_not_categorical_candidates: + warnings.warn( + f"The following columns were specified as categorical but have more than {thresh} unique values: {possibly_not_categorical_candidates}", + stacklevel=2, + ) + if skipped_candidates: + warnings.warn( + f"The following columns are likely categorical but are not included in the categorical columns: {skipped_candidates}", + stacklevel=2, + ) + return prespecified + return frozenset(passing_non_obj_dtype_candidates) | frozenset( + object_dtype_columns + ) + @cached_property def stratum_names(self) -> list[str]: """The values of the stratification field.""" @@ -579,56 +721,155 @@ def test_segment(self) -> tuple[pd.DataFrame, pd.DataFrame]: return params, targets @cached_property - def non_numeric_options(self) -> dict[str, list[str]]: - """Get the non-numeric options for categorical features. - - We must perform this across the entire dataset not just splits for consistency - and to ensure we get all options. + def targets(self) -> list[str]: + """The list of regression targets.""" + return ( + self.parent.regression_io_config.targets.columns + or self.data.columns.tolist() + ) - TODO: In the future, this should be based off of transform instructions. - """ - fparams = self.dparams[ - [col for col in self.dparams.columns if col.startswith("feature.")] + @cached_property + def target_range(self) -> list[tuple[float, float]]: + """The range of the regression targets.""" + _, targets = self.train_segment + targets = targets[self.targets] + return [ + (float(targets[col].min() * 0.8), float(targets[col].max() * 1.2)) + for col in self.targets ] - non_numeric_cols = fparams.select_dtypes(include=["object"]).columns - non_numeric_options = { - col: sorted(cast(pd.Series, fparams[col]).unique().tolist()) - for col in non_numeric_cols - } - return non_numeric_options - @cached_property - def numeric_min_maxs(self) -> dict[str, tuple[float, float]]: - """Get the min and max for numeric features. 
+ def train_pytorch_tabular(self, tempdir: Path): + """Train a pytorch tabular model.""" + from pytorch_tabular import TabularModel + from pytorch_tabular.config import ( + DataConfig, + ExperimentConfig, + OptimizerConfig, + TrainerConfig, + ) + from pytorch_tabular.models import GANDALFConfig + from pytorch_tabular.models.common.heads import LinearHeadConfig + + data_config = DataConfig( + target=self.targets, + continuous_cols=list(self.continuous_columns), + categorical_cols=list(self.categorical_columns), + # validation_split=0.2, + # continuous_feature_transform="", + # normalize_continuous_features=True, + ) + n_epochs = 200 + optimizer_config = OptimizerConfig( # TODO: make this all configurable + optimizer="AdamW", + optimizer_params={"weight_decay": 1e-5}, + lr_scheduler="CosineAnnealingLR", + lr_scheduler_params={"T_max": n_epochs, "eta_min": 1e-5}, + ) + trainer_config = TrainerConfig( + batch_size=256, + fast_dev_run=False, + max_epochs=n_epochs, + min_epochs=max(n_epochs // 20, 1), + early_stopping=None, + # gradient_clip_val=1.0, + # auto_lr_find=False + # max_time=60, + ) - We perform this only on the training set to prevent leakage. + model_config = GANDALFConfig( + task="regression", + head="LinearHead", + head_config=LinearHeadConfig( + # layers="", + activation="SiLU", + use_batch_norm=False, + # dropout=0, + ).__dict__, + target_range=self.target_range, + embedding_dims=None, + embedding_dropout=0.1, + batch_norm_continuous_input=True, + gflu_stages=6, + gflu_dropout=0.0, + gflu_feature_init_sparsity=0.3, + learnable_sparsity=True, + ) - TODO: In the future, this should be based off of transform instructions. + experiment_config = ExperimentConfig( + run_name=self.experiment_id, + project_name="globi-surrogate-training", + log_target="tensorboard", + ) - Args: - params (pd.DataFrame): The parameters to get the min and max for. 
+ model = TabularModel( + data_config=data_config, + optimizer_config=optimizer_config, + trainer_config=trainer_config, + experiment_config=experiment_config, + model_config=model_config, + ) - Returns: - norm_bounds (dict[str, tuple[float, float]]): The min and max for each numeric feature. - """ - params, _ = self.train_segment - fparams = params[[col for col in params.columns if col.startswith("feature.")]] - numeric_cols = fparams.select_dtypes(include=["number"]).columns - numeric_min_maxs = { - col: (float(fparams[col].min()), float(fparams[col].max())) - for col in numeric_cols - } - for col in numeric_min_maxs: - low, high = numeric_min_maxs[col] - # we want to floor the "low" value down to the nearest 0.001 - # and ceil the "high" value up to the nearest 0.001 - # e.g. if low is -0.799, we want to set it to -0.800 - # and if high is 0.799, we want to set it to 0.800 - numeric_min_maxs[col] = ( - math.floor(low * 1000) / 1000, - math.ceil(high * 1000) / 1000, - ) - return numeric_min_maxs + _, train_targets = self.train_segment + _, test_targets = self.test_segment + trainer = model.fit( + train=train_targets.reset_index(), + validation=test_targets.reset_index(), + seed=42, + ) + model.save_model((tempdir / "model").as_posix()) + return model, trainer + + # @cached_property + # def non_numeric_options(self) -> dict[str, list[str]]: + # """Get the non-numeric options for categorical features. + + # We must perform this across the entire dataset not just splits for consistency + # and to ensure we get all options. + + # TODO: In the future, this should be based off of transform instructions. 
+ # """ + # fparams = self.dparams[ + # [col for col in self.dparams.columns if col.startswith("feature.")] + # ] + # non_numeric_cols = fparams.select_dtypes(include=["object"]).columns + # non_numeric_options = { + # col: sorted(cast(pd.Series, fparams[col]).unique().tolist()) + # for col in non_numeric_cols + # } + # return non_numeric_options + + # @cached_property + # def numeric_min_maxs(self) -> dict[str, tuple[float, float]]: + # """Get the min and max for numeric features. + + # We perform this only on the training set to prevent leakage. + + # TODO: In the future, this should be based off of transform instructions. + + # Args: + # params (pd.DataFrame): The parameters to get the min and max for. + + # Returns: + # norm_bounds (dict[str, tuple[float, float]]): The min and max for each numeric feature. + # """ + # params, _ = self.train_segment + # fparams = params[[col for col in params.columns if col.startswith("feature.")]] + # numeric_cols = fparams.select_dtypes(include=["number"]).columns + # numeric_min_maxs = { + # col: (float(fparams[col].min()), float(fparams[col].max())) + # for col in numeric_cols + # } + # for col in numeric_min_maxs: + # low, high = numeric_min_maxs[col] + # # we want to floor the "low" value down to the nearest 0.001 + # # and ceil the "high" value up to the nearest 0.001 + # # e.g. 
if low is -0.799, we want to set it to -0.800 + # # and if high is 0.799, we want to set it to 0.800 + # numeric_min_maxs[col] = ( + # math.floor(low * 1000) / 1000, + # math.ceil(high * 1000) / 1000, + # ) + # return numeric_min_maxs # @cached_property # def feature_spec(self) -> RegressorInputSpec: diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 5cd58cf..f12589c 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -1,5 +1,6 @@ """The training pipeline.""" +import random from datetime import timedelta from pathlib import Path from typing import Literal @@ -74,6 +75,7 @@ def train_regressor_with_cv_fold( ) -> FoldResult: """Train a regressor with cross-fold validation.""" # DO TRAINING + _model, _trainer = input_spec.train_pytorch_tabular(tempdir) return FoldResult(columns=input_spec.data.columns.tolist()) @@ -96,12 +98,14 @@ def create_simulations( # STEP 1: Generate the training samples, allocate simulations specs = [ DummySimulationInput( + weather_file="some" if random.random() < 0.5 else "other", # noqa: S311 a=i, - b=i, + b=-i, + c=random.randint(-10, 10), # noqa: S311 experiment_id="placeholder", sort_index=i, ) - for i in range(10) + for i in range(1000) ] # STEP 2: Simulate the simulations using scythe @@ -325,13 +329,21 @@ def transition_recursion( if __name__ == "__main__": from scythe.settings import ScytheStorageSettings - from globi.models.surrogate.training import ProgressiveTrainingSpec + from globi.models.surrogate.training import ( + ProgressiveTrainingSpec, + StratificationSpec, + ) base_run_name = "test-experiment" progressive_training_spec = ProgressiveTrainingSpec( sort_index=0, experiment_id="placeholder", gis_uri=HttpUrl("https://example.com/gis.parquet"), + stratification=StratificationSpec( + field="weather_file", + sampling="equal", + aliases=["feature.weather.file"], + ), iteration=IterationSpec( max_iters=4, ), diff --git a/src/globi/worker/Dockerfile 
b/src/globi/worker/Dockerfile index a5c6bc6..e4f1409 100644 --- a/src/globi/worker/Dockerfile +++ b/src/globi/worker/Dockerfile @@ -94,12 +94,12 @@ WORKDIR /code COPY uv.lock pyproject.toml README.md /code/ # TODO: only insttall ml for certain containers by passing in a flag to the docker build command -RUN uv sync --locked --no-install-project --extra cli --extra ml +RUN uv sync --locked --no-install-project --extra cli --extra ml-gpu RUN uv run epi prisma generate COPY src /code/src/ -RUN uv sync --locked --extra cli +RUN uv sync --locked --extra cli --extra ml-gpu CMD [ "uv", "run", "src/globi/worker/main.py" ] diff --git a/uv.lock b/uv.lock index e642899..45faccc 100644 --- a/uv.lock +++ b/uv.lock @@ -2,9 +2,21 @@ version = 1 revision = 3 requires-python = ">=3.12" resolution-markers = [ - "python_full_version >= '3.14'", - "python_full_version == '3.13.*'", - "python_full_version < '3.13'", + "python_full_version >= '3.14' and sys_platform == 'linux'", + "python_full_version < '3.14' and sys_platform == 'linux'", + "python_full_version >= '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version < '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version >= '3.14' and sys_platform == 'darwin'", + "python_full_version < '3.14' and sys_platform == 'darwin'", +] + +[[package]] +name = "absl-py" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/64/c7/8de93764ad66968d19329a7e0c147a2bb3c7054c554d4a119111b8f9440f/absl_py-2.4.0.tar.gz", hash = "sha256:8c6af82722b35cf71e0f4d1d47dcaebfff286e27110a99fc359349b247dfb5d4", size = 116543, upload-time = "2026-01-28T10:17:05.322Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/a6/907a406bb7d359e6a63f99c313846d9eec4f7e6f7437809e03aa00fa3074/absl_py-2.4.0-py3-none-any.whl", hash = "sha256:88476fd881ca8aab94ffa78b7b6c632a782ab3ba1cd19c9bd423abc4fb4cd28d", size = 135750, 
upload-time = "2026-01-28T10:17:04.19Z" }, ] [[package]] @@ -148,6 +160,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, ] +[[package]] +name = "antlr4-python3-runtime" +version = "4.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034, upload-time = "2021-11-06T17:52:23.524Z" } + [[package]] name = "anyio" version = "4.11.0" @@ -899,6 +917,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ce/c6/c71e82e041c95ffe6a92ac707785500aa2a515a4339c2c7dd67e3c449249/cramjam-2.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:028400d699442d40dbda02f74158c73d05cb76587a12490d0bfedd958fd49188", size = 1713108, upload-time = "2025-07-27T21:24:10.147Z" }, ] +[[package]] +name = "cuda-bindings" +version = "12.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cuda-pathfinder", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/c2/65bfd79292b8ff18be4dd7f7442cea37bcbc1a228c1886f1dea515c45b67/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:694ba35023846625ef471257e6b5a4bc8af690f961d197d77d34b1d1db393f56", size = 11760260, upload-time = "2025-10-21T14:51:40.79Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c1/dabe88f52c3e3760d861401bb994df08f672ec893b8f7592dc91626adcf3/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:fda147a344e8eaeca0c6ff113d2851ffca8f7dfc0a6c932374ee5c47caa649c8", size = 12151019, upload-time = "2025-10-21T14:51:43.167Z" }, + { url = "https://files.pythonhosted.org/packages/05/8b/b4b2d1c7775fa403b64333e720cfcfccef8dcb9cdeb99947061ca5a77628/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cf8bfaedc238f3b115d957d1fd6562b7e8435ba57f6d0e2f87d0e7149ccb2da5", size = 11570071, upload-time = "2025-10-21T14:51:47.472Z" }, + { url = "https://files.pythonhosted.org/packages/63/56/e465c31dc9111be3441a9ba7df1941fe98f4aa6e71e8788a3fb4534ce24d/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:32bdc5a76906be4c61eb98f546a6786c5773a881f3b166486449b5d141e4a39f", size = 11906628, upload-time = "2025-10-21T14:51:49.905Z" }, + { url = "https://files.pythonhosted.org/packages/ec/07/6aff13bc1e977e35aaa6b22f52b172e2890c608c6db22438cf7ed2bf43a6/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3adf4958dcf68ae7801a59b73fb00a8b37f8d0595060d66ceae111b1002de38d", size = 11566797, upload-time = "2025-10-21T14:51:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/a3/84/1e6be415e37478070aeeee5884c2022713c1ecc735e6d82d744de0252eee/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56e0043c457a99ac473ddc926fe0dc4046694d99caef633e92601ab52cbe17eb", size = 11925991, upload-time = "2025-10-21T14:51:56.535Z" }, + { url = "https://files.pythonhosted.org/packages/1e/b5/96a6696e20c4ffd2b327f54c7d0fde2259bdb998d045c25d5dedbbe30290/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f53a7f453d4b2643d8663d036bafe29b5ba89eb904c133180f295df6dc151e5", size = 11624530, upload-time = "2025-10-21T14:52:01.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/af/6dfd8f2ed90b1d4719bc053ff8940e494640fe4212dc3dd72f383e4992da/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8b72ee72a9cc1b531db31eebaaee5c69a8ec3500e32c6933f2d3b15297b53686", size = 11922703, upload-time = "2025-10-21T14:52:03.585Z" }, + { url = "https://files.pythonhosted.org/packages/39/73/d2fc40c043bac699c3880bf88d3cebe9d88410cd043795382826c93a89f0/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:20f2699d61d724de3eb3f3369d57e2b245f93085cab44fd37c3bea036cea1a6f", size = 11565056, upload-time = "2025-10-21T14:52:08.338Z" }, + { url = "https://files.pythonhosted.org/packages/6c/19/90ac264acc00f6df8a49378eedec9fd2db3061bf9263bf9f39fd3d8377c3/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80bffc357df9988dca279734bc9674c3934a654cab10cadeed27ce17d8635ee", size = 11924658, upload-time = "2025-10-21T14:52:10.411Z" }, +] + +[[package]] +name = "cuda-pathfinder" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/02/59a5bc738a09def0b49aea0e460bdf97f65206d0d041246147cf6207e69c/cuda_pathfinder-1.4.1-py3-none-any.whl", hash = "sha256:40793006082de88e0950753655e55558a446bed9a7d9d0bcb48b2506d50ed82a", size = 43903, upload-time = "2026-03-06T21:05:24.372Z" }, +] + [[package]] name = "cycler" version = "0.12.1" @@ -956,6 +1002,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] +[[package]] +name = "einops" +version = "0.8.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/2c/77/850bef8d72ffb9219f0b1aac23fbc1bf7d038ee6ea666f331fa273031aa2/einops-0.8.2.tar.gz", hash = "sha256:609da665570e5e265e27283aab09e7f279ade90c4f01bcfca111f3d3e13f2827", size = 56261, upload-time = "2026-01-26T04:13:17.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/09/f8d8f8f31e4483c10a906437b4ce31bdf3d6d417b73fe33f1a8b59e34228/einops-0.8.2-py3-none-any.whl", hash = "sha256:54058201ac7087911181bfec4af6091bb59380360f069276601256a76af08193", size = 65638, upload-time = "2026-01-26T04:13:18.546Z" }, +] + [[package]] name = "energy-pandas" version = "0.4.1" @@ -1292,6 +1347,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" }, ] +[package.optional-dependencies] +http = [ + { name = "aiohttp" }, +] + [[package]] name = "future" version = "1.0.0" @@ -1379,8 +1439,13 @@ cli = [ { name = "click" }, { name = "xlsxwriter" }, ] -ml = [ +ml-gpu = [ { name = "lightgbm" }, + { name = "pytorch-tabular" }, + { name = "tensorboard" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, + { name = "wandb" }, { name = "xgboost" }, ] visualization = [ @@ -1425,7 +1490,7 @@ requires-dist = [ { name = "folium", marker = "extra == 'visualization'", specifier = ">=0.15.0" }, { name = "geopandas", specifier = ">=0.14.0" }, { name = "ladybug-core", specifier = ">=0.44.29" }, - { name = "lightgbm", marker = "extra == 'ml'", specifier = ">=4.6.0" }, + { name = "lightgbm", marker = "extra == 'ml-gpu'", specifier = ">=4.6.0" }, { name = "matplotlib", 
marker = "extra == 'visualization'", specifier = ">=3.8.0" }, { name = "numpy", specifier = ">=1.26.0" }, { name = "pandas", specifier = ">=2.1.0" }, @@ -1433,6 +1498,7 @@ requires-dist = [ { name = "plotly", marker = "extra == 'visualization'", specifier = ">=5.18.0" }, { name = "pydantic", specifier = ">=2.11,<3" }, { name = "pyproj", specifier = ">=3.6.0" }, + { name = "pytorch-tabular", marker = "extra == 'ml-gpu'", specifier = ">=1.2.0" }, { name = "rasterio", marker = "extra == 'visualization'", specifier = ">=1.3.9" }, { name = "scikit-learn", specifier = ">=1.3.0" }, { name = "scipy", specifier = ">=1.11.0,<1.15" }, @@ -1440,10 +1506,14 @@ requires-dist = [ { name = "seaborn", marker = "extra == 'visualization'", specifier = ">=0.13.0" }, { name = "shapely", specifier = ">=2.0.0" }, { name = "streamlit", marker = "extra == 'visualization'", specifier = ">=1.28.0" }, - { name = "xgboost", marker = "extra == 'ml'", specifier = ">=3.2.0" }, + { name = "tensorboard", marker = "extra == 'ml-gpu'", specifier = ">=2.20.0" }, + { name = "torch", marker = "sys_platform == 'darwin' and extra == 'ml-gpu'", specifier = ">=2.5.0", index = "https://pypi.org/simple", conflict = { package = "globi", extra = "ml-gpu" } }, + { name = "torch", marker = "sys_platform != 'darwin' and extra == 'ml-gpu'", specifier = ">=2.5.0", index = "https://download.pytorch.org/whl/cu128", conflict = { package = "globi", extra = "ml-gpu" } }, + { name = "wandb", marker = "extra == 'ml-gpu'", specifier = ">=0.25.0" }, + { name = "xgboost", marker = "extra == 'ml-gpu'", specifier = ">=3.2.0" }, { name = "xlsxwriter", marker = "extra == 'cli'", specifier = ">=3.2.9" }, ] -provides-extras = ["visualization", "ml", "cli"] +provides-extras = ["visualization", "ml-gpu", "cli"] [package.metadata.requires-dev] dev = [ @@ -1982,7 +2052,7 @@ dependencies = [ { name = "nbformat" }, { name = "packaging" }, { name = "prometheus-client" }, - { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = 
"pywinpty", marker = "os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux'" }, { name = "pyzmq" }, { name = "send2trash" }, { name = "terminado" }, @@ -2000,7 +2070,7 @@ name = "jupyter-server-terminals" version = "0.5.4" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pywinpty", marker = "os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux'" }, { name = "terminado" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f4/a7/bcd0a9b0cbba88986fe944aaaf91bfda603e5a50bda8ed15123f381a3b2f/jupyter_server_terminals-0.5.4.tar.gz", hash = "sha256:bbda128ed41d0be9020349f9f1f2a4ab9952a73ed5f5ac9f1419794761fb87f5", size = 31770, upload-time = "2026-01-14T16:53:20.213Z" } @@ -2179,6 +2249,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, ] +[[package]] +name = "lightning-utilities" +version = "0.15.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/45/7fa8f56b17dc0f0a41ec70dd307ecd6787254483549843bef4c30ab5adce/lightning_utilities-0.15.3.tar.gz", hash = "sha256:792ae0204c79f6859721ac7f386c237a33b0ed06ba775009cb894e010a842033", size = 33553, upload-time = "2026-02-22T14:48:53.348Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/f4/ead6e0e37209b07c9baa3e984ccdb0348ca370b77cea3aaea8ddbb097e00/lightning_utilities-0.15.3-py3-none-any.whl", hash = "sha256:6c55f1bee70084a1cbeaa41ada96e4b3a0fea5909e844dd335bd80f5a73c5f91", size = 31906, upload-time = "2026-02-22T14:48:52.488Z" }, +] + [[package]] name = "littleutils" version = 
"0.2.4" @@ -2259,6 +2342,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/59/1b/6ef961f543593969d25b2afe57a3564200280528caa9bd1082eecdd7b3bc/markdown-3.10.1-py3-none-any.whl", hash = "sha256:867d788939fe33e4b736426f5b9f651ad0c0ae0ecf89df0ca5d1176c70812fe3", size = 107684, upload-time = "2026-01-21T18:09:27.203Z" }, ] +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + [[package]] name = "markupsafe" version = "3.0.3" @@ -2388,6 +2483,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, ] +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + [[package]] name = "mergedeep" version = "1.3.4" @@ -2538,6 +2642,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/98/5c/2597cef67b6947b15c47f8dba967a0baf19fbdfdc86f6e4a8ba7af8b581a/mkdocstrings_python-1.19.0-py3-none-any.whl", hash = "sha256:395c1032af8f005234170575cc0c5d4d20980846623b623b35594281be4a3059", size = 143417, upload-time = "2025-11-10T13:30:54.164Z" }, ] +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, +] + [[package]] name = "msgpack" version = "1.1.2" @@ -3015,13 +3128,166 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" }, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.8.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/29/99/db44d685f0e257ff0e213ade1964fc459b4a690a73293220e98feb3307cf/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0", size = 590537124, upload-time = "2025-03-07T01:43:53.556Z" }, + { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/1f/b3bd73445e5cb342727fd24fe1f7b748f690b460acadc27ea22f904502c8/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed", size = 9533318, upload-time = "2025-03-07T01:40:10.421Z" }, + { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/d1/e50d0acaab360482034b84b6e27ee83c6738f7d32182b987f9c7a4e32962/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8", size = 43106076, upload-time = "2025-03-07T01:41:59.817Z" }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/75/f865a3b236e4647605ea34cc450900854ba123834a5f1598e160b9530c3a/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d", size = 965265, upload-time = "2025-03-07T01:39:43.533Z" }, + { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.10.2.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" }, + { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", 
size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.3.83" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/bc/7771846d3a0272026c416fbb7e5f4c1f146d6d80704534d0b187dd6f4800/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a", size = 193109211, upload-time = "2025-03-07T01:44:56.873Z" }, + { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, +] + +[[package]] +name = "nvidia-cufile-cu12" +version = "1.13.1.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f5/5607710447a6fe9fd9b3283956fceeee8a06cda1d2f56ce31371f595db2a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a", size = 1120705, upload-time = "2025-03-07T01:45:41.434Z" }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.9.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/45/5e/92aa15eca622a388b80fbf8375d4760738df6285b1e92c43d37390a33a9a/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd", size = 63625754, upload-time = "2025-03-07T01:46:10.735Z" }, + { url = "https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.3.90" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-cusparse-cu12", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/32/f7cd6ce8a7690544d084ea21c26e910a97e077c9b7f07bf5de623ee19981/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0", size = 267229841, upload-time = "2025-03-07T01:46:54.356Z" }, + { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.8.93" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bc/f7/cd777c4109681367721b00a106f491e0d0d15cfa1fd59672ce580ce42a97/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc", size = 288117129, upload-time = "2025-03-07T01:47:40.407Z" }, + { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/b9/598f6ff36faaece4b3c50d26f50e38661499ff34346f00e057760b35cc9d/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5", size = 283835557, upload-time = "2025-02-26T00:16:54.265Z" }, + { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" }, +] + [[package]] name = "nvidia-nccl-cu12" -version = "2.29.7" +version = "2.27.5" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/cc/f48875411d1f176bce58e6343fd5d4131fc1db5420719ff25944bdc006c6/nvidia_nccl_cu12-2.29.7-py3-none-manylinux_2_18_aarch64.whl", hash = "sha256:0cf032ee22b560447daf0456108a75e32bd74a4de6c6b64725637a359fa48cd8", size = 293563644, upload-time = "2026-03-03T05:34:46.166Z" }, - { url = 
"https://files.pythonhosted.org/packages/31/1e/9e366f36efc550f07d6737f199e3f6bffafdf28795d007f10a77dd274f5c/nvidia_nccl_cu12-2.29.7-py3-none-manylinux_2_18_x86_64.whl", hash = "sha256:ecd0a012051abc20c1aa87328841efa8cade3ced65803046e38c2f03c0891fea", size = 293633942, upload-time = "2026-03-03T05:37:05.625Z" }, + { url = "https://files.pythonhosted.org/packages/bb/1c/857979db0ef194ca5e21478a0612bcdbbe59458d7694361882279947b349/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:31432ad4d1fb1004eb0c56203dc9bc2178a1ba69d1d9e02d64a6938ab5e40e7a", size = 322400625, upload-time = "2025-06-26T04:11:04.496Z" }, + { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, + { url = "https://files.pythonhosted.org/packages/2a/a2/8cee5da30d13430e87bf99bb33455d2724d0a4a9cb5d7926d80ccb96d008/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7", size = 38386204, upload-time = "2025-03-07T01:49:43.612Z" }, +] + +[[package]] +name = "nvidia-nvshmem-cu12" +version = "3.4.5" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1d/6a/03aa43cc9bd3ad91553a88b5f6fb25ed6a3752ae86ce2180221962bc2aa5/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b48363fc6964dede448029434c6abed6c5e37f823cb43c3bcde7ecfc0457e15", size = 138936938, upload-time = "2025-09-06T00:32:05.589Z" }, + { url = "https://files.pythonhosted.org/packages/b5/09/6ea3ea725f82e1e76684f0708bbedd871fc96da89945adeba65c3835a64c/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:042f2500f24c021db8a06c5eec2539027d57460e1c1a762055a6554f72c369bd", size = 139103095, upload-time = "2025-09-06T00:32:31.266Z" }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/c0/1b303feea90d296f6176f32a2a70b5ef230f9bdeb3a72bddb0dc922dc137/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615", size = 91161, upload-time = "2025-03-07T01:42:23.922Z" }, + { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, +] + +[[package]] +name = "omegaconf" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "antlr4-python3-runtime" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120, upload-time = "2022-12-08T20:59:22.753Z" } +wheels = [ + { url 
= "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500, upload-time = "2022-12-08T20:59:19.686Z" }, ] [[package]] @@ -3899,6 +4165,49 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = "sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" }, ] +[[package]] +name = "pytorch-lightning" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fsspec", extra = ["http"] }, + { name = "lightning-utilities" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, + { name = "torchmetrics" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8b/ac/ebd5f6f58691cbd4f73836e43e1727f3814311b960c41f88e259606ca2b2/pytorch_lightning-2.6.1.tar.gz", hash = "sha256:ba08f8901cf226fcca473046ad9346f414e99117762dc869c76e650d5b3d7bdc", size = 665563, upload-time = "2026-01-30T14:59:11.636Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/93/c8c361bf0a2fe50f828f32def460e8b8a14b93955d3fd302b1a9b63b19e4/pytorch_lightning-2.6.1-py3-none-any.whl", hash = "sha256:1f8118567ec829e3055f16cf1aa320883a86a47c836951bfd9dcfa34ec7ffd59", size = 857273, upload-time = "2026-01-30T14:59:10.141Z" }, +] + +[[package]] +name = "pytorch-tabular" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = 
[ + { name = "einops" }, + { name = "numpy" }, + { name = "omegaconf" }, + { name = "pandas" }, + { name = "pytorch-lightning" }, + { name = "rich" }, + { name = "scikit-base" }, + { name = "scikit-learn" }, + { name = "scipy" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, + { name = "torchmetrics" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/f2/823de16d6a461504f4ed8e4a555d6ce356e5f81e6525d95e2b64895ec94f/pytorch_tabular-1.2.0.tar.gz", hash = "sha256:1b96b576eb3de443840b313d0b298293eaf83dcfdbba53ed8974b76d1351b821", size = 2312825, upload-time = "2026-01-26T21:48:22.577Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/c9/1e01c682e2ad7132bc1943d8d367c96f241bf85679e76d66eb0c4e4cbde9/pytorch_tabular-1.2.0-py3-none-any.whl", hash = "sha256:0a59f8a2304856b3d1e905f7b66153ebc65df1a6a017f2c8a13a29f62dc95b26", size = 165800, upload-time = "2026-01-26T21:48:21.195Z" }, +] + [[package]] name = "pytz" version = "2025.2" @@ -4077,6 +4386,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] +[[package]] +name = "rich" +version = "14.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/c6/f3b320c27991c46f43ee9d856302c70dc2d0fb2dba4842ff739d5f46b393/rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b", size = 230582, upload-time = "2026-02-19T17:23:12.474Z" } +wheels 
= [ + { url = "https://files.pythonhosted.org/packages/14/25/b208c5683343959b670dc001595f2f3737e051da617f66c31f7c4fa93abc/rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d", size = 310458, upload-time = "2026-02-19T17:23:13.732Z" }, +] + [[package]] name = "rpds-py" version = "0.28.0" @@ -4196,6 +4518,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, ] +[[package]] +name = "scikit-base" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/56/a8/610f99f01f326178b8a7347db2ede654b42548e9697b516480cc081e344d/scikit_base-0.13.1.tar.gz", hash = "sha256:169e5427233f7237b38c7d858bf07b8a86bbf59feccf0708e26dad4ac312c593", size = 134482, upload-time = "2026-01-25T11:31:38.814Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/55/c20d8319aab037e11f1d6403b6102d1041694abe24a3aa4a1e27f2cdb9f2/scikit_base-0.13.1-py3-none-any.whl", hash = "sha256:1aca86759435fd2d32d83a526ce11095119c0745e4e5dd91f2e5820023ca8e39", size = 159779, upload-time = "2026-01-25T11:31:36.759Z" }, +] + [[package]] name = "scikit-learn" version = "1.7.2" @@ -4297,6 +4628,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1c/78/504fdd027da3b84ff1aecd9f6957e65f35134534ccc6da8628eb71e76d3f/send2trash-2.1.0-py3-none-any.whl", hash = "sha256:0da2f112e6d6bb22de6aa6daa7e144831a4febf2a87261451c4ad849fe9a873c", size = 17610, upload-time = "2026-01-14T06:27:35.218Z" }, ] +[[package]] +name = "sentry-sdk" +version = "2.54.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "urllib3" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/c8/e9/2e3a46c304e7fa21eaa70612f60354e32699c7102eb961f67448e222ad7c/sentry_sdk-2.54.0.tar.gz", hash = "sha256:2620c2575128d009b11b20f7feb81e4e4e8ae08ec1d36cbc845705060b45cc1b", size = 413813, upload-time = "2026-03-02T15:12:41.355Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/39/be412cc86bc6247b8f69e9383d7950711bd86f8d0a4a4b0fe8fad685bc21/sentry_sdk-2.54.0-py2.py3-none-any.whl", hash = "sha256:fd74e0e281dcda63afff095d23ebcd6e97006102cdc8e78a29f19ecdf796a0de", size = 439198, upload-time = "2026-03-02T15:12:39.546Z" }, +] + [[package]] name = "setuptools" version = "80.9.0" @@ -4457,6 +4801,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/39/60/868371b6482ccd9ef423c6f62650066cf8271fdb2ee84f192695ad6b7a96/streamlit-1.51.0-py3-none-any.whl", hash = "sha256:4008b029f71401ce54946bb09a6a3e36f4f7652cbb48db701224557738cfda38", size = 10171702, upload-time = "2025-10-29T17:07:35.97Z" }, ] +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, +] + [[package]] name = "tables" version = "3.10.2" @@ -4501,13 +4857,43 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = 
"sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, ] +[[package]] +name = "tensorboard" +version = "2.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "grpcio" }, + { name = "markdown" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "protobuf" }, + { name = "setuptools" }, + { name = "tensorboard-data-server" }, + { name = "werkzeug" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/d9/a5db55f88f258ac669a92858b70a714bbbd5acd993820b41ec4a96a4d77f/tensorboard-2.20.0-py3-none-any.whl", hash = "sha256:9dc9f978cb84c0723acf9a345d96c184f0293d18f166bb8d59ee098e6cfaaba6", size = 5525680, upload-time = "2025-07-17T19:20:49.638Z" }, +] + +[[package]] +name = "tensorboard-data-server" +version = "0.7.2" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/13/e503968fefabd4c6b2650af21e110aa8466fe21432cd7c43a84577a89438/tensorboard_data_server-0.7.2-py3-none-any.whl", hash = "sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb", size = 2356, upload-time = "2023-10-23T21:23:32.16Z" }, + { url = "https://files.pythonhosted.org/packages/b7/85/dabeaf902892922777492e1d253bb7e1264cadce3cea932f7ff599e53fea/tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60", size = 4823598, upload-time = "2023-10-23T21:23:33.714Z" }, + { url = "https://files.pythonhosted.org/packages/73/c6/825dab04195756cf8ff2e12698f22513b3db2f64925bdd41671bfb33aaa5/tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530", size = 6590363, upload-time = "2023-10-23T21:23:35.583Z" }, +] + [[package]] name = "terminado" version = "0.18.1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "ptyprocess", marker = "os_name != 'nt'" }, - { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pywinpty", marker = "os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux'" }, { name = "tornado" }, ] sdist = { url = "https://files.pythonhosted.org/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701, upload-time = "2024-03-12T14:34:39.026Z" } @@ -4601,6 +4987,103 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, ] +[[package]] +name = "torch" +version = "2.10.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14' and sys_platform == 'darwin'", + "python_full_version < '3.14' and sys_platform == 'darwin'", +] +dependencies = [ + { name = "filelock", marker = "sys_platform == 'darwin'" }, + { name = "fsspec", marker = "sys_platform == 'darwin'" }, + { name = "jinja2", marker = "sys_platform == 'darwin'" }, + { name = "networkx", marker = "sys_platform == 'darwin'" }, + { name = "setuptools", marker = "sys_platform == 'darwin'" }, + { name = "sympy", marker = "sys_platform == 'darwin'" }, + { name = "typing-extensions", marker = "sys_platform == 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/54/a2ba279afcca44bbd320d4e73675b282fcee3d81400ea1b53934efca6462/torch-2.10.0-2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:13ec4add8c3faaed8d13e0574f5cd4a323c11655546f91fbe6afa77b57423574", size = 79498202, upload-time = "2026-02-10T21:44:52.603Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/23/2c9fe0c9c27f7f6cb865abcea8a4568f29f00acaeadfc6a37f6801f84cb4/torch-2.10.0-2-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:e521c9f030a3774ed770a9c011751fb47c4d12029a3d6522116e48431f2ff89e", size = 79498254, upload-time = "2026-02-10T21:44:44.095Z" }, + { url = "https://files.pythonhosted.org/packages/c9/5c/dee910b87c4d5c0fcb41b50839ae04df87c1cfc663cf1b5fca7ea565eeaa/torch-2.10.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:6d3707a61863d1c4d6ebba7be4ca320f42b869ee657e9b2c21c736bf17000294", size = 79498198, upload-time = "2026-01-21T16:24:34.704Z" }, + { url = "https://files.pythonhosted.org/packages/1a/0b/39929b148f4824bc3ad6f9f72a29d4ad865bcf7ebfc2fa67584773e083d2/torch-2.10.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:3202429f58309b9fa96a614885eace4b7995729f44beb54d3e4a47773649d382", size = 79851305, upload-time = "2026-01-21T16:24:09.209Z" }, + { url = "https://files.pythonhosted.org/packages/0e/13/e76b4d9c160e89fff48bf16b449ea324bda84745d2ab30294c37c2434c0d/torch-2.10.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:cdf2a523d699b70d613243211ecaac14fe9c5df8a0b0a9c02add60fb2a413e0f", size = 79498248, upload-time = "2026-01-21T16:23:09.315Z" }, + { url = "https://files.pythonhosted.org/packages/4f/93/716b5ac0155f1be70ed81bacc21269c3ece8dba0c249b9994094110bfc51/torch-2.10.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:bf0d9ff448b0218e0433aeb198805192346c4fd659c852370d5cc245f602a06a", size = 79464992, upload-time = "2026-01-21T16:23:05.162Z" }, + { url = "https://files.pythonhosted.org/packages/d8/94/71994e7d0d5238393df9732fdab607e37e2b56d26a746cb59fdb415f8966/torch-2.10.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:f5ab4ba32383061be0fb74bda772d470140a12c1c3b58a0cfbf3dae94d164c28", size = 79850324, upload-time = "2026-01-21T16:22:09.494Z" }, +] + +[[package]] +name = "torch" +version = "2.10.0+cu128" +source = { registry = "https://download.pytorch.org/whl/cu128" } +resolution-markers = 
[ + "python_full_version >= '3.14' and sys_platform == 'linux'", + "python_full_version < '3.14' and sys_platform == 'linux'", + "python_full_version >= '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version < '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", +] +dependencies = [ + { name = "cuda-bindings", marker = "sys_platform == 'linux'" }, + { name = "filelock", marker = "sys_platform != 'darwin'" }, + { name = "fsspec", marker = "sys_platform != 'darwin'" }, + { name = "jinja2", marker = "sys_platform != 'darwin'" }, + { name = "networkx", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cufile-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-curand-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvshmem-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "sys_platform == 'linux'" }, + { name = "setuptools", marker = "sys_platform != 'darwin'" }, + { name = "sympy", marker = "sys_platform != 'darwin'" }, + { name = "triton", marker = "sys_platform == 'linux'" }, + { name = "typing-extensions", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { 
url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:6f09cdf2415516be028ae82e6b985bcfc3eac37bc52ab401142689f6224516ca" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:628e89bd5110ced7debee2a57c69959725b7fbc64eab81a39dd70e46c7e28ba5" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:fbde8f6a9ec8c76979a0d14df21c10b9e5cab6f0d106a73ca73e2179bc597cae" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:bdbcc703382f948e951c063448c9406bf38ce66c41dd698d9e2733fcf96c037a" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:7b4bd23ed63de97456fcc81c26fea9f02ee02ce1112111c4dac0d8cfe574b23e" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-win_amd64.whl", hash = "sha256:4d1b0b49c54223c7c04050b49eac141d77b6edbc34aea1dfc74a6fdb661baa8c" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:f1f8b840c64b645a4bc61a393db48effb9c92b2dc26c8373873911f0750d1ea7" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:23f58258012bcf1c349cb22af387e33aadca7f83ea617b080e774eb41e4fe8ff" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:01b216e097b17a5277cfb47c383cdcacf06abeadcb0daca0c76b59e72854c3b6" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:c42377bc2607e3e1c60da71b792fb507c3938c87fd6edab8b21c59c91473c36d" }, + { url = 
"https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:37d71feea068776855686a1512058df3f19f6f040a151f055aa746601678744f" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-win_amd64.whl", hash = "sha256:c57017ca29e62271e362fdeee7d20070e254755a5148b30b553d8a10fc83c7ef" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:777461f50b2daf77e4bdd8e2ad34bdfc5a993bf1bdf2ab9ef39f5edfe4e9c12b" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7bcba6a7c5f0987a13298b1ca843155dcceceac758fa3c7ccd5c7af4059a1080" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-win_amd64.whl", hash = "sha256:70d89143c956389d4806cb4e5fe0b1129fe0db280e1073288d17fa76c101cba4" }, +] + +[[package]] +name = "torchmetrics" +version = "1.8.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lightning-utilities" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/2e/48a887a59ecc4a10ce9e8b35b3e3c5cef29d902c4eac143378526e7485cb/torchmetrics-1.8.2.tar.gz", hash = "sha256:cf64a901036bf107f17a524009eea7781c9c5315d130713aeca5747a686fe7a5", size = 580679, upload-time = "2025-09-03T14:00:54.077Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/21/aa0f434434c48490f91b65962b1ce863fdcce63febc166ca9fe9d706c2b6/torchmetrics-1.8.2-py3-none-any.whl", hash = "sha256:08382fd96b923e39e904c4d570f3d49e2cc71ccabd2a94e0f895d1f0dac86242", size = 983161, 
upload-time = "2025-09-03T14:00:51.921Z" }, +] + [[package]] name = "tornado" version = "6.5.2" @@ -4653,6 +5136,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/61/7a/f38385f1b2d5f54221baf1db3d6371dc6eef8041d95abff39576c694e9d9/transforms3d-0.4.2-py3-none-any.whl", hash = "sha256:1c70399d9e9473ecc23311fd947f727f7c69ed0b063244828c383aa1aefa5941", size = 1376759, upload-time = "2024-06-20T11:09:19.43Z" }, ] +[[package]] +name = "triton" +version = "3.6.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/5d/08201db32823bdf77a0e2b9039540080b2e5c23a20706ddba942924ebcd6/triton-3.6.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:374f52c11a711fd062b4bfbb201fd9ac0a5febd28a96fb41b4a0f51dde3157f4", size = 176128243, upload-time = "2026-01-20T16:16:07.857Z" }, + { url = "https://files.pythonhosted.org/packages/ab/a8/cdf8b3e4c98132f965f88c2313a4b493266832ad47fb52f23d14d4f86bb5/triton-3.6.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74caf5e34b66d9f3a429af689c1c7128daba1d8208df60e81106b115c00d6fca", size = 188266850, upload-time = "2026-01-20T16:00:43.041Z" }, + { url = "https://files.pythonhosted.org/packages/3c/12/34d71b350e89a204c2c7777a9bba0dcf2f19a5bfdd70b57c4dbc5ffd7154/triton-3.6.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448e02fe6dc898e9e5aa89cf0ee5c371e99df5aa5e8ad976a80b93334f3494fd", size = 176133521, upload-time = "2026-01-20T16:16:13.321Z" }, + { url = "https://files.pythonhosted.org/packages/f9/0b/37d991d8c130ce81a8728ae3c25b6e60935838e9be1b58791f5997b24a54/triton-3.6.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c7f76c6e72d2ef08df639e3d0d30729112f47a56b0c81672edc05ee5116ac9", size = 188289450, upload-time = "2026-01-20T16:00:49.136Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/4e/41b0c8033b503fd3cfcd12392cdd256945026a91ff02452bef40ec34bee7/triton-3.6.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1722e172d34e32abc3eb7711d0025bb69d7959ebea84e3b7f7a341cd7ed694d6", size = 176276087, upload-time = "2026-01-20T16:16:18.989Z" }, + { url = "https://files.pythonhosted.org/packages/35/f8/9c66bfc55361ec6d0e4040a0337fb5924ceb23de4648b8a81ae9d33b2b38/triton-3.6.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d002e07d7180fd65e622134fbd980c9a3d4211fb85224b56a0a0efbd422ab72f", size = 188400296, upload-time = "2026-01-20T16:00:56.042Z" }, + { url = "https://files.pythonhosted.org/packages/49/55/5ecf0dcaa0f2fbbd4420f7ef227ee3cb172e91e5fede9d0ecaddc43363b4/triton-3.6.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5523241e7d1abca00f1d240949eebdd7c673b005edbbce0aca95b8191f1d43", size = 176138577, upload-time = "2026-01-20T16:16:25.426Z" }, + { url = "https://files.pythonhosted.org/packages/df/3d/9e7eee57b37c80cec63322c0231bb6da3cfe535a91d7a4d64896fcb89357/triton-3.6.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a17a5d5985f0ac494ed8a8e54568f092f7057ef60e1b0fa09d3fd1512064e803", size = 188273063, upload-time = "2026-01-20T16:01:07.278Z" }, + { url = "https://files.pythonhosted.org/packages/48/db/56ee649cab5eaff4757541325aca81f52d02d4a7cd3506776cad2451e060/triton-3.6.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b3a97e8ed304dfa9bd23bb41ca04cdf6b2e617d5e782a8653d616037a5d537d", size = 176274804, upload-time = "2026-01-20T16:16:31.528Z" }, + { url = "https://files.pythonhosted.org/packages/f6/56/6113c23ff46c00aae423333eb58b3e60bdfe9179d542781955a5e1514cb3/triton-3.6.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46bd1c1af4b6704e554cad2eeb3b0a6513a980d470ccfa63189737340c7746a7", size = 188397994, upload-time = 
"2026-01-20T16:01:14.236Z" }, +] + [[package]] name = "tsam" version = "2.3.9" @@ -4754,6 +5254,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, ] +[[package]] +name = "wandb" +version = "0.25.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "gitpython" }, + { name = "packaging" }, + { name = "platformdirs" }, + { name = "protobuf" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sentry-sdk" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fd/60/d94952549920469524b689479c864c692ca47eca4b8c2fe3389b64a58778/wandb-0.25.0.tar.gz", hash = "sha256:45840495a288e34245d69d07b5a0b449220fbc5b032e6b51c4f92ec9026d2ad1", size = 43951335, upload-time = "2026-02-13T00:17:45.515Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/7d/0c131db3ec9deaabbd32263d90863cbfbe07659527e11c35a5c738cecdc5/wandb-0.25.0-py3-none-macosx_12_0_arm64.whl", hash = "sha256:5eecb3c7b5e60d1acfa4b056bfbaa0b79a482566a9db58c9f99724b3862bc8e5", size = 23287536, upload-time = "2026-02-13T00:17:20.265Z" }, + { url = "https://files.pythonhosted.org/packages/c3/95/31bb7f76a966ec87495e5a72ac7570685be162494c41757ac871768dbc4f/wandb-0.25.0-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:daeedaadb183dc466e634fba90ab2bab1d4e93000912be0dee95065a0624a3fd", size = 25196062, upload-time = "2026-02-13T00:17:23.356Z" }, + { url = "https://files.pythonhosted.org/packages/d9/a1/258cdedbf30cebc692198a774cf0ef945b7ed98ee64bdaf62621281c95d8/wandb-0.25.0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:5e0127dbcef13eea48f4b84268da7004d34d3120ebc7b2fa9cefb72b49dbb825", size = 22799744, upload-time = 
"2026-02-13T00:17:26.437Z" }, + { url = "https://files.pythonhosted.org/packages/de/91/ec9465d014cfd199c5b2083d271d31b3c2aedeae66f3d8a0712f7f54bdf3/wandb-0.25.0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:6c4c38077836f9b7569a35b0e1dcf1f0c43616fcd936d182f475edbfea063665", size = 25262839, upload-time = "2026-02-13T00:17:28.8Z" }, + { url = "https://files.pythonhosted.org/packages/c7/95/cb2d1c7143f534544147fb53fe87944508b8cb9a058bc5b6f8a94adbee15/wandb-0.25.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6edd8948d305cb73745bf564b807bd73da2ccbd47c548196b8a362f7df40aed8", size = 22853714, upload-time = "2026-02-13T00:17:31.68Z" }, + { url = "https://files.pythonhosted.org/packages/d7/94/68163f70c1669edcf130822aaaea782d8198b5df74443eca0085ec596774/wandb-0.25.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ada6f08629bb014ad6e0a19d5dec478cdaa116431baa3f0a4bf4ab8d9893611f", size = 25358037, upload-time = "2026-02-13T00:17:34.676Z" }, + { url = "https://files.pythonhosted.org/packages/cc/fb/9578eed2c01b2fc6c8b693da110aa9c73a33d7bb556480f5cfc42e48c94e/wandb-0.25.0-py3-none-win32.whl", hash = "sha256:020b42ca4d76e347709d65f59b30d4623a115edc28f462af1c92681cb17eae7c", size = 24604118, upload-time = "2026-02-13T00:17:37.641Z" }, + { url = "https://files.pythonhosted.org/packages/25/97/460f6cb738aaa39b4eb2e6b4c630b2ae4321cdd70a79d5955ea75a878981/wandb-0.25.0-py3-none-win_amd64.whl", hash = "sha256:78307ac0b328f2dc334c8607bec772851215584b62c439eb320c4af4fb077a00", size = 24604122, upload-time = "2026-02-13T00:17:39.991Z" }, + { url = "https://files.pythonhosted.org/packages/27/6c/5847b4dda1dfd52630dac08711d4348c69ed657f0698fc2d949c7f7a6622/wandb-0.25.0-py3-none-win_arm64.whl", hash = "sha256:c6174401fd6fb726295e98d57b4231c100eca96bd17de51bfc64038a57230aaf", size = 21785298, upload-time = "2026-02-13T00:17:42.475Z" }, +] + [[package]] name = "watchdog" version = "6.0.0" @@ -4805,6 +5334,18 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" }, ] +[[package]] +name = "werkzeug" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/f1/ee81806690a87dab5f5653c1f146c92bc066d7f4cebc603ef88eb9e13957/werkzeug-3.1.6.tar.gz", hash = "sha256:210c6bede5a420a913956b4791a7f4d6843a43b6fcee4dfa08a65e93007d0d25", size = 864736, upload-time = "2026-02-19T15:17:18.884Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/ec/d58832f89ede95652fd01f4f24236af7d32b70cab2196dfcc2d2fd13c5c2/werkzeug-3.1.6-py3-none-any.whl", hash = "sha256:7ddf3357bb9564e407607f988f683d72038551200c704012bb9a4c523d42f131", size = 225166, upload-time = "2026-02-19T15:17:17.475Z" }, +] + [[package]] name = "widgetsnbextension" version = "4.0.15" From 4f9223a4ad2a58e9bd7ca6651146b994a3bce87d Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sun, 8 Mar 2026 12:54:35 -0400 Subject: [PATCH 07/31] enable xgboost training --- src/globi/models/surrogate/dummy.py | 24 +- src/globi/models/surrogate/training.py | 468 ++++++++++++++++--------- src/globi/pipelines/training.py | 26 +- src/globi/worker/main.py | 3 +- 4 files changed, 344 insertions(+), 177 deletions(-) diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py index 1ebcf24..ee17a81 100644 --- a/src/globi/models/surrogate/dummy.py +++ b/src/globi/models/surrogate/dummy.py @@ -32,10 +32,26 @@ def dummy_simulation( ) -> DummySimulationOutput: """A dummy simulation.""" df = pd.DataFrame({ - "target_0": [input_spec.a + input_spec.b], - "target_1": [input_spec.a - input_spec.b], - "target_2": [input_spec.a * 
input_spec.b * input_spec.c], - "target_3": [input_spec.a / math.sin(input_spec.b)], + "target_0": [ + (input_spec.a + input_spec.b) + if input_spec.weather_file == "some" + else (input_spec.a - input_spec.b) + ], + "target_1": [ + (input_spec.a - input_spec.b) + if input_spec.weather_file == "some" + else (input_spec.a + input_spec.b) + ], + "target_2": [ + (input_spec.a * input_spec.b * input_spec.c) + if input_spec.weather_file == "some" + else (input_spec.a * input_spec.b / input_spec.c) + ], + "target_3": [ + (input_spec.a / math.sin(input_spec.b)) + if input_spec.weather_file == "some" + else (input_spec.a / math.cos(input_spec.b)) + ], }) df_neg = -df df = pd.concat([df, df_neg], axis=1, keys=["positive", "negative"], names=["sign"]) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 7d42e06..29b57cc 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -1,9 +1,10 @@ """Models used for the surrogate training pipeline.""" import warnings +from collections.abc import Callable from functools import cached_property from pathlib import Path -from typing import TYPE_CHECKING, Literal, cast +from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np import pandas as pd @@ -18,6 +19,7 @@ S3ClientType = object +# TODO: allow specific configuration per column. class ConvergenceThresholds(BaseModel): """The thresholds for convergence.""" @@ -80,19 +82,70 @@ def check_convergence(self, metrics: pd.Series): ) -class XGBHyperparameters(BaseModel): - """The parameters for the xgboost model.""" +class XGBTrainerConfig(BaseModel): + """The trainer hyperparameters for the xgboost model.""" + + num_boost_round: int = Field( + default=4000, description="The number of boosting rounds." + ) + early_stopping_rounds: int = Field( + default=10, description="The number of boosting rounds to early stop." 
+ ) + verbose_eval: bool = Field( + default=True, description="Whether to print verbose evaluation results." + ) + + +class XGBModelConfig(BaseModel): + """The model hyperparameters for the xgboost model.""" max_depth: int = Field(default=5, description="The maximum depth of the tree.") eta: float = Field(default=0.1, description="The learning rate.") - min_child_weight: int = Field(default=3, description="The minimum child weight.") - subsample: float = Field(default=0.8, description="The subsample rate.") - colsample_bytree: float = Field( - default=0.8, description="The column sample by tree rate." + min_child_weight: int | None = Field( + default=3, description="The minimum child weight." + ) + subsample: float | None = Field(default=None, description="The subsample rate.") + colsample_bytree: float | None = Field( + default=None, description="The column sample by tree rate." + ) + alpha: float | None = Field(default=None, description="The alpha parameter.") + lam: float | None = Field(default=None, description="The lambda parameter.") + gamma: float | None = Field(default=None, description="The gamma parameter.") + seed: int = Field( + default=42, description="The seed for the random number generator." 
+ ) + + @property + def param_dict(self) -> dict[str, Any]: + """The dictionary of parameters.""" + import torch + + data = { + "objective": "reg:squarederror", + "eval_metric": "rmse", + "tree_method": "auto", + "seed": self.seed, + # hyperparameters + **self.model_dump( + exclude_none=True, + ), + } + if torch.cuda.is_available(): + data["device"] = "cuda" + return data + + +class XGBHyperparameters(BaseModel): + """The parameters for the xgboost model.""" + + hp: XGBModelConfig = Field( + default_factory=XGBModelConfig, + description="The hyperparameters for the model.", + ) + trainer: XGBTrainerConfig = Field( + default_factory=XGBTrainerConfig, + description="The trainer hyperparameters for the model.", ) - alpha: float = Field(default=0.01, description="The alpha parameter.") - lam: float = Field(default=0.01, description="The lambda parameter.") - gamma: float = Field(default=0.01, description="The gamma parameter.") class LGBHyperparameters(BaseModel): @@ -232,10 +285,10 @@ class ProgressiveTrainingSpec(ExperimentInputSpec): default_factory=RegressionIOConfigSpec, description="The regression io config spec.", ) - # model_hyperparameters: ModelHPType = Field( - # default_factory=LGBHyperparameters, - # description="The hyperparameters for the model.", - # ) + hyperparameters: ModelHPType = Field( + default_factory=XGBHyperparameters, + description="The hyperparameters for the model.", + ) stratification: StratificationSpec = Field( default_factory=StratificationSpec, description="The stratification spec.", @@ -558,7 +611,12 @@ def dparams(self) -> pd.DataFrame: return self.data.index.to_frame() @cached_property - def all_columns(self) -> frozenset[str]: + def all_feature_columns(self) -> frozenset[str]: + """The names of all columns.""" + return frozenset(self.dparams.columns) + + @cached_property + def all_target_columns(self) -> frozenset[str]: """The names of all columns.""" return frozenset(self.data.columns) @@ -566,12 +624,14 @@ def all_columns(self) 
-> frozenset[str]: def continuous_columns(self) -> frozenset[str]: """The continuous columns.""" feature_conf = self.parent.regression_io_config.features - candidates = self.all_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS + candidates = ( + self.all_feature_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS + ) object_dype_columns = ( - self.data[candidates].select_dtypes(include=["object"]).columns.tolist() + self.dparams[candidates].select_dtypes(include=["object"]).columns.tolist() ) candidates = candidates - frozenset(object_dype_columns) - nunique_counts = cast(pd.Series, self.data[candidates].nunique()) + nunique_counts = cast(pd.Series, self.dparams[candidates].nunique()) thresh = feature_conf.cont_cat_unicity_transition_threshold passing_candidates = cast( list[str], @@ -604,12 +664,14 @@ def continuous_columns(self) -> frozenset[str]: def categorical_columns(self) -> frozenset[str]: """The categorical columns.""" feature_conf = self.parent.regression_io_config.features - candidates = self.all_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS + candidates = ( + self.all_feature_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS + ) object_dtype_columns = ( - self.data[candidates].select_dtypes(include=["object"]).columns.tolist() + self.dparams[candidates].select_dtypes(include=["object"]).columns.tolist() ) non_obj_dtype_columns = candidates - frozenset(object_dtype_columns) - nunique_counts = cast(pd.Series, self.data[non_obj_dtype_columns].nunique()) + nunique_counts = cast(pd.Series, self.dparams[non_obj_dtype_columns].nunique()) thresh = feature_conf.cont_cat_unicity_transition_threshold passing_non_obj_dtype_candidates = cast( list[str], @@ -723,9 +785,8 @@ def test_segment(self) -> tuple[pd.DataFrame, pd.DataFrame]: @cached_property def targets(self) -> list[str]: """The list of regression targets.""" - return ( - self.parent.regression_io_config.targets.columns - or self.data.columns.tolist() + return 
self.parent.regression_io_config.targets.columns or sorted( + self.all_target_columns ) @cached_property @@ -738,6 +799,125 @@ def target_range(self) -> list[tuple[float, float]]: for col in self.targets ] + def train(self, tempdir: Path): + """Train the model.""" + if isinstance(self.parent.hyperparameters, XGBHyperparameters): + # TOOO: Consider adding an interface/protocol/base class so signatures can be consistent. + return self.train_xgboost(tempdir) + else: + raise NotImplementedError( + f"Unsupported hyperparameters type: {type(self.parent.hyperparameters)}" + ) + + def train_xgboost(self, tempdir: Path): + """Train an xgboost model.""" + import xgboost as xgb + + hp = ( + self.parent.hyperparameters + if isinstance(self.parent.hyperparameters, XGBHyperparameters) + else XGBHyperparameters() + ) + + x_train, y_train = self.train_segment + x_test, y_test = self.test_segment + + # select the features + x_train_selected, x_test_selected = ( + x_train.loc[:, self.continuous_columns | self.categorical_columns], + x_test.loc[:, self.continuous_columns | self.categorical_columns], + ) + cats = { + col: self.dparams[col].unique().tolist() for col in self.categorical_columns + } + x_train_encoded = self.index_encode_categorical_columns(x_train_selected, cats) + x_test_encoded = self.index_encode_categorical_columns(x_test_selected, cats) + + # select the targets + y_train, y_test = y_train.loc[:, self.targets], y_test.loc[:, self.targets] + + train_dmat = xgb.DMatrix( + x_train_encoded.reset_index(drop=True), label=y_train.reset_index(drop=True) + ) + test_dmat = xgb.DMatrix( + x_test_encoded.reset_index(drop=True), label=y_test.reset_index(drop=True) + ) + + evals = [(train_dmat, "train"), (test_dmat, "eval")] + model = xgb.train( + hp.hp.param_dict, + train_dmat, + num_boost_round=hp.trainer.num_boost_round, + evals=evals, + early_stopping_rounds=hp.trainer.early_stopping_rounds, + verbose_eval=hp.trainer.verbose_eval, + ) + + def predict(x: pd.DataFrame) -> 
pd.DataFrame: + """Predict the targets for the given features.""" + x_selected = cast( + pd.DataFrame, + x.loc[:, self.continuous_columns | self.categorical_columns], + ) + x_encoded = self.index_encode_categorical_columns(x_selected, cats) + preds = model.predict( + xgb.DMatrix( + x_encoded.reset_index(drop=True), + ) + ) + return pd.DataFrame( + preds, columns=pd.Index(self.targets), index=pd.MultiIndex.from_frame(x) + ) + + evaluation = self.evaluate(predict, x_train, x_test, y_train, y_test) + model_path = tempdir / "model.ubj" + model.save_model(model_path.as_posix()) + return model, evaluation, model_path + + def evaluate( + self, + fn: Callable[[pd.DataFrame], pd.DataFrame], + x_train: pd.DataFrame, + x_test: pd.DataFrame, + y_train: pd.DataFrame, + y_test: pd.DataFrame, + ) -> tuple[pd.DataFrame, pd.DataFrame]: + """Evaluate a model on the train and test segments.""" + y_train_preds = fn(x_train) + y_test_preds = fn(x_test) + + # compute the metrics + global_train_metrics, stratum_train_metrics = self.compute_metrics( + y_train_preds, y_train + ) + global_test_metrics, stratum_test_metrics = self.compute_metrics( + y_test_preds, y_test + ) + + global_metrics = pd.concat( + [global_train_metrics, global_test_metrics], + axis=1, + keys=["train", "test"], + names=["split_segment"], + ) + stratum_metrics = pd.concat( + [stratum_train_metrics, stratum_test_metrics], + axis=1, + keys=["train", "test"], + names=["split_segment"], + ) + return global_metrics, stratum_metrics + + def index_encode_categorical_columns( + self, df: pd.DataFrame, cats: dict[str, list[str]] + ) -> pd.DataFrame: + """Index encode the categorical columns.""" + df = df.copy(deep=True) + for col in df.columns: + if df[col].dtype == "object": + df[col] = pd.Categorical(df[col], categories=cats[col]).codes + return df + def train_pytorch_tabular(self, tempdir: Path): """Train a pytorch tabular model.""" from pytorch_tabular import TabularModel @@ -762,8 +942,8 @@ def 
train_pytorch_tabular(self, tempdir: Path): optimizer_config = OptimizerConfig( # TODO: make this all configurable optimizer="AdamW", optimizer_params={"weight_decay": 1e-5}, - lr_scheduler="CosineAnnealingLR", - lr_scheduler_params={"T_max": n_epochs, "eta_min": 1e-5}, + # lr_scheduler="CosineAnnealingLR", + # lr_scheduler_params={"T_max": n_epochs, "eta_min": 1e-5}, ) trainer_config = TrainerConfig( batch_size=256, @@ -771,6 +951,10 @@ def train_pytorch_tabular(self, tempdir: Path): max_epochs=n_epochs, min_epochs=max(n_epochs // 20, 1), early_stopping=None, + # early_stopping= "valid_loss", + # early_stopping_min_delta=0.001, + # early_stopping_mode="min", + # early_stopping_patience=3, # gradient_clip_val=1.0, # auto_lr_find=False # max_time=60, @@ -780,16 +964,16 @@ def train_pytorch_tabular(self, tempdir: Path): task="regression", head="LinearHead", head_config=LinearHeadConfig( - # layers="", + layers="256-128-64", activation="SiLU", - use_batch_norm=False, + use_batch_norm=True, # dropout=0, ).__dict__, target_range=self.target_range, embedding_dims=None, - embedding_dropout=0.1, + embedding_dropout=0.05, batch_norm_continuous_input=True, - gflu_stages=6, + gflu_stages=24, gflu_dropout=0.0, gflu_feature_init_sparsity=0.3, learnable_sparsity=True, @@ -938,97 +1122,100 @@ def train_pytorch_tabular(self, tempdir: Path): # "stratum_metrics": stratum_metrics, # } - # def compute_frame_metrics( - # self, preds: pd.DataFrame, targets: pd.DataFrame - # ) -> pd.DataFrame: - # """Compute the metrics.""" - # from sklearn.metrics import ( - # mean_absolute_error, - # mean_absolute_percentage_error, - # mean_squared_error, - # r2_score, - # ) + def compute_frame_metrics( + self, preds: pd.DataFrame, targets: pd.DataFrame + ) -> pd.DataFrame: + """Compute the metrics.""" + from sklearn.metrics import ( + mean_absolute_error, + mean_absolute_percentage_error, + mean_squared_error, + r2_score, + ) - # mae = mean_absolute_error(targets, preds, multioutput="raw_values") - # 
mse = mean_squared_error(targets, preds, multioutput="raw_values") - # rmse = np.sqrt(mse) - # r2 = r2_score(targets, preds, multioutput="raw_values") - # cvrmse = rmse / (targets.mean(axis=0) + 1e-5) - # mape = mean_absolute_percentage_error( - # targets + 1e-5, - # preds, - # multioutput="raw_values", - # ) + mae = mean_absolute_error(targets, preds, multioutput="raw_values") + mse = mean_squared_error(targets, preds, multioutput="raw_values") + rmse = np.sqrt(mse) + r2 = r2_score(targets, preds, multioutput="raw_values") + cvrmse = rmse / np.abs(targets.mean(axis=0) + 1e-5) + mape = mean_absolute_percentage_error( + targets + 1e-5, + preds, + multioutput="raw_values", + ) - # metrics = pd.DataFrame( - # { - # "mae": mae, - # "rmse": rmse, - # "r2": r2, - # "cvrmse": cvrmse, - # "mape": mape, - # }, - # ) - # metrics.columns.names = ["metric"] - # metrics.index.names = ["measurement", "target"] - # return metrics - - # def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): - # """Compute the metrics.""" - # global_metrics = self.compute_frame_metrics(preds, targets) - # stratum_metric_dfs = {} - # for stratum_name in self.stratum_names: - # stratum_targets = cast( - # pd.DataFrame, targets.xs(stratum_name, level=self.stratification_field) - # ) - # stratum_preds = cast( - # pd.DataFrame, preds.xs(stratum_name, level=self.stratification_field) - # ) - # metrics = self.compute_frame_metrics(stratum_preds, stratum_targets) - # stratum_metric_dfs[stratum_name] = metrics + metrics = pd.DataFrame( + { + "mae": mae, + "rmse": rmse, + "r2": r2, + "cvrmse": cvrmse, + "mape": mape, + }, + ) + metrics.columns.names = ["metric"] + metrics.index.names = ["target"] - # stratum_metrics = pd.concat( - # stratum_metric_dfs, - # axis=1, - # keys=self.stratum_names, - # names=["stratum"], - # ) - # global_metrics = ( - # global_metrics.set_index( - # pd.Index( - # [self.sort_index] * len(global_metrics), - # name="sort_index", - # ), - # append=True, - # ) - # 
.set_index( - # pd.Index( - # [self.progressive_training_iter_ix] * len(global_metrics), - # name="progressive_training_iter_ix", - # ), - # append=True, - # ) - # .unstack(level="target") - # ) + return metrics - # stratum_metrics = ( - # stratum_metrics.set_index( - # pd.Index( - # [self.sort_index] * len(stratum_metrics), - # name="sort_index", - # ), - # append=True, - # ) - # .set_index( - # pd.Index( - # [self.progressive_training_iter_ix] * len(stratum_metrics), - # name="progressive_training_iter_ix", - # ), - # append=True, - # ) - # .unstack(level="target") - # ) - # return global_metrics, stratum_metrics + def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): + """Compute the metrics.""" + global_metrics = self.compute_frame_metrics(preds, targets) + stratum_metric_dfs = {} + for stratum_name in self.stratum_names: + stratum_targets = cast( + pd.DataFrame, + targets.xs(stratum_name, level=self.parent.stratification.field), + ) + stratum_preds = cast( + pd.DataFrame, + preds.xs(stratum_name, level=self.parent.stratification.field), + ) + metrics = self.compute_frame_metrics(stratum_preds, stratum_targets) + stratum_metric_dfs[stratum_name] = metrics + + stratum_metrics = pd.concat( + stratum_metric_dfs, + axis=1, + keys=self.stratum_names, + names=["stratum"], + ) + global_metrics = ( + global_metrics.set_index( + pd.Index( + [self.sort_index] * len(global_metrics), + name="sort_index", + ), + append=True, + ) + .set_index( + pd.Index( + [self.parent.iteration.current_iter] * len(global_metrics), + name="iteration", + ), + append=True, + ) + .unstack(level="target") + ) + + stratum_metrics = ( + stratum_metrics.set_index( + pd.Index( + [self.sort_index] * len(stratum_metrics), + name="sort_index", + ), + append=True, + ) + .set_index( + pd.Index( + [self.parent.iteration.current_iter] * len(stratum_metrics), + name="iteration", + ), + append=True, + ) + .unstack(level="target") + ) + return global_metrics, stratum_metrics # def 
train_lightgbm( # self, @@ -1100,53 +1287,6 @@ def train_pytorch_tabular(self, tempdir: Path): # """Format the model key.""" # return f"hatchet/{self.model_dir_key}/{model_name}" - # def train_xgboost( - # self, - # train_params: pd.DataFrame, - # train_targets: pd.DataFrame, - # test_params: pd.DataFrame, - # test_targets: pd.DataFrame, - # ): - # """Train the xgboost model.""" - # import xgboost as xgb - - # hparams = { - # "objective": "reg:squarederror", - # "eval_metric": "rmse", - # "max_depth": 5, # 7 - # "eta": 0.1, - # "min_child_weight": 3, - # "subsample": 0.8, - # "colsample_bytree": 0.8, - # # "alpha": 0.01, - # # "lambda": 0.01, - # # "gamma": 0.01, - # } - - # train_dmatrix = xgb.DMatrix(train_params, label=train_targets) - # test_dmatrix = xgb.DMatrix(test_params, label=test_targets) - - # model = xgb.train( - # hparams, - # train_dmatrix, - # num_boost_round=2000, - # early_stopping_rounds=20, - # verbose_eval=True, - # evals=[(test_dmatrix, "test")], - # ) - - # # compute the metrics - # train_preds = model.predict(train_dmatrix) - # test_preds = model.predict(test_dmatrix) - # train_preds = pd.DataFrame( - # train_preds, index=train_targets.index, columns=train_targets.columns - # ) - # test_preds = pd.DataFrame( - # test_preds, index=test_targets.index, columns=test_targets.columns - # ) - - # return train_preds, test_preds - class TrainWithCVSpec(StageSpec): """Train an SBEM model using a scatter gather approach for cross-fold validation.""" diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index f12589c..59e7f91 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -16,7 +16,7 @@ from scythe.hatchet import hatchet from scythe.registry import ExperimentRegistry from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather -from scythe.utils.filesys import S3Url +from scythe.utils.filesys import FileReference, S3Url from globi.models.surrogate.dummy import 
DummySimulationInput, dummy_simulation from globi.models.surrogate.training import ( @@ -30,7 +30,7 @@ class FoldResult(ExperimentOutputSpec): """The output for a fold.""" - columns: list[str] + regressor: FileReference class CombineResultsResult(BaseModel): @@ -69,14 +69,22 @@ class RecursionTransition(BaseModel): @ExperimentRegistry.Register( description="Train a regressor with cross-fold validation.", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(hours=1), ) def train_regressor_with_cv_fold( input_spec: TrainFoldSpec, tempdir: Path ) -> FoldResult: """Train a regressor with cross-fold validation.""" # DO TRAINING - _model, _trainer = input_spec.train_pytorch_tabular(tempdir) - return FoldResult(columns=input_spec.data.columns.tolist()) + _model, (global_results, stratum_results), model_path = input_spec.train(tempdir) + return FoldResult( + regressor=model_path, + dataframes={ + "global": global_results, + "stratums": stratum_results, + }, + ) iterative_training = hatchet.workflow( @@ -99,13 +107,13 @@ def create_simulations( specs = [ DummySimulationInput( weather_file="some" if random.random() < 0.5 else "other", # noqa: S311 - a=i, - b=-i, + a=random.randint(-10, 10), # noqa: S311 + b=random.randint(-10, 10), # noqa: S311 c=random.randint(-10, 10), # noqa: S311 experiment_id="placeholder", sort_index=i, ) - for i in range(1000) + for i in range(1_000) ] # STEP 2: Simulate the simulations using scythe @@ -326,6 +334,8 @@ def transition_recursion( ) +# TODO: Final training stage? or should we save models along the way. 
+ if __name__ == "__main__": from scythe.settings import ScytheStorageSettings @@ -345,7 +355,7 @@ def transition_recursion( aliases=["feature.weather.file"], ), iteration=IterationSpec( - max_iters=4, + max_iters=10, ), storage_settings=ScytheStorageSettings(), data_uris=None, diff --git a/src/globi/worker/main.py b/src/globi/worker/main.py index ee7eb38..95fe32d 100644 --- a/src/globi/worker/main.py +++ b/src/globi/worker/main.py @@ -25,7 +25,8 @@ def main(): ) for workflow in workflows: worker.register_workflow(workflow) - worker.register_workflow(iterative_training) + if conf.DOES_FAN: + worker.register_workflow(iterative_training) worker.start() # conf.start() From 8963690874edc34547b55d5241904f00a587a72e Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sun, 8 Mar 2026 14:29:19 -0400 Subject: [PATCH 08/31] enable recursive convergence checks --- src/globi/models/surrogate/training.py | 108 ++++++++++++------------- src/globi/pipelines/training.py | 39 +++++++-- 2 files changed, 85 insertions(+), 62 deletions(-) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 29b57cc..3959b4a 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -1,5 +1,7 @@ """Models used for the surrogate training pipeline.""" +import fnmatch +import re import warnings from collections.abc import Callable from functools import cached_property @@ -23,25 +25,30 @@ class ConvergenceThresholds(BaseModel): """The thresholds for convergence.""" - mae: float = Field(default=0.5, description="The maximum MAE for convergence.") - rmse: float = Field(default=0.5, description="The maximum RMSE for convergence.") - mape: float = Field(default=0.15, description="The maximum MAPE for convergence.") - r2: float = Field(default=0.95, description="The minimum R2 for convergence.") + # TODO: instead of using a risky hardcoded "n/a" token, make nullability have better support. 
+ mae: float = Field(default=-9e9, description="The maximum MAE for convergence.") + rmse: float = Field(default=-9e9, description="The maximum RMSE for convergence.") + mape: float = Field(default=-9e9, description="The maximum MAPE for convergence.") + r2: float = Field(default=9e9, description="The minimum R2 for convergence.") cvrmse: float = Field( - default=0.05, description="The maximum CV_RMSE for convergence." + default=-9e9, description="The maximum CV_RMSE for convergence." ) - @property - def thresholds(self) -> pd.Series: - """The thresholds for convergence.""" - return pd.Series(self.model_dump(), name="metric") - - def check_convergence(self, metrics: pd.Series): + def check_convergence(self, metrics: pd.Series, target: re.Pattern | None = None): """Check if the metrics have converged. Note that this requires the metrics data frame to have the following shape: """ + # first, we select the data for the relevant targets: + if target is not None: + target_level = metrics.index.get_level_values("target") + # Interpret target as a regex and match + mask = cast(pd.Series, target_level.to_series().astype(str)).str.match( + target + ) + metrics = cast(pd.Series, metrics.loc[mask.values]) + thresholds = pd.Series(self.model_dump(), name="metric") # first, we will select the appropriate threshold for each metric @@ -60,15 +67,41 @@ def check_convergence(self, metrics: pd.Series): # run the comparisons comparison = metrics < comparators + return comparison + + +class ConvergenceThresholdsByTarget(BaseModel): + """The thresholds for convergence by target.""" + + thresholds: dict[str, ConvergenceThresholds] = Field( + default_factory=lambda: {"*": ConvergenceThresholds()}, + description="The thresholds for convergence by target.", + ) + + def make_comparisons(self, metrics: pd.Series) -> list[pd.Series]: + """Generate a list of all stratum/target/metric True/False comparisons.""" + return [ + self.thresholds[target].check_convergence( + metrics, 
re.compile(fnmatch.translate(target)) + ) + for target in self.thresholds + ] + + def combine_and_check_strata_and_targets(self, comparisons: list[pd.Series]): + """Combine the comparisons and aggregate first by targets then by strata.""" + comparison = pd.concat(comparisons, axis=0) # now we will groupby the stratum (e.g. features.weather.file) # and by the target (e.g. Electricity, Gas, etc.) # we are converged if any of the metrics have converged for that target # in that stratum comparison_stratum_and_target = comparison.groupby( level=[lev for lev in comparison.index.names if lev != "metric"] - ).any() + ).any() # TODO: make it configurable such that instead of `any`, we can specify a count, i.e. at least 2 must be converged # then we will check that all targets have converged for each stratum + + # only levels left in multiindex should be stratum and target + comparison_strata = comparison_stratum_and_target.groupby(level="stratum").all() # finally, we will check that all strata have converged @@ -81,6 +114,11 @@ def check_convergence(self, metrics: pd.Series): comparison, ) + def run(self, metrics: pd.Series) -> tuple[bool, pd.Series, pd.Series, pd.Series]: + """Run the convergence criteria.""" + comparisons = self.make_comparisons(metrics) + return self.combine_and_check_strata_and_targets(comparisons) + class XGBTrainerConfig(BaseModel): """The trainer hyperparameters for the xgboost model.""" @@ -277,8 +315,8 @@ class ProgressiveTrainingSpec(ExperimentInputSpec): ..., description="The base run name for the experiment.", ) - convergence_criteria: ConvergenceThresholds = Field( - default_factory=ConvergenceThresholds, + convergence_criteria: ConvergenceThresholdsByTarget = Field( + default_factory=ConvergenceThresholdsByTarget, description="The convergence criteria.", ) regression_io_config: RegressionIOConfigSpec = Field( @@ -1313,45 +1351,3 @@ def schedule(self) -> list[TrainFoldSpec]: ) ) return schedule - - # def check_convergence(self, uri: 
URIResponse, s3_client: S3ClientType): - # """Check the convergence of the training.""" - # with tempfile.TemporaryDirectory() as tempdir: - # tempdir = Path(tempdir) - # results_path = tempdir / "results.hdf" - # # download the results from s3 - # fetch_uri(uri.uri, local_path=results_path, use_cache=False, s3=s3_client) - # results = cast( - # pd.DataFrame, pd.read_hdf(results_path, key="stratum_metrics") - # ) - - # fold_averages = cast( - # pd.Series, - # results.xs( - # "test", - # level="split_segment", - # axis=1, - # ) - # .groupby(level="measurement") - # .mean() - # .unstack(level="measurement"), - # ) - # with tempfile.TemporaryDirectory() as tempdir: - # fold_averages_path = Path(tempdir) / "fold-averaged-errors.pq" - # fold_averages.to_frame( - # name=self.progressive_training_iteration_ix - # ).to_parquet(fold_averages_path) - # key = f"hatchet/{self.experiment_key}/fold-averaged-errors.pq" - # bucket = self.progressive_training_spec.bucket - # s3_client.upload_file(fold_averages_path.as_posix(), bucket, key) - - # ( - # convergence_all, - # convergence_monitor_segment, - # convergence_monitor_segment_and_target, - # convergence, - # ) = self.progressive_training_spec.convergence_criteria.check_convergence( - # fold_averages.xs("Energy", level="measurement") - # ) - - # return convergence_all, convergence diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 59e7f91..b0673f1 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -3,7 +3,7 @@ import random from datetime import timedelta from pathlib import Path -from typing import Literal +from typing import Literal, cast import pandas as pd from hatchet_sdk import Context @@ -82,7 +82,7 @@ def train_regressor_with_cv_fold( regressor=model_path, dataframes={ "global": global_results, - "stratums": stratum_results, + "strata": stratum_results, }, ) @@ -120,7 +120,7 @@ def create_simulations( run_name = f"{spec.experiment_id}/sample" exp = 
BaseExperiment( - # TODO: replace with simulate_globi_flat_building + # TODO: replace with simulate_globi_flat_building, or better yet, allow loading from the registry via config. experiment=dummy_simulation, # TODO: add configurability to switch between simulations. run_name=run_name, storage_settings=spec.storage_settings or ScytheStorageSettings(), @@ -128,7 +128,7 @@ def create_simulations( run, ref = exp.allocate( specs, - version="bumpmajor", # TODO: bump minor if not the first iteration; actually, not necessary since root experiment takes care of this + version="bumpmajor", recursion_map=spec.iteration.recursion, ) @@ -289,8 +289,28 @@ def evaluate_training( spec: ProgressiveTrainingSpec, context: Context ) -> TrainingEvaluationResult: """Evaluate the training.""" - _results = context.task_output(await_training) - return TrainingEvaluationResult(converged=False) + results_output = context.task_output(await_training) + strata = results_output.uris["strata"] + _globals = results_output.uris["global"] + results = pd.read_parquet(str(strata)) + + fold_averages = cast( + pd.Series, + results.xs("test", level="split_segment", axis=1) + .groupby(level="iteration") + .mean() + .unstack(), + ) + # TODO: fold_averages and strata and globals should be saved to s3 + + ( + convergence_all, + _convergence_monitor_segment, + _convergence_monitor_segment_and_target, + _convergence, + ) = spec.convergence_criteria.run(fold_averages) + + return TrainingEvaluationResult(converged=convergence_all) @iterative_training.task( @@ -340,6 +360,8 @@ def transition_recursion( from scythe.settings import ScytheStorageSettings from globi.models.surrogate.training import ( + ConvergenceThresholds, + ConvergenceThresholdsByTarget, ProgressiveTrainingSpec, StratificationSpec, ) @@ -357,6 +379,11 @@ def transition_recursion( iteration=IterationSpec( max_iters=10, ), + convergence_criteria=ConvergenceThresholdsByTarget( + thresholds={ + "*": ConvergenceThresholds(r2=0.975), + }, + ), 
storage_settings=ScytheStorageSettings(), data_uris=None, base_run_name=base_run_name, From 7db748f052e61392b292eafbcdbdcbaf4912f561 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sun, 8 Mar 2026 17:04:18 -0400 Subject: [PATCH 09/31] reorganize some files --- src/globi/models/surrogate/outputs.py | 44 ++++++++++++++++++++ src/globi/models/surrogate/training.py | 8 +++- src/globi/pipelines/training.py | 56 +++++--------------------- 3 files changed, 62 insertions(+), 46 deletions(-) create mode 100644 src/globi/models/surrogate/outputs.py diff --git a/src/globi/models/surrogate/outputs.py b/src/globi/models/surrogate/outputs.py new file mode 100644 index 0000000..0bdb9b6 --- /dev/null +++ b/src/globi/models/surrogate/outputs.py @@ -0,0 +1,44 @@ +"""Outputs for the surrogate model pipeline.""" + +from typing import Literal + +from pydantic import BaseModel +from scythe.experiments import ExperimentRun +from scythe.scatter_gather import ScatterGatherResult + +from globi.models.surrogate.training import TrainWithCVSpec + + +class CombineResultsResult(BaseModel): + """The result of combining the results of the simulations.""" + + incoming: ScatterGatherResult + combined: ScatterGatherResult + + +# TODO: This should perhaps go somewhere else since it is generally useful. 
+class ExperimentRunWithRef(BaseModel): + """An experiment run with a workflow run id.""" + + run: ExperimentRun + workflow_run_id: str + + +class StartTrainingResult(BaseModel): + """The result of starting the training.""" + + training_spec: TrainWithCVSpec + experiment_run_with_ref: ExperimentRunWithRef + + +class TrainingEvaluationResult(BaseModel): + """The result of evaluating the training.""" + + converged: bool + + +class RecursionTransition(BaseModel): + """The transition of the recursion.""" + + reasoning: Literal["max_depth", "converged"] | None + child_workflow_run_id: str | None diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 3959b4a..fa6b5b1 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -11,7 +11,7 @@ import numpy as np import pandas as pd from pydantic import BaseModel, Field -from scythe.base import ExperimentInputSpec +from scythe.base import ExperimentInputSpec, ExperimentOutputSpec from scythe.scatter_gather import RecursionMap, ScatterGatherResult from scythe.utils.filesys import FileReference, S3Url @@ -1326,6 +1326,12 @@ def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): # return f"hatchet/{self.model_dir_key}/{model_name}" +class FoldResult(ExperimentOutputSpec): + """The output for a fold.""" + + regressor: FileReference + + class TrainWithCVSpec(StageSpec): """Train an SBEM model using a scatter gather approach for cross-fold validation.""" diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index b0673f1..5d4b891 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -3,23 +3,29 @@ import random from datetime import timedelta from pathlib import Path -from typing import Literal, cast +from typing import cast import pandas as pd from hatchet_sdk import Context -from pydantic import BaseModel, HttpUrl -from scythe.base import ExperimentOutputSpec +from pydantic 
import HttpUrl from scythe.experiments import ( BaseExperiment, - ExperimentRun, ) from scythe.hatchet import hatchet from scythe.registry import ExperimentRegistry from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather -from scythe.utils.filesys import FileReference, S3Url +from scythe.utils.filesys import S3Url from globi.models.surrogate.dummy import DummySimulationInput, dummy_simulation +from globi.models.surrogate.outputs import ( + CombineResultsResult, + ExperimentRunWithRef, + RecursionTransition, + StartTrainingResult, + TrainingEvaluationResult, +) from globi.models.surrogate.training import ( + FoldResult, IterationSpec, ProgressiveTrainingSpec, TrainFoldSpec, @@ -27,46 +33,6 @@ ) -class FoldResult(ExperimentOutputSpec): - """The output for a fold.""" - - regressor: FileReference - - -class CombineResultsResult(BaseModel): - """The result of combining the results of the simulations.""" - - incoming: ScatterGatherResult - combined: ScatterGatherResult - - -class ExperimentRunWithRef(BaseModel): - """An experiment run with a workflow run id.""" - - run: ExperimentRun - workflow_run_id: str - - -class StartTrainingResult(BaseModel): - """The result of starting the training.""" - - training_spec: TrainWithCVSpec - experiment_run_with_ref: ExperimentRunWithRef - - -class TrainingEvaluationResult(BaseModel): - """The result of evaluating the training.""" - - converged: bool - - -class RecursionTransition(BaseModel): - """The transition of the recursion.""" - - reasoning: Literal["max_depth", "converged"] | None - child_workflow_run_id: str | None - - @ExperimentRegistry.Register( description="Train a regressor with cross-fold validation.", schedule_timeout=timedelta(hours=5), From bb657b3d92398f7ed0f2b8edc81e9bbaf95ebab7 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sun, 8 Mar 2026 17:26:47 -0400 Subject: [PATCH 10/31] reorganize modules --- .../models/surrogate/configs/__init__.py | 1 + 
.../models/surrogate/configs/pipeline.py | 292 +++++++++ .../models/surrogate/configs/regression.py | 86 +++ src/globi/models/surrogate/sampling.py | 196 ++++++ src/globi/models/surrogate/training.py | 568 +----------------- src/globi/pipelines/training.py | 10 +- 6 files changed, 586 insertions(+), 567 deletions(-) create mode 100644 src/globi/models/surrogate/configs/__init__.py create mode 100644 src/globi/models/surrogate/configs/pipeline.py create mode 100644 src/globi/models/surrogate/configs/regression.py create mode 100644 src/globi/models/surrogate/sampling.py diff --git a/src/globi/models/surrogate/configs/__init__.py b/src/globi/models/surrogate/configs/__init__.py new file mode 100644 index 0000000..2b2e032 --- /dev/null +++ b/src/globi/models/surrogate/configs/__init__.py @@ -0,0 +1 @@ +"""Configs for the surrogate model pipeline.""" diff --git a/src/globi/models/surrogate/configs/pipeline.py b/src/globi/models/surrogate/configs/pipeline.py new file mode 100644 index 0000000..a088a79 --- /dev/null +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -0,0 +1,292 @@ +"""Configs for the surrogate model pipeline.""" + +import fnmatch +import re +from functools import cached_property +from pathlib import Path +from typing import Literal, cast + +import numpy as np +import pandas as pd +from pydantic import BaseModel, Field +from scythe.base import ExperimentInputSpec +from scythe.scatter_gather import RecursionMap, ScatterGatherResult +from scythe.utils.filesys import FileReference, S3Url + +from globi.models.surrogate.configs.regression import ModelHPType, XGBHyperparameters + + +class IterationSpec(BaseModel): + """The iteration spec.""" + + n_init: int = Field(default=10000, description="The number of initial samples.") + min_per_stratum: int = Field( + default=100, description="The minimum number of samples per stratum." 
+ ) + n_per_iter: int = Field( + default=10000, + description="The number of samples to add per each iteration of the outer loop.", + ) + max_iters: int = Field( + default=100, + description="The maximum number of outer loop iterations to perform.", + ) + recursion: RecursionMap = Field( + default_factory=lambda: RecursionMap(factor=100, max_depth=1), + description="The recursion spec.", + ) + current_iter: int = Field( + default=0, + description="The index of the current training iteration within the outer loop.", + ) + + @property + def at_max_iters(self) -> bool: + """Whether the current iteration is the maximum number of iterations.""" + return self.current_iter + 1 >= self.max_iters + + +class StratificationSpec(BaseModel): + """A spec for stratifying the data.""" + + field: str = Field( + default="feature.weather.file", description="The field to stratify by." + ) + sampling: Literal["equal", "error-weighted", "proportional"] = Field( + default="equal", + description="The sampling method to use over the strata.", + ) + aliases: list[str] = Field( + default_factory=lambda: ["epwzip_path", "epw_path"], + description="The alias to use for the stratum as a fallback.", + ) + + # TODO: consider allowing the stratification to be a compound with e.g. component_map_uri and semantic_fields_uri and database_uri + + +class CrossValidationSpec(BaseModel): + """The cross validation spec.""" + + n_folds: int = Field( + default=5, description="The number of folds for the entire parent task." + ) + + +class ConvergenceThresholds(BaseModel): + """The thresholds for convergence.""" + + # TODO: instead of using a risky hardcoded "n/a" token, make nullability have better support. 
+ mae: float = Field(default=-9e9, description="The maximum MAE for convergence.") + rmse: float = Field(default=-9e9, description="The maximum RMSE for convergence.") + mape: float = Field(default=-9e9, description="The maximum MAPE for convergence.") + r2: float = Field(default=9e9, description="The minimum R2 for convergence.") + cvrmse: float = Field( + default=-9e9, description="The maximum CV_RMSE for convergence." + ) + + def check_convergence(self, metrics: pd.Series, target: re.Pattern | None = None): + """Check if the metrics have converged. + + Note that this requires the metrics data frame to have the following shape: + + """ + # first, we select the data for the relevant targets: + if target is not None: + target_level = metrics.index.get_level_values("target") + # Interpret target as a regex and match + mask = cast(pd.Series, target_level.to_series().astype(str)).str.match( + target + ) + metrics = cast(pd.Series, metrics.loc[mask.values]) + + thresholds = pd.Series(self.model_dump(), name="metric") + + # first, we will select the appropriate threshold for each metric + comparators = thresholds.loc[metrics.index.get_level_values("metric")] + # we can then copy over the index safely + comparators.index = metrics.index + + # next, we will flip the sign of the r2 metric since it is a maximization metric rather than min + metrics = metrics * np.where( + metrics.index.get_level_values("metric") == "r2", -1, 1 + ) + comparators = comparators * np.where( + comparators.index.get_level_values("metric") == "r2", -1, 1 + ) + + # run the comparisons + comparison = metrics < comparators + + return comparison + + +class ConvergenceThresholdsByTarget(BaseModel): + """The thresholds for convergence by target.""" + + thresholds: dict[str, ConvergenceThresholds] = Field( + default_factory=lambda: {"*": ConvergenceThresholds()}, + description="The thresholds for convergence by target.", + ) + + def make_comparisons(self, metrics: pd.Series) -> list[pd.Series]: + 
"""Generate a list of all stratum/target/metric True/False comparisons.""" + return [ + self.thresholds[target].check_convergence( + metrics, re.compile(fnmatch.translate(target)) + ) + for target in self.thresholds + ] + + def combine_and_check_strata_and_targets(self, comparisons: list[pd.Series]): + """Combine the comparisons and aggregate first by targets then by strata.""" + comparison = pd.concat(comparisons, axis=0) + # now we will groupby the stratum (e.g. features.weather.file) + # and by the target (e.g. Electricity, Gas, etc.) + # we are converged if any of the metrics have converged for that target + # in that stratum + comparison_stratum_and_target = comparison.groupby( + level=[lev for lev in comparison.index.names if lev != "metric"] + ).any() # TODO: make it configurable such that instead of `any`, we can specify a count, i.e. at least 2 must be converged + + # then we will check that all targets have converged for each stratum + + # only levels left in multiindex should be stratum and target + + comparison_strata = comparison_stratum_and_target.groupby(level="stratum").all() + + # finally, we will check that all strata have converged + comparison_all = comparison_strata.all() + + return ( + comparison_all, + comparison_strata, + comparison_stratum_and_target, + comparison, + ) + + def run(self, metrics: pd.Series) -> tuple[bool, pd.Series, pd.Series, pd.Series]: + """Run the convergence criteria.""" + comparisons = self.make_comparisons(metrics) + return self.combine_and_check_strata_and_targets(comparisons) + + +class TargetsConfigSpec(BaseModel): + """The targets config spec.""" + + columns: list[str] = Field( + default_factory=list, description="The columns to use as targets." + ) + normalization: Literal["min-max", "standard", "none"] = Field( + default="none", description="The normalization method to use." 
+ ) + + +class FeatureConfigSpec(BaseModel): + """The feature config spec.""" + + continuous_columns: frozenset[str] = Field( + default=frozenset(), description="The continuous columns to use as features." + ) + categorical_columns: frozenset[str] = Field( + default=frozenset(), description="The categorical columns to use as features." + ) + exclude_columns: frozenset[str] = Field( + default=frozenset(), + description="The columns to exclude from the features.", + ) + cont_cat_unicity_transition_threshold: int = Field( + default=10, + description="The threshold for the number of unique values to transition from continuous to categorical variable.", + ) + + +class RegressionIOConfigSpec(BaseModel): + """The input/output spec for a regression model.""" + + targets: TargetsConfigSpec = Field( + default_factory=TargetsConfigSpec, description="The targets config spec." + ) + features: FeatureConfigSpec = Field( + default_factory=FeatureConfigSpec, + description="The features config spec.", + ) + + +class ProgressiveTrainingSpec(ExperimentInputSpec): + """A spec for iteratively training an SBEM regression model.""" + + base_run_name: str = Field( + ..., + description="The base run name for the experiment.", + ) + convergence_criteria: ConvergenceThresholdsByTarget = Field( + default_factory=ConvergenceThresholdsByTarget, + description="The convergence criteria.", + ) + regression_io_config: RegressionIOConfigSpec = Field( + default_factory=RegressionIOConfigSpec, + description="The regression io config spec.", + ) + hyperparameters: ModelHPType = Field( + default_factory=XGBHyperparameters, + description="The hyperparameters for the model.", + ) + stratification: StratificationSpec = Field( + default_factory=StratificationSpec, + description="The stratification spec.", + ) + cross_val: CrossValidationSpec = Field( + default_factory=CrossValidationSpec, + description="The cross validation spec.", + ) + iteration: IterationSpec = Field( + default_factory=IterationSpec, + 
description="The iteration spec.", + ) + gis_uri: FileReference = Field( + ..., + description="The uri of the gis data to train on.", + ) + data_uris: ScatterGatherResult | None = Field( + default=None, + description="The uri of the previous simulation results to train on.", + ) + + def format_combined_output_key(self, key: str) -> str: + """Format the output key for a combined result file.""" + return f"{self.prefix}/combined/{key}.parquet" + + def format_combined_output_uri(self, key: str) -> S3Url: + """Format the output uri for a combined result file.""" + if self.storage_settings is None: + msg = "Storage settings are not set, so we can't construct a combined output uri." + raise ValueError(msg) + return S3Url( + f"s3://{self.storage_settings.BUCKET}/{self.format_combined_output_key(key)}" + ) + + @property + def gis_path(self) -> Path: + """The path to the gis data.""" + if isinstance(self.gis_uri, Path): + return self.gis_uri + return self.fetch_uri(self.gis_uri) + + @cached_property + def gis_data(self) -> pd.DataFrame: + """Load the gis data.""" + return pd.read_parquet(self.gis_path) + + +class StageSpec(BaseModel): + """A spec that is common to both the sample and train stages (and possibly others).""" + + parent: ProgressiveTrainingSpec = Field( + ..., + description="The parent spec.", + ) + + @cached_property + def random_generator(self) -> np.random.Generator: + """The random generator.""" + return np.random.default_rng(self.parent.iteration.current_iter) diff --git a/src/globi/models/surrogate/configs/regression.py b/src/globi/models/surrogate/configs/regression.py new file mode 100644 index 0000000..b65c64a --- /dev/null +++ b/src/globi/models/surrogate/configs/regression.py @@ -0,0 +1,86 @@ +"""Configs for the surrogate model pipeline.""" + +from typing import Any, Literal + +from pydantic import BaseModel, Field + + +class XGBTrainerConfig(BaseModel): + """The trainer hyperparameters for the xgboost model.""" + + num_boost_round: int = Field( + 
default=4000, description="The number of boosting rounds." + ) + early_stopping_rounds: int = Field( + default=10, description="The number of boosting rounds to early stop." + ) + verbose_eval: bool = Field( + default=True, description="Whether to print verbose evaluation results." + ) + + +class XGBModelConfig(BaseModel): + """The model hyperparameters for the xgboost model.""" + + max_depth: int = Field(default=5, description="The maximum depth of the tree.") + eta: float = Field(default=0.1, description="The learning rate.") + min_child_weight: int | None = Field( + default=3, description="The minimum child weight." + ) + subsample: float | None = Field(default=None, description="The subsample rate.") + colsample_bytree: float | None = Field( + default=None, description="The column sample by tree rate." + ) + alpha: float | None = Field(default=None, description="The alpha parameter.") + lam: float | None = Field(default=None, description="The lambda parameter.") + gamma: float | None = Field(default=None, description="The gamma parameter.") + seed: int = Field( + default=42, description="The seed for the random number generator." 
+ ) + + @property + def param_dict(self) -> dict[str, Any]: + """The dictionary of parameters.""" + import torch + + data = { + "objective": "reg:squarederror", + "eval_metric": "rmse", + "tree_method": "auto", + "seed": self.seed, + # hyperparameters + **self.model_dump( + exclude_none=True, + ), + } + if torch.cuda.is_available(): + data["device"] = "cuda" + return data + + +class XGBHyperparameters(BaseModel): + """The parameters for the xgboost model.""" + + hp: XGBModelConfig = Field( + default_factory=XGBModelConfig, + description="The hyperparameters for the model.", + ) + trainer: XGBTrainerConfig = Field( + default_factory=XGBTrainerConfig, + description="The trainer hyperparameters for the model.", + ) + + +class LGBHyperparameters(BaseModel): + """The parameters for the lightgbm model.""" + + objective: Literal["regression", "binary", "multiclass"] = Field( + default="regression", description="The objective function to use." + ) + metric: Literal["rmse"] = Field( + default="rmse", description="The metric to optimize." 
+ ) + # TODO: add other parameters as needed + + +ModelHPType = XGBHyperparameters | LGBHyperparameters diff --git a/src/globi/models/surrogate/sampling.py b/src/globi/models/surrogate/sampling.py new file mode 100644 index 0000000..b4bb1d8 --- /dev/null +++ b/src/globi/models/surrogate/sampling.py @@ -0,0 +1,196 @@ +"""Models used for the training set sampling pipeline.""" + +from typing import cast + +import pandas as pd + +from globi.models.surrogate.configs.pipeline import StageSpec + + +class SampleSpec(StageSpec): + """A spec for the sampling stage of the progressive training.""" + + # TODO: add the ability to receive the last set of error metrics and use them to inform the sampling + + def stratified_selection(self) -> pd.DataFrame: + """Sample the gis data.""" + df = self.parent.gis_data + + stratification_field = self.parent.stratification.field + stratification_aliases = self.parent.stratification.aliases + + if stratification_field not in df.columns and not any( + alias in df.columns for alias in stratification_aliases + ): + msg = f"Stratification field {stratification_field} not found in gis data. Please check the field name and/or the aliases." + raise ValueError(msg) + + if stratification_field not in df.columns: + stratification_field = next( + alias for alias in stratification_aliases if alias in df.columns + ) + + strata = cast(list[str], df[stratification_field].unique().tolist()) + + if self.parent.stratification.sampling == "equal": + return self.sample_equally_by_stratum(df, strata, stratification_field) + elif self.parent.stratification.sampling == "error-weighted": + msg = "Error-weighted sampling is not yet implemented." + raise NotImplementedError(msg) + elif self.parent.stratification.sampling == "proportional": + msg = "Proportional sampling is not yet implemented." 
+ raise NotImplementedError(msg) + else: + msg = f"Invalid sampling method: {self.parent.stratification.sampling}" + raise ValueError(msg) + + def sample_equally_by_stratum( + self, df: pd.DataFrame, strata: list[str], stratification_field: str + ) -> pd.DataFrame: + """Sample equally by stratum. + + This will break the dataframe up into n strata and ensure that each strata ends up with the same number of samples. + + Args: + df (pd.DataFrame): The dataframe to sample from. + strata (list[str]): The unique values of the strata. + stratification_field (str): The field to stratify the data by. + + Returns: + samples (pd.DataFrame): The sampled dataframe. + """ + stratum_dfs = { + stratum: df[df[stratification_field] == stratum] for stratum in strata + } + n_per_iter = ( + self.parent.iteration.n_per_iter + if self.parent.iteration.current_iter != 0 + else self.parent.iteration.n_init + ) + n_per_stratum = max( + n_per_iter // len(strata), + ( + self.parent.iteration.min_per_stratum + if self.parent.iteration.current_iter == 0 + else 0 + ), + ) + + # TODO: consider how we want to handle potentially having the same geometry appear in both + # the training and testing sets. + # if any(len(stratum_df) < n_per_stratum for stratum_df in stratum_dfs.values()): + # msg = "There are not enough buildings in some strata to sample the desired number of buildings per stratum." + # # consider making this a warning? + # raise ValueError(msg) + + sampled_strata = { + stratum: stratum_df.sample( + n=n_per_stratum, random_state=self.random_generator, replace=True + ) + for stratum, stratum_df in stratum_dfs.items() + } + return cast(pd.DataFrame, pd.concat(sampled_strata.values())) + + # def sample_semantic_fields(self, df: pd.DataFrame) -> pd.DataFrame: + # """Sample the semantic fields.""" + # # TODO: consider randomizing the locations? 
+ # semantic_fields = self.progressive_training_spec.semantic_fields_data + # for field in semantic_fields.Fields: + # if isinstance(field, CategoricalFieldSpec): + # options = field.Options + # df[field.Name] = self.random_generator.choice(options, size=len(df)) + # elif isinstance(field, NumericFieldSpec): + # df[field.Name] = self.random_generator.uniform( + # field.Min, field.Max, size=len(df) + # ) + # else: + # msg = f"Invalid field type: {type(field)}" + # raise TypeError(msg) + # return df + + # def sample_basements_and_attics(self, df: pd.DataFrame) -> pd.DataFrame: + # """Add basement/attics to models.""" + # # get the options for the type literal + # options: list[BasementAtticOccupationConditioningStatus] = [ + # "none", + # "occupied_unconditioned", + # "unoccupied_unconditioned", + # "occupied_conditioned", + # "unoccupied_conditioned", + # ] + # weights = [0.5, *([0.5 / 4] * 4)] + # # sample the type literal + # df["basement"] = self.random_generator.choice(options, size=len(df), p=weights) + # df["attic"] = self.random_generator.choice(options, size=len(df), p=weights) + # df["exposed_basement_frac"] = self.random_generator.uniform( + # 0.1, 0.5, size=len(df) + # ) + # return df + + # def sample_wwrs(self, df: pd.DataFrame) -> pd.DataFrame: + # """Sample the wwrs.""" + # wwr_min = 0.05 + # wwr_max = 0.35 + # df["wwr"] = self.random_generator.uniform(wwr_min, wwr_max, size=len(df)) + # return df + + # def sample_f2f_heights(self, df: pd.DataFrame) -> pd.DataFrame: + # """Sample the f2f heights.""" + # f2f_min = 2.3 + # f2f_max = 4.3 + # df["f2f_height"] = self.random_generator.uniform(f2f_min, f2f_max, size=len(df)) + # return df + + def to_sim_specs(self, df: pd.DataFrame): + """Convert the sampled dataframe to a list of simulation specs. + + For now, we are assuming that all the other necessary fields are present and we are just + ensuring that sort_index and experiment_id are set appropriately. 
+ """ + # df["semantic_field_context"] = df.apply( + # lambda row: { + # field.Name: row[field.Name] + # for field in self.progressive_training_spec.semantic_fields_data.Fields + # }, + # axis=1, + # ) + # df["sort_index"] = np.arange(len(df)) + # df["experiment_id"] = self.experiment_key + # # TODO: consider allowing the component map/semantic_fields/database to be inherited from the row + # # e.g. to allow multiple component maps and dbs per run. + # df["component_map_uri"] = str(self.progressive_training_spec.component_map_uri) + # df["semantic_fields_uri"] = str( + # self.progressive_training_spec.semantic_fields_uri + # ) + # df["db_uri"] = str(self.progressive_training_spec.database_uri) + return df + + # def make_payload(self, s3_client: S3ClientType): + # """Make the payload for the scatter gather task, including generating the simulation specs and serializing them to s3.""" + # df = self.stratified_selection() + # # df = self.sample_semantic_fields(df) + # # df = self.sample_basements_and_attics(df) + # # df = self.sample_wwrs(df) + # # df = self.sample_f2f_heights(df) + # df = self.to_sim_specs(df) + # # serialize to a parquet file and upload to s3 + # bucket = self.progressive_training_spec.storage_settings.BUCKET + # with tempfile.TemporaryDirectory() as tmpdir: + # tmpdir = Path(tmpdir) + # fpath = tmpdir / "specs.pq" + # df.to_parquet(fpath) + # key = f"hatchet/{self.experiment_key}/specs.pq" + # specs_uri = f"s3://{bucket}/{key}" + # s3_client.upload_file(fpath.as_posix(), bucket, key) + + # payload = { + # "specs": specs_uri, + # "bucket": bucket, + # "workflow_name": "simulate_sbem_shoebox", + # "experiment_id": self.experiment_key, + # "recursion_map": { + # "factor": self.progressive_training_spec.iteration.recursion_factor, + # "max_depth": self.progressive_training_spec.iteration.recursion_max_depth, + # }, + # } + # return payload diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 
fa6b5b1..b3fa03e 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -1,292 +1,27 @@ """Models used for the surrogate training pipeline.""" -import fnmatch -import re import warnings from collections.abc import Callable from functools import cached_property from pathlib import Path -from typing import TYPE_CHECKING, Any, Literal, cast +from typing import TYPE_CHECKING, cast import numpy as np import pandas as pd -from pydantic import BaseModel, Field +from pydantic import Field from scythe.base import ExperimentInputSpec, ExperimentOutputSpec -from scythe.scatter_gather import RecursionMap, ScatterGatherResult +from scythe.scatter_gather import ScatterGatherResult from scythe.utils.filesys import FileReference, S3Url +from globi.models.surrogate.configs.pipeline import ProgressiveTrainingSpec, StageSpec +from globi.models.surrogate.configs.regression import XGBHyperparameters + if TYPE_CHECKING: from mypy_boto3_s3.client import S3Client as S3ClientType else: S3ClientType = object -# TODO: allow specific configuration per column. -class ConvergenceThresholds(BaseModel): - """The thresholds for convergence.""" - - # TODO: instead of using a risky hardcoded "n/a" token, make nullability have better support. - mae: float = Field(default=-9e9, description="The maximum MAE for convergence.") - rmse: float = Field(default=-9e9, description="The maximum RMSE for convergence.") - mape: float = Field(default=-9e9, description="The maximum MAPE for convergence.") - r2: float = Field(default=9e9, description="The minimum R2 for convergence.") - cvrmse: float = Field( - default=-9e9, description="The maximum CV_RMSE for convergence." - ) - - def check_convergence(self, metrics: pd.Series, target: re.Pattern | None = None): - """Check if the metrics have converged. 
- - Note that this requires the metrics data frame to have the following shape: - - """ - # first, we select the data for the relevant targets: - if target is not None: - target_level = metrics.index.get_level_values("target") - # Interpret target as a regex and match - mask = cast(pd.Series, target_level.to_series().astype(str)).str.match( - target - ) - metrics = cast(pd.Series, metrics.loc[mask.values]) - - thresholds = pd.Series(self.model_dump(), name="metric") - - # first, we will select the appropriate threshold for each metric - comparators = thresholds.loc[metrics.index.get_level_values("metric")] - # we can then copy over the index safely - comparators.index = metrics.index - - # next, we will flip the sign of the r2 metric since it is a maximization metric rather thin min - metrics = metrics * np.where( - metrics.index.get_level_values("metric") == "r2", -1, 1 - ) - comparators = comparators * np.where( - comparators.index.get_level_values("metric") == "r2", -1, 1 - ) - - # run the comparisons - comparison = metrics < comparators - - return comparison - - -class ConvergenceThresholdsByTarget(BaseModel): - """The thresholds for convergence by target.""" - - thresholds: dict[str, ConvergenceThresholds] = Field( - default_factory=lambda: {"*": ConvergenceThresholds()}, - description="The thresholds for convergence by target.", - ) - - def make_comparisons(self, metrics: pd.Series) -> list[pd.Series]: - """Generate a list of all stratum/target/metric True/False comparisons.""" - return [ - self.thresholds[target].check_convergence( - metrics, re.compile(fnmatch.translate(target)) - ) - for target in self.thresholds - ] - - def combine_and_check_strata_and_targets(self, comparisons: list[pd.Series]): - """Combine the comparisons and aggregate first by targets then by strata.""" - comparison = pd.concat(comparisons, axis=0) - # now we will groupby the stratum (e.g. features.weather.file) - # and by the target (e.g. Electricity, Gas, etc.) 
- # we are converged if any of the metrics have converged for that target - # in that stratum - comparison_stratum_and_target = comparison.groupby( - level=[lev for lev in comparison.index.names if lev != "metric"] - ).any() # TODO: make it configurable such that instead of `any`, we can specify a count, i.e. at least 2 must be converged - - # then we will check that all targets have converged for each stratum - - # only levels left in multiindex should be stratum and target - - comparison_strata = comparison_stratum_and_target.groupby(level="stratum").all() - - # finally, we will check that all strata have converged - comparison_all = comparison_strata.all() - - return ( - comparison_all, - comparison_strata, - comparison_stratum_and_target, - comparison, - ) - - def run(self, metrics: pd.Series) -> tuple[bool, pd.Series, pd.Series, pd.Series]: - """Run the convergence criteria.""" - comparisons = self.make_comparisons(metrics) - return self.combine_and_check_strata_and_targets(comparisons) - - -class XGBTrainerConfig(BaseModel): - """The trainer hyperparameters for the xgboost model.""" - - num_boost_round: int = Field( - default=4000, description="The number of boosting rounds." - ) - early_stopping_rounds: int = Field( - default=10, description="The number of boosting rounds to early stop." - ) - verbose_eval: bool = Field( - default=True, description="Whether to print verbose evaluation results." - ) - - -class XGBModelConfig(BaseModel): - """The model hyperparameters for the xgboost model.""" - - max_depth: int = Field(default=5, description="The maximum depth of the tree.") - eta: float = Field(default=0.1, description="The learning rate.") - min_child_weight: int | None = Field( - default=3, description="The minimum child weight." - ) - subsample: float | None = Field(default=None, description="The subsample rate.") - colsample_bytree: float | None = Field( - default=None, description="The column sample by tree rate." 
- ) - alpha: float | None = Field(default=None, description="The alpha parameter.") - lam: float | None = Field(default=None, description="The lambda parameter.") - gamma: float | None = Field(default=None, description="The gamma parameter.") - seed: int = Field( - default=42, description="The seed for the random number generator." - ) - - @property - def param_dict(self) -> dict[str, Any]: - """The dictionary of parameters.""" - import torch - - data = { - "objective": "reg:squarederror", - "eval_metric": "rmse", - "tree_method": "auto", - "seed": self.seed, - # hyperparameters - **self.model_dump( - exclude_none=True, - ), - } - if torch.cuda.is_available(): - data["device"] = "cuda" - return data - - -class XGBHyperparameters(BaseModel): - """The parameters for the xgboost model.""" - - hp: XGBModelConfig = Field( - default_factory=XGBModelConfig, - description="The hyperparameters for the model.", - ) - trainer: XGBTrainerConfig = Field( - default_factory=XGBTrainerConfig, - description="The trainer hyperparameters for the model.", - ) - - -class LGBHyperparameters(BaseModel): - """The parameters for the lightgbm model.""" - - objective: Literal["regression", "binary", "multiclass"] = Field( - default="regression", description="The objective function to use." - ) - metric: Literal["rmse"] = Field( - default="rmse", description="The metric to optimize." - ) - # TODO: add other parameters as needed - - -ModelHPType = XGBHyperparameters | LGBHyperparameters - - -class StratificationSpec(BaseModel): - """A spec for stratifying the data.""" - - field: str = Field( - default="feature.weather.file", description="The field to stratify by." 
- ) - sampling: Literal["equal", "error-weighted", "proportional"] = Field( - default="equal", - description="The sampling method to use over the strata.", - ) - aliases: list[str] = Field( - default_factory=lambda: ["epwzip_path", "epw_path"], - description="The alias to use for the stratum as a fallback.", - ) - - # TODO: consider allowing the stratification to be a compound with e.g. component_map_uri and semantic_fields_uri and database_uri - - -class CrossValidationSpec(BaseModel): - """The cross validation spec.""" - - n_folds: int = Field( - default=5, description="The number of folds for the entire parent task." - ) - - -class IterationSpec(BaseModel): - """The iteration spec.""" - - n_init: int = Field(default=10000, description="The number of initial samples.") - min_per_stratum: int = Field( - default=100, description="The minimum number of samples per stratum." - ) - n_per_iter: int = Field( - default=10000, - description="The number of samples to add per each iteration of the outer loop.", - ) - max_iters: int = Field( - default=100, - description="The maximum number of outer loop iterations to perform.", - ) - recursion: RecursionMap = Field( - default_factory=lambda: RecursionMap(factor=100, max_depth=1), - description="The recursion spec.", - ) - current_iter: int = Field( - default=0, - description="The index of the current training iteration within the outer loop.", - ) - - @property - def at_max_iters(self) -> bool: - """Whether the current iteration is the maximum number of iterations.""" - return self.current_iter + 1 >= self.max_iters - - -class TargetsConfigSpec(BaseModel): - """The targets config spec.""" - - columns: list[str] = Field( - default_factory=list, description="The columns to use as targets." - ) - normalization: Literal["min-max", "standard", "none"] = Field( - default="none", description="The normalization method to use." 
- ) - - -class FeatureConfigSpec(BaseModel): - """The feature config spec.""" - - continuous_columns: frozenset[str] = Field( - default=frozenset(), description="The continuous columns to use as features." - ) - categorical_columns: frozenset[str] = Field( - default=frozenset(), description="The categorical columns to use as features." - ) - exclude_columns: frozenset[str] = Field( - default=frozenset(), - description="The columns to exclude from the features.", - ) - cont_cat_unicity_transition_threshold: int = Field( - default=10, - description="The threshold for the number of unique values to transition from continuous to categorical variable.", - ) - - EXCLUDED_COLUMNS = frozenset({ "experiment_id", "sort_index", @@ -295,288 +30,6 @@ class FeatureConfigSpec(BaseModel): }) -class RegressionIOConfigSpec(BaseModel): - """The input/output spec for a regression model.""" - - targets: TargetsConfigSpec = Field( - default_factory=TargetsConfigSpec, description="The targets config spec." - ) - features: FeatureConfigSpec = Field( - default_factory=FeatureConfigSpec, - description="The features config spec.", - ) - - -# TODO: should this be a subclass of ExperimentInputSpec? 
-class ProgressiveTrainingSpec(ExperimentInputSpec): - """A spec for iteratively training an SBEM regression model.""" - - base_run_name: str = Field( - ..., - description="The base run name for the experiment.", - ) - convergence_criteria: ConvergenceThresholdsByTarget = Field( - default_factory=ConvergenceThresholdsByTarget, - description="The convergence criteria.", - ) - regression_io_config: RegressionIOConfigSpec = Field( - default_factory=RegressionIOConfigSpec, - description="The regression io config spec.", - ) - hyperparameters: ModelHPType = Field( - default_factory=XGBHyperparameters, - description="The hyperparameters for the model.", - ) - stratification: StratificationSpec = Field( - default_factory=StratificationSpec, - description="The stratification spec.", - ) - cross_val: CrossValidationSpec = Field( - default_factory=CrossValidationSpec, - description="The cross validation spec.", - ) - iteration: IterationSpec = Field( - default_factory=IterationSpec, - description="The iteration spec.", - ) - gis_uri: FileReference = Field( - ..., - description="The uri of the gis data to train on.", - ) - data_uris: ScatterGatherResult | None = Field( - default=None, - description="The uri of the previous simulation results to train on.", - ) - - def format_combined_output_key(self, key: str) -> str: - """Format the output key for a combined result file.""" - return f"{self.prefix}/combined/{key}.parquet" - - def format_combined_output_uri(self, key: str) -> S3Url: - """Format the output uri for a combined result file.""" - if self.storage_settings is None: - msg = "Storage settings are not set, so we can't construct a combined output uri." 
- raise ValueError(msg) - return S3Url( - f"s3://{self.storage_settings.BUCKET}/{self.format_combined_output_key(key)}" - ) - - @property - def gis_path(self) -> Path: - """The path to the gis data.""" - if isinstance(self.gis_uri, Path): - return self.gis_uri - return self.fetch_uri(self.gis_uri) - - @cached_property - def gis_data(self) -> pd.DataFrame: - """Load the gis data.""" - return pd.read_parquet(self.gis_path) - - -class StageSpec(BaseModel): - """A spec that is common to both the sample and train stages (and possibly others).""" - - parent: ProgressiveTrainingSpec = Field( - ..., - description="The parent spec.", - ) - - @cached_property - def random_generator(self) -> np.random.Generator: - """The random generator.""" - return np.random.default_rng(self.parent.iteration.current_iter) - - -class SampleSpec(StageSpec): - """A spec for the sampling stage of the progressive training.""" - - # TODO: add the ability to receive the last set of error metrics and use them to inform the sampling - - def stratified_selection(self) -> pd.DataFrame: - """Sample the gis data.""" - df = self.parent.gis_data - - stratification_field = self.parent.stratification.field - stratification_aliases = self.parent.stratification.aliases - - if stratification_field not in df.columns and not any( - alias in df.columns for alias in stratification_aliases - ): - msg = f"Stratification field {stratification_field} not found in gis data. Please check the field name and/or the aliases." - raise ValueError(msg) - - if stratification_field not in df.columns: - stratification_field = next( - alias for alias in stratification_aliases if alias in df.columns - ) - - strata = cast(list[str], df[stratification_field].unique().tolist()) - - if self.parent.stratification.sampling == "equal": - return self.sample_equally_by_stratum(df, strata, stratification_field) - elif self.parent.stratification.sampling == "error-weighted": - msg = "Error-weighted sampling is not yet implemented." 
- raise NotImplementedError(msg) - elif self.parent.stratification.sampling == "proportional": - msg = "Proportional sampling is not yet implemented." - raise NotImplementedError(msg) - else: - msg = f"Invalid sampling method: {self.parent.stratification.sampling}" - raise ValueError(msg) - - def sample_equally_by_stratum( - self, df: pd.DataFrame, strata: list[str], stratification_field: str - ) -> pd.DataFrame: - """Sample equally by stratum. - - This will break the dataframe up into n strata and ensure that each strata ends up with the same number of samples. - - Args: - df (pd.DataFrame): The dataframe to sample from. - strata (list[str]): The unique values of the strata. - stratification_field (str): The field to stratify the data by. - - Returns: - samples (pd.DataFrame): The sampled dataframe. - """ - stratum_dfs = { - stratum: df[df[stratification_field] == stratum] for stratum in strata - } - n_per_iter = ( - self.parent.iteration.n_per_iter - if self.parent.iteration.current_iter != 0 - else self.parent.iteration.n_init - ) - n_per_stratum = max( - n_per_iter // len(strata), - ( - self.parent.iteration.min_per_stratum - if self.parent.iteration.current_iter == 0 - else 0 - ), - ) - - # TODO: consider how we want to handle potentially having the same geometry appear in both - # the training and testing sets. - # if any(len(stratum_df) < n_per_stratum for stratum_df in stratum_dfs.values()): - # msg = "There are not enough buildings in some strata to sample the desired number of buildings per stratum." - # # connsider making this a warning? 
- # raise ValueError(msg) - - sampled_strata = { - stratum: stratum_df.sample( - n=n_per_stratum, random_state=self.random_generator, replace=True - ) - for stratum, stratum_df in stratum_dfs.items() - } - return cast(pd.DataFrame, pd.concat(sampled_strata.values())) - - # def sample_semantic_fields(self, df: pd.DataFrame) -> pd.DataFrame: - # """Sample the semantic fields.""" - # # TODO: consider randomizing the locations? - # semantic_fields = self.progressive_training_spec.semantic_fields_data - # for field in semantic_fields.Fields: - # if isinstance(field, CategoricalFieldSpec): - # options = field.Options - # df[field.Name] = self.random_generator.choice(options, size=len(df)) - # elif isinstance(field, NumericFieldSpec): - # df[field.Name] = self.random_generator.uniform( - # field.Min, field.Max, size=len(df) - # ) - # else: - # msg = f"Invalid field type: {type(field)}" - # raise TypeError(msg) - # return df - - # def sample_basements_and_attics(self, df: pd.DataFrame) -> pd.DataFrame: - # """Add basement/attics to models.""" - # # get the options for the type literal - # options: list[BasementAtticOccupationConditioningStatus] = [ - # "none", - # "occupied_unconditioned", - # "unoccupied_unconditioned", - # "occupied_conditioned", - # "unoccupied_conditioned", - # ] - # weights = [0.5, *([0.5 / 4] * 4)] - # # sample the type literal - # df["basement"] = self.random_generator.choice(options, size=len(df), p=weights) - # df["attic"] = self.random_generator.choice(options, size=len(df), p=weights) - # df["exposed_basement_frac"] = self.random_generator.uniform( - # 0.1, 0.5, size=len(df) - # ) - # return df - - # def sample_wwrs(self, df: pd.DataFrame) -> pd.DataFrame: - # """Sample the wwrs.""" - # wwr_min = 0.05 - # wwr_max = 0.35 - # df["wwr"] = self.random_generator.uniform(wwr_min, wwr_max, size=len(df)) - # return df - - # def sample_f2f_heights(self, df: pd.DataFrame) -> pd.DataFrame: - # """Sample the f2f heights.""" - # f2f_min = 2.3 - # f2f_max = 
4.3 - # df["f2f_height"] = self.random_generator.uniform(f2f_min, f2f_max, size=len(df)) - # return df - - def to_sim_specs(self, df: pd.DataFrame): - """Convert the sampled dataframe to a list of simulation specs. - - For now, we are assuming that all the other necessary fields are present and we are just - ensuring that sort_index and experiment_id are set appropriately. - """ - # df["semantic_field_context"] = df.apply( - # lambda row: { - # field.Name: row[field.Name] - # for field in self.progressive_training_spec.semantic_fields_data.Fields - # }, - # axis=1, - # ) - # df["sort_index"] = np.arange(len(df)) - # df["experiment_id"] = self.experiment_key - # # TODO: consider allowing the component map/semantic_fields/database to be inherited from the row - # # e.g. to allow multiple component maps and dbs per run. - # df["component_map_uri"] = str(self.progressive_training_spec.component_map_uri) - # df["semantic_fields_uri"] = str( - # self.progressive_training_spec.semantic_fields_uri - # ) - # df["db_uri"] = str(self.progressive_training_spec.database_uri) - return df - - # def make_payload(self, s3_client: S3ClientType): - # """Make the payload for the scatter gather task, including generating the simulation specs and serializing them to s3.""" - # df = self.stratified_selection() - # # df = self.sample_semantic_fields(df) - # # df = self.sample_basements_and_attics(df) - # # df = self.sample_wwrs(df) - # # df = self.sample_f2f_heights(df) - # df = self.to_sim_specs(df) - # # serialize to a parquet file and upload to s3 - # bucket = self.progressive_training_spec.storage_settings.BUCKET - # with tempfile.TemporaryDirectory() as tmpdir: - # tmpdir = Path(tmpdir) - # fpath = tmpdir / "specs.pq" - # df.to_parquet(fpath) - # key = f"hatchet/{self.experiment_key}/specs.pq" - # specs_uri = f"s3://{bucket}/{key}" - # s3_client.upload_file(fpath.as_posix(), bucket, key) - - # payload = { - # "specs": specs_uri, - # "bucket": bucket, - # "workflow_name": 
"simulate_sbem_shoebox", - # "experiment_id": self.experiment_key, - # "recursion_map": { - # "factor": self.progressive_training_spec.iteration.recursion_factor, - # "max_depth": self.progressive_training_spec.iteration.recursion_max_depth, - # }, - # } - # return payload - - class TrainFoldSpec(ExperimentInputSpec): """Train an sbem model for a specific fold. @@ -1316,15 +769,6 @@ def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): # train_preds = pd.concat(train_preds, axis=1) # return train_preds, test_preds - # @property - # def model_dir_key(self) -> str: - # """Get the key for the model directory.""" - # return f"{self.experiment_id}/{self.sort_index}/models" - - # def format_model_key(self, model_name: str) -> str: - # """Format the model key.""" - # return f"hatchet/{self.model_dir_key}/{model_name}" - class FoldResult(ExperimentOutputSpec): """The output for a fold.""" diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 5d4b891..f9b7054 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -7,13 +7,13 @@ import pandas as pd from hatchet_sdk import Context -from pydantic import HttpUrl from scythe.experiments import ( BaseExperiment, ) from scythe.hatchet import hatchet from scythe.registry import ExperimentRegistry from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather +from scythe.settings import ScytheStorageSettings from scythe.utils.filesys import S3Url from globi.models.surrogate.dummy import DummySimulationInput, dummy_simulation @@ -26,7 +26,6 @@ ) from globi.models.surrogate.training import ( FoldResult, - IterationSpec, ProgressiveTrainingSpec, TrainFoldSpec, TrainWithCVSpec, @@ -323,12 +322,13 @@ def transition_recursion( # TODO: Final training stage? or should we save models along the way. 
if __name__ == "__main__": + from pydantic import HttpUrl from scythe.settings import ScytheStorageSettings - from globi.models.surrogate.training import ( + from globi.models.surrogate.configs.pipeline import ( ConvergenceThresholds, ConvergenceThresholdsByTarget, - ProgressiveTrainingSpec, + IterationSpec, StratificationSpec, ) @@ -343,7 +343,7 @@ def transition_recursion( aliases=["feature.weather.file"], ), iteration=IterationSpec( - max_iters=10, + max_iters=3, ), convergence_criteria=ConvergenceThresholdsByTarget( thresholds={ From 31663da2057e95dba6d6e3c3826433ad88be89c7 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Sun, 8 Mar 2026 20:30:19 -0400 Subject: [PATCH 11/31] enable runnable selection at surrogate level --- src/globi/allocate.py | 4 ++-- src/globi/models/surrogate/configs/pipeline.py | 3 ++- src/globi/models/surrogate/training.py | 3 +++ src/globi/pipelines/training.py | 13 ++++++------- src/globi/tools/cli/main.py | 4 ++-- src/globi/tools/visualization/data_sources.py | 2 +- uv.lock | 16 ++++++++-------- 7 files changed, 24 insertions(+), 21 deletions(-) diff --git a/src/globi/allocate.py b/src/globi/allocate.py index c7691db..988a022 100644 --- a/src/globi/allocate.py +++ b/src/globi/allocate.py @@ -102,7 +102,7 @@ def allocate_globi_experiment( raise ValueError(msg) experiment = BaseExperiment[ExperimentInputSpec, ExperimentOutputSpec]( - experiment=simulate_globi_building, run_name=name + runnable=simulate_globi_building, run_name=name ) print(f"Submitting {len(buildings_gdf)} buildings for experiment {name}") min_branches_required, _, _ = calculate_branching_factor(specs) @@ -182,7 +182,7 @@ def allocate_globi_dryrun( raise ValueError(msg) experiment = BaseExperiment[ExperimentInputSpec, ExperimentOutputSpec]( - experiment=simulate_globi_building, + runnable=simulate_globi_building, run_name=f"{config.name}/dryrun/{config.scenario}", ) diff --git a/src/globi/models/surrogate/configs/pipeline.py 
b/src/globi/models/surrogate/configs/pipeline.py index a088a79..11a0e9b 100644 --- a/src/globi/models/surrogate/configs/pipeline.py +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -10,6 +10,7 @@ import pandas as pd from pydantic import BaseModel, Field from scythe.base import ExperimentInputSpec +from scythe.experiments import SerializableRunnable from scythe.scatter_gather import RecursionMap, ScatterGatherResult from scythe.utils.filesys import FileReference, S3Url @@ -212,7 +213,7 @@ class RegressionIOConfigSpec(BaseModel): ) -class ProgressiveTrainingSpec(ExperimentInputSpec): +class ProgressiveTrainingSpec(ExperimentInputSpec, SerializableRunnable): """A spec for iteratively training an SBEM regression model.""" base_run_name: str = Field( diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index b3fa03e..260691d 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -68,6 +68,9 @@ def combined_data(self) -> pd.DataFrame: dfs: dict[str, pd.DataFrame] = { key: pd.read_parquet(str(uri)) for key, uri in self.data_uris.items() } + # TODO: we should drop any dataframes which do not participate in training + # for instance, by checking their regression io spec, or if there is another place to check. + # Mostly important for preventing errors on the next line when many differently shaped dataframes are returned. if not all( df.index.equals(next(iter(dfs.values())).index) for df in dfs.values() ): diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index f9b7054..02982e2 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -85,8 +85,7 @@ def create_simulations( run_name = f"{spec.experiment_id}/sample" exp = BaseExperiment( - # TODO: replace with simulate_globi_flat_building, or better yet, allow loading from the registry via config. 
- experiment=dummy_simulation, # TODO: add configurability to switch between simulations. + runnable=spec.runnable, run_name=run_name, storage_settings=spec.storage_settings or ScytheStorageSettings(), ) @@ -147,6 +146,7 @@ def combine_results( # is_constant = (df.max(axis=0) - df.min(axis=0)).abs() < 1e-5 # df = df.loc[:, ~is_constant] # Should this sort of data cleaning be done here, or should it be done in the training task? + # also, should we make sure to remove NaN? if spec.data_uris: shared_keys = set(spec.data_uris.uris.keys()) & set(results.uris.keys()) @@ -199,7 +199,7 @@ def start_training( run_name = f"{spec.experiment_id}/train" exp = BaseExperiment( - experiment=train_regressor_with_cv_fold, + runnable=train_regressor_with_cv_fold, run_name=run_name, storage_settings=spec.storage_settings or ScytheStorageSettings(), ) @@ -302,7 +302,7 @@ def transition_recursion( next_spec.iteration.current_iter += 1 next_spec.data_uris = combine_results_output.combined exp = BaseExperiment( - experiment=iterative_training, + runnable=iterative_training, run_name=f"{next_spec.base_run_name}", storage_settings=spec.storage_settings or ScytheStorageSettings(), ) @@ -334,6 +334,7 @@ def transition_recursion( base_run_name = "test-experiment" progressive_training_spec = ProgressiveTrainingSpec( + runnable=dummy_simulation, sort_index=0, experiment_id="placeholder", gis_uri=HttpUrl("https://example.com/gis.parquet"), @@ -356,7 +357,7 @@ def transition_recursion( ) exp = BaseExperiment( - experiment=iterative_training, + runnable=iterative_training, run_name="test-experiment", ) @@ -371,5 +372,3 @@ def transition_recursion( import yaml print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) - # result = iterative_training.run(spec) - # print(result) diff --git a/src/globi/tools/cli/main.py b/src/globi/tools/cli/main.py index e36e447..eb35b05 100644 --- a/src/globi/tools/cli/main.py +++ b/src/globi/tools/cli/main.py @@ -127,7 +127,7 @@ def simulate( import 
pandas as pd from globi.models.tasks import MinimalBuildingSpec - from globi.pipelines import simulate_globi_building_pipeline + from globi.pipelines.simulations import simulate_globi_building_pipeline if isinstance(config, str): config = Path(config) @@ -371,7 +371,7 @@ def experiment( s3_client: S3Client = boto3.client("s3") s3_settings = ScytheStorageSettings() - exp = BaseExperiment(experiment=simulate_globi_building, run_name=run_name) + exp = BaseExperiment(runnable=simulate_globi_building, run_name=run_name) if not version: exp_version = exp.latest_version(s3_client, from_cache=False) diff --git a/src/globi/tools/visualization/data_sources.py b/src/globi/tools/visualization/data_sources.py index 8397258..4c8bace 100644 --- a/src/globi/tools/visualization/data_sources.py +++ b/src/globi/tools/visualization/data_sources.py @@ -262,7 +262,7 @@ def load_run_data(self, run_id: str) -> pd.DataFrame: s3_client = self.client s3_settings = ScytheStorageSettings() exp = BaseExperiment( - experiment=simulate_globi_building, + runnable=simulate_globi_building, run_name=run_id, ) diff --git a/uv.lock b/uv.lock index 45faccc..5c39dd3 100644 --- a/uv.lock +++ b/uv.lock @@ -922,7 +922,7 @@ name = "cuda-bindings" version = "12.9.4" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-pathfinder", marker = "sys_platform != 'darwin'" }, + { name = "cuda-pathfinder", marker = "sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/0c/c2/65bfd79292b8ff18be4dd7f7442cea37bcbc1a228c1886f1dea515c45b67/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:694ba35023846625ef471257e6b5a4bc8af690f961d197d77d34b1d1db393f56", size = 11760260, upload-time = "2025-10-21T14:51:40.79Z" }, @@ -3169,7 +3169,7 @@ name = "nvidia-cudnn-cu12" version = "9.10.2.21" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12", marker = "sys_platform != 
'darwin'" }, + { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" }, @@ -3181,7 +3181,7 @@ name = "nvidia-cufft-cu12" version = "11.3.3.83" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/60/bc/7771846d3a0272026c416fbb7e5f4c1f146d6d80704534d0b187dd6f4800/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a", size = 193109211, upload-time = "2025-03-07T01:44:56.873Z" }, @@ -3211,9 +3211,9 @@ name = "nvidia-cusolver-cu12" version = "11.7.3.90" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12", marker = "sys_platform != 'darwin'" }, - { name = "nvidia-cusparse-cu12", marker = "sys_platform != 'darwin'" }, - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/c8/32/f7cd6ce8a7690544d084ea21c26e910a97e077c9b7f07bf5de623ee19981/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0", size = 267229841, upload-time = "2025-03-07T01:46:54.356Z" }, @@ 
-3225,7 +3225,7 @@ name = "nvidia-cusparse-cu12" version = "12.5.8.93" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/bc/f7/cd777c4109681367721b00a106f491e0d0d15cfa1fd59672ce580ce42a97/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc", size = 288117129, upload-time = "2025-03-07T01:47:40.407Z" }, @@ -4591,7 +4591,7 @@ wheels = [ [[package]] name = "scythe-engine" version = "0.1.2" -source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#9aad5d97eaa9ca33bc5ac9e21ec31c9b60f677f1" } +source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#2976bb3da4cec82784057b673e55d5c5cdda469f" } dependencies = [ { name = "boto3" }, { name = "fastparquet" }, From 3af36694d0e35d8ec58b94ffc622ae8e2cfbd12e Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 10:41:49 -0400 Subject: [PATCH 12/31] improve results collation and finalization --- .../models/surrogate/configs/pipeline.py | 62 +++++- src/globi/models/surrogate/outputs.py | 13 ++ src/globi/models/surrogate/training.py | 2 - src/globi/pipelines/training.py | 210 +++++++++++++----- src/globi/tools/cli/main.py | 26 +++ uv.lock | 2 +- 6 files changed, 249 insertions(+), 66 deletions(-) diff --git a/src/globi/models/surrogate/configs/pipeline.py b/src/globi/models/surrogate/configs/pipeline.py index 11a0e9b..7f3be86 100644 --- a/src/globi/models/surrogate/configs/pipeline.py +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -76,13 +76,20 @@ class CrossValidationSpec(BaseModel): class ConvergenceThresholds(BaseModel): """The thresholds for convergence.""" 
- # TODO: instead of using a risky hardcoded "n/a" token, make nullability have better support. - mae: float = Field(default=-9e9, description="The maximum MAE for convergence.") - rmse: float = Field(default=-9e9, description="The maximum RMSE for convergence.") - mape: float = Field(default=-9e9, description="The maximum MAPE for convergence.") - r2: float = Field(default=9e9, description="The minimum R2 for convergence.") - cvrmse: float = Field( - default=-9e9, description="The maximum CV_RMSE for convergence." + mae: float | None = Field( + default=None, description="The maximum MAE for convergence." + ) + rmse: float | None = Field( + default=None, description="The maximum RMSE for convergence." + ) + mape: float | None = Field( + default=None, description="The maximum MAPE for convergence." + ) + r2: float | None = Field( + default=None, description="The minimum R2 for convergence." + ) + cvrmse: float | None = Field( + default=None, description="The maximum CV_RMSE for convergence." 
) def check_convergence(self, metrics: pd.Series, target: re.Pattern | None = None): @@ -104,9 +111,13 @@ def check_convergence(self, metrics: pd.Series, target: re.Pattern | None = None # first, we will select the appropriate threshold for each metric comparators = thresholds.loc[metrics.index.get_level_values("metric")] + # we can then copy over the index safely comparators.index = metrics.index + # we will ignore any thresholds that are not set or are NaN + comparators_are_na = comparators.isna() + # next, we will flip the sign of the r2 metric since it is a maximization metric rather thin min metrics = metrics * np.where( metrics.index.get_level_values("metric") == "r2", -1, 1 @@ -117,6 +128,7 @@ def check_convergence(self, metrics: pd.Series, target: re.Pattern | None = None # run the comparisons comparison = metrics < comparators + comparison = comparison.loc[~comparators_are_na] return comparison @@ -252,10 +264,18 @@ class ProgressiveTrainingSpec(ExperimentInputSpec, SerializableRunnable): default=None, description="The uri of the previous simulation results to train on.", ) + metrics_uris: list[ScatterGatherResult] = Field( + default_factory=list, + description="The uris of the iteration metrics from previous iterations.", + ) + previous_experiment_ids: list[str] = Field( + default_factory=list, + description="The ids of the previous experiments.", + ) def format_combined_output_key(self, key: str) -> str: """Format the output key for a combined result file.""" - return f"{self.prefix}/combined/{key}.parquet" + return f"{self.prefix}/combined/data/{key}.parquet" def format_combined_output_uri(self, key: str) -> S3Url: """Format the output uri for a combined result file.""" @@ -266,6 +286,32 @@ def format_combined_output_uri(self, key: str) -> S3Url: f"s3://{self.storage_settings.BUCKET}/{self.format_combined_output_key(key)}" ) + def format_metrics_output_key(self, key: str) -> str: + """Format the output key for a metrics file.""" + return 
f"{self.prefix}/combined/metrics/{key}.parquet" + + def format_metrics_output_uri(self, key: str) -> S3Url: + """Format the output uri for a metrics file.""" + if self.storage_settings is None: + msg = "Storage settings are not set, so we can't construct a metrics output uri." + raise ValueError(msg) + return S3Url( + f"s3://{self.storage_settings.BUCKET}/{self.format_metrics_output_key(key)}" + ) + + def format_summary_manifest_key(self) -> str: + """Format the output key for a summary manifest file.""" + return f"{self.prefix}/summary.yml" + + def format_summary_manifest_uri(self) -> S3Url: + """Format the output uri for a summary manifest file.""" + if self.storage_settings is None: + msg = "Storage settings are not set, so we can't construct a summary manifest uri." + raise ValueError(msg) + return S3Url( + f"s3://{self.storage_settings.BUCKET}/{self.format_summary_manifest_key()}" + ) + @property def gis_path(self) -> Path: """The path to the gis data.""" diff --git a/src/globi/models/surrogate/outputs.py b/src/globi/models/surrogate/outputs.py index 0bdb9b6..961df92 100644 --- a/src/globi/models/surrogate/outputs.py +++ b/src/globi/models/surrogate/outputs.py @@ -5,6 +5,7 @@ from pydantic import BaseModel from scythe.experiments import ExperimentRun from scythe.scatter_gather import ScatterGatherResult +from scythe.utils.filesys import S3Url from globi.models.surrogate.training import TrainWithCVSpec @@ -17,6 +18,7 @@ class CombineResultsResult(BaseModel): # TODO: This should perhaps go somewhere else since it is generally useful. +# (most likely into scythe itself) class ExperimentRunWithRef(BaseModel): """An experiment run with a workflow run id.""" @@ -35,6 +37,8 @@ class TrainingEvaluationResult(BaseModel): """The result of evaluating the training.""" converged: bool + # TODO: possibly get rid of this since we have nice combined outputs already. 
+ metrics: dict class RecursionTransition(BaseModel): @@ -42,3 +46,12 @@ class RecursionTransition(BaseModel): reasoning: Literal["max_depth", "converged"] | None child_workflow_run_id: str | None + + +class FinalizeResult(BaseModel): + """The result of finalizing the training.""" + + reasoning: Literal["max_depth", "converged"] | None + data_uris: dict[str, S3Url] + metrics_uris: dict[str, S3Url] + experiment_ids: list[str] diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 260691d..c90b6b2 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -791,12 +791,10 @@ class TrainWithCVSpec(StageSpec): def schedule(self) -> list[TrainFoldSpec]: """Create the task schedule.""" schedule = [] - # TODO: this should be configured/selected/etc for i in range(self.parent.cross_val.n_folds): schedule.append( TrainFoldSpec( - # TODO: this should be set in a better manner experiment_id="placeholder", sort_index=i, data_uris=self.data_uris.uris, diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 02982e2..17c0751 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -1,11 +1,14 @@ """The training pipeline.""" import random +import tempfile from datetime import timedelta from pathlib import Path from typing import cast +import boto3 import pandas as pd +import yaml from hatchet_sdk import Context from scythe.experiments import ( BaseExperiment, @@ -16,10 +19,11 @@ from scythe.settings import ScytheStorageSettings from scythe.utils.filesys import S3Url -from globi.models.surrogate.dummy import DummySimulationInput, dummy_simulation +from globi.models.surrogate.dummy import DummySimulationInput from globi.models.surrogate.outputs import ( CombineResultsResult, ExperimentRunWithRef, + FinalizeResult, RecursionTransition, StartTrainingResult, TrainingEvaluationResult, @@ -255,9 +259,10 @@ def evaluate_training( ) -> 
TrainingEvaluationResult: """Evaluate the training.""" results_output = context.task_output(await_training) - strata = results_output.uris["strata"] - _globals = results_output.uris["global"] - results = pd.read_parquet(str(strata)) + strata_uri = results_output.uris["strata"] + globals_uri = results_output.uris["global"] + results = pd.read_parquet(str(strata_uri)) + results_globals = pd.read_parquet(str(globals_uri)) fold_averages = cast( pd.Series, @@ -268,6 +273,14 @@ def evaluate_training( ) # TODO: fold_averages and strata and globals should be saved to s3 + global_averages = cast( + pd.Series, + results_globals.xs("test", level="split_segment", axis=1) + .groupby(level="iteration") + .mean() + .unstack(), + ) + ( convergence_all, _convergence_monitor_segment, @@ -275,14 +288,19 @@ def evaluate_training( _convergence, ) = spec.convergence_criteria.run(fold_averages) - return TrainingEvaluationResult(converged=convergence_all) + return TrainingEvaluationResult( + converged=convergence_all, + metrics={ + "global_averages": global_averages.reset_index().to_dict(orient="records"), + }, + ) @iterative_training.task( name="iterative_training.transition_recursion", schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(minutes=5), - parents=[evaluate_training, combine_results], + parents=[evaluate_training, combine_results, await_training], ) def transition_recursion( spec: ProgressiveTrainingSpec, context: Context @@ -290,17 +308,19 @@ def transition_recursion( """Transition the recursion.""" results = context.task_output(evaluate_training) if results.converged: - # create child return RecursionTransition(reasoning="converged", child_workflow_run_id=None) if spec.iteration.at_max_iters: return RecursionTransition(reasoning="max_depth", child_workflow_run_id=None) + await_training_output = context.task_output(await_training) # start_training_output = context.task_output(start_training) combine_results_output = context.task_output(combine_results) next_spec 
= spec.model_copy(deep=True) next_spec.iteration.current_iter += 1 next_spec.data_uris = combine_results_output.combined + next_spec.metrics_uris.append(await_training_output) + next_spec.previous_experiment_ids.append(spec.experiment_id) exp = BaseExperiment( runnable=iterative_training, run_name=f"{next_spec.base_run_name}", @@ -319,56 +339,136 @@ def transition_recursion( ) -# TODO: Final training stage? or should we save models along the way. +@iterative_training.task( + name="iterative_training.finalize", + schedule_timeout=timedelta(hours=5), + execution_timeout=timedelta(minutes=30), + parents=[transition_recursion, await_training, combine_results], + # skip_if=[ + # # TODO: maybe we should just run every time? + # ParentCondition( + # parent=transition_recursion, + # expression="output.reasoning == null", + # ) + # ], +) +def finalize(spec: ProgressiveTrainingSpec, context: Context) -> FinalizeResult: + """Run when training has exited the loop (converged, max depth, or other reason). Saves final models and artifacts.""" + # TODO: save the final model? + transition = context.task_output(transition_recursion) + context.log(f"Training finished. 
Finalizing: {transition.reasoning}") + + context.log("Fetching metrics from all iterations...") + await_training_output = context.task_output(await_training) + metrics_uris = [*spec.metrics_uris, await_training_output] + metrics_by_key: dict[str, list[pd.DataFrame]] = {} + for i, metrics_uri in enumerate(metrics_uris): + context.log(f"\tFetching metrics from iteration {i}...") + for key in metrics_uri.uris: + context.log(f"\t\tFetching metrics for key {key} from iteration {i}...") + if key not in metrics_by_key: + metrics_by_key[key] = [] + metrics_by_key[key].append(pd.read_parquet(str(metrics_uri.uris[key]))) + context.log("Combining metrics from all iterations...") + combined_metrics = { + key: pd.concat(metrics, axis=0) for key, metrics in metrics_by_key.items() + } + combined_metrics_uris = { + key: spec.format_metrics_output_uri(key) for key in combined_metrics + } + context.log("Saving combined metrics to s3...") + for key, metrics in combined_metrics.items(): + context.log(f"\tSaving metrics for key {key} to s3...") + metrics.to_parquet(str(combined_metrics_uris[key])) + context.log("Final metrics saved to s3.") + + # Get the simulation data outputs from all steps and this step + combine_results_output = context.task_output(combine_results) -if __name__ == "__main__": - from pydantic import HttpUrl - from scythe.settings import ScytheStorageSettings + # Get the experiment ids from all steps and this step + experiment_ids = [*spec.previous_experiment_ids, spec.experiment_id] - from globi.models.surrogate.configs.pipeline import ( - ConvergenceThresholds, - ConvergenceThresholdsByTarget, - IterationSpec, - StratificationSpec, - ) + # TODO: save final models, or return them a little more directly? 
- base_run_name = "test-experiment" - progressive_training_spec = ProgressiveTrainingSpec( - runnable=dummy_simulation, - sort_index=0, - experiment_id="placeholder", - gis_uri=HttpUrl("https://example.com/gis.parquet"), - stratification=StratificationSpec( - field="weather_file", - sampling="equal", - aliases=["feature.weather.file"], - ), - iteration=IterationSpec( - max_iters=3, - ), - convergence_criteria=ConvergenceThresholdsByTarget( - thresholds={ - "*": ConvergenceThresholds(r2=0.975), - }, - ), - storage_settings=ScytheStorageSettings(), - data_uris=None, - base_run_name=base_run_name, - ) - - exp = BaseExperiment( - runnable=iterative_training, - run_name="test-experiment", - ) - - run, ref = exp.allocate( - progressive_training_spec, - version="bumpmajor", - recursion_map=RecursionMap( - factor=2, - max_depth=0, - ), + result = FinalizeResult( + reasoning=transition.reasoning, + data_uris=combine_results_output.combined.uris, + metrics_uris=combined_metrics_uris, + experiment_ids=experiment_ids, ) - import yaml - print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) + s3_client = boto3.client("s3") + summary_manifest_uri = spec.format_summary_manifest_key() + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) / "summary.yml" + with open(temp_path, "w") as f: + yaml.dump(result.model_dump(mode="json"), f, indent=2, sort_keys=False) + if spec.storage_settings is None: + msg = ( + "Storage settings are not set, so we can't upload the summary manifest." 
+ ) + raise ValueError(msg) + s3_client.upload_file( + temp_path.as_posix(), spec.storage_settings.BUCKET, summary_manifest_uri + ) + return result + + +# if __name__ == "__main__": +# import yaml +# from pydantic import HttpUrl +# from scythe.settings import ScytheStorageSettings + +# from globi.models.surrogate.configs.pipeline import ( +# ConvergenceThresholds, +# ConvergenceThresholdsByTarget, +# IterationSpec, +# StratificationSpec, +# ) +# from globi.models.surrogate.dummy import dummy_simulation + +# base_run_name = "test-experiment" +# progressive_training_spec = ProgressiveTrainingSpec( +# runnable=dummy_simulation, +# sort_index=0, +# experiment_id="placeholder", +# gis_uri=HttpUrl("https://example.com/gis.parquet"), +# stratification=StratificationSpec( +# field="weather_file", +# sampling="equal", +# aliases=["feature.weather.file"], +# ), +# iteration=IterationSpec( +# max_iters=3, +# ), +# convergence_criteria=ConvergenceThresholdsByTarget( +# thresholds={ +# "*": ConvergenceThresholds(r2=0.975), +# }, +# ), +# storage_settings=ScytheStorageSettings(), +# base_run_name=base_run_name, +# ) +# with open("inputs/training.yml", "w") as f: +# yaml.dump( +# progressive_training_spec.model_dump(mode="json"), +# f, +# indent=2, +# sort_keys=False, +# ) + +# exp = BaseExperiment( +# runnable=iterative_training, +# run_name="test-experiment", +# ) + +# run, ref = exp.allocate( +# progressive_training_spec, +# version="bumpmajor", +# recursion_map=RecursionMap( +# factor=2, +# max_depth=0, +# ), +# ) + +# print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) diff --git a/src/globi/tools/cli/main.py b/src/globi/tools/cli/main.py index eb35b05..cb11098 100644 --- a/src/globi/tools/cli/main.py +++ b/src/globi/tools/cli/main.py @@ -7,6 +7,7 @@ import boto3 import click import yaml +from scythe.experiments import BaseExperiment if TYPE_CHECKING: from mypy_boto3_s3 import S3Client @@ -102,6 +103,31 @@ def manifest( ) +@submit.command() 
+@click.option( + "--path", + type=click.Path(exists=True), + help="The path to the manifest file which will be used to configure the experiment.", + prompt="Manifest file path (.yml)", +) +def surrogate(path): + """Submit a GloBI surrogate experiment.""" + from globi.models.surrogate.configs.pipeline import ProgressiveTrainingSpec + from globi.pipelines.training import iterative_training + + with open(path) as f: + manifest = yaml.safe_load(f) + + config = ProgressiveTrainingSpec.model_validate(manifest) + + exp = BaseExperiment(runnable=iterative_training, run_name=config.base_run_name) + run, _ref = exp.allocate( + config, + version="bumpmajor", + ) + print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) + + @cli.command() @click.option( "--config", diff --git a/uv.lock b/uv.lock index 5c39dd3..8b45dad 100644 --- a/uv.lock +++ b/uv.lock @@ -4591,7 +4591,7 @@ wheels = [ [[package]] name = "scythe-engine" version = "0.1.2" -source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#2976bb3da4cec82784057b673e55d5c5cdda469f" } +source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#0bc501d15c20ab23b2379b690756fd3ff3267054" } dependencies = [ { name = "boto3" }, { name = "fastparquet" }, From 6603b70f848d0fc07cf2dd7d71952a3587119f2b Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 11:34:28 -0400 Subject: [PATCH 13/31] add non linear sample size scaling --- src/globi/models/surrogate/configs/pipeline.py | 16 +++++++++++----- src/globi/models/surrogate/sampling.py | 6 +----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/globi/models/surrogate/configs/pipeline.py b/src/globi/models/surrogate/configs/pipeline.py index 7f3be86..6707b1f 100644 --- a/src/globi/models/surrogate/configs/pipeline.py +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -20,14 +20,13 @@ class IterationSpec(BaseModel): """The 
iteration spec.""" - n_init: int = Field(default=10000, description="The number of initial samples.") + n_per_iter: int | list[int] = Field( + default=10_000, + description="The number of samples to generate per generation. If the current iteration exceeds the length of the list, the last element will be used.", + ) min_per_stratum: int = Field( default=100, description="The minimum number of samples per stratum." ) - n_per_iter: int = Field( - default=10000, - description="The number of samples to add per each iteration of the outer loop.", - ) max_iters: int = Field( default=100, description="The maximum number of outer loop iterations to perform.", @@ -46,6 +45,13 @@ def at_max_iters(self) -> bool: """Whether the current iteration is the maximum number of iterations.""" return self.current_iter + 1 >= self.max_iters + @property + def n_per_gen_for_current_iter(self) -> int: + """The number of samples to generate for the current iteration.""" + if isinstance(self.n_per_iter, int): + return self.n_per_iter + return self.n_per_iter[min(self.current_iter, len(self.n_per_iter) - 1)] + class StratificationSpec(BaseModel): """A spec for stratifying the data.""" diff --git a/src/globi/models/surrogate/sampling.py b/src/globi/models/surrogate/sampling.py index b4bb1d8..fff8068 100644 --- a/src/globi/models/surrogate/sampling.py +++ b/src/globi/models/surrogate/sampling.py @@ -62,11 +62,7 @@ def sample_equally_by_stratum( stratum_dfs = { stratum: df[df[stratification_field] == stratum] for stratum in strata } - n_per_iter = ( - self.parent.iteration.n_per_iter - if self.parent.iteration.current_iter != 0 - else self.parent.iteration.n_init - ) + n_per_iter = self.parent.iteration.n_per_gen_for_current_iter n_per_stratum = max( n_per_iter // len(strata), ( From b3ebb8fe63155512d6cac4a79cac278b67fbc87e Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 15:06:01 -0400 Subject: [PATCH 14/31] enabling sampling specification 
--- .../models/surrogate/configs/pipeline.py | 31 +- src/globi/models/surrogate/dummy.py | 318 +++++++- src/globi/models/surrogate/samplers.py | 720 ++++++++++++++++++ src/globi/models/surrogate/sampling.py | 141 +--- src/globi/pipelines/training.py | 27 +- 5 files changed, 1076 insertions(+), 161 deletions(-) create mode 100644 src/globi/models/surrogate/samplers.py diff --git a/src/globi/models/surrogate/configs/pipeline.py b/src/globi/models/surrogate/configs/pipeline.py index 6707b1f..3b2ed96 100644 --- a/src/globi/models/surrogate/configs/pipeline.py +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -12,9 +12,10 @@ from scythe.base import ExperimentInputSpec from scythe.experiments import SerializableRunnable from scythe.scatter_gather import RecursionMap, ScatterGatherResult -from scythe.utils.filesys import FileReference, S3Url +from scythe.utils.filesys import OptionalFileReference, S3Url from globi.models.surrogate.configs.regression import ModelHPType, XGBHyperparameters +from globi.models.surrogate.samplers import Priors class IterationSpec(BaseModel): @@ -254,6 +255,10 @@ class ProgressiveTrainingSpec(ExperimentInputSpec, SerializableRunnable): default_factory=StratificationSpec, description="The stratification spec.", ) + samplers: Priors = Field( + ..., + description="The sampling spec.", + ) cross_val: CrossValidationSpec = Field( default_factory=CrossValidationSpec, description="The cross validation spec.", @@ -262,8 +267,8 @@ class ProgressiveTrainingSpec(ExperimentInputSpec, SerializableRunnable): default_factory=IterationSpec, description="The iteration spec.", ) - gis_uri: FileReference = Field( - ..., + context: OptionalFileReference = Field( + default=None, description="The uri of the gis data to train on.", ) data_uris: ScatterGatherResult | None = Field( @@ -318,17 +323,25 @@ def format_summary_manifest_uri(self) -> S3Url: f"s3://{self.storage_settings.BUCKET}/{self.format_summary_manifest_key()}" ) + def subrun_name(self, subrun: 
Literal["sample", "train"]) -> str: + """Format the run name for a subrun.""" + return f"{self.experiment_id}/{subrun}" + @property - def gis_path(self) -> Path: + def context_path(self) -> Path | None: """The path to the gis data.""" - if isinstance(self.gis_uri, Path): - return self.gis_uri - return self.fetch_uri(self.gis_uri) + if self.context is None: + return None + if isinstance(self.context, Path): + return self.context + return self.fetch_uri(self.context) @cached_property - def gis_data(self) -> pd.DataFrame: + def context_data(self) -> pd.DataFrame | None: """Load the gis data.""" - return pd.read_parquet(self.gis_path) + if self.context_path is None: + return None + return pd.read_parquet(self.context_path) class StageSpec(BaseModel): diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py index ee17a81..bd08fda 100644 --- a/src/globi/models/surrogate/dummy.py +++ b/src/globi/models/surrogate/dummy.py @@ -1,27 +1,59 @@ """Dummy simulation for testing.""" import math +from dataclasses import dataclass from pathlib import Path -from typing import Literal +from typing import Any, Literal, get_args +import numpy as np import pandas as pd from scythe.base import ExperimentInputSpec, ExperimentOutputSpec from scythe.registry import ExperimentRegistry +StratificationOption = Literal["some", "other", "option", "another"] + class DummySimulationInput(ExperimentInputSpec): """The input for the dummy simulation.""" - weather_file: Literal["some", "other"] - a: int - b: float - c: int + x0: float + x1: float + x2: float + x3: float + stratification_field: StratificationOption + + @property + def encoded_stratification_field(self) -> float: + """Encode the stratification field as an integer.""" + return get_args(StratificationOption).index(self.stratification_field) / ( + len(get_args(StratificationOption)) + - (1 if len(get_args(StratificationOption)) > 1 else 0) + ) + + @property + def values(self) -> list[float]: + """Get the values 
of the input spec.""" + vals = self.model_dump( + exclude={ + "stratification_field", + "experiment_id", + "sort_index", + "workflow_run_id", + "root_workflow_run_id", + } + ) + x_vals = {k: v for k, v in vals.items() if k.startswith("x")} + return [*x_vals.values(), self.encoded_stratification_field] + + def n_inputs(self) -> int: + """Get the number of inputs.""" + return len(self.values) class DummySimulationOutput(ExperimentOutputSpec): """The output for the dummy simulation.""" - c: float + y0: float @ExperimentRegistry.Register( @@ -31,31 +63,253 @@ def dummy_simulation( input_spec: DummySimulationInput, tempdir: Path ) -> DummySimulationOutput: """A dummy simulation.""" - df = pd.DataFrame({ - "target_0": [ - (input_spec.a + input_spec.b) - if input_spec.weather_file == "some" - else (input_spec.a - input_spec.b) - ], - "target_1": [ - (input_spec.a - input_spec.b) - if input_spec.weather_file == "some" - else (input_spec.a + input_spec.b) - ], - "target_2": [ - (input_spec.a * input_spec.b * input_spec.c) - if input_spec.weather_file == "some" - else (input_spec.a * input_spec.b / input_spec.c) - ], - "target_3": [ - (input_spec.a / math.sin(input_spec.b)) - if input_spec.weather_file == "some" - else (input_spec.a / math.cos(input_spec.b)) - ], - }) - df_neg = -df - df = pd.concat([df, df_neg], axis=1, keys=["positive", "negative"], names=["sign"]) - df = df.set_index(input_spec.make_multiindex()) + n_inputs = input_spec.n_inputs() + n_outputs = 5 + problem = SyntheticMultiOutputProblem( + n_inputs, + SyntheticProblemConfig( + n_outputs=5, + n_latents=8, + difficulty="medium", + noise_std=0.0, + normalize_outputs=True, + ), + input_spec.sort_index, + ) + y = problem.evaluate(np.array(input_spec.values)) + + main_result = pd.DataFrame({f"y{i}": [y[i]] for i in range(1, n_outputs)}) + main_result = main_result.set_index(input_spec.make_multiindex()) + main_result_neg = -main_result + main_result = pd.concat( + [main_result, main_result_neg], + axis=1, + 
keys=["positive", "negative"], + names=["sign"], + ) return DummySimulationOutput( - c=input_spec.a + input_spec.b, dataframes={"main_result": df} + y0=y[0], + dataframes={"main_result": main_result}, ) + + +@dataclass(frozen=True) +class SyntheticProblemConfig: + """Configuration for a synthetic multi-output regression problem.""" + + n_outputs: int = 8 + n_latents: int = 4 + difficulty: Literal["easy", "medium"] = "easy" + noise_std: float = 0.0 + normalize_outputs: bool = True + + +class SyntheticMultiOutputProblem: + """Deterministic synthetic multi-output function family. + + Inputs: + x in [0, 1]^d + + Outputs: + y in R^m + + Design goals: + - cheap to evaluate + - arbitrary input dimension + - arbitrary output count + - some outputs share latent structure + - some outputs contain mild independent residuals + - difficulty is tunable but never absurd + """ + + def __init__(self, n_inputs: int, config: SyntheticProblemConfig, seed: int): + """Initialize the synthetic multi-output problem.""" + if n_inputs < 1: + msg = "n_inputs must be >= 1" + raise ValueError(msg) + if config.n_outputs < 1: + msg = "n_outputs must be >= 1" + raise ValueError(msg) + if config.n_latents < 1: + msg = "n_latents must be >= 1" + raise ValueError(msg) + + self.n_inputs = n_inputs + self.config = config + self.rng = np.random.default_rng(seed) + self.seed = seed + + self.active_dims_per_latent = ( + min(5, n_inputs) if config.difficulty == "easy" else min(8, n_inputs) + ) + self.freq_max = 2 if config.difficulty == "easy" else 4 + self.residual_scale = 0.05 if config.difficulty == "easy" else 0.12 + + # Shared latent parameters + self.latent_defs = [ + self._make_latent_definition(k) for k in range(config.n_latents) + ] + + # Output mixing weights: this is what creates output dependency + self.mix_weights = self.rng.normal( + loc=0.0, + scale=1.0 / math.sqrt(config.n_latents), + size=(config.n_outputs, config.n_latents), + ) + + # Small output-specific residual definitions + 
self.residual_defs = [ + self._make_residual_definition(j) for j in range(config.n_outputs) + ] + + # Optional approximate normalization constants computed deterministically + self.output_shift = np.zeros(config.n_outputs, dtype=float) + self.output_scale = np.ones(config.n_outputs, dtype=float) + if config.normalize_outputs: + self._fit_normalization() + + def evaluate(self, x: np.ndarray) -> np.ndarray: + """Evaluate all outputs at one input vector x.""" + x = np.asarray(x, dtype=float) + if x.shape != (self.n_inputs,): + msg = f"Expected x shape {(self.n_inputs,)}, got {x.shape}" + raise ValueError(msg) + + # Clamp defensively; upstream encoder should already map into [0, 1] + x = np.clip(x, 0.0, 1.0) + + z = np.array([self._eval_latent(x, ld) for ld in self.latent_defs], dtype=float) + y = self.mix_weights @ z + + # Add small output-specific residuals so not everything is perfectly low-rank + residual = np.array( + [self._eval_residual(x, rd) for rd in self.residual_defs], dtype=float + ) + y = y + residual + + if self.config.noise_std > 0: + # deterministic if seed fixed and call order fixed; default is off for stable tests + y = y + self.rng.normal( + 0.0, self.config.noise_std, size=self.config.n_outputs + ) + + y = (y - self.output_shift) / self.output_scale + return y + + def _make_latent_definition(self, k: int) -> dict[str, Any]: + """Create one latent function definition.""" + latent_type = k % 4 + dims = self.rng.choice( + self.n_inputs, size=self.active_dims_per_latent, replace=False + ) + + if latent_type == 0: + # additive sinusoid + return { + "type": "additive_sin", + "dims": dims, + "amp": self.rng.uniform(0.4, 1.2, size=len(dims)), + "freq": self.rng.integers(1, self.freq_max + 1, size=len(dims)), + "phase": self.rng.uniform(0.0, 2 * math.pi, size=len(dims)), + } + + if latent_type == 1: + # smooth quadratic bowl-ish feature + return { + "type": "quadratic", + "dims": dims, + "weight": self.rng.uniform(0.5, 1.5, size=len(dims)), + "center": 
self.rng.uniform(0.2, 0.8, size=len(dims)), + } + + if latent_type == 2: + # pairwise interaction latent + pair_count = max(1, len(dims) // 2) + pair_dims = dims[: 2 * pair_count].reshape(pair_count, 2) + return { + "type": "pairwise_sin", + "pairs": pair_dims, + "weight": self.rng.uniform(0.4, 1.0, size=pair_count), + } + + # Friedman-like latent, adapted to arbitrary dimension by cycling + d0 = dims[0 % len(dims)] + d1 = dims[1 % len(dims)] + d2 = dims[2 % len(dims)] + d3 = dims[3 % len(dims)] + d4 = dims[4 % len(dims)] + return { + "type": "friedman_like", + "dims": np.array([d0, d1, d2, d3, d4], dtype=int), + } + + def _make_residual_definition(self, j: int) -> dict[str, Any]: + """Create a small output-specific residual.""" + dims = self.rng.choice(self.n_inputs, size=min(3, self.n_inputs), replace=False) + return { + "dims": dims, + "amp": self.rng.uniform(0.2, 0.8, size=len(dims)) * self.residual_scale, + "freq": self.rng.integers(1, self.freq_max + 1, size=len(dims)), + "phase": self.rng.uniform(0.0, 2 * math.pi, size=len(dims)), + } + + def _eval_latent(self, x: np.ndarray, ld: dict[str, Any]) -> float: + t = ld["type"] + + if t == "additive_sin": + dims = ld["dims"] + return float( + np.sum( + ld["amp"] * np.sin(2 * math.pi * ld["freq"] * x[dims] + ld["phase"]) + ) + ) + + if t == "quadratic": + dims = ld["dims"] + xc = x[dims] - ld["center"] + return float(np.sum(ld["weight"] * xc * xc)) + + if t == "pairwise_sin": + total = 0.0 + for w, (i, j) in zip(ld["weight"], ld["pairs"], strict=True): + total += float(w * math.sin(math.pi * x[i] * x[j])) + return total + + if t == "friedman_like": + i0, i1, i2, i3, i4 = ld["dims"] + return float( + 10.0 * math.sin(math.pi * x[i0] * x[i1]) + + 20.0 * (x[i2] - 0.5) ** 2 + + 10.0 * x[i3] + + 5.0 * x[i4] + ) + + msg = f"Unknown latent type: {t}" + raise ValueError(msg) + + def _eval_residual(self, x: np.ndarray, rd: dict[str, Any]) -> float: + dims = rd["dims"] + return float( + np.sum(rd["amp"] * np.sin(2 * math.pi * 
rd["freq"] * x[dims] + rd["phase"])) + ) + + def _fit_normalization(self) -> None: + """Approximate output mean/std over a fixed reference design.""" + ref_rng = np.random.default_rng(self.seed + 1_000_000) + n_ref = 2048 if self.config.difficulty == "easy" else 4096 + X = ref_rng.uniform(0.0, 1.0, size=(n_ref, self.n_inputs)) + + Y = np.zeros((n_ref, self.config.n_outputs), dtype=float) + for i in range(n_ref): + z = np.array( + [self._eval_latent(X[i], ld) for ld in self.latent_defs], dtype=float + ) + residual = np.array( + [self._eval_residual(X[i], rd) for rd in self.residual_defs], + dtype=float, + ) + Y[i] = self.mix_weights @ z + residual + + self.output_shift = Y.mean(axis=0) + self.output_scale = Y.std(axis=0) + self.output_scale[self.output_scale < 1e-8] = 1.0 diff --git a/src/globi/models/surrogate/samplers.py b/src/globi/models/surrogate/samplers.py new file mode 100644 index 0000000..32acb4c --- /dev/null +++ b/src/globi/models/surrogate/samplers.py @@ -0,0 +1,720 @@ +"""Conditional Priors and Samplers. + +Ported from epengine/models/sampling.py with enhancements: +- Fixed NaN comparison bug in ConditionalPrior +- Added MultiColumnConditionalPrior for multi-column conditioning + without requiring ConcatenateFeaturesSampler intermediate columns +""" + +from abc import ABC, abstractmethod +from typing import Literal, cast + +import networkx as nx +import numpy as np +import pandas as pd +from pydantic import BaseModel, model_validator + +# TODO: Make sure that all of the samplers can be serialized and deserialized with proper discrimination, i.e. that they do not share identical field names. 
+ + +class SamplingError(Exception): + """A sampling error.""" + + pass + + +class Sampler(ABC): + """A sampler.""" + + @abstractmethod + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample features from a prior, which may depend on a context.""" + pass + + @property + @abstractmethod + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + pass + + +class UniformSampler(BaseModel, Sampler): + """A uniform sampler which generates values uniformly between a min and max value.""" + + min: float + max: float + round: Literal["ceil", "floor", "nearest"] | None = None + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample uniformly between a min and max value.""" + samples = generator.uniform(self.min, self.max, size=n) + if self.round == "ceil": + samples = np.ceil(samples) + elif self.round == "floor": + samples = np.floor(samples) + elif self.round == "nearest": + samples = np.round(samples) + return samples + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set() + + +class ClippedNormalSampler(BaseModel, Sampler): + """A clipped normal sampler which generates values from a normal distribution, clipped to a min and max value.""" + + mean: float + std: float + clip_min: float | None + clip_max: float | None + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample from a normal distribution, clipped to a min and max value.""" + clip_min = self.clip_min if self.clip_min is not None else -np.inf + clip_max = self.clip_max if self.clip_max is not None else np.inf + samples = generator.normal(self.mean, self.std, size=n).clip(clip_min, clip_max) + return samples + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set() + + +class 
FixedValueSampler(BaseModel): + """A fixed value sampler which generates a fixed value for all samples.""" + + value: float | str | int | bool + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample a fixed value.""" + return np.full(n, self.value) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set() + + +class CategoricalSampler(BaseModel): + """A categorical sampler which generates values from a categorical distribution.""" + + values: list[str] | list[float] | list[int] + weights: list[float] + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample from a categorical distribution.""" + return generator.choice(self.values, size=n, p=self.weights) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set() + + @model_validator(mode="after") + def check_values_and_weights(self): + """Check that the values and weights are the same length and normalized.""" + if len(self.values) != len(self.weights): + msg = "values and weights must be the same length" + raise ValueError(msg) + if not np.isclose(sum(self.weights), 1): + self.weights = [w / sum(self.weights) for w in self.weights] + return self + + +class CopySampler(BaseModel): + """A deterministic sampler which generates a copy of a feature in the provided context dataframe.""" + + feature_to_copy: str + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a copy of a feature.""" + if self.feature_to_copy not in context.columns: + msg = f"Feature to copy {self.feature_to_copy} not found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." 
+ ) + raise SamplingError(msg) + return context[self.feature_to_copy].to_numpy() + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_copy} + + +class AddValueSampler(BaseModel): + """A deterministic sampler which adds a value to a feature.""" + + feature_to_add_to: str + value_to_add: float + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a sum of a feature and a value.""" + if self.feature_to_add_to not in context.columns: + msg = f"Feature to add to {self.feature_to_add_to} not found in context dataframe." + raise SamplingError(msg) + return context[self.feature_to_add_to].to_numpy() + self.value_to_add + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_add_to} + + +class SumValuesSampler(BaseModel): + """A deterministic sampler which generates a sum of features.""" + + features_to_sum: list[str] + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a sum of features.""" + if not all(f in context.columns for f in self.features_to_sum): + msg = f"All features to sum {self.features_to_sum} must be found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." 
+ ) + raise SamplingError(msg) + return np.sum(context[self.features_to_sum].to_numpy(), axis=1) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set(self.features_to_sum) + + +class MultiplyValueSampler(BaseModel): + """A deterministic sampler which generates a product of a feature and a value.""" + + feature_to_multiply: str + value_to_multiply: float + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a multiply of a feature.""" + if self.feature_to_multiply not in context.columns: + msg = f"Feature to multiply {self.feature_to_multiply} not found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." + ) + raise SamplingError(msg) + return context[self.feature_to_multiply].to_numpy() * self.value_to_multiply + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_multiply} + + +class ProductValuesSampler(BaseModel): + """A deterministic sampler which generates a product of features.""" + + features_to_multiply: list[str] + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a product of features.""" + if not all(f in context.columns for f in self.features_to_multiply): + msg = f"All features to multiply {self.features_to_multiply} must be found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." 
+ ) + raise SamplingError(msg) + return np.prod(context[self.features_to_multiply].to_numpy(), axis=1) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set(self.features_to_multiply) + + +class InvertSampler(BaseModel): + """A deterministic sampler which generates the multiplicative inverse of a feature.""" + + feature_to_invert: str + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute an invert of a feature.""" + if self.feature_to_invert not in context.columns: + msg = f"Feature to invert {self.feature_to_invert} not found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." + ) + raise SamplingError(msg) + return 1 / context[self.feature_to_invert].to_numpy() + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_invert} + + +class PowerSampler(BaseModel): + """A deterministic sampler which generates a power of a feature.""" + + feature_to_power: str + power: float + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a power of a feature.""" + return context[self.feature_to_power].to_numpy() ** self.power + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_power} + + +class LogSampler(BaseModel): + """A deterministic sampler which generates a log of a feature.""" + + feature_to_log: str + base: float = np.e + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a log of a feature.""" + if self.feature_to_log not in context.columns: + msg = ( + f"Feature to log {self.feature_to_log} not found in context dataframe." 
+ ) + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." + ) + raise SamplingError(msg) + return np.log(context[self.feature_to_log].to_numpy()) / np.log(self.base) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_log} + + +class RoundSampler(BaseModel): + """A deterministic sampler which applies ceil, floor, or nearest to a feature.""" + + feature_to_round: str + operation: Literal["ceil", "floor", "nearest"] + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Apply ceil, floor, or nearest to a feature.""" + if self.feature_to_round not in context.columns: + msg = f"Feature to round {self.feature_to_round} not found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." + ) + raise SamplingError(msg) + values = context[self.feature_to_round].to_numpy() + if self.operation == "ceil": + return np.ceil(values) + if self.operation == "floor": + return np.floor(values) + return np.round(values) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.feature_to_round} + + +class ConcatenateFeaturesSampler(BaseModel): + """A deterministic sampler which concatenates features. + + Retained for backward compatibility. Prefer MultiColumnConditionalPrior + for multi-column conditioning instead of creating intermediate compound key columns. 
+ """ + + features_to_concatenate: list[str] + separator: str = ":" + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Compute a concatenation of features.""" + if not all(f in context.columns for f in self.features_to_concatenate): + msg = f"All features to concatenate {self.features_to_concatenate} must be found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." + ) + raise SamplingError(msg) + cols: pd.DataFrame = cast(pd.DataFrame, context[self.features_to_concatenate]) + return cols.astype(str).agg(self.separator.join, axis=1).to_numpy() + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set(self.features_to_concatenate) + + +PriorSampler = ( + UniformSampler + | ClippedNormalSampler + | FixedValueSampler + | CategoricalSampler + | CopySampler + | AddValueSampler + | SumValuesSampler + | MultiplyValueSampler + | ProductValuesSampler + | InvertSampler + | LogSampler + | RoundSampler + | ConcatenateFeaturesSampler + | PowerSampler +) + + +class ConditionalPriorCondition(BaseModel): + """A conditional prior condition.""" + + match_val: str | float | int | bool + sampler: PriorSampler + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample from a conditional prior condition.""" + return self.sampler.sample(context, n, generator) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return self.sampler.depends_on + + +class MultiColumnCondition(BaseModel): + """A condition that matches on multiple source features simultaneously. + + Used with MultiColumnConditionalPrior to condition on combinations + of column values without creating intermediate compound key columns. + """ + + match_vals: tuple[str | float | int | bool, ...] 
+ sampler: PriorSampler + + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample from this condition's sampler.""" + return self.sampler.sample(context, n, generator) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return self.sampler.depends_on + + +class PriorABC(ABC): + """A prior.""" + + @abstractmethod + def sample( + self, context: pd.DataFrame, n: int, generator: np.random.Generator + ) -> np.ndarray: + """Sample from a prior.""" + pass + + @property + @abstractmethod + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + pass + + +class ConditionalPrior(BaseModel, PriorABC): + """A conditional prior that selects a sampler based on a single source feature.""" + + source_feature: str + conditions: list[ConditionalPriorCondition] + fallback_prior: PriorSampler | None + + def sample(self, context: pd.DataFrame, n: int, generator: np.random.Generator): + """Sample from a conditional prior.""" + conditional_samples = { + c.match_val: c.sampler.sample(context, n, generator) + for c in self.conditions + } + test_feature = context[self.source_feature].to_numpy() + + final = np.full(n, np.nan) + + any_matched_mask = np.full(n, False) + for match_val, samples_for_match_val in conditional_samples.items(): + mask = test_feature == match_val + any_matched_mask = any_matched_mask | mask + final = np.where(mask, samples_for_match_val, final) + + if self.fallback_prior is not None: + mask = ~any_matched_mask + final = np.where( + mask, self.fallback_prior.sample(context, n, generator), final + ) + + if np.isnan(final).any(): + msg = ( + "Final array contains NaN values; possibly due to an unmatched value for " + f"feature {self.source_feature}." 
+ ) + raise SamplingError(msg) + + return final + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return {self.source_feature} | { + dependency for c in self.conditions for dependency in c.depends_on + } + + +class MultiColumnConditionalPrior(BaseModel, PriorABC): + """A conditional prior that selects a sampler based on multiple source features. + + This eliminates the need for ConcatenateFeaturesSampler + compound key columns. + Instead of creating an intermediate concatenated column and matching on strings, + this prior directly matches on tuples of column values. + + Example usage:: + + prior = MultiColumnConditionalPrior( + source_features=["Typology", "Age_bracket"], + conditions=[ + MultiColumnCondition( + match_vals=("SFH", "pre_1975"), + sampler=CategoricalSampler(values=[...], weights=[...]), + ), + MultiColumnCondition( + match_vals=("MFH", "post_2003"), + sampler=UniformSampler(min=0.5, max=1.0), + ), + ], + fallback_prior=CategoricalSampler(values=[...], weights=[...]), + ) + """ + + source_features: list[str] + conditions: list[MultiColumnCondition] + fallback_prior: PriorSampler | None + + @model_validator(mode="after") + def validate_condition_lengths(self): + """Ensure all conditions have match_vals aligned with source_features.""" + for i, c in enumerate(self.conditions): + if len(c.match_vals) != len(self.source_features): + msg = ( + f"Condition {i}: match_vals length {len(c.match_vals)} " + f"!= source_features length {len(self.source_features)}" + ) + raise ValueError(msg) + return self + + def sample(self, context: pd.DataFrame, n: int, generator: np.random.Generator): + """Sample from a multi-column conditional prior.""" + for f in self.source_features: + if f not in context.columns: + msg = f"Source feature {f} not found in context dataframe." + raise SamplingError(msg) + if len(context) != n: + msg = ( + f"Context dataframe must have {n} rows, but it has {len(context)} rows." 
+ ) + raise SamplingError(msg) + + row_tuples = list( + zip(*(context[f].to_numpy() for f in self.source_features), strict=True) + ) + conditional_samples = { + c.match_vals: c.sampler.sample(context, n, generator) + for c in self.conditions + } + + final = np.full(n, np.nan) + any_matched = np.full(n, False) + + for match_vals, samples in conditional_samples.items(): + mask = np.array([t == match_vals for t in row_tuples]) + any_matched |= mask + final = np.where(mask, samples, final) + + if self.fallback_prior is not None: + final = np.where( + ~any_matched, + self.fallback_prior.sample(context, n, generator), + final, + ) + + if np.isnan(final).any(): + unmatched_examples = [ + row_tuples[i] for i in range(n) if not any_matched[i] + ][:5] + msg = ( + "Final array contains NaN values; possibly due to unmatched values for " + f"features {self.source_features}. Examples of unmatched tuples: {unmatched_examples}" + ) + raise SamplingError(msg) + + return final + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return set(self.source_features) | { + dependency for c in self.conditions for dependency in c.sampler.depends_on + } + + +class UnconditionalPrior(BaseModel, PriorABC): + """An unconditional prior.""" + + sampler: PriorSampler + + def sample(self, context: pd.DataFrame, n: int, generator: np.random.Generator): + """Sample from an unconditional prior.""" + return self.sampler.sample(context, n, generator) + + @property + def depends_on(self) -> set[str]: + """The features that this sampler depends on.""" + return self.sampler.depends_on + + +Prior = UnconditionalPrior | ConditionalPrior | MultiColumnConditionalPrior + + +class Priors(BaseModel): + """A collection of priors defining a dependency graph for sampling. + + The sampled_features dict must be ordered such that dependencies come before + dependents (i.e. topological order). Sampling iterates in dict order. 

+
+    TODO: Add automatic topological sort and validation that all required
+    target model fields appear as terminal nodes in the graph.
+    """
+
+    sampled_features: dict[str, Prior]
+
+    def sample(self, context: pd.DataFrame, n: int, generator: np.random.Generator):
+        """Sample from all priors in dependency order."""
+        working_df = context.copy(deep=True)
+        # TODO: how do we deal with race conditions here in the sense that
+        # some features may require previous features to have already been sampled?
+        # TODO: Similarly, how do we ensure that there are no cycles in the dependency graph?
+        for feature, prior in self.sampled_features.items():
+            working_df[feature] = prior.sample(working_df, n, generator)
+        if working_df.isna().any().any():  # pyright: ignore [reportAttributeAccessIssue]
+            # TODO: allow NA values, e.g. in training?
+            msg = "Working dataframe contains NaN values; possibly due to an unmatched value."
+            raise SamplingError(msg)
+        return working_df
+
+    @property
+    def depends_on(self) -> set[str]:
+        """The features that this sampler depends on."""
+        return {
+            dependency
+            for prior in self.sampled_features.values()
+            for dependency in prior.depends_on
+        }
+
+    @property
+    def dependency_graph(self) -> nx.DiGraph:
+        """Construct a dependency graph between columns in the context dataframe.
+
+        Edges connect *from* the dependency *to* the dependent feature.
+        """
+        g = nx.DiGraph()
+        for feature, prior in self.sampled_features.items():
+            if prior.depends_on:
+                for dependency in prior.depends_on:
+                    g.add_edge(dependency, feature)
+        return g
+
+    @property
+    def root_features(self) -> set[str]:
+        """The features that have no dependencies."""
+        return {
+            node
+            for node in self.dependency_graph.nodes
+            if self.dependency_graph.in_degree(node) == 0
+        }
+
+    def select_prior_tree_for_changed_features(
+        self, changed_features: set[str], resample_changed_features: bool = True
+    ) -> "Priors":
+        """Select the prior tree for the changed features.
+ + Returns a new Priors object with only the priors that are + downstream of the changed features. + + Args: + changed_features: The features that have changed. + resample_changed_features: Whether to resample the changed features + themselves (dependencies are always resampled). You probably want + this to be False, but for backwards compatibility it defaults to True. + + Returns: + A new Priors object with only the downstream priors. + """ + g = self.dependency_graph + all_changing_priors: set[str] = set() + for any_feature in self.root_features.union(set(self.sampled_features.keys())): + if any(f == any_feature for f in changed_features): + descendants = nx.descendants(g, any_feature) + + if any_feature in self.sampled_features and resample_changed_features: + all_changing_priors.add(any_feature) + + for dep in descendants: + if dep in self.sampled_features: + all_changing_priors.add(dep) + + return Priors( + sampled_features={ + f: p + for f, p in self.sampled_features.items() + if f in all_changing_priors + } + ) diff --git a/src/globi/models/surrogate/sampling.py b/src/globi/models/surrogate/sampling.py index fff8068..8ead08f 100644 --- a/src/globi/models/surrogate/sampling.py +++ b/src/globi/models/surrogate/sampling.py @@ -2,19 +2,29 @@ from typing import cast +import numpy as np import pandas as pd +from pydantic import Field +from scythe.base import ExperimentInputSpec from globi.models.surrogate.configs.pipeline import StageSpec +from globi.models.surrogate.samplers import Priors class SampleSpec(StageSpec): """A spec for the sampling stage of the progressive training.""" # TODO: add the ability to receive the last set of error metrics and use them to inform the sampling + priors: Priors = Field( + ..., + description="The priors to use for sampling.", + ) - def stratified_selection(self) -> pd.DataFrame: + def stratified_selection(self) -> pd.DataFrame | None: """Sample the gis data.""" - df = self.parent.gis_data + df = self.parent.context_data + if df 
is None: + return None stratification_field = self.parent.stratification.field stratification_aliases = self.parent.stratification.aliases @@ -87,106 +97,27 @@ def sample_equally_by_stratum( } return cast(pd.DataFrame, pd.concat(sampled_strata.values())) - # def sample_semantic_fields(self, df: pd.DataFrame) -> pd.DataFrame: - # """Sample the semantic fields.""" - # # TODO: consider randomizing the locations? - # semantic_fields = self.progressive_training_spec.semantic_fields_data - # for field in semantic_fields.Fields: - # if isinstance(field, CategoricalFieldSpec): - # options = field.Options - # df[field.Name] = self.random_generator.choice(options, size=len(df)) - # elif isinstance(field, NumericFieldSpec): - # df[field.Name] = self.random_generator.uniform( - # field.Min, field.Max, size=len(df) - # ) - # else: - # msg = f"Invalid field type: {type(field)}" - # raise TypeError(msg) - # return df - - # def sample_basements_and_attics(self, df: pd.DataFrame) -> pd.DataFrame: - # """Add basement/attics to models.""" - # # get the options for the type literal - # options: list[BasementAtticOccupationConditioningStatus] = [ - # "none", - # "occupied_unconditioned", - # "unoccupied_unconditioned", - # "occupied_conditioned", - # "unoccupied_conditioned", - # ] - # weights = [0.5, *([0.5 / 4] * 4)] - # # sample the type literal - # df["basement"] = self.random_generator.choice(options, size=len(df), p=weights) - # df["attic"] = self.random_generator.choice(options, size=len(df), p=weights) - # df["exposed_basement_frac"] = self.random_generator.uniform( - # 0.1, 0.5, size=len(df) - # ) - # return df - - # def sample_wwrs(self, df: pd.DataFrame) -> pd.DataFrame: - # """Sample the wwrs.""" - # wwr_min = 0.05 - # wwr_max = 0.35 - # df["wwr"] = self.random_generator.uniform(wwr_min, wwr_max, size=len(df)) - # return df - - # def sample_f2f_heights(self, df: pd.DataFrame) -> pd.DataFrame: - # """Sample the f2f heights.""" - # f2f_min = 2.3 - # f2f_max = 4.3 - # 
df["f2f_height"] = self.random_generator.uniform(f2f_min, f2f_max, size=len(df)) - # return df - - def to_sim_specs(self, df: pd.DataFrame): - """Convert the sampled dataframe to a list of simulation specs. - - For now, we are assuming that all the other necessary fields are present and we are just - ensuring that sort_index and experiment_id are set appropriately. - """ - # df["semantic_field_context"] = df.apply( - # lambda row: { - # field.Name: row[field.Name] - # for field in self.progressive_training_spec.semantic_fields_data.Fields - # }, - # axis=1, - # ) - # df["sort_index"] = np.arange(len(df)) - # df["experiment_id"] = self.experiment_key - # # TODO: consider allowing the component map/semantic_fields/database to be inherited from the row - # # e.g. to allow multiple component maps and dbs per run. - # df["component_map_uri"] = str(self.progressive_training_spec.component_map_uri) - # df["semantic_fields_uri"] = str( - # self.progressive_training_spec.semantic_fields_uri - # ) - # df["db_uri"] = str(self.progressive_training_spec.database_uri) - return df - - # def make_payload(self, s3_client: S3ClientType): - # """Make the payload for the scatter gather task, including generating the simulation specs and serializing them to s3.""" - # df = self.stratified_selection() - # # df = self.sample_semantic_fields(df) - # # df = self.sample_basements_and_attics(df) - # # df = self.sample_wwrs(df) - # # df = self.sample_f2f_heights(df) - # df = self.to_sim_specs(df) - # # serialize to a parquet file and upload to s3 - # bucket = self.progressive_training_spec.storage_settings.BUCKET - # with tempfile.TemporaryDirectory() as tmpdir: - # tmpdir = Path(tmpdir) - # fpath = tmpdir / "specs.pq" - # df.to_parquet(fpath) - # key = f"hatchet/{self.experiment_key}/specs.pq" - # specs_uri = f"s3://{bucket}/{key}" - # s3_client.upload_file(fpath.as_posix(), bucket, key) - - # payload = { - # "specs": specs_uri, - # "bucket": bucket, - # "workflow_name": 
"simulate_sbem_shoebox", - # "experiment_id": self.experiment_key, - # "recursion_map": { - # "factor": self.progressive_training_spec.iteration.recursion_factor, - # "max_depth": self.progressive_training_spec.iteration.recursion_max_depth, - # }, - # } - # return payload + # TODO: Add the ability to check the compatiblity of a sampling spec with an input_validator_type. + + def populate_sample_df(self) -> pd.DataFrame: + """Populate the sample dataframe with the priors.""" + base_df = self.stratified_selection() + if base_df is None: + base_df = pd.DataFrame() + # in case we needed more samples due to the strata min req + n_samples = max(self.parent.iteration.n_per_gen_for_current_iter, len(base_df)) + return self.priors.sample( + base_df, + n_samples, + self.random_generator, + ) + + def convert_to_specs( + self, df: pd.DataFrame, input_validator: type[ExperimentInputSpec] + ): + """Convert the sampled dataframe to a list of simulation specs.""" + df["experiment_id"] = "placeholder" + df["sort_index"] = np.arange(len(df)) + return [ + input_validator.model_validate(row) for row in df.to_dict(orient="records") + ] diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 17c0751..0f53f0f 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -1,6 +1,5 @@ """The training pipeline.""" -import random import tempfile from datetime import timedelta from pathlib import Path @@ -10,6 +9,7 @@ import pandas as pd import yaml from hatchet_sdk import Context +from scythe.base import ExperimentInputSpec from scythe.experiments import ( BaseExperiment, ) @@ -19,7 +19,6 @@ from scythe.settings import ScytheStorageSettings from scythe.utils.filesys import S3Url -from globi.models.surrogate.dummy import DummySimulationInput from globi.models.surrogate.outputs import ( CombineResultsResult, ExperimentRunWithRef, @@ -28,6 +27,7 @@ StartTrainingResult, TrainingEvaluationResult, ) +from globi.models.surrogate.sampling import 
SampleSpec from globi.models.surrogate.training import ( FoldResult, ProgressiveTrainingSpec, @@ -73,20 +73,17 @@ def create_simulations( ) -> ExperimentRunWithRef: """Create the simulations.""" # STEP 1: Generate the training samples, allocate simulations - specs = [ - DummySimulationInput( - weather_file="some" if random.random() < 0.5 else "other", # noqa: S311 - a=random.randint(-10, 10), # noqa: S311 - b=random.randint(-10, 10), # noqa: S311 - c=random.randint(-10, 10), # noqa: S311 - experiment_id="placeholder", - sort_index=i, - ) - for i in range(1_000) - ] + sample_spec = SampleSpec(parent=spec, priors=spec.samplers) + sample_df = sample_spec.populate_sample_df() + + # TODO: we shouldn't have to cast here, but the typing on `runnable` is not working as expected. + input_validator = cast( + type[ExperimentInputSpec], spec.runnable.input_validator_type + ) + specs = sample_spec.convert_to_specs(sample_df, input_validator) # STEP 2: Simulate the simulations using scythe - run_name = f"{spec.experiment_id}/sample" + run_name = spec.subrun_name("sample") exp = BaseExperiment( runnable=spec.runnable, @@ -201,7 +198,7 @@ def start_training( # Alternatively, one task per fold-column combination? 
specs = train_spec.schedule - run_name = f"{spec.experiment_id}/train" + run_name = spec.subrun_name("train") exp = BaseExperiment( runnable=train_regressor_with_cv_fold, run_name=run_name, From 5e277f760fcd46ee1264636496e805a044e80a9b Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 15:07:40 -0400 Subject: [PATCH 15/31] vestigial cleanup --- src/globi/models/surrogate/training.py | 180 ------------------------- src/globi/pipelines/training.py | 60 --------- 2 files changed, 240 deletions(-) diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index c90b6b2..8da98a1 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -497,125 +497,6 @@ def train_pytorch_tabular(self, tempdir: Path): model.save_model((tempdir / "model").as_posix()) return model, trainer - # @cached_property - # def non_numeric_options(self) -> dict[str, list[str]]: - # """Get the non-numeric options for categorical features. - - # We must perform this across the entire dataset not just splits for consistency - # and to ensure we get all options. - - # TODO: In the future, this should be based off of transform instructions. - # """ - # fparams = self.dparams[ - # [col for col in self.dparams.columns if col.startswith("feature.")] - # ] - # non_numeric_cols = fparams.select_dtypes(include=["object"]).columns - # non_numeric_options = { - # col: sorted(cast(pd.Series, fparams[col]).unique().tolist()) - # for col in non_numeric_cols - # } - # return non_numeric_options - - # @cached_property - # def numeric_min_maxs(self) -> dict[str, tuple[float, float]]: - # """Get the min and max for numeric features. - - # We perform this only on the training set to prevent leakage. - - # TODO: In the future, this should be based off of transform instructions. - - # Args: - # params (pd.DataFrame): The parameters to get the min and max for. 
- - # Returns: - # norm_bounds (dict[str, tuple[float, float]]): The min and max for each numeric feature. - # """ - # params, _ = self.train_segment - # fparams = params[[col for col in params.columns if col.startswith("feature.")]] - # numeric_cols = fparams.select_dtypes(include=["number"]).columns - # numeric_min_maxs = { - # col: (float(fparams[col].min()), float(fparams[col].max())) - # for col in numeric_cols - # } - # for col in numeric_min_maxs: - # low, high = numeric_min_maxs[col] - # # we want to floor the "low" value down to the nearest 0.001 - # # and ceil the "high" value up to the nearest 0.001 - # # e.g. if low is -0.799, we want to set it to -0.800 - # # and if high is 0.799, we want to set it to 0.800 - # numeric_min_maxs[col] = ( - # math.floor(low * 1000) / 1000, - # math.ceil(high * 1000) / 1000, - # ) - # return numeric_min_maxs - - # @cached_property - # def feature_spec(self) -> RegressorInputSpec: - # """Get the feature spec which can be serialized and reloaded.""" - # params, _ = self.train_segment - # features: list[CategoricalFeature | ContinuousFeature] = [] - # for col in params.columns: - # if col in self.numeric_min_maxs: - # low, high = self.numeric_min_maxs[col] - # features.append( - # ContinuousFeature(name=col, min=float(low), max=float(high)) - # ) - # elif col in self.non_numeric_options: - # opts = self.non_numeric_options[col] - # features.append(CategoricalFeature(name=col, values=opts)) - # return RegressorInputSpec(features=features) - - # def normalize_params(self, params: pd.DataFrame) -> pd.DataFrame: - # """Normalize the params.""" - # regressor_spec = self.feature_spec - # fparams = regressor_spec.transform(params, do_check=False) - # return fparams - - # def run( - # self, - # ): - # """Train the model.""" - # train_params, train_targets = self.train_segment - # test_params, test_targets = self.test_segment - - # # select/transform the params as necessary - # train_params = self.normalize_params(train_params) - # 
test_params = self.normalize_params(test_params) - - # # Train the model - # # train_preds, test_preds = self.train_xgboost( - # # train_params, train_targets, test_params, test_targets - # # ) - # s3_client = boto3.client("s3") - # train_preds, test_preds = self.train_lightgbm( - # train_params, train_targets, test_params, test_targets, s3_client - # ) - - # # compute the metrics - # global_train_metrics, stratum_train_metrics = self.compute_metrics( - # train_preds, train_targets - # ) - # global_test_metrics, stratum_test_metrics = self.compute_metrics( - # test_preds, test_targets - # ) - - # global_metrics = pd.concat( - # [global_train_metrics, global_test_metrics], - # axis=1, - # keys=["train", "test"], - # names=["split_segment"], - # ) - # stratum_metrics = pd.concat( - # [stratum_train_metrics, stratum_test_metrics], - # axis=1, - # keys=["train", "test"], - # names=["split_segment"], - # ) - # return { - # "global_metrics": global_metrics, - # "stratum_metrics": stratum_metrics, - # } - def compute_frame_metrics( self, preds: pd.DataFrame, targets: pd.DataFrame ) -> pd.DataFrame: @@ -711,67 +592,6 @@ def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): ) return global_metrics, stratum_metrics - # def train_lightgbm( - # self, - # train_params: pd.DataFrame, - # train_targets: pd.DataFrame, - # test_params: pd.DataFrame, - # test_targets: pd.DataFrame, - # s3_client: S3ClientType | None = None, - # ): - # """Train the lightgbm model.""" - # import lightgbm as lgb - - # lgb_params = { - # "objective": "regression", - # "metric": "rmse", - # } - # test_preds = {} - # train_preds = {} - # for col in train_targets.columns: - # lgb_train_data = lgb.Dataset(train_params, label=train_targets[col]) - # lgb_test_data = lgb.Dataset(test_params, label=test_targets[col]) - # model = lgb.train( - # lgb_params, - # lgb_train_data, - # num_boost_round=4000, - # valid_sets=[lgb_test_data], - # valid_names=["eval"], - # 
callbacks=[lgb.early_stopping(20)], - # ) - # test_preds[col] = pd.Series( - # cast(np.ndarray, model.predict(test_params)), - # index=test_targets.index, - # name=col, - # ) - # train_preds[col] = pd.Series( - # cast(np.ndarray, model.predict(train_params)), - # index=train_targets.index, - # name=col, - # ) - # if s3_client is not None: - # model_name = ( - # f"{col}.lgb" - # if not isinstance(col, tuple) - # else f"{'.'.join(col)}.lgb" - # ) - # model_key = self.format_model_key(model_name) - # model_str = model.model_to_string() - # s3_client.put_object(Bucket=self.bucket, Key=model_key, Body=model_str) - - # if s3_client is not None: - # import yaml - - # space_key = self.format_model_key("space.yml") - # space_str = yaml.dump( - # self.feature_spec.model_dump(mode="json"), indent=2, sort_keys=False - # ) - # s3_client.put_object(Bucket=self.bucket, Key=space_key, Body=space_str) - - # test_preds = pd.concat(test_preds, axis=1) - # train_preds = pd.concat(train_preds, axis=1) - # return train_preds, test_preds - class FoldResult(ExperimentOutputSpec): """The output for a fold.""" diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 0f53f0f..e9b0ce0 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -409,63 +409,3 @@ def finalize(spec: ProgressiveTrainingSpec, context: Context) -> FinalizeResult: temp_path.as_posix(), spec.storage_settings.BUCKET, summary_manifest_uri ) return result - - -# if __name__ == "__main__": -# import yaml -# from pydantic import HttpUrl -# from scythe.settings import ScytheStorageSettings - -# from globi.models.surrogate.configs.pipeline import ( -# ConvergenceThresholds, -# ConvergenceThresholdsByTarget, -# IterationSpec, -# StratificationSpec, -# ) -# from globi.models.surrogate.dummy import dummy_simulation - -# base_run_name = "test-experiment" -# progressive_training_spec = ProgressiveTrainingSpec( -# runnable=dummy_simulation, -# sort_index=0, -# 
experiment_id="placeholder", -# gis_uri=HttpUrl("https://example.com/gis.parquet"), -# stratification=StratificationSpec( -# field="weather_file", -# sampling="equal", -# aliases=["feature.weather.file"], -# ), -# iteration=IterationSpec( -# max_iters=3, -# ), -# convergence_criteria=ConvergenceThresholdsByTarget( -# thresholds={ -# "*": ConvergenceThresholds(r2=0.975), -# }, -# ), -# storage_settings=ScytheStorageSettings(), -# base_run_name=base_run_name, -# ) -# with open("inputs/training.yml", "w") as f: -# yaml.dump( -# progressive_training_spec.model_dump(mode="json"), -# f, -# indent=2, -# sort_keys=False, -# ) - -# exp = BaseExperiment( -# runnable=iterative_training, -# run_name="test-experiment", -# ) - -# run, ref = exp.allocate( -# progressive_training_spec, -# version="bumpmajor", -# recursion_map=RecursionMap( -# factor=2, -# max_depth=0, -# ), -# ) - -# print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) From d7d0105b6885edbb565cd89b3a432238ec2b4e6a Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 15:18:21 -0400 Subject: [PATCH 16/31] lazily import scythe to prevent docs error --- src/globi/tools/cli/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/globi/tools/cli/main.py b/src/globi/tools/cli/main.py index cb11098..7950de6 100644 --- a/src/globi/tools/cli/main.py +++ b/src/globi/tools/cli/main.py @@ -7,7 +7,6 @@ import boto3 import click import yaml -from scythe.experiments import BaseExperiment if TYPE_CHECKING: from mypy_boto3_s3 import S3Client @@ -112,6 +111,8 @@ def manifest( ) def surrogate(path): """Submit a GloBI surrogate experiment.""" + from scythe.experiments import BaseExperiment + from globi.models.surrogate.configs.pipeline import ProgressiveTrainingSpec from globi.pipelines.training import iterative_training From 5474f63380f0ebcbdabb2fb5d797dc603a3303e3 Mon Sep 17 00:00:00 2001 From: Sam Wolk 
<36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 15:25:13 -0400 Subject: [PATCH 17/31] easier dummy fn --- src/globi/models/surrogate/dummy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py index bd08fda..e8af474 100644 --- a/src/globi/models/surrogate/dummy.py +++ b/src/globi/models/surrogate/dummy.py @@ -69,8 +69,8 @@ def dummy_simulation( n_inputs, SyntheticProblemConfig( n_outputs=5, - n_latents=8, - difficulty="medium", + n_latents=3, + difficulty="easy", noise_std=0.0, normalize_outputs=True, ), From b49a701541507aa6cf7ff19b4e00cecc5e19dc48 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 15:31:37 -0400 Subject: [PATCH 18/31] add a bunch of log statements --- src/globi/pipelines/training.py | 35 ++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index e9b0ce0..d768327 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -73,14 +73,18 @@ def create_simulations( ) -> ExperimentRunWithRef: """Create the simulations.""" # STEP 1: Generate the training samples, allocate simulations + context.log("Generating training samples...") sample_spec = SampleSpec(parent=spec, priors=spec.samplers) sample_df = sample_spec.populate_sample_df() + context.log("Training samples generated.") # TODO: we shouldn't have to cast here, but the typing on `runnable` is not working as expected. 
input_validator = cast( type[ExperimentInputSpec], spec.runnable.input_validator_type ) + context.log("Converting training samples to specs...") specs = sample_spec.convert_to_specs(sample_df, input_validator) + context.log("Training samples converted to specs.") # STEP 2: Simulate the simulations using scythe run_name = spec.subrun_name("sample") @@ -91,11 +95,13 @@ def create_simulations( storage_settings=spec.storage_settings or ScytheStorageSettings(), ) + context.log("Allocating simulations...") run, ref = exp.allocate( specs, version="bumpmajor", recursion_map=spec.iteration.recursion, ) + context.log("Simulations allocated.") run_name = run.versioned_experiment.base_experiment.run_name if not run_name: @@ -136,7 +142,12 @@ async def await_simulations( def combine_results( spec: ProgressiveTrainingSpec, context: Context ) -> CombineResultsResult: - """Combine the results of the simulations.""" + """Combine the results of the simulations. + + Specifically, this step is responsible for combining the results of the simulations + of the previous iteration(s) with the results of the current iteration. In other words, + this is where we grow our simulation cache. + """ # TODO: major consider how we handle beyond-memory scale scenarios. # i.e. we probably need to refactor to allow lists of files that only the # main worker is responsible for combining. @@ -150,6 +161,7 @@ def combine_results( # also, should we make sure to remove NaN? if spec.data_uris: + context.log("Combining results from previous iterations...") shared_keys = set(spec.data_uris.uris.keys()) & set(results.uris.keys()) old_keys_only = set(spec.data_uris.uris.keys()) - shared_keys new_keys_only = set(results.uris.keys()) - shared_keys @@ -161,15 +173,20 @@ def combine_results( # TODO: refactor to use a threadpool executor? # For memory reasons, it might be a good idea to stay single threaded here. 
for key in shared_keys: + context.log(f"Combining results for key {key}...") old_df = pd.read_parquet(str(spec.data_uris.uris[key])) new_df = pd.read_parquet(str(results.uris[key])) combined_df = pd.concat([old_df, new_df], axis=0) uri = spec.format_combined_output_uri(key) combined_df.to_parquet(str(uri)) + context.log(f"Results for key {key} combined and saved to s3.") combined_results[key] = uri else: # TODO: consider copying these over to the `combined` folder anyways. + context.log( + "No previous iterations to combine results from, so using results from current iteration." + ) combined_results = results.uris return CombineResultsResult( @@ -198,6 +215,7 @@ def start_training( # Alternatively, one task per fold-column combination? specs = train_spec.schedule + context.log("Scheduling training...") run_name = spec.subrun_name("train") exp = BaseExperiment( runnable=train_regressor_with_cv_fold, @@ -212,6 +230,7 @@ def start_training( max_depth=0, ), ) + context.log("Training scheduled.") if not run.versioned_experiment.base_experiment.run_name: msg = "Run name is required." 
@@ -258,8 +277,12 @@ def evaluate_training( results_output = context.task_output(await_training) strata_uri = results_output.uris["strata"] globals_uri = results_output.uris["global"] + context.log("Reading strata results from s3...") results = pd.read_parquet(str(strata_uri)) + context.log("Strata results read from s3.") + context.log("Reading global results from s3...") results_globals = pd.read_parquet(str(globals_uri)) + context.log("Global results read from s3.") fold_averages = cast( pd.Series, @@ -278,12 +301,14 @@ def evaluate_training( .unstack(), ) + context.log("Running convergence criteria...") ( convergence_all, _convergence_monitor_segment, _convergence_monitor_segment_and_target, _convergence, ) = spec.convergence_criteria.run(fold_averages) + context.log("Convergence criteria run.") return TrainingEvaluationResult( converged=convergence_all, @@ -305,14 +330,21 @@ def transition_recursion( """Transition the recursion.""" results = context.task_output(evaluate_training) if results.converged: + context.log("Converged! Time to wrap up... no more recursion.") return RecursionTransition(reasoning="converged", child_workflow_run_id=None) if spec.iteration.at_max_iters: + context.log( + "Not converged, but we're at the max number of iterations. Time to wrap up... no more recursion." + ) return RecursionTransition(reasoning="max_depth", child_workflow_run_id=None) await_training_output = context.task_output(await_training) # start_training_output = context.task_output(start_training) combine_results_output = context.task_output(combine_results) + context.log( + "Not converged, but we have more iterations to try. Time to continue recursion..." 
+ ) next_spec = spec.model_copy(deep=True) next_spec.iteration.current_iter += 1 next_spec.data_uris = combine_results_output.combined @@ -331,6 +363,7 @@ def transition_recursion( max_depth=0, ), ) + context.log("Recursion transitioned.") return RecursionTransition( reasoning=None, child_workflow_run_id=ref.workflow_run_id ) From a15f8eb2083a522357636dacce3aa2b02a860457 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 15:51:09 -0400 Subject: [PATCH 19/31] simplify dummy problem --- src/globi/models/surrogate/dummy.py | 249 ++++------------------------ 1 file changed, 29 insertions(+), 220 deletions(-) diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py index e8af474..ee33d5e 100644 --- a/src/globi/models/surrogate/dummy.py +++ b/src/globi/models/surrogate/dummy.py @@ -1,9 +1,8 @@ """Dummy simulation for testing.""" import math -from dataclasses import dataclass from pathlib import Path -from typing import Any, Literal, get_args +from typing import Literal, get_args import numpy as np import pandas as pd @@ -65,16 +64,10 @@ def dummy_simulation( """A dummy simulation.""" n_inputs = input_spec.n_inputs() n_outputs = 5 - problem = SyntheticMultiOutputProblem( + problem = SimpleSyntheticProblem( n_inputs, - SyntheticProblemConfig( - n_outputs=5, - n_latents=3, - difficulty="easy", - noise_std=0.0, - normalize_outputs=True, - ), - input_spec.sort_index, + n_outputs, + seed=input_spec.sort_index, ) y = problem.evaluate(np.array(input_spec.values)) @@ -93,223 +86,39 @@ def dummy_simulation( ) -@dataclass(frozen=True) -class SyntheticProblemConfig: - """Configuration for a synthetic multi-output regression problem.""" - - n_outputs: int = 8 - n_latents: int = 4 - difficulty: Literal["easy", "medium"] = "easy" - noise_std: float = 0.0 - normalize_outputs: bool = True - - -class SyntheticMultiOutputProblem: - """Deterministic synthetic multi-output function family. 
- - Inputs: - x in [0, 1]^d - - Outputs: - y in R^m - - Design goals: - - cheap to evaluate - - arbitrary input dimension - - arbitrary output count - - some outputs share latent structure - - some outputs contain mild independent residuals - - difficulty is tunable but never absurd - """ - - def __init__(self, n_inputs: int, config: SyntheticProblemConfig, seed: int): - """Initialize the synthetic multi-output problem.""" - if n_inputs < 1: - msg = "n_inputs must be >= 1" - raise ValueError(msg) - if config.n_outputs < 1: - msg = "n_outputs must be >= 1" - raise ValueError(msg) - if config.n_latents < 1: - msg = "n_latents must be >= 1" - raise ValueError(msg) +class SimpleSyntheticProblem: + """A simple synthetic problem.""" + def __init__(self, n_inputs: int, n_outputs: int, seed: int): + """Initialize the simple synthetic problem.""" self.n_inputs = n_inputs - self.config = config - self.rng = np.random.default_rng(seed) - self.seed = seed - - self.active_dims_per_latent = ( - min(5, n_inputs) if config.difficulty == "easy" else min(8, n_inputs) - ) - self.freq_max = 2 if config.difficulty == "easy" else 4 - self.residual_scale = 0.05 if config.difficulty == "easy" else 0.12 + self.n_outputs = n_outputs + rng = np.random.default_rng(seed) - # Shared latent parameters - self.latent_defs = [ - self._make_latent_definition(k) for k in range(config.n_latents) - ] - - # Output mixing weights: this is what creates output dependency - self.mix_weights = self.rng.normal( - loc=0.0, - scale=1.0 / math.sqrt(config.n_latents), - size=(config.n_outputs, config.n_latents), - ) - - # Small output-specific residual definitions - self.residual_defs = [ - self._make_residual_definition(j) for j in range(config.n_outputs) - ] - - # Optional approximate normalization constants computed deterministically - self.output_shift = np.zeros(config.n_outputs, dtype=float) - self.output_scale = np.ones(config.n_outputs, dtype=float) - if config.normalize_outputs: - 
self._fit_normalization() + self.alpha = rng.normal(size=n_outputs) + self.beta = rng.normal(scale=0.8, size=(n_outputs, n_inputs)) + self.gamma = rng.normal(scale=0.4, size=(n_outputs, n_inputs)) + self.delta = rng.normal(scale=0.3, size=(n_outputs, max(0, n_inputs - 1))) + self.eta = rng.normal(scale=0.2, size=n_outputs) + self.sine_dim = rng.integers(0, n_inputs, size=n_outputs) def evaluate(self, x: np.ndarray) -> np.ndarray: - """Evaluate all outputs at one input vector x.""" + """Evaluate the simple synthetic problem.""" x = np.asarray(x, dtype=float) - if x.shape != (self.n_inputs,): - msg = f"Expected x shape {(self.n_inputs,)}, got {x.shape}" - raise ValueError(msg) - - # Clamp defensively; upstream encoder should already map into [0, 1] x = np.clip(x, 0.0, 1.0) - z = np.array([self._eval_latent(x, ld) for ld in self.latent_defs], dtype=float) - y = self.mix_weights @ z - - # Add small output-specific residuals so not everything is perfectly low-rank - residual = np.array( - [self._eval_residual(x, rd) for rd in self.residual_defs], dtype=float - ) - y = y + residual - - if self.config.noise_std > 0: - # deterministic if seed fixed and call order fixed; default is off for stable tests - y = y + self.rng.normal( - 0.0, self.config.noise_std, size=self.config.n_outputs - ) - - y = (y - self.output_shift) / self.output_scale - return y - - def _make_latent_definition(self, k: int) -> dict[str, Any]: - """Create one latent function definition.""" - latent_type = k % 4 - dims = self.rng.choice( - self.n_inputs, size=self.active_dims_per_latent, replace=False - ) - - if latent_type == 0: - # additive sinusoid - return { - "type": "additive_sin", - "dims": dims, - "amp": self.rng.uniform(0.4, 1.2, size=len(dims)), - "freq": self.rng.integers(1, self.freq_max + 1, size=len(dims)), - "phase": self.rng.uniform(0.0, 2 * math.pi, size=len(dims)), - } - - if latent_type == 1: - # smooth quadratic bowl-ish feature - return { - "type": "quadratic", - "dims": dims, - 
"weight": self.rng.uniform(0.5, 1.5, size=len(dims)), - "center": self.rng.uniform(0.2, 0.8, size=len(dims)), - } - - if latent_type == 2: - # pairwise interaction latent - pair_count = max(1, len(dims) // 2) - pair_dims = dims[: 2 * pair_count].reshape(pair_count, 2) - return { - "type": "pairwise_sin", - "pairs": pair_dims, - "weight": self.rng.uniform(0.4, 1.0, size=pair_count), - } - - # Friedman-like latent, adapted to arbitrary dimension by cycling - d0 = dims[0 % len(dims)] - d1 = dims[1 % len(dims)] - d2 = dims[2 % len(dims)] - d3 = dims[3 % len(dims)] - d4 = dims[4 % len(dims)] - return { - "type": "friedman_like", - "dims": np.array([d0, d1, d2, d3, d4], dtype=int), - } - - def _make_residual_definition(self, j: int) -> dict[str, Any]: - """Create a small output-specific residual.""" - dims = self.rng.choice(self.n_inputs, size=min(3, self.n_inputs), replace=False) - return { - "dims": dims, - "amp": self.rng.uniform(0.2, 0.8, size=len(dims)) * self.residual_scale, - "freq": self.rng.integers(1, self.freq_max + 1, size=len(dims)), - "phase": self.rng.uniform(0.0, 2 * math.pi, size=len(dims)), - } - - def _eval_latent(self, x: np.ndarray, ld: dict[str, Any]) -> float: - t = ld["type"] - - if t == "additive_sin": - dims = ld["dims"] - return float( - np.sum( - ld["amp"] * np.sin(2 * math.pi * ld["freq"] * x[dims] + ld["phase"]) - ) - ) - - if t == "quadratic": - dims = ld["dims"] - xc = x[dims] - ld["center"] - return float(np.sum(ld["weight"] * xc * xc)) - - if t == "pairwise_sin": - total = 0.0 - for w, (i, j) in zip(ld["weight"], ld["pairs"], strict=True): - total += float(w * math.sin(math.pi * x[i] * x[j])) - return total - - if t == "friedman_like": - i0, i1, i2, i3, i4 = ld["dims"] - return float( - 10.0 * math.sin(math.pi * x[i0] * x[i1]) - + 20.0 * (x[i2] - 0.5) ** 2 - + 10.0 * x[i3] - + 5.0 * x[i4] - ) - - msg = f"Unknown latent type: {t}" - raise ValueError(msg) - - def _eval_residual(self, x: np.ndarray, rd: dict[str, Any]) -> float: - dims = 
rd["dims"] - return float( - np.sum(rd["amp"] * np.sin(2 * math.pi * rd["freq"] * x[dims] + rd["phase"])) - ) + linear = self.beta @ x + quad = self.gamma @ (x**2) - def _fit_normalization(self) -> None: - """Approximate output mean/std over a fixed reference design.""" - ref_rng = np.random.default_rng(self.seed + 1_000_000) - n_ref = 2048 if self.config.difficulty == "easy" else 4096 - X = ref_rng.uniform(0.0, 1.0, size=(n_ref, self.n_inputs)) + if self.n_inputs > 1: + pairwise_terms = x[:-1] * x[1:] + pairwise = self.delta @ pairwise_terms + else: + pairwise = np.zeros(self.n_outputs) - Y = np.zeros((n_ref, self.config.n_outputs), dtype=float) - for i in range(n_ref): - z = np.array( - [self._eval_latent(X[i], ld) for ld in self.latent_defs], dtype=float - ) - residual = np.array( - [self._eval_residual(X[i], rd) for rd in self.residual_defs], - dtype=float, - ) - Y[i] = self.mix_weights @ z + residual + periodic = np.array([ + self.eta[j] * math.sin(2 * math.pi * x[self.sine_dim[j]]) + for j in range(self.n_outputs) + ]) - self.output_shift = Y.mean(axis=0) - self.output_scale = Y.std(axis=0) - self.output_scale[self.output_scale < 1e-8] = 1.0 + return self.alpha + linear + quad + pairwise + periodic From 49553c2eea9d3d47cb983caed9fb7dba707458e1 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Mon, 9 Mar 2026 16:19:18 -0400 Subject: [PATCH 20/31] oops... 
--- src/globi/models/surrogate/dummy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/globi/models/surrogate/dummy.py b/src/globi/models/surrogate/dummy.py index ee33d5e..88eaee3 100644 --- a/src/globi/models/surrogate/dummy.py +++ b/src/globi/models/surrogate/dummy.py @@ -67,7 +67,7 @@ def dummy_simulation( problem = SimpleSyntheticProblem( n_inputs, n_outputs, - seed=input_spec.sort_index, + seed=42, ) y = problem.evaluate(np.array(input_spec.values)) From 89f2ba4e1c61bbe18ad08be35471ab14fddce601 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 09:42:24 -0400 Subject: [PATCH 21/31] fix insane race condition --- src/globi/models/surrogate/configs/pipeline.py | 10 +++++++++- src/globi/pipelines/training.py | 8 +++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/globi/models/surrogate/configs/pipeline.py b/src/globi/models/surrogate/configs/pipeline.py index 3b2ed96..05b65b2 100644 --- a/src/globi/models/surrogate/configs/pipeline.py +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -10,7 +10,7 @@ import pandas as pd from pydantic import BaseModel, Field from scythe.base import ExperimentInputSpec -from scythe.experiments import SerializableRunnable +from scythe.experiments import SemVer, SerializableRunnable from scythe.scatter_gather import RecursionMap, ScatterGatherResult from scythe.utils.filesys import OptionalFileReference, S3Url @@ -343,6 +343,14 @@ def context_data(self) -> pd.DataFrame | None: return None return pd.read_parquet(self.context_path) + @property + def current_version(self) -> SemVer: + """The current version.""" + vstr = [ + piece for piece in self.experiment_id.split("/") if piece.startswith("v") + ][-1] + return SemVer.FromString(vstr) + class StageSpec(BaseModel): """A spec that is common to both the sample and train stages (and possibly others).""" diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 
d768327..21c2476 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -355,13 +355,11 @@ def transition_recursion( run_name=f"{next_spec.base_run_name}", storage_settings=spec.storage_settings or ScytheStorageSettings(), ) + # manually bump minor here to avoid race conditions between e.g. simultaneously running v29.2.0 and v30.1.0... pretty sure the error only happens when they finish in the exact same second, but... it happened once so. _run, ref = exp.allocate( next_spec, - version="bumpminor", - recursion_map=RecursionMap( - factor=2, - max_depth=0, - ), + version=spec.current_version.next_minor_version(), + recursion_map=None, ) context.log("Recursion transitioned.") return RecursionTransition( From cfb0aa728543649cf34389c0dbd824e8dddc9eac Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 10:55:38 -0400 Subject: [PATCH 22/31] enable gpu indicator for tasks --- .env.scythe.training | 8 ++++++++ Makefile | 6 +++++- docker-compose.yml | 21 +++++++++++++++++++++ src/globi/pipelines/training.py | 2 ++ src/globi/worker/main.py | 23 ++--------------------- uv.lock | 2 +- 6 files changed, 39 insertions(+), 23 deletions(-) create mode 100644 .env.scythe.training diff --git a/.env.scythe.training b/.env.scythe.training new file mode 100644 index 0000000..90a58c0 --- /dev/null +++ b/.env.scythe.training @@ -0,0 +1,8 @@ +SCYTHE_WORKER_SLOTS=1 +SCYTHE_WORKER_DOES_FAN=False +SCYTHE_WORKER_DOES_LEAF=True +SCYTHE_WORKER_HAS_GPU=True + +SCYTHE_TIMEOUT_EXPERIMENT_SCHEDULE=10h +SCYTHE_TIMEOUT_SCATTER_GATHER_SCHEDULE=10h +SCYTHE_TIMEOUT_SCATTER_GATHER_EXECUTION=10h diff --git a/Makefile b/Makefile index 87de19d..1f2aae0 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -AWS_ENV ?= local.host +AWS_ENV ?= prod HATCHET_ENV ?= local.host ##################### Installation/Environment Management ##################### @@ -71,6 +71,10 @@ simulations-native: ## Run the simulations fanouts-native: ## 
Run the fanouts @uv run --env-file .env.$(AWS_ENV).aws --env-file .env.$(HATCHET_ENV).hatchet --env-file .env.scythe.storage --env-file .env.scythe.fanouts worker +.PHONY: training-native +training-native: ## Run the training + @uv run --env-file .env.$(AWS_ENV).aws --env-file .env.$(HATCHET_ENV).hatchet --env-file .env.scythe.storage --env-file .env.scythe.training worker + .PHONY: viz-native viz-native: ## Run the visualization tool # TODO: possibly add env vars to the command @uv run streamlit run src/globi/tools/visualization/main.py diff --git a/docker-compose.yml b/docker-compose.yml index 9e945da..a66b0b3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -16,6 +16,27 @@ services: deploy: mode: replicated replicas: ${SIMULATIONS_REPLICAS:-4} + volumes: + - ./inputs:/code/inputs + - ./outputs:/code/outputs + - ./tests/data/e2e:/code/tests/data/e2e + training: + image: ${AWS_ACCOUNT_ID:-123456789012}.dkr.ecr.${AWS_REGION:-us-east-1}.amazonaws.com/hatchet/globi:${IMAGE_TAG:-latest} + build: + context: . 
+ dockerfile: src/globi/worker/Dockerfile + args: + EP_VERSION: ${EP_VERSION:-25.2.0} + PYTHON_VERSION: ${PYTHON_VERSION:-3.12} + env_file: + - .env + - .env.${AWS_ENV:-local}.aws + - .env.${HATCHET_ENV:-local}.hatchet + - .env.scythe.storage + - .env.scythe.training + deploy: + mode: replicated + replicas: ${TRAINING_REPLICAS:-0} resources: reservations: devices: diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 21c2476..4ab59b5 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -18,6 +18,7 @@ from scythe.scatter_gather import RecursionMap, ScatterGatherResult, scatter_gather from scythe.settings import ScytheStorageSettings from scythe.utils.filesys import S3Url +from scythe.worker import ScytheWorkerLabel from globi.models.surrogate.outputs import ( CombineResultsResult, @@ -40,6 +41,7 @@ description="Train a regressor with cross-fold validation.", schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(hours=1), + desired_worker_labels=ScytheWorkerLabel.HAS_GPU.worker_label, ) def train_regressor_with_cv_fold( input_spec: TrainFoldSpec, tempdir: Path diff --git a/src/globi/worker/main.py b/src/globi/worker/main.py index 95fe32d..3b9f074 100644 --- a/src/globi/worker/main.py +++ b/src/globi/worker/main.py @@ -1,8 +1,5 @@ """Worker main script.""" -from scythe.hatchet import hatchet -from scythe.registry import ExperimentRegistry -from scythe.scatter_gather import scatter_gather from scythe.worker import ScytheWorkerConfig from globi.pipelines import * # noqa: F403 @@ -12,24 +9,8 @@ def main(): - """Main function for the worker.""" - # TODO: this is required since scythe does not allow registering extra tasks/workflows at the moment. 
- worker = hatchet.worker( - name=conf.computed_name, - slots=conf.computed_slots, - durable_slots=conf.computed_durable_slots, - labels=conf.labels, - ) - workflows = ([scatter_gather] if conf.DOES_FAN else []) + ( - ExperimentRegistry.experiments() if conf.DOES_LEAF else [] - ) - for workflow in workflows: - worker.register_workflow(workflow) - if conf.DOES_FAN: - worker.register_workflow(iterative_training) - worker.start() - - # conf.start() + """Start the worker.""" + conf.start(additional_workflows=[iterative_training]) if __name__ == "__main__": diff --git a/uv.lock b/uv.lock index 8b45dad..dd4635b 100644 --- a/uv.lock +++ b/uv.lock @@ -4591,7 +4591,7 @@ wheels = [ [[package]] name = "scythe-engine" version = "0.1.2" -source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#0bc501d15c20ab23b2379b690756fd3ff3267054" } +source = { git = "https://github.com/szvsw/scythe?branch=feature%2Fallow-versioning-workflows#54e0668df5ab4741d05925c3b5dddff39ff4c9e6" } dependencies = [ { name = "boto3" }, { name = "fastparquet" }, From 4cc66534d43839cb8e36d932b267ca39737e9ded Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 10:57:22 -0400 Subject: [PATCH 23/31] update gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index b31cb9f..3e3a71c 100644 --- a/.gitignore +++ b/.gitignore @@ -217,3 +217,5 @@ inputs/ .env.local.hatchet .env.local.host.hatchet + +scratch/ From 1db2fd3ebb72c23e5a8bb4d0d6ce86fd0196472c Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:04:21 -0400 Subject: [PATCH 24/31] add defaults --- .env.scythe.training | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.env.scythe.training b/.env.scythe.training index 90a58c0..d2e3634 100644 --- a/.env.scythe.training +++ b/.env.scythe.training @@ -3,6 +3,7 @@ SCYTHE_WORKER_DOES_FAN=False 
SCYTHE_WORKER_DOES_LEAF=True SCYTHE_WORKER_HAS_GPU=True -SCYTHE_TIMEOUT_EXPERIMENT_SCHEDULE=10h +SCYTHE_TIMEOUT_EXPERIMENT_SCHEDULE=2h +SCYTHE_TIMEOUT_EXPERIMENT_EXECUTION=1h SCYTHE_TIMEOUT_SCATTER_GATHER_SCHEDULE=10h SCYTHE_TIMEOUT_SCATTER_GATHER_EXECUTION=10h From 676a9b96127397087f2c3202e0baa3d439725ecd Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:25:26 -0400 Subject: [PATCH 25/31] restore old env args --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1f2aae0..67c54e6 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -AWS_ENV ?= prod +AWS_ENV ?= local.host HATCHET_ENV ?= local.host ##################### Installation/Environment Management ##################### From 8d417db4fb322b46d7f25cfab68ad68a8945edd8 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:30:48 -0400 Subject: [PATCH 26/31] drop torch temporarily --- pyproject.toml | 28 +--- uv.lock | 441 ------------------------------------------------- 2 files changed, 7 insertions(+), 462 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e9d3431..a1d02e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,25 +47,21 @@ visualization = [ ] # ml = [ -# "torch>=2.5.0", # "lightgbm>=4.6.0", # "xgboost>=3.2.0", # "pytorch-tabular>=1.2.0", +# "torch>=2.5.0", # "tensorboard>=2.20.0", # "wandb>=0.25.0", -# "pytorch-tabular>=1.2.0", -# "torch>=2.5.0", # ] ml-gpu = [ - "torch>=2.5.0", "lightgbm>=4.6.0", "xgboost>=3.2.0", - "pytorch-tabular>=1.2.0", + # "pytorch-tabular>=1.2.0", + # "torch>=2.5.0", "tensorboard>=2.20.0", "wandb>=0.25.0", - "pytorch-tabular>=1.2.0", - "torch>=2.5.0", ] cli = [ @@ -96,14 +92,6 @@ docs = [ "mkdocs-click>=0.9.0", ] -# [tool.uv] -# conflicts = [ -# [ -# { extra = "ml" }, -# { extra = "ml-gpu" }, -# ], -# ] - [project.scripts] worker = "globi.worker.main:main" globi = "globi.tools.cli.main:cli" @@ 
-125,12 +113,10 @@ explicit = true [tool.uv.sources] # PyTorch: CUDA 12.8 on Linux/Windows (where builds exist), PyPI (CPU) on macOS -torch = [ - { index = "pytorch-cu128", marker = "sys_platform != 'darwin'", extra = "ml-gpu" }, -# { index = "pytorch-cpu", marker = "sys_platform != 'darwin'", extra = "ml" }, - { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml-gpu" }, -# { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml" }, -] +# torch = [ +# { index = "pytorch-cu128", marker = "sys_platform != 'darwin'", extra = "ml-gpu" }, +# { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml-gpu" }, +# ] # scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-optional-filerefs"} scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-versioning-workflows"} # scythe-engine = {path = "../scythe", editable = true} diff --git a/uv.lock b/uv.lock index dd4635b..9043efb 100644 --- a/uv.lock +++ b/uv.lock @@ -160,12 +160,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, ] -[[package]] -name = "antlr4-python3-runtime" -version = "4.9.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034, upload-time = "2021-11-06T17:52:23.524Z" } - [[package]] name = "anyio" version = "4.11.0" @@ -917,34 +911,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ce/c6/c71e82e041c95ffe6a92ac707785500aa2a515a4339c2c7dd67e3c449249/cramjam-2.11.0-cp314-cp314t-win_amd64.whl", hash = 
"sha256:028400d699442d40dbda02f74158c73d05cb76587a12490d0bfedd958fd49188", size = 1713108, upload-time = "2025-07-27T21:24:10.147Z" }, ] -[[package]] -name = "cuda-bindings" -version = "12.9.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cuda-pathfinder", marker = "sys_platform == 'linux'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/c2/65bfd79292b8ff18be4dd7f7442cea37bcbc1a228c1886f1dea515c45b67/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:694ba35023846625ef471257e6b5a4bc8af690f961d197d77d34b1d1db393f56", size = 11760260, upload-time = "2025-10-21T14:51:40.79Z" }, - { url = "https://files.pythonhosted.org/packages/a9/c1/dabe88f52c3e3760d861401bb994df08f672ec893b8f7592dc91626adcf3/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fda147a344e8eaeca0c6ff113d2851ffca8f7dfc0a6c932374ee5c47caa649c8", size = 12151019, upload-time = "2025-10-21T14:51:43.167Z" }, - { url = "https://files.pythonhosted.org/packages/05/8b/b4b2d1c7775fa403b64333e720cfcfccef8dcb9cdeb99947061ca5a77628/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cf8bfaedc238f3b115d957d1fd6562b7e8435ba57f6d0e2f87d0e7149ccb2da5", size = 11570071, upload-time = "2025-10-21T14:51:47.472Z" }, - { url = "https://files.pythonhosted.org/packages/63/56/e465c31dc9111be3441a9ba7df1941fe98f4aa6e71e8788a3fb4534ce24d/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:32bdc5a76906be4c61eb98f546a6786c5773a881f3b166486449b5d141e4a39f", size = 11906628, upload-time = "2025-10-21T14:51:49.905Z" }, - { url = "https://files.pythonhosted.org/packages/ec/07/6aff13bc1e977e35aaa6b22f52b172e2890c608c6db22438cf7ed2bf43a6/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:3adf4958dcf68ae7801a59b73fb00a8b37f8d0595060d66ceae111b1002de38d", size = 11566797, upload-time = "2025-10-21T14:51:54.581Z" }, - { url = "https://files.pythonhosted.org/packages/a3/84/1e6be415e37478070aeeee5884c2022713c1ecc735e6d82d744de0252eee/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56e0043c457a99ac473ddc926fe0dc4046694d99caef633e92601ab52cbe17eb", size = 11925991, upload-time = "2025-10-21T14:51:56.535Z" }, - { url = "https://files.pythonhosted.org/packages/1e/b5/96a6696e20c4ffd2b327f54c7d0fde2259bdb998d045c25d5dedbbe30290/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f53a7f453d4b2643d8663d036bafe29b5ba89eb904c133180f295df6dc151e5", size = 11624530, upload-time = "2025-10-21T14:52:01.539Z" }, - { url = "https://files.pythonhosted.org/packages/d1/af/6dfd8f2ed90b1d4719bc053ff8940e494640fe4212dc3dd72f383e4992da/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8b72ee72a9cc1b531db31eebaaee5c69a8ec3500e32c6933f2d3b15297b53686", size = 11922703, upload-time = "2025-10-21T14:52:03.585Z" }, - { url = "https://files.pythonhosted.org/packages/39/73/d2fc40c043bac699c3880bf88d3cebe9d88410cd043795382826c93a89f0/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:20f2699d61d724de3eb3f3369d57e2b245f93085cab44fd37c3bea036cea1a6f", size = 11565056, upload-time = "2025-10-21T14:52:08.338Z" }, - { url = "https://files.pythonhosted.org/packages/6c/19/90ac264acc00f6df8a49378eedec9fd2db3061bf9263bf9f39fd3d8377c3/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80bffc357df9988dca279734bc9674c3934a654cab10cadeed27ce17d8635ee", size = 11924658, upload-time = "2025-10-21T14:52:10.411Z" }, -] - -[[package]] -name = "cuda-pathfinder" -version = "1.4.1" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/07/02/59a5bc738a09def0b49aea0e460bdf97f65206d0d041246147cf6207e69c/cuda_pathfinder-1.4.1-py3-none-any.whl", hash = "sha256:40793006082de88e0950753655e55558a446bed9a7d9d0bcb48b2506d50ed82a", size = 43903, upload-time = "2026-03-06T21:05:24.372Z" }, -] - [[package]] name = "cycler" version = "0.12.1" @@ -1002,15 +968,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] -[[package]] -name = "einops" -version = "0.8.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2c/77/850bef8d72ffb9219f0b1aac23fbc1bf7d038ee6ea666f331fa273031aa2/einops-0.8.2.tar.gz", hash = "sha256:609da665570e5e265e27283aab09e7f279ade90c4f01bcfca111f3d3e13f2827", size = 56261, upload-time = "2026-01-26T04:13:17.638Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/09/f8d8f8f31e4483c10a906437b4ce31bdf3d6d417b73fe33f1a8b59e34228/einops-0.8.2-py3-none-any.whl", hash = "sha256:54058201ac7087911181bfec4af6091bb59380360f069276601256a76af08193", size = 65638, upload-time = "2026-01-26T04:13:18.546Z" }, -] - [[package]] name = "energy-pandas" version = "0.4.1" @@ -1347,11 +1304,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" }, ] -[package.optional-dependencies] -http = [ - { name = "aiohttp" }, -] - [[package]] name = "future" version = "1.0.0" @@ -1441,10 +1393,7 @@ cli = [ ] ml-gpu = [ { name = "lightgbm" }, - { name = "pytorch-tabular" }, { name = "tensorboard" }, - { name = 
"torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, - { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, { name = "wandb" }, { name = "xgboost" }, ] @@ -1498,7 +1447,6 @@ requires-dist = [ { name = "plotly", marker = "extra == 'visualization'", specifier = ">=5.18.0" }, { name = "pydantic", specifier = ">=2.11,<3" }, { name = "pyproj", specifier = ">=3.6.0" }, - { name = "pytorch-tabular", marker = "extra == 'ml-gpu'", specifier = ">=1.2.0" }, { name = "rasterio", marker = "extra == 'visualization'", specifier = ">=1.3.9" }, { name = "scikit-learn", specifier = ">=1.3.0" }, { name = "scipy", specifier = ">=1.11.0,<1.15" }, @@ -1507,8 +1455,6 @@ requires-dist = [ { name = "shapely", specifier = ">=2.0.0" }, { name = "streamlit", marker = "extra == 'visualization'", specifier = ">=1.28.0" }, { name = "tensorboard", marker = "extra == 'ml-gpu'", specifier = ">=2.20.0" }, - { name = "torch", marker = "sys_platform == 'darwin' and extra == 'ml-gpu'", specifier = ">=2.5.0", index = "https://pypi.org/simple", conflict = { package = "globi", extra = "ml-gpu" } }, - { name = "torch", marker = "sys_platform != 'darwin' and extra == 'ml-gpu'", specifier = ">=2.5.0", index = "https://download.pytorch.org/whl/cu128", conflict = { package = "globi", extra = "ml-gpu" } }, { name = "wandb", marker = "extra == 'ml-gpu'", specifier = ">=0.25.0" }, { name = "xgboost", marker = "extra == 'ml-gpu'", specifier = ">=3.2.0" }, { name = "xlsxwriter", marker = "extra == 'cli'", specifier = ">=3.2.9" }, @@ -2249,19 +2195,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, 
] -[[package]] -name = "lightning-utilities" -version = "0.15.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "packaging" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f1/45/7fa8f56b17dc0f0a41ec70dd307ecd6787254483549843bef4c30ab5adce/lightning_utilities-0.15.3.tar.gz", hash = "sha256:792ae0204c79f6859721ac7f386c237a33b0ed06ba775009cb894e010a842033", size = 33553, upload-time = "2026-02-22T14:48:53.348Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/f4/ead6e0e37209b07c9baa3e984ccdb0348ca370b77cea3aaea8ddbb097e00/lightning_utilities-0.15.3-py3-none-any.whl", hash = "sha256:6c55f1bee70084a1cbeaa41ada96e4b3a0fea5909e844dd335bd80f5a73c5f91", size = 31906, upload-time = "2026-02-22T14:48:52.488Z" }, -] - [[package]] name = "littleutils" version = "0.2.4" @@ -2342,18 +2275,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/59/1b/6ef961f543593969d25b2afe57a3564200280528caa9bd1082eecdd7b3bc/markdown-3.10.1-py3-none-any.whl", hash = "sha256:867d788939fe33e4b736426f5b9f651ad0c0ae0ecf89df0ca5d1176c70812fe3", size = 107684, upload-time = "2026-01-21T18:09:27.203Z" }, ] -[[package]] -name = "markdown-it-py" -version = "4.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mdurl" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, -] - [[package]] name = "markupsafe" version = 
"3.0.3" @@ -2483,15 +2404,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, ] -[[package]] -name = "mdurl" -version = "0.1.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, -] - [[package]] name = "mergedeep" version = "1.3.4" @@ -2642,15 +2554,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/98/5c/2597cef67b6947b15c47f8dba967a0baf19fbdfdc86f6e4a8ba7af8b581a/mkdocstrings_python-1.19.0-py3-none-any.whl", hash = "sha256:395c1032af8f005234170575cc0c5d4d20980846623b623b35594281be4a3059", size = 143417, upload-time = "2025-11-10T13:30:54.164Z" }, ] -[[package]] -name = "mpmath" -version = "1.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = 
"sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, -] - [[package]] name = "msgpack" version = "1.1.2" @@ -3128,119 +3031,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" }, ] -[[package]] -name = "nvidia-cublas-cu12" -version = "12.8.4.1" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/29/99/db44d685f0e257ff0e213ade1964fc459b4a690a73293220e98feb3307cf/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0", size = 590537124, upload-time = "2025-03-07T01:43:53.556Z" }, - { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, -] - -[[package]] -name = "nvidia-cuda-cupti-cu12" -version = "12.8.90" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/1f/b3bd73445e5cb342727fd24fe1f7b748f690b460acadc27ea22f904502c8/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed", size = 9533318, upload-time = "2025-03-07T01:40:10.421Z" }, - { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", 
hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, -] - -[[package]] -name = "nvidia-cuda-nvrtc-cu12" -version = "12.8.93" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, - { url = "https://files.pythonhosted.org/packages/eb/d1/e50d0acaab360482034b84b6e27ee83c6738f7d32182b987f9c7a4e32962/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8", size = 43106076, upload-time = "2025-03-07T01:41:59.817Z" }, -] - -[[package]] -name = "nvidia-cuda-runtime-cu12" -version = "12.8.90" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/75/f865a3b236e4647605ea34cc450900854ba123834a5f1598e160b9530c3a/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d", size = 965265, upload-time = "2025-03-07T01:39:43.533Z" }, - { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, -] - -[[package]] -name = "nvidia-cudnn-cu12" -version = "9.10.2.21" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-cublas-cu12", marker = 
"sys_platform == 'linux'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" }, - { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, -] - -[[package]] -name = "nvidia-cufft-cu12" -version = "11.3.3.83" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/60/bc/7771846d3a0272026c416fbb7e5f4c1f146d6d80704534d0b187dd6f4800/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a", size = 193109211, upload-time = "2025-03-07T01:44:56.873Z" }, - { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, -] - -[[package]] -name = "nvidia-cufile-cu12" -version = "1.13.1.3" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, - { url = "https://files.pythonhosted.org/packages/1e/f5/5607710447a6fe9fd9b3283956fceeee8a06cda1d2f56ce31371f595db2a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a", size = 1120705, upload-time = "2025-03-07T01:45:41.434Z" }, -] - -[[package]] -name = "nvidia-curand-cu12" -version = "10.3.9.90" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/45/5e/92aa15eca622a388b80fbf8375d4760738df6285b1e92c43d37390a33a9a/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd", size = 63625754, upload-time = "2025-03-07T01:46:10.735Z" }, - { url = "https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" }, -] - -[[package]] -name = "nvidia-cusolver-cu12" -version = "11.7.3.90" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/32/f7cd6ce8a7690544d084ea21c26e910a97e077c9b7f07bf5de623ee19981/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0", size = 267229841, upload-time = "2025-03-07T01:46:54.356Z" }, - { url = 
"https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, -] - -[[package]] -name = "nvidia-cusparse-cu12" -version = "12.5.8.93" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/bc/f7/cd777c4109681367721b00a106f491e0d0d15cfa1fd59672ce580ce42a97/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc", size = 288117129, upload-time = "2025-03-07T01:47:40.407Z" }, - { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, -] - -[[package]] -name = "nvidia-cusparselt-cu12" -version = "0.7.1" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/73/b9/598f6ff36faaece4b3c50d26f50e38661499ff34346f00e057760b35cc9d/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5", size = 283835557, upload-time = "2025-02-26T00:16:54.265Z" }, - { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, 
upload-time = "2025-02-26T00:15:44.104Z" }, -] - [[package]] name = "nvidia-nccl-cu12" version = "2.27.5" @@ -3250,46 +3040,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, ] -[[package]] -name = "nvidia-nvjitlink-cu12" -version = "12.8.93" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, - { url = "https://files.pythonhosted.org/packages/2a/a2/8cee5da30d13430e87bf99bb33455d2724d0a4a9cb5d7926d80ccb96d008/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7", size = 38386204, upload-time = "2025-03-07T01:49:43.612Z" }, -] - -[[package]] -name = "nvidia-nvshmem-cu12" -version = "3.4.5" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/6a/03aa43cc9bd3ad91553a88b5f6fb25ed6a3752ae86ce2180221962bc2aa5/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b48363fc6964dede448029434c6abed6c5e37f823cb43c3bcde7ecfc0457e15", size = 138936938, upload-time = "2025-09-06T00:32:05.589Z" }, - { url = "https://files.pythonhosted.org/packages/b5/09/6ea3ea725f82e1e76684f0708bbedd871fc96da89945adeba65c3835a64c/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:042f2500f24c021db8a06c5eec2539027d57460e1c1a762055a6554f72c369bd", size = 139103095, upload-time = "2025-09-06T00:32:31.266Z" }, -] - -[[package]] -name = "nvidia-nvtx-cu12" -version = "12.8.90" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/10/c0/1b303feea90d296f6176f32a2a70b5ef230f9bdeb3a72bddb0dc922dc137/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615", size = 91161, upload-time = "2025-03-07T01:42:23.922Z" }, - { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, -] - -[[package]] -name = "omegaconf" -version = "2.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "antlr4-python3-runtime" }, - { name = "pyyaml" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120, upload-time = "2022-12-08T20:59:22.753Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500, upload-time = "2022-12-08T20:59:19.686Z" }, -] - [[package]] name = "openpyxl" version = "3.1.5" @@ -4165,49 +3915,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = 
"sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" }, ] -[[package]] -name = "pytorch-lightning" -version = "2.6.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "fsspec", extra = ["http"] }, - { name = "lightning-utilities" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, - { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, - { name = "torchmetrics" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/8b/ac/ebd5f6f58691cbd4f73836e43e1727f3814311b960c41f88e259606ca2b2/pytorch_lightning-2.6.1.tar.gz", hash = "sha256:ba08f8901cf226fcca473046ad9346f414e99117762dc869c76e650d5b3d7bdc", size = 665563, upload-time = "2026-01-30T14:59:11.636Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0e/93/c8c361bf0a2fe50f828f32def460e8b8a14b93955d3fd302b1a9b63b19e4/pytorch_lightning-2.6.1-py3-none-any.whl", hash = "sha256:1f8118567ec829e3055f16cf1aa320883a86a47c836951bfd9dcfa34ec7ffd59", size = 857273, upload-time = "2026-01-30T14:59:10.141Z" }, -] - -[[package]] -name = "pytorch-tabular" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "einops" }, - { name = "numpy" }, - { name = "omegaconf" }, - { name = "pandas" }, - { name = "pytorch-lightning" }, - { name = "rich" }, - { name = "scikit-base" }, - { name = "scikit-learn" }, - { name = "scipy" }, - { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, - { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 
'darwin'" }, - { name = "torchmetrics" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0b/f2/823de16d6a461504f4ed8e4a555d6ce356e5f81e6525d95e2b64895ec94f/pytorch_tabular-1.2.0.tar.gz", hash = "sha256:1b96b576eb3de443840b313d0b298293eaf83dcfdbba53ed8974b76d1351b821", size = 2312825, upload-time = "2026-01-26T21:48:22.577Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6f/c9/1e01c682e2ad7132bc1943d8d367c96f241bf85679e76d66eb0c4e4cbde9/pytorch_tabular-1.2.0-py3-none-any.whl", hash = "sha256:0a59f8a2304856b3d1e905f7b66153ebc65df1a6a017f2c8a13a29f62dc95b26", size = 165800, upload-time = "2026-01-26T21:48:21.195Z" }, -] - [[package]] name = "pytz" version = "2025.2" @@ -4386,19 +4093,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] -[[package]] -name = "rich" -version = "14.3.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown-it-py" }, - { name = "pygments" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b3/c6/f3b320c27991c46f43ee9d856302c70dc2d0fb2dba4842ff739d5f46b393/rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b", size = 230582, upload-time = "2026-02-19T17:23:12.474Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/14/25/b208c5683343959b670dc001595f2f3737e051da617f66c31f7c4fa93abc/rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d", size = 310458, upload-time = "2026-02-19T17:23:13.732Z" }, -] - [[package]] name = "rpds-py" version = "0.28.0" @@ -4518,15 +4212,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, ] -[[package]] -name = "scikit-base" -version = "0.13.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/56/a8/610f99f01f326178b8a7347db2ede654b42548e9697b516480cc081e344d/scikit_base-0.13.1.tar.gz", hash = "sha256:169e5427233f7237b38c7d858bf07b8a86bbf59feccf0708e26dad4ac312c593", size = 134482, upload-time = "2026-01-25T11:31:38.814Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/55/c20d8319aab037e11f1d6403b6102d1041694abe24a3aa4a1e27f2cdb9f2/scikit_base-0.13.1-py3-none-any.whl", hash = "sha256:1aca86759435fd2d32d83a526ce11095119c0745e4e5dd91f2e5820023ca8e39", size = 159779, upload-time = "2026-01-25T11:31:36.759Z" }, -] - [[package]] name = "scikit-learn" version = "1.7.2" @@ -4801,18 +4486,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/39/60/868371b6482ccd9ef423c6f62650066cf8271fdb2ee84f192695ad6b7a96/streamlit-1.51.0-py3-none-any.whl", hash = "sha256:4008b029f71401ce54946bb09a6a3e36f4f7652cbb48db701224557738cfda38", size = 10171702, upload-time = "2025-10-29T17:07:35.97Z" }, ] -[[package]] -name = "sympy" -version = "1.14.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mpmath" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = 
"sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, -] - [[package]] name = "tables" version = "3.10.2" @@ -4987,103 +4660,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, ] -[[package]] -name = "torch" -version = "2.10.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.14' and sys_platform == 'darwin'", - "python_full_version < '3.14' and sys_platform == 'darwin'", -] -dependencies = [ - { name = "filelock", marker = "sys_platform == 'darwin'" }, - { name = "fsspec", marker = "sys_platform == 'darwin'" }, - { name = "jinja2", marker = "sys_platform == 'darwin'" }, - { name = "networkx", marker = "sys_platform == 'darwin'" }, - { name = "setuptools", marker = "sys_platform == 'darwin'" }, - { name = "sympy", marker = "sys_platform == 'darwin'" }, - { name = "typing-extensions", marker = "sys_platform == 'darwin'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/d3/54/a2ba279afcca44bbd320d4e73675b282fcee3d81400ea1b53934efca6462/torch-2.10.0-2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:13ec4add8c3faaed8d13e0574f5cd4a323c11655546f91fbe6afa77b57423574", size = 79498202, upload-time = "2026-02-10T21:44:52.603Z" }, - { url = "https://files.pythonhosted.org/packages/ec/23/2c9fe0c9c27f7f6cb865abcea8a4568f29f00acaeadfc6a37f6801f84cb4/torch-2.10.0-2-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:e521c9f030a3774ed770a9c011751fb47c4d12029a3d6522116e48431f2ff89e", size = 79498254, upload-time = "2026-02-10T21:44:44.095Z" }, - { url = 
"https://files.pythonhosted.org/packages/c9/5c/dee910b87c4d5c0fcb41b50839ae04df87c1cfc663cf1b5fca7ea565eeaa/torch-2.10.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:6d3707a61863d1c4d6ebba7be4ca320f42b869ee657e9b2c21c736bf17000294", size = 79498198, upload-time = "2026-01-21T16:24:34.704Z" }, - { url = "https://files.pythonhosted.org/packages/1a/0b/39929b148f4824bc3ad6f9f72a29d4ad865bcf7ebfc2fa67584773e083d2/torch-2.10.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:3202429f58309b9fa96a614885eace4b7995729f44beb54d3e4a47773649d382", size = 79851305, upload-time = "2026-01-21T16:24:09.209Z" }, - { url = "https://files.pythonhosted.org/packages/0e/13/e76b4d9c160e89fff48bf16b449ea324bda84745d2ab30294c37c2434c0d/torch-2.10.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:cdf2a523d699b70d613243211ecaac14fe9c5df8a0b0a9c02add60fb2a413e0f", size = 79498248, upload-time = "2026-01-21T16:23:09.315Z" }, - { url = "https://files.pythonhosted.org/packages/4f/93/716b5ac0155f1be70ed81bacc21269c3ece8dba0c249b9994094110bfc51/torch-2.10.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:bf0d9ff448b0218e0433aeb198805192346c4fd659c852370d5cc245f602a06a", size = 79464992, upload-time = "2026-01-21T16:23:05.162Z" }, - { url = "https://files.pythonhosted.org/packages/d8/94/71994e7d0d5238393df9732fdab607e37e2b56d26a746cb59fdb415f8966/torch-2.10.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:f5ab4ba32383061be0fb74bda772d470140a12c1c3b58a0cfbf3dae94d164c28", size = 79850324, upload-time = "2026-01-21T16:22:09.494Z" }, -] - -[[package]] -name = "torch" -version = "2.10.0+cu128" -source = { registry = "https://download.pytorch.org/whl/cu128" } -resolution-markers = [ - "python_full_version >= '3.14' and sys_platform == 'linux'", - "python_full_version < '3.14' and sys_platform == 'linux'", - "python_full_version >= '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", - "python_full_version < '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", 
-] -dependencies = [ - { name = "cuda-bindings", marker = "sys_platform == 'linux'" }, - { name = "filelock", marker = "sys_platform != 'darwin'" }, - { name = "fsspec", marker = "sys_platform != 'darwin'" }, - { name = "jinja2", marker = "sys_platform != 'darwin'" }, - { name = "networkx", marker = "sys_platform != 'darwin'" }, - { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cuda-nvrtc-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cudnn-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cufft-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cufile-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-curand-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cusolver-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-cusparselt-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-nccl-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-nvshmem-cu12", marker = "sys_platform == 'linux'" }, - { name = "nvidia-nvtx-cu12", marker = "sys_platform == 'linux'" }, - { name = "setuptools", marker = "sys_platform != 'darwin'" }, - { name = "sympy", marker = "sys_platform != 'darwin'" }, - { name = "triton", marker = "sys_platform == 'linux'" }, - { name = "typing-extensions", marker = "sys_platform != 'darwin'" }, -] -wheels = [ - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:6f09cdf2415516be028ae82e6b985bcfc3eac37bc52ab401142689f6224516ca" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:628e89bd5110ced7debee2a57c69959725b7fbc64eab81a39dd70e46c7e28ba5" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:fbde8f6a9ec8c76979a0d14df21c10b9e5cab6f0d106a73ca73e2179bc597cae" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:bdbcc703382f948e951c063448c9406bf38ce66c41dd698d9e2733fcf96c037a" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:7b4bd23ed63de97456fcc81c26fea9f02ee02ce1112111c4dac0d8cfe574b23e" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-win_amd64.whl", hash = "sha256:4d1b0b49c54223c7c04050b49eac141d77b6edbc34aea1dfc74a6fdb661baa8c" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:f1f8b840c64b645a4bc61a393db48effb9c92b2dc26c8373873911f0750d1ea7" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:23f58258012bcf1c349cb22af387e33aadca7f83ea617b080e774eb41e4fe8ff" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:01b216e097b17a5277cfb47c383cdcacf06abeadcb0daca0c76b59e72854c3b6" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:c42377bc2607e3e1c60da71b792fb507c3938c87fd6edab8b21c59c91473c36d" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:37d71feea068776855686a1512058df3f19f6f040a151f055aa746601678744f" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-win_amd64.whl", hash = "sha256:c57017ca29e62271e362fdeee7d20070e254755a5148b30b553d8a10fc83c7ef" }, - { url = 
"https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:777461f50b2daf77e4bdd8e2ad34bdfc5a993bf1bdf2ab9ef39f5edfe4e9c12b" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7bcba6a7c5f0987a13298b1ca843155dcceceac758fa3c7ccd5c7af4059a1080" }, - { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-win_amd64.whl", hash = "sha256:70d89143c956389d4806cb4e5fe0b1129fe0db280e1073288d17fa76c101cba4" }, -] - -[[package]] -name = "torchmetrics" -version = "1.8.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "lightning-utilities" }, - { name = "numpy" }, - { name = "packaging" }, - { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, - { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/85/2e/48a887a59ecc4a10ce9e8b35b3e3c5cef29d902c4eac143378526e7485cb/torchmetrics-1.8.2.tar.gz", hash = "sha256:cf64a901036bf107f17a524009eea7781c9c5315d130713aeca5747a686fe7a5", size = 580679, upload-time = "2025-09-03T14:00:54.077Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/02/21/aa0f434434c48490f91b65962b1ce863fdcce63febc166ca9fe9d706c2b6/torchmetrics-1.8.2-py3-none-any.whl", hash = "sha256:08382fd96b923e39e904c4d570f3d49e2cc71ccabd2a94e0f895d1f0dac86242", size = 983161, upload-time = "2025-09-03T14:00:51.921Z" }, -] - [[package]] name = "tornado" version = "6.5.2" @@ -5136,23 +4712,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/61/7a/f38385f1b2d5f54221baf1db3d6371dc6eef8041d95abff39576c694e9d9/transforms3d-0.4.2-py3-none-any.whl", hash = "sha256:1c70399d9e9473ecc23311fd947f727f7c69ed0b063244828c383aa1aefa5941", size = 
1376759, upload-time = "2024-06-20T11:09:19.43Z" }, ] -[[package]] -name = "triton" -version = "3.6.0" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/17/5d/08201db32823bdf77a0e2b9039540080b2e5c23a20706ddba942924ebcd6/triton-3.6.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:374f52c11a711fd062b4bfbb201fd9ac0a5febd28a96fb41b4a0f51dde3157f4", size = 176128243, upload-time = "2026-01-20T16:16:07.857Z" }, - { url = "https://files.pythonhosted.org/packages/ab/a8/cdf8b3e4c98132f965f88c2313a4b493266832ad47fb52f23d14d4f86bb5/triton-3.6.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74caf5e34b66d9f3a429af689c1c7128daba1d8208df60e81106b115c00d6fca", size = 188266850, upload-time = "2026-01-20T16:00:43.041Z" }, - { url = "https://files.pythonhosted.org/packages/3c/12/34d71b350e89a204c2c7777a9bba0dcf2f19a5bfdd70b57c4dbc5ffd7154/triton-3.6.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448e02fe6dc898e9e5aa89cf0ee5c371e99df5aa5e8ad976a80b93334f3494fd", size = 176133521, upload-time = "2026-01-20T16:16:13.321Z" }, - { url = "https://files.pythonhosted.org/packages/f9/0b/37d991d8c130ce81a8728ae3c25b6e60935838e9be1b58791f5997b24a54/triton-3.6.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c7f76c6e72d2ef08df639e3d0d30729112f47a56b0c81672edc05ee5116ac9", size = 188289450, upload-time = "2026-01-20T16:00:49.136Z" }, - { url = "https://files.pythonhosted.org/packages/ce/4e/41b0c8033b503fd3cfcd12392cdd256945026a91ff02452bef40ec34bee7/triton-3.6.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1722e172d34e32abc3eb7711d0025bb69d7959ebea84e3b7f7a341cd7ed694d6", size = 176276087, upload-time = "2026-01-20T16:16:18.989Z" }, - { url = 
"https://files.pythonhosted.org/packages/35/f8/9c66bfc55361ec6d0e4040a0337fb5924ceb23de4648b8a81ae9d33b2b38/triton-3.6.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d002e07d7180fd65e622134fbd980c9a3d4211fb85224b56a0a0efbd422ab72f", size = 188400296, upload-time = "2026-01-20T16:00:56.042Z" }, - { url = "https://files.pythonhosted.org/packages/49/55/5ecf0dcaa0f2fbbd4420f7ef227ee3cb172e91e5fede9d0ecaddc43363b4/triton-3.6.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5523241e7d1abca00f1d240949eebdd7c673b005edbbce0aca95b8191f1d43", size = 176138577, upload-time = "2026-01-20T16:16:25.426Z" }, - { url = "https://files.pythonhosted.org/packages/df/3d/9e7eee57b37c80cec63322c0231bb6da3cfe535a91d7a4d64896fcb89357/triton-3.6.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a17a5d5985f0ac494ed8a8e54568f092f7057ef60e1b0fa09d3fd1512064e803", size = 188273063, upload-time = "2026-01-20T16:01:07.278Z" }, - { url = "https://files.pythonhosted.org/packages/48/db/56ee649cab5eaff4757541325aca81f52d02d4a7cd3506776cad2451e060/triton-3.6.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b3a97e8ed304dfa9bd23bb41ca04cdf6b2e617d5e782a8653d616037a5d537d", size = 176274804, upload-time = "2026-01-20T16:16:31.528Z" }, - { url = "https://files.pythonhosted.org/packages/f6/56/6113c23ff46c00aae423333eb58b3e60bdfe9179d542781955a5e1514cb3/triton-3.6.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46bd1c1af4b6704e554cad2eeb3b0a6513a980d470ccfa63189737340c7746a7", size = 188397994, upload-time = "2026-01-20T16:01:14.236Z" }, -] - [[package]] name = "tsam" version = "2.3.9" From 53342876d6e4b0798c4f3a3f4bb35edd77fb70fc Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:54:46 -0400 Subject: [PATCH 27/31] re-enable torch --- pyproject.toml | 13 +- 
.../models/surrogate/configs/regression.py | 9 +- uv.lock | 443 ++++++++++++++++++ 3 files changed, 456 insertions(+), 9 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a1d02e3..40f537e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,8 +58,9 @@ visualization = [ ml-gpu = [ "lightgbm>=4.6.0", "xgboost>=3.2.0", - # "pytorch-tabular>=1.2.0", - # "torch>=2.5.0", + "numba>=0.63.1", + "pytorch-tabular>=1.2.0", + "torch>=2.5.0", "tensorboard>=2.20.0", "wandb>=0.25.0", ] @@ -113,10 +114,10 @@ explicit = true [tool.uv.sources] # PyTorch: CUDA 12.8 on Linux/Windows (where builds exist), PyPI (CPU) on macOS -# torch = [ -# { index = "pytorch-cu128", marker = "sys_platform != 'darwin'", extra = "ml-gpu" }, -# { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml-gpu" }, -# ] +torch = [ + { index = "pytorch-cu128", marker = "sys_platform != 'darwin'", extra = "ml-gpu" }, + { index = "pypi", marker = "sys_platform == 'darwin'", extra = "ml-gpu" }, +] # scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-optional-filerefs"} scythe-engine = {git = "https://github.com/szvsw/scythe", branch = "feature/allow-versioning-workflows"} # scythe-engine = {path = "../scythe", editable = true} diff --git a/src/globi/models/surrogate/configs/regression.py b/src/globi/models/surrogate/configs/regression.py index b65c64a..edebb95 100644 --- a/src/globi/models/surrogate/configs/regression.py +++ b/src/globi/models/surrogate/configs/regression.py @@ -1,5 +1,6 @@ """Configs for the surrogate model pipeline.""" +import warnings from typing import Any, Literal from pydantic import BaseModel, Field @@ -43,7 +44,7 @@ def param_dict(self) -> dict[str, Any]: """The dictionary of parameters.""" import torch - data = { + params = { "objective": "reg:squarederror", "eval_metric": "rmse", "tree_method": "auto", @@ -54,8 +55,10 @@ def param_dict(self) -> dict[str, Any]: ), } if torch.cuda.is_available(): - data["device"] = "cuda" - return 
data + params["device"] = "cuda" + else: + warnings.warn("CUDA is not available, using CPU.", stacklevel=3) + return params class XGBHyperparameters(BaseModel): diff --git a/uv.lock b/uv.lock index 9043efb..7c83226 100644 --- a/uv.lock +++ b/uv.lock @@ -160,6 +160,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, ] +[[package]] +name = "antlr4-python3-runtime" +version = "4.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034, upload-time = "2021-11-06T17:52:23.524Z" } + [[package]] name = "anyio" version = "4.11.0" @@ -911,6 +917,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ce/c6/c71e82e041c95ffe6a92ac707785500aa2a515a4339c2c7dd67e3c449249/cramjam-2.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:028400d699442d40dbda02f74158c73d05cb76587a12490d0bfedd958fd49188", size = 1713108, upload-time = "2025-07-27T21:24:10.147Z" }, ] +[[package]] +name = "cuda-bindings" +version = "12.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cuda-pathfinder", marker = "sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/c2/65bfd79292b8ff18be4dd7f7442cea37bcbc1a228c1886f1dea515c45b67/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:694ba35023846625ef471257e6b5a4bc8af690f961d197d77d34b1d1db393f56", size = 11760260, upload-time = "2025-10-21T14:51:40.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/c1/dabe88f52c3e3760d861401bb994df08f672ec893b8f7592dc91626adcf3/cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fda147a344e8eaeca0c6ff113d2851ffca8f7dfc0a6c932374ee5c47caa649c8", size = 12151019, upload-time = "2025-10-21T14:51:43.167Z" }, + { url = "https://files.pythonhosted.org/packages/05/8b/b4b2d1c7775fa403b64333e720cfcfccef8dcb9cdeb99947061ca5a77628/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cf8bfaedc238f3b115d957d1fd6562b7e8435ba57f6d0e2f87d0e7149ccb2da5", size = 11570071, upload-time = "2025-10-21T14:51:47.472Z" }, + { url = "https://files.pythonhosted.org/packages/63/56/e465c31dc9111be3441a9ba7df1941fe98f4aa6e71e8788a3fb4534ce24d/cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:32bdc5a76906be4c61eb98f546a6786c5773a881f3b166486449b5d141e4a39f", size = 11906628, upload-time = "2025-10-21T14:51:49.905Z" }, + { url = "https://files.pythonhosted.org/packages/ec/07/6aff13bc1e977e35aaa6b22f52b172e2890c608c6db22438cf7ed2bf43a6/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3adf4958dcf68ae7801a59b73fb00a8b37f8d0595060d66ceae111b1002de38d", size = 11566797, upload-time = "2025-10-21T14:51:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/a3/84/1e6be415e37478070aeeee5884c2022713c1ecc735e6d82d744de0252eee/cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56e0043c457a99ac473ddc926fe0dc4046694d99caef633e92601ab52cbe17eb", size = 11925991, upload-time = "2025-10-21T14:51:56.535Z" }, + { url = "https://files.pythonhosted.org/packages/1e/b5/96a6696e20c4ffd2b327f54c7d0fde2259bdb998d045c25d5dedbbe30290/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:1f53a7f453d4b2643d8663d036bafe29b5ba89eb904c133180f295df6dc151e5", size = 11624530, upload-time = "2025-10-21T14:52:01.539Z" }, + { url = "https://files.pythonhosted.org/packages/d1/af/6dfd8f2ed90b1d4719bc053ff8940e494640fe4212dc3dd72f383e4992da/cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8b72ee72a9cc1b531db31eebaaee5c69a8ec3500e32c6933f2d3b15297b53686", size = 11922703, upload-time = "2025-10-21T14:52:03.585Z" }, + { url = "https://files.pythonhosted.org/packages/39/73/d2fc40c043bac699c3880bf88d3cebe9d88410cd043795382826c93a89f0/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:20f2699d61d724de3eb3f3369d57e2b245f93085cab44fd37c3bea036cea1a6f", size = 11565056, upload-time = "2025-10-21T14:52:08.338Z" }, + { url = "https://files.pythonhosted.org/packages/6c/19/90ac264acc00f6df8a49378eedec9fd2db3061bf9263bf9f39fd3d8377c3/cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80bffc357df9988dca279734bc9674c3934a654cab10cadeed27ce17d8635ee", size = 11924658, upload-time = "2025-10-21T14:52:10.411Z" }, +] + +[[package]] +name = "cuda-pathfinder" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/02/59a5bc738a09def0b49aea0e460bdf97f65206d0d041246147cf6207e69c/cuda_pathfinder-1.4.1-py3-none-any.whl", hash = "sha256:40793006082de88e0950753655e55558a446bed9a7d9d0bcb48b2506d50ed82a", size = 43903, upload-time = "2026-03-06T21:05:24.372Z" }, +] + [[package]] name = "cycler" version = "0.12.1" @@ -968,6 +1002,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] +[[package]] +name = 
"einops" +version = "0.8.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/77/850bef8d72ffb9219f0b1aac23fbc1bf7d038ee6ea666f331fa273031aa2/einops-0.8.2.tar.gz", hash = "sha256:609da665570e5e265e27283aab09e7f279ade90c4f01bcfca111f3d3e13f2827", size = 56261, upload-time = "2026-01-26T04:13:17.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/09/f8d8f8f31e4483c10a906437b4ce31bdf3d6d417b73fe33f1a8b59e34228/einops-0.8.2-py3-none-any.whl", hash = "sha256:54058201ac7087911181bfec4af6091bb59380360f069276601256a76af08193", size = 65638, upload-time = "2026-01-26T04:13:18.546Z" }, +] + [[package]] name = "energy-pandas" version = "0.4.1" @@ -1304,6 +1347,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" }, ] +[package.optional-dependencies] +http = [ + { name = "aiohttp" }, +] + [[package]] name = "future" version = "1.0.0" @@ -1393,7 +1441,11 @@ cli = [ ] ml-gpu = [ { name = "lightgbm" }, + { name = "numba" }, + { name = "pytorch-tabular" }, { name = "tensorboard" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, { name = "wandb" }, { name = "xgboost" }, ] @@ -1441,12 +1493,14 @@ requires-dist = [ { name = "ladybug-core", specifier = ">=0.44.29" }, { name = "lightgbm", marker = "extra == 'ml-gpu'", specifier = ">=4.6.0" }, { name = "matplotlib", marker = "extra == 'visualization'", specifier = ">=3.8.0" }, + { name = "numba", marker = "extra == 'ml-gpu'", specifier = ">=0.63.1" }, { name = "numpy", 
specifier = ">=1.26.0" }, { name = "pandas", specifier = ">=2.1.0" }, { name = "playwright", marker = "extra == 'visualization'", specifier = ">=1.40.0" }, { name = "plotly", marker = "extra == 'visualization'", specifier = ">=5.18.0" }, { name = "pydantic", specifier = ">=2.11,<3" }, { name = "pyproj", specifier = ">=3.6.0" }, + { name = "pytorch-tabular", marker = "extra == 'ml-gpu'", specifier = ">=1.2.0" }, { name = "rasterio", marker = "extra == 'visualization'", specifier = ">=1.3.9" }, { name = "scikit-learn", specifier = ">=1.3.0" }, { name = "scipy", specifier = ">=1.11.0,<1.15" }, @@ -1455,6 +1509,8 @@ requires-dist = [ { name = "shapely", specifier = ">=2.0.0" }, { name = "streamlit", marker = "extra == 'visualization'", specifier = ">=1.28.0" }, { name = "tensorboard", marker = "extra == 'ml-gpu'", specifier = ">=2.20.0" }, + { name = "torch", marker = "sys_platform == 'darwin' and extra == 'ml-gpu'", specifier = ">=2.5.0", index = "https://pypi.org/simple", conflict = { package = "globi", extra = "ml-gpu" } }, + { name = "torch", marker = "sys_platform != 'darwin' and extra == 'ml-gpu'", specifier = ">=2.5.0", index = "https://download.pytorch.org/whl/cu128", conflict = { package = "globi", extra = "ml-gpu" } }, { name = "wandb", marker = "extra == 'ml-gpu'", specifier = ">=0.25.0" }, { name = "xgboost", marker = "extra == 'ml-gpu'", specifier = ">=3.2.0" }, { name = "xlsxwriter", marker = "extra == 'cli'", specifier = ">=3.2.9" }, @@ -2195,6 +2251,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, ] +[[package]] +name = "lightning-utilities" +version = "0.15.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "typing-extensions" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/f1/45/7fa8f56b17dc0f0a41ec70dd307ecd6787254483549843bef4c30ab5adce/lightning_utilities-0.15.3.tar.gz", hash = "sha256:792ae0204c79f6859721ac7f386c237a33b0ed06ba775009cb894e010a842033", size = 33553, upload-time = "2026-02-22T14:48:53.348Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/f4/ead6e0e37209b07c9baa3e984ccdb0348ca370b77cea3aaea8ddbb097e00/lightning_utilities-0.15.3-py3-none-any.whl", hash = "sha256:6c55f1bee70084a1cbeaa41ada96e4b3a0fea5909e844dd335bd80f5a73c5f91", size = 31906, upload-time = "2026-02-22T14:48:52.488Z" }, +] + [[package]] name = "littleutils" version = "0.2.4" @@ -2275,6 +2344,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/59/1b/6ef961f543593969d25b2afe57a3564200280528caa9bd1082eecdd7b3bc/markdown-3.10.1-py3-none-any.whl", hash = "sha256:867d788939fe33e4b736426f5b9f651ad0c0ae0ecf89df0ca5d1176c70812fe3", size = 107684, upload-time = "2026-01-21T18:09:27.203Z" }, ] +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + [[package]] name = "markupsafe" version = "3.0.3" @@ -2404,6 +2485,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, ] +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + [[package]] name = "mergedeep" version = "1.3.4" @@ -2554,6 +2644,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/98/5c/2597cef67b6947b15c47f8dba967a0baf19fbdfdc86f6e4a8ba7af8b581a/mkdocstrings_python-1.19.0-py3-none-any.whl", hash = "sha256:395c1032af8f005234170575cc0c5d4d20980846623b623b35594281be4a3059", size = 143417, upload-time = "2025-11-10T13:30:54.164Z" }, ] +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", 
size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, +] + [[package]] name = "msgpack" version = "1.1.2" @@ -3031,6 +3130,119 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" }, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.8.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/99/db44d685f0e257ff0e213ade1964fc459b4a690a73293220e98feb3307cf/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0", size = 590537124, upload-time = "2025-03-07T01:43:53.556Z" }, + { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/1f/b3bd73445e5cb342727fd24fe1f7b748f690b460acadc27ea22f904502c8/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed", size = 9533318, upload-time = "2025-03-07T01:40:10.421Z" }, + { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, + { url = "https://files.pythonhosted.org/packages/eb/d1/e50d0acaab360482034b84b6e27ee83c6738f7d32182b987f9c7a4e32962/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8", size = 43106076, upload-time = "2025-03-07T01:41:59.817Z" }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/75/f865a3b236e4647605ea34cc450900854ba123834a5f1598e160b9530c3a/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d", size = 965265, upload-time = "2025-03-07T01:39:43.533Z" }, + { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.10.2.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = 
"sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" }, + { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.3.83" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/bc/7771846d3a0272026c416fbb7e5f4c1f146d6d80704534d0b187dd6f4800/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a", size = 193109211, upload-time = "2025-03-07T01:44:56.873Z" }, + { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, +] + +[[package]] +name = "nvidia-cufile-cu12" +version = "1.13.1.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f5/5607710447a6fe9fd9b3283956fceeee8a06cda1d2f56ce31371f595db2a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a", size = 1120705, upload-time = "2025-03-07T01:45:41.434Z" }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.9.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/5e/92aa15eca622a388b80fbf8375d4760738df6285b1e92c43d37390a33a9a/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd", size = 63625754, upload-time = "2025-03-07T01:46:10.735Z" }, + { url = "https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.3.90" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/32/f7cd6ce8a7690544d084ea21c26e910a97e077c9b7f07bf5de623ee19981/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0", size = 267229841, upload-time = "2025-03-07T01:46:54.356Z" }, + { url = 
"https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.8.93" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/f7/cd777c4109681367721b00a106f491e0d0d15cfa1fd59672ce580ce42a97/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc", size = 288117129, upload-time = "2025-03-07T01:47:40.407Z" }, + { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/b9/598f6ff36faaece4b3c50d26f50e38661499ff34346f00e057760b35cc9d/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5", size = 283835557, upload-time = "2025-02-26T00:16:54.265Z" }, + { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, 
upload-time = "2025-02-26T00:15:44.104Z" }, +] + [[package]] name = "nvidia-nccl-cu12" version = "2.27.5" @@ -3040,6 +3252,46 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, ] +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.8.93" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, + { url = "https://files.pythonhosted.org/packages/2a/a2/8cee5da30d13430e87bf99bb33455d2724d0a4a9cb5d7926d80ccb96d008/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7", size = 38386204, upload-time = "2025-03-07T01:49:43.612Z" }, +] + +[[package]] +name = "nvidia-nvshmem-cu12" +version = "3.4.5" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/6a/03aa43cc9bd3ad91553a88b5f6fb25ed6a3752ae86ce2180221962bc2aa5/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b48363fc6964dede448029434c6abed6c5e37f823cb43c3bcde7ecfc0457e15", size = 138936938, upload-time = "2025-09-06T00:32:05.589Z" }, + { url = "https://files.pythonhosted.org/packages/b5/09/6ea3ea725f82e1e76684f0708bbedd871fc96da89945adeba65c3835a64c/nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:042f2500f24c021db8a06c5eec2539027d57460e1c1a762055a6554f72c369bd", size = 139103095, upload-time = "2025-09-06T00:32:31.266Z" }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.8.90" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/c0/1b303feea90d296f6176f32a2a70b5ef230f9bdeb3a72bddb0dc922dc137/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615", size = 91161, upload-time = "2025-03-07T01:42:23.922Z" }, + { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, +] + +[[package]] +name = "omegaconf" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "antlr4-python3-runtime" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120, upload-time = "2022-12-08T20:59:22.753Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500, upload-time = "2022-12-08T20:59:19.686Z" }, +] + [[package]] name = "openpyxl" version = "3.1.5" @@ -3915,6 +4167,49 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = 
"sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" }, ] +[[package]] +name = "pytorch-lightning" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fsspec", extra = ["http"] }, + { name = "lightning-utilities" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, + { name = "torchmetrics" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8b/ac/ebd5f6f58691cbd4f73836e43e1727f3814311b960c41f88e259606ca2b2/pytorch_lightning-2.6.1.tar.gz", hash = "sha256:ba08f8901cf226fcca473046ad9346f414e99117762dc869c76e650d5b3d7bdc", size = 665563, upload-time = "2026-01-30T14:59:11.636Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/93/c8c361bf0a2fe50f828f32def460e8b8a14b93955d3fd302b1a9b63b19e4/pytorch_lightning-2.6.1-py3-none-any.whl", hash = "sha256:1f8118567ec829e3055f16cf1aa320883a86a47c836951bfd9dcfa34ec7ffd59", size = 857273, upload-time = "2026-01-30T14:59:10.141Z" }, +] + +[[package]] +name = "pytorch-tabular" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "einops" }, + { name = "numpy" }, + { name = "omegaconf" }, + { name = "pandas" }, + { name = "pytorch-lightning" }, + { name = "rich" }, + { name = "scikit-base" }, + { name = "scikit-learn" }, + { name = "scipy" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 
'darwin'" }, + { name = "torchmetrics" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/f2/823de16d6a461504f4ed8e4a555d6ce356e5f81e6525d95e2b64895ec94f/pytorch_tabular-1.2.0.tar.gz", hash = "sha256:1b96b576eb3de443840b313d0b298293eaf83dcfdbba53ed8974b76d1351b821", size = 2312825, upload-time = "2026-01-26T21:48:22.577Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/c9/1e01c682e2ad7132bc1943d8d367c96f241bf85679e76d66eb0c4e4cbde9/pytorch_tabular-1.2.0-py3-none-any.whl", hash = "sha256:0a59f8a2304856b3d1e905f7b66153ebc65df1a6a017f2c8a13a29f62dc95b26", size = 165800, upload-time = "2026-01-26T21:48:21.195Z" }, +] + [[package]] name = "pytz" version = "2025.2" @@ -4093,6 +4388,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] +[[package]] +name = "rich" +version = "14.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/c6/f3b320c27991c46f43ee9d856302c70dc2d0fb2dba4842ff739d5f46b393/rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b", size = 230582, upload-time = "2026-02-19T17:23:12.474Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/25/b208c5683343959b670dc001595f2f3737e051da617f66c31f7c4fa93abc/rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d", size = 310458, upload-time = "2026-02-19T17:23:13.732Z" }, +] + [[package]] name = "rpds-py" version = "0.28.0" @@ -4212,6 +4520,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, ] +[[package]] +name = "scikit-base" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/56/a8/610f99f01f326178b8a7347db2ede654b42548e9697b516480cc081e344d/scikit_base-0.13.1.tar.gz", hash = "sha256:169e5427233f7237b38c7d858bf07b8a86bbf59feccf0708e26dad4ac312c593", size = 134482, upload-time = "2026-01-25T11:31:38.814Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/55/c20d8319aab037e11f1d6403b6102d1041694abe24a3aa4a1e27f2cdb9f2/scikit_base-0.13.1-py3-none-any.whl", hash = "sha256:1aca86759435fd2d32d83a526ce11095119c0745e4e5dd91f2e5820023ca8e39", size = 159779, upload-time = "2026-01-25T11:31:36.759Z" }, +] + [[package]] name = "scikit-learn" version = "1.7.2" @@ -4486,6 +4803,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/39/60/868371b6482ccd9ef423c6f62650066cf8271fdb2ee84f192695ad6b7a96/streamlit-1.51.0-py3-none-any.whl", hash = "sha256:4008b029f71401ce54946bb09a6a3e36f4f7652cbb48db701224557738cfda38", size = 10171702, upload-time = "2025-10-29T17:07:35.97Z" }, ] +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = 
"sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, +] + [[package]] name = "tables" version = "3.10.2" @@ -4660,6 +4989,103 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, ] +[[package]] +name = "torch" +version = "2.10.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14' and sys_platform == 'darwin'", + "python_full_version < '3.14' and sys_platform == 'darwin'", +] +dependencies = [ + { name = "filelock", marker = "sys_platform == 'darwin'" }, + { name = "fsspec", marker = "sys_platform == 'darwin'" }, + { name = "jinja2", marker = "sys_platform == 'darwin'" }, + { name = "networkx", marker = "sys_platform == 'darwin'" }, + { name = "setuptools", marker = "sys_platform == 'darwin'" }, + { name = "sympy", marker = "sys_platform == 'darwin'" }, + { name = "typing-extensions", marker = "sys_platform == 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/54/a2ba279afcca44bbd320d4e73675b282fcee3d81400ea1b53934efca6462/torch-2.10.0-2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:13ec4add8c3faaed8d13e0574f5cd4a323c11655546f91fbe6afa77b57423574", size = 79498202, upload-time = "2026-02-10T21:44:52.603Z" }, + { url = "https://files.pythonhosted.org/packages/ec/23/2c9fe0c9c27f7f6cb865abcea8a4568f29f00acaeadfc6a37f6801f84cb4/torch-2.10.0-2-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:e521c9f030a3774ed770a9c011751fb47c4d12029a3d6522116e48431f2ff89e", size = 79498254, upload-time = "2026-02-10T21:44:44.095Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/5c/dee910b87c4d5c0fcb41b50839ae04df87c1cfc663cf1b5fca7ea565eeaa/torch-2.10.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:6d3707a61863d1c4d6ebba7be4ca320f42b869ee657e9b2c21c736bf17000294", size = 79498198, upload-time = "2026-01-21T16:24:34.704Z" }, + { url = "https://files.pythonhosted.org/packages/1a/0b/39929b148f4824bc3ad6f9f72a29d4ad865bcf7ebfc2fa67584773e083d2/torch-2.10.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:3202429f58309b9fa96a614885eace4b7995729f44beb54d3e4a47773649d382", size = 79851305, upload-time = "2026-01-21T16:24:09.209Z" }, + { url = "https://files.pythonhosted.org/packages/0e/13/e76b4d9c160e89fff48bf16b449ea324bda84745d2ab30294c37c2434c0d/torch-2.10.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:cdf2a523d699b70d613243211ecaac14fe9c5df8a0b0a9c02add60fb2a413e0f", size = 79498248, upload-time = "2026-01-21T16:23:09.315Z" }, + { url = "https://files.pythonhosted.org/packages/4f/93/716b5ac0155f1be70ed81bacc21269c3ece8dba0c249b9994094110bfc51/torch-2.10.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:bf0d9ff448b0218e0433aeb198805192346c4fd659c852370d5cc245f602a06a", size = 79464992, upload-time = "2026-01-21T16:23:05.162Z" }, + { url = "https://files.pythonhosted.org/packages/d8/94/71994e7d0d5238393df9732fdab607e37e2b56d26a746cb59fdb415f8966/torch-2.10.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:f5ab4ba32383061be0fb74bda772d470140a12c1c3b58a0cfbf3dae94d164c28", size = 79850324, upload-time = "2026-01-21T16:22:09.494Z" }, +] + +[[package]] +name = "torch" +version = "2.10.0+cu128" +source = { registry = "https://download.pytorch.org/whl/cu128" } +resolution-markers = [ + "python_full_version >= '3.14' and sys_platform == 'linux'", + "python_full_version < '3.14' and sys_platform == 'linux'", + "python_full_version >= '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version < '3.14' and sys_platform != 'darwin' and sys_platform != 'linux'", 
+] +dependencies = [ + { name = "cuda-bindings", marker = "sys_platform == 'linux'" }, + { name = "filelock", marker = "sys_platform != 'darwin'" }, + { name = "fsspec", marker = "sys_platform != 'darwin'" }, + { name = "jinja2", marker = "sys_platform != 'darwin'" }, + { name = "networkx", marker = "sys_platform != 'darwin'" }, + { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cufile-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-curand-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvshmem-cu12", marker = "sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "sys_platform == 'linux'" }, + { name = "setuptools", marker = "sys_platform != 'darwin'" }, + { name = "sympy", marker = "sys_platform != 'darwin'" }, + { name = "triton", marker = "sys_platform == 'linux'" }, + { name = "typing-extensions", marker = "sys_platform != 'darwin'" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:6f09cdf2415516be028ae82e6b985bcfc3eac37bc52ab401142689f6224516ca" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:628e89bd5110ced7debee2a57c69959725b7fbc64eab81a39dd70e46c7e28ba5" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:fbde8f6a9ec8c76979a0d14df21c10b9e5cab6f0d106a73ca73e2179bc597cae" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:bdbcc703382f948e951c063448c9406bf38ce66c41dd698d9e2733fcf96c037a" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:7b4bd23ed63de97456fcc81c26fea9f02ee02ce1112111c4dac0d8cfe574b23e" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313-win_amd64.whl", hash = "sha256:4d1b0b49c54223c7c04050b49eac141d77b6edbc34aea1dfc74a6fdb661baa8c" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:f1f8b840c64b645a4bc61a393db48effb9c92b2dc26c8373873911f0750d1ea7" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:23f58258012bcf1c349cb22af387e33aadca7f83ea617b080e774eb41e4fe8ff" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:01b216e097b17a5277cfb47c383cdcacf06abeadcb0daca0c76b59e72854c3b6" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:c42377bc2607e3e1c60da71b792fb507c3938c87fd6edab8b21c59c91473c36d" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:37d71feea068776855686a1512058df3f19f6f040a151f055aa746601678744f" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314-win_amd64.whl", hash = "sha256:c57017ca29e62271e362fdeee7d20070e254755a5148b30b553d8a10fc83c7ef" }, + { url = 
"https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:777461f50b2daf77e4bdd8e2ad34bdfc5a993bf1bdf2ab9ef39f5edfe4e9c12b" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7bcba6a7c5f0987a13298b1ca843155dcceceac758fa3c7ccd5c7af4059a1080" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.10.0%2Bcu128-cp314-cp314t-win_amd64.whl", hash = "sha256:70d89143c956389d4806cb4e5fe0b1129fe0db280e1073288d17fa76c101cba4" }, +] + +[[package]] +name = "torchmetrics" +version = "1.8.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lightning-utilities" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "torch", version = "2.10.0", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform == 'darwin'" }, + { name = "torch", version = "2.10.0+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'darwin'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/2e/48a887a59ecc4a10ce9e8b35b3e3c5cef29d902c4eac143378526e7485cb/torchmetrics-1.8.2.tar.gz", hash = "sha256:cf64a901036bf107f17a524009eea7781c9c5315d130713aeca5747a686fe7a5", size = 580679, upload-time = "2025-09-03T14:00:54.077Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/21/aa0f434434c48490f91b65962b1ce863fdcce63febc166ca9fe9d706c2b6/torchmetrics-1.8.2-py3-none-any.whl", hash = "sha256:08382fd96b923e39e904c4d570f3d49e2cc71ccabd2a94e0f895d1f0dac86242", size = 983161, upload-time = "2025-09-03T14:00:51.921Z" }, +] + [[package]] name = "tornado" version = "6.5.2" @@ -4712,6 +5138,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/61/7a/f38385f1b2d5f54221baf1db3d6371dc6eef8041d95abff39576c694e9d9/transforms3d-0.4.2-py3-none-any.whl", hash = "sha256:1c70399d9e9473ecc23311fd947f727f7c69ed0b063244828c383aa1aefa5941", size = 
1376759, upload-time = "2024-06-20T11:09:19.43Z" }, ] +[[package]] +name = "triton" +version = "3.6.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/5d/08201db32823bdf77a0e2b9039540080b2e5c23a20706ddba942924ebcd6/triton-3.6.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:374f52c11a711fd062b4bfbb201fd9ac0a5febd28a96fb41b4a0f51dde3157f4", size = 176128243, upload-time = "2026-01-20T16:16:07.857Z" }, + { url = "https://files.pythonhosted.org/packages/ab/a8/cdf8b3e4c98132f965f88c2313a4b493266832ad47fb52f23d14d4f86bb5/triton-3.6.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74caf5e34b66d9f3a429af689c1c7128daba1d8208df60e81106b115c00d6fca", size = 188266850, upload-time = "2026-01-20T16:00:43.041Z" }, + { url = "https://files.pythonhosted.org/packages/3c/12/34d71b350e89a204c2c7777a9bba0dcf2f19a5bfdd70b57c4dbc5ffd7154/triton-3.6.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448e02fe6dc898e9e5aa89cf0ee5c371e99df5aa5e8ad976a80b93334f3494fd", size = 176133521, upload-time = "2026-01-20T16:16:13.321Z" }, + { url = "https://files.pythonhosted.org/packages/f9/0b/37d991d8c130ce81a8728ae3c25b6e60935838e9be1b58791f5997b24a54/triton-3.6.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c7f76c6e72d2ef08df639e3d0d30729112f47a56b0c81672edc05ee5116ac9", size = 188289450, upload-time = "2026-01-20T16:00:49.136Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4e/41b0c8033b503fd3cfcd12392cdd256945026a91ff02452bef40ec34bee7/triton-3.6.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1722e172d34e32abc3eb7711d0025bb69d7959ebea84e3b7f7a341cd7ed694d6", size = 176276087, upload-time = "2026-01-20T16:16:18.989Z" }, + { url = 
"https://files.pythonhosted.org/packages/35/f8/9c66bfc55361ec6d0e4040a0337fb5924ceb23de4648b8a81ae9d33b2b38/triton-3.6.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d002e07d7180fd65e622134fbd980c9a3d4211fb85224b56a0a0efbd422ab72f", size = 188400296, upload-time = "2026-01-20T16:00:56.042Z" }, + { url = "https://files.pythonhosted.org/packages/49/55/5ecf0dcaa0f2fbbd4420f7ef227ee3cb172e91e5fede9d0ecaddc43363b4/triton-3.6.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5523241e7d1abca00f1d240949eebdd7c673b005edbbce0aca95b8191f1d43", size = 176138577, upload-time = "2026-01-20T16:16:25.426Z" }, + { url = "https://files.pythonhosted.org/packages/df/3d/9e7eee57b37c80cec63322c0231bb6da3cfe535a91d7a4d64896fcb89357/triton-3.6.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a17a5d5985f0ac494ed8a8e54568f092f7057ef60e1b0fa09d3fd1512064e803", size = 188273063, upload-time = "2026-01-20T16:01:07.278Z" }, + { url = "https://files.pythonhosted.org/packages/48/db/56ee649cab5eaff4757541325aca81f52d02d4a7cd3506776cad2451e060/triton-3.6.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b3a97e8ed304dfa9bd23bb41ca04cdf6b2e617d5e782a8653d616037a5d537d", size = 176274804, upload-time = "2026-01-20T16:16:31.528Z" }, + { url = "https://files.pythonhosted.org/packages/f6/56/6113c23ff46c00aae423333eb58b3e60bdfe9179d542781955a5e1514cb3/triton-3.6.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46bd1c1af4b6704e554cad2eeb3b0a6513a980d470ccfa63189737340c7746a7", size = 188397994, upload-time = "2026-01-20T16:01:14.236Z" }, +] + [[package]] name = "tsam" version = "2.3.9" From 1cbb40f90902489840998c358a9c0c37fe765de5 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Wed, 11 Mar 2026 16:54:17 -0400 Subject: [PATCH 28/31] improve training with additional annual outputs, better target and feature 
selection, and better feature and target normalization --- src/globi/models/configs.py | 5 +- .../models/surrogate/configs/pipeline.py | 32 +- src/globi/models/surrogate/outputs.py | 1 + src/globi/models/surrogate/samplers.py | 38 +- src/globi/models/surrogate/sampling.py | 12 +- src/globi/models/surrogate/training.py | 523 +++++++++++++++--- src/globi/models/tasks.py | 82 ++- src/globi/pipelines/simulations.py | 35 +- src/globi/pipelines/training.py | 20 +- 9 files changed, 622 insertions(+), 126 deletions(-) diff --git a/src/globi/models/configs.py b/src/globi/models/configs.py index e860c09..4e64e9d 100644 --- a/src/globi/models/configs.py +++ b/src/globi/models/configs.py @@ -154,6 +154,7 @@ class GISPreprocessorColumnMap(BaseConfig): Basement_col: str Attic_col: str Exposed_Basement_Frac_col: str + # TODO: add basement_use_fraction, attic_use_fraction, attic_height class FileConfig(BaseConfig): @@ -162,9 +163,7 @@ class FileConfig(BaseConfig): gis_file: Path = Field(..., description="The path to the local GIS file.") db_file: Path # these could be file refs? semantic_fields_file: Path # these could be file refs? 
- epwzip_file: ( - Path | str | None - ) # TODO: our gis to model conversion should handle EPW identification; see gis job submission in epengine + epwzip_file: Path | str | None component_map_file: Path diff --git a/src/globi/models/surrogate/configs/pipeline.py b/src/globi/models/surrogate/configs/pipeline.py index 05b65b2..2185097 100644 --- a/src/globi/models/surrogate/configs/pipeline.py +++ b/src/globi/models/surrogate/configs/pipeline.py @@ -190,17 +190,37 @@ def run(self, metrics: pd.Series) -> tuple[bool, pd.Series, pd.Series, pd.Series return self.combine_and_check_strata_and_targets(comparisons) -class TargetsConfigSpec(BaseModel): +class TargetsConfigBaseSpec(BaseModel): + """The base targets config spec.""" + + normalization: ( + Literal[ + "min-max", + "standard", + ] + | None + ) = Field(default="min-max", description="The normalization method to use.") + + +class TargetsConfigColumnSpec(TargetsConfigBaseSpec): """The targets config spec.""" columns: list[str] = Field( default_factory=list, description="The columns to use as targets." ) - normalization: Literal["min-max", "standard", "none"] = Field( - default="none", description="The normalization method to use." + + +class TargetsConfigGlobSpec(TargetsConfigBaseSpec): + """The targets config spec.""" + + globs: list[str] = Field( + default_factory=list, description="The columns to use as targets." 
) +TargetsConfigSpec = TargetsConfigColumnSpec | TargetsConfigGlobSpec + + class FeatureConfigSpec(BaseModel): """The feature config spec.""" @@ -218,13 +238,17 @@ class FeatureConfigSpec(BaseModel): default=10, description="The threshold for the number of unique values to transition from continuous to categorical variable.", ) + cat_encoding: Literal["index", "one-hot"] = Field( + default="index", + description="The encoding method to use for categorical columns.", + ) class RegressionIOConfigSpec(BaseModel): """The input/output spec for a regression model.""" targets: TargetsConfigSpec = Field( - default_factory=TargetsConfigSpec, description="The targets config spec." + default_factory=TargetsConfigColumnSpec, description="The targets config spec." ) features: FeatureConfigSpec = Field( default_factory=FeatureConfigSpec, diff --git a/src/globi/models/surrogate/outputs.py b/src/globi/models/surrogate/outputs.py index 961df92..11df318 100644 --- a/src/globi/models/surrogate/outputs.py +++ b/src/globi/models/surrogate/outputs.py @@ -13,6 +13,7 @@ class CombineResultsResult(BaseModel): """The result of combining the results of the simulations.""" + previous: ScatterGatherResult | None incoming: ScatterGatherResult combined: ScatterGatherResult diff --git a/src/globi/models/surrogate/samplers.py b/src/globi/models/surrogate/samplers.py index 32acb4c..4e85d45 100644 --- a/src/globi/models/surrogate/samplers.py +++ b/src/globi/models/surrogate/samplers.py @@ -15,6 +15,7 @@ from pydantic import BaseModel, model_validator # TODO: Make sure that all of the samplers can be serialized and deserialized with proper discrimination, i.e. that they do not share identical field names. +# TODO: add support for keeping rows which already have values (i.e. do not overwrite values) class SamplingError(Exception): @@ -625,24 +626,29 @@ def depends_on(self) -> set[str]: class Priors(BaseModel): - """A collection of priors defining a dependency graph for sampling. 
- - The sampled_features dict must be ordered such that dependencies come before - dependents (i.e. topological order). Sampling iterates in dict order. - - TODO: Add automatic topological sort and validation that all required - target model fields appear as terminal nodes in the graph. - """ + """A collection of priors defining a dependency graph for sampling.""" sampled_features: dict[str, Prior] def sample(self, context: pd.DataFrame, n: int, generator: np.random.Generator): """Sample from all priors in dependency order.""" working_df = context.copy(deep=True) - # TODO: how to do we deal with race conditions here in the sense that - # some features may require previous features to have already been sampled? # TODO: Similarly, how do we ensure that there are no cycles in the dependency graph? - for feature, prior in self.sampled_features.items(): + for feature in self.topological_sort: + if feature not in self.sampled_features and feature not in context.columns: + msg = f"Feature {feature} not found in sampled features or context dataframe." + raise SamplingError(msg) + if feature not in self.sampled_features: + if feature not in self.root_features: + msg = ( + f"Feature {feature} not found in root features but expected to." + ) + raise SamplingError(msg) + + # it's a context feature dependency, so we can skip over to the next + continue + + prior = self.sampled_features[feature] working_df[feature] = prior.sample(working_df, n, generator) if working_df.isna().any().any(): # pyright: ignore [reportAttributeAccessIssue] # TODO: allow na values eg in training? @@ -670,8 +676,18 @@ def dependency_graph(self) -> nx.DiGraph: if prior.depends_on: for dependency in prior.depends_on: g.add_edge(dependency, feature) + # TODO: make sure that this is okay and that it does not cause problems with select_prior_tree_for_changed_features...
+ for feature in self.sampled_features: + if feature not in g.nodes: + g.add_node(feature) + return g + @property + def topological_sort(self) -> list[str]: + """The topological sort of the features.""" + return list(nx.topological_sort(self.dependency_graph)) + @property def root_features(self) -> set[str]: """The features that have no dependencies.""" diff --git a/src/globi/models/surrogate/sampling.py b/src/globi/models/surrogate/sampling.py index 8ead08f..e436b49 100644 --- a/src/globi/models/surrogate/sampling.py +++ b/src/globi/models/surrogate/sampling.py @@ -84,6 +84,7 @@ def sample_equally_by_stratum( # TODO: consider how we want to handle potentially having the same geometry appear in both # the training and testing sets. + # if any(len(stratum_df) < n_per_stratum for stratum_df in stratum_dfs.values()): # msg = "There are not enough buildings in some strata to sample the desired number of buildings per stratum." # # connsider making this a warning? @@ -95,7 +96,16 @@ def sample_equally_by_stratum( ) for stratum, stratum_df in stratum_dfs.items() } - return cast(pd.DataFrame, pd.concat(sampled_strata.values())) + combined = cast(pd.DataFrame, pd.concat(sampled_strata.values())) + if len(combined) < n_per_iter: + # This handles cases where, due to rounding, we do not end up with the desired number of samples. + remaining = n_per_iter - len(combined) + additional = df.sample( + n=remaining, random_state=self.random_generator, replace=True + ) + combined = cast(pd.DataFrame, pd.concat([combined, additional])) + + return combined # TODO: Add the ability to check the compatiblity of a sampling spec with an input_validator_type. 
diff --git a/src/globi/models/surrogate/training.py b/src/globi/models/surrogate/training.py index 8da98a1..963af3c 100644 --- a/src/globi/models/surrogate/training.py +++ b/src/globi/models/surrogate/training.py @@ -1,19 +1,27 @@ """Models used for the surrogate training pipeline.""" +import fnmatch +import logging import warnings from collections.abc import Callable -from functools import cached_property +from dataclasses import dataclass +from functools import cached_property, partial from pathlib import Path -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, Literal, cast import numpy as np import pandas as pd -from pydantic import Field +import yaml +from pydantic import BaseModel, Field from scythe.base import ExperimentInputSpec, ExperimentOutputSpec from scythe.scatter_gather import ScatterGatherResult from scythe.utils.filesys import FileReference, S3Url -from globi.models.surrogate.configs.pipeline import ProgressiveTrainingSpec, StageSpec +from globi.models.surrogate.configs.pipeline import ( + ProgressiveTrainingSpec, + StageSpec, + TargetsConfigColumnSpec, +) from globi.models.surrogate.configs.regression import XGBHyperparameters if TYPE_CHECKING: @@ -21,6 +29,8 @@ else: S3ClientType = object +logger = logging.getLogger(__name__) + EXCLUDED_COLUMNS = frozenset({ "experiment_id", @@ -30,6 +40,225 @@ }) +@dataclass(frozen=True) +class DataPair: + """A pair of dataframes.""" + + x: pd.DataFrame + y: pd.DataFrame + + +@dataclass(frozen=True) +class TrainTestPair: + """A pair of train and test dataframes.""" + + train: DataPair + test: DataPair + + +class XTransformer(BaseModel, frozen=True): + """A transformer for the x features.""" + + features: list[str] + cat_map: dict[str, list[str | float | int]] + cat_encoding: Literal["index", "one-hot"] + + +class MinMaxScaler(BaseModel, arbitrary_types_allowed=True): + """The configuration for a min-max scaler.""" + + mins_: dict[str, float] = Field(default_factory=dict) + maxs_: dict[str, 
float] = Field(default_factory=dict) + + @property + def mins(self) -> pd.Series: + """The mins.""" + return pd.Series(self.mins_, name="mins", dtype=float) + + @property + def maxs(self) -> pd.Series: + """The maxs.""" + return pd.Series(self.maxs_, name="maxs", dtype=float) + + def fit(self, y: pd.DataFrame) -> None: + """Fit the min-max scaler.""" + y_min = cast(pd.Series, y.min(axis=0)) + y_max = cast(pd.Series, y.max(axis=0)) + self.mins_ = y_min.to_dict() + self.maxs_ = y_max.to_dict() + + @property + def scale(self) -> pd.Series: + """The scale.""" + return self.maxs - self.mins + + def transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Transform the data.""" + return (y - self.mins) / self.scale + + def fit_transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Fit and transform the data.""" + self.fit(y) + return self.transform(y) + + def inverse_transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Inverse transform the data.""" + return y * self.scale + self.mins + + +class StandardScaler(BaseModel, arbitrary_types_allowed=True): + """The configuration for a standard scaler.""" + + means_: dict[str, float] = Field(default_factory=dict) + stds_: dict[str, float] = Field(default_factory=dict) + + @property + def means(self) -> pd.Series: + """The means.""" + return pd.Series(self.means_, name="means", dtype=float) + + @property + def stds(self) -> pd.Series: + """The stds.""" + return pd.Series(self.stds_, name="stds", dtype=float) + + def fit(self, y: pd.DataFrame) -> None: + """Fit the standard scaler.""" + y_mean = cast(pd.Series, y.mean(axis=0)) + y_std = cast(pd.Series, y.std(axis=0)) + # if any stds are zero, we will set them to 1 to avoid division by zero + y_std = y_std.where(y_std != 0, 1) + self.means_ = y_mean.to_dict() + self.stds_ = y_std.to_dict() + + def transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Transform the data.""" + return (y - self.means) / self.stds + + def fit_transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Fit 
and transform the data.""" + self.fit(y) + return self.transform(y) + + def inverse_transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Inverse transform the data.""" + return y * self.stds + self.means + + +class IdentityScaler(BaseModel, frozen=True): + """A scaler that does nothing.""" + + def fit(self, y: pd.DataFrame) -> None: + """Fit the identity scaler.""" + pass + + def transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Transform the data.""" + return y + + def fit_transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Fit and transform the data.""" + self.fit(y) + return self.transform(y) + + def inverse_transform(self, y: pd.DataFrame) -> pd.DataFrame: + """Inverse transform the data.""" + return y + + +class YTransformer(BaseModel, arbitrary_types_allowed=True, frozen=True): + """A transformer for the y features.""" + + scaler: MinMaxScaler | StandardScaler | IdentityScaler + targets: list[str] + normalization: Literal["min-max", "standard"] | None + + +class Transformers(BaseModel, frozen=True): + """A pair of transformers.""" + + x: XTransformer + y: YTransformer + + +@dataclass(frozen=True) +class PrepDataResult: + """The result of preparing the data.""" + + # original data + selected: TrainTestPair + # transformed data + transformed: TrainTestPair + # Transformers + transformers: Transformers + + +def xgb_pred(x: pd.DataFrame, *, model): + """Predict the targets for the given features using xgboost.""" + import xgboost as xgb + + if not isinstance(model, xgb.Booster): + msg = f"Model is not an xgboost model: {type(model)}" + raise TypeError(msg) + + dmat = xgb.DMatrix(x.reset_index(drop=True)) + preds = model.predict(dmat) + return preds + + +def predict[T: pd.DataFrame | np.ndarray]( + x: pd.DataFrame, *, conf: Transformers, pred_fn: Callable[[pd.DataFrame], T] +) -> pd.DataFrame: + """Predict the targets for the given features.""" + x_encoded = encode_inputs( + x, + conf=conf.x, + ) + preds = pred_fn(x_encoded.reset_index(drop=True)) + 
preds = pd.DataFrame(preds, columns=pd.Index(conf.y.targets), index=x_encoded.index) + if conf.y.scaler: + preds = conf.y.scaler.inverse_transform(preds) + return preds + + +def index_encode_categorical_columns( + df: pd.DataFrame, *, cats: dict[str, list[str | float | int]] +) -> pd.DataFrame: + """Index encode the categorical columns.""" + # TODO: make sure this still works when one of the values is nan + # TODO: drop this copy call since we have already made a copy of the dataframe + df = df.copy(deep=True) + for col in df.columns: + if df[col].dtype == "object": + df[col] = pd.Categorical(df[col], categories=cats[col]).codes + return df + + +def encode_inputs( + x: pd.DataFrame, + *, + conf: XTransformer, + log: Callable[[str], None] = lambda x: logger.info(x), +) -> pd.DataFrame: + """Encode the inputs.""" + log(f"Selecting {len(conf.features)} features out of {len(x.columns)}...") + x_encoded = x.loc[:, conf.features] + log("Selected features.") + + log(f"Encoding categorical inputs with {conf.cat_encoding} encoding...") + if conf.cat_encoding == "index": + x_encoded = index_encode_categorical_columns(x_encoded, cats=conf.cat_map) + elif conf.cat_encoding == "one-hot": + raise NotImplementedError("One-hot encoding is not implemented yet.") + else: + raise NotImplementedError( + f"Unsupported categorical encoding: {conf.cat_encoding}" + ) + log("Encoded inputs.") + # TODO: add continuous encoding + return x_encoded.set_index(pd.MultiIndex.from_frame(x)) + + class TrainFoldSpec(ExperimentInputSpec): """Train an sbem model for a specific fold. 
@@ -65,33 +294,56 @@ class TrainFoldSpec(ExperimentInputSpec): @cached_property def combined_data(self) -> pd.DataFrame: """Combines the data from the data uris into a single dataframe with a flattened column index.""" - dfs: dict[str, pd.DataFrame] = { + all_dfs: dict[str, pd.DataFrame] = { key: pd.read_parquet(str(uri)) for key, uri in self.data_uris.items() } - # TODO: we should drop any dataframes which do not participate in training - # for instance, by checking their regression io spec, or if there is another place to check. - # Mostly important for preventing errors on the next line when many differently shaped dataframes are returned. - if not all( - df.index.equals(next(iter(dfs.values())).index) for df in dfs.values() - ): - msg = "The indices of the dataframes are not all equal. " - "This is not supported, since the features must be identical for all outputs.." - raise ValueError(msg) - for df in dfs.values(): - # TODO: use level names while constructing the sequential name + # We will only include dataframes which have valid targets in the training. + self.log("Checking for valid targets in dataframes...") + dfs_to_use: dict[str, pd.DataFrame] = {} + for key, df in all_dfs.items(): + self.log(f"Checking dataframe {key}...") + # TODO: use level names while constructing the sequential name? _level_names = df.columns.names df.columns = df.columns.to_flat_index() - df.columns = [ - "/".join(col) if isinstance(col, tuple | list) else col + new_columns = [ + "/".join([ + str(c) if not isinstance(c, int) else f"{c:03d}" for c in col + ]) # pad integers with leading zeros to make them sortable + if isinstance(col, tuple | list) + else col for col in df.columns ] + # we will only temporarily include the key prefix in the columns so we can perform the filtering check; + # it will get re-added later when concatenating the dataframes. 
+ new_columns_with_prefix = [f"{key}/{col}" for col in new_columns] + df.columns = new_columns_with_prefix + viable_targets = self.valid_targets_in_df(df) + df.columns = new_columns + if viable_targets: + self.log( + f"Including dataframe {key} with {len(viable_targets)} targets: {viable_targets}" + ) + dfs_to_use[key] = df + else: + self.log(f"Excluding dataframe {key} because it has no valid targets.") + + # TODO: consider how/if we want to handle dataframes with different indices. + if not all( + df.index.equals(next(iter(dfs_to_use.values())).index) + for df in dfs_to_use.values() + ): + msg = "The indices of the dataframes are not all equal. " + "This is not supported, since the features must be identical for all outputs.." + raise ValueError(msg) - combined_df = pd.concat(dfs, axis=1) + self.log("Concatenating and shuffling dataframes...") + combined_df = pd.concat(dfs_to_use, axis=1) combined_df.columns = combined_df.columns.to_flat_index() combined_df.columns = ["/".join(col) for col in combined_df.columns] shuffled_df = combined_df.sample(frac=1, random_state=42, replace=False) + self.log(f"Shuffled dataframe has {len(shuffled_df)} rows.") return shuffled_df @property @@ -107,7 +359,12 @@ def dparams(self) -> pd.DataFrame: @cached_property def all_feature_columns(self) -> frozenset[str]: """The names of all columns.""" - return frozenset(self.dparams.columns) + init_cols = frozenset(self.dparams.columns) + is_exclusively_one_val = [ + col for col in init_cols if self.dparams[col].nunique() <= 1 + ] + all_cols = init_cols - frozenset(is_exclusively_one_val) + return all_cols @cached_property def all_target_columns(self) -> frozenset[str]: @@ -117,6 +374,7 @@ def all_target_columns(self) -> frozenset[str]: @cached_property def continuous_columns(self) -> frozenset[str]: """The continuous columns.""" + # TODO: add some logging calls here. 
feature_conf = self.parent.regression_io_config.features candidates = ( self.all_feature_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS @@ -157,6 +415,7 @@ def continuous_columns(self) -> frozenset[str]: @cached_property def categorical_columns(self) -> frozenset[str]: """The categorical columns.""" + # TODO: add some logging calls here. feature_conf = self.parent.regression_io_config.features candidates = ( self.all_feature_columns - feature_conf.exclude_columns - EXCLUDED_COLUMNS @@ -198,6 +457,11 @@ def categorical_columns(self) -> frozenset[str]: object_dtype_columns ) + @cached_property + def x_features(self) -> frozenset[str]: + """The all features.""" + return self.continuous_columns | self.categorical_columns + @cached_property def stratum_names(self) -> list[str]: """The values of the stratification field.""" @@ -276,12 +540,50 @@ def test_segment(self) -> tuple[pd.DataFrame, pd.DataFrame]: targets = test_df return params, targets + def valid_targets_in_df(self, df: pd.DataFrame) -> list[str]: + """Get the valid targets in the dataframe.""" + if isinstance( + self.parent.regression_io_config.targets, TargetsConfigColumnSpec + ): + if self.parent.regression_io_config.targets.columns: + return [ + c + for c in df.columns + if c in self.parent.regression_io_config.targets.columns + ] + return sorted(df.columns.tolist()) + globs = self.parent.regression_io_config.targets.globs + if not globs: + return sorted(df.columns.tolist()) + viable_target_columns = [] + for col in df.columns: + if any(fnmatch.fnmatch(col, glob) for glob in globs): + viable_target_columns.append(col) + return sorted(viable_target_columns) + @cached_property def targets(self) -> list[str]: """The list of regression targets.""" - return self.parent.regression_io_config.targets.columns or sorted( - self.all_target_columns - ) + self.log("Determining targets...") + if isinstance( + self.parent.regression_io_config.targets, TargetsConfigColumnSpec + ): + final_targets = 
self.parent.regression_io_config.targets.columns or sorted( + self.all_target_columns + ) + else: + globs = self.parent.regression_io_config.targets.globs + if not globs: + final_targets = sorted(self.all_target_columns) + viable_target_columns = [] + for col in self.all_target_columns: + if any(fnmatch.fnmatch(col, glob) for glob in globs): + viable_target_columns.append(col) + final_targets = sorted(viable_target_columns) + self.log( + f"Selected {len(final_targets)} / {len(self.all_target_columns)} targets." + ) + return final_targets @cached_property def target_range(self) -> list[tuple[float, float]]: @@ -303,38 +605,103 @@ def train(self, tempdir: Path): f"Unsupported hyperparameters type: {type(self.parent.hyperparameters)}" ) - def train_xgboost(self, tempdir: Path): - """Train an xgboost model.""" - import xgboost as xgb - - hp = ( - self.parent.hyperparameters - if isinstance(self.parent.hyperparameters, XGBHyperparameters) - else XGBHyperparameters() - ) - + def prep_data( + self, + *, + x_cat_encoding: Literal["index", "one-hot"], + y_encoding: Literal["min-max", "standard"] | None, + ) -> PrepDataResult: + """Prepare the data for training.""" + self.log("Preparing data for training...") x_train, y_train = self.train_segment x_test, y_test = self.test_segment - # select the features - x_train_selected, x_test_selected = ( - x_train.loc[:, self.continuous_columns | self.categorical_columns], - x_test.loc[:, self.continuous_columns | self.categorical_columns], - ) + # Technically we are allowing some of our test-set features' categorical options + # through, but that's okay; we are assuming we exhaustively know the categorical options + # and this is not leakage. 
cats = { col: self.dparams[col].unique().tolist() for col in self.categorical_columns } - x_train_encoded = self.index_encode_categorical_columns(x_train_selected, cats) - x_test_encoded = self.index_encode_categorical_columns(x_test_selected, cats) + + x_transformer = XTransformer( + features=sorted(self.x_features), + cat_map=cats, + cat_encoding=x_cat_encoding, + ) + x_train_encoded = encode_inputs( + x_train, + conf=x_transformer, + ) + + x_test_encoded = encode_inputs( + x_test, + conf=x_transformer, + ) + scaler = ( + MinMaxScaler() + if y_encoding == "min-max" + else StandardScaler() + if y_encoding == "standard" + else IdentityScaler() + ) + y_transformer = YTransformer( + scaler=scaler, + targets=self.targets, + normalization=y_encoding, + ) # select the targets - y_train, y_test = y_train.loc[:, self.targets], y_test.loc[:, self.targets] + self.log("Selecting targets...") + y_train, y_test = ( + cast(pd.DataFrame, y_train.loc[:, y_transformer.targets]), + cast(pd.DataFrame, y_test.loc[:, y_transformer.targets]), + ) + self.log("Selected targets.") + + self.log(f"Scaling targets with {type(y_transformer.scaler).__name__}...") + y_train_scaled = y_transformer.scaler.fit_transform(y_train) + y_test_scaled = y_transformer.scaler.transform(y_test) + self.log("Scaled targets.") + + transformers = Transformers( + x=x_transformer, + y=y_transformer, + ) + selected = TrainTestPair( + train=DataPair(x=x_train, y=y_train), + test=DataPair(x=x_test, y=y_test), + ) + transformed = TrainTestPair( + train=DataPair(x=x_train_encoded, y=y_train_scaled), + test=DataPair(x=x_test_encoded, y=y_test_scaled), + ) + return PrepDataResult( + selected=selected, + transformed=transformed, + transformers=transformers, + ) + + def train_xgboost(self, tempdir: Path): + """Train an xgboost model.""" + import xgboost as xgb + x_encoding = self.parent.regression_io_config.features.cat_encoding + y_encoding = self.parent.regression_io_config.targets.normalization + data = 
self.prep_data(x_cat_encoding=x_encoding, y_encoding=y_encoding) + self.log("Training XGBoost model...") + + hp = ( + self.parent.hyperparameters + if isinstance(self.parent.hyperparameters, XGBHyperparameters) + else XGBHyperparameters() + ) train_dmat = xgb.DMatrix( - x_train_encoded.reset_index(drop=True), label=y_train.reset_index(drop=True) + data.transformed.train.x.reset_index(drop=True), + label=data.transformed.train.y.reset_index(drop=True), ) test_dmat = xgb.DMatrix( - x_test_encoded.reset_index(drop=True), label=y_test.reset_index(drop=True) + data.transformed.test.x.reset_index(drop=True), + label=data.transformed.test.y.reset_index(drop=True), ) evals = [(train_dmat, "train"), (test_dmat, "eval")] @@ -346,37 +713,38 @@ def train_xgboost(self, tempdir: Path): early_stopping_rounds=hp.trainer.early_stopping_rounds, verbose_eval=hp.trainer.verbose_eval, ) + self.log("Trained XGBoost model.") - def predict(x: pd.DataFrame) -> pd.DataFrame: - """Predict the targets for the given features.""" - x_selected = cast( - pd.DataFrame, - x.loc[:, self.continuous_columns | self.categorical_columns], - ) - x_encoded = self.index_encode_categorical_columns(x_selected, cats) - preds = model.predict( - xgb.DMatrix( - x_encoded.reset_index(drop=True), - ) - ) - return pd.DataFrame( - preds, columns=pd.Index(self.targets), index=pd.MultiIndex.from_frame(x) - ) + pred = partial( + predict, conf=data.transformers, pred_fn=partial(xgb_pred, model=model) + ) - evaluation = self.evaluate(predict, x_train, x_test, y_train, y_test) + evaluation = self.evaluate( + pred, + selected=data.selected, + ) + self.log("Saving model...") model_path = tempdir / "model.ubj" model.save_model(model_path.as_posix()) - return model, evaluation, model_path + transforms_path = tempdir / "transforms.yml" + with open(transforms_path, "w") as f: + yaml.dump( + data.transformers.model_dump(mode="json"), f, indent=2, sort_keys=False + ) + self.log("Model saved.") + return (model, model_path), 
(data.transformers, transforms_path), evaluation def evaluate( self, fn: Callable[[pd.DataFrame], pd.DataFrame], - x_train: pd.DataFrame, - x_test: pd.DataFrame, - y_train: pd.DataFrame, - y_test: pd.DataFrame, + selected: TrainTestPair, ) -> tuple[pd.DataFrame, pd.DataFrame]: """Evaluate a model on the train and test segments.""" + self.log("Evaluating model on train and test segments...") + x_train = selected.train.x + x_test = selected.test.x + y_train = selected.train.y + y_test = selected.test.y y_train_preds = fn(x_train) y_test_preds = fn(x_test) @@ -400,18 +768,9 @@ def evaluate( keys=["train", "test"], names=["split_segment"], ) + self.log("Model evaluated on train and test segments.") return global_metrics, stratum_metrics - def index_encode_categorical_columns( - self, df: pd.DataFrame, cats: dict[str, list[str]] - ) -> pd.DataFrame: - """Index encode the categorical columns.""" - df = df.copy(deep=True) - for col in df.columns: - if df[col].dtype == "object": - df[col] = pd.Categorical(df[col], categories=cats[col]).codes - return df - def train_pytorch_tabular(self, tempdir: Path): """Train a pytorch tabular model.""" from pytorch_tabular import TabularModel @@ -490,8 +849,8 @@ def train_pytorch_tabular(self, tempdir: Path): _, train_targets = self.train_segment _, test_targets = self.test_segment trainer = model.fit( - train=train_targets.reset_index(), - validation=test_targets.reset_index(), + train=train_targets.reset_index(drop=True), + validation=test_targets.reset_index(drop=True), seed=42, ) model.save_model((tempdir / "model").as_posix()) @@ -537,7 +896,16 @@ def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): """Compute the metrics.""" global_metrics = self.compute_frame_metrics(preds, targets) stratum_metric_dfs = {} + names = [] for stratum_name in self.stratum_names: + if ( + stratum_name + not in targets.index.get_level_values( + self.parent.stratification.field + ).unique() + ): + continue + names.append(stratum_name) 
stratum_targets = cast( pd.DataFrame, targets.xs(stratum_name, level=self.parent.stratification.field), @@ -552,7 +920,7 @@ def compute_metrics(self, preds: pd.DataFrame, targets: pd.DataFrame): stratum_metrics = pd.concat( stratum_metric_dfs, axis=1, - keys=self.stratum_names, + keys=names, names=["stratum"], ) global_metrics = ( @@ -597,6 +965,7 @@ class FoldResult(ExperimentOutputSpec): """The output for a fold.""" regressor: FileReference + transforms: FileReference class TrainWithCVSpec(StageSpec): diff --git a/src/globi/models/tasks.py b/src/globi/models/tasks.py index 699695a..7a78c83 100644 --- a/src/globi/models/tasks.py +++ b/src/globi/models/tasks.py @@ -3,13 +3,14 @@ import logging from functools import cached_property from pathlib import Path -from typing import Literal +from typing import Any, Literal import numpy as np from epinterface.geometry import compute_shading_mask -from pydantic import BaseModel, Field, model_validator +from pydantic import BaseModel, Field, field_validator, model_validator from scythe.base import ExperimentInputSpec, ExperimentOutputSpec from scythe.utils.filesys import FileReference +from shapely import Polygon from globi.models.configs import GloBIExperimentSpec from globi.type_utils import ( @@ -156,6 +157,7 @@ class GloBIBuildingSpec(ExperimentInputSpec): aspect_ratio: float = Field( ..., description="The aspect ratio of the building footprint [unitless]." ) + # TODO: delete this entirely! 
rotated_rectangle_area_ratio: float = Field( ..., description="The ratio of the rotated rectangle footprint area to the building footprint area.", @@ -183,12 +185,47 @@ class GloBIBuildingSpec(ExperimentInputSpec): gt=0, lt=1, ) + attic_use_fraction: float | None = Field( + default=None, + description="The use fraction of the attic.", + ge=0, + le=1, + ) + basement_use_fraction: float | None = Field( + default=None, + description="The use fraction of the basement.", + ge=0, + le=1, + ) + attic_height: float | None = Field( + default=None, + description="The height of the attic.", + ge=0, + ) parent_experiment_spec: GloBIExperimentSpec | None = Field( default=None, description="The parent experiment spec.", ) + @field_validator("rotated_rectangle", mode="before") + def validate_rotated_rectangle(cls, value: Any) -> str: + """Validate the rotated rectangle.""" + if isinstance(value, Polygon): + return value.wkt + return value + + @field_validator("neighbor_polys", mode="before") + def validate_neighbor_polys(cls, value: Any) -> list[str]: + """Validate the neighbor polygons.""" + if isinstance(value, list): + for i, poly in enumerate(value): + if isinstance(poly, Polygon): + value[i] = poly.wkt + else: + value[i] = poly + return value + @property def feature_dict(self) -> dict[str, str | int | float]: """Return a dictionary of features which will be available to ML algos.""" @@ -206,7 +243,7 @@ def feature_dict(self) -> dict[str, str | int | float]: "feature.geometry.zoning": self.use_core_perim_zoning, "feature.geometry.energy_model_conditioned_area": self.energy_model_conditioned_area, "feature.geometry.energy_model_occupied_area": self.energy_model_occupied_area, - "feature.geometry.attic_height": self.attic_height or 0, + "feature.geometry.attic_height": self.attic_height_computed or 0, "feature.geometry.exposed_basement_frac": self.exposed_basement_frac, } @@ -249,7 +286,7 @@ def feature_dict(self) -> dict[str, str | int | float]: "Yes" if 
self.basement_is_conditioned else "No" ) features["feature.extra_spaces.basement.use_fraction"] = ( - self.basement_use_fraction + self.basement_use_fraction_computed ) features["feature.extra_spaces.attic.exists"] = ( "Yes" if self.has_attic else "No" @@ -260,17 +297,24 @@ def feature_dict(self) -> dict[str, str | int | float]: features["feature.extra_spaces.attic.conditioned"] = ( "Yes" if self.attic_is_conditioned else "No" ) - features["feature.extra_spaces.attic.use_fraction"] = self.attic_use_fraction + features["feature.extra_spaces.attic.use_fraction"] = ( + self.attic_use_fraction_computed + ) return features - # TODO: use the scythe automatic referencing for these paths - FileReference class from scythe.utils.files - # choose a local file and direclty use the 'Path' for this - # self scythe - fetch uri - # input_sepc.weather_file - # everything gets a tempdir - - # + @model_validator(mode="before") + def validate_semantic_field_context(cls, values: dict[str, Any]): + """Validate the semantic field context.""" + additional_semantic_fields = { + k.replace("semantic_field_", ""): v + for k, v in values.items() + if (k.startswith("semantic_field_") and k not in ["semantic_field_context"]) + } + if "semantic_field_context" not in values: + values["semantic_field_context"] = {} + values["semantic_field_context"].update(additional_semantic_fields) + return values @cached_property def db_path(self) -> Path: @@ -343,19 +387,21 @@ def attic_is_conditioned(self) -> bool: return self.attic in ConditionedOptions @cached_property - def basement_use_fraction(self) -> float: + def basement_use_fraction_computed(self) -> float: """The use fraction of the basement.""" if not self.basement_is_occupied: return 0 + if self.basement_use_fraction is not None: + return self.basement_use_fraction return np.random.uniform(0.2, 0.6) @cached_property - def attic_use_fraction(self) -> float: + def attic_use_fraction_computed(self) -> float: """The use fraction of the attic.""" if not 
self.attic_is_occupied: return 0 - # TODO: use sampling as a fallback value when a default is not provided rather - # than always sampling. + if self.attic_use_fraction is not None: + return self.attic_use_fraction return np.random.uniform(0.2, 0.6) @cached_property @@ -369,10 +415,12 @@ def has_attic(self) -> bool: return self.attic != "none" @cached_property - def attic_height(self) -> float | None: + def attic_height_computed(self) -> float | None: """The height of the attic.""" if not self.has_attic: return None + if self.attic_height is not None: + return self.attic_height min_occupied_or_conditioned_rise_over_run = 6 / 12 max_occupied_or_conditioned_rise_over_run = 9 / 12 min_unoccupied_and_unconditioned_rise_over_run = 4 / 12 diff --git a/src/globi/pipelines/simulations.py b/src/globi/pipelines/simulations.py index dfaff88..0f772a8 100644 --- a/src/globi/pipelines/simulations.py +++ b/src/globi/pipelines/simulations.py @@ -69,13 +69,15 @@ def simulate_globi_building_pipeline( Zone=zone_def, Basement=BasementAssumptions( Conditioned=spec.basement_is_conditioned, - UseFraction=spec.basement_use_fraction + UseFraction=spec.basement_use_fraction_computed if spec.basement_is_occupied else None, ), Attic=AtticAssumptions( Conditioned=spec.attic_is_conditioned, - UseFraction=spec.attic_use_fraction if spec.attic_is_occupied else None, + UseFraction=spec.attic_use_fraction_computed + if spec.attic_is_occupied + else None, ), geometry=ShoeboxGeometry( x=0, @@ -87,7 +89,7 @@ def simulate_globi_building_pipeline( num_stories=spec.num_floors, basement=spec.has_basement, zoning=spec.use_core_perim_zoning, - roof_height=spec.attic_height, + roof_height=spec.attic_height_computed, exposed_basement_frac=spec.exposed_basement_frac, scene_context=SceneContext( building=cast(Polygon, from_wkt(spec.rotated_rectangle)), @@ -129,8 +131,35 @@ def simulate_globi_building_pipeline( ) results = run_result.energy_and_peak.to_frame().T.set_index(feature_index) + energy = 
results["Energy"] + energy_annual = ( + energy.T.groupby( + level=[level for level in energy.columns.names if level != "Month"] + ) + .sum() + .T + ) + peak = results["Peak"] + peak_annual = ( + peak.T.groupby( + level=[level for level in peak.columns.names if level != "Month"] + ) + .max() + .T + ) + EnergyAndPeakAnnual = cast( + pd.DataFrame, + pd.concat( + [energy_annual, peak_annual], + axis=1, + keys=["Energy", "Peak"], + names=results.columns.names[:-1], + ), + ) + dfs: dict[str, pd.DataFrame] = { "EnergyAndPeak": results, + "EnergyAndPeakAnnual": EnergyAndPeakAnnual, } if run_result.overheating_results is not None: # TODO: add feature dict to overheating df indices? Or instead of a full feature df, just add a single column with the building id? diff --git a/src/globi/pipelines/training.py b/src/globi/pipelines/training.py index 4ab59b5..53f7162 100644 --- a/src/globi/pipelines/training.py +++ b/src/globi/pipelines/training.py @@ -48,9 +48,14 @@ def train_regressor_with_cv_fold( ) -> FoldResult: """Train a regressor with cross-fold validation.""" # DO TRAINING - _model, (global_results, stratum_results), model_path = input_spec.train(tempdir) + ( + (_model, model_path), + (_transforms, transforms_path), + (global_results, stratum_results), + ) = input_spec.train(tempdir) return FoldResult( regressor=model_path, + transforms=transforms_path, dataframes={ "global": global_results, "strata": stratum_results, @@ -193,6 +198,7 @@ def combine_results( return CombineResultsResult( incoming=results, + previous=spec.data_uris, combined=ScatterGatherResult(uris=combined_results), ) @@ -211,7 +217,7 @@ def start_training( train_spec = TrainWithCVSpec( parent=spec, - data_uris=results.combined, # TODO: should configure which results to use + data_uris=results.combined, ) # Alternatively, one task per fold-column combination? 
@@ -293,7 +299,6 @@ def evaluate_training( .mean() .unstack(), ) - # TODO: fold_averages and strata and globals should be saved to s3 global_averages = cast( pd.Series, @@ -314,6 +319,7 @@ def evaluate_training( return TrainingEvaluationResult( converged=convergence_all, + # TODO: maybe we should change/improve what gets logged here? metrics={ "global_averages": global_averages.reset_index().to_dict(orient="records"), }, @@ -374,13 +380,6 @@ def transition_recursion( schedule_timeout=timedelta(hours=5), execution_timeout=timedelta(minutes=30), parents=[transition_recursion, await_training, combine_results], - # skip_if=[ - # # TODO: maybe we should just run every time? - # ParentCondition( - # parent=transition_recursion, - # expression="output.reasoning == null", - # ) - # ], ) def finalize(spec: ProgressiveTrainingSpec, context: Context) -> FinalizeResult: """Run when training has exited the loop (converged, max depth, or other reason). Saves final models and artifacts.""" @@ -419,6 +418,7 @@ def finalize(spec: ProgressiveTrainingSpec, context: Context) -> FinalizeResult: experiment_ids = [*spec.previous_experiment_ids, spec.experiment_id] # TODO: save final models, or return them a little more directly? + # Also, need to return Transformers object somehow. 
result = FinalizeResult( reasoning=transition.reasoning, From afaf216eae7d5b1ec24e07b84827e03da9ea40d8 Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Wed, 11 Mar 2026 16:55:25 -0400 Subject: [PATCH 29/31] backup training tester script --- scripts/trainer_testing.py | 345 +++++++++++++++++++++++++++++++++++++ 1 file changed, 345 insertions(+) create mode 100644 scripts/trainer_testing.py diff --git a/scripts/trainer_testing.py b/scripts/trainer_testing.py new file mode 100644 index 0000000..3911dbe --- /dev/null +++ b/scripts/trainer_testing.py @@ -0,0 +1,345 @@ +"""Source geometry pipeline for GloBI project ml testing.""" + +from pathlib import Path +from typing import cast + +import boto3 +import pandas as pd +import yaml +from epinterface.sbem.fields.spec import CategoricalFieldSpec, SemanticModelFields +from scythe.experiments import BaseExperiment +from scythe.scatter_gather import RecursionMap +from scythe.settings import ScytheStorageSettings + +from globi.models.configs import ( + DeterministicGISPreprocessorConfig, + FileConfig, +) +from globi.models.surrogate.configs.pipeline import ( + ConvergenceThresholds, + ConvergenceThresholdsByTarget, + CrossValidationSpec, + FeatureConfigSpec, + IterationSpec, + ProgressiveTrainingSpec, + RegressionIOConfigSpec, + StratificationSpec, + TargetsConfigGlobSpec, +) +from globi.models.surrogate.configs.regression import ( + XGBHyperparameters, + XGBModelConfig, + XGBTrainerConfig, +) +from globi.models.surrogate.samplers import ( + CategoricalSampler, + Priors, + ProductValuesSampler, + UnconditionalPrior, + UniformSampler, +) +from globi.pipelines import iterative_training, simulate_globi_building +from globi.pipelines.gis import preprocess_gis_file + + +def geometry_extraction( + config: DeterministicGISPreprocessorConfig, + file_config: FileConfig, +): + """Extract the geometry from the GIS file.""" + gdf, column_output_map = preprocess_gis_file(config, file_config) + + 
columns = { + "building_id": column_output_map.Building_ID_col, + "db_file": column_output_map.DB_File_col, + "semantic_fields_file": column_output_map.Semantic_Fields_File_col, + "component_map_file": column_output_map.Component_Map_File_col, + "epwzip_file": column_output_map.EPWZip_File_col, + "semantic_field_context": column_output_map.Semantic_Field_Context_col, + "neighbor_polys": column_output_map.Neighbor_Polys_col, + "neighbor_heights": column_output_map.Neighbor_Heights_col, + "neighbor_floors": column_output_map.Neighbor_Floors_col, + "rotated_rectangle": column_output_map.Rotated_Rectangle_col, + "long_edge_angle": column_output_map.Long_Edge_Angle_col, + "long_edge": column_output_map.Long_Edge_col, + "short_edge": column_output_map.Short_Edge_col, + "aspect_ratio": column_output_map.Aspect_Ratio_col, + "rotated_rectangle_area_ratio": column_output_map.Rotated_Rectangle_Area_Ratio_col, + "wwr": column_output_map.WWR_col, + "height": column_output_map.Height_col, + "num_floors": column_output_map.Num_Floors_col, + "f2f_height": column_output_map.F2F_Height_col, + "basement": column_output_map.Basement_col, + "attic": column_output_map.Attic_col, + "exposed_basement_frac": column_output_map.Exposed_Basement_Frac_col, + } + + columns_to_pop = [ + "semantic_field_context", + "f2f_height", + "height", + "basement", + "attic", + "exposed_basement_frac", + "wwr", + ] + for column in columns_to_pop: + columns.pop(column) + gdf = cast(pd.DataFrame, gdf[list(columns.values())]) + gdf = gdf.rename(columns={v: k for k, v in columns.items()}) + return gdf + + +def main(): + """Main function.""" + config = DeterministicGISPreprocessorConfig( + cart_crs="EPSG:3857", + min_building_area=10.0, + min_edge_length=3.0, + max_edge_length=1000.0, + neighbor_threshold=100.0, + f2f_height=3.0, + min_building_height=3.0, + max_building_height=300.0, + min_num_floors=1, + max_num_floors=125, + default_wwr=0.2, + default_num_floors=2, + default_basement="none", + 
default_attic="none", + default_exposed_basement_frac=0.25, + epw_query="source in ['tmyx']", + ) + + file_config = FileConfig( + gis_file=Path("tests/data/e2e/buildings.parquet"), + semantic_fields_file=Path("tests/data/e2e/semantic-fields.yml"), + component_map_file=Path("tests/data/e2e/component-map.yml"), + db_file=Path("tests/data/e2e/components-lib.db"), + epwzip_file=None, + ) + + df = geometry_extraction(config, file_config) + # TODO: MAJOR REPLACEMENT REQUIRED FOR THIS + settings = ScytheStorageSettings() + semantic_field_key = ( + f"{settings.BUCKET_PREFIX}/test-experiment-artifacts/semantic-fields.yml" + ) + component_map_key = ( + f"{settings.BUCKET_PREFIX}/test-experiment-artifacts/component-map.yml" + ) + db_key = f"{settings.BUCKET_PREFIX}/test-experiment-artifacts/components-lib.db" + semantic_fields_uri = f"s3://{settings.BUCKET}/{semantic_field_key}" + component_map_uri = f"s3://{settings.BUCKET}/{component_map_key}" + db_uri = f"s3://{settings.BUCKET}/{db_key}" + df["semantic_fields_file"] = semantic_fields_uri + df["component_map_file"] = component_map_uri + df["db_file"] = db_uri + df["rotated_rectangle"] = df["rotated_rectangle"].apply(lambda x: x.wkt) + df["neighbor_polys"] = df["neighbor_polys"].apply( + lambda x: [poly.wkt if poly is not None else None for poly in x] + ) + + s3 = boto3.client("s3") + s3.upload_file( + Filename=file_config.semantic_fields_file.as_posix(), + Bucket=settings.BUCKET, + Key=semantic_field_key, + ) + s3.upload_file( + Filename=file_config.component_map_file.as_posix(), + Bucket=settings.BUCKET, + Key=component_map_key, + ) + s3.upload_file( + Filename=file_config.db_file.as_posix(), Bucket=settings.BUCKET, Key=db_key + ) + + output_dir = Path("tests/data/training") + output_dir.mkdir(parents=True, exist_ok=True) + + outpath = output_dir / "context.parquet" + df.to_parquet(outpath) + + experiment_config = ProgressiveTrainingSpec( + context=outpath, + runnable=simulate_globi_building, + 
base_run_name="test-simulations", + convergence_criteria=ConvergenceThresholdsByTarget( + thresholds={ + "EnergyAndPeakAnnual/Energy/Raw/**": ConvergenceThresholds( + mae=3, + rmse=5, + mape=0.05, + r2=0.975, + cvrmse=0.05, + ), + # "EnergyAndPeak/Energy/Raw/**": ConvergenceThresholds( + # r2=0.9, + # ), + } + ), + regression_io_config=RegressionIOConfigSpec( + targets=TargetsConfigGlobSpec( + globs=[ + "EnergyAndPeakAnnual/*/Raw/**", + ], + normalization="min-max", + ), + features=FeatureConfigSpec( + exclude_columns=frozenset(["building_id"]), + cat_encoding="index", + ), + ), + hyperparameters=XGBHyperparameters( + hp=XGBModelConfig( + max_depth=7, + eta=0.1, + min_child_weight=None, + subsample=None, + colsample_bytree=None, + alpha=None, + lam=None, + gamma=None, + seed=42, + ), + trainer=XGBTrainerConfig( + num_boost_round=8000, + early_stopping_rounds=10, + verbose_eval=True, + ), + ), + stratification=StratificationSpec( + field="epwzip_file", + sampling="equal", + ), + samplers=make_priors(file_config.semantic_fields_file), + cross_val=CrossValidationSpec( + n_folds=5, + ), + iteration=IterationSpec( + n_per_iter=[500], + min_per_stratum=25, + max_iters=5, + recursion=RecursionMap( + factor=100, + max_depth=1, + ), + ), + storage_settings=settings, + experiment_id="placeholder", + sort_index=0, + ) + + exp = BaseExperiment( + runnable=iterative_training, + run_name=experiment_config.base_run_name, + storage_settings=settings, + ) + run, _ref = exp.allocate( + experiment_config, + version="bumpmajor", + ) + print(yaml.dump(run.model_dump(mode="json"), indent=2, sort_keys=False)) + # sample_spec = SampleSpec( + # parent=experiment_config, priors=experiment_config.samplers + # ) + # sample_df = sample_spec.populate_sample_df() + + +def make_priors(semantic_fields_file: Path): + """Make priors for the uninitiated model.""" + with open(semantic_fields_file) as f: + semantic_fields = SemanticModelFields.model_validate(yaml.safe_load(f)) + + 
categorical_semantic_fields = [ + field + for field in semantic_fields.Fields + if isinstance(field, CategoricalFieldSpec) + ] + # numeric_semantic_field_names = [ + # field for field in semantic_field_names if isinstance(field, NumericFieldSpec) + # ] + return Priors( + sampled_features={ + "height": UnconditionalPrior( + sampler=ProductValuesSampler( + features_to_multiply=["f2f_height", "num_floors"] + ) + ), + "f2f_height": UnconditionalPrior( + sampler=UniformSampler(min=2.5, max=4, round=None) + ), + "wwr": UnconditionalPrior( + sampler=UniformSampler(min=0.1, max=0.5, round=None) + ), + "basement": UnconditionalPrior( + sampler=CategoricalSampler( + values=[ + "none", + "unoccupied_unconditioned", + "unoccupied_conditioned", + "occupied_unconditioned", + "occupied_conditioned", + ], + weights=[0.5, 0.5 / 4, 0.5 / 4, 0.5 / 4, 0.5 / 4], + ) + ), + "attic": UnconditionalPrior( + sampler=CategoricalSampler( + values=[ + "none", + "unoccupied_unconditioned", + "unoccupied_conditioned", + "occupied_unconditioned", + "occupied_conditioned", + ], + weights=[0.5, 0.5 / 4, 0.5 / 4, 0.5 / 4, 0.5 / 4], + ) + ), + "exposed_basement_frac": UnconditionalPrior( + sampler=UniformSampler(min=0.1, max=0.4, round=None) + ), + **{ + f"semantic_field_{field.Name}": UnconditionalPrior( + sampler=CategoricalSampler( + values=field.Options, + weights=[1 / len(field.Options) for _ in field.Options], + ) + ) + for field in categorical_semantic_fields + }, + # TODO: add basement and attic priors + } + ) + + +if __name__ == "__main__": + main() + # instance = GloBIBuildingSpec( + # building_id="test-building", + # experiment_id="test-experiment", + # sort_index=0, + # db_file="test-db.db", + # semantic_fields_file="test-semantic-fields.yml", + # component_map_file="test-component-map.yml", + # epwzip_file="test-epwzip.epw", + # semantic_field_context={}, + # neighbor_polys=[], + # neighbor_heights=[], + # neighbor_floors=[], + # rotated_rectangle="test-rotated-rectangle.wkt", + # 
long_edge_angle=0, + # long_edge=10, + # short_edge=10, + # aspect_ratio=1, + # rotated_rectangle_area_ratio=100, + # wwr=0.2, + # height=10, + # num_floors=1, + # f2f_height=10, + # basement="none", + # attic="none", + # exposed_basement_frac=0.25, + # ) + # instance.log = lambda msg: print(msg) From 7f1c8ae904a1a7a40811709b50dbf4c82daea62b Mon Sep 17 00:00:00 2001 From: Sam Wolk <36545842+szvsw@users.noreply.github.com> Date: Wed, 11 Mar 2026 16:56:33 -0400 Subject: [PATCH 30/31] add test dataset for training --- tests/data/training/context.parquet | Bin 0 -> 256223 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/data/training/context.parquet diff --git a/tests/data/training/context.parquet b/tests/data/training/context.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6dc60031ea86721305573cb03c8e884a16d769dd GIT binary patch literal 256223 zcmX864|rYYRpxsHN3bOwEo~hiZy(FY_Br;Eee7u4`~RQBNu2>QAN<}F} zv^UKCeV%!q2@oHjv-kIXYpr*^@4LRtPCah#uI|41t?KTpSF2}t|M|%7O1)Cq{(~Rb zeym%mRI)J3f;hBH*Uw7R^9tLH0yi=9v~Z%>v(wPc)aR@)iNYk#%*+l0(<^+3&lQdt zxV{_PPT&{0-8cK4m--_+N-R4{O*bz+)3dGAjO{QsZ96HFIP}~gP1P4Vk#E^v5}1Bc z@VPw6%rvnxGf(2w4egx&U#QQyv6DJ!8kt3G`=*x$p_%%jYZg|TM^;hhc^;_G@zjAA z@%Uj;7JOgqnvw6vW??%ne`2SJ>#NWCX&IGC;G2n?MWz=znHl9?U=~j5C57eKd4A`2 z^O7$tSeQ8R9Mg-u(2P^ZHOs8@X@;ccxGxRRuY79 zndWzX*R$dz3|NiaE^X6GLe?ec#|t+K<1DeVJiGHAUY?bfZ+T|uCVb8*12geT$8@8@ zvhpnRW81p(zUp;zFJ?pdhZk9ynb<{PCW-I(xtIBwcjrB9*Da&KDNH-eSr;d^%qS^C zGcTOd_Yx~~a$9{L9|^MDx6FbSD8q%CyM+ED5Y2{@=eFWM1l)X71ar z>G^jKsu!7I&Q`dEn_J$U^~(#NBNp1G>)1SG>PKc=+Memh1-t2IzFpjTk1Tge$BB8W zJm+_vwB&P+Z5C1J=DrhpVR7eoEhk9)z|TxSvSijiy*$);m}17-Pwk#-P&b=3VL;!!oBH9U&(k8s_taA~P z1vq-ClkW)^y!mxz;?WE_4wup>LPEGzZy>_=cbj$N`~QNUJswo|I}9GVu3%<0L4I7#lj zZ$#wzk;N&Fh>tY&cs>qmmh*pmR&Kl2ojCW)(g||MGHsWm;uRbL9^E%n%MG2N2<+Ux zb54WQPcoNtN_??zfyaK>No=yJNmlrQ&5>5W8~Aa~dWNQN6Fr_!oOAMA@&?Bv%F3{e z?(7GPk~q2LkOgwyqX-I)e5fKcioz^$qd2;=A88sEUSLIL8mjosebzZ(XEGMqa$Sc- 
zzw^Fo8(mTp36!rQ?>Z*=wlMQ3CEC)$Dxy2*DzI4>Cv!~Ok9ZF&Da~l@=?OAq85N0_ zdSP^FdpRwWG$hC^KUW`O{}Y>UjJ+@{11oU?&+F?gTY2e3x$jW~?2xk@aF)~5GM$ja zUBsl+@T%Ec1wv4DK8iR?wm9q@-ND}JG;iZiz)r6fVX zqlQJ0P<(=*e|!5wg-X&P$1_d|R!*&J#(rj-SyA|fpBE{WQ*A4$H_g}}(~p9T&!rsW zlCQHOPEr`8Wb6N3Hb`<#9I>r|s=Izl8lY_Q^==Z!xs{R=?i^32@DqzQ;a{k*RolQ5 z^EZ4R&a(>79d9m|wo?$Bk(p;yKrf?SP$pP8fI z6;5b{c4o&`(v`Nh9Hh1#u&Q=yb3{Csw^m1~AdWfcLE?H@=jJknf`Dg}d@?oXk#d@p zR%}u>d50uS0%!bYkF!;lKKq%))7Du&nB-(*&P7=yN$m4K`(~FDkK*R#o~qUa138yE zmWO7RIYH)^S(JtKi`ueEoEBS54Y1VNjro{EY?WS0jdIEG#rVouaxXQ5h{m{IghePI^`GnBf^=$2?O$3Nr-rq<4&^+qDWLhER)X7Mo8L@ zy{Yrv(k^q?$+I{apWl9qO^iALf5v{03JcCgLOn7m1X&QUU{0Ce3Hg%PQi+X}M#-k8 zDyX1-(+&9%qNTW#LhL9fC6RO8kRb7ZVN_R-CC;qWiIX3!c+=Vnzn$6^3l)}tBhMkQ z$OjJ3V8?0hIboQ^-M$SXlrJN>q&^$!*&&;h*z8$YvO2a^WI@zf**?p`2;-O{lUUR+ z5@lj0q+`>v!6&616u$4OcOXcdG9iDvq()95H8xHn%3+wsVUj0S6lYzzF|eGcj^}~X zQbL!l;v-R5L?+dmHDN2fGVWVkPNLEYOKKKJgrALbj<-cvQH{V@s=oM~A)DMAhXr4j zcCMZc_|23MHhrI?6-QC(r~RwT89yBqL|#H5vAXPiO3vdS0lRLeg=>$m^!zvtS&+oE z@(cP*!OsB$KqjdQK`(GPvMZjG@YWnzH_TaDm*pabrl#*%b{>LgPZdD*u;=lUTu}0CBRcK{B;dv8MiUA~mftWbFy<@XXF~RuB$B zNV0Vm`ijXc;37lAnZ zEa0n(&SZ;7;o-t;>OE~YcEJOQAN6hbczhe2ODhmfS!YX5WaRRjZctiIRwkwE^L94H zIwzHaqR63MNX$hAF-IMxd}fXxMHkL32W6I$M{>6!e z<{&S;jDH;^?Ml@5LNftsN4E z9AB7|KLGyeI~c{FZK~u9bFB#aajiow_{hDp#7^Z+$Fjl#Wr;0;91cXg3pH>t^#Q{$I zEK3}p{OQZGRP3q0i2|QMO;Xn(X#_E~lOz+QR!U~KVvA+i`n@Pl_)?xGqjRPrKEK)w zBM^t}$u;d9+wT<-2QV&3l>i&^Jcu)O`W7cVPF_K30Ad7vy&2Vzsn#sWc}21>O+;~f zYhr?@;-zfPoaa)Mqm&}0(nHUV*Anb)J4a(JeK7o-FL#?vwjICvb+EGt=~@#d*G zD>$kofVd>Pc;VVC`HMa$FF-&pK?8yrjE8NvG_ADnA)K)qr{TW>0f<5)IUq$ zQNMBknd4>CmnpER>UWZodT+1)QN`}x=w(hqkk};HweJzR9@v6Z5P`~zaC~K% zdQ}!NZA~7yY(+^Oi&D~*MItU@x;zf`!hDtkNL|PV8g9;7?0K|lY+adJ@nz>LugS` zJN29kSDN$}5KQs?UpV4mxIm{8ya&DmhM+0<2F1$Hc-j`-M^+d&!mxe0IQq&DCk8UK3Qv2Ce7HnKv_AfSKD4{f3n|%gWj)X>Aky6wk{j4l5XhDi+FP@I+8cSLmGlNa z73qf5Awnk$N_o@nN;cl?WprT?=!6znbUB;S9mX*zCj}F5j5xD)fA2J8C3uZIWm7JB z`6Q8R!x{~>EDY;3OGz>?yG&u@NRi8foU-q+oTN2KO#qI|lhTMf)0-7n&PdaxISxx_ 
zZFYu6DFVF100D7mIMXP)e)UNzHc&4F)&u}a2t1-S2)FjM?UJ??ER+}5y8NDFN91Xz zK3AJfIV<4p+zd0HI%pL@aChlOj}%Ow1S8|w#Bw*~{6;?9s<-yt2DKvf+1p-{6b^I= zL|!K6gku{CS&^QLn!;}tssCO*%GkdE08v^%7cY>xn$bVf0`@}2Md^z-dqh;h7Ezi! zxgqpklxqiqj(kz3DdoCMZ_V{=dJ-4j&?FC@8;}dNN+cW-fEXFTiV8pK--vu#ptuC` zM$iVPxAfjq5Vi0|)F0lO1d-Rb2bWXI9GDo0WOG{KyQt?8Jg)~|n}!*WQ})mHtSBQh zQK_?X{iYW9{@S9&CeZ}2`7z+Uzt{7F5TLI}(Wrhyq&`}sAe!>|p+@0DZAHe;Xp{UN z$DSplnylzuwv&oZW9DoyC`;<88x#@ynsd*m5K74jAZm;sE)$+>XD%F+V- z3BMR+MKE=~29k_B@6uUb^6f9wDnWm5*~`leVlOB2vs6$=G|L|3DSww>iijug*5ww2 zasgoWg4|!tYxy-}^7fI6C65iUuHZOp={xndChr|;r$jUF6Q=7cIwiYI zWI!ODEbhcRj1d>DKwPHdr-9t3k#H0g@hkB*nPJ5#pq!xlE0;Sxc7} zh{CP&U3$7SBTWS5rR|BBeRE^P26mk6R>}Hauqd?zt5w>rlZMafr_zF6G2q>@9B#~u zP(|VOtGZ9z+Vv0CnjxJ#$fEO7n1qMEe8Q14{Y1GU9SOR%e7Bw2sLtZ_Y71jy9VHZQB`Tu4l`qQWFkjh z$=p03#UO)K`MTR0J(KTR2?Yt&!0O-bf=B3~VQV6J{lKIba>n_2w6=iS9y(K2rATh~ zKya4F#z1Gv_6R>n*A03G`+^gJczI&9Ijg?>cj{kaZvE>ls|i<@(i)M=sg!hpnfqch z@+l}$M1PUfrI?<4Sx&_zR3dZ^kb`!~k8bS{sU^KL1UQUBu>P4IeK1@lGLY+UIU20( z_$9p7lI3N=7{a_*tHBk?6e(%M(PN?T7pvY&{6LL1~G-R7M7yU~sy+ zU%8YLI>9)x5BYDNyqMe6Qk@%Q*R|PjcJjT{XMxN)xwkIUDZ5Yy3yt^!8alM z$(OVSEGRE{Y2&m_CkT?KsM1KWFK#OTez+1WMNKD*L2TZ$x2t8*`2(GgTd|Sm!saxPH5Smgs|$B{O+J?k9Y|9~4t_4WKTfoxCW(r}l7@ z#)^-H$s_GJjw#q>>i5$D(dj0@ax=UTQFcEy6e;0^wt`j;HqJ`w*R(E7H%t-d*AlE`c%x=NB8t(aqlT!l_pTBIpiuArg*`k;BjV% zjhIO6>s>+=Hv={;hUS6mV|i0qAE3j@VR(~kr!5qZm?K47O49<2NXIT}r^&wbxDe~~ zjZ`KGCE2OfAf9{~*{6f_?Rfm=lQHc?2IprYeDMD4CyjP2m-e>p2-ciF(C=S`6bQu{ zQuuOIjucJMwa;i3DnqdHXI^$(^4c>GJytu-FOh@tez} zMY1)N3`m2(M2G>SA}H|3VA&94?iANrU|O0dc#@yqC);~xUi6)7nOt*4K}S_9E`@X#}$D-vxjV``%q+#X`BghX^i6MR^|6(*`fj zcj+HEBvc-o?7l_Q5XwxeTlxsdP>g|dY_dxu21W;#<#J(EuGed;_iyN>(hcPchpItXzdYK6BTC}PT6?+)<6Tn6 zKEB#bF${$CN+G0T{mMX<6Y${vhKnjn^Zql93sD1+0y3uWkEODAhNv^yZk`{s764+B z+?XAyA)=-7C(?o;g(mfY{oOKU&C!l>Z~fgBtAAB@3ivvCbhPEC$gZkn-q=qZQYDZp zh@>PvHs2#J1#X61LDxA~t2p!lkun_kDP5@xp>BEovszO6@~Yi*3A2MFk&lm7!nGF^ zAr%)HvP+K1pqQyrF`jCWT#+!hv}$CpeX&x?m0e9pA?y^MadY?C+_8N)Eb<-8!6Sh` 
zQ@K{}IVE&#f*&L->y?ov_5}p}Lgq$kV9q*}QUqZB5}XzR$XQIM&v!}uSRj<-e&I$X z=&aBTkih8N9R~(Hdhuq(myHARv0YUP(p8--N&!m&LZyp>>IVo#ImxdY=f=19Y7|%% zLk`k%3{r=kgRsH@6D7usR!t*W-Yeggr4zGWnZh>^6X0tr0dZyUv9~l#8%m2OdP0P9 z*=bWHVf;9~NW^?hzNyi_wCGyM-hkZq?IB%U+cT;B9Voyq@|}}Y1Y@Jp&`~D?YyJ6# z4J$`J8r)mMdy@lnYd%Wr+7siGO&e7Z!4y!^H`{>p2mi_2^e!zd4Rb;Lx`Y zv#;~7X8|s(wo*#5Gh~tH(l#0wwKjN@E(H=muJ4yk<1~FfP6i?kbsS|4s&jn42VYsn zoKQIDOS3Z$Xd@xvqO1ETo~Tc>R6QP!N5Wf#T5?`)>7*;L8ylYx$>e(D#y|@;5*Zaq z=65yq(8a)7djh5ej0rHMMJ#SF1DK)TkX9jbDeFiiJT-PCMYyCsf>mPoZ#OxcGz+P` zz0t)eIAkGn0gwl15 zYaMV=3G8`J(}2k`*CuD(9@b!frD!xcHb`XQt;_P3Rujd6<6%?n8aC|~;9z`D3t`y} z-5?*kva5|-0bLk1s{YEps^^1_YVca7{DL&IR5?hZVn&CQa~yJ`W~JfQW|e|GL#e_} zL-x-D-?lHUlF%_mp!PtC+CIWuC|@0(K!S#&rLdx6ov|vm{A;I69yICHsr>R+?J6&R ztr} z=n*>320fx7T?STM86koH{&4-A%m5St7j(yVQUW_5K{I3HK(|1ehrGo$CfA26-qvN* zEFKJ7A)GSQm}Okvr=eiVUn=e_SLqW5nUnyZM3VBc{2_9e*PD%mnjaU*uk?^%Vf1+# z62*+x=6D9kD5d+{GCCivMCQnc5A$a7kNe=JDQuME5WIs$&A2^GZwiug*a=eZ%<$Sr zPsKnMyZ|w3A9|A&)_a4El-7@?|0e;3yXr5E$-mWlxuSxR)WRr+BlO8d97@6`W&JhU zIO4S$n{wYlmgK_F*mAQ6U(PS1hs5^Mvi#HGN!k;PlpHQv&5$T}3}p^O40@Xe53NQy`t>?}1NzX?U$FRp2@*)npVL5hQ* zV`b!co&1TPFiY|MxNdIOR|k6_ql9>+tFYwiP&Ejo-NLd5tLTEPX#e7nyhuL{aOQ72 z{CS5$L?jY9@E1Ns2th73i1Q!UU>s=SPV8EnZ6L|fOv)?!T7gA}39Iq!f~rr5+tR7Y z?=+EDHwACem~G7HIisEn!w!^OwSWWm-PB^!{MC=LyzXLa^gocAtQVRSCl;ly~F zTVoBHX4LAfJrD>WRvX)mbA4WiI4A=7%Uv}nf>ajvoCbVy%u(g3LuibC;$Aq{BfJ2= z?3<6TL@&tEk^E6pPce67OFvO5jjKaFyGW4_;A=^E(__e}bdDJW3LGvlZnP48enoeS z0%C4^FAT#8S%1eV)i5HoXC;(5L`Y#=In;z-f*ZADv7v#NHwoR89({Sj(6n z(s>Er+3Hjh`C6mpc|0)(O?o4(EX(lEl$k-B3F@|S0$K92P0^cGde0k2Cx{m>K)=K* zWgF*5b=WyD+V`6^ii0iD0OH784RXg;E*ccN6H}M*XW@Ia>GqJrWqVMr9FpJa_K@-L zX4x!(WKLjj?Ns6^98FZA5GD*D;NpklfPl6j&46fjK$Vg=O=#FGdk7;CF4Q`j>(>p?!Q(yy2u{7B8O2J=DU7RQ^3R8w98QF=Otn=3@GIs2 zL9+Qi0A}jK04fZ8OJ2&S4z_7@@i~AA>L1uu2|NFD=|4Po1Sq^hm=8ZgE7m2T#!urx>W{vRQ1xT6*o|Vgn#Xf|DQGU-9abUBc|w2ju>C zH*_(BP&usP(gt-r07$`@l;bxi@b^+X5f5{+{Cjncy7cm9uf$S>U`1lFmm;buDf?tc z)#Fbdttl_4F@KE49@z@zAHzN0QQsbE#+Vy{ee&3@8s_=**@Z5d$AX;09S8T^#6|+6 
zkZygY3)U;aX;fSqiynL!JvdMXX>4&&{%W+6uD^}^*35&@jZ2G8vr?}8W`l^K*&tJ^ ztox=3ML?FyjLivi?B}9J$zuC<=Xp4}3G5WKy9uoU9n{MjO=l7rvyCoLjhaN~Rn(XF z&(d;3iD0@4(bbV400@EInb&BAlb>cYHhLc2r;W^pZ40=O$vX#XxI5Apn}kRRNA{_M zY*z_e&_bV$pb5-Qya(%vu$^cSJsmACCbdHeHbYEyQbOWGPzkaAAe|T6^eclu82A&l&Tq4^JqWTKgot(427Cf7{Hka7eXmIm4MKGv znVZF796~RWcyYuJ*T!olz9d~+D%gabUy${WZCTD(33yXB?E>`d@ zU3uzYHFv%VNI-zFd~{OzwVGV)G=K!?V+e>u$wMbK_T`q<3o!-JWWXl}FtJm2*XSa= zDXr-eGBh!CuJl!;ZtRJOKS8)wxxR~d#`|cqQA)r-2B6*mIUw4o^$XgJ#~UewKX3@d zg^cYK2w2S`#-+~#l$vY|>U8p;hHuupAS;^voLLm3cetfYW$3T|+FRRHA>~)3AO6-@ zn|uI^OjH`HjfRbA5;^koy8w9}kX9C26JTa?x*d|yGGlpHGlj{)+FlIl+j3me0LT_!Mdh+%K{T9NTRxJv7)j(4XF+gC9aR}*&ac$D{y186FslnLD z-xxG-76ry*-$UQHJc?G01x)_)a7`JlWcesX5oZz2A{$>7jHaLu z#eK7~E2<{8VsSovtienPQeb^!uwt+OkZjhdltTk&K`wME1$hAm5{p)Vi{H@3ds&Ei z19vlO0xF-zRwx`Ye7sHM@C#VuuCFxUzR=O+4;$0kR(gtfq4UUSq4C?KwtIbYf?nG# zavJf7wsUr{Z@Wo9Muv>U*(Vi#tq3Q>JtuH5SWTb;kGk+ z2fFAsn47`M&enx0TiMh(>I@oS?_*D(uMK^G6)M1M@)yS_)i=i)1UnE7?AQjJ!}$h& zYG~~qJ|Cnoctk&v-eC&f#|C<8<^=;#jz%f(**|U75Qa$#HBB+qbH)r+UJ+n6?Fa^< z02o#n3&SYyNYy1oLt%4B8bkwlPd?D>kvriup*s9x{c00)Dia5EzjCfg(64kkxjlXk z>pksGp-clQM^3`r#Q=2$g|)^D~+ z{WK2nfa*v(v_2SL@a`3|0$ZLe0Amp6m?KH)qRAEg+fapnQP)wdIrHzefi%DeqB6u} zN>7L}A_$+KrIx)oq(denle{txdqt0i#{-uEmR;I!sTYVon2=U`AH_qT!e=qC;@9}`P`Kn>S-k3-rZy-DOJ_s z@m3ABAd_LQ34LrMtQ(cXkmMPln{@8U8;6>Zyp)(+eyPze9OOpMr)#W?^{6Tdv?ON| zF8v_qcXL#h2f7SH*%UjrdF=eY2J$k5p#(z>h*WwEmQ%j64+bAaFO$hVotvz{Qk|>? 
zclFNWRWFwtU<7Dq+9!xpsJW7~JPDZxK%hdG3`fw*CtG*3zJFlojK)wCzTj5K4PE3Q zh9O{Dlln!U_fqT;Pjs>}=su=zj| zja{zk(2=Yi6u089akSjtGeOv)+e4~jNIYQGTF7*$%}_*l_hjxkzqWj+4K7#Xl2#=D zZ%{wCOTIgbDQ%pgxr(i?NOr)O^l;hf3x~?dsxlo2{weSfES@5 zg!hCB4?(mgxFt^HgU6N5eMGMBVLd8?)zFpKH{q$MCUdnGoh;%ggtFaT{{-evY76iX z*)vpD_prFAlLNhU=}MKdY@H;y=k+$@caU=dCZ-4Z*Zr$apRZu^l;q zhT8zgl=-ZPSsEQJGR^8=*i}RBGv#ylQj$;;R&_PE=p5KKb##9}k)O4jrKjd3?9QaT zquvFhqGSORTzdF6SS)AasW9vBb~MlP*ETqs(Bu|DEtTCgo{f;%9g63^BbB25`4JA_ z>jxld+VpBT=y8y~NS9BQjFQnR^3%#t6CW#47|Z;LcAik^9U^vmhy8g{Q~C9xFBk8h zf$?W~sSog#FdV+0TE^<$2Gj~-th``Wsl}^i6^V0RhlPRvE8wQ!PW|zwhR=QNe<{Y& z;%GILumjQep79n{29rqaU=(CgfOW{!p1q|gxEY{79w4{|27H)6OAAz1MpP&Yhxb;a zxKF)!@302nid~?^j3i<1Gi%d~h``(m>hEu8ifx?M@r%eO2QjLsp(T_S2nY+BB3U>E zm9tHIkvsLR5EQJ4)GGsx6wA9=N=3({g4P#bSZ+XwDTB;ceSUCqsOkrGipYjjD;Z^} zf9@`Ri@_-iGZs4@%GI$MAKDl9H4Fyn4MQ95q30q!#4MU63!IZ%L#*fKD7GUo8Pl7< z6RZc}^OYWO*MXTU6q@o}$jr6J+a+psal;z+H4l$DPkkY5}U{pe}39{Ma| z4T2s{Z?K_pxjn&b0?juEzl3f;Jam1EqiXgMU9rS6!jO(P;gjX>qN?f0Z=Xb6_0p}m zs!Pg>8jPtS>AbiCxMVJnu@P)qMYd&4;!0s45nPO8pK)m`B7~ZlIB|)^>raVg@OC)# z0d^QneEK8m)N?l)tOHZl@@qXZ;l_cM@=`|pWw|ikqV2{l6Ww7YA&wo(g&TvFn)486 zY4+L^5T4+0@O2bL&t2&iw1l+RYUn6gV3MPARfjdiQ2*C^`1W5pP~66L#s7AAP~JS6 zyhAPwSAaP`X%49k!1B~>#W$!p-CB^_c|oVtu9r(J)d*3MAI5XMq~U#pTK(dbzE!c^Wl zs_6UG@GP!uwiL{qpfkfBMATAve_<@qMQ4(fy7p@(q*^NPAF0^&a{~>eyvWVumjhKw`LjdTnE6w@xn=+s4))-K~afz|JAiGmnD& z7Q;rIVX_4MIF`M=D)47L#mqD%)QG-I zEgU2C+-_Fitm$4x7bM>usIpR<`zQ*lrW{uuTT}?FJN_5|v5;RKrE^BUqI_Iw!q{t6 zZh7xLO{UeL05fBLw_X5!Q`xm!v!MacP0HUKkI)@VoDxV2J{~)NdIx2B=|GbZ23_-W z2vu7@O{IT`;W1NV{@Bp%t^KtEqpCdBt$HSseTwjCN@W0%{l^biZR`3^|BBs9{ZKaw;(J$^Yme zh_Fx%^|d`L;Km3YCKRx|;OdY%tj_k{7Fz*A%L-aEJO^^|Zbf@NNJW6qLv8v$k4){5 zI_S-PDUZkxq8h}YYQ`M6h=JFVANH!Aq9YrO{m~#Qsu3%*t}{+)CZg}^GOOj6E`(fy z2?R8V91ERb%e^9>*>wt34grL;gwKZkNQ}is3wxZ8l`E6Ihg;OcoYayCDh`Yl@HYt& zalbhShYhg6sJ(PAsr(g>ZTKq>^FE$KP$_NU_(~HmjLiw}Tt#aH+`9hr8$Hq>=K<%K z@8Zuyw7^X8(m{L&TXPy2lH#GWfvAZ4j@2+HrS;}+`Wt`_V}cAj%I^%&T=vaQFw22A 
z2-h&9i(}_c%E)1jxuKu4kG0a;=uYn_m*xuTw;in`}@oGMFN;!M{5S_1S_35xHFj!~L;CtA zdpmbu8NC%=I>=a$A=|;RRbcP z35c=nkuKgW$R|cd5_$KC{J{Nm2qC0`o7r$Su%iJ4hjf+ggzlcJ8OAn`N4WYi7T|sI zm7^^+=8OiHsW8SXFCW*KzLWVb!X?=A>voIg9x>1@2G9&au}m;B*ht~hC|F4S)UKt^ zHcQA0V9~SfE}VFd{RN4;^BD~vRsFbP{zgo?joajRal|0Q z0Re4aexW_SLT2inf)IA(*BbOz+<^k9@Z`0;ErKAlD8TB%l?K)v3W#bmXpUycAk<%G zj!?SjWUz)n=GIuJM7GDOrdz+UOA8WvNoR#tdo5s2USK7fzP#4so46&UloOLx(7#S? zaAfsB8?KMROlA?sugda04LnmYNA~9skTfj4pNGcEz3ku72l>1gMKXi$18ZTs{_FcEpnmBJVasr-8rxh&nQ~z#NYCqm*puaDN?1C-ebDhIK(kk8#G`hNc8 z@@U^DTTEU-IMH+cw=Qn>T$b95I#kPS>%4{sRxTdFtFHzOzdx!oQIR=)z54Sb8dVxR zA~SpBZ|;LW>4H-5+Nz7D24-t9N~ydis8Uuo+p?=GCl2ZKR>*}Pg<#9h%+)*$O7a(@ zIP{rPmj%CyKlS>awnY#@M$lKn;xPVX;gKMwa2-uP*gyhdx>xqQEgCe&mpsSNK|&5h zwDcWn()nXfw4^d33wQU_zzvO0O!t7FI-yM>j49{xLQnOVS6%sN*d-(2CkwE78_S|` zM~wX0LEaV$Z)-=kdq}T^yyJ*`=e`;lo|DmlM{-cC{!;4tLsgQ%+nou_AJF;8pMbfs z=8!`9Z?j~{?J?7W;($sBJp2f9Gh;%G@r5s5)v%4rd)qbcLZJ(pf`gZt)1t)?E+u&w zp1@cRbnb<)9QlN+y7S2)tw2xz-eLJMqeWkdfdy%_3A921v1op(u_X&<=qhIJZ(<<> zucJEkH0rK#yH&%l!~9-T!@v(Nmd_uqqy~tF{FKlAo}|@Nk^>@RpJ0~ z&S(M&PTe3Dl}Cx^**>YUHtj@7V(wojGH zeRX&c#B0pi49UIJ;Qb9F`#LeKayJsR5#|6b zvfcITHJp#}+BR!7`)VHhc)f*E1S#RfV;6^Yb)|$QzdgzyfBXRry@8@hqXqq0GPy`v z|NPA!mt8TFkNq9H0!YH5O@mi~Ky^k9vF#6otTZ7_VQe3COgA4^hZQ7~~ZMFXrXy4@sL z@d|t$?kqlKOrDVUL;W*DN?UN@J`Gk7LPI`-LB76qSOfnJtTqE?54lHT>8I8^Ei?efx>XJ1rDfbTT>b!dWr)pDEeq-|3r1-C zw6V5Gr;39S$W#zJTr!0VKEJfu0BkcID!UF=a*Sh5<+?(r3h8jDbfk!8J2OLDd0@59><5oBhby&?>h9(THN9b49USRwQ-;~de0WwK_&$g+b2qpEu zH&pD9qb@(trM|&faA8CtXU-6VJubUIkSAY2#=im+1yPDsN^zN+ZjIj4I&)bbgCh0B zR;0&woAR&;lAC-D8qA$5x8jT`n=Zfw3nm;;QP+;kJqNpR8(ce-(&~wKcO^0|c61+S zj4i=#7!MR}oty&pn1781j+8fdYgh*oTB8OfpZR=*Cw{KdpyFd2`m%H}|Kp^kJ5Mj1 zgYv_#S^uEHvFM%RAIk%Zx8A9>s0~=IqS9#BHCl1`?MOKWsc5?5_^C4w_i$^oQp)u1 zULp%%>SDwgGf~Vjo8Uq-G&o#*1&3jBmj=mcR7M(%KQIfi_F+KI%>6Yr{#f7Khd2d1 z!;|L)1KT>tls0u`jP!*J^Kr6M-}ky)tjM(?jNFFLUaZf)Mnm(GE7MKJM{uQ^^3f(v zrRS$f^WNHDUx<2m(2%N8Gchq@1bW+UF6Mv@3%FjXz~{hB-SmzcW`OBKI1898C7yQ- 
z`N_$iawmZ~m;@?~xlw#+>)+7vF)07EO}wwp*1yuGP`+apB8C{Nqv58I3(&717B_?7^s$2Rs}z8*jaIDsi9_;MT$r%PRmkZcaQ;jUXJ+~w(EG5W$v|*e zbl&)?CZeFa8;IK10)@l5V1HleQ7sE}w&>1;@E}L=m+yr~qtd|nBl<9&%6$Oq-{2Qs zXc+oq?1b{0!7M@5kr4@UqcSCLIn<0DH56wfvcTvOp3)!cXeS3D{KQhk%sQ%>{O;X=LvFx0tw0mA1tfC3KQyYDcji3# z_C2E-qu=t^XXF#RW#w4%4xD(1(|FGiG4avLZwxfKlZw!_w>F#fLV zG^v_m{_X>SD}F#OkE@}Ww9{lb7foLNU2|;mK^EhY5sebY@EM8Z@_41_`yLVY>4{c? z6Bqw@QOv;ay>YPO_ifP;R?~cIlM!I1Sa>PrS{j>lW!DUNl{veB>5H0j`pEa8!|I!l zG`K?y0Sn)Wy!J?q76`wej=PTx_x35BAb=3td7bX(OxPQ}L3!n9n|_Y1VY+f^M}r#| z?Igk)!&{{BGBC85nnoK0MAzEH-p$vbR6X9}QV^4o8F+fGRk70Pon8d13%d zo^NmP@h>-d%ZtPEk0Bikei^PeDKintIIm0!jO{$$ zLyure7vePbnM$gZTXRiy-z_c#`9A{Q@U1$EE{X!L{k0KQ=@%ZQ(X(ajNsYWBU)OZc z;dU#kAcNRp+Ie1Q1h+7il%2~@9Hr)RwFQ>Vq@Z)ZV`%EUi^jz_+uUDBtD~&Ie?Ei+f8jP{N9v^3D-X5Q zji5*#uKe154S(hMJY54*6BhTpEUhZ2P&}!oHde^>T#&>4L81M!rr9*czd6(dVslZd zS-&ArejNHXm%j1b%!wHKel=qG@Ap)TNXk8oz=2%2c_924c`$qq^{9hs78fdkr z#z2LDP8zbgxJ}m@V=}SQ=Qjva?E0cTu3*QNX&!lJpND`>G362&PMMWMsxmy!j2`0{ zoa<;nO)Xw|tPXdB!v%VaIq2C2NqcUltFF^yKuxKfAG@cTV%`YbR8vMLJ-X|-2Yv{^3btkpC{FjLV7V)Ja`O=6+vZJOjyyh}85v{#u z%F-E_#hm;633AKQGKZC1c$j!Y2>6c=P$ihCsxJ&zlWhIJak#h6)W0CIZx3(xh}NPz zq0dijL#ru&DNci>@AFD)m6|e=sR0C*N7)*{h+K4`M%%wM#ULQs43|{#op4aJ*|)}Y z98H*%zW8{5uZiME3UTBwUM2I#Dpue3sx)iAbeLCjf279M16FZuf&#>)1V|GgEV+2P z;KnpLq_@-^0MG(4T1Epp2Ub}=Z1NXZhM4Z<5~a>VxZ?PE-<$f^;v&-#+`DTtY=xOu zACOBASEHzu$iZAQ+1MoA56vTT$#eVTB$N8?| zPR4ZbV#u!o0G;BS@^=jq<>|p&=TWD)lUI4K$zYo&yFMf=23fvCKKhiraHe7z=N_tI z^(*9}EngZVAaCD4NhtxLs=LOx9+P{-0;sGWmvWM;aQ5c;)@2!bh?kxol2_L?Ongf^ z3BEz&&jgSB4{f3#3wR|lhGjzD(@qjvSO~HnZa(S?Li^2w6?^GM6MYEQAfNiZ7Lpj= z%shDZ8s-XGQt5cr+)@p3Wr_hCuO9Xq+~Lfu-k3hn&2fhXJ{8-lpKHo(8bVqV*l$-3 zJq1Zx7z>>SiCndn=Yop2w9GbW`$&krS950o_sM{$Q646t=N;r*X+B6ph~JM=KuIGdvNU?b33Tw;8dlA zPVQFM?FrX0K#PLCxcEYkxt0Y5qe3IWggpIZsyQ#Oigq+I@(P?FUKW-wq`T8nuK&?h5dH5vvTG24|xUG;| zDoGVEX>FgQ!w0a6$y;96gx$`0P`u>mSKK&B!07dbBY-lwb~np?+hPJZ^s=e9PG9bE z!z|aEpyR9ApFcB^K}kO$&2?{O*w|z329zX0tp)oO133k>7g* z40htxFg5Jk^3nI)^FV(AQYHS)! 
z5;&GbTeV-0Jc`SkF)sP^Kua0UQ9*s<8lztr@+r=|nU9m6URU}XIybD*k-dDRlD@2Q zE`Ip|MVLlUviHDcdN}!$6FN+VUCOtoXlAr==8tgNU`KnLou6rGgs)Vd29YwjOTWnF zY;kT6IG~fU!2k#FH8ABNcybP{(`7wwWV38hmO>_Zir; zSezkFO5zo_G@5uW2itlShHLAcXA#pdUE>F)wT%OAwJn^uuGMJo)Xko#>^Y+ppXH3gXp{>vxZ;K1arG_=1UW|SL0DUHVD zu^Pb1udh6agYWOF%;@U_ZG1*7C^sm`r|U2a?fX%uxyTi6So$U^wlVv#d}X+Ye+bSb z$2yf6+*;s5R^kw;Oc7<3Qyhf$99C`~f%@Ta*H>coZ?qYqMyNx5lV9D7LPrYey|#@v zpGg!P!P0%CxJ@g}e2UCIONi_P|Dq8wDFT?6qoDmr22aq~beXZ`E7kSGa^xguY3v@d z|M(d!cZmF!G*1{6Q<)teq55CJ^@n8ga4iFV8#i`w`5kILbOn|urIxd-1lI5a4YVKTs^!m)vMWvc zGul^%yObxq683F#RS6Dpe`&S`tIDj%U>83wck-aMF!kGzm+otEI|WJ!spqK^9Q7~V zJ-tce07J!pwZN236ItA5hO4IhBJ$Zs8$eG)eBf6*jt6#yjX|f`Z(r!oJBdntQ!0ETJiM1P_g5ZXS&i9N`DI~V3wI@;78xDeAYsSJ z9Q;vkS&&~f7`(j~WRsz8^=KV&k}~lC4+PP$V<L%v{m8bC>5q4x0ZV$q&ui;+9xS~4Ry7F>-wn->iFd;HJK#Iks zhObRy5Kb0PflU~(LdS5#>vHuIV=qu!?fdIr8{zT2s?#&nj#=#0# z%<9ZrlERQw0%2JK3 z%3@BH#2$zJZ2}uInk(PIJISZ-9>0mXTY>Q4^;O6sT!Bc9-%j!8ssD&bOgUz3;<h5r4L}br*;#4!=ucUfei-%N{Aim=2pfe z?WBKW0>?S?*4(DSm9|PBjpVKwmHYO;zExtM#0%kF&^180eC&7)O9?FmcRCfy8Virh z6rj$vhn9~$1^S3FjQZam=G0z2Coj-GgrWS#gDrqKuG=y(u63H+_k;Dqk=duQO*hny z$QE&Cwz#8=7LGxW_MX0PGJ{+u7o~DQ$92kC{nL7v8Xc?babYdWOAa$1wEm} zU{h|=kXD^EBl_Qx|uP#TebDyH+7~XU8iqDe&7ffkI~7htHp6gVli{;mkq|d z(FBlpIYXbO?@4~3OYcE;AOYv{xv=VoTyiPLgBnF9Q&WP*QLAgoUskq zb8hoQ?_pR)#^-RxFvT9prNJ8PC&g%Y)v*Dz4KNa$!W<3O9w=IQCF%Q6*Tj&HRToxy ztZntU3mGpIif4dd>jYMAYVfz-#C>eoH>s#h)D&&5dZnG>PJJ`0U*0F*+*5<8_vI@O z(9R&7aPzX8+-_1h5Rx<5c?Q5aJ1BqKlY;}yVXA99lfG|N?7kiq_h0tXLU2aIzOTtw zTN=WA@~2#67|KtZppwZUImes_|6K<-VtM~zScIM+3)Tw*E9A9nY@XCHvPL+4v=FcG?MF2vfT=MJ{#BL(dB#PMslR+e zr;j5rzWQhtm)K6J@n3L~d#9_=tsmdby^H5hHo13=|G)qq#7SA6m#-?j#~U7ypLwvx z#Cdq&cBcWmq!^Jo?hgZ%aeCUS)>V3Rb`bF2a4Rk{vwS3~!b%1IhU$e92ejCHhU;O? 
zeElh^^Amdq7T3SRNj2)IM{;67^`Gfg^3FV^jEOBcqsNCM*v=t&S5j=Hx2S1D^}c9=up>Q%gRT&&>8 z!u>PQ5&WkXpbWXBksECo2`TDVcCmv;-$YJ-7-27ydmrPu+u8&-nIeOt4=~whOm=%P ztl(6V9Ucwq5zIFimaAZZJ@mT_*aXJ*U=@;R)@4MC|1god7UJgyVNhf_YsZ-Q2we|H z&SBhFr5t^Y47`SWM%d?v#^jS+T>`U-0{9>9xxPYU=E;@eK$b1`$t zN4P-nQUz|Fs~5NcNA9}|El_r5h|m3QjiXK<%v3#eF%oPcf45uCJq5Rc>H~)v$p?0| zu+PK4G2(shDIFXE8E=*R?a%-Jw7q$FlXcqnol8Z9GkcR9FRv<`O z%c8QQLfQtYwxz7%1{B#`2NlI_lnP~W8*WFDaonmPAgH*YY$`=SP*B{_ah&(_W1i=I z-{&~y{pWq|=f00))HY3W{g!k2p5OC(PDwrCz@d`42Bp)E{y-{D_XL?1+4j~FHtr|N zNH{mUjq(e;B>-|-#38r>B*4rB=%+xPRN*OlE>74w2*q%HWmB%v=W`#S$%o9FAXDzY zOrKtIzNUOuF*cE;qe+i0I6mV0q10VXbRf#s!TmueQ48U1Ks2$o(OCY3%Z+3bQ(Od; z#3HF-`sQlH=>V>Yg~2lFec?D}vRrjW($Yd}V?Zv9*eJ45Ly%ZE2oq`y0X$daz@W@e z3vXJ(9c7~?53?wNPKg#dDeRNjL2FIywIP;>RIogD9c9@GkRViZ*!0Wj)JLDsM{t=+ zJBuV#olNd+!vUk7;+if)t|AmP#cAs6Mx#nSqKPKttRy*%HI9%7nsCF&5}>H<6BMwA}FbdoPz6=$-+eK(InF#qP+p(JmFIw0d`;tn5x@uQFyo)60ok z4YWnH84-%3#4oS(FvMMs^L@rIbOWV=Ha8PK2gi|yr%38`LTv&;zvOti+ATgK#Ct|2n9;(J_#;1HIhekLTVyO}dZ672lj^pFra&At97b$glL#QRpN*DjHg5qqn zp5snA1GdT|2_uowNfUrjP$T^u*seqX&HtrcA7=3h$3P`T%VnNb@WE+qV{XXS* z?96N@oul+F*Q>nAY@Al#b^%`={1p-_ezP2kEDCJcIO=mWvfDRf8V6db;?n?PZ8TKs zLrkp8hzTam*?rXRF*(aMrf8`(AQoLNe+R!I=MQ?e(yKST8j@TA50$|Mg%2NGREfGYJ@(~ zB`oJqS&^MxRk8jb-CV_Z+O^rFQj|Uzx`E6IpivSRkkp-pL^Mt{d4ui{$lEsu`6)GY zL@!_nOUDo>VN{u?(b+}{p^pyOz*1Np`LmxLqVv0+ z8xyOBdr2?@!7xmX1#XGbzC-<$9%pJ+^s1^--&J-CU6Lt1nqaNreNW5Z3=V}P_Gjf+ zOa%+&p1(RMMEM5jhK3Qy>XeJxij9Ff`({5{!c4)C-B+H+z|1Bbxzxo>U`FzLv2+lb z*r(=L5qlyWnpdERjw>%SG43PzJ%=tmmN3qc7Ipx|W74Y;Q z>_F5C*ItK5<_n#3lPVbb=1!Ap?u0c3kBx#xJRHrR5ci;j58exLo@JuA4cr7QL(R`G ze?`Lq^cRsu2c>_47MOFrh~rFG!VM-!rQsCrCvV&*PFfnvDFkCiY!r{+biw74Se=zR zA5Uedw9$`81fF_OHr`|*L#Bmiin@nH8%~)q9d;IKfN%(V8-lcha7?FEDO!C}{$6Tf z^~3U9HvjQKeW8z`Ss?qUHI~q$gR(kx!jw)`kFKDpS3@z?U+|-bNd=fsd)=yZgQ6Ws zRQkZVjQ|nXnQSDK^lob_Az=jIVw5nh5`3T1P*Ci()I@0?2uM=~9xhhj@d2Ui$x?!G z7U!8F6S^xh9e<$T*U!q5k?Z~zH-VDp&+lQarDQivmX)af)~!}$hxybE3R|8u7Z3pd 
zc9t6xl~V!g!h3l}SkB>;eQGzQWJ<-D_>=FWc~b;zcWCowt*jGc8T3=Nkxs%4{);poD0559HMN-120Pk~ld8 zyavjAs>L%dt`7G z$Olm?>R?Q@sAGZ_RrhidRp|AqVTYa+Gf|hm`DzRNbH6;APOFDf=tcKW4TOYtx~#$k zfm&AP7Gmj{GzGuljX8Yhn8OO&0tRb^hhsp`4J9dSgf?=#TG=QTy}lR$btf`)=Vv0u zWyC-WTvAmeFoH`DcG{rAkvwi%18ys|s{8t}I3PJ_p4?}W-)!ad2vAdIgc+oGhYHz} zqakOucZGgrj8@9japh1U(N6)gi`Ji7RLCSe-Vtc$vn$kT4aoPt5@2%>P^f32P6v8z zC7eQtGz{R2A#H_uc|_$#&$vy~xVQZR7D^?x?d9}EDdOFPp31RLpDGt~&uAP)$srhO zJzzv>%QRjogwYR}-G`WCbiIryb*kBCDo{^~&2NHrOFCIX7RCfDszN^PCt7nt2xF+x z9?TLVH-l46LA1m{ZzYtau-sOQNS_1C2CKUsj=eR`Kpq4&yV0qIvBV`aDy4KdPZ2na<-o)ublUEIBuVzNA<`Wg*g z!C6$wG>I3#=5V6g3@S;_NyAA9#Xfmn$pjNc5wgD&kpushEPbL5U5|JY`&thB8?|DJ zeSv6VZ#j?{*#V?S3P1PEP?*TA<2yGNQaJZR5)I4542gLNnO4y1Mm3W2NOuRA1L+J` zs9`WdleqDmjz2JgdvA17wIO0pamZh$n**U+il=1xxLv$XOyomyHFA!;xrLa8fRco@ zI}5EupG1^XixEwv^y44|qEH{<=9(hGRL2qg5kk6}W14Kel@1(SBNf4ob`Gqo zFC~U)(0JgmKzxI82qaSru^)V2uJx1YqS%Mr9?@?Q`3Qha%1!!$>y@7_;M%sh5amBV zLzHvPIZW9(C@G)6wFv4MqCem887D{DXx*Gf!{`^JnnfrqMmpoJs~wD$#GR$&sp&=# z?yWOSz-4W_3~`7w)Q)iwfRmF{JZ`7TNP!hl)G$fj_8T!fXVs7c&f@jgO1u=R&Wd7o z{==N)cqiSa5H85ADEg{O*HU_o=oLH^Ec>oYP8 zrT4uQ^m{~!!h!0bG%OvjI&U&_AhqOAr$9(`85+=~nMUeZj=}V=RLUKF9NYj#<)-xJ z`3NP`+p7>tY=&aCb8T?^bp(sk#ORb>1aJ05BZ&3emtlTbE+iaDgoc=Gv~DWp- zkR+*r_e!^Rfci(O5uBx2FT-2d-(TRm0^0GtJFZA?_!YXw1=&huI7!^%j$b49JgblQuKlGwB5cf!|r$ z!q%+|7{d8$Mw?Vdb8By~!$7E$$UgvpMuCDkON5+B>XSkAIZ>*x8vLp2AVx=J0%Dk0 zHEKM>s?=^`)0SGvCBUu#ISvhAX?C)W@F|kwuC;-?krHV}rTvbaMJ=Pj2we3{ciTHy z-;4TS0fD*M^1DmjN02R`D|)yIG*90;P8O@KoX>k2fn6kw8VUlQ?*1m2sWRzOMS)<3 z8q&WDEvWreio4iGy&R6P3dzuKC47ChyAf6*G_eAo>~j|)!xB}~jAiM<#72M=f)56e zAR#H6`y0v94-KXlgb<$H;a;GJXDDK`e}Q-^4~;O9{);0<;+8~TgaR-=6GV>&#ijep zq2Qvaf)J`ASvRTxCdvE~2U^UuE(IjeTbs|d+-J7JCI&tH^k(GVHcv!bfI1wY3|(r( z>Wl!fQP&0Um#&kT>$D#@23jRh$`ENDVFen%`E0yYReh0m;Ywkr$fTz89iJOgPK5Ai>ACnkEsa(DM-!`v ztIum;hp)c{BW~YWWn(OvTCdx#VN(~~rjszhz`=cfnoflT8nb;xH(tKfEVufNz+`Js zg&>TCn4FU3*zV*E6Rg3s`7$S+VDw0LBePZdQ%u<__b&iDChC^E#u@7wkb(A@^lnrz zE_idxNV+f;t@o8VoYRY~JmBKq2DIuSE&&bKE6QfL&7dJwK^;X6;|&sl`->jzM~npj 
zIgYFWN7h8~us>}piiPfsDhUxJM;Rw)0mw|rAiqiJwr*T#vUfM z4G3_y`P{KRoLn?|84!nFx&qDvL_=a?$6%F*luv@%1OkQ!M|6%8OXC)LUaaBZp#-P& z3aLIXpvl@0{iOI1nUilt);w9e{fGw*7G~P=WBA1F21LeD;Kp03sol^`pndJnk+6r0 zsHR6iqU%jxaDT4Uj8TZQk+Z=EthL83<|lr|)S|zeYT^XI0CM-fMj8b6m7yt&EOwH? z2oxnz_M%yrq9~@(gCZ(bCSnSJ3NG)o5DFMRNGPZf(4(!mOGe}udt}LuIZmoh>=xL{ zP(*P^cL1xE1qS#pFzMx$RACuZ!Z5xReX1=Eq}hl-r?nYm#W0rq+WxY6qKWG({qjvz z#0yGtoo?~eVNy$!|?i|QE@3j-V)5^iLE%fh)l%s@;9xh2(xVCd1&)C1l2G>dT zP<`P8_(Xdb-xQaj3>xaKvtEc7QkkL(0&5Ft)}C@W$AF@ag+oT!vYUkp8}}^QI+d^_ z&Bh!vv}}@MxsJ}!@FYV(hi)=xl8O#rA84W1yiz{vZiCA~AGv*Vos&U3lpq+rC0UtP zn)d<49aAN%90sl8h0~DeLn&C5k6QSye@h^gN$s$bXiqluDI`H-?PK+DOl9$W+2$q5 z3eYo6;AkLdC=tI@&n=GjzXt_;;DWUbyw5jFNS7gSpoM8GcX%l^PK)PT z{U~j~zvMWe7Y+NMzW31+7;(n7sxiCd@tN64M4u7Q!edrjwCT z(vVkNg0Eo0H-09LjxbwM>!hu}vR=F1ilBI)F5grjo_=y*rjqnK@7<@i(m+@#;U;95 zNdkR#gcpD4$0MCAByQ|cFaMlGk1*Rq0!e~$<8XsN-&v|^v3ry7{a|fUnjS3|43)12 zI2n8fmzOes{>rJ;^&@f#%7?0-2fhYUYt>w=2_qa4h5~SdX*=Rjd^bSix1%Qqn1I23 zu(1Rs7!JF6~vS^RX};uoQ}jEL073RQN_+ zPq?g(HU1qYD)SXlx`Ur|90^N0m@{%GLC_Mx%U}i1fXQ(p9CT zJ7~P7urQ^`LgF83KdUxEcdC&Or|Kqivlh-4T|@n~vb)}^bp?W`4-lnUy%T>)q+ zXNzRWP<9#hfmsgts%lgNlmL(oMBqC+TFcr8Q7w3XSRsXbX2J6o$I7=m%F)IKAA?Fw z3_nDdu~UeRCM+x=Yl%=zp*&ZymE6jYMl9yy6s7o0dV{tKvhkEu0KJaegsvx4JB6gU>*R$?z;75A zg@$3MMeJ!eBytPkK@jR~7Jh$A;4WI)tfc^M?SR8YYo&XD_adC^Nb z^v0nGATJKc2STNJ1O_a{CAoy|P44@pE|g7)QsMm#ooPbL*iPdYywH)0!NGtQzZnm~ z6k1o@uJhesQ%>%PR7Qa7jKOxhfa{N8T=HjUPxHW zDNR8)p=r9<2=cv~+M@hd<__qwo%#iI%R_XX(fo@ zgcy>eAMbC2w}E0pdpRMM^z0`M18r!A(+y0^75O*F6O=N--gtdmx$g8CVr}JcpU{Y` z4cUDmEAoHXMJs8t{M429WmO46=ZXuw+D(&<1(8o(nWE1GBtvBKWLwq8Edp-TCxi?~Yzn#qVgNUuvwXvb+q zP|m1hF69X-qa;R30bWXHYV>n5rjluG4pn8Rm7aLh1QV`aHnC+9-aRRi(es*{aTFV+A#;j2*^Nfb|&O7OomBt zYUG)Fr4>k*dz~0$2?EafkoJ+BYbn7CVJ6AA!m_Izv4j&>7(vtS2`GexV}@f2 z_bul`VC+-PiAR(m6+Jz4ZN$0Cch6&mei~kaFe@}o#2;$qa6bSsXD`Ap1v6Z|z{G7) zd0wsTovT{Wq1UKMGl*u{`*$y-ibE|6e?6`Yphh*Aued{5yW-($c21C@E<&vJqVcBm zWBT>cW956=-kG=tvjwet_JC=NgKUhip@y&B!(|ysjRmV;D~=k{KE5?)FzBP=LSHuG z6`02s)q38;h6#Gu3elVKn)2j5)LZr@kw2g}gPB0)?(A 
zLS7SbRy9)mb>tY~xK4JEa#Gz!>7*~1yQu(P8oI6&-aFP~$Y{rGmWv8Ykh`E~miGM= zg&jvd*$3^dQZY*5YqjUu^z+lvI)fME2IR;;yVyc!H9mVFkG;8D6*{m)+b})R6OPvc zzs#_;pLNn%z*q{p()E4W4$dU&f@)iqj2WVO5swWpK(8jZPfGc8ndXwFTzzd%_9e`j zMBH4pHrCluN)NCIa&cX?i`f1=;C-2gbci4<22CO5&o$)k&&fTSIa``l%WqA2t{a zr_DcpZS2Co?8(y77aEMt*9}J7)Ymmr$(q+Mu!N4h-o-l5*Fe6w%5QwW z^R0rGZLS@Kb3RJ$=r;dY+m51}f9%}ReUY(vr*ny|cBgAa$rC$^?+hK?*<+xRIj~~^&cgeY9Yv1ep`;T9|*UykvvbVo6 zw|;NJPhFBbZGUowd;R{}JHsdT*R8BBJy5@D?6d=^2UF`0OnY?h zi38WHSz3B<`nvn49cD;B}j~mmX?-e&4i1OE3X7)}HW}hiC7tF8g%OM`N$~bngDthEL}mp8Mse*B@D0_SyVn_h0kb z4JX?-e0JmM7ry-LrnB42KEL_Leb;<`%Pr@QZ}@z{?>~L{`9i~m6Z94wImGe#=Up;kM#fb;&A80u7(De12PCVSW<s{O%fPo7%;=)A8^ zZCJCc=T{roJ#g(;n>Mb0@~bDG-tyH~Pi=my=hsg^|M9h7KeP43lV3mk^3Pv={g>A- z>~(tcHv4s_|7&OIQ>UNX6Z!h|U-u5`b>{hx#$9*jh5gf>IeOfA?15^{2nv;au+derL(*#_xBPZ+QCqw>>YO z{(iS_hv$bqp-&orcqjJd(?7gh^~>oW-s^u+<&S#@<~9BJ{*ba~e*B;&dgjLuM-Q(2 z>7y~@n||6i;hJZD`uNK0&;0bs)a8{w@2_9o^z(t~8=m?3VB?Etem>N)qw<%-b3SSM z<OU;ZPRTuLtjU%07i!%_XUE>ki>A++(=wY&RoB?il$?|Da!YFGHZ<0ApS&|>UDJE~ zh*486PhHb6qj^eXO7+}XGg?x;2M@S>%Gl4T;$gRIhFktvYM8gwU>$$MoEgoTtuR>P7aBriGYrfBXf@1hHB{BiXs(~pTzSQu zWb4(OiuAiQ_aAKkj#-mObXP4YUvdn!3+y6e7i%MDzMMHlAYe&PRW zGj93sU%&Iqt5)#*1Mp;O?U?QGAJ)#XMEgJZ`Tubn{-3Y$@7+*a$T3>f<;R8E|1eWs zu2`|fefVQf*T~dQw~jdMlS9wV?6$0F|FYzP#Z8B{%izp2NX zrminG?SHQ63t!W9(dm^e!MxvgNLvRZRyzXgKt7=Kd{nZC!#-Z**VvaZ^FC_S49kgX=vxk;X~SHC?f# zY2wGv9EvnfzOec02f~wIY@Gb!h>RB-4?fq>&E`4ueB;sQ#_rlOqCl(My`}NZjV-&k zG+p{!lf6qc^$DwS*X}K;t{a-)T$cGr_^w0GHPx+r=86}ac5Qjq@o?oAHp}UiO_Mh_ z4UaTkx2WlZmzoX@YyI$}XTR8T+g;0Mez>K5a>;b}Q%!vIq0u$Q^G@^GEnRX?w_Q+^ z`@p~b=ITS**+JXcpwXjhCJr7o(uqVX>8Rg}gat4tb%&sn%SC5)$jd-0azNOrVGk2! 
zBcXtob{P&H3LSJrIU`z`Cl>M|j}8ltB1c*&f}LKdr|?i?ydn}dkMKl!8@g{0MhnN% zN@2Em(LEuB4pTOXo89US)QD`F>HL8-_ksrurizN4eRdg`Vde9x@v({+KvT4N0oA~` zmue1Le#k9^wMkX9t$KM|g#1*2DHKSfX(|-Pn$Q#1jFrGjbhWvkVf*%sJiU?w;nuU?8blm$b+ zv;`}Jl%b=15|4#?T-1ZY2kYbo!+{!xQKAXS$%2%Jy)@TS{{sFomxf}j1?s)Q09|6S zm>J2LfH$OVwIGiZWN~~xZGDqau^v-lJk7zrQsXzoxf;AMfC zS4tCLJYpVZjRkoE*BMhZ5J_`8qfxIK+!BwvY=&@VsfX3XM8hygx+bl3!Tp8-SCVSd z8t-yKDX!iEe{8mxz;RPWt+s|6~v~M)GFXAP8 zrhrI?dMx6yp@#w*#&ff=nkBKJKOe$EvcNFsVPpmUuC3Ie!#*!t$D|tG*`tkE{Gi&d zw*psQ03OaeB{c`VZ?FX-Ll1d4pk5#lP^hS{dW!|3v^TldhGsTodG||bR&zqVXdp2* z)aSnoMpw+s!uXk|#7Ry>wbGJ^mtHn@KOS$`!(cM50s~|aP4dOG?+hW=e--kGh(?9{ zO|urSHVnA>0>#MkdztL4Bv_a!;kthj^&EfDi>L!)yR`p$s9f^Q+%q4a#CHiM+qoKS zs+XID7#!+@X>Je4p3*g^0|sVlrsn2Q1v&1N5zs+&ha#LI{vP>URwJI+q_sH1+?)uP zE}k5I`~3!EMMdP+v?}k`3eq(kcJkG1a7=&!MMQIu(55p!8y6!d4v?)y- zCVZ5cXU;l*t=|kxzMzCXkMJ$!dpZa(}9vY3&0G5VVM15YkJ`}a-0CwNQ zB6DiJgbOGIaca^yFf4%r(%C=`%$DtKg_t3}Wys&*uHp^C92ce_BdEZNC%@5Vlt;X1 ztJ9#yC@%A`8&|2L@24*ezxS~;vni?S*b)C{CmdCm25JahuMA9ZEoQ{whqZE&{ zPtZyVBVNoKC#ih_MYdMnAn_}u$8bDzhbJ810z!6T<6`C^27huNY}@3x%1~h-!j7?o zt^Kr;W%sJj-Vu1qFp!@6Xxz(*X6xBbpr6(Og>ZdXpGZ)qzPPI_J{&M7B7VHXWRhK7V6jrKY}jM z#6N8u7!ICmmnjwvwNEsJTfau}1eFXTGkP7(d&&cRZ;lbIwXx*s{=7pt;)M)NPY#DC z5OS?%!|)njE*a)07-}oM{rm-fJF;4{0L{f*t7-D&(6)W)UQK@i+k}C^frP^yhH{<> z&%ngl}p!%MeG0{%3ncd!clTkF^^F%!@IcE z$YX|6qlhh&ebt_YVr`52^ZNj|8@*^0dT~}Ev)e)q!`pfv8Xi&YZSyc>Dm5qMrFg^l zhKTDI2akYFiDbGLoIjzj2kzHsn(~Kv6MtO05e}}uvx(CPXzoR7OLdMUrWzuxyXZ~y zyRXeu)0^5&SHd6V{6;)Z%sq#hy9BL;!{5Gu_61+Sb+#}B>w&VZM;q+HDR5zM3Mfn- zS4SPHxtDF?m?J%we)~P@yzUBYGZ0RTH62!1gg@qqe8K8~2j`7t1ja|9$2FZY3>-$9 z%^iF@+43YttT+F7N|TKtfqZ)q21zw5807b?vphQdVMBRj<8^9EdQ6Hg(V zAPGWy;06&DY3tokD&t8_Z0WFZsRJHOr@0VbO$*AmYF2!kSsNy)3@es_T|cW0vji|_ zuE+UI9O5n|QvAM|_5;waQ?q7}O`-iJzti480_IH$9Qd)-WE|i|;^t zBPy$DQpULZWOt^GP2-Wb_hL#dlLNi@K3Dzr%Q%ja&XT;TzqYyvRxiggXVzE{wOxpg6F&=FnWFAgE>p`?!uU&2ETX>NLOYWC0G>k?Q z1~nIBN`^FN6=FuQ4xvcftuj5!6UY4%DbbG=4e9xr95{j)n%RkMT`#M5@VQ@hVBIh< 
z!R>Q#;L!kXi$N_YG6m)KbW9E*1Y`4%Q&FmYx+y~|b+GWNM?@<#dYstX2Mv9}ZV*B; z8@z4Dp?x4GUhyqdX$({}-0`U*ocPRG2R~z1h&l0YzF4)A_nMa_baF+s93OUvTM@?` z`sq12l5YRRimN4#Mqx7@5yP-wbE7JY@$z@(&{Yvmedoa%#4(_LL@j;p&$Ssiz`pls z$khd0gPBkfQTh*1uv(qCFRmWb2xYH`DI@u`2~R3p8Na}Etx(!7trP(P8@8Osl1nm? zkdATv{WyAfk0`c&Y_;(ggmywX= z#gC`$QQu|kh4L+WG4M>WXi}_KPfdJmh;I7yyU770@8HFo#KO)*5dMea$&-JUqqUTy5h@^o%%Q@D`Nn{=?Q_WRxKWv|Basu+OS53IOSum78(SC*xi8jcVe5$$a2tME zoIw!dwl;HfCR_;H#YVJ?$Ccr9JfqxVB3D7tAd4|x9la4-)OI`QR3HqCpcsHAax{>z z$paZS;v24K$7%C!A_$foJ3^8{bYkAEhRVT8jw;XV&m7?)yrS2chC2RJe-CaITNCgi z%1`hgkx}WKlD@kQRitcu5V^48p5kWojky*jCgpOYd_v}S>Sng(Otup$D5e3MpfAaW zl^#kaIKRv`V892BCp^Pey8IItqf-{Ao@oem ztaT8%sO(vCOr)rWiat}b8BgMi5KBd*Uk+TTc)aswG=pKj%wn!HiJAUNj$EFj!XAQq zHS#wYay>v#Oh9hSl12u_`K8vTm9m>isE{wybW5N7m1Y|+A+2pido2)HdI~0{DVyQk zG3m_UAK}bdRsu!zv)_>8XO=x+7_drHtwaaYWfnvT;`TsWL8po1fKgHUMR3&&!Q*d1 z86uRTtnbs8=gEpJXOwJ^oRHI{l$oarUxy9Aa%55dkh zX76IA05u}6SzOA`xS?Tx>j#pXZr=)*l8srWbV=T%Z!6iyA7}}~|p%ZJ-HLaD} zI4qo_>Q>A?HlD90f3|1NB)0FnB2A`e8UtSW13fs*JYk_+X;JwMrZui@MNwjlDN$e{ z9-`x-b$n*25z)L`vtZwmVx;YMaM8W2mt5wM5A2zxwrGq%F5IcfI~J|R6!1o6zFMTs zHvN-qD{l*Rh)yg7Y|P2;<=*TYF{rCG6h!1|s|^o?b8^KSD&z`AmZ3fnk_R)aWEV84 zpAZ{?wNkxWoR#{)gQ;S8j?{ z#+Wbw<)DV)qw-X)#mlvBD>4AWa7p^E)~t#=>7!Sbp?svZG$+0*6Ocsa2++BdD`S!B zBBA!n0F#kC;JJJ}M}4*h&q$|Kfg!kbnK9}~D}}<)X2q!cVy&kPPJLxps8(LR+GHWX z52uw%UIv}A=x;EHEfj4%=HTyrI34{vE=(|BRw2NEiOjCTwi6nVA0whPIo!EF_bGL>kBNuO=%f3Y{DG=VrR+5dw_5&_TWzCp&c&Vd&!i_C7Fd zXqtQ>-O6a$c!aUAydWl@5ZV&R0+jjO_n3Hz6sgGtFki(G)}C+@!VxSHg~@e=27l&h zV=%;23LLpSk*&F9l!Ym~B(k{cKvaJ@6KMq3cO6&Kd=fYBT#Vh=o0WXVgtM2g34S7h z{9Od6Tx`zQ!#NBmc3oG+N`aZf>>Hlw?L?e{`MmIj@u0r*+QY1N08`UgFYn~PC2WxN!E=qd7 z{V5ZsB`mYm*U1z9+yY-Vrl08MPy=|$?289k95~&xR{le?4nE$=`fCBZ9$Qr=FNc26YTI_t1{wsCJyq;StjB) zu?u0j{T^g#LlVhDa|if{+p$V3CHC+~+m5PkmCy1!@0QmKWm1Mki4D46QI7U6Ch6(0 zarf~TuB-S6Z6ZNmOm=bQ=|TlmiF?{=*7O6SXwaDO=Mm5ff{Kb9@;VG8(PP6>QMH}$Je60DP@4Fzh zqBrKlgL4gUQbs2|ofMJPek-ffVZ*YhI5LWx!oP;}F%~krpuhEfUieV96r}SC(uJsf 
zk_%sJ&$L-p@aRqc1X2<6Wx5>!FAfqCx6Q0yV&$9^6gkO}iAdsMhG=Rn0OJGx5GCmL zXbv}(Bff|u?iyU8JSo{B!H8ULVa0D?geO7qA&w%p?E9{XEL1Q66lICBZyb*_H@3ms zy5GW44a>-kv{DbaHXjC*45(r*)t~6rXa6VK~iZB9k0#J7?#56IM)n5r;X_y*h`2rkG`P+O;70w@REncR>XE^ii?u+L1E zwUC}tRat4zw>#o<@V ztKN9W3DxudhqMm2GJ*k&oJOVjo9Rw z3=1a2FQL57*No0casf0w;6BDMv<0}!@_w-#K&?YNV8WE@m%v)%VRjc=R8_)0KNq_^ zp~S)BgNV@n>(-ykOnz(fdjkY4JXNX&x*%Z!kAU#X13ZsDv&h0*fXWk{smV)iMs-v6 z806j@G25NoQ$h-0mqeWU0w&n4v_eGm>#S<1hAhmtQRKm3NO30LS1Z-m>uRDzQ0}m? z(y^dCoM9s~R!3@2p1G71oV^5F4P!W!2c+M7Je&qloO7s=Cxo526vq*u!CUDTiV#}@!h;I@V!4tPVkzJPMBqd!fsIkiIGF)qdaMWV z5NLHoW|)A3NoV}#wPNkcRuq@;!Am#~;F3PCJSN;EQZ&6;m*;HqR2k!pu_-H@ShBwN zXwux>5N#XYVdRfWtgZL+ACN^QoiT3lr6cqv#JVN7LYi&*pR6<}gybQ+4c|hsE47F5 z4e?lFu!Wcup1Gbhkj|3@K{{{GXuSl*xz0UKEawSFfSf<>Wj`#rH zo?S7KzmQuQ@j+b>)VilT9S+;OsF8)qT!MJXmyaCs5NEP&ygsrF(4m`jT+PATo~8bh zw`8?Hckw@s>1|KSZ@Er_DXuLETI&PaOn!cGg)HwbyK_O$O4$km<~D~Rm^SO*hk|#? zV9sEth zDme)slwl#Ws?#sal)s|I$|p&E4{c^S){4IeFr#&4yvV`Q&ZD?F@+$kU8Egf~;}s=U zX!TMU<+H#uz*}SX`4-udZt!-*iO>S_m_>?R9)d42P%v8+!Z`~wOOqXHxPdDxQ$1dG zH^g8?tvR;N@>*ofK!e8Wu3asRct~f9KA(z?hAn)Q@#bto|Zm#HV12Cl= z^UxaUmlGuhe?MhZ-5ArEzz zpK|PNjNuB3vw(M(ub6NVmyx6hV8HYehT7lJh8lfd{oN9x+=zIUYX3Ef9 zE?=q~o3x&jFH@aq-;tBBnh~bzqQIOsoMLy_D>o<;1;}Q1xV2IvKd36kN%bz(W~yxM z$VHOKBlh}LK=4Uc^-Hb#9pDIf`zS?#sCYmi&b<@9CaKR0n4mQB>T~UDkCSda-_6p*BN&6%y-V^!Z&3c8pVf z8PKB(#Toh01EQ6qi_MHWm*es_;Uuv zauW7>$b~kT6vR_9PN#{AKrF5Y)NGlC3Wt|Ohz!NvSfG$l8QGir8ty3T)HMd5BnHu# zF3)$BK?Qmj+LH3lxuwyeZI3RNAGs3j`fyXrUC z_;OyKeM+X;u@e&$&_9Bq;$c~DtRrJ zP@9n9r7U$W?4+~yftWeTDCVR|cD}wVLmOJi#irE3QL^5XDORD)vQqdD#a$0Tu{cVY zODXcT$e(-Saasiy2Le~?N>T<3*T5_9D(#;gy6g^6I3{k=TPxKKNZ6DE)<4LR?QR%- zgH;=Owvr2)Z05qf5hk>BtdTFeccs>6mdZMC>fn|QAt7ztU~E%I32Pi^G0lS zaTnwuaf?MfaQAEz=H;1vvYFtgLs{`cHF~2;~ zT~Zcl_o;OUTe@qD{hLfV=0zeMcM1H7e22lG=m3)37?H&yZ(Eg3u843C`cBAXZ^H7Y zAjK|1%ufUEfodf@N>q^2{&ib^gUlVVp;)E^F&A-yQ6Remq@QWSF_>E{^7jn9!E?nJ zSHPKQ+Ioe}p6yM5C)<@3sQ|PAE~qd`XV9W(Xu7=z+rwFD4_jEkAb=>HlFB~5Xk^zw 
zDGR+)6Cp-8{uyTiabPbX$u7QSu>}_w^lGP66Wv_9&N3XQzmXf9fE?&0&3&pd_HlXF2PzA|6V$#{=uNguZq1XY z>Gao61Bdg1g~cjY1YXvj(1uxIX#h66Zm) zT>AGW$hR@60xeSrYdxKq4SIdMWTG z5p__$Et1C~0B3DDWd=xzQE59K>dE|V`6^#&tk!s_6u4KzYQj&=gRp?3y1ojSBdFg* zKi@H!qKWWp3w>^74p!A9g`c(<0vWx16zbnojF4D1- z6MAXw7NGH{Xi<*Egkb)&Ifj7!-UufQ$kngkMh|mOyvv|S=e{JZ>@l$y%Tg~VhQwi6 ziTc~HyynAmI=d$KYHq&AB4(v0_Q~(PlV0}Xo`?woSkh-6rjpG2HL1^#8s~Y?=vN+9 zuLN5LK>CFcqr+r~w$7E!8T{E(*|M@Hp;17emTu(*z(X!HgnaT-U&$(T4P#Ru&n8|> z{Gu|Uzw{%3?+oBxBYG`?2d}O-o3L+kjY}V&kxe))?-k19c`CPAjwgspoye!93M$aM zIe4>8{f&S-U-t#N08d!h-QY`{rMg94EsO2iT3)fiLb53jF{RH9lLAkInU0}g>%T4E zzOy?s&5&O4>*GQU$cV`39HMw-Ccm`Ec8DW<{z<#5N>1geY=4IVP`M_;HinkIqPqzB`$}t!b^|6}z}?%?gKkYCF9O{D z@?TClWTqAYTrGog#e-S^paJn3_XLJ5CT^LDQ@*OFmFiR6*I{a1m3&`ba7%hWeq5KA zStEfgyRp3ODG)8-pejPjkyZFw$``TJ%Pz1xp;1qL@5hrqJX^vbaq+<1+bszVf^FDn*1~ z<-Y1BrN#2kXy;KwpyPMNt@2k-%50Ha`w=;XwGyl%bwC?QNXQ#^jfWj3y^FM4R5Vdd zxTG~meW8T{7%I*oxxveOcf6!OWipq_uXK4w^|#B@+%`5_#nrXN_GcY3Sd6&VM1@Kc zH`+BCySbU9e29^g>^b{`ag-()ZmDIY%~ZFL>^hhw-TU%oKj4kUN|_A%tu{u z!7|ioPh5>MtO=}DZtu@w+jOb+oig*zOj+Vn7=)ggt(OG2tQQMA?h~72BDL(5!6leG zzx>u!8)1dW%Ab7RLy?vo6$F-uZVu$tG1o^4lR`-IE|LxDQd}(;dQ|OLZzzxtlpuDk ziJhYZd5BXcWJ@5MOC6TGeR|Jw9`+@05P}V8TpV%;GeLQ`lv%JS!uu%sXuAyIj+s2| zuq>i#{e2VPTGh3v9uKw44VRcpxwSZwfY;zldcb}9CNJOE^R4usvB!*Dm^ zD}pVEyKyQ%+6X`r&ij-oBiZ!bS=ww^JW)C9kUi-NJf|$%mx`6EgxC-&fV?Y43MECE z@>4Hqh=EU%{eczy-d*g}6BSaiu#Zf3%Kkz=yv(KLiletQ6e!!aFP|U-m_o|3cn&#C zP6}O;nzsI=GSD8EvUMk^yv5|BGJ@%VTv7!L&p#V=1&i)7$d47c#KZv=F8^GX>>?0} zLGvO~T8hX0BwKE^TL494dVBZ$P#*=T%vMY2K6k(Z85D@z1-fr@8y4=uTyBPlwjk}udSCvxHhQ>f;YJ2P7&74lQw6H>) z-LNC0i)B5uvHVt%_A>R1b3oAot$oe2<~?W_^pYyBBC;!|Q3?#($B>fgJ9D8c6C@LL z$$YQ+$Tu^2{Z@oxL20lZJP+XKN2*q4V;k-^_`4~m)rW`pWZ5xtL=smwWPkQv_eJvAifC6=`aaHJaF3(d2W!hFahs1pPfoypc(LJwD_)jYURYcidAni6ld(6AmPA%!V%*1R}yDQ+E zza0jD^GP3(Uvd&t<$fpE^G+I7KyDacu0@?=n?|85UWlDE3X_i!;*t(Jsf(G~)gA%ddQOpAm z>237Jv=@z3-R{VfTRaA@egSL~GOU`8@fJQUCQCii-_-GhG7G5-z(v`+519^2ex3NW zW3#Gdq3yzyR`KU1H;czAdtI#`@t=c-YHyQaDPXsUe9Az^5?Run19WP*B8Ks$GomuA 
zLarJ@n1&)f1qM#Y!g58Uy%wHr6`wOp63-d2xf}ZcBia`aWhLJqB(LO6gO2x>QEstH zAlCu;K=GvYd-FsBWKzty1Lg?ILAN|%l^-C;%3u(xh-;YEuA=-Mmr4NjFa87UYEF=2 z_ot9Nnr&W(NS=zH`q|HnAWJwiq69>io)Bs_f#g3Fo~U0|$qUQ@Hp6O@D|E4rs^zi_ zOIJ)VphFVV7tj~R+>FTuA&M}t>cU}CPcaSJNaCbkZ8j+==hOVQfl{5Yk%2(ZOPow| z^Qhy?48GP5S?(~qxzS!ZP$U}$W-46V-U2hEM*rDD1tly^UEz{O{2g!KjfpJp!tG>% zTz9IAF}ldr!=f1F`khe~d73WPCpr>Ktsu|1(K&euUDghyVFpAE;7D>EJdm=`=W=->loRx~iZI8o_0hiJrw`R=XC!q659=x?G4Shi;D#CE9p(7MO9_?&sS;ul ziI0#?0YGPY*(#HAfhFKd@Ty1khjOXaZM;n3VDfB%{(G_VO%pGW*Z`^SD<}OF)WZ;f zWimC;zKmvMEicHG;Q{$aJ|hD-g;9G*QNLcT;U2tC4F~q)p{~d4WG~ldr0{F+lRsXmi3IR_j0VS z&|4#{$YwL7H;nzW$gpa=w3+&hge=g3@Ulw@CA@Ml4;oK$D4fQ={}Q>TNWZh6RF`$s zxQ77Kgg6LG!g61cJf-rd5EH^N#IxP<=AgDkEWsKp-;6r6eqgZJ?Q+7&Ll%u;j|{~w zn>0~Xhrx^ zM@Kp+;E__0MOtBZ`Q!FeeP!opYQ-GQ2Xfe%ySw2Ow-nh}Y*>_1QhvVB&mHNXD|a~X z@_#Uyju~rzQ!->fWsy3(!92p`@2w-nFv^v8z9f@8YlX&4W5sJZ+RW+aHVv&sU-I#pHbT6^Lk8z>{I<#b=(;9l2pmC;t@Vhyl5k9rb|)UOlD`?<#I z0}AER?()}>tk$3VBcK^mGX`|{1z_Zn-e~_&s=_`7mXgn;>vtDY@Au26J@DjILG^{% z%1uqW06dmN(W#u`Su@{lcv0&RHE8rt6kKHGD82o1d2WDANtY2;KIlY(;|147md1#P z?OUNLwtj|erc$P*gfaA>B>T}v2y=)8TD~d*bOOuiD=iLGq7nrlVBDQ^mr(p7=p1fR zTR{K?0W3nqfeLR=h0NKTIa%^^u5c#bt(F(lrKFHt8@jVhf#Y3G^(08%$@ja{tKCDj zeI`PTA<0s~`LaCuaIh;^HVlpVeK~SnHYWE#h#lz6XuGxZFz=q{lP|r`L={H9(dIIu{`6{r zOdqZ)WmB)q*JkEc|I59T}3A-EIWqQ zfIy(-QK82IL;mBxM)K{AFuw!cLQ}wklc|)TsIkt#7QZj-$AD}+Lz&n;(ChGMY2DIO z_E9&JLG5wL@zZUw_D7+?Cgid(Ni!3%MwyGjx#Y`)8wDo`CA5y$G+lkNO_+|lwLq@X z=2&qNZ~%|v+!Cy))2$Zr3~Ks-+3W0@f$?XAi7awClMfjA@mhh@JH^Zg?#bq~*?ofJ zSWt%1`OR!3G7n@uaMs#ikysHQbHPvw>#h~v6MgdgWV?1+kM}9xHQYWNCws{5t@3Cb zs&6pnKPt63z1*#=wFt+WV`BDAIhA8FW-hH{A{&*}iaP5K0ADjj8xVK+vgeb;dJl{u z9E_y4XOL8*oH z4bObRHLa=OjO_~@6o7tISJ(tV3;`c!j5N_An4HLss_FYEGSi&*9a;2P#!NZcqNYGQ zGbiiUS`(~95<-+j9(SdTb<4sw3UXZBWlKMM5EWE+6UENFJHqMk%qF?Al`j~gPoQ!0 z(PG}>yAt_Md&+#baOO#F#%&D}n;}d3r#1sWGJjwRXv#G6Y>@>}H2|hEY$JTwt+jOL zz}vuJ%-zFzUE@$WzsgS=oFR!JqKx^jzb)GSE`c~ZK<-%56(E-3(^TaSo#^W-?arDm=r3E% zR#u_EkBJN*Po&_RYo}<_?2ow|+qflTZ>;C7=@qBBGfDuTgw)SUDkx<4Wv) 
zZ4z9?I-co6{w^2-re|a7CjX}_x|r;mA|=VZdhZj{bFXS6cNcSw!t$=mk3)iNi(7T4 z)2rW{#~nEVe?`@dC*}Idbr-8r=V_=7+5>_|!zjnN%o--G_~SFFD7ZaIq8j#}Mn4yt z%^zabdyk*RQza8EzhZ$D+P!=uK!h7T;GIw*5&lIb4Ekbib{5S&MHpIp9o{=!`E;bLYUOc-#zk? zoO-aQtf)?HWqV#XNKQ_4y~KIRy*j2;&Mhz{HRWUHOR_+Q(9X9qnx@P3cESrM_QgqU zME2l9WrD`Um+QTc*U4BIC{^<06q#JjS)=ICy%T&aTVTti>FnSgLUP_hUXJYf3Ju77 zet}zFZ9uOLQBZ=uuIXfAoQkb*n({c7Qjf|tQBv~zyWjc}IP3@#kF=X=T?jyX zTDzpSxdA2VlBCMSQvg~PG?inzAK0j=T~l<-%KcNMAtisQv!3NlW@U_UxSj z1mRMu2ebZH52dFW&Ymo89Sks0a%FODA?zsV5SCN+=9_CAC`F+%j3SAQ$t0f~zErpD z5P1ilj<==B{9I~BXdz!=Ih*s*m>y7Pe*t+P7irHpvC7r4?HqVDC#bfr%*$~v^6zzS*fZ}1yg@C9KlRJswE7)iB2NK1ahPoOGeg@n^u2VSS z?jT?a{1N%r3YpjLL9>KyfUKAce~g|qODPMzMjmy^7@Z0I) zu-qkVb!H5CdI5bL6>!vNXN$8fb3M)+)VNEIkab5$X~@M+ZOeI21O|43XbEJ>()#oz0A9N3AU2)Eh@4>AzIbf(Od= zye^(4moN5jlf_OMPE%0c8Diej*4Z#%E_vB+8cQ8(adanEz$5w^9+^M3^ir)^@ydF< zPs!i=G?#qw- zq|P_ID2ThqA-PAl>B0Zdd#JTVtoudU#6G0>;11x7fJ6uX9`28dJ`k22dJ#x2A3=)G z|Mf-xpV!pXoPYFj*VNS7dRj65n#EsREdI8~%Bwt#e;(|`-yFSCS6Ief&cEJb zISy`zv-mr|=-7MQ8;eg~+_}(LJ^$G2wtBwN>9rh-J3HT)-PyUZ#&W$edt+zk!jn5Y zzu(yDv3kzl>$Q5G+R3Yr<5vo1>reM~F6^A`;XnATy`7zlH+K50K5y{H-*>t#$B!Eq z?tQnjb9Tte;|2VB-t4`zd3oXN*`3FA&hDJeuP@%X*I0SS?cLbf%13YoRkJ%c@&@|G zowK=`cNg*wymaj5^1BOpr%J2m;?8m_Z*k||*^T;Ml~&Pz zK4s(g3peiF$j8<2=NtJ3eTcF)t4GgXw!Geb?2h~br%MXmMW)zO;w|-+Im#<4tE&6f z`1|#*t*dWnY#K1IxutbbAZTbh6qAD{l7s7p^c*^D_=vXD$Wf!)$BZ4jbx8?P5 z+N#@DcI_qT=ftRkPS*44-hrt+8D=H#z_ zu7It-tF36I&CkoH7P*TWZB?!|{_GQ1|~D+kEyz-_F_Q zC8+$r{4qa%;D2*vpZtGTHY>Y#Q(KL_XPf8gU`qaRZM!%!ZQHgv z&(=BT%o=yjiZ!k$?UVm8t@kT)ueqpgXG13K?{L(f^U|h{b0YSqcLwK<>^J`SvaQGG zkL+}s0rrb8sy}P0b4$&*i`tqxw%TuAK6<+6*lJVvZrC&HnDTtqX7`G1D@jZ&v3IZ6 z8a%HZmaT`QPn*p;^w4ei+;#ttC9)^4AJ7lMx3CT(Pj?Fatj!j-p73C2Legr@ZVqbNK+XqH9Jb!@!Ii+% zEo{wUoFnENTxT-xWUICjlME6)9s5e9-HcoWya(`)A6sAQuagYG*;K_{fDfaKP9EG% zLIy*#p0UPrG2|%j1x3QRUz}tM_#0|qQ6dY(f5FPC zg1Or`Ns&ebp8mToqQ4l!TLhm#6_Jx?@Qc8s$R$Ar)h{o?own*RSK^|SVvhztkFE#| z)~7tYFBzh%wh;>yNUm^0!@;~-(rT{g=@4u)s`IFSQ>EGuk)m-cUP753;jNH}f{3V9 zA}`?rNL4hmLC~T3883=b~p1LNn|SYwnnWtJh 
zwI);hE7eFPkmQqUYqhi>TjV>Wes}Wwi8?P2J)-`1U?7dj#k7h_MWW~5(Q2zVhcdP?v zXie?_fpe(e+VF3C4sB7Z1#Tq%C>W9i1OCs>h8<2E!T^Ah08(Rf4X@#C;f%9*P|Jaw zXMg5J+yj0AN&vSQFHf#a?JH0;BjZK@%{9^MVn<0PyI9;n72(Lm;Nq+Tgl zFi?kCcC1?1yUDbzjW?l4!Wj`;6}T3`SW!a_5e`kSF4vp&=PKfY_|95hB(MTmYXtfo}E^zaLNb5fAUaYrlvCIS3QMKxrhAf&m zk$D$vm%Z~$>`5EYNb1QNHQOY1B;q@tChjYO%fe7Y&-97Qzsqh{lS;J}+x!+cZ0w#Qk=()y=(&yW z-HcXS>qn^3p+IV&EtI*q67^01iGRrIp=%U%rdklCQXc9_{GBpKrmGq`l{n8>I0SwG z*o1nH;VIFDkySF3dyv(`Hozng5^N}xJyrJry#bg*G-FtX+hN3QhbqRQ$5)T!h7eCi zKG+LewICQ0MRM17TCmr_Y+|FcB6z;k_G=tO0tkX^16Bkb-Kf7CjaW}>HCT5!7ymBm ze-GRz&7C}rba87os%!5u5ZAYulGvuy8)n_C5w>I)$ zwCY&u7(YI<^9!nC!m&COQs)!E^2P(`WbE{N(=YP0*43z_gN3BePMIRl+6Yf*Kh&5> z_3DXG`5VY0_2xP55x7|cBnwQCZAJI=WDs^kI zlT(2NFhOdq(l|kt{^?CUdLEOL#M5YO zA|1aqs?#8Qk!2c4|DL%{83z3Y7kyad~IEXhg*PM_@{ne%%(>vAdZQ98sNGErYAw zU?=g8X72NJTbaeY-xlOPq`4QbbLYa{K4!zi`EBYeiyDbNO!B}Z4yM0Cg~ELdWEgst zz8<{o-%x!0ZL0K$1wN0;8!8McD?|-PQ~&D%tQlyH0wud~Zb(Fj9f6aahs6>~G=sk; zauH4>G)Lj4uhZkK@vvvu7K1N#s#eUp*9lOX$nLQP@^+yukK#L<|yllH-I!By8s8=iOs&cCggofIMs0uo!B!-b8RmMitumHNhTE2d5>`ocmA? 
zTbQGVGXjMar%STyp-NuCNf!vDzgD??o&tcw$M$Yi*)=SCR&y+sHa8TILXqv!7-0<& zby5o(c>(#PGKmAQ^4a*b&hV~dGs?Y%nfLGoAwO!3`R|2%8j57LTD7SK0H(i*Jm9cu zyDo+T#u`H!qkNOP)OwHFBSw?{_YPSO*7n4;l6;1!&jxAZxEg%q z5m#$<%IZVhs==YDaXL63(|y#LIi+xZcv{#23M*pe$QIZ|*pra#Yne*un91 z$(GqG;>M>Ces9;;FygK)(3KD&ZMz6&d{RyVlaFQZt7VxIK{$-#W)7q3hy)xR7_^MT z0!yhKSrxR!vk&cPz&isQFZl!}a9HLVJ<*T}tj@RdT-1C3Y4o4%{_$9;kb=ogPIR`> zNa{XJp=ecL;DR2Wlff*2y<+Yuc2YDX)U#{>_QS z-JawuaoV+Ho3Yz_A-ajA&@jbipu?;$I;2(;)4d8TdX;I zYocqehir;p>3R%>Dp}C3$CO00xUf5Ll4-mU!YY;FT!}#5#j2byD)BZtd}MJz>nOFc zb=&ta%^Gsj(_zfx&JvC6d$g9H!~R8+kQB@sLd|&%hyhyVv=R^BieAb&bc~|1oZXGr zg8#F;27vSC0y*5{=D9~h+xJ=XLwOCs3`v)Ecp!Q2 z4A>&6MK$XG#=#Q^b)8g0zC}2K$(cHzo`QEC=e5ey`a0zoPu=JA7P&Xihpx8R6A8DB za`PF~Hu9x2b29A)NJgJ{A!2i-c=aI~mz71@<@Lj;j(guKuo~n@2hZ|B_vYPJgL0wL zqY0w-dCgXrv=op|a_K6B#%>7ZAD1t?iV$TgG2gu4@sM68Vc+#}VIz=CW)5uqvwB!e zD&Z2q)CqZ1F=R^t^G`ex)g=c}k$E;>E$FhPSXjlxQ-U23#c(p=-JK6lhc$akv6Fxj zGao>o_G&I|y7#=yFQtc(yES!~4^jo0f6(l4s!78bU*7m` zU$wO*IC?-#4apS_Hwy#m#hDnGsMC8muasVpuaJmz2L~alK#Ec(4UFIZ=4b{q0 zZgV&$;$%%igg`w|z6_12EaL_c>rg6poz3|d@L$3AdIsY$jfGAs&bWNxUfx7% zIPx5aQlW>IV8TN5$zBN8;*?6t!7!y!LS9t5+vZZ3y~pKK;IOIyOajX%`*;P3PbhOD z<8KNNz+J4b-a#RmzqHcfhk!NsTYyKMFX3? 
zoPr}~_eKlH?Ev$UKZ;c?weu|saAG$*hDs9k=sTV^d?2BPfHDF4;F56#Aq_IGEU~ky02}_-oDu%}pdC2|J-;d4{#&-!6L)NyMT%_j0`x3h`V@{ArTeF!vreg{{|)$|a(#b4y4#7EEG~vmW*lV*G}l(L zj49eML~D{4OQfN+mKu<4g{wkn`)v$U;2E^WGZ;|?@X1!X?;v-jIxUcyh0Frw`xd&0 z{;051XOeCQRK|OWRfz=~-M>U2^#IfnjEBFKM80|4n>C1s6dwXdA;zoA8`eH{V~cdX z&6c8iLd{>E$B+nMQIF`fo`qtJyKUPo>n;<8y_fs z4uskG025h%sPd_FlKUioML7rxYo->rA@iY7C3dRNGs5NM)w#j2c*1!I>hiwU+Tp`W z_*|K*XA=l`_t@q4O1au0BMZ$CJS(9TH>wQ@)f`w|#Qeo2&v|fB@UXr3 znwg}MZ6E3-zalBhzRl}5;#n2Sn`aAUKXP(jhUE&w<{h0+1#TXzswJ@o*&u>N>c+F6 z_WKC=^m~JF?{4Vpq4p!^kWsRHJicZL`LvYlW54{x)eZJ?u=6Bws9(N~csVX#Ua7&x z7g62kKKNzBC-;`Kl?5tG9Gsmm6q+vWzh7Jh&yMgUXZd9YgG32SAWFFX=61j_o6cj% zssKSo*T`oA{1lIy^ARdUlSTEJ9h8aKJ)pPqaGL8x5;aj{Ew!JigqQOxP*#W1L;NNG z(d@nD!V`(C;0h;f%&51ZkU0RP^_<+4h7ogz!JSblx8RzxsY2G(WEW!b;luJkX}8&% zsq4bxEcFp)*eSttE`fYWS~uAt)#cpVg8(9Iv&;MODvui`V0hnIaN#fXDw7O1x7p>A zEQjhgvZ<&1>QeQ{>DWKPEb|^#NWDfHgedTSI8nNpRGLYF zh(5vL17rk+iDk!-wuhSD&DW;0`LL{_%@3O-A_BUj-_mt~8XI3q0w|rNVBW_A8Yz zWc@>>VVQnBg=nJakS?gp4dY|yWHZ>$36@A$ekeou^eMG?9jtz;n9GekZH8b-{Lnv2stfn=9pErJ;~< zq>+%2iW8XSgF;zdY28IB{z@Sx1IcV30_C!TO7IhMyksbSbPex7E{Goz{N&V&huj~| z%Bw_>8PTaKV#>_`r*)$(ocj_|J-Gz+xpk^E`Pe#Lqdb6XoE`?5U08y(cC4Z0bM@u9 zNLS$1#cp~}L8@SKk)(;2dpH@{Q0aF$fFN{v;Hq&?g*=oaM2;=?jxUybYVj)qHX@l$ zAJkLr@m`$VfO>c>KQHGQ@7R7;3AGpUfKqR*vauG}oy0y26+9Fx;q(B9#}-P#ii^sF zrSz?!cXFnOBtktLH6-@@5v?uxkdmC#CSbR^t;^<=VIvw0bSzfWQ5eW%7i7a>S)Om5 zrYfGrJtz+7Zc%2(VQ9&oI{&wgsrRr zz_7}$frE8|8B4YYM^3g81KU8vdQLtRTfFOIN{3|3taK53S)`a1&So_qTJjMMmVeZ@A-0H*QFW*5$7&5CsOQw+1*nMzRzvcPDFMYvh)#?KQ zmJ6aaVI%nzzawKd-%!*+&$P1`Hx*KNnR)$}*Zat0NZ#WJ(B(P!XHnBog@RIUEzdoH zgDk+K9H;nSf7q@wp zwSI{OeGMt$bCvXi=r)DTgjydEIAuvVn~)E@(ufSp|GS_1j>2x|K{>f!4L^f@iaq$| z9KX#SK9gMqudHa5C0|xlk|ryIJ|TC386qug-|C^gMw?Xj7D%HLZ(D9H5MoT61!SX? 
zl>Z1#E#bPZY*cd+6v7xQV~EGz&Y}y1@fFM6iBx6n0RL=WwL!nqNOyWX+AGptHPN2$ zm+jRc+e%6;rWtogCqWK7(rYSPR1Zo-QK?9Xlp4c}+mbXyB z;9PJ$nxEx8IB7xqfCyRWgiiCf0{xX!e862i4u75&V!()U&OpkHg#2Jfxa!)Vo_1ve z^ijOxJmg>gyvHB>&8Yt3}^0%7y58)k0sX{0VzpqXAF(<*14aGK}&k8@@!3}3a$YvIlsqBZo z@B=)3Nb};`xA@q#lz5-$n;NJ_3M7Co8KPv| zXqA)G1-lrXqk@Y&%YeSN)v*sm#=lxhi5^jCv^ovYJnLZ!5e>nRJdrPF%L^ zGt0mb$!LDZ#LV=rmK|>IudINC_XDL=_cN>sOgd_D8(sc2$#)U9hwQFovb?t{A2zgX zAPw{O(JX_|4hAN*MkOGxGx|iGi61H>Uu!RtDT6pP!}2Q{d3jQf8Xymq%0M!@xUBK!l5&5sLJU6?0McDlN$F1qnkwoB zj|NhPU+_hb2HlXGuy^L{Fu&18iSIk;0cRx2%Lc41=)=f9rXBEjI8OTc04oSfIv>q2k`wPO zksP%p-{2~pctgGz*!(-MyRg0&5KbQTI?gc-a&Ber00HRTavpnhkYtObxW+>df}JCe zx8%IqO~1ESBOO$@1b`bPB^|!y&p0_;kY2gvG|*Go@3iXberEY6@G(f5VpYkmRkN_Y zIGlsPmw|L9k+KO}=MFCXr zxcR=-T2B#6OHc{&3tNGcV0=gAsB)ZD-oiSfRr(H;9s?`sODat`nLU~1wA)pFbE_L* zid$Py0?rWWsupK8^C#GB#AyK3oz?Pwu>?TADGklT)%;eZsS(N!)iL8J6H(@sx#>Q# z)d~@A=BWSj)=Q7j+Cwd&UJ(yOBVZNT)qwJXA|+r4HwGrwj&m|KRlW{MNg?9=P~L3_ z`@F_QjQm2;)|P%)dIT%wC9fOYGJxX=f)sZAJ9rP|zA|?f3lm~>e2skD1nwL{dPBx^ zI#L-47IO4<`yYeK9o=9HWkz$qHnR7Z)(@~`BeU*0HX=uuF1`VsW6^7!`?7|n@x3MQhediZWZeC~Z^Ac;$M zh#WnTWwp;GU)$A4@{wxm3Lh_njDSUcz>CppL~g8#rKhgE`hyHkFr4TEj)B+{;ctZ0%e#K}z$ zn|-SM2bpT!?omU%JKS5jmPx%O+83;eSTC>FdH);*TjXe&hJu&uidH0YqO^^b39bpC z#xf6@#rag&oZVz$kX&}tQ=z6OQn;zR&?ln!y_mI}ozCrv6=W{shzdyvr5jP;EBErh z(2|MCu|=Fa?Pn9=L5{JZQM!KA8g0r0oXgvbrLaar3>kkix)X_Qm1I=4n<>Q`%{mzb z8)_W)8Bz*^BXbzI#e(T$J=EcFU*v@{yiSV3T=eJ|*D#?hokzxUCTX9%T#>G7-vW%p zhX0EY`;%r=qji)vq;}c(`7pPcI0${0B>P&YNL~Lf=Ff3p%JZ~pX-2bh+39d!;?jwe zgPQkFh{^hD|B)P>Y@1{b`*h&1c-{^2S*aAXsFmBhb+~R!&~)SbsE^8vQZMek0U1vq zeXi`r1vhHyOLc+}W;{37a!!#Nx}5TIIPV4)=08g0Ouw9*ly96h;3P^tV_iRK_dZmC zk1Xj`@4<4j-%P`}gxZ<}E0R7+9dXSqcKvxs97cwyMdrIehEO@!`mRDB5C=;l+(d;> z8bH$^o_Q6QJ?(=W^1A#UcZ-0OC&7Sd^M$Y=TVR(FWqK}uU8!a4ltS55pWRQVL1zI) zqsHk4>;lf04}5yk{SdP1(~Nb)Ms|`S-!)D8yTO1h>_5UKE zER4(I23=>~lkh|ds3N*TooNH{yzPvJ;;NhNa?3yrkg}Ue-D43`j0ZjiK-~Li4VYkD zE^Jb3x`gQ{*LLZ3^}Skxnd8Lk&kmqSk9ao>t7HL?RuUzaHOUU`=>$u4y*yD&H2+)Q 
z>>_o_w=!_lVj**0grC1gNy_dDNdJP8$z17e1whfrg^qJt4Lp{miesHsiSlj77VUNb zkK8keYre6@EF)Qnp7Lkg$SW;=Z&SPyA`Dj>38i-VkKwvmj#4loYt?{S@Ccno0D&cYdHP?(KpjX{P{;ker^JNq zoUEbNt}XbGC*(ZW1wg~E7O=bMm#|WlyGi6X&-&!CDm_)xPsvv4>Ln(ay&`>&cVmq_ zUPZKepiE9oriND1jDxL3uyGC*h*`7U%D0a)#dG>-6wo4{SLnC zmqzo<@5;N!v<`32N7$q71z>CWrGhq6Go>+&x1R@iAa@`xkp<>k++K_NSotovv z0i5e<#(ww93 z1y~;D8N{`2J<-vtB_227%y-TI0 z&|6q2|BPrpi6vKytOF*ga zAeZ<^DF8->%JyN*tPIN9GAXQ;BP*%6Ia+UQ(;Gr^pU}$dD8=!TQ4a?qBswn!?pSEH z04q@zne|Opy(0DB;m|@xcOZZdI@z-l2lH+;{n-9U##m6fPr84;l_9`zu)umE-f4`= zkP7|ow<#+Lu&KX{PS{$lXJtSTP~6cMSC=)dS_h2ww=ZB%-K&Z4WeB9>(yxEFAbEYr z%{39{1OE0M>QJ+|hdFG7hbX4W)}>%0-oCV+>D_U>+5*I$aZFu``LMVwm+jyYo`4 z{p1Vv;MzG`ejS0U91Vk~3+0vqhT`z^PwQo^-BL)_8i%}?E=J^?!7^cR zU5DJC02PLO#+Z`nOCorIP_*xk-CSx*W@*iY<%0?)HkctJ<=P-g2ALRLt}};8SH4_30%jISo)rXA z^Io1pB~KHbSp9pG_ba0wTIPp30s=usTm_9f%#tkxZpyp>k7E+g;rAxNK>giTgRifK z_Q=Pr1rA=IlJ;dtA>|DTG|=eFpykm<3FF$sTm7=il~WYE~sFRC)>d zYlZ(24~!;i7wJ2caxo&7ZbX?ih(s$S*W;>^FuO4=&C6lbvqqlo3-1V|1Un>x_1n5) zO4Vx3Hd^XgBB9%V$RCYXmNqCW%=1tOQ|4|T>NLgrD(kf7Uu3YhVo7M?&SwmB9Amz7DL?^M`>s6(D#3kJ+W_J9&1S)~ur%fkJbZPf(gs;BB<>N<4X~Q&Tf^% z!(Bg0XdqsUbY!?22MT4Kk0?)bmfAPMRrRHlA~~;m$$XYDE_>=U89|SNlJ?H(Ov+l% zi<8;m-H*JC7!qqG@8@x_S4L@)T^?elf!f$jriH_}(?R^%vV<%P0dG0`%d~dYFmD>B z<4*UDl&M~w{hLVNB@4@Pg=Qk7C%kW$nagqOk@rV{bAH`tb<9{! zm~r?nCSc^!bg&eqAI>ENW&UXDakT|;`gB=6SVrZ>n%?h|ktzRJ*+&g850uD#I42=~ z{W~&ePU(lR0weUArV?Bq$taH*f(I>oD(1zKb!m!L>jokQ=D+PV=#0 z%|~t67MjOf&2tzFb2rVq0ePgt>*xy_37$BlQ0hch_^tKnC=JKdGuqesJH5n&%z`^f zS->d||ExY&wwjypNG}>`eD+VZ%=|@h?e9k!@m_Q0IZwA*geFXxZqe4%DKaq#VsT&) z-)c`4UmA`2KdR};kl(R+Lk-J|jgNi|Jp`4*Z{a^9D!(m5}8*WfIu(OBPUM&q? z6uSVrm}~yM&_{=odmWZ3bvRe&bSltgmGb6j*;6h}2|0cUwpWRaWiILNLIjez!#F$! 
zRQZePqYmFOK`vDfH#xF3JIs4r5~nc^>gnyokC(jGY?)f)0R_~U@|~k@wEr0PL)4>V zYss?Zf?_#itgfb{M9Me6Ev6XQohI)PvA9hVuHsQouYOunn!+IveBop!R4?#z%J^PJ|dZbTzB} zS)2T*F2ljxWNuhUQZ4DNI%D(aX46|pG8}lbIo&0n)Zt+<$Az7=SwVpbvk#ZG3ZfR)4f)!iUC@K|m`we_ zLOKydO@1NGKRX;*^3d^le!J3>RsN=17vG|&;TST@~?9Koel;}f_EC_ zb!xcf{k4c4(kK^=h7ZL^P^j|MlK|*YFG9K1e!S4B06Dk8xf%T=w-)I-xWuYu1QT5o zdQDLYI5|gBLy7#S$5|X$1c2vfF2an}SR<7vv&g09Tkcvp4zE%jq?2fr#v(@G$u5xc zRGpLzN`FJ}`D>F*A7plV=n~M3*9VoT;-r1kAvaFUTKwKi?J_H%49`m}xuPFUdyw?Q zlH7-VecR)?FU+Y5v|6~j@({ocanG1SJouv)19O?!K(7<)d?WQma-Sc|ZIVK2hw_KU zOL&xWv|DfyiX}5xi9@1mXrC)jJGP&yUEbmK5=(*rTQd$}aNjb_lCO^f1xl2hj2mN138gXSc|dCdro6CWh~M)R?cXZ^X#%g8&^?3=UPD-CW&mjadJhAYdpwY zYpO%h87HoRBwS9`aH9N!v5s{-eQ2u_<$&9FAY3{&Ia1a zT^+1-Z@`B{N2%Q6bZmY{?y~da+eS;#z^?I%WB{;{NhW2{DDHhu5`A&+j+3-AbZav2 zw#RQrWhT9~wRN3*<;7l`OJg}hPwV` z3tBfzuXcHNh&gu-rFq!AXNXUU)XRQndf?V>TIfq^Z;wBpPiGk*JF>1^-pqSchiKVQ zk+f9#H)6g>)VGBw}XEP-ZSdUF}1~8}(kT zKA+YQ*$SCXCP3-P+bz>Z=*bk%gT`tG6zjpz+Ed<{DN9C3Ie2O`^{E{1kP&mtFSU?6 zx%CG<+(dZVD9XSCxPR7uWg?iwbZ*-dqvV4GPzzjFohu)ZyIY}k`yYjdc`$wXKD{DJ!>jrLL*%xorG>);9U8~SXy@EIdf8x9aHtGY?^L8jlcy` zY^&6!lMl@uCQMlH`Mo!}<=8kUF2hF0$UMj!)$+F(oedLmPoAVwUCq*POd6W|>>vkd;2It^`cN z{+OM6PziS+m3igz#_XJr^*3@#Z#h4~r>>}y!{_r5!>QjWwHbm|Ldf}7JrVwxLU9+S zKI1<>hD3lT)yE-qf)C1RdV*Fq0ahfWSCvw7B5^Y_EPop&r;Xzm?`6t{IbR9Qb{YXX zo^*AS`2n{eAH*!UDuL}*UOoQrfF^rFPnHA74fT;Gr|WF7Y=-Uf0+3ZiK8Khix6xUW zJH%>WS}1U>JAKgGuPU!0{ZD|afVIcP7%1TBbZ>dTC36|5g=u4aSUmapY9=!#A!jjS z=Q*Nfa%@Qh99OjZsV`YNA2PbrYCQI8+wdoPK{bHC(jo2h_%_S><(;ne?^_Oea;h}= z2>YqCI31~Nid9_@aeNP*S4CdZGRHhK1|%dZg#nH~0ExM+)^;v=beU!#CobVeq#8)p zHh*9D1Qfzh`wHCmnbg{!AM}_HoU%^alt1@0KUQ)Cw7c2&M7ID&JrEN(S*-qu9ZO`M9B;hzOd13-NdAEtjuneI1?Dl5oV5;MOB%96GPIkF?Rt6 zvkKP3Wn@{D^zhTxcF189va4m+FeV`q^w*5ebectd zr3#Qp9_sME=~dASle41GUu6OcJ^2-ekY8pw&}V4I_NW02f!VxNo_B(UL@dfMYQ-QR z5^r%EEwu5hCqIbPN@h_jqK^|M31?`zFh-qUkM zGDs($jVUsh1bC(iadgW2g8@5#9|1!(AzS(a456Db|KcLxKh%*jhUNJ2GPFQS%lrrJ zkR;`tLFTLi+%g+{(ldYG1U(Fw=~RyUYqP((A)QfQFOH;Jl+W~brT;9wUoUAA_Q@Eo 
zW*c+_YNd&brH|%k0pls4+eOZi!YNlCnaWGAi=2*d9Tb3^h47k{m=+td_* z$|V=VTr=l;NVf3JHs)gnGkJe#Qhfh1%hr$1e?pm{B$OY=#pmY(=0>14(2M7@q`4HR_^I^f2Y!QCcj{S&=6**+JzG^5r?UNY`|Jw1+u#6sA3w$O#yM!(vE;a&O7?3pqml_qk~=A$aH$g;NstdQs*Mt%KR0?oU)pm?BC~?Sx%~%WP7rme5YZH9G)mW1el2 zH4{xQ_|1vjv2az9CVVlst5$wq>UEaNhs$IXnUoZ>PH@bdwpzR5!x()NHrY`=iZHqm z6c0QNTD2w2>R|+G4vz>9v3}OG79RGkV%%~ZJw$7~!g=@gQl1m#$QOMjzr#ChDttY; zY>3%VE$`tO-}M+!UrucVA8s;NxTvj>b5FylJ|WjQ^6GsgFgSQbB)>RwA! zn^`UtJ0t9919G5@mrF~`cTPv02f`IOo!nguO8wCgO{3=ZkmLGE*L*Dr^M-j})K;n@)(H8wL5>c| zB_fxa>>O3GKhLD)gsfCC)t^e;7!q)}U+a{QV%*c}2Nrhc0&=8Qqa$(3#%2JUe{xWY z5$GGIRB`G5)D)r%i)HghUp6ogATvOzj;F0`8YXwQSQE{8M=ZY#v(VGErWt%BaFGtr zOOkgJk4H9ae#o3&FUQvTb>3Zs&Ldm(%re<9)ReZU6f|l6Tr9sbyh45{ zR8Fz9Ri>75J#y2kT)VM+avlThWPH1PJrsUIB<649f@m1^8*1*XK(wI(@0Om{RcxHE zVr2PLdG{zKcod>uwt#Id+&=C5(18e=tj zvCK^sH$azrM@$tJH`Pee;<@#Ejmjus%^-hpfLyJ z+huaJTdU$5!~Xs}stZDSxww1LfdUDRA*c%%1^l zSt#H+)?uioNT!h0r~Ap?iPGUn)ez&iZy~~NRUhVmrzikpN$<&w6GjRli!}CrRl^Hq z@=RGZOgb4GIl?s&rABMmyG!YaXUVm0?I1k8P=?|_TaI2aB#*bts%g1f_~Du|dA9!w zmg0wf`yTSfA878lmpGu@+0Sk|2gPeaPUwDUpyR|MeQk_F@ zY0^CRano;uznH~(a)rh4H4ef7q+ljiC4XYXMOeJWyz~@jcP~6|de;=>7yza>wc>!R{1A$ z8tihlTMP?0Ec?f&8E#PF|EHVYP9ki&HKM~Wv*Q3kHxXj<_7E^p)JrF>)HD6*bKpp1fjplE;`6Z;6=2$KGkMreDKa?}A z!vzcWnrUfeM;u=ubCAuupz*iL8W%4tYBcv3i(`&CVU!FFXMcCp>7b4XFgEbkv;BD4 zbt~n-;U-roc84&xIUYp@F(e@;=EW&8gnbf`{gV{~mOsywV?fKzl?9G< zC6^Arzn8s*eE+&5czzF0m8-|pePynhz#I3>ml?Bs^o{s2BA!{S;@VDIG}nXgav5e1yMcmhqzH zQ}?~2l*>7cWQ@os$6~>&$yo}%2Ixa$t#^G&!YQ*&Nj>!g(i`N28t;D3zW0IoVp2oF zvD`a%L1v$pmCxDS&^W<&uF`~d!fJNgvnVC#kI{C>H9_?55C?2fS=-;->z5~2ruQSN z1$|7&X*1awgJ$xx59m6}$}*H~91N?QC8dr!FCLS*+OI&dT$Z9XOW}h~PHOrnND8Rm z>6FXrX|e0l&*Gu3%~UP*`6HkV&ouScHIn}NcZQYTw7(Hm@eyA%J{W1xYmrW zbkv!Ts4=2xkaMXKF^zJ|5bn&FDtTf$m0eOE1HOV(@7-dDTiY>B~u@<#qU?Y@cBZ__q-hZ)Hr7yg1pwy-LcX4p}<< zP}_m8aGlPAIcc8kVybA+d&Lkwtj}7rXDtxI&L-2N0kS-4@1x_>*QO7c{bZ1uhWx!L zlj8u@DZ_HNrU(DdM>}%nQr7rib)0W~o5frOn)mXrBfJg$6{mqj*vJbv9cc?^dlQ6) zFY2Mt*f)c8T*qu|F>^bgj`F@wz0r9f=t3ws$%weuwe 
z&*W^!X?(bAC?EMs_o00JN*^#nK*{*uH(TqyMU!pO)L*?ox+z|5R><*3M*hj9 zGH%|fDs`xtV~cj2A={53@s*uxS)EVwwK17VFCsL@z#W7H=cAKj zDdj#6msjBu4`b#s0M2CaZZpUyJ=ha;ovNLn94&{7YZi;^{c<%0s?$>Bl>l602%g$3 z=C@wYUrqh=7pK(5kk*n4?>N4EzZzeiyc#7r4X&vZOT( zH+3$?_ImiSVQH9VCU0Z{PC|alm*q2ee$V}>n8@4oE$5O!qN`1zOU`JJQ+@Q8aYYiD zzDXu0R(D^8ay#-mIIsgU3=GHTub1c2a%53vjD}Z;f*&0%d*>KC&!uAsU+}IGEVK8> zcg^8J#h>;Z$&ET{aBh&b-Ma{-GhLop40B+unhJR%sC37VjW@%Pdcy9F8bR9nGP9vV z_4@;R%10%-%k770r)NgeI!)Q)Z_Nb+BA>2iHpil2@+#>%;oulsaPt#nH*pzMAg9c* zHS_Nrf6Up$xxAFm8wCxF8hn<5Dfhbb*!w`?)LLd?rPNj_)^ND|`#8yZWbahujU%V_ z?$T+9t1IMuZ}|W?!F-#8OS6l)k+kN6U2CN+p8Z93FGRD^7Ew*wP>)=Z>V_|&h;k&3 zlKoY-!1m2r*~>5-`M_c6-R6sHh>3Egrh3GVALVscGySeETc%XIubcB^o{^_dl=dd_ zJV;uM_zufOcJtF=rnDosL74k+|6m`2JLC(IyzNqUCd_tt1)b)P4VvTKuN3z+-K+2P z61kq#TQN0TIiuJEW>7!*zzH3Wj)jLW^3LhDm>(o7`w83Qk)`GsWnXCwpw^wZk~MUD zowPWcYUSur1eV`Or#wowhE9(c;S0dJ(!&xm4GE*{=@UN9`eX_rXKOLUttYbC~ zCtWjhoml@BxZcTI9IpP|y82%&+V_!s8{_tVI#m9mkuVa_#@|h)y;{OD_9Qc=LM|(n z6~0UdZfaCQ*Q_MxH7g?|)E&pmp0$9CA?dxE<@CC+%zrvk-d-Rn=3krl{Iac8zMS55 z42ANihm(gblX(q{`cBH-DLa-6vTuTQj4D=-tIP}sGd2gd$h#it?UBpO7K-GH+)5mI zU^BPkjLqh!LHj-dgduylewxyDt{QG0R8y+(X70n~<>p+d4&4lmq}CQw2X)WsU3;eP z&OQJ_5%aBrOb6Jga_vCbT%&|EBvmizY<1r(p3RHvGNce7gz1G67$BkdUS+_5^p4UQ6s1T9DPkEE6j4M(QB?MhiVBLl z?FyEtSg>K&y)FN};yLHKj{7Gw^Od)(ca`UP7QLM$7&Ke`cqN5uYhAjE1P>z(y&c>8ZBI|USw=}W|7!5SxNs@5b`{8TE?IcZ1S+<`rVBx~Ic~-7;ZeR_BD3epXOMy0Py!B1UO!M9U{0uF-z2{#Yk8JzXy>G}IxqCr zqaqTKE84|c%islAC-^V9))H^hJc_Rbg^P0Kc;9GoZ6{4Ro#<*GEfZVuDBI`C>=trC z8qrrSqR*Jf*Vm9I%5nh{CEwG^CuB@Zc|Sv*o^A`r*G4JJQV;^K?Uq9X*4D(<^(BOZ{aLA5-OQlWq~22J zmI-6b-O2V@N+x-`k35xP7L&s#gI~?vGnz`#8ZCJ5Y++X9NM*jfJr8g^9i8sY zc1qqb6-p^zu)=R_iQ-MbYo2h}=cpsH%}%PjK1CmqKRWVF>!$!uvhQXJR$f`zoOvtx zngR+qB zrIR!+eqzn6Sl{}bRHeC+*O<15ZFfaG`8$X8?xoVGDe-~LWl$HM^m|I&OvMxH6b5#( zv(0nndG0lDfI9CVq7rOnS#q>3e+tqTJ|JU8xaM)!@gtRLwSHOb2YGmsT&vloY--r8 za^F6aOQO5Bqx$4? 
zDy8pe(*=qP%&Ii0>r&TBdXaz%nl+tR9?|LX1{!fGwsTzy0DRPCE=!B9xiH&d%7!F# zJ#Ps7NO}E>@}%Xq#Owb!g~@Gk{Vw#A?ro*KfU=zF?~vSe4CR_q8RnFqfylz#;Vqsh zm4c`(Q2VMm_>ZsIhLT||g&@hsz?tNUWI>KQEwG*FTt`V?p>ft$s+Yzl$^$K^7v}A4 zYF;MaEMJ2cL65U70o$EgN}sG2xhn<4PHEC`qFkQLJ6eB@P=3hJ<{UDlj{Vl+3|X^O z`Zuqy5A86qp=k)wxaGNaD2oKe-9MU>MO_7a9JomNmZH5KHSmzUFoZ5dNa!8NkP91_ zs`=Wx@{2sWdGJ+uP>q-)jkU%g_YDlr6 z@^?t)s_N=~D&7&3o|ns|0X_PqaVwTz%|JaDHtV@`OV|t^&#ZS_$lnoKSs0UmYPD^1 zljCNYQ7}xA(wpT>$o`$YWZ44o>u|5U)`EGg+asNrCa;`tZS&`C>nW4^$%rLH%+Q<% z;9Z-aBTh8D@IlMVOLicEr7WFe1G#u?+1kPJIdjc&mmS$IP9fXw*bzWX9m*hcE)>+y!lC!9?1Xvr7 zw^7!^z9x`fS2_4?FoorR&j@)+l?H-K<%y#BLVNGJp7qKdM+4@3D&yILVuP7e21NRv zQd!uGyN&E4y*t!=Z|}v7{maL)Z0(Stc~tT$-%WLTIq^O6X(wrxNruJ>`fA@-U-pNw zJn(l4F^b5YGvza-Hon!ZCcz~x=TdLrNFErKDJ`VWpxg?Tg2$L_TyEY)i9miN4?&$$ z4uz9xC}D~>%To(6Jm&D2+C^H%{_rL#M77XwI&@U~JfZ)2eiIV z20N@-Zdscv+sA3Seg(-%x~(FrlqbH{oar`y-M=Jz0tZ2DCUfy(XVk3heWP3Dnn^bQ zfw9W@G}m;-&*ho=6-Z{f1l_e)J5?1qT;r0Q8|7%)L`q38fGH#wZIa*+or-MBK2n`4 zw^nK&1n~?v-Y>2)O-Gzuj{&Y4;e@A1xqNARbJYM_d0b^So@*sPWXmz9F|7dskWI;w zQ61vvZ6Vx+W185K%f=RIGB1}P4CNrNoG#SNX>ZjWZj-|-ozxP*n$6bWp)A~@DO}=N z6o0&OsaUK`4Z-sjhcPbK{F$Y}(5O!G$6WbihK$s5c14^=9A-$Y1q}GS-g0V~JlsS6 zc7b(*Vv-~6+nP_CBH;Oq4A~$@(yWJ;>D?_$B1Q74F(@MTloC)Y%}G{}*dVdJJt%(gm{bEwm+?9|axh+#i6P*LRDyF9VOJoU_ljz0480P(a2VgXE3-W?scaGv#l}1 z{X!pPVa$7rHCgvluC(ZUx{=b9cDF_Rnn>%r8>D-_e3>Hq7sT5l>*1x$oVOqsr3&1> zhXeM0GQ~}^(tyuuQlXQ(#~o{pmoqX|{(}~nsdmYK4GZvw=wxxN9lXCo!Qx!l;- zEO46LD)^cU6gT9va{7}?U@CtLF&0*T8Q79o*N=&-Xy&?3J`(BQM%McH!czm-eSR44 zX9vq;aQ~#Hox}@e0KO}l5H4`I5pi^jYBKEZbSWDx^Lx|C9Hb&yWKPHH98zXvDDbaX zc`^m;HP?ZE#l?NTKxc1vnKrm47sid=nZ(SRK{@6Ztr2f-F8j(E)sNAK5Sn+t)=H-B zxL74i)K;%blOxk4Ez2>_?5IZK0Gm=bodDA9p&fWQdK25$J&!YCkG($T984qe!O=RN zJyjJ)zYr-zh#vZ)H(dLWv}(@Gx0xaT)l;iZxZvYOx6$2(NxW@szW*3(T!-Q;?lO5yYY(xi>891rU)z#cqfqX0kpy{o)5F|l9n z$jzL?vVQ+I@UZr?Jb*ub+4Vstg*N|0UUx&1+<6OM-l1Ku?~4+z06P+Qb0cI zCH(ukUh)cb#JYVZVUZdpdTGsbPeZ80D^HD+|E0*ggUt^a@?3AZA%%M_IB0Xb9%Hz| 
z;sNz@vAOhWQ9Fza5`qUAm{f;3pGDr|8jAt?EVKVD3KKsUe5D63-PuuF_ySuXaf?&6RRS?}Z_Ys@M0iZ?%t0V~ z9vQneslN*GJk9|JdckYYL~Y?1%wp`9&%cSvlKi8C027uMS|YY%MOXR91sEPLkK zJY+2x7{$SyrlQ25movz$Z6>|iNdLK9Xwa=ZJ*?iS{gr3~&zLrB`0!p2f`K(1<{Z@T=E}t~ zr@Bhn4EZi4HqRD`?@RA1?{$|wo0MO*4QGYtS$GGStACT%NInFwIV|3&OstmYr@D43 zIv0(lc^&0kPk9@@LUb&VmpBS|v0XzDJ|OS8ktz=GwjHA+qn~`g-{xI2U&{QF7+`%=gWb9x&qq58ZgOCq=a3*+o}M;N$GkxG%&ggx{k`KmaN|wQE>E1C|vu$Xf~gl zKh;V9qK(0`w+W!X5%5AfEHwsUooS3Ajj zBPA(he$QnNkP+Klq)}hXTwdj^@;axh)V#V<(pym8a0jFRAkK&FmW5rUamt|@a~dBT zF!*Z9p>iAe7;16tT*)?fW+}8()@4Rxvwb?p(+Avoq{_y7oztCIxboKrSQ;N@@;k_LG`wkcQv7xufmu zB`$T?_pz-)R+~9+-QD(+%yEjJFOAFPcH8nv9fAu$rX(U?ra>T-#k&wlCJwEXQIvAh zw5^MDj_}=6S@a6OCxqs0P+HGFa8&-)M!w#{;iY1PkKK>Pk$g!22ekWEQmpooa>XQW z{*0duGHRh@j)hh(pS0uWewnfzBYZxakKgHRtB6;`-jb3g^4mB?4&q}ymS#m-QOPCm z)i?`TZH}3mm)J|PrBqh7l*#SLcBIdhHitARcIS3^v#Cb0Ym?-;g|gu~WofxV9+^oQ z`gxLQCgOkSxtJvm@x??QlB=@h3Cnc}6FP2)+2@fz9C1Xlnnm9%uaz_V4tOZjQPez&cB!?)N!`&?_n{n~%!5C+C$WqqaM=E* zc*P_Qz+6q74r7tpNRm^IrpSG5Y!!7s$b*aPc~!Pn)m2vrh+q(wOUT|?6;b)JFXc;_ z;m3J1LzocaQ!mm6qgh(6S>oZJczxFmy zG^9Zlvk)E6T6yE77dmE(todV&o_Eh(L+;w% z3%RY&Hp=Mhnk|b;!%7wB9G7N(>98ILWvg9Ijg!^O2o{2Jv(GxYqYhwAh2*v|`x?t{ z-9c`LdLx)*X=_@0F!p0Fp@KS+Z+f7C<;Hq~5s-WKqZ)Ul(gD2RuAW-6M5J zS&(~4rpo&^OO?mSdQxl5ny?D3Ar9Nmi9$R_s z5&5YNf2_EH8iG8fndUz;Wuww{du7R3Xkn~{OX=7uHXGDy^*2O_bxKkFZ?zOOw`%g_+YPWNB>hh77bs3-GY`&? 
z?G)ps!?p1(kU2r8(y*%sHlQG1K753`MrCPJS&(^pa{MAC0FS&h#C)r{t5!-S?@}WD z*OAA5Kc8P}pI0Q99kQ>Ot&rAE7aV~)HQ_}S+~K9Z(y>d;ezPcFN@i*i$!l7-)=!5_ zlC^iTD;ms`#x2;bDJ7^2a@0{!sQ-MX4|M3n=G^n)Yvt1>v0CZ5RsF)Zpnb^Cfa=NM z1JpLBGH_xDwTrl!19(a7YqfTh9rElJX5{ytk`FN^Xnl4EpWin`MpmK5M{TS+TRvJO zkKc;di@4@8KxA&$9WZpLw=9;XZRLSsvUY;(R^sj8aCDn`G*d>&u_{nL^7$5Vv}WwT zCLwHfbK7Uh2t7#B=F}|#=!MDwYQH=;$9rX=gr?TsiEDMJ1UMphrt^_(8J58P_er-~ ztxl7`%2TC%OdgmaKLm9@S(yXmr?%w9&3_B*CAD#R)NMMhhA{MWe2UsJe|a%wa;+tox| z&2hs!-o|>LhAa4zjs9_Tp=1+wCzPFO^B(#OgObu$zTF|m)=Q_xa=?=EG&y!y?(WWz z`{j||a;0h}Ka!KU$F);FtCV+G)mFQ!C9JXzjRz@!Ys+Nf z6EYaRj)=UQZ9gdG9axGd4%H6I8{yTT9-}bfmbWu#$R+7n;wzRyRT}>jrn953lUKx3 zD&4@Of8(-w)Di#iO3h78YFc}o74zu`n?E{<&NZl+(Va!c4*|3z%R7rBOR_sSL0F)Y zbBl_dA?hZ*OC@aqGY^{fK_ph=Wi?k|>>_e^mE773kG^(38;gw>kZ(sRGGvypUCwFf zXbrc8A%lS{Es{0YFpLkQw4LE)4L)a+ZYwDt!a^sX7N9ExIzGM0hz!_lbFa~MjUA#g zRi*ca4PbSq6}oOCd#*Y3%V8&dY%qNJvt?eIoa%xt;VPRYy@DIOk?neqRkt32IayJb zM*F?cA$xsotg6&}@Q7JF1oITT-yi{7#>hh>QX4gy^=(?kq!gD$F5w zhdRw`vW&ZloJiHbe7GMdGmpF?RUX`@c&j^`iw~BXujFA~cIzWF(PUT_SGlJ0%1P1s z`p==JI8e>P{GocR+|jGPDTOWB-V1^9u78!`P%cuBMbd`Jo$mNnc|9uaE9J_8yz@tG z(rC_vaAO_NX-Ov&)h_sDC$1MyZwpT)ISp1Knl0q<8kKfYO>%rz~Ft7!oxxtd9uFonrKHlW`a>+}XmbT^SZ_)YCSTFD7Z zK7^4qf7*MSDT7f^l-Dz`f&eQ@6mRtw(j?~BGIdV_-gq+uIBw{G+P<~Dus^>R*R56` z^o}CNIU+||pyU3*-i3KoSj@hVEcLBF*pq1l4z8e$AbJc9thX7O&DM)8M}SM!1!HY8mJ zA48nJaI!3@;W+ssle~IJwqGlK5Aw=e1`t=CL!?!~;m>rJ&s<{Bl_IDK)^OsiN)%ma z#;f#*No`Zv-T@|`2E`N73CsKB$uD`bcc*4j%(u02e5u@YtzuA1cIV=;w#ArBYM51w z;?p*Bp))WPQsHe$y@uqmryNWMHn_HorWzE0^4_LuhbWQxcC1{}lvUF zZpL~9Pj=zqU!FrTuk=})KdHAl1}zj=qWxQyegiSrTJCRZN}6-_ zR&FC3YGybT;PLR)Xz@k-)()z%@_S#o@)~K@R9;PGcc+!f9VdYY64XlUfMVOMZb}!v z`?mNKTH#E+mw)V;D9>MAbq~3gKV;oR>2sG1Yc4B_txw5!NrQprh31GDp@B6(VzXnn zz%sbFbXGlv>Bb}{CnN8@EJv<{VE(V@1>R!S3Wj|nZy~WoAR1-pa2_O{Cy%s}%M~Q5 zfa&AqkEChTjk2~%3l_augycw`7zb^|QCdTBNCkdBIm)fVEqJHDNzHjog;VaHEnm$c zDmHTuC(R+c(bh*QnC6_veW}~DnP`=Z{P381KAzfEbhAMT83^3w=j64Utwdi1sC|)) zyqDatL7g7ykS|}XR^WQ;GW&N-(;~%O9os2oxphx~D_EeZ=IL70sC~#WPfA^~V*@J{ 
zWmCH^i+^O$5Hec>(oY#_)g)|)}_nhW{ zH*pY+tdWOXvKlYA-MU{{Lby_JD?H9;jOC*LY3OFX&&oo!av+EhhRs(DM z^Z9k`tZBS|Sws1HEa6A22%}|wnlG=3zT=8c9CGITh&ul8h%CRJ@u+cHE0@a3$8khr zzvrivUf*LDH^w5D2cI z(nSA1u3yMqHX0{?KdUoYJy9H%68<_C%NqrP^=ivLLkpuL6Vyx(#vD}j`1~gr#Xbqa zvT-xc)vc-H5O2M-MjH?5=NmyWjgv{;lHflo*I-6-v5d(KX7-W42k|=YRB1v>U_TJX z>KC>DRWPBWnGRjp>R*Z-rb_vnQWMrfjtwaL;&XNQIvG!$9g5@W4&rf$+l}Sk9C>=JyhZDdCIZ^;NzPRBMzYpQ+ku8d znnbFoJb_{u6e9uRmKXZb^pPeRBrwTF^%0jC|9br=h5d-)UD1b@+D`pl+joP;l;URGx&Lasy3Av zn=TheB3?zefFHmeMPlFE@6l*tswhYVI12GrOV>&L>TvkO0eQ}89!RDnrrTgq*n0OG z*-CZ6Or-@Bhwjv-S#(m+MqE>A6XwOB|I$>xQUJ$0o_G`eO6+4)oLOzw4eh0G8@U{n z$JU_$9D>#0mL6Br0vzP-n z&YG4va%XF9-z!tuFxKJYtlD=QE39ypC5I}^%slo>0|L>YX_;$wAkU{fLuC7VKdbT5 zZ17BB`RJvRZ}1?N&|6Nvud-=#)COf-^lh4E;C(i`<|-luBkgk~KRuQNGd?U&HIoyh z6hgtgt<&Voo#wg8AkKYeI4U%V1Xt$bil{_u?eOS$>7ODOAh?>P&Fh-lys+%F)w-`X z4KlPxmi1K!`;%hUX)@|PsIN=PC^<0zTQa2?yD2e8a@rB<)r-_$)-U+woj@|J@?dky z(k%Id3&8X1VOBj!ZW^P5Wd2Z4lnZw!K1ocL5AKKgTscIxIAgmQzwf3d^@V)b z@1_^j-p|E6=9v`PoG0(B)D*C-bL0!4F8LG8>VpD=;zH63=C#_Q=Qaw>o>2e z7jtBi3|K1BDr@;>+0>NTE9x74T6Q|+4Na{3r~w4zWY`+qT?(4WJGg@;vCmeHxEONe z=)Oq75OWtfQm=82fi)OCY@c1b7Dh^DC)Y`NbhxgJdo{GU=D{SG9{~k&vig&DXnb(L zy1u)Ms!pI;OP+SaGS>prDLH;lLW||QoqRU1QxvQ^K<=TjCuFWuPHWr7PS^5Wm!8H; zh*(!tP`Qf5F*$XoHS7S$O>0YIc}FqH3p26UzLM!?NqVx<&__3wlZKEo zEZ1qOyf>z|6svbOFB#=gdAW1Rd-mBf{}gY~?LPXt0a3!ypPbHteg0L+Q0Fy0=E-(lp&!-`z--=V-~mA5p)k$37=LXEEmOBJdSVtXG55r>UgVbP)cF?6ss@Av^na zb=xk~@Lutn7)GO+v|A~e9wsTA394u-eO=M|fdiI_^IQ2f0w_WG9@z-k2#b^#b7-u& z>IEkqx+yFC<>W1y#s76P3r5J5UF??`%O^nXJ;l7OAek2cvBKWf6tT+ctR^eg6pKP;6;zc7~H4 z%Jy_j!Z>(<)BnSEu=Oj!ZrfH51}N>}|hyOh)`Ae@QcO=Oby%o;`r*g-pK z@}$-D4x&rfcBLx0LuT`sq|5R{H)bKJnHe^KEaKutvZPuL-fkUMcFvfAvZIju4*^rm z%kE7xd&bmVPVdP_?GDI&_w#zKQJhCBjU}IXC2b>!G(SvWO&>0kXEJK{%8~v6xz?Oy zzkL!WGUnvFuZ8uRyS^m1ueXef$^+5(EIz(YnJDK*5p73${Uw!8b7{{#?2;$86Wv~^ zoZo(KZo76x{6oYv5!IE~D-y*?iu~w0s)5+vlEGG1P-QX$*6*1zyQ6&8pKJIiSz@~t z|Mk_ah@-*}cbl7i*ms|_7^DTJ&}N+NVwpNqSL&lT$;N1`nr}bWhYBf`RkvJ!D5+)Q 
zWts4tl#IBznvY(sk+0sAnROhjDCU&nqw#twvry{~r11j9HKS02BFi7L8qz_cqt8m9iSG z^L*9Z$&Mv!&@`hS7Ma;fmi53&*tfCV$2H(?@83f`j2}tr1Pn_na0nx1b8kd*!5GX4 zC^jK`k@ zl}zv?c*SD^2dYI{K9v(Mvws1?UcD1xz)lgK05^Yf+XP z$9&%2R_bn#Hp0}mERfdV%*=@j`Nk*BI!$8`A~fN}45La|r6tb&-Rz?PMPj7`s@dUN zEK#TI>cETK#DwL`d<25k3pqJ2jRrJ`56yz$*ti8X7kPI+Qx+(%7YoN5j+8T=<#E0G z&G*F>_MhdkE0n?4K;dv*exyGV>K=g@Hr8*N%l0?5J};K{XLxdYYwL zzm%7f7@;gRr|K%wMEKH5=^5}kLf<7Mek-49B%d@jDZBaP?Fuq8)_rBN5oX`9e}Fq3 z()o7j8`5mLs-xjVoXwHzqp~wk-W+7cEU&K}ta>{ORg1y2EM_SFH&1Dhh9ZHm*8xNL(X5rIaf1~v*+`tc;-_Ya`XuC z^P8!H%f6C20mL_yK^8j2B3{mtU2_Z0_?@M;YLAZ7U(YB419*d%_`;(m7SikkrPD zJh1Te0DDQ|aBQL#7^Lpj;54}*Los1LG?ZiKWu2c|CR9csa=K?hj*F&5asL?+V{{$95Ed?E!#rjI>BU_?U zG>Ao2IXb<>EOtqaCPaTMvCn}u*0nN4{+lVat+Z}ggJw+CV+x!8y`3#m(w=D}l6AJ{ zrFY|+G&7Tu^}~7ZqOt+5*t}z5fqB8V8q2gcR(?Bqu|kV;ADodc_ekqZ^Bb<98I4kd z#|%lPv=>kc?cGsp?W3As%e#`b6wQ|w?FDi2r05P9Js1pD0i!-*Sn%S z-Y$JE@#S>%56t#@-tYxW5@-)pn7Q-Cj$(otLWt~@(kAlRT1MyCyXsASpDmGjvH;aF zv@-@8Pa9S*o%hfn=%ch>lBIKYOXWy{s?a*c(T0$Y;w#$@N#{z$D33ua^?MUfu{q}E z>eVa9YJ%d0Vx^Yf)yCdZ;inUjsWc%FMIO0SAdkOD1?RP^i>A7p++6UzCY%>+PU7vLB!45;78)sV6Q1iFT+~(2}Mz-;-#M}hs zuGSdskm;TQ5+1Nik8H_(Ko%sTz+oX6&uwZl$E6`AA22JrkTAf8+_y!>Ea!!;o^IM@ z)myC5EoDoloVSP<3+5=n{iRBjItj4Ob(D%t74~BP=hg6#ojza7#zSkc00seiecDMZ2gz*D=amXPj9AU z=`Y-AO*c839u;}hVU}xxYg1p>OqrT1M_S8$?d6Fr>7|5QUYXsKky@LYi$)^t`NDF3 zwX|t(xIMbj_y?+})3Sfb`!WrX3mp)xf2t3^r4qx!#!{yA-g(HYU|72l&1{bmJ zHojZ8!1}xz_qO5fGA$K5wULT)CMa7?;=IrquSx^wyi2m-Q||ZXa^5hrxpkW!xHl9@ z^MJG+CJ#l~^cTh>F zb+iTYc*iWU7Y0}A5bp1ff8!VT96(wHP?kHm#$let!1cvf#s{A;z3TVV2 z=~N>X+htG|l1z|(=`ld>Q@3l2eZQQu$lxJuHCz_w*6-k6RC~;?8F^C(W94bTOd1iJ zEnin_;JEA+X|mfE$ooN)tmdr`NV0_UteDNK$M~a-*iH*7c-r}ZYvj_fbtmM{7Seio z8tRLISn)rApj|WT&HK6&4AMrI#OFv!)YUV(SUy@H$C@di=Ugi0yV9n$phYo~APa4w^qrM+@CediMWX%m zmR#t*u3Gvwll+9Yns!n$cc=pWjA4flP`k zd*1D!`>2~L-~{$R^7V)uaUze#Upoo9YBc*4na7Sa(Do+emU_+&J>0gJ;5&ieq#kUw z6$uqH6ok^N9A@|o3DUEUpy*eTiID@Vacy|AiAqs>z8zp%Q$9kTY46kqff3WE$n2*z ziX7N1pA4W%rS-D=pOm5_nj0T4(FjB5mC?xQ4IpEsrY4(mZBxKx^z@T65SL$%W=ZRJ 
z>FP@t$B6YQ!jn0}?DH8e+q`88G2E!+Irof8JFhWoR~U6#uIXA|x>gVa@|emu)uSeVbXR#6it(yW;2tW2oC+sC;u4P* zVk0TW+mXv3AITN>PL+oWao(*kcWQyvMq^ZR}?Y0t#dR>rP_elz= zFrRr-**)JL*WAg%3?#Rr(B|Z(Tm@BlrRz~X*IRUNg1j|L>ek7KSIN@*WPVSTNjFUh z$b%}1Npa}376`u1!Qvv1&_%XRkA1JBKemA$&+EZ?Nx^m*+|}lfE5PBqG=;ZLS%u@h zb`+GYGd#xU&&pF``Z?sD&f?0GqPFHgnQUnq(Jztf0p!cV{`S{sa;SW+;xc6!d?$eNiLuSqZ8I12DnS0_px#;351L$ZLrNO)yalM&< z+#P4f%eFw`5Z-Z@srEVM`DTfg@<|_!D-+3b`_Nnh{`%fbmzUK&!Zr3a(_RCpVmM?| zitN?(v?RzJ)-9FP4%pCtKZ2wNGQDfwCQ=T@rpt*TRq zk;B@;(PzeH@WXZavIdm)fv?Qrd`0vRZBXwr(O7%fMEU(q6Lnl1FSmRQg4b_KD-u2A z=Z#F(2bUlwd(47-@*3y~JgDc`Piuwd(Cjz2R4tS0HJoN)F`8CDGKm#ka&4h+rG=gfbxcg8zZc5jv|zUBXP0OcTlxQhe>-THbN zaF?e_8M^xwT9A`-afJ4RF%6Xn+Ov;it|ArTwRj|DSr*h`YC00nW{014PzA%aO*i8+ z&9Mx*q#F4(Ps+N^P>ko8fn{G2CGj+S#f$()(5Ga2H32etXCG#>?v zisuZn4b7SkvgfPBo%T5jt6YFECT-%`#Z<3$u73KL-_@GR>RepI3%#w;ihEt{A z4~WAcyUcs8zMlW|X$y|nW3C@*jw&gvNSE28bCOCm#(B7ryp|%bb~k@)&f9#dUIxpb z2A5mrRwvV0L*DH!;R7m|^beD#SZwc$<9?+f} z#_IJ_Hd`jE?*9NMpTBolazUV44{Xb)&9^&DWNd~mF~d~vxj_fn_%sDbQe7DZNqx=x zH=5T^swRpL+9uAYiFT1?X|Bz*p!UkNeGK%3>bl?l$?wf+F3lg2bMqh~-a&HHF!I=Enn;g@(tKAM z01j_$olJW~W^dumI~H*8y=;(PhL4~FkCw`Lf9*70NVZO-knmYU4byQ&H8XR4ltnX7 zxbD8k>lH#i$!R)X!fN`Z@+xww3ng@5|js7W?p&x+J8D=U*%9KR;@Juc!q!?VXyj? 
z`T71$gu7*=C$^A!5gx#}g&1~h=l;x+Vp?hv-;uNPr|TX&H9-EjiB)OZWXsGHNlw)h z_x2Kv%4gRpG<1qc%~SG3vX*iGcUtavMLzF^R3v7wpdp7xk)~>o6aN)i_hsO?4j&=+ zL&&t+-B(YQl;)bf_@UfdohGjy(HzQmN9BrHHJ@lycgMBTuM5v$d)&UC+&2DY(uyx% zi%o%pS_dU+VCItq1#cStkx}d`M0P*KRYxxc87~BrySePe?Cy=PjVOLuZg@J<{cQ_@U|`j8Dfcp%-PVY4T8t+>(pcq5ByHYo`6_oigA( zkTU+-!%{F*dM}r|-j&t;@Rm!FYp%XKLnd9zb1!@o1_WXZMfSVohZ*u|M_q#M!~og3 zMxG^?XMa_C7AJj;)%c1Lr|29olimOu?yjg!W*xg6dCNM!dEzRwvcs}1j{_BIjZsYgyHnb$ZPtbTvE_&-*zs`i4IYqqSnQE zJ0`1MiU#z}-leA>f|$^)r8L|qAK#c>-{Jv7;8xSw?s zq+3U(;lp{FJ9%({`Ojpiyp7ckyz_6d9Mba$@A_FTPk8g@HGeSFD4yygEW9a^P>9$o zpXh&o*5a7+P-3Zk-I)BYJUx@XiMRbKjZOcZw##EE(0E(5dd6n64II#SRF47u3(Lyr z$P)xa>!vbCI}BiJ{z9ZcJoK}sMPz`c@gcfP^Q8!g8jl-gVFU*Qk$=A94OBz`ka_7b zvQ=;u=mOGPh{GV{aTKo^wNu$VZM(7jErF7S8)yuB}zbaCSLk>pD zz5*cx9)SmgZ%Zb0MHhOqMEDK@Q*`^G{ktc}WEndeqtk%IzG4+tpMFdzG|)xKA7XsG?g;-5@-U1% zqfj!Fh9_QBX7OtHMj)w_31u*-+R=l@7j%4APU|X$olbBx7_=96q)8p>H6Yj-@(6(1 zk}ST#xd-Vv$g*)Y#*HLRwbdTh1}+sy7_)Ntbu6ovH*Q~$yIXTMp+C&+O&G0&G5BQz;(f1T8v5vx1&rQalOxd7(P5gG#h}P zhtvC%d%@H&xoqG_(9s9a3i{9U?Z-6GA0blqPviPPr0VZ{nDESx&uxDJn7SuA7GEn6 z4JvRt?B*@)-=n(2aG_tpF2KD^D#;DA78uUxY(mdSbc}YS3%OdyP6M0tCxKGna;~=& zVjYTb|9fk3*ScRw<^Y(PMb``rR`7yE~pq6Ol)U4>pH|R^^ zX6m4Yh5Wu=tIJik4$q99KAy1s0N?Pjupul2m;l=P*e^Tj?BEXDPl8s6c&o;97auiU zzRe@Tc`WJZn3szxA*rFDs{^?uhUEqC4tqzY(L(`R+mW*s{h?Oh!X%A2L+pQdBD*ID zVem^V94>{T7|ovoQDz}GK)u)-2_&wkX)Bjtr&GoCF{iFitFB^ecq6B#LeRZuniG-_1e)0ppw$Hb;Dwn4z39Kd6MV}2=DfT_}CGD;(MU#?8rdu zdPdY2;y!LRC%fK@Vh*c;emksAU_A7!WSP`PiXr{sulYiSL3^JvAIp=Lb=pPJQ3BHm z8XH892+tAB1m#*0jG-x^#)&)(WAd!`7W*@i>AEV8ng~8^z8bV}d3cep|4d_8uXed9qE>`xw&kdIdib2AK_PW)MfX+=snn zrH6T5-|2q#-i7d4K<2O^!OYK5s{J-Q`Ha|%LN-r;9TW1l+J+rsgH(X-C!rsV#q|q6 z6tZ{ldr{wsy~t^~kM0iioH{{%ExNjp6mkWcEPihZF-B}2Kd6gwctwA+1#A0u@$#BM z-h~VBWRL^WOY=j#945iPMlv724L`6SGI#7GgnstcYMJI;JC+aMSAov&5s)0yw?5&0AZk%G16YtR>IF znnfl6Q9RF=+4k9Gm@tNm6m2N%m~HcG5&HXv9hpxIZG=M!43 zohfFFD(EBPafhOdPoHx#=zd=;3qOT~N7AY+AT}H)wdHh;f`ym#qP{G%%SqC_l8tj+0t_IcnqUA?tWh%Y`#Srl_SaTK_GQ2n}uJ0i0ktSsG^4AcU 
zsh1r{{)X-E=Tl<#a9V~Eq+pyjNCEs&u4z*wB{VgbTJA6ZzR@V zJpdXZA*0LICbeGjm=hH zx)mpXTMEH!$n_CXNH_s++n-)q=3^BqoPZphqf{Hh?6T8)nP}=rau!*6^gL=k_Jeg> z3keo^L9PD{PNTo2FB0D!{|r!b%{|PFAK!|#;zwwv3%~YbT>~sp{(v8o^t~cwu1m{B z_8>n=NoH|`Zf$WcwR7Q}sD)c#_O z4-S|Ivu=Mem*hG2)GH&BA$xJxCZ+it-Z*mrhBUh)99mbNps=!88@Mbzd zE5?7Aqb{qE2EU9ori4zdu1@)U!NbdtK*CSA{z}y$X?A5#PZ&mJ1q5#nSQeNMPYa2j zVn5CfmIS?NbBUm_?*2lyEx+a)XWGTHkkAa;50eHkfmE0h0jnU{<|cQI%JnW@nxV(# zgcTV36&4GYUc#`7fGMQu#E{L#?V?ju#Pt-R54Qeg@6qJkwjdAvk&4rQ1G+#ti5=B9T%N{gK0yQwLXOUKhTAOhW*+6&6^_zEw;L}ojIY6|$kxY8W35Nb&g5M6 zm~jv5NV=YmJLak>3ggjvOZM7}3pt0mI%i94zoU2_o4Dvo`rd(4s|eQ7`Pu;?t>j1C z4+eqlN~fD(-3#jI^6a=0xD33LwLO*F<0|2m`D!bA5+QI2cDaWkz`N%FFZo1COZ`&w zISM$71EJb6Oq3pg8213nkH^m9cx2VtyP20ATK#$!syvOA@xTxiKsB-MvVEni9Wh-w z&71Il6V1qwZ~}>OK)v##8CA6wRv4T2VNi9D9q2&w|qax z6f+n~+#-+ixi)fU{+S0o@20lq{KIMy`mmtMKHQj5%$N`n;V#i!h9Xm|4+!UKoP?oU zVc{NlHy1m|N$sgUAeEZZzK~3U8oV;is8BI{5xxhD_C@c{4a@P^@&B-U_>lI7rkCcp zx}?i=^o_BHKG)awmTT^he}ZEZu}%_HCM&Y#dM9<-)5tsn(B)`@UBM=g)Nz3J| z(CA0o#nDSXpNPtbv+a*vvIW*$WSt|)MfX(pXlCi6`fXh6-D{^lA1il4xd?~rR>hyB zROosjA7MrgEb8z8oH=bc?wG?PaMx-Ylr!IiBlY0Cpd_Ug5}$Z*3CO(mr(0Jbob?D?Vj=VrBN23gsP9qL`4;dW)wuP_L9}OO2?9OBeWo_ zKzHe89F7|MN<#M#Cpo9Ec|0S`M&)QEUqN9LBOkw)EYD;PDwYV=vz;SxLsDrzksL0N zAAv>g|EYzapdcW3lvaLWmR5AJ+I&0|?Io9z!C$apSPQ%}JZGONu2($YVaE-{>Q}Iz z9Upc4L4{2pU6N8#W=f&$@Ij|bA%kUg(82m2Q6A%kq=#1CZP5x_cY+FB&74SEU(cU8@+?h)##nBwDrJr6PUI%G` z$N9JSoJ}ccPi5Av$YNu&IHK*%*e%Nu_OJl<@xL`FMeDwA<=ZeS6!OqYb#E&L_~E| zYa_w<^;B5?mugOAKz5)vePMLBqfcU$lk?USjLo6|LJ|7}rA^nPws6f3jBtb|ne3lK znerkVQX>t>^mpapMdor|;=YD^{fR=c$Lt{g=$3cV<+xCiW5K?y6$Jo}ArnuoeZ5e9 zSI#SXVO{{1M$Ulz7Ymy`6t?;l0rpZ*Xq`qMOioP7>G4O2=1zYHD9#sc>?A)B4AoiDFiInA zEZ~?K$#}&H`A8|kk0x5vP%_f*=(3*H(em3fQ-Dibt4lpKNflDz;miN};573JXMM zv2-Sh$&qn};{ey7?qO{<8!O6X%&6{$58X)w&H9bRKc*M?Q#UXqu9l zqNhx(hXO->DKHHz#mc>)u-$Eq2@=Epy7MFltTUQdKa-qG!$!PEZ-PXGUs`2R(sKLa zEwyZaF9a(#bI@e5LqqORtcb9yE23cG8rqMea`b5y9hE|^IpriL2w^y^NUNIjDRR)6 zrpv*6lrOgz#J;wpIL25pxkE4SFJinQy^4NA< 
zl;w_RwKyNbpC^&aLCb=foeHIqQaS_El4KhTmz_Ykf!pN`)!fa=ugbg@ZVu_h3Qia@k6zJVjpsJ6f9a;uf=w3J{ zK@mh#OGX+~bxd+Xe;}a;m`F?O%aZ6+7CeyU3?@9i-~IwIGE(jib^#RB4(BP+sk3RR_p;M<(T?zT506twOnmg3N1(J);wp3K0)VgerANX7{XsGO-= z@cziB;xt2?rmG|~()G)R@>3>_y0Y-$sbN@e1L^{)*E}nX{|Tp zy{vJ@Cg*Y_1EbW2a+43KA_b^h?>B#7> z`9MsjoJmb8C4eCi)|H&P+vyl$DjYskofaN$!KmW%mGx}C`C212jWr7lSCpkM=xf*ny2<=P`)xpuLOB`R0N-mYF0 zH7fSryWel}J=bq3}{Z*(*tJQY=UlzH`r-xvjDSlwv2bv~|C#0{^&F;4gWlY#nep({uFf zfQ>P;u##~k`G~jN5NUs$9#7tttjIH_5+D_jj+Kd-ev*PH_WafK#L%baoaM@KoAoHGSX7z`syU z``H%Lppr2QIM#bwU>& z{O4};8~`t2VLlevH_wwnT@L;1IofKqA4i&mxIVAsHr>-vQvBKNPl}Ce8>6qPj`C=F zBZqc`;dW*y_*qja_AA6&z>UCcl2R_GKghy`fhffRf=xZVs z$OI9WhQhm^$w5Alz4olbfVfq3oze1a8(qk)#CEh^xbH68v%o%p3SXPy zJ<^L42B!9w%v^jiFxy6VE*UHztGS+SlM^6Tx;Ph*7pJezMQ9mx@JVXLd-n7aZDI=V zP5FOg`g8*C@J`76lIyDlsAbph&EV(>#w(z8anQV;p|g{qch_lFO%k!TJ}YcK%=GBb zLS64of<>W=d*uQJk;)EY_D23w$}%I~C*?zp5e@^kgx9L9axc&sCAI|&*Nf{(kn(CD z?KPPqO!}=r^1vc68#FzW>Tt5vm?j^@4btce*?K>>ObZKi7El~0K6MZO$?!a_>wF&6 zNL_7Fav7)zSOuXT)eg)s?j6D{C0c8$)|)fU?l-po#NW=TMDxl2m5*AzgT|rG|2@^} z4Jcf%R{x~Ht02#u=3)0wFE6V+7-{{MoMgSb&|io`6j3Jd;_aXg4Ux#gr&}(dY7+gh z2Di4hGYU`xZ6|Nf20o^Cwze{f3+N(f_Z)6rd_EXC)%cLJYa#45!0*@j-3eY3EiV&_ zZE?do%J=?bu$*2%*Iz}}T5g{LBXIp_M`o(dPnZmDE0d{@unMu?eMGrn?%VDwvu22Y z9eF0Iv138VqY~83xI){VkGkC(sc@Pf9#DCPy-xmKrfw>-WsdguRuy6_W<;) z3nqS^MinXNUAMp+)|YVMC)D%3UX^C7P%Y;LVw+T#r?=@@TJH{G4Zzs}0wHhHL2FsW@-&OG`bt%`h|;r&YwTS58av^2&9QgP($2a8liDXY@Sj zu?*BcA&#F^5~CAq$dy1wOu`3HOCJkScU2LxXE%7s#{ebm?4g#FG_uw8=*%+zb$Kki zle^Iz4rh2kiP`Ca`FN%f@s4=6U4R5|g9>nhx!=k8X)Hj5yi*ANZP3d(^<8V23shH@ z&0sVBK*+;V?j2xK>gw9}xnlzfwe2smejhV|uw^}Xl1a6=(ynW{|5w}th#KvS^#B_q znmBUvB?(P|PPurgWP2Li$32P78`}P5@MC^yE=VZp#ZcRa`lY*WFNc~C48f3fAJy6y zio)JTLL!3Na=lz!h(|+uj&2srmj8#sT##I1*icK`qzzZ1q%G!Q|Jx0aV@qr4;y^p*530SgNPUuiBPWnmXe zHwDm^Ju2{XA}J>shH|aWsMh3yz-;%9F{zvD^?fB5Xbu=*zpV+iJI3*7H~+~g|KD47 zdS6TKxnqkhC0K01B7Xh~uf8{3stdgOAlGW>a%k;UtGvQi1fW~?Yo&p)ucvz`{AZ-= z+A&^;lj3ipDp;?l)2c3pr5tI1A%lmput-CCW49W%z0NtZ+XvC4HeRa_3KB<~+j-nE 
zZeT9ZPhDi;9r#p_z;DIGRoK%1(#pYeYV~^~LJ192)-&U0mAN}2{JBdAMOQks(a#Jr zqz2@vF-W;(gFBPmEtT8RDL za*3_v4}0mPbQK4w+ytR)^xuyY40xKQS=|^9|63#~!Z<-m0DL+Nz0?#>DS|@lFi{y! z|4Z``8Sq^CHyA>A+XWzZujY1icEi#h6*s*xk5&B=JGxRc%XMQ>3?*HZsFX;D>)KFz zs04luj117YjuIgq^f8Rrr<3X%#Wp!UAMP{COkT$M7O2`(+06vmyQ+f>W$F`yQ0viS z294+D`r9(g-sDH$M)i0``?*-KQB>7N^idk&4-CV2YAmcD2WVchH$(sx)@?L#Eu=$gtV12EHK+r{%8D2}Xs)*pr~7sgob_7u(ujwyKGiZ4zaeYeQappR$W|%7G-_@z@p|en5re&2zcNPkP)Q$ z$uZUb%{YzhnrAI1byK(2868TLH(C%ygZWj1bgtmTQ_yFQ`)~chPQo3EPXnLrjjqxn zzt+%BELvAK3I&TschVq@%W!^dH{}<3YVXT>3f0hr9T(+-blo3Tr?OnKsGQY}>R%h1 zW0F_y%yKVy84KW2Yj`_3PoE{Zz1j4c;+ClVwv-b`V4MdYq$?Ei6_&$4 zoaJf;)5MbD`?C+y{SjS{Jg>g!NeCHiwU&k&SE#8ObZqhax&G-ERIUlJ{IdWMpuW0P z6S=*pr~7drwW{y+^Sot&LU9s#QoIEYdQ9;t16_JD@(QX=A`N6B4|^KU<2gR*)F1m) z54J`S1<+0nQQ3!X3^7kwyQ+DNrS^~CbFH~I5F?cAx}$ygHmm5;Bbt+^iB;{VxO@EYRY^>;DuWu8uTZA?RWp70wOW; zt#hq3bYQgQMACP zfYN&{EY**kW!>@dX@^Cxqz1P&8Fz#Lq&>)DyL2c|zr+iQ6tZQzw?GvgzICBhLe<5u z;C%PoN;y|^E%;0rH(bAF>*b0{mOr19Ma7^!RI2Vniz;lp(15Ydz1BCeyabj8X?29+ z{K2x;rvr&*^VqhjWr^$5({zGvE_5&ECzr)(Ef99w>8wi5rD;$XE7KZ*dIBzXf0k-~ zsGjj@Jpqv=jjch4uFJ`~c)*@c8u7|z;o zGlbVVO(4U|x{M)}5De^YGiVf!*iiRZWZ(7lzEKaXO)xor0|S~$Dh1o8M`(MJe`hx@ zxw@K$R`g}V03V|`P;pJ44XCyPR5hwR5x5hG5p5*Y8u^f(%C+j%!$&^UN=Z&ORhzj^ z>FgZ+p29=~+lCRp5@|x4Lb#qu@)w%(I*pVP@vVRFU`x65$jybR2obOj$!yF0jnsdV zah1DQAd?pK{G&gMRK!^wZlCH|?E?|SZPRZlDoHiDj%N}e#gk`czu<*o>TBKC!?mqY zSJm=#o%=fSE|5X=_Yh#~kR}$E4L3{_&I8;_Wrf>5FCm2m9tOBgldJs8XxU?BgL{Us zvqG)UgU|52#kIsyQTILdt?FZe*?BcOYB6kC4iuDO6Vvret={dX2RP}IpG~NX)$^S< zds9HJSh5~0HT96fT&PA+3-+DO(C6jqwSH86x6aq4Rf*+z=`aKQ8xe4*(>C-FgLR%8 znS>r1Sd&oKf@$}!FY&A@!I_0n0OdD*Wpe2v%m9Lz-K&VH+5)&ZZSThDsjTmd>A!iG z%kt6Yd-b4Fas%WuDXwVnpP%RAH9T2e4-!1VwsH8GoV@H%+@o$tW+Z;83vy8E^?{w^ zRl8THz=Ju%v6 zsDI)x9m+AQ5!I3=O=}|Y(m-ZTzCl)Kd|HH3L$}fC^E_QLbPnR4y{uxZyVGMe378PK2P7Jrr~FX%IH04rHv0d_YguLaEEIl zAde=)Vx+`V)7?d6C>WE$^*{j)dZWxgYyc-z``NULjnow%44DJJC6Sh44LY%oNC%gp ztQYA%Uguv;`ml#KLZ@y0pT6l~W^qSvzL%Wjt~3L#xL6xJq4PUUe{Cg#_3iBL{!SsGhHF|*m+*)#tah_Nu8e}Z%cK99kHTwq 
z8qDTfXk9?b>f@E?(5$Xhzl1oQkM{va1QkXFql`E*8n~H1u1})=7XE>`d0$vtFR%4! znAKlPke~ukC`+X%o*{jzZ-=>7gY_ z76}d?jV?^s?M?TTdg0@-9IoxH$xg14?oD{C6<$PX%%Yb1ApTQ@=C;%+$9=TU{S2_)QO<47-ZUFe9MmP&VmdJX#bZbo8I@$HN)wB^^c8%b5-0YNG z2=AnY&~UOu#)e*C6&;$ebv2y!tS_4^?9biJO>)bLCl?9uwS-b24Xnozs#QqYq%$+)Go!%>U|E`{-h)Rty8>DWiQE3Q<3GVX@c8bV>ol?hG3PWr7%qYKqkt(IYB8#OPD-|P)?cGv0FT0)VqM6-$3tuX4s+n(X+ zb`Jqb*A3029Z=`f|D4_*Lrc!xg9&IOI%K^U<)4Tbjb{w4kv>Ba-AFr9S67B$MWbq> z?#q7AQ4KP_BvHeLX%TGd7Ft&*eQc~r2YTsc!|ZoWDP(uxQ*!8hG|)X}4NIB_qyEnW zo&u^HcDpeJ@dRU~uNuP-uB)yhf=|{o6oJEG_g9J5G+7ujQm1z3C{0TR8;p87`syr? zL5dIBNnr_%u%33vxHY!F#Z&%W!eTnBuk|{dU*$fm(Ct+^s~<88#aEgN=!-#CdGBVv zIKKJ++lnRQ#y~u%Z@w*!bT3j=NxGHdrAF%5tSBU+lRHqwf6|~hflq~F>$t(yJyGH} z_xiU^NRp}VAbr)HtVq3o$W+}ofLn|yhJ(Q60@mp4izz;%uaYiSMQe;tbE41|-8a+p z$_TT$gRJ?;y8d`W^|jlsF_&@)Jx4TSrW%seFUY%*d6qtmQE?h}!|9o=jL~YXpd(CT z1*>vkYZ~L{nW&|WIOTQvb1Kt*Ep2LPIp50?bf2ORo32jjoD#t8mvu+Ts*IhkB-#x6 z$O)F>cvE18STCU1SQIhl=uA#3a*03XV~Pi-6Mv#Lvw)wU+>F}0@CM(dIgw4T^_4>Ef?6TU|x zd@=^*ryL5OV5w+j-M%+9tB0QPA>~AZA$bhev?ko_u|RE8ua4}4`>K=~L~bHt;5fWT zbO`!r$8>h`tRiItrU6Aq`cd`^G0hL~VTyH0Iy*J#37f6L^r+CbCCw;lw8;j?|zyTV5jXU@noM$MhU{avin2_r3~kM(hoQg-AnDv9l3t)88Zg?f#s7@jK02Rgsk5dMTwpwc1p zFK-&7d4;Ut+kBktw}sa+DZ4xXiDWQzTqQTXeFAC2CyR8_3?c`JqY8~sMT+&z0coA9 zQ>sl!wl44xAZ5m}2V+z_@o&FR)O}(6s+S9NTRu(HY8yNcvYF=hY%#d|hf1TqP@X|D zDQu-+E>=fc5=XQjt-ft`est^(bX-Fg-@GsjCn0}bswNW+>QG-TD{~Dw3E`Hk2F1=1m_H>#8qJf8pht z1PcGWQ3`|T?^n%$y_Nx$s3|v9K9%AR!bOJ#+vsl?Ecga>lJz;eHbf6dBh$|5r!i=I zsV>E>&x+s5Nn5LJLyb9Hm8;V&L#PSXnMnD5my(Y_kVQ)Omg`koWT;m$83@7@|7xs7 zIOM<5>6KzV(8HvyTDr*-A>PH2r{uM+-W?jIZAI!!vXBB7ch6Y%#a*fTsg@#)4lpv( z*PykQb-EN^o{T@oHMrhHzBUP2Aa`%TGiu>?9QZG!frv$29w7=r%!@;*(gS{^AW)r; zWf??ia&ig)<=34E-R$v44Tk0i8^}?T!KuAA2YeXKf~b48CqW)f#Q4#IWTaMh`Wc;# z)gY<37k=(^1G=)5&!8Sx7j0X|H{LJSpiw&0NEzERb+n%Uf78yu`G}(AlOs9XpFD<# zKFqcWySK*LsdZwqDu(Mo?}PBUn?%^t-EZQ2Hvm*MXp{ltJAHb)4_Eqwt{-}k1rMa^vn{6ME(jlz_ zQf#%p*BE!3@6RIT?Q)H&@@*jS2bsZs203O0$Sgy$&_4s*NDKMXCMVP^{8H8E6Q-%r zwGHxtc;Msh+mj1RI|8pca1b2kyLm3Dk}52#NBn4tYl)>^p^(e2i) 
zEVdicwaT0S`LI$zOcIPlxL`4jD#?Jc#d3eMNDCI|thzYXy+J4Rq4eJ9kE0{p=pRw3w0!N)K;DG$)#8antC)| zrJE}KV@nwG++LgvdjT0C!qC9xxqK+0t9Z5eJJdT^GWqr#{|`2C8;f<-aU7XBUY%X2 zB^8!pTn&H(tUE79>#Fr_3H(+alY&MAoHhyy{T?Jlk!07KF$jL9mh@gBL&2N%UC6__ z2OIDbsSBF~HU+|_9_%f+^hJK#a|oZ0NafiJ3f#A3IiQ+Nh3A@Hgv>PGWo@~7L(cAz$UDyeHB*&lVkX1nBS zxTm0+K|P9UzbZ$NPSyGXxD;iV+wafo&+}hCfL+&|VQ%8P5L>7<#0aw+pX}#ZUkKlu z>^~osMH0uFkNGqD0uK0PuT4PxApfsuIU?3npU~*JoKLcvV8RojkTzgK!w_3rm*7{u zHcvk_TORp#!oS51k=!ejFi(GAc0|jq_BVOCHm|y-YGj7KH{hyt31XwTVJGx#o64Co zWuB36r*~mKDHw2%9*5h8@GHXq?__!bOD#L*(}5HQV}Ey+)T~1Hfj@S`l9^_8rJ34>yH@W>@`%O~oUTFNFufk$8kW@~3jymw2n z$7=xH;}ji{f75ys=gk`qV=79Ei3hEfRFM^fd^1AVQZT`CsvcuB1Kg-Q|B|G@Mi`Lj zJx2ImPYZfm#v8!+7+ie)*TsLqVm?BaAQkkZ^;}I4Vol~+*XqPz;xl6RrKaj06HqR! zF^Ui!vZtFU{A;lO+V4(?Bu>}2ll4q@H4!2~P1cUFI=o!_!#v~aEQ7WnpEqbuN2?hv zBdROozFn#&bm-e-E9~2d}=)T|cM&yo7M_8ZL`4XVaw~ zeFWrNYG(Pv$;*QeMS(Q||j_I0sBH$1(=7S-{pk%J$#U#*QR{JnD2 z7v#Cj{DS4VdUYi}Os#*$7_J+2=cBw#LwLP$Z1Os$sivL&aDh}kUxit00*EOY&bu~M zxyw@B4rx}Zk31|QIy!4?A@MT0Cp3R-Oa=On#s}(;WS-n*sk2Ga-gjJL87x2`+X!oO z=Q#IZx_&Cs>Kc7JlaE)TMF=io3A%(#r4COXO<6Si_rntN!P|SH6i2jucT)SMhQN-q zUP1LblHR&aAI#NpD~$aN&~#XM0JYkTv&XsB#9A|l(`}o zOrO+1$nUSTIyw)D9qmPA+wHY(q3_k2Tb^t_NTAZq%fU+H;r$xb?&5e)<7RziQo1{q zD&8~pZ|m0(wN3l^LKjHUl0XS+Ozav;Z1Z#As{1?H82+nSUNWrebiyLGAw5Vat%OE|CVl-$^qn??^8=hQA zZpHm48P(|VR^4=3J#E!tapp4B+eYXddNw}0k=bcju5D}EHduq{{H}TdN?uLd5zjfM z+Rs%^GpBn>n1!})E=GoO3cvO>Yx~&P+woUohZu#6Gj(ge9`2^}nG96-3GTfp^^IY@ zUZ6JG%R^otUqyfo9se2KI7Gf;4z8_Dx{>1}N(t8gd#(?6R|@Ej?xzp34h-cVTVZK8 z>dF)^6m5dN(}!?pTQl!Wv}Dan_Yv$R(y6SaNV7(+DI~^1Fd|sf@|MP10{eA>6Qur7 zZ;Z418YzMy!TMr;+x_hyy9Gh3<#EqsamPPq>1rPr-b_|;hsO(^$jSaNVOt5Im`gCL z(U=5&1wR|f71k->P)u4(sYB zT|z95a@Kr216Nd}#m6dp4Cw;@aylJFbqDRF>AlwAT618@pm#Wuxw^YT??%l`b`c)%oG|O|+`d}1O3(F4T&;V&CKE!^3QspMs78@4#YfJY*KVx#e;6X&`@kA2Gkq?>LaIBa+Rl%A-X)Fzn zL(~%o=KK#ZU??z?_q!+;Zf@8lNtpGrd%=pP@iz!PcuUsqi}NOw)K2w@a$*IXn-feY2RHSW<& zKz}zPR}UK28?7MRB{lwP$g_n(cxt9~5j=CfJDC8W{V^lX<#bPo20lzKWzxJU`e*|( 
zDs?(itz!&WZ&1=`oqclpC{;Jn&w-fhhbEN6t<|7Og(p$IMwf#|b>@s~Yz9YZJx`y^ zA)iL5m{W;6^ThE~jX;Is6!<&do(Xn?Z@*A0ax6;Bt!E_-4p{q>^N-g`>r|MT#i4uV z5UQ!ID zO@!oQBVBOtCnmbs@xzFygxjLzPACXSn1+lRs{eKLf)9_;1Ui+N`(Ca~XFuTDQ6Wpq%L!L*nT=~*-E zhfCeB6Isr~HZz%<`=}3IQU2T!x~INFUrw{<(Fk~qMisAOG-$>1v_UqxgoWXT`)e8J1q5753- z43!T|_V=sSEFXVMw4>wFmGYJ9H2TpZ{MFD>?W(dmj*u;d81#BNU5)1mJJGkQgFkGX z^DByZj1^@IGg^j`jN;$#AEUJy`j#Qr)+lfNNU{4A`@3z0b~bgCpnUIiS&h4-Ko`Z8 zitaqZE3R*;r#$~X2xR`dk7p-$@oM=R9be)3vF#!Blim9UkR*Dyy=%+!fzGW>w#Uw= zstAFnC`}pfGnb_E7W?Y0Yq0?2ec5GqOt!eRODP1ja;4Iqmerf#zACZZ`rH~@4cE-W zxAnj36=h(3nVdfZ=mN45Z%k)A7N+}On6I;&^$1WC`bVypH+iku#j#x!vzTL$MV#A^ z%&qq9W9PN3?vNf}644DC3zZ7s<9jH*@p0)!(r3}2X-N1EYuc481HB@Od1_+d150rj zD3pZGbc4GQcRQ>%vOu!aNYqF7-Gn*oeu5W)CgR|41#8`83lCX54?X^YWPOjMx}MqG z>QnJBok=BRki=<646U=GUgQRFE9r|BCV?K(-{6@bxI>Z=uOx(hIn)N#+y8V9)GhqG zc?O*%FK&5X|1pU7_y%nr2-Rijzf?F%Z>-m2a7TTf4wc{UJArc%sm@uWRZI0&B?8B5 z!}>RJXF1AF7u}<&!7rGp)Cu{Y=c@0lgiw5*T9&x)lJ!2Fw~AX&b{l-UEUf2y!{?_+ zFz^6219_A}-Cw{|TrnI%>E|b~pt@GKx9heciKES3z8I-Toil`D23WTdZ4S8A$*y=N zGA#!y+zURH57Op|T2`PN9~2S(t~3 zl#QZz>tO%7DQw<4x*qGM4S``=K~|g*eRhGq?%uplM~89Uhv(~o<(iuUZ?I9LV;nPn z;DfUJaYlZH%@=al4Nop>Yt}^@XvR7xc@1N-Bc#&f2hOts&yil8dOWR8I05tbmF42A z!}5sg@@(kee7w+(r{8i~N0i;CC$dR{)51W#hPmFyv5ICp{fYXVMyCcYhG!Sm{iA__ zTW5LJ#E#?jdpJFEY~YAh_ds!k2jiZu(wD=H-9!W4M+Y$#m6cWjuJ6aU<3tx|e2$w; z0PQm2kc8FUQhu!|?r4wrNHnn-hMwbR_qf4>mXYk7afSi%mIsfd7N(_M>*A z3|}=)yH9H0#v|?pZm12quTSxmngHaeE-J|dKT_hFKuhOfr0fg6eQinLu)+VK<w&BKTB+6fk@z09rBnO>`VS9~u5o4B zc7}Cwz9%8r{15*%vq(SKe9g_)@6D7#psi5_WjBApR+f@_5cfqjyatC3PjM8dc8%vN z9oh_n6>DLil91EE^}OKaljy>SfxWM@UhWAK2~>5%IO_s)3*;QQeC=kam#xuP(39G! 
z`pD8%`T1)GldeL44b~SuFDqSEx*bQ}^~P77G=}r9e?RrB*SDKhWvzZ6sKU*=nSiHJ zW75%a09U~x;M>x7>B{vj@7SteN1Fx$;cupJyZ;)vdSGjyhfeFkAR{^cIehXA6izt< z`ZcSaN+R?W-OLKZ46h0L;MH46T?+jLakN@Jc>7&u==mhh)%>sy7<}<)vaZSCeIH7L z`Odb!GuiVTA?hx!{|@J(+mLC{$n0Y@b}H;L%-ZUnl-fpJVUhl+(4qNUB8}5yn7XHG z*qF9Qt<*)AlfEU_L*_TA+d!yVR_U>^z@9W`r2-VhEb)+mB%jUI)-wSd{E;yo1(64G z<)5mhC&r%Vs*BH2LI%^~wrw?pYW6WUOwSJkaYGKKjYm)11d*U#J$mYqf%qDO&L+-A zmP!{_5=0_*)X1U@=}AH@Qbsff^W~~@zIH`4#j*mseSo5Yh*Tryo@eJB_AYL@ADFJ*|ND5InlJwb^&1}d0CzNMD+^m| z2QiM5X9R}3dsb@;Op&Pi%;79hsL49|1T{I%HvYINxx(PGa=J*;>`dcP1&ZJe&*`Jd z6JoRQ+fxSksMDZ!M;povw5{kA`&cJXrKu-3>E-lzg+AP(%km5e>YAagvoNcVbaXoS zEvWjDdUvIdADckK;%>SDmw|*a|M6~_&M#)XCKa*@CJ&)B5%s<#&Luh=SHX6>*b8{i zJWp)VBVMw|&0TB(kVr^g$TIn9m41eENikTv0X>JtW9rVqDbke-I!-%xOLZ=#jBpYg z-Tq?ub6Bu%ChA{3%u3UZy+U`)@uI;QjQGbP)WC#M%_iMan>Z$rt9NSEkC4>W&ZS@8 zknEPutb^aYo^_T)_^7M;(j}RS25af?RhHXWqhF3Qgnqzmf7kzSKzh{aTRM%o%G{Q< zfqQwh(Lo*!qSPyevF~(7m0?kOZ(GMPOtYG7_NBY+6eFnB=v5=YzD_d9O`}-BT`*Jm zt2JUz@+v>DvSvE$!g{I(KR3kXLFsb4J}tO02RkkvJIUm zP6>cp8z#E^GpLRDqP6a=Ec6+zJZD#2W2jRkz&SEfA0Uprd^_c6Yl|`WfRGFe>6Ic9`7TB_oJw zaMb$KC)EF0xe|}njWCb_bGjE@!d`@-NB1ijOjZv0RHfCnwIXe(ijyd>&_kfs+a~GQ z>b7nsrmp@An2gc6xzyH2NYiQD789KE7V7Ifl{M>~vCPqsi}bElqwngbL0k3GtoYB` zv`)LGt9m3U=^HXVBMm1=4z%XbrzT1PC;dT{R-Z=YBQ;S-nTA_N#BVXj_4tAC0mu$r z&`R5lIz8P>V-_;1Hx*=-Yg7@t;GUkYOLzN-G)yj7e{n+N(pR|k%ybn^RqBPU+Z7qF zE=@^MJ$<@#``=K+N(<7W{?H<0=E0KBWeRrk?bZd_D;c=4_8r$f` zhihEg)`zT&u3f^mVrouYqxL-14Rsy;{4hM9FL7$ z69o#&9ks#5Z_{Pt7|KSq&-QnkrCrvTC}Ek;dYuI$Zd#Fil;Q3oRSENSI zeasRJt?R&a+ZzaP{ST+;r+)2wpm{`eL)7-Mp;mK`QMg-gN`(NieWurH85zSGUgCM z9?4VUAeGjueWkuNGyStPbFzCdwd^6iwiTp}UKYBq7^n_arD8r`K0^H_xAjppQ#Tt2 zafofiQ?iv(O%Z_k!@K7DuQ>_I?`6I8$OKnD{QoI1fMyTPav96{^3)(aj@m$=!!7%C zrx76LHM;9k3>ljsE#7EiWYf=%8|&ihUJSt{OSt*LD%}v)7Yp3!aYZi;P?Mz3a@vn3 zgkv|+EET?Mo(><6PFwP5?K_|E-!6Sz3t~R-jWYd&A7ebGyEl@Xci9wD>-#a5t9j=_ zz0ynZ85-Xg6%i_&0dgs!Pl3!}$&H(HldP)e3Jr|ep?J}D^|zvr&m$cI@Q3w8zWzzo zXG3)B&<=uFpNrB*y8Y0F;DF`ORm)AfIHA$H+;#8NK7sqs4PzqDT0;mxjzWiuaW47m 
zjj8&)k9J>#(jcT!RHGZZcL{r`;o>;VS_!gGn`J2`-M&kQF2qp^xmWU`38FNpzpD1# z2&WUUlGH@ZuZyOm2h8CS(s5VNj>~^?2{v&UE@JOOU74zAlhy3KJWJnC<~^El>s+~O z5iQo7Y7QzZ#CNCg?7FMc-G@mG(chKfIw8m1y*D%dXb! zVOA2b`(zy%qi7!jGRh>a({=e`-FuE3n%9BGm{$aGD5PTuN+z<`zVPdt2?`iEc_3tM zjL*z)l!lygsh-Sd!JR&h2mJ(Tv_>8vTA^c(b+emLXhpARJGVV+wqZiMJYcys+Ky~Q z+d9ff^v^2K%j5#cIzgRFUTYMg*lb-BkPi~xGQF^45eFlBI5&591Z91ViW=CdL}N{6 zEwx>+WLt-(*9S{&X?EVA#n*7NZ6Q9dM41(3f>P~2mvA7czw!}BsMqid^lqIyT(IwX zyunLn>V*vk26R1zr*E9$|JiiJ>C9Fv&!qTxQ=s`%8kD5l;-*iuy?5d!F2CnrI1{z% z>kSym7qJfUAOTI!^%rKX7c1*>iT(2Ci9SR(bKLv-J zdClL1TZ4`~w;8z-IpY(n3mX^+HsiWN~Vd`Z;FDXH!g~@G@#6iw##8 zv1C&%BH@$%-4#H}2&6 zb?%dChMkTak3g~}oS0DKc~Rvxoj!8>`PI`Hkh+$u^fCZ-2m&KN^HiAo|4Sbn-3C2#3N+xk z3-kcRt#y=-1{24+4V0a`0eM!S`-dHIi#AfHL8X&63;=$D=@oHx>Ad`hJN0-Vjb#o@ z5@cL&DWc4>+35>f0qU=%!a4afCBDLrg6qRW`Wy04I^w!k>YJp%ml{kW>)^*4Kg4~P zr|jKuGIyWFRqq|4O_%A;HM-k$Q2-&<^w7xTb>_ZUgC0E<830`F_Wu%-7gh7^U%ceC zqx-(p)}a(xE9}wbV_oT z@jD|*I@{rIJw!1+w(L&pLq64Sr}67uk+6>F^h~wP)6hw7eL(x8UO-wLrpqs;z6mhq zXo@Y(66NwR4gTjdJ!!glw`LA(o~@g2&<{>8bKd(~*Yd*OPhNHJ$hr*=zytQhSaM*ewX6=cqVunca4orQFAXPHo0t? z=enQ}rSwV3 zt2M8ZNJ{q&HwqYd>A?BPDnjB!b%VWh=)rD%z9N3Lfqj*ub;0(fpORiFJ2|%zf1Gf~ zy-=r;Yt0i|vq!i0Q+Y2-SsFi2A2v{7b(AE!PU&kb>dywb@Dkc>5N|WHKTvxeJok?@ zYO*65a}D2r`dnh2p=ug#Y^u9D^VEmTV10fyY-6NN-81!Rbg)8VmAI~slXXt69$nAS z)TybNnfbj}D`zjCe{eIST5#pTC%MAJ{z=xR<13n_Zw>fN5aY@IWGCDO8M-W$Y7`Qk zR<+kM{sI-s;+n8E*xS#BWKOkx<*dW8YLQ z+^9}VOgymvGB!=V(Q{{W40CVFlLqNl7|0y9{k%G7bW3SlTA~TrRQq)d9UHZCr%@x) zuGZwj%+)O=T3`&+E4_x60!{3_kp)lXj~Za#G(su-IN)B*a5}ZX4{$A) zB!m}U4`}3qYulFU^K7!gE!C2Jbt? zzi*G5u#s0ea>IF4*ai`S0J+%S-AG?|?$En4TZZz+^?h5OfB>7ghu%S2H{VC01RR{= zudE_!@M679y}uh}*@wln-$fo^_RShs8JpoU`|7_}GHgXDswB0%UMF3w!At!28Fm5! 
zzh|mOE!X+hPr#17a5~ey352laZ$D$Qa;83O(KX{a={Yf8n5ex=z~IYIX2A}$*t4vT zEC1KUtdy?K&a+W$S9sC|l$L0U(=U}SWw1_%5_c1X?2zs{!@X1PS!0`^QS}7|u6PRU z$UAMQ%Zc(P7oDyIvcB$nFOsJmfg`WwR>xdG{kV?MU#UnR%~4?6!mm`)S6hbbuM0U` zx1Xu#c%@M@3Kt<~Ajic`1sr!+w$}ox*b}EbPJ{X1W>hr8(c&ZH^wVZ@9;dXLVX8Tn zHUjj3g0JZw*n*#wt!|s*-&)JIbB5Z7 z>4iccc_12p==eqMl6-|J>m~8Z9tkDgTS2fuX?9flIfQHi1H}tQk_PRa3$GpyA{t3TBMUuhR2v(>PKh~re>=1DItBft^pepIb6t-^a}N4w1EJr3Z|n?)9=cMzxBdaSn9rF* zdhWWudT@X~IS#WFZrpc7(>5g3w;iL_shq1iv_i`b3!XeaA)J3!vA*75Y5TB-4)tH3 zs&PBKl-;so>FhfLiE_`1r#!1!TlqD|Jn-T2Ayvt;edGJB~bG zR~4MebI!2F%Ks^GTTZ})^RxGF(sh&{;Zs#d)in>UE|iZ~ne`K=$W z^9oItv&(+Z7L$fiztf>-#S1)p*(Rmk^x{dnxNW@X@yyMc$hB113PkzPQK0)(V7MR!vw$D}@Y0@v73lF*o0rbCzUO&>*n zqgV(>3=bqxcTaNsleQ;VYS5UMUx}JBX6p9oo*8M#gAt+8yh9qPlc;9b)^6wv8D(H^ zs%GxjiQAO0-n`6PN0ERjp65kJv>{yP@31w$`gQLd9w;{>(`q%J9NspZE3L5>G{k;= zf1#p7r|;Gq)4)0r805Tk=CC($`JCLXP-*?zz*Te>31_P zl$q|Kr8)sveJdA#wWWC;z7EmCWYV0iQx~q)sN>Nde}Po5V#AV=f5!>1ipO3)edqoE z>H;04+^?eKF^RZ-Cc+_5IVE2@O1JBkUQ$yY*T(gy%@c;bsj?8x-M>o`UlH& z;3EF(``PZ1Dek3Ioj%FfMY@ujpz?#`Re1$>{`a_qi08fZwd|1p6}hErV{yX*=O1G! 
z^Danx*XuV5QWnNv&4;PH-C&?%F!Lk+Dx^0qhh9sG;Eh1IZyZI=xk^2o!^zGxvY$3 z^M={vvVs2IX(Z}^KQ@lz;u&zh3?*2&Lzf$wa{E+Hir(%;uASfYfKcT<}t%uDv-UWd-5^Nu2wLX`5bW6I1 z?YD|w*ym@YR+Z}fsr>M{O4asI=?Ig^&Fj!VDeCRN+V~1DpRTtin+D}nN2~*6KUmAA zo4dbrTG?v6pb*W?E$o{rJz)*MN1!*Y{R6;1dT!E~dY%>GUB>&_A<1JoaBTs8br;v2 zTFX1Xy-xpLme6qYW2KWCs0aK{h_?NrmzufktJ8E&maeGs+`v_boUDt_)5{ByyeW0( zj5B`gX+8YU6KBGx3vn-Q({lu4@1%O3|gmfKBM~J!J|*bvP{M8w;I#FU!2UMS~wn z0eWjnRhEA2t8dOX*XF+U>f_hmHL8j`R6lC{Q;pWT|72c%DR{sCrGHzvJNFp=c*{L@ zBE5bxd#!kRxjsP99fSTp?vlIsIIKc#ZTx+6;3mRpjJW59yD8_fVA{=femJf5ZYE$t z59l`yZuBt!C;Rm0y`;Vu8AESL@hvHpy++KJO zmbU?Ao#=mX7{FRB&J@BnpxJ#JJTU0lhPSnP(GsGzawqvv-IZ^<;mk`Q`WhRHCx|F@ zEdS^|T#0M_=M42ClYsB{%L&@UTDkbDN@ViTb46M@Qs@5%O@W|`?$mC8^6givXtnlc z>f_OPKtI~r{4$S6zP`W4UlX{TMLZ`{`SZB;fXNBr<~z_vp$$x|!gk%k|N@N?zjnLjK}jFBw9+s#=#A zQJX5m_Aj}`YlC_J0|Pxz`v3Bqce8MyE|k^Uz2*ZP&^s9%3zO~+^f9#L)}B=AH$hBb z*W4Rqfh>LVxbiUQS-BU3|LrfX=<${oJh;*^0K4jfb-O0nFL) ziwmvA1%2#7{w3wvqus?1#~Nt5gxu!Ylv$8m+_~~Zp0{8C3pAqnH@FK+Xzpc`j@IKK zhA^V@Qq+{$eyyG%(e;4abZq)_`6D|>T+*5RZ~^iPJnFGL)Q7ym9~(o=`=y-R%V9G}Kd(LcrdgB|40P1V8*_i}6Vh>CgIWR*Os3D}4Q1yqJ^_OoH{U`R6)RAp%i%ZxPUzY@0_a@1 zzmlQwQk;d7%ieSpo3S7ZW*Bw`0{6uNR>z%p+FLw<594~1)+p6bpXP7nQAU!zgp^=F z>+}X@K18t5pukXncMHlFoUV*|!w+wzk(v&e4yWGehvV-r>_`VKjQYs417~PynZ}XES1!l-GiXYS9RPl~K&2R zzFPCZ!m*g?7ya*^N>&yX^GcmMfuujj;%wsu+>uGalYeuv;AmdcTLA*iZ3~T^^kkOu zdMU5I4a$VdQ8rK0t*hBaH%ujQQ0M=1t{J;OPQcW+zRx!v%nz*A>D5+HdPA|sEsMc* zBt`)o1zWkT!T<9Wv{z!|tPHqC^({yF#ru>~Zj0~EuUf6$gA;0S7m#2d1Kx79S3>-0p=+J!cXS&)>wBSYi!0AnOg)FK{sY;ZQ>Kvt+HkmH9lXyseeM;}i>feNFe_cznLK&_m!mJ^>P~p%28qIrt)n}?L#0#|FXQDO37nm#Y+8r8& zoR{WqcN>cB4~b8*<~Js_y{dfl)X+dwI-&&3z{7^t>_rG3C5{HQHyKO=sCrwGwhWZ- ztd>Xk__;L8Zhtr0p$56&9{m@8o7dYt){O}1W-9y;iy6*Jg*r3`1C}>gClVe+^-53ES$Zied5oKsvW&y#Kxam2O%)vv zkw3+1eA5&duAco^sYxbdwhl-li5YAn1Raa`HUSI__JyE$Y-`O}#wkc;Ma>Y*P6HY&;88UKX+i(>Z6u?-{qnC1QnPpiE7Gf z@Y|*&YvmbMd$?tZp~sgG(Ko=i5l!l4d5qb1W%nD!ER>6P=f;T}55A=EMu>#p=+z{x zSk!^QyG9fU0548U)97n?T&(A8gp2B3&-n@s;1A=gVGXcS3q7MO=X-0CzP?5utkj_j 
z?HnHXN~8aer85CIzuU$2U-dN%23KY2?!dr3d)QE3ZhX! z@H&Dxqecas6=%FSR0RPMXRbI$1!qO{I_v#@yZ`UG&vVr_P2RlkIs5Fr_S$PT%jlaj zyzzKvZ(JRTvA9|UBwFJ~Vu2Nopbi)&b%bhyzee;!2 zRTf6rz6dL>;H7`=;A&5PP#!Kci3xJGPVC>jFtJeb3S?ecMqrjInYRpGFWK|cE}aQo z#01q57cp`67;Pt+3#9c{U|$($m&y0@$Mw_Xn%Z;=w>T(H4SfwW;!xM5C|fqIWHox* zNHQPjtBsP#Fo{$6HlzSRyy)=r99^8x#m{fl-+sGG+D4g4yDdvn2h6>W*=$T`iVJV& z(Pdwqs+5f_)m1ppF!7%YxR0G!zNQ(o#xbozlk_Y;r8iO#B;R2PE|!lH1yC=6WynrR zW!GBUk2L5J{qT!f_)XGiJO7^ezvaTp0xDMiezG=uIlNyyS^AI7h^&S1kh!GbdN_jY zJWN4#NPD!MXjC4ODL#2b^>}wT8gaSN0EU9ZB=_5WsCxY7tXfw4Q6jWYwyCz1`Tj9d zJ~fyw-U(L7U3gMPm1(;D`Q5;g*o;h0wwP}TF45#eq#o5zLk{|1P|oto3o-KeQ7q3E zM-SrUMI)s7JV~!$&6&$Z&a>goi8Ex3>O4>)f1XWV8J6nDrKrK^$h%L1BCg4jrFoH0 z8Pv6$JTQ_N7x!t3Z)Q=QbCs667$oJ5#b3UQd2UEtY zcK%6mbMs8%G%e7oH7v%zbt33Rr<_`2zD42ZG}?Z5$^H%OiJ%7puygjUUERpX@8WjT zAdOm8IT*gWY{-febqE#1UN!$PtOIfM`>}F*K^Nw`RVfFBd-=wnhgp@2dND@^bn>J} z9zUJcRFNb9i<$lf^mFjalgEqAX6+!)tdKP{g(8A+`RjJcTqpB}@cf)-ETJE_ayCcp zA2(Rrp-ZX$LE|Fax8%e|3C-&~Cf;Th$J~+6$Nn)}b~Y*bV%shHUT_tASqLrO~+*&@5xv=-4sXKS8#mGDxwfi3$H zY@0mkal1U;n=mws-SKG;Dey?W&d zd$7n%&9N-@v?TA-uhY-6Sb1wyDySdJ-!6W3iOM=g1D!o2zJ^OW*TJGRm%-I#mRNo& zH?g6-HemTt3a(*&eml>cc|ZMongHwY`%@!#pPU#nOGzo@O?;lfjyIIrkyfUpWOb|R zQrl$IJLK)za@HD|^pw2aO8tH`hv6aO_80eN1XH_s;{z9IZSZNGWjcXl@pW<$qi$3r zva5+)tEqt_4yfXnb7gL}rPeHeBuj^%4<&U)utCa3W%xZ$GcD{hCt*e?8BfT88(>I> zoTKjK{=bV0FvLl|oz1`dCCgF?wwcxIO>4IL&t91+Q)Uv0(4Kn%`$6Ydl?)HaeLjpU zsu=?cnQuUarT~h2ovT#FVXVoeW(>)%CwH7EZFsQzAuF|d;62Eq7YRT`)P8&dvzOF&?~I7QZBoUVcO~ z_`gPmj8EO|bl$|_c#yfJ26Uf7{^U(s&pok9kM-Aar}P_Zqh+K_=_P?hjC=owk>(t~ z?6HgEwzWshwDZKdLOz}1VNpq$OZNI^5s~V;$-IkYI^9_5T|&W3ITA{2lU|Dfr0F*I zk%d{ZZ@L_7<+Q`WgXR0TPUofKiC5D7<$*^24zoxVV#^e?kYplLWtpx&69qB0V(Y$trujr8Wf@p^@hdv-DbEfwE26yPD;T}Vi=*XVd3b-)KRQ7^Ac8&2b)3^6wR!sP zwMKUJ##v&h9sA*eBY=d%wdohZjKg~1gYSYUsv{C;K{5Wq~Smw&cO z;{wY5`B!L0^>wXs=szlU9?s_`uBM+wmX|D&k$&z&zQ_;{bpMAMrLjS-&6g|hF`WZ& zj8T?K=fp0?&x^i9Ju5XJUk{Yu-pYvBRC$Eu|F=i*h-j40tDOA`oakA7=8?m<2nXQ@ z`Eo+BcDKKDFe4O9{~lk@cX>NYQ<_;TIHWS3=gV$*6GFet^A``8!iBv!oKH9*E5n=k 
zug)rLM>f`H{x{v(x*F;vE=;#8UN>pJ`D>s#XNa6$E_({b5Dgl-W%UKgKa&buK7>_Vx$$q?lDz6$-Zs*S_Q#U>-U#b4&5&z+e7RvS${#1o zj)FuV)gX~K&SufPGgG3y}>A-%6DB2*lws+!=hGM zKhaq*Q8H_#bRSin*7NUR&#E-Qzf0u&Yxv01@~n$w`YSR;1=l`3S4*0;nL3jdl{K)C z<=JuMWk2~z(U2&qkPCm+u<`Owln(c51@g;iTV2&brfNJ^f-rm zbRI?CmxKZ2F3+?`K67+lp&9v8B4 z#;6Hb<}8)YVMe_}?nNgNPRL{6S|_no;xpU9=arN)ysWi7K^*^UB>zbl$4dnz|pza}zDxsmg5sUE%tybIh~%(BcqF zZVLxwm*n4#37K&Y+)%O+=Yxgj;+1~+p>Mj6Iop-2h<~7Z7{B(9JBrtiNTXhJ zgS^(4Mp4=QPinB}K8?@0cQHCwYG^)+L8i^74YmO@U(X;-o zjyr7T@*L+4<{HLZfiW>bm*%;$VyWqat}gidYL=15&Lu6n&a&E^bEA;OMr^V-mrr}3 zgP%V#P$8qI%9CCF@z21%rGVKwGcyQsI^k3*<(IA!`-S*ABQS!iDviV}2A1?>-o%KM zytf3qVzqSKiLQ>hCr^%ypwzL`{6qubb!+PgJ|k7eyPlJ8dd!h`-{d(gsFi2(^}Lmak9}z#TEcbu;9wo4_atH9gu2OO+V3-$0lH zydTg1z3B2hspH$n$9H}R_No+Ut30&cw9#q~ItMR=Z;PK!7&zXO&xa4bIFs*d+an)i zq(|4^G|iVev!%yjc?1T5Y*88W>Q=6QJzSWGeCKZsa0lP-E1zbiM#xWAX?#U)9K^Bu zSJ>9ZbTmv=vbd{10G~8qt-?tFA0}C|h_Cje5;%7@ki7q0 zDK89`wf~W|Q@H!Tp#UR8mvAXpLIghxs-k=vs8lN>;c@4tIy+kCVGL~ z?OLyrzn^XKJSG3EG**v9AqaVskC7j=LpG2&45_G+wXU(GU*MxxYt~*&i>7l`=QpGS z%FDh!DaUh3uILBrBY2SE*W%cGQ$N$pM_0+@e9Xt5Fttlyw)0~Khh)h~x^ynDRPi18 zNXI}v>XbDXa!>ylnYouZ0J_#^Y(_XSU%p|cr(Pk%)tT~{hPH-PCVQ806v-(Z*>Xb( z%AtVp)h=DrOV^nFD`ifHG5Sq=!;DWnBinIhMb|6CZ^t>0VDDY#na>w%+hpd`?!P-x zoxGXDQB?_1a4$4Zwa9JSK3P7_Y&evjslB_?VZ@~-%8WwU16@)6eNyT_>0&vXSRFFx z-#*)*=j|(NST-~KloK`b^BBaj8EZ2qVy6lvgg-j-Q2-|Eak-;_rMCg4a&N#qS0W>t z%=Er|+H30fbhwxHoSf5mK!@xwU*acr;kOJ_^}9`KFMtkixk$Q^;B%%xv3ygl4!bG! 
zt9y?0T4j>Tpnm@z)9j@9fNb@Gu6rjpcWX{^9^d$nYgjc4?jrH;BQ(s(z3Z?tSi{^N)QJx}o@x%M&WvjFN-x-m8H8NMZN*^{QO0p2bmy%ub zV_1jQmY1}uvd#%PvT84_WyV)7bI5dH^=NU{jVg{e3SkP%kBKVqxCpZJz)aXN1%hRl zUg~`K6FE0&`nBmd-CrzY`s>Y!Bl0Q7OPtLZsmZWYWESc!^3O>;%l|1=VdHzM^fl1Y zc|0+atPK!SEl1@YKv})-6>?c4jELaY6I4Gxh?%t9JW+0^w&Fh`#j=xx)LHjDZ(VMc zl?AflY&pAy@J_1pW2xj`A`@x^b-F;zc|nf0`qOvl{hU^2{t9Kmc9kW1NhDTfVz7?n zwNSdt7qqOKG?dFeTtw!|zdhAMCAflz{osVI`KRA_sA#YA^5Sgy>{ZWP$%a7RnXPHn z^@W&XN2qmfgSwIYh0U~DquW&g1o2t&qW!|QqC`7*IC1}Rg=Ov@(fKg2JbH1@VoIvS$>iJm1eHB>X1-(|G$E(#etQBb z_FhDUm_`MU_RG|3WFN2fAS+l1CP!|LpCP8O+48_bF05mSbX+JuJulhWW~EjB^|`#; z%0u|YF3ZcYsg-;0;`593sgIw@&j+;0$Rcpen2fba56(?FSZ^IE*I#YfCH>}EM(RMo z8$A$JrZR6RHxD7t5jB6F1fUcTnFX+|nR8XHoBgW9kGye@K^7fWwNQtT%LsH%>;f2= z9kt?zEVJjw|4}6Bd88%jN!iW((dNe`0=oF?EZATh@&Q_6$=$ro?*lcJtyUWTC7Mz$ z2UTZhzwl)OLLpG;m4qjv)RtD*^s9*cmz*WaZd0W5Jo&sp7F|jc=Q0+yRvm*a>)v877}$cWeYXSla~U2T{J z$iLY5F~n89K`_8SS|kkazyWeC_iE22GT5EDSB9U$Uwpe#ntCI)q|s)oOa920f31@X zMklwK9cbYBC6{)31&d_QY+y|8&Ks0RGTz>`q2|WWAJER)JM!f!X6@oe0_7kxetD*t z5-9B4b_?I|CXl?ztYReomu}+kd~)^*Nv)Du?dBaTTCgCs2Ol#}?dQpj(i8sjT={}( z0B05rR_~qZG-GA^71Xbkyo+x&-^7jQCb|4&ez&rah-p&Tb7T7n9vzfDSMy`YCuCks zD1}1_7~|%29ftd1F$qS(fmhS7eJ7EwA%4Dw4bSc7f6S-LbFCrZc+qXIpxA zlV|VI@Od4DmYF}ue5~E6M{QEDQ8x897uNB%H8)9KgYL1PK1lqm%+Zn9)~sXVIAjiy zq?oUENJH1UJAP`{to*DGa}oFV7O#GY!_{2;oJ`e`jyhTV@&hmMCLtljMSsnR&>pQ|S7;N7%rB2nr)t3ECsQBdVlQY6wAL6qvtWRwKE|nX%%jI37N_P}f zw1pTl_XhKkVJBvqEgR{FQN?Y1-<()0jjtx-6_8fYIL5Hp6f;wz;PK-Q-wh<1$X0{0Jg4JcIck+%cN3RaZuKSbiP<_9`lra!wAn=*dBV67CFS7wCc#$1{u4R^{Z<+9~x>g4fOODzj3hDkdr&weIbZ>}`?S-ttdb=3;^XNZZ>t2)I$B zG=J>C%3I}SGThvZ22Davo(lx=yka@iK9J8^%;rJngi{Msftc`Dz1BVd{od?s%(36>4}+r0iJA?nm-^J0a}d|!5ajnS4b0HqmY3bS)A z2e^6FDPOGQq6|H_k1`^jYh~3!osh6q%892I-?9L6ZYTf%TfKa?cIDzf%{&;Cl?lLl zG1;-6(zDP z7hfYzn6RGvY&CZJr|`ML<0ave4vf{yBy_5rzu!#El3u6F(RR2++T(35g{+}dnUoms z6657yp}cvfe4R{=mMgSr$?(Nf<~eXn z%qEMH98VdO zi!w1<2GczstZmcXeQ>mF87pITVB$*Q-lm*#vX9dKeh=J9Irf^4N6k&j$~^t^I_g^4 
ze3e2nVdteKV%6#Dqd8)uRRr_v2fmnVP?zL`^VCvE-_&1Cx@f)2QtLGQ*LLIYUs3J5 z+VSS38_h$sk$gv7k~`1f?-G%{ty0D08)Kh~JPT#%DokAD(xsih5J3C5rO!=eEGK!=b-cTKsyY~`G5OwI;;d8!c z7uS7qE*VWuDpQtjlLxYS#Z6~O1wPt5u_GmN*1F<9Ue=s@K(?`+%MM`8-#JAFXYs~k zdZ_}F{Wi+f`2@J473d<9Lf+<(@dL!!l2#h%ln%L(bcY^w@#nPBYviTPvgv!d-C(cealjPuzE*G7(+r;7*a0s@osYC9BxjN5=swTK~~|+ zax+Xp{u|NZ{6olgkb2U2{ud42`)SLWiyU2){ntmDYn!FCDRnkX$aTEqQ3NJa>p$Ace`)m z;FX+F@{%DTDL(rk5$S|RIcYFCYC7B6CJ#^GC+la3Eu<(zZMRQomnKl1s$^ff1hBY$pWUXlk^F0uDW{f+Ch6b4Oiu(G_jEszNW80?< zA3LU^u1>Y^8*$C@a*#5O@KS?9{)<>HuvS+@wJ(Q6 zUof+}LB0GS0fsnAW40>30WM{XsVXoQvQ!U>aSc&gzY(fR6rjlJ+v<3Yio>F870hg- zEN4!7kAHW@Ag zAiW=DWITx2NdYwr@N(;bW|2DdBaVeKTYE)NNgdqHfaiUb2Ka+0?i}1~X!QAu>b%@3 z>?)AI;F8$6T+++_{TYJ*%K`{D#<=esW0-(j?i%ES6$L*Nlfo!71%Tyo7jFmMF~Hk~ zs15RK>eR6^N+%5EIV-l~jSwYGgFolGIh#HAEl34@Y0wvj~XC~awu&t&+?FU&fL5yubPq@^p6X0}@7gHeS!$9HNODoX312reOT2hS5a~%&qZ)BC`Bd ztJ>=iCXP`f1iha9?l7B>JLT_a=M^jlo;yaSxPOdRPV#2UAm+1TU5YAnYM_He!^yVw z2oLi4e(1#P;h5Sg^6mM&7@o_j#2W}k;G&~x|j@U zuR_0?hmUxdH+elTAchea2xSP}QkjvWH^y%SvGn>=eYr6_s(oR^ass|odj$hwOuVvd zcGq9Tke1KKW5$e!`^!W$;gDMuVo#!-&Hh+>J+B7V2hgQI?+tfmT55u923QAN9i5k` zFR`3h#lB%5MN>QCm*%Phdi+^8Ym*xuip6_a+Ur~F(877%MUd9Hniu`AX4G-P>>5}K zkx=z=%k)9qVHO2TCW!7)l#5C1a%*hn`Gn$c#6N*gk5T}1OKii#zVtJmvl@sHtT8T) zuoX|FHf98p&!E`Hc8^%v=*)H{iEI=&TajS01i^lM7(Eb$Vv%IA;v@Cl8o1ghk&f#K zcvIaoe4Y^vtiv#(k=Ppfqt)}9Sx0PUq8O`@h#p8aJ9tGP(wNx77px`VR67mNcz$ zC_VmA@x2JovM-QI(%gMb7MV*i-K`B95qrcFc3mAomdFueGS0tYv#!km(Ulm@z*Uq* z+syBIylThES>+3ovbnrW;IQsf*JXH*fVKE#0w2OL;)x;j*>xhk>YscO*(nqZbRM_V@{QQU zOtA55j!e&o`X1a&OH+m~^`&(Tq>|L{E_SewE__D~Z-_TIy&E|XB;8iK`W7_=$?#bw zWHiQS;+)4O=s2z|`#^yEMBrwV$g|n$I~$0uJRDLDMtVKh57n+d0hH4pbEXi-sV~vX z9L-Wm_-JtC8kC+fK~MuX^6BI=d@nC!ZfHakGy-1Bu{!>q2g(F8D8$Q%_QUczpXKs2 z#UmrNghI}3{CDMhYvOyZhiA%i!6FuvJoQNxn>)xJA=;%uhN9NJiJfYJ13^)yc)2-{b^#+(Y03`zqnGC-KFZ}JjajzC4&wT`<+08)Q%!YY zwmzGX7#Xu^S z*>=@PZ%piQsYNV{JRYvV$jHwD6{ZxXc@kJR&FdttPFYM2lZS; zsxKh@t#xWd#sN<@7VxyuFh)U0ut|Ubh6)BVBxz?I=@?sr1A)=WZ?H6D37sYrt(a*b 
z6Px4atU$w$3h<6DZ)+t8KbGg##YAEW+DnE?4$F=R%bPaaq6RW7u9plH5;H982u$3; z$GSBWwc#BH$m^JrCN!*lhN8=#xQl0xQ?3Vh&0r!_#rGsTL;2okxf9+kilLp|QL93h zQs!jy9x(+ONDz=bM7k>R0~H#6GFK}qIo9gZ#lSMfgAYz1wr2NdSr!wW2;xET2j{yv zicF&t!wDbj+~pa{{fNY1nYwItf*M?^;UW)ZxyVXbsG(Ff+b0-u{+$Dj3XF8W-9g?; zwk?MRGa<{#nXav4>k}Nfd5R_3EQ`1$B$G@`M<7W3 zpQ8Bw4401u5QBbxx)n07XLLMK7vOTyPA5hNQ{TuBc4~5Ob`=|vjDeiTFOTF@g$RJ; zSv*j=!A_&rpPuZB=%^t46B{601xYYRui9i?YlnWp;*gyzK!laOYme{)7OuHbQpl)i zr}oqnXe#-V(JI0q@sLu+pLrLH+`Dxm!Uhq4;voGLIx3zv7so1vOLS|CozssQlTQ&u z=I9kKD7jq?_yTfGUb3H!^oN`Pjwthm_vKrb*r_LizGOo39m(gc?WwOMBP;ow?wMb^ zCC+qnqp7$;k@Rz0JM6?Wm`h8co6@khCdZTLM^$N$>?^?kf~{%VI>Gc`z{<%QEr-}D z#Ceu~kgaOQQnup&%7VgM5Ks|hy9#_@Tu&FFpVJ{${cl7a$nAZYbC8xPgI+|SpH8Sn*S zo)0r3iF*-)Nv>c%2wtL74hjw1AfETP=T?xz@{Cz)a+}jyTgB21l7v$sn_KM-yaA_s zh?0orMsA+x20m<~%|TS=3{QLq@>88GPd*3=<&0%HC>|)M5Z7gxDq=ZqSqz?v)&)<6 z$jv9K)~`2kWUsQq-*X~s#xBrdpnG#6iiDCm6(kJo5bhNY>AdEqQ)BZL{O zQ8Bxw*rkb5ka-dp!%f2kh&m1YU?VOIky!e67gs?|?={(ZE)orDyqMXPLoB(<0YtYr zyNZ{gfx&L~J#dDD=%Vu?YaE>4iSEea#bh-z->^_1>_yC&T+0$d3L6m75`5$H;26ea zv`c3T_GbX=iod9W6rOUFxMkGI6ADAVo|As&^Cc~y*>rjHafk%~!Ae{b_i!1gO$O75 z*ZX`VofL%V6!a_Nq>JYjKypvMtsxzqI>f`%-@2$(sGb6?+a;MVV{;vpOM$4*)1+(F z)1ywiC&XX+1pXvC>1J@gooq1}HlO4mE|oYgQzf@iTT(-{FO)9S_N$VVzjxZHika&8 zBnKTvWWsE$(P*Kan#LE1-(zXdrG5_a*Cq$PPX+ZhhYPdoiS@}25(t&NxAav0U13g_ zbhe6jzf}bO!Jwp-Lj}i(j-*DiEp`{DJvT5Y=i2yax>hzfPl=;B5J12>k~$xh1>}o6 zyq#$3;B%=u6st3dZNC-JYDDqfN!NJ*|sL zSjo>G(1MVFhkrR=`zNdOR~zz#+)Dy71xYk>dIPx-ENkjEO8w3cGolGb@5s^qBGDkg zwn;&k$^C!wSpOS7=d#F*rKyz0s^0_*4TNCwbp&(h&IR&DqDMw3oyurRpmT-fXDQ=& zvxW7x(B&d?05^-p$F??c(z5JT=8GHDpK&^rQteWg@8FE1h}v*2-=w$8MHI&}HJU&j zCLBLTu0$53M#oVn=0E=w5zKh1F|k=OB_j-xd#BY;sq>MHC@Y|gjBazwx*`aXp6j~$ zbNzdi0f2y*l7>qvOI|FcA52{^@@F#PBg;uZ_@8mQ!4+T;tSxkoDbnU}uEDOBI`I~_ zoP`$%3-1BCdc|V^%CVAksQMk8ViX;Fd`02ol@vtxH}Ot44Fkm6#Zl$;5ZbwzMyAL7w71J zDj+8eo9VEV6){q^bayy}p6@VplWpp@(GD$F{_8UTaY#W00fe!8_5&mT8q7c^p7+-{ z>wl{PyP)<#HwK4DJICv(ZmnJ;lmnZ!KM^gKDHV1$PB>^jWaf$tglqtai9FG=*E$jh 
zKLP;>Vlvm-W)Fa7p*vDQ-1eDXN>d)?mPnaB3ao&ekh0I(iuITBSdn$)0t~I2v$Og} zy_sjakwDQgd2e+oPktA6(tde9hr{o8o-D5b$isQU1&l%Y60+Q>q7cUr_m?|u@Xcem zNdSX!P63;e_jv}DM*Gnv9uS;=HXH!M;sGUQLvHLVz;6RxLZKYr+RmJ46*LpxMc0kPH43W!mz9}qsvVSc%Lf&t2&6~O^K&!F{&;2q`Kedu zK8;n%>E?9wn-zw96tDtdkzRRx7JZyeRy3NJdk)usTE%=lVE`|6A1zjEjk*n$I$_?F zc}|XRRTkM*#SN4qA@E|52Xf^5&}74erjF))NRZxWVu+|g%20196KH-Hf+SuB+P z1ZgN9z^^%O-XKDP7?Gp2UE$78wYq?Fh~xhF17>h8k`xg%*M7n1FV?vLO~b+WP^3x! zD&BB93AhwE%@v}T3Rui@@tLs|5-nBymf|Wz*$uGc|H?3k;u)!Yrs6E~kuD;yvyRM^ zik1x>jP0&eRl_{Kot#51XtI@Q{#=fOGKc3Szk2{+e&oQ7niV=!GKvvw;syI;zkgSE zyb9^CFfoP?SXSRf0%dy{2j%mGaBHoSb~=hN2HxE*m*1kSTp%CjnIkH_x*-o9 z43GmEmKR)!J4TvI575R*`F^41oOHUjdnlhd3%#M_3)ZDhPX(7ZGS|99hUcCI2=XzG zcWAInvm0>~HR?Q}sPi}{80iNoi=5;1{FKED;`Kzaj(0MAyW!b`79#$}+lRcVhb?VY z6r%vAv{HP|=_*RTtP00RG9l@&lH?y0I>;u$nx)Z^+G9i0nso3+rcmW~H&8gZ&}IvG zJ=VSeetw$rl^K!x5&Yrmo)y4jEGXVu+zwjBIW|y|n3r7_A0gEz=ORSn%}jDMd1Xqr1B4AI z)3Tka7!BN?-4udK$oC$BPT%o|hP{umMl@B<3QX_-rMYYAURwi}Dmw@?$J~};b7HjO>YVMC2TMUQIs-EvdBCt_F+K2h9=6oDshL6iNDKeNxt<>Brv3r*4#kpR zx31UJ)h{dA2A-|5#oA@~>!T85fHs10IluL$F%%11eg>vJa-E({eX>AoK+zdy{lDyM z_6gEfLK@_i)+(vbHD~6yX`~S|6~D}DZBl$G;?w_n1cFU0=DdaDGg0d%6(^;po^GzU zjG`Q+!Ulekx9lBse96M5mm!)5&KTNywcMJOd7NWvdA@0IA)&#suoQ9!OXgU)rJaFr zcI*rOt1@M6IepfETnAvG&fuRH!{c*N2W#;o?Jk&5Agh-Bb^v>N(FcptcaT7k{x?#j z%2Of>3&D{A=Gm07#8S@Ft8JFH3V?a?4Pp*C9z2+*^4uIpfEZzK=LMR8uw2LyHoQ0`tFY`rIeGoM-V!V{$Ts zezJ)JS*GT)@arm6V|asytb~bx&r0FIa+FpRmoz9e<+})hF^#bY9l4s#S-xYVO)g@% zk_2v~{7=&`Cia!}x3AibAT@%JEYM3_hNa5JmEV+l5y?(2W{?)il461pT_V)fnsek; z%I5gUM3d!0rfvC4-nHmZGf2PW>GWm)>c+l#p1EC*?PIQ3`pX$UUfxm9w!9C*`j zIigzV(ns|hTaInnqyk5BcR#>1St{)y{?X=8s@liZ@?EVwrll?wKYsR* z6X*2e!(US~*D7ip;?xsXvlkuz02AivITE2kl^pmqsA(R_0-H${(C?;y z$4o0N*+9fvqv?5@o&yyyGkh{n39tw9BtIX)Koun&R4x;<7(d2~72-P?El$`>Tf|E% z$RP-UU<>Amk)3*q%yl7{=vA%(2+(L7JCK@jp4~yNLaibd<(5Tcj8UoL(%@qFduA?n zku8HVV#FlLr$O{yt38Wb^Fz1vlN^RYDXqkue7@5C$(ncp z&PB&o7n_NbBqEn%|LJ@uOTN?DKSytwRN*oIN8|dd7sGv!>T?@dv|Kg0Iw-#7XV29R 
zjv8PXImG*!S)zP`Jq!xV)u5vsNYq)Blfk&C6~&DRGNST};WGuI<$4v=nP;N|#y!|! zw^pe|_>YVaA`t0gy^0G!Io;tGg6? z@9k&#)>6HU`Jl-s@a2y6Vkk{Nu4k9wI`zs zfYO60gh@rheq1>_wQdqodLZF&@~VtL{132z8Fn59unD8|Ft}$Bn0>O&N_gJ-EoD~# zxhp<7>CFhWu2eY6ZIsc0jgX7B5HQxD63-XyEKVv6fIfm%gl-G32uhiz8Z#TH-M6Nc zSIvo(c{_rBw2yO13&rom(6@J<(sn;W zC?P#9v+aBy$8W+rKVGHG_;^pm!nkEq`hwq5DlLE-iWj$eACI?SoI+lT7{b5@8C(_1 zQ?ESb!4QCaXRuc(Oh8RCus;1IU)ly8Gc2|;05QmWnVMv+_I4>Tomh*}*mpeS3yLw^ zjitWQ z8#Mo7a&DfTh6};{1wzo)erk{gMpJ4B%a3}^p$eW5GZoL*vySGLH`Sd((ho?WJh(%4Hs;bx|@AzNJnF`HCLuEBb3}z1r9?8k+c^#0{FyaY`>*D7Q{LxK4Xl6`l20=R_1y|^Qh;63XqoHL?$h7` zH(Qrnhck0Jo@PFO@%kd<5QrP4m@?r|p^DfK+3ajd*2CyR(ymo*_&!nE>C8a-%u4|x z5+lse`jHkuA}~~cm_PX&-CAb_`D_@*3x~qyMv6&Y>U#^*4f0ZNc|fE86Bv8b-&#lE zn7}QNLnNovMP$>)x9ll5sM3*wb)xnz7a*rqm07c4qP$gQCV(rY(1Amjf<+Y&0=qx+ zFS$|eZkq9_T6?9n#zpB!-z?&Jtc&&DWEgt})G(jt;CIiB087+{y(em2f)paaN@tFK zF^&0_|M!K6L-`jDUXQOx{JhNxAWSRbTh@J~Q z%vU+je7>z00!#tsyVV^pEbsU%H4XG@0T5NxkxtxB7o|7Fq*uO#h}~2Bk=I%D3=MP! zlv8}TLY}IX|Ef7{j}o{fkUW=`rSm*14g6gru~qw{*A%yJ~(h zHm7#7X_9zbHT^9z40!_-3B>jNJ}O%FZ3P`g;M0KxX0nYm!kbD`OtTP5ax@1LIzLb% zkTK3rzHg~=(BGtKjTT(@0N#~UIDBzpDl8d-+Mjv&`w@Eg*Yb!o2A^qFXyqDKbd8r1O zq|Qndd_tysulV+^cNkvhVq?Me!pZ<;3X<(`s)y=xq;a$SzcJG>xqs^u5YT0KFy zNtYJNXKF?Oc+%7tG7d=*_Yqn~$RFRHERdV~$-QNOQ$Dk=mt~ZLDuDb)1~n$CbT*)} zTk3TXK6Z&@f?cKF=N(J>kZGkzb>y;$1)?UJUpi8lQ{Vazvwxk~RhT!L2uHJB4Aufs z{K&|N94U6PDBrJ>-d_5FAsHUkMuxei#C+pJgY8QU(`08=VjA!IYmsH~5{{4E8RlYr z%uTlRM3Yo3P|oP3-x%x+ZFj{Oo~=^kU_v7waAj>=c`l4-gBJlT%2D)jXaG{3SL|s!%p(uYJd|&>mn7Ga%EH^62XPvYVTS8X_Zz zq<-U!xUebS*d@+X)=zqspd`}y6%B%J1MF1Mu}JE7%igVvIX>uhujHamL=Xcp&@>s3 zOn0fQ?WLUaSa0+PJ!HNM<&!??6R~OIe0Y_+9CapCV1E(+irWaX3@~4+bzw3sI~W^? 
z#>DG7s@NQ5IBu$|f^3O?F9=Vk>U2MltLBdkLZGW;lOynQ=%82Q70R>4hxi4?bb!$- zk$vE2p-4xWLJjg|jx6pc&jsm-h9tuU<$*(IV&2+EEQ=wHfsCoSdnmO>er$$3+_Iy) z+Sx2DsUU?=S^{{%{9^4+hvXYSpFFUJgZ?d>Ugc92b~;O(=A)>}Jqz?j0a+8aQ<7f&&%aX2|6+Um>I=XvwrzoDoM7YhGCU}yn)VZYN`0E$v4c}{PaIEq%WSE-i|LR zNt8H$-T-?sa)DIZ_CvI2>aR_1Y#*kf#1Fs#s_g0dp-X4X{p{ zDmkOjR|=;z4co>XOU&QY4NE6XpCY-C+N z@77Bgb|L|7VIexFq%BL4;EiarO&Dx+%Su!5# z$)Mlq*U6eIZSi>-+`IIQTpnS>+|k0UH@HL?KWg&Oel4U$<^~e8_}!$b#LGFQjOlvy zzf{f)Nh?gegIr*BoEcI1qn0U%n77efRu3m)cP(H3N}?pF z9+$#+SY}NXPd17iFw`n%e;ul`MqM1ZNSyf+ORY)E+T zTjaa&Wsb@rjD|y!S%GR%#Hzt%bI?JVM8Gr=o4D*4cFA9H)vo(j8T$#$lSsEUN7lH_ zU%k0IkMvSeWUq79Kr_Ti=fo@B>;Q%A^lik~nmXu5w3sPbqTadClYkoojn4HEcHA0nI8>XgML-IP<1C(;RtIUB@-e}mR7TM{&sAs| zC8u+SQqIR@PH&pTerF6KtT+bP;UZC4U858a=p=w9C}>hsSCyI%v@bEBk`e~x*1kYX zBUJnt@e-zdK%0IN^Q~J4E@ej{>FCxMI(uu2_2oD#N)}YDoh^N_ljmqw=a6Oc?=NOE);?+ahO{JJQ6};1j0RKW$7MMEo$Qs4?=_lY9HGh(6go$Wq{y#i zj8HHh_H;{(&~FlGR1JrgTJC=<^c$U-?Wko8a}Ot3my-BXh+dh2U3N=5yLa!)pf1%b zf<4Wu-9p24lg7w93Q+gy@_2aMkOpak(s)wy_(I+zs_7p>I7ZnEGE$Az^KIPF&Qc6_ zWGNk4W6@!17$oprajBFQE`1APR*lHYR&u{!$0*(-DBW^obiU`eOkGN^`xdY}(1a%T zHw>a^qx&nfpgbCzORKuS5Q_-F+w!3xQwP3BRR?2;hW2!~XAuYnzZHw&Vqh@=O9GHY z34+|Px3a8MkYJS@zLH8r(AG4^Lyp8Dr{AC!swk|$g|08u|MleEPJ9Cf_ zKZ{q{<#>Ro1?<_!{Lxdc^<>dcQ4tQtXszk@=#Gh=HJD6sbC0D`w9`(Ym~;JdA*$7vza$8u4UxYD?bG_@NH5guM<}H43u|WbfL&!?&vF5R7=mG zXO}Fn@{oUu@~4X&o@dOw2BvZeKHG}v6*v)q`Gg}zdy#0d*fLd&uI027{?Ps}y*~(m*3a`)Qu?rcKg_zyb?ZuwQqN?ZC{$T*Rp48Nhk9-N>a_cDE`7BwL;4Al)j8*lTfb zlm}S^3^dhriAHs(liRPxOnKdYJmO9sif zfpUsp(spTVRaMe&`m1rsr!|BfBpm=Hd@>DDIWFt`RtLZotTH*b1Wvw6dxsm-Z%R0a z$Ix!4x@Ma`+(@fZ17h-_JV=bzYs(kUq+}*@C3p`xSr&@aeqi7oBUGlwiP;_yhGB;&vhl zjg$PM9Tnz)99J2OG*xZ<8)#(!C~xJ;B!`3%?H`wLWKkrUT6Mml5CrH5dCi+SkQC7g zZA!;1M@(nFWsx=x*V@79ajSzLti=X`bRnsCsjApRt$5fuVN!9*eCh(RP?}wRelAT1 znlqz$Oz9$q%NMEsVTd#wU)DIPRjBvqUd(ThZwjTIqR3pXefGsgdSsjsBIZ7iKTfbq zq-|8O;$FKOJHC_8BXYxddPgFov=+$l|NpDe16`bz^7s&@*CR)+<%YtsoxrCbiV^J( z3Obs(9G#)!4sDIC8Xz;msL4^+sl#AU?yP54FD8LH>DJ|tI^IAO_o0@gx93rp 
z4Cp0)__P@-Gn|Rx+!&tZzL;!+$S!pgW!XT&%@|aLF15?JLT*r;3{Muen{%{dM$wJ& z^41A*d;~weDXM$PCzk>9mC`jdQzlvVnUk}VkIDMB%!qb!{!_hssq}A?dpX^yPNGe+ z-VRHX2`7;exuAD3=dL{7cz2#$Hb@e+iH%eqC`7u*%D@D}QdcSSaG`gSM9KYl)k>_F z^vLJe%yAM@5F5(NK{PTDOwE@^M}ugZ(@IkCAWOjM!p?sWq*0I>o7~6kyRQtzSUC3M zfK}=;q8lFXA{0htK|j!b(=pOgtGv^VD!&;>H@FNOmc6r&P*<<91u`;O?+aF$V z!elA#n>b&-@Z!KmXU8ocM|qnw4Fh^gGT2NBj}A~9zuVFab6k!#dcknI19DLVx9e8> z*7w{HJ%CB6VP7GEnA=SrY0hXwkuRr=?xNyul>PHzuiVsIZnW~w8~Xs~4aWNX^*;RK=pgwTxXL_N$$$vmkZZHV z+0@yF07%fBXD33mqHSjy0Y==63W2@FqE{5UNJd#G0XbTX+*ok;4_zuYed%Tg<-hpa zf?hF~H5MrgOqfc6e$=(>VqKC?lvwt27wG5@3p-$rYAXAFp|Pqu;qlz~`}A)j@<$0B zJ4jwIaV6P6$5?S5pY1pkI%IRULQmV;&ve+OVwB$NaDHNZDq$Hdi(BNET0Uc-t*c6Z zs!TSa8e9B~HF1s%A7jRmg%>L)y(L>*wbH9X+dx+%d00FKy~ot2{N-zn+~p|_x(6A4 z5bKRosd9Zs;w-Y(n>ctbu1{Sik5MMWN9~Mbx6FX->l73u({fuqhW9tc5I7{i=n{YU zD$8k{Xy}E<=8{VV;v2w0FySF2M-V-lHQWJI2v=zHxw&+8f}NdA^QX>`Uq|y$ZW+yW zT+-9|Se0iq=ckk-6zi*A;)00_yM-z~toi|m+cwD>@Aak`9K%=1 z84p-Sng6MoZ((ITM?d?8?lL%69LCl+Fu7LWOGQor!2F%FU z>)%CMo%))MFuF7&s37u6a(%xzCb>CxVS*r5eL9`QK@L)Q6)PAFO5Bg9J)?^g{UA%o zSsIW~g=FWzv!+uLPs%ByxdG=C$f23&FAW`NzV0udpTt}eJ^L}4qd3s%Mkpz9qjTISNF!83%2xTj+4*q3TvkGb8g}G*I=xJ}eVf}59hzj9J7y?&a1=(I`QI@4w4MX~0?h;0cIkZj<&;Ur z(nORFdk)KoV{O$6|G3aNLZ2N5BaQPN{C~u}U)4Zf1HFT+o`Wpyl1V*P1^5)8jn0E+ zoTystB^5ebc(yykoBWVU^3Sd!CrU!IyB_3`QMjcOyR-Z*09J>=TEskfGRmF0Xp(Z( zt7UUx7U(e60CI^_mEIofE*F<7HZ=y(%{Zw@$Cnv_#3ptCZ0Vu-a$BjL*#ls7=-VO7 z>E@FGR%i>1)-c*UOimmxYqP+5z$bT=-*bQ`d{r@Q4*cNRlB;X2OE7D|FFOlz7}O;l zP{T`p20N>#Pu|Hfl_hi_2mgwoFC}{1re7~HCL^}?3i+p%dyM5_bog%Ng1_4vXoU_% z$X{HABhr!U;n~RfYOguBrv&8C>YvrUz%8N=K)57ZD5Tq#kKf`Ab*>qT1p@{-~&d}L_fNG>BeAaQGk z7YO=6YF6j7g|f9&9x9j1hOl7r>Uhfw%2{v!>z`;R=?3a)!h0ZKE*T(Yvxur_sg+Tw zL5?>|N-awqAS(b0a#^*k>H`O+9u z@>2maxE=7v}5}N06?sp`457)J6Bvv`R`QawH2t|2- z%W}_ke6!Q4oM^eHGB zep;hPw-(Svucb)7w#ma@mdo9hsSr$g6|R2CD9qP+Joi;gCn+K+ zY+HV7f1bywlc-C>@@#eTS-xsC?cjq7S~C27p=Ka!=%sfPjKy!vY?ouFK?+r9M6Rmx zY_`-`j!{)I3s_^3RK(kY=p@G!Krm|f= zncDdZ7rcp4AI#qB3fQx)70RA8`l2X8iykqq7Cs)|}ZasIS(QCot%hQ^qlN2DU+e^h4~myNs>< 
zw9E+mlk3o+;@eYh&ES&W>@6evBr;45+gb8Y*3kdoVhSga^dCh&m+fjMC8)0YZ zCo*RSsd}yXx()O_BRm~gqpq9YyaP(;7?5vP+s!LqoGO1iWM-64GX>c^6G~l@^oaC9g%%gfD@SRB>2i+z#3B8&l}X?p;!=SM=#b9o z{mq<#+<+B+K649#hPonpp`Cx3zCxfY2BT?r;tCkvDup%W+j58zba-cdFxYd^$ z<2kKU12QeM9p!RLoC{qIhs>;r@_i?akjVpz|DM3!zHq3ltd*rL$ z{3Z%(8!sEHbc}@Y8ZckFbl&up4CSJP<=i?o#2TFKJOTDvUl}q^ZuE39zP@zJj65c1 zNHamY%n%zly>nQXBeT3-mH~H4miu%H?5Cb`LEpsqE`sNzN(S$c3J5rDrBXnGC&_9# z$3xG@o74GBSD;w!*X-hk0u_UreFAz!bF(D3T_Sa6WhEam9-TH@>(N9%Ib{Z1RJjuQC;*%3L8TI;Ia)DHdTjLcj275)XK)$U~ zL}Oc!s5r1|ah_z8ZzBYjeOCE$h@6TBo(LW;m9&e&W?#+{dXfMgqLk$aQV@3`^k`P}OLLv?GbUrxo^UW*o-dXOu0$WXepLD#PE_Z6 zwsfK7HOu+|X3JE~QJ)|UZ)a8RZO|TxH?cashr~?&A4_K*7j?b;{}~lyJ-{3I2&ljy zpeW!lD=4Ca;)46GIAN4oa5bnWI1gl>YycLXPHNJck zFMBM!jYshLeNyLDMO;YP{N zFY9vR@DC*USuQKuMye!6o~BPW1SB`XOr0*h2P>vvi(Dgr0KY;Kn^Zmo7D%%&;5T50$t9;jYdbO0H^Ie-|% z%9P&+qQflhlVn>@`9u|2ZFUEae(I9$X=Vy44w=ESBY%c357 zI-THlpwcI`KE59tYqgAL;d{7GGNpDUOguOYt7?xtV9BiU#y(csCTsQrNrYm2Jdh#e z+z{?%wT-nofrEP(<+3$WcB{zW!B#wZRu*R-w?U53O_cBNpzGDNY$(S3K2^n%x2beu zVtbo+f)+uB=>-3!y*wHStd!dZ$vdgC*>8W_v7OI0x%e!xJ!JrANnoXdX|Fbv`Sl}a zrts{2$x4}5n;`?>@z=d2x947*DeW?4YkS?KHpia4LPe{%nH-#ERe_4--sVIu>J1z{ zBd*pGC)S=rJoHAk6cy`qdW_sP0m+c*@*yX7^N^9LoJ14kV|*g0*H+A_KQTQwSbM{k z3(U@A?T9vyq=z-_64@C&ek#!{F*xQim|EFN^Piy^R}@MrDZ^s##m@yLdEWtsREZ^%IGm$f*+$|m36H3A)uzD*cFQpfRIoQ|Iw;*CpJ zuKS!cnoE8Lz5u!qHqLO}y4N`v9KUy^Pu%8ES^YP9wq-LL33i|dG;U6(O8+#0nGt98 zQ%tNi#JI`AM_A9NCucPSlEDo~0bl4@|NAvKW@V%tfq;xG!@}-_M+tlk2KRa0TB^I5#>KSWZ(7UDpI-McFF$`eV<)X0|Jl++ zp2$xBWP!X@1p>iYTKAHC9}6FMbtlOVCXm`kj(2c@yfWbAE4)Alb9wsuUABsGUK2E0XCk-TvaMa3l7} zyB+z5)6>HJK^1!0;igrJw>>op&`8pH3?M9KMB^mzFc&Av{dz4lToj#kd$zm@F=R-6 zYE@^(?0GOe#*uM+HYcPm@GniMBVk%nYGqg^xw|y>@Gki6ns&J|rMvVQFZq**E)F@g z#N?POe`6r+k-MtY&)zT2d}$2GBfQ7_){5bSJZA+^XDgzw^cXHJ3TZ%FRLK3mye_xw z5F=0fHL{J!pa${}smh|0Pbo!nV60}0PfV48qiO093FdKqcjsBd%B{olXd`+Mbc%B7op03qD`zbKy?$B!Y{E13FZJA;7hBlD7m;%OG ze$BBJpDfl-|1*K*lzPO%XyiIk2+_P|AJ*AG2b03G6ySo_!MB+oV>L)6Qhs8nsqdSF 
z88UIG{MFj5YhiPTFMtwu7Rb9nIk_kqC@jnaadb$_Q_$Zxf!ZJG)c5;B{{g;f8WO~S z@*AXe(jP#?1a6N~5AmH@(ErmrWz#0b(gS#dxx9#qVMzdQj*@En{$HV&K9V;5bm#fB z)aJAw(4A*yd-=RMG*w@y3oVi3lOL+&{XQ}%D)@-l)Gz(`T#9l%+H=A)qdVxT@SCfxS8!ym8r~SjBpUp^7{PP; zq<@qmt&3!n-*l<2y-!cb!8(}-B$R@rKaipkP}F2Sf48cXu)fZ_ttl>&3mg$+o-C3} zV2F#Pyp7yT3W`%=H-$jImSOgJ5$IprLIyWMd(%9_Ax@#FXKeC{-%OvuWm9aL-oS5dk0VUx zUs@N3-jq4I{O1KFX9xbzPK7@JmB^6l2vQ%zflSwG8VR zd_!ebpoy1w!yV0)W;&LC>>$fp%H64EG^h-?OtL6P&d18BTR7G;@Z?-a){39u5{h?l zpq4hj&|R+MzLgv<0GbS2*9>UGH`)%5QdhVOISj%4I5{M;BS)=U@3PC8f@G+lir<`H zZSx-ZlEwzYBOi}vPTiMwV6}XTO{%!Z$_q==znrC~@=?vgcaWMt)0BTt&y%a2gJfu< z>YD0jFU*y}_Rf1FwHgmKWk#3Z$n$s(98SHJooQM-wc}<)QLgJxF z9`3K_?z|Qt_<0-E>Mlu#hzV7J%k7%f;zdS1P2Tc}^iX1k1TZ^q_vF7(ws zLeA~dkVXu(=Wpg2hIUcbC}?fJ^;&Di`#_G}K+ehUIKxor6UQQtA0%I#JxwlCERv)` z)`TnV`S6Vl^BUPIv;W01S zdsSBR!ID!-+K3%Q+R3_V^_!Iiu}rFg14sq2$GJk)T0uk=I@8=W9SqP<QHxHdLB3>6(i6;!#f+<7k@%zl#keUKF?>}BC21_dOnDfW6N<-f zU$9yDC&(i_v}X{na#t%;pbmjoYCwbmzAvTVs40L(i?muRyXSMW*E`6olfnW19I8C! z$u53O)n5yYC5zYwUiA=*l%Sc7?EyK@tO=b~&U9cN_&)@=i{0NyP|uUc)oD5P(Ew74m> zMw#2vxb}~l3wlr&rtHF~StR+2N=!J?Dz0}Y{Xqc;rE(6)qijf^q{ShxLsMxMC6mg` zZ;|p_5x6Oze0($Xhhl5+Vi|?SVu@BG0Q#|k$+`~QqL(_#j&Waf)~Ce3BXdhm%z7+A zLPO*%S_unn-ny~KgvjC2K!gS<@p!F9Q%@jaQ{H$5>=-~lF|sQ%(8cDD7#5tanvFNn zH#_6y*>-ZGEBzmil+rQ5%ML|i42&e=AV$3(jJdbeu>~enu_O$!xi7xQ7}!%)S8nsy zCdjADd91|Q{CLf6BpPy`IlM_WcbCa?>Y_DQhXRJ4|YLAt}a6_mMNk7Sb~<==6v zAop@cy?2nbyj>TaNBR)`H!~8qLL*hX3arQ{Rr1OpWsKJTi7B7CC!abK{Fru;jC}J$ zBn;&o_-^HnM&^4(jZ99F4x@ScWJknIIT9_0szz}{x`KO`U+zzVbfKC%TFNIO7sH4%aRsuXpPmG+&vbkxr;3<3 zZ}w+pG*?8X=F7LkO%{#=kX?J3*`qjzv6r_DjN+ILB&=(&0mJw%O~71365Q!tJx7HivR<-uCjtKUVNg7$~_VUe(|mXPbYc>#5;^ zUHltLm(awWlGGi>4oA$%p5DqK1XyyHo&kF>Zs`lky2l4_;sWnMtsqKnN8QKVh?Jrn zwzWZ{6$mn5G(2iqKMH)&yKJMWh|-Wy9W8PX$!_17g}Rt+1*(cQhL2iHDqF##QOx!X z-0RS0BxLLe??eC=&>W-lApR%z94p=^h+eA zPfp+TOXP*`%18wVj%ZP-^qCrZjKTJ1xtwrXzqgfxLum7{y*YcLGiDx}FBt{0v0ADI zn1KGrk>U1pybz429u_pRFke0bDBb3$jA-prW|Y_T@w@hoG4wgy(3R~Yo__W zRP#0)gIX}2n;<3q%XZ%FTOe)R1v`Y^oQ{JII-GWijW_Tuqrb 
zh(}eCoF)1O;l)%oT;K;}ZEvpscnZ;~8<~6Ag75|(<|$9*iAXt`LH66^48sccWQ_a+ z=($Md4I$0!3R7qM>aJ%mCAW-)`mLQ(~ z#Vy?RR}|U7ht_nU0ZyLILU6|9wdEE})5OM=+$ zvGNgn79|JBCNt&D7~o{!Jbq;pbIAVwxm`l{)ixle{yo@J*4Ahs;;i7F{dy60BO;Ta z#oOD);;8_{(B4hkv`MUK5U2U$&SR2QgVgV=T+Rlm0NMw$HAQo*e=?wcbdilW6L^Sh zO*em$|EqnL_xoW-?#q!gP0Slez)w%P~%qEV@BsrliUY z+==>fT}Umy5^I=KYaH+4Qs)BX5!}qIY28=4IE}xzMhYj>rFXIP93YW1$Q$^qq1h~z z<|oMYuni%`xz$f#_5j)2MLL5;G~JRV(2urBrXDI(8jn^<(MX>Em(`4jgeK-#27~d- zG%Dr@1Q(l@mfV>qlLCCXbvgNSbAyAN{F~0_K>Q%yWY~LX8tIk@GkcBvML_MB9vLQI z_nc9S9Md#0S5_$}3*>SOsfv|}g`r!gd~frGy2=U7x%*P6kg~TF&qOX(9NQc-WGa?1 zR2NuKb`()zb592OmA;D^fwNYo;=0~lWgai*aLSU5aULc`TYE@(3ecV`;8u)GnCA3SamoPWrp}^^i{Tax|%AyCN7i zR>{A8SvEpTg3YCCb91y%Mq3UFzJcO!wTZ;Qk7dQaN&0t}vwciBUm|C+b&+h#r0@W~ zEIb7u3a6}+ksAygD`vqMO3_~PX$w7}z%0O9`$dkA~MWZZ_i};*+MDe#y^HsDKMBVb1o11)4l)l1G6<_%~V`|xh z-XxXdlUquaFD3CEfon1o!cD`Sq^B>iT7L=7y@NHrM{P%w+LM;%^;T4}6V|n3j(yoh z<$~m8+zV{QK+LFNC!@?jwLSQmtZQv`o-EhfF|tyy#mF%qi2SEbGVDSvkyVUfp3TkY zQTxtOsf-P6T0y#C_YlZWI!#VHK8sGo#e$q9{U`xFY@Ftw1AfJDXB z1N&RxUW6YRBwmj+o1s~eD{1mY7r79vFibOfr6i9aq-aN3o1K4!CcO58H>*4#E{Uqo~`@8^{Q0YfrT)iq^erF1Rgl1*FI-o>|i z48~GCPu)vz?jRMLI3OA5Z?7+uD)=PWY{|cKZO#Kh79U?=qyo?1FEXE`JDSJ^(C^?; zvyAn{EnvgT%cJ!o;Y}vEC;<|M2MaS={M0bw2bMB!4}0kN5}oyD{op*%0M4a>W%O)T z`C9_=$ri0p$s-H(tji+1HO#6Mbya1OmNUBDr2`=s9v8{ed;t37ba21($n{$3q6fv%EX^%6Y92vp#EfT&04rtW z?k?P8xX*DMz#(4MQn-sT=E46l7KI{@xhIyrSNBVVIQ<-iNUIr%UpLYwm)$KiLRwFjKgXTCPDt!33b*8qDu-z32PBUyAT?rKX|3Y^b<50J z(ne^Gr{U~EICc%J5vm>I`^dr)d9j;3nIy?z(j+!n{dVf*JjxaIZ(q$nu4@gsutfGX zl}L7HbGjz9QenbRE!8OcnWpBE1UafNI2g@p)O1X{DwLu$Cc32-7PdA1f9gihRj><1 z^w2!X>o&Q4DzkoDE1U`C)v0KD=tjJ}H{&jEiFtOM85TPV&9m99H!5BmEts1balLu zC=H_-^?qqo2;7zwZPz~L9VQ&#{;gWx3dV6d@yV=lCJItcF3z4jT#r7QP#T&M(Nm$| z8|=DfbOEhcEX`VwQg(Ef{RTxH5uf9~)ZeaeO@IZ>IQ>tW3IpA% z8G{c;(O@mJ>eY6v&r$@#G-ytE_Kmt2W0J-gua~=5&o|L}g8i^ih9-pe1$S^T<~&=5 z0U*qbh}Y$#V!>j$Saz36rXqlwOwj;4a3AgSpK_TY^+3sgl#4fvmJ+^SvkjeF01Ny{ zE1Hscc>Pl)C;s3LyCdx>yVvL;$A4LkH?aSot)iQ7> 
zoq4c@G;I>qTXIM0KH8_54AVpRxe8wGpscf8UEgSq^n$O!%q6*90gOX#eylBbtr0oV=VaGmv;zm)W;>+MJas^4BW494!fM>qbATCO(8> zCQ8)K(EatSfYn2(m&%zOd(Q-94LEi_6%ecrphUhOeoo?E=RqwcgGR_R{_xU7jIT%-NP)Vu1^XwUXI# zFR41&a1RDeTBG{!g5X#g9v7Y@>zc}BFKci5nULApR%W5lIlE3F$0~dDUXi($A0+2z zU7I9V?jU6H%4c9JL+xSwKp@Jl6lGh-BqDy5rtlvc*(nFW)TP6Xj^{2@aym?v&*w?+Wi#t}nC z4`eRROzUw35|I2eg~*aVZrz)!{N|9iM)2#mC()^Q8k+T7jGP8XAkR%Fd-viX-GANV zBz^<)&w49^##x^pO0$)OPNUr8@gLY|bA=DepF!!$LshaynFFnj-QxULp4Cuh=WB5y zU6G(Kvgl~48IRJvBnPGKVy(iE@2Jd8B`wc!&}n_qLZx87=_AhWa%5{%jx5Qo`-tWE zMr--0Qu^h^f_iev+~smhzV3OIfAY}(7IV*yr68-CL!l$DMlG1}q3DTnq(EdxgzeHP(R!`kSn}luSLdjMO8V>GWZ2T=HRWUKkmmd0ieVbwKz{8jyM`lD zgG6m;X)vUa40-h+?LB`0fE-0Gy>&ac-@8h0Qk7Rbfj%A6HWp%_AEN;IPBoB^G^GW4 zY2EwOczCm~4V7Cm`OUiR5qYd=q|EwDf2=9Zhgw;mJx@+xlA$`^8E!Qhd_!4+wx)6m zyh0fR>B&qY>*bS=DruKv{jAq#N~^AB(mks0H74E4hE(aZe<9Bg6JJH}3Qh)o@g z0fnuq- z($cO;`|k$HA8a3PD>_dOZ)R%RR*=Sq7O)B=iUuoDxN~6BK$V7ZIv7}&UmCBGZb>>C zx7%efjOCWW^StK*kk%$9lWN3J=fM~<)>AYpo2;q3`%7GmiZ8CngB&&*b?4I1WoR$93zX}@dVQ?nX$FU?Ya`Iih%FS20gsH z)iDg>TrEJ08L{s4gs$RQpt3HV00@|)@IwyCbU?m9 zKf<-ScYy?_1I}-jMqQ;r0-a)3_9I_ON}yPAtEW0?Mrg|fo6B*{e7VWy zsbH zz8#lkkjgqA^ox*ak(# zv@K*cKk$wMB5~7*d4nQUM$=@B24PSzMQ9JoYGu<hMhHm~cf7>iF(Td{8OF=#lh^%*VU67x+{T>&yMDU*gB$ODms z_sUljDapcu=OBmvC`ZBrZKSD-%qhG&XjP!FD)w9@xL z8U|}ovrV2_K+Ek+jhe0Vh)$o^B)E}!^Pq!NN8F|?O(BG5Y{^IsdC8MJ+a46S0+aC; z??1-o3Z7uw_nNQEuqM&apJe*Lyi$ZRkkrW%#+r9Hq;~5zLy7nKE~`yqIG8MDbyGri>jbU6F&L z@JhMmgJIIJx4I(vq-&}9I2KG%v*w|dEL~e$v-Uu|vz0pj;aan5u8z`Bo+H<9e7LUfJ%UAc?T%p@w|Dk_^Ob|PGJ|eqE|F+*?Bn8_k?F%Kz~@b0vO*F$kY+1Ex7c#GNkt3auX;djTgmSu zh|#7f{bhd({JluAbGVbwVgrj5?fy_(X`3IBbKCz`f|riU*ezQ1tO7(U_ibR=ToNNc zOp|w;gC1d8zt~S=9+Bj=CMz-h@ERU3V>wT|C{=0`WW4gB2A2}^7Zll?tH0p*@0U%7 z^xQOQ4A}w5!foU?=)TBs6xeIbqZ1eqsL3rHx{+m(VHohN(BLnzOy^iqe7 zT;H1{Lwk~5`{W*EB(No_98Qji;E`6K!e|W~#HQhv{H2Qf_GjB?*xdG8C1>#1`rPzoqypE-lkK_EoeKg7LSCzt zm;c`+O5tVyMQR^`Gub>-zR#2UD=)rabJk_*IDV{FZhs*7CU<@8NiLojV#=3IvR4IW zN=#eYi6y5>zNwHceVIZi<~^1vFR@_vlI$lHSh%Sdv8|Q}%A;A6z8hG(i$qkougN0! 
zE*qOl8d{1jqz0KvGB21X8!#1O&upI+sI60saj&)V(%rI*vzI~F1w_ilmvW!ur-bt; zv&g+WY`*YpaTe-iALNoyQD_yPHf}Jk7A>B|0OVN>#0%{S&050#Y7q z;A%Rhuu3C}$=S?PpX{wf1_1g-93R|u92I2_A{EsQWamh^X{dy&6{K(eO{#rWCa!~K zO4MkE7MNlE6+wL^TF$IBK)(ac&6uZkME-G1HmflB&+TOYk>DGgbUr~zND@(sDU~zs z&~nM7d5Xh-(gIuZYmdnnxn}%0$Cu{MY~A(qm4SGE4jm~8$>_cJYfeLXPt)nkKv;Q7(?8p#_4=2GNqLeudzybz zolFY==RJ`mD`wUHYjf+`v&}AR3_P3Kp;SBr!zJ$y`L%^^^#gW9EDygR zuYtV@|HNK2s+1?c^ImY?nmW#@(BsJ3NLhse4nV26%KhfzVn~1NDL=jxWX3YV*Yl)a ziSEsjJameu&xtX)nkMPhPNVJYhBB&>hG8`Gxe~tD_LW=GUJ> zYGYE|n{9K~qO^ws5Ii9Ota9`*g4|pkCl4sR!!^pjPu_0YD!b@NVhP2T*TB6Tm4oiu}V3mE*8{Lu`kCY3zO`kX>JgfLBM<-=kASUXP|Jx+*i zO-u$W^*3HtZ2zrBvnS4!6Hzs^!Wldh2)cLIKD_!5GjIaCtXnN?8tya zMbUh}IPj4)(j#?hKZpo^Grvem=gYorN)uZ*+kDU@mbzJqP<7le#w-V>Y#v<(?E!d? zDpE(!BQXK>jyU;^+htRI4e%rf>Jmv+Xy-kt=rZNcP~pPbr*zhSK30lAxm2Jl69B|i zmlS+msydtHdvu(6)=H?UjK7Z`4qI#9K5p)DaP`!eWMs0eYr;jXKL^(t={P#~S#zNq zs0t^i?$t>e_DFVw@Z%?ItNh$$9fA zSKL~;`K6n+t~cFG$m28FJLFcc`K=8}LZ<=#btM)`HEv6!@WWkXQ5&%<|LFH^8aKQ! z*H(P^7wppNAy7Be)J2ET@57~evV4(Xua;`W0%T02`O#N*j92Q}ft&x(MvX8$Xg3~p z%i}=oO~QE92=>a<1bMh^?GDFGMihy+I|A#|8|2Am_=;MvK80Nb$LA!m8#H=E0R2g}{YVt0cUT4Xu5#02Vfc!!A&VBf>)MUyx zFPLm*nuMO_UA8A7Z6Vzk1#l;G%c5e(RhuuMnZ4RX@h<_=Uure*^X?#Fk42g1mPPcG zRhqKwF}0eZo}=rLa{huiOk(0 zwW+l$M=s>cH3yq{m{a6%Hd_YxrQ;mqTyixagJ#(0)xHq%o%GI^k0Gmi zB1rNt;Dkj6_WfkuEb6$-$0MxN)S%SqG=CJvs=%*hKTNdjmXsOt6I)UCs!{UmeH`3e zb$_mdi|@EK2enJeefV!X9nZw#z~hx$n$`dN9;zb zcSg0mjmpmThO}+RBE6ksjr^7};T(6mw?^W(O7r{d)tvp!OqAw8(=*&6uImZ)#~ium zUb(X7J5964~U1!YcRs4gVBuCFfC0V)pE|KJr%AB5=0pkvu=UZ z61=VpeAAmy3eJK==EcW~8bf?^_#xTSjW1mvgaNCZ>M08b%JeN#36jg2Ge&XWwL7Sc z1E?@}j_2}y3+0Ov$$k{=kBjI=8pkQXAAWcEY3Y-U(-P>aS%p%)SYCAC{`H@AlJ&aG zZ5vTfQk@&6(1~l&vUig7D4cLbj;QU-j(!M#`6yDFy6$obrc!H?x7HY`StMPkfS8Yq z0KF<=kSc!YaVEos?q&r30m;k(OvkV~D+NZkMg#JlQ5dK}&3`sgCJUB{p|i~VuVPWq zs2C3BGxU>;U&)FxGvDgfRR#m*l=EX5$ltUT8=41B`(C9sRIa4ySgjw7kp>v+SkIo2 zdt;DGqy+WOGI>8eyhAtH@eAeGSRJ`$j!36C8gTVCYsVBa2?wASNKnv_Q0{7ugofL^ zQ)zng%U>^-*UIFX-SR}=Fp-foO_0o?Ja>mIxedI1dhIYBIzFd0g^sM-tEhtw>l|mp 
zJ$g+I7J(|T0j_Ft_i8l}L1sUX$=_~E3#Zi68dLxN9%+-qUbdHIpdY5+l zv+**<{7<^I(_Ol-VulXIPIp3q2)!yf3vEzbqnYL1Tt3>BJHPd`Txn-j*IdU)C4 zNOhnD9`<3#=B&NldL)KhowDR$Vgg3WY=yJp&7pea=mF^~)}u{j`tJIZ<5Z`$W1JZY zSGSFZ)TJ)_849pvN%i*k-jME>)NpkCLIA}5&7g0Z+1VhYOqiWM~G-f+OF;;o^p9T^duT} zZRFvpO-21ovmIU%T1F>!o^tV+{WNMCeDivjEXd!Y~q^o%hqS);2xVR5G_;A&}qY)O7G6nwVf7H*2rz3(~a#G3RfsTPpu>r zSX2_s(#QJa8UFCJN7++>v`lt)M;swB=P9}9l2b9_A`F0UP0hSXJ;si^|K|kB}JkR@Poa`P#>K#btW4+Q#9@U5*B*Tf;yxxc> zrIW_sW82x6W3c6h*DB@AYL6ApkyqQ)zAK(gLa_YBjC8i{J;`V)<0Ee%=L37ZsYF~u zNe{{9m8<{dPPg9UIEJ}SiOdl`4owa&H-)#Itb`Lt6=7QsbD>;onzu&Ob03$;Jvs8` zCh}z=MLF$TT)US}9d3m_+1(5$^(&P5-V~WW^snWNJI6<|4p$P zI%t6-x3OGI9LIKVeN!oWU*WIk-IlF6tWt2MV(%;8b(cMFQ|Tn(>bae_4r?2jOEKhe zNqePabCIGBta_QT2(i#Qn`hfE^KTPlY(@6v4CWR0*C&$77Xl?PQdj%#4im+ z%X){r?c%|f2-m*ml9LEeNZN6EN>508pz;M*$-d_F*0Cm7zNw2I2lbsz11LBLq+`2C zRsd;l%ww%srT?DG>!x)udy}LwDu5$opHH5DRf@E1jzN{j@f1RV%#$iz1TwHrg=LLV>^HSR=Rr zd_IL3sJYB7GvO%dpQNgdX(#G3^e8nyTlPN9gIt1BD=UUm&H$9^BamA4fOr=NPcq2s zUXg*Vr2K?jiUj;(B1fB|$|cuy;f(9Abdwu+i#2_i- z%xqabP8W}NqHC-8`9p~k9xGE@$x}d_lVF~0H2F+HAe4GM`SDcd#H02;$vHdIw4v=Y8(zr}I8!<)^Zf9-S9Z~y`Uq(r8 z8-k4^yJc((`&}}-H80;S%6cjl0BZG`Qii<$_<>i_twyRZj>w-CfC#S)G;@TM&3RMFSSyC;2J$p z`HY-SPQU<*kf7ZOEp3ilM4nZ$Yq)aD0gf4TSz-^gO(^0`D@8rv?I(HtUKnG@i>t}& z-9{-97xjF?E`pF|TjXknbT5$8^CSx(+`1?gFS(%}PbtmiZA`0C1D#5Z0Va}PYS_`% z$fY%;^J?#wId71eojV0pgSo?lCJV}#`Z(A~Q zmbtA|w}zHRz63sM&@n`58#2lp-I9sc7^7BiWGBxgW4NWL)Td(=;y+UjX-%81c*nTI z1I=v(@^n2t1;cciH&U{u$|9g^2jp<2{5LAxP`Y`N$?fC7W?t(jN9W7#o5@lJ&xY<` zx%WyJzkEhwFjys*mrB)Uv6a{JFrHxJfG?t$!ID5jjlis%kkpU`56O~3ym3tTCiGK- z1vKU@9p!SjfR8^%L^1zEL2Kn;v!=+%TVf#}7WkJ>rjiOno`g*|cD{W-q1eN5Qf{Qm zA-B9Q50;6^HZEhYkr&n(^3~g8qiSqk)kPWBK=KER`v9-DX|{4Jk+8#cTa~WSpKPs# zbZ<%knO_rMTD#A{C>W3^pEXl(!92nh8T=vjXq zO7Xx{NsE${kKQWB(^%$5G?kG++G;^nMOx{;hc-9St!HbOn`Vvb?ToCxzWph8#n&yf);}x z@5j>jda4%sfw$S!j&6C?=k z7=tKGz?EMv{Q!E%(g7s3`pc(CYBIAirT5WODYr^VVh}bp>MrwmYJ_&2_EzIm|xWxpt#8F3`(tw_vi=XG_Xc%DDTGAer9unY1Mo~U+`*fSzFKB-!WfC#n4() 
zYE;%04y-ETT|Xk3ugjxGatahZbXdP|a(VD4nKC9Yh9UIqc6s_0^Ffxm&JFl%_-M`7 z?|Fh9-~J9SFo{^g=V%8#5rbPfbXVXjxd~S>9t8Eiii43#>w8DR?L&id>-3YSiHVU+ z-YE80DMSSy)c!7So?7Au{O0zX=I6|By6dhODgPZ{-LHV$DFb9_iEM*aYkjjp1GIJJUaBwkeA++n;E`)f<>`*nEt#AK zu1t5z*9%Ql9BU3NnHh4lsa$PGxKjb~zm2*p!WB(JOr^wZ$N!f>Rr9c9jhC_nMRbwO zpPnKA6%eh(@63kVR#zUF#VPiO8Ct3I1z)cHBHW6DAY!vPRy{%WZuPS|aZZr$4CbBd zQ%|vpKt??}Do;fkpK28PT#mqSaK)lK=VU7x@u_U9F{4|@DOh1i zUztnMAygf1Ade2EF>SLcA^XxaTmF3kBWy-9>^>=4cAqbmd*X8BgRaRqWO+)R=XNMi z8^l^oJhSDsv_Na~VTyFh)(x-sn-V=bSN$W-50g8~*{tjY2OMyP}(`Bk^1C zn3^x8vQuCvKYS}+Cg8LaFUulH-7+tpT!|bY>D7*{3<`MHAC{ze(qROw-#ww*2{frf zxuaittgeiw%Nh%tLgf1V(n2ry@sIpbQzo~(=alV(yd&$Y!_esJ&)5ph;I^p*)kMk8trDvL%&Uy z+n(V(u956hL-5+^SR@ON0T8d7plO~bQ%sYA{AgpEd>Adocc#J*!9NM|eT@8BCZ|U6 z*@51Y{+P8X9y>}*EacHDn>&B2Bn^-eIVi}g^2G;z?58Qb7)ei%lx=~AT$$S|RAwU! zhEJA$%2??g89tF-IE{DMkATwKvOCE>Os@8osm+M~kK}}3<&zUG@rnE$;)MBnj6~fn zCmec~zT0o}{{9Z!_{yVCytIQOzjX! zxHZ_L9m~pTmLObLALeatB*R9w3ch3D?(d=%z>)y}`hFZwcw(4JpeB4(FMQ#*#Fisq@iZHaO4W_i5weT-iTLvJ&+$Q`DAkeyx;=kAVf6*OQn4 zAH<6%M+P4XPh_61DUd0u>gZ+$@mJ^rv&x%ED3V>m%Cfek-fexPqV;=k2 zGRQqA|3{P<+(7B-_m7ibF`C&%=p2YvIEvXeSYn!*^UEZWH}I(;?Fwtgcfp2oXNlHg zD;LGWRe*y#QF>B2Nf4wr_cRSnmiDbVUwXqE%{3QJB^_ zN3ovL7(xE1YDU(K?*nZ~n|Htx_@vp^O_oiRQJthcIEB*mAM$C9e_o<>cY0*bdR~4^ z6#ZWDdkQ~n_yB*&z!7YSJ1cma-_FRK3bdMBW=vN$UeF`f2DzL_puRl=uN-Q=j>WR5 zqm;S$^71F?-sbTLFL}(EC}_fLV0K1D_zsy^o`BOcrQxT~0j>9^%jq-^hps!7Q12NL#=6wD81>kMZHf z1bKEw-RIISmN&W520sO6L0O7;j64#heyK(BPCz=}6YMIhnjk006#wJBde@8d>*1h ze-?&2%h<`XxlHmDvG=HYT9TYGkGvs&y=4}qIAUp-%UD}~VlWfth&gzK2FW8{l{Zt3 zx4o`w|7|Nhdnq5G+q{T*((whGyJlDYg7o0e^z;}fZ?L+S{-4yzR1=n(qCA+$i zJ5|cREjXT-p%}mt3qOzAMzWlrCv6fW#%;zAmA-K2O?H92|GIcP$bcE9-9T1DNNlTl ztTFxMudMJ^8FEMl1!QAqUO@3;dsX@|I#WEyB%Z@`I@%kk+ydSf|KiA(F$dEc@d0iJ* zXsz)h&7OVUUS{``9Q6pOxZ5uC!NKOq?9*KF1;`$>MuNw@pP2M%DO6PZn`G2TEySO9XCcv z!>T|d9e5zbPj``Ns9F%ie$qYsF5lh$h3rys=pNaoELxt4k@RBS<^Qg(oy*?ddboKd zNwyN!%7JOL%Uw;R{Xlf(xLJ$dvi~D_?UV5D5wkSnUVsrHwXR_ivt=YTi&DDG%6*vR 
zuirV92FPnDt=q`f9BJ*5nmqY6iX#tH!1uJ9x3#NlDu3hKpQwX>v^L!RqJ`>)KTk`;-GkH}7yQ!A;FACu(3ddcZ64{W7UQQ1f?@0B-O*h)hW z$xMo=5cFt$W&M88P3E_g+z#oxpTPeKGSLJjkhW+84zuo7PV~M6$3ptguXWy=Rk->n zu{x*Ddvu7EL-nTuWw|=h)?vuMAxmx~M+NB$mD| z`TwyuB)+MdE5&;y+t`Nb*7@pL9{0wJyqcLEFBSzIGIdP& zEvdN9&DL15fV6`Jm<5xR&6+p5)?B8<)f=Y1n;;Y7rAz9`d*sq+P0XG@Lk^|(d95kr zH{{n19aisT`Kd`DLFNqBW6#x=3VK^+$*=pk>A1ITo{0C&V}jQTk;a&pSbo9GmK2$^ z`{}6;yQ~*7rSN6Ren<93nIgY^j`s4zI4LImZmlV|x14YJ*kmU`_l{|bfSm=c;UpS6+OHtnF?-|I#`>3069C#}DxR>3gYr9C%10ty7pV%DwL}kk2Fd zDgO+0{3z?kvvK3bl@Av}Zlr>@^dLTLPuJirVlU@9v)U_7XA>KSuSrB&4kd3VObHm? zRzzgo4>IBoIk!tz0Cq4Nhsgh>*?bNyU`-n)7v}~KN%;Wl&3fmBo{gocPSr1sv^*_; z-xby*Q`B~e^2*Vck;R0!{d47D8wOd9o+D?h1zyYRBufICOy;&{@fNv|CZ1cwK7)jz z-@Fp59%1IlLn4{-S|2$)pe`-f&o$3MyOW2yYIcmYQWX=u>n1yjE7X@3H>KpBz7;7x z^UF)M3yIN#Kgx8)|41p)EW~n8H21cToOrqby0*;=f<+VSy*!kUqMhWk%y9G z`5vsmiX|4!d{m(7HD9BfO7no6-vO%!>R+*nc+Y5(06~(>fCsw~uYCj05NrRrZk!=;ZDj1(GxZml0qO=1nW4BI=Pt7e6MHbKOdBd zA2ajB8)qv-qkZTYsU9F_T;_{(E#+ksFn409FePPV2qMd|`}uDTADA^M;8Uq5u1uBR zZZ|(=#1^V9WG7EyR`8Qjd7lND;sfdvT2X$hP zJV~ZF8VoSKI5nmWt3(jbP=;XYQ4uOV&W_^67=SRfAPX97X)&{tQXc9d$Y=Zh_cg*Q z3^9DD{bJ)?M#3HwQ~n?U2$;Sk4x3`lyaP)hdvEWh?kVW;A`Hnznapv$ei=}V7BOaA zsE;^(`T4KgY_@)Vdkyc_w@+59R%B*8PPCKZjHu^#NijD?*u~GkmGIc=yIs#{bYb0x zcr=kScP_@vx>YgOL4K4($ofa+rDBy#yLs#%B1=rgx1a>)B`0Rihy=p5RV{El+3}@D zZ3>V~{ZqrLLBSTmZgOIGYa0553%CcdO$jdTDs77hRDKIYcDye8KZA@5@Ms1l!p7{va6dKI35>+*udfNT!W z$T-GuII27JC%{#{z-XvOCH}aYR&IJ3H54{if9*F~Mk{Ct=7ecl6o?HzsOsw;`{`!P zLMDf&BvjzI68OMb!b{^BO&j@Ko`~gTe2fu7DNaSOfp`piFL*Qjgi-GRl^z()z~BtX z2qwh~K#j3eP|S?MsK!f|I1Af>wf1`*&mu%y>|Xt?1}1OXrlU*+!v;zeJ;$g6G{RuU z)7%Ha*O8+}=7Hx}pnS}8t)lUUxUmP4utBFL#Bu}3u(&iFMW)HEZmtM5araV}<8IDW z_-Gb~$h*7j*E?|^<}Vy4OmU%y;Y8Vse#2}MpAGz*eqg{`B6?M6S)K|y8R$)G*7|A zpVsDR>;IUNZaw!X49VtMR2UhP)W>T@8)MX(^R#J=jJdmAG z23S}R+YXBy9O~lG5nFL>c~tjiJQVlwJL9Uf_BFV1Gi(be0vEdwQS=o*Y9+AXmKFu? 
zPGE_l{eCV}TLY%Pr<84gDFTqg=B^DlW3^Ck^WxkZzESLDrA2xc>KqG5aDhE9hr&6_ z7i9QS$8$Ds@HgZ!Sl+!7h*aa%91Jy}1JYZ*z~TR`CMSLCBf!~teD-t6j5i8k2Rbza z*jK6||3UQLNXvAi550oW)7Ih@s~8+amzLfqf6( zAXCaY$K&vp6R}~t3LIqaI5I32pT!Em#Bt@s0LXCxuU4Aka~X)9m&PNs!p^OgxbjW z7T8J{BzDFopjD8H6CIs2ZD4-%FiTb6XA2I5T#(_qNqwkdbdA&Ny z)N86sqdH4Kcr5t-UCq=+;n#n2 zno)nAe^~`iop-BMNOwAMmcimvXV z|NS>po~g=%IvQy!qe5^Elu7A7>+z1#&p#(1y{+LO;RK>IjfR z3y?8Jtpqf~(F779CFC&}ipn9hM?@0QCQU?B(00)H$Io=s{z0pJ96>))UBuv!IR2vo zib$gn6GCPI@lN^oVsylz5P{Wjli~jt??NwuVpbjRQS&WS#))$F!)?eX$>$Y8EvB{6 z??2H%UMZDB3h@D{)hf!KYw+@Mp*98`m(Z;=#C3SBxCFLxqghvokQ6bj*-&*k zni+|h;J;aE2;F&|Q4JWUgoX&hA0%CqvIR==bddtowd*ndDO5?4)Gm2T7w9t=4RBE5201~Btiyc{pf1c9RgHNaRyQc z^lXNv(0)|&sp=N=uh}J?H;#`g8}^?NydX)iah+i2Iz_?CwSs(tY=UfJ8YH&k#~LJMXv;_+kZ%54^X~}J!%(-^e=fv7w+B59aVexBs2&s1 z!2c}z|9T~Pd6Dn`w@siEM?&%s@sT}3jcL(0|M$ZPFcAJAlaI*0He&DpmT4fXhE5a_ zdNilxKi=8@EJI6>t|Mmle=b9FSdi>#A?v07pF{b_b&=!q?}U-a{?9V>P;_v}yF=C$ zv9teqC|ZWl5fxJ+a*Ou-|Lf)C<&md0#5py{NIZXkXYBd5@ZgkB!Y#T z*-lXSOvFF4uD5gjOcn7+`phO4-2RzeJW{-fLn_v_h*K^tsc4O2etQuYy;%GUH?!9D z3y)S;(idL6srD~xfmNcI&t$z@F~7M~a+S5^z6$Q~Nd7A16x{Ju z*fmn3M8qT3tz?6DT5^e~Uw%i)#(-jpQnBD#x6(}^UCE`J&rWreZaL4owM;y6{lPMc z%Tg(2TceqsWs)%_Tg# zDvj3Qzg3#;k&@rFx?*Q-6P8B&t+3 zunKxq8*)geRU2`ucU2qnnM(aMS#R(0(^SMW?dLYJ<6S?^#G|BsnM>XE_+=sYB<+`_ z;_I$oR`jn@HP*~JkDBdT-Dx#D^rpLNY=A0VyVFF_vv!xc^po1%*6Q80wmVIw>+J08 zJ?rfEcs{9fa5~;y=ja+GUBAcUu4nyT?11sWOeOk%EUB-7ZU~Hy(^u?`d?8F_mfZh_ye|{Ojs zTWIK9_tUnprRmird-y3=}eSdEPn|xP{iI7j% zEpwU7uG`ic{atr_;t{3Hd?t8@f^gQrR&+Li!d(+>O z5KtoDn;2a0)B7-_C$sm_*_r;{$LHB-eMyl*zJ1A;WuEn=L~9K6rN(Td^{2%;`1U`! 
zf9P3%dg6(J{-;S7X#*K)alQkY8R^dko@Kuo7|422LL1D^ulF6yDd>4N_`GmtU@(_p zQy6+tEOdD2WtmLYP+q0R;Lxk;Z3@HrwGM}eUpF4g8h+DyVsQ9v`$dJ3g08s3Bk%gs zvqs(zy%`+&Fjk^4S~yjIc=Y33PuA$CrJ2D|oW!m;Mz9L|jeX{j%^oY_)*Kr9!e^#9 zUcBDXZ~Uu>SN3>`*vX;sQt?ZQ6J=8O{3gofo@P%}D83z<_(m^PoUCLv_)S)6^=40g z*P9)h`~mDrQ`IKIN2Y$7%jQh|veq1)s@Z9#G+k@wcx1Y6k5|rgz0=9z=?2$JN;8cf z_m0doc|XmWY4&?NJkt_Tsx;dg+;C*}cSvu}?4PrG*&EpbZNXyHg{>FQgd`^vf4~#d8*d&=<;-&emO z+4f5+D|21<){;nqtb*-l1qaSr|1(b{0TStj!Ia9D5c#S<+9oUnFD1PFDi;StEfhP6 zhfUxQN+XIOvI8?s|5EfonU#ooP`*d!zz5vSEvsnrzywyZGlA{>%RZd@-S($)*U9i; zA_X>_Sj25Z-2B-Vr*ZFlrXT&7tOV40+CJUILgdKN?Q+>kL_yk(y_$47bbp1HiAe=1zOpZ(#uD<9`0RMvrCVWj#R`0^CC68Kj8n6Rx!vJ}^)-wLFG@?byXa(cnB2h6gF^2hH z-CY(E%tmMxsjZV{8^;eHUPJrnGKhzt;M(4PpOv^auh<&Xx`H1}`(={+gF@UM3FW-i zH-L3hTKp@Gs1R^^+t~MkS$u^rf0<8_0)yNa8wvWOZov6I$$ zh5sRyc;#DVkEL?Jjf$5qxJ=lgygTveRwXhaUFzJR)6#_}Z|CReuVo?Zli%H03Sc9i zvVK|9;Xj5wU&$==y)%F-V9oBg#_4GVGLn}}C8V->UMOVO-3zyh`{ib-nX zOW54XCD-1oEQD$3I^mAx*v53x&xYJKs=)#a|=OV{K@J)0$92RFMtg{aU2S;^c&a%}a8~*g-E>+(p{T{LbE!47)a~(VQuU9A4 zeIpZ&GkZA0v)PGLJIZX1hD~5$ZhXEeDJ_^}P;>t$$udmpf*CCDWrG@_xW$x(SZu&S7WABJh|mQ&?Wr z?@^BBHasas220x0glAtnK`Vd9O1v<9dg%PRG3@irH81qL=CGFnm#{Mm=MQdVNRl9Ecm5r90}8nA;N_zR zPvarJKB5mV4dNY1>yCEElR-k=d-Fzh5@B(X=Z8x%3-P5aqL?~C0tbuu_ND*|2wy$+ z3YsXOK6f+n+kttE?a*bjy(5EIu({6Pm!wtfw%4JB>L6BVn58&NEKvzHv%&rNtE=(bJfjC2*-VXK zf7gKbtlcj^IJSh}6?!zQX}W+nJkuRNwKfmiyRLacBxn)qRl4dpnc9opQLyOx>A(&n zj?oTv(qzKD_j|GKbPpzHpqa8cfCcb~g}{XBK8!El-{+zg8>D;ArEBNRVW~3ZdLP#I zV9Q6ex_{`92;a@3LD*Y1_$_c+zdV2gij_@KGpuF&nSE-Qt|SMsdjME$W83iPr9|$O z^>;C|&*Mh~Sy^DwbEgptg-lEzADojsG>><)hp5WjA`yy7dg&=HL%0(w_mi&WNenC$ zG`kKg;bvETe&<%B_vCZ4U9Yo*^%}^3QaVwBMXL?__U=de&r$tM_V5y>)3aREypKY- z(A?x32bytaV48wp=Qyr3WWlAtxq|V%Cy7sWQy}zy-f+)oOy{DeuPUHZkob9dyo@8RQJK>n@Wh%IS z-CcC`STQ!RamTCP(`tB~SZnVe$~@*>w<%MIgygYerubqD1$HmFncMSs;f-(kE>&=l zA^eQDFs+3|_;9;Q+T0z()njUIaEepm%#UMl-m{fs#KzLsR5%*oXq`C6O6+(G5p2;DcZ^~lSo#dbrO|Obf zbX6#D*L~C^S#}(6`qrJJnae`3YbSCafBXw)k5J%oAH^|?`#c_ZoBOe!-u6+?`q|-< 
zaNcxS6NwN7qs`vYz4&f3*N7+HtZ*#;mG}pqMXa7PWcJ|}Dx6ikYhrf35i`~r4SiBF zjCUMfJzaK_0;J4B9F|o2@CSA;ac#FBP;UsbJB+m|^gX0+m}X{B4(ICls56P5T_t zdCjH}-%!HhaB-!*%o!56$3A&lxzLN%1wDBZQOO29U+%g~@^BD2V$)AVY3kx6dmXjZy^&PtK z!Fr9Q>S!vA3WT>kILbyC6!ld^Jzc=*FIQDBXU|}1y(30E-zKn2{Vfs^O{+LqcS|Ui z_27RlWWB!o5!s6xTW8TAHsZBSPIGK_6YigL#)w_F122E`;kmHH5|*a$lMxj{hV5+^ z7{^DZaE|b+_b>F%V+oDB^IT<$@yk($r~KB=;4klv*Jv^RVs57z-yhXSxIUPYn-TLJ z-)h2k%A$G+=SXN@7)hnV4q;J&4+oks-@Tp(&g-$k=No4hqdFJxv)QWN9Nq7*6|v^T zsA+cKJixXTtAy;RC(A^6%`7Hcchvd-vg6wnUCIpOzF|jBoKdk!8N;ks#@nvsx8v&S z3U3Z6&f<4JP-K4AEn$u_ewABV7BM6Czl)qw>_mgm;MLzhsNhm>De!m{;Z$?`0}~q# z&^M3H;hLVu@5@K}NgSENy!TWqHWpKfhsvj$H7J|}h&Wp+E*0R-S`VHY`vhWjirQTJXUn1+6N^3J7P+al;>0uUd{#33jd!It=dc~adPhcTx-Gej^ zw3DHpEh=qBg$#d3Xu>(Nt1PECmcIJ^yG)n;9hw3!3kvl+jc!! zdHJjl@tY4qx2KkH9kYzVSsya-X9KO<@7X+lXy!geFpq_>GPC=w^+p>(`jJsCQltu;mFY5O5;uMQleFtIa?)6-3_I_g0zC z;pBMj(|s-d*!2+ZtyhHRFk6etJ-2c?Fz2M-tDN^Iu)*!e2jZn!hy~#R_o}CCK=wQ_ zvO}63@(s@XHF00Slx+8u^qa6kq8G<87p{7Ybga2Y)NvVCR#;-#_0QqH{D-Y9uTkLZ z@RvvV2T8=DLDAJO^|kobjJ}~OvA=OO$EQ3$T}QD6*SGq?e+u!oBlr5lMtiWQ#;HYy zQ6wTlwV6w6EekZ(7Yx`TyZ2J;F5xRq-f(_8mYJuhMzpD}cZ47$Mz z$rQ;5*K888D{0fZhZ+=OCBLLY?)EC?5xgd<{ptwz{J_Y_U)5^N=B4UVxc4&75qDv4 z(Ulqe@vY(WUPT-1qL?Keo>1Op6vct9fsn=n(WMc2% z9kz@WRuC205^21(9=pY=6rI0-aHja@CPj{EY(TxPpKoyqA6`6H-%eh}qaS~1D>S3R zrjnIeSi4f2 zQx|SZKh1w`*9`W;B6ML-RWEM7_XmIAZYt4!SNRHAc@;l?X1QvgLm$3z_K0B}c@TH? zA18fNVk3w%4~%ZBQHi!!C$&n8R&g{SV_)YQP68VffnPnprZ+$X7&PUH;4M_sS{%@CXJ?jHG z6>lf7;OgiBh60kq?Pa7jA1L4(amlUk0tqfy!rCi(WC-y-s=<`+!kHn5^!ky0@Hh*T z>hd1o$DYt5E+E|d{w8?j%9<6tJGkDv$ACo4+zL3y%wq+grj@Re=y|+Rkh(vXqZ5nr z<9&TaogJzgeFX;ODUfGi=&|hu3ve8M=yU1OcYG7Eeopi$EAjQAop*W35_Yidr;=m) zI5wgtZ`gaS6L;gu&Pm$K4x6s$B!(gWtG1Kc?DC63V7k}a<~oP4*Dp`4R|#YzL|P;! 
zix96_E12kAXUq!QhJ8kCcgt+S8Hv-I6y!*8suG%|*g={6sE*OvFy>;!@ zv1BrlzWl!5`RH%F;QrkcJ$KM~TBXx%;`>zhvJlQ=U(;J>DIk0FocW(P7I^pk+U~V5 zjr9p{F*VviC4@9m=~BT{7+s$>UxWOFw>>MRi*Dms)9q1W$z>7i)8aSR`^ZkTZDLk8%3ty_|lU4k3(`J*~2iak_ype53Ko8dNiCtSs0QmT8OLna{ab*}GyVIEvuE8_tkJe`BwBt2v9cK2a zvOsO(_lp`b!`PwgYR})CSi$9V&M3HE?#HK<#k0u)D|m#o!Lvg=oJ4=q%-Px?q}Piz z%R&_s*e1icn3=odcU%JNl+G zq)Bi_@G$PzwuB`<*GRlFxq@ep?XUY@x`3B4?rq^cxr&(z>`NFsIf0#cCe|6efdW!{ zc?%P3y0FGyoY~aRQ`mU?W9QvpCa}N*LQ&0MdNKZ1gZ)pU2XMVK+9IY+CZ02$2an!i z1K$UqNp}!F1(;|vb)RcEj$Ra0PF6FXPP?J+#-oY6~L-Dzyy5zjY^ zj`O%W+sE^1$i5ASmp@Z#U?iA&rdF&(O!ZRJulgZumt?Pddt@(;`PUUJ-I&L$u5z>KWl)KL zq9?Cqt2yBM+47qQOj@wBpB^mw5lh&lcG$_uT^-m&DG7UCu#8>aow_0!uz+Wq{3tjz zNP%0qcJC~vKj2PU1}dyyJMkr7_Pq>FPS7*2`gP>&3U=LSw!}N072;@EoMHMq{B%a~ zucAFP8xa3|ybN2%E(~JM`;IPZSeN22FAIg_eo4nP zZdgr_qo*-(o=^!ggyq?}Q><{1nKbFNrwaZ5GJoG=nb( zcvNdeQ(*q&!}A|K?%*rOlBFF|may6qDf`9waonffqA)IM75{VBGGu!)8!`9J>h+sb zBtp<|Aj$GZ40hDAxv;=}t5u6)d+5O2KC zeF*x=O3*mjwo}~7FlfC#uoqM$iVche&g)wvJ%6^R!gD5{1gvkW zib%aIaJPYXjq}A%`0ZM9yyiCwcujRFWp3`n0$&?!-Gou#o#yj1xm&65;D;-%pyxHN z-)v}obD!OdsB#Yo6<3HI5Cxay#jC zd;rVpak5f(WdRz6^M&QU1x%juqkp^6A3V%*T-dU95EtF1bSO7v8FMmnJmALc!IyGm z4Gy9B4O{%=!?Vl@EI~ueIaq*&Shrj_aQPbtY>;!gEA^`z-!(M@RdGF7u+sxa_3!2Q z6kk|`?J_IziT?I_$g@e@^S6w3vU@Ln*|}RNVVuAg^DgtLp}3&3j+OY~S`Mh#&_EL1 zK?bs1cgv2pX{=sGYWRA^3jU$Sp7o=56Mn;^;aTj+OZ;7c=dWFs3pfe?W~J8Ii6;op zzYDy~LD-0ei{or9EPKdP1 zN|taC=95Chx64=XZ@Vn7mL|5~kGl7il)DaNmK}pe`#Y&b9>$-2aTdkl%59!8zEI&n zbN8l-rA0iuwWy56X&CFN{e8IhQ#t;`Xn@&EoxzsRf`JP57Yn-c+ag2tFJ_@s@`<{G z3hJY$ya0xrZZe7O-`r^Lv@nLfU|Q70A9;a$ zw9-4X0!Tzcj97FAl?uxvx1y(hQ=q`u5OgNlA$c`jaEI{({%9pYaAP`!nE06Xrj4?K zRsK-#4&vuPlb^@czgX#gG2g1{ReU{?^!_$zxJ>+YZc%6MOW=&F)NXE<*Qhx(=;z^(_U`lFSXPP+X^;C$;(AUlv&X zB7UBqe+9dhvTQ^hX>OGJGkIH#t}^j?D$^K3x}3gZC`;?9M{=&LhIe zx^6ccRJ?OeetVZ4O7?apVkl1|v+nZNhF>#S)wQdIb>CBPnYwKm_iYw2wRb0tBIqnc zlFZQFO->{t)#2FH({+pZJ*vFy*U?(My)7&9^4>YT^b^wV_9T({7YkJMJ@d2jxNAjD9k{#BmV* zVMT%m;yB>Fji1E6D3HO{T{KK`j2|w3T8$1`z 
z278Ro*vdZ}$99%S+r-(hK#?lRKJ!!>mj1Uw{i0_t-oS9$9Irl$Pdt2i%B!3me6ElB z1s(0jeP{Ld&P-83u5{qFRZAnTx-+d^RQDGKmUpZ-pJ0W;VcPAGkX4LkF!H6;g9@S3 zRcAi1OyZI1&y?(CP+t1~S>z?c@xPOBA{_tQtq>x~{lEO{f6I>kTXyu{vZMc&9sRfL zXfjbK;psX>Fp)Sroirv7iVNp6C6%=x{nPF$t1@XYs#^glwuM8AA8|)29R#kGuD5yu zaHCB%+hGW6I7CcxF%hP2RTO&FDNiuhD13t~D>dKV34A!BWh zoqVDe3?7k*5|uKCSpCh5{y8e}`p5Y#QK3q(Qfay24owcuWVWo{I;9K0WS!Ukkud~A ziEHIy;hy2SDO>}NpY2NNyCe(KVmmkWKhuCm{ZH=8GF0gc68QTT|@cm&)yEfGHy_8NmrwR$=0(HY! zfL&4iibY<^;J9_F+U%GDL@Td#Kf8?vF&FK&_>`)_xyDw*r^0mLp*w>uL9Q(|9)wBUlr)pB^g3Pn&992E%S>J4F*%y zqHb3(pu};jU>uhgEUewjSm&kzTjMv#trRPPVb$ZC20AprUvb*!{$)bi{-Kq!P-&3v zroG*ekL01^zyVxQ15%2w)A4O`khJnrYz)bLO8A+zbV41f(}^opWeljOyVMrxs|XkU zFI9~us)88Hiox$z6bFMfWSYd7P3xsi1J)ICsS_>$M%e$`nHn>wR=I#)d1ONUk78T-;| zd59T#cQI5+ALbZIQKsm-ZP_BF)V`Sp;+}VYZal96tOgEjVLOz-cQZxqSd28BopM@c z8BqX{y`+!PrmA3HbJJ)pg$|<^1xLm&qCb_5KhxeSfU-T?hAJgZ==I|HUPxm?=b4{w zYVVjJ6)hi?@1X(Xz4BZ`t8}|{qk<~b zq^opvw#dVY=C--_Yh)lSKRB7+Spilberf&@q5wLHo~?{0s?h%6i|cBN7I0OX(r6<} z(Cn&o0XECQ;Dw}0wY5@UZv2-*DAK`Y&&C{qG&-CL)snlGsQ}!sDc4zR7?auJwP;FrOl+SqbM-J+_m__IQ(jZ*r^BW1e z24sb7wLknw8)TMUe3xu#@aJwTM|mj?q%(!xe#pbvj&*?zob}ZUzq6iW1gmaBNl)$N5p8V&N3iNQC$(7Di1f$o-U2e{4L$RMj zCP%Rr2!1yU%i69AyctHuTOY}RnatI1J?rG4e9PR_%p+BZ^yRl=>MH`B>!R?xYAxUv zzy!^v zqyb7E3plLM;OKCua#05zK8TU#-L5-;N^hHaj)FE&D*2Tyvlx&+(y@>87+R;0qZCF_ zfe}~UK_3l$ux*by8yuwyrQT%U%j@Lfjh*fd7JpT6{iLU1ny(D}>nHcmb7@0v`g&?I zi2;v##&0(wyUd?;KAaAM8XT6q9-FmA2|QfpbXzazLf?sOtE`X6-f;W2 z6ntmGx5dY5QQ36(tbHp^Jy-|Ce;l%Z9;X5mVcr8H2}&S*?FRAif*L#_hjMSWH3I_~ zv!^NxIxx{5FZ5$T7p4+FNJrDQfzyv&8P%%{(0rB3QI45|6+3V3=Ll7>d_Bn(ufv4E znD|H*Jp~Zt*AWwlR0Wln$JG3HD1c!;Kld4N04bG+H$BQ|;LG}+tL(lOL~|z0`(BrW z0`3M?lV5bOxN@)K;7&PcDA5oMe`N^#VpmK9PAEblfh)*wFaVaVdvonmN-+koR#pdT~rsxYL;+t2kt1teV}c_d!ap}#sS%$MW@>tKal#Yh)souq8pjsQH_uu${m zp(-30C5dkwbtb7|WAF%7=iE)4xPR)r+>rG9Z?432AUK2^W034uH7z0V^#ds6oy zzUHwCD0RHByf~~1l+V^D*Nn@;%iAZnT=12HyKgmzL{1t&Wzsz3uD=}gv!-84R7G~t z-ewo+tvYNl7`RoSEf1G&;ey{o)gichk@K+~;vK*4o03i|z?vfro<=JMIHy(AnKjG9 
zTFHUCbBLcMD;l_4WYR!&a>p+r2PSYCw+M;cQ-hzX4^IupFkrB2++&!c4|#iRPP1HN z!22uTZ}>b%_Ptoq*u0z${5Ir`p%hh^*Yn%t(xU<3Vmx%NmIm9(#6_}STSA!FJ+IAd z2=8>V1ztX|00S!jEB7|o=|w=C0u*u9*IpJ2jJMSy%6NfEe9-_~d2ioncw(>I~% z!gFfDUe{0s@OO+ZT982YDWABW(7=R0pFTS4l_6c_ZKQj622$XlwK!)4KTz!z*PdUQ^|B@|P;OjQON(L;9R2;8Qj?D-WUfN1TgBnGkgh zD{QV}!ky5d=MOzq!D`!%gilol;J1-+YEjA#x|a-6-%hB)cK*e>z)l9VKfTfuyWax7 z=GrHo|D_BDS81fH1{fTCuU@v1sSc*47n!1!+ac?~qkyzpBj^o3l@u1N3Ho7tFB=Zg zKq1y@JT06C@2_7s2z1v0>Slij5p_Axs_OM@Qr3oYg`M_Yc{F(H!;=u&q5`MAVkvwZ znIIWpp~B~{1TVL0hd*9q!tEkOeDS9`G$hy(_w?0aYvnU-4r@KIS24R~b3q%PPFX+L z@>U*rx(r%fv=!j!fzZvCzm%ZJM?=Lo0A(vWt2-8zrSoe)&)p-V|s0bz$f32^hBFoZL4+#W7rG|2e*oa`;h zfTk}NZg&HazTRK&IR9J?*fu;4KPJP379FEz=}cXyomzWe@~aY@slQRZ>6Q$5G29F# zW!2zojP-@45afp$i>UZ)(tr&Gw@wR$D#E3w0-O$W#-Qvj*^6ybfM>jY4s%l&4Ewrl zY8WsCf^FHarbY%BoO}0$9!K`AWAF;=202ixanJX3(}VXse~- zEwxnNkOR&Q%9*jI>d^T@-n-&Al5f_>)1(DC7-zdO?(+kkzkl2HKS-~7?)b$T3evzX zNnm#LECT`tHJB@{+aN;spxw~CJmd}VDQ;jWz`)k?b-PG(IM50|Vvv1m^ZJ^7bAuA> zH?hj8zCi=t+@T~68yei45*^B6QHCvR>hF0Ul!w#pL0tYVH2B)rnJPN1497bqnhqd* zUF?(5=nhf@@eh*80*Dt_UMF9xLVjt{Sw)>|PVUp{(ezPMJf)i6cU3-zA$lIc0h|XoUNkK=82(tx{8P&Oa= zOaAq`#l>NCxD!9yaF2??kwDF^6@&{$Gxc4IO3 zvKma}JQ$cLH-Id|miW+dU67gBSC=Y*LA{Xp)tmBiNRFje{f+6+cZcPv#egELedocm zn1td3m0gETxC}sZ#Nm*(whkzK%W35Qs|VFrUB87)=s?4du0%^GfT~WffeM-`lsF&X z_QnYLqn+A5C++}v4!#c&FOx<3Sll`EWH;2@8;uM@a(bn)>s{tgU8sy_NPW}Z21ifc z`Iz-f2bJxJ{v;RZ!_!Tvhp*eK!RuW_${R@y$X0WxPkx~VuY0QT08=#}r3Yup&PhY} zkzAoIL@AJ3w}(nS;h zQj89Vt2hN8Iv7FW?Bx1yo`ygfuc$p(K?e=V^8w@=h>z`{q|Drw1F!Oj)6&1qq3`kP z&qq#(7i_!NFo<}4*++?h@U;j>YTniEiqnA`amjv?DDJ_vpzn1V@m_;%;kP;Jc7nu- zyj;#t3=ptZE8Y;U4r-DY+q*Vsg1yf&+uUgee4`V$kCe*8qPt$7PZ9$*O+}tGu2Tf| z7-xr&2J;#9NG=Lk4-$%G*pA*W>-FBzB2;#(^I-vYZ*}U$lWX`LHf7_zPfV+P>zhzPAkZN@9X(*u!{TGAVCXyAP^KwgoYM237FWU$0uCsz? 
zXPYW~ztEs1Nd2m=gXN@s%g-_QHtKY36wVG&)fRD$~{@)9qSlpwdSO*|Z(Ly3*~ zMQ4O#fr2r0hZ)MC!qIRydb29{DRCsZw5x-a$A?E(Yc$}!yH0>~u{!WMHR|s`@kCFv z&T!=wLwKDrD(#==0G=Z}%?|U5ptzSks^3)&ZlCycOf^Iej9%^k@jOxs4%XR9godaC zO)$Z>k4pjWDfmV?a;bu#{=#tk!5yH?Dxd~U3J}ZKxccTY6WBJpHgdkxhmC1r^{)|r zNdNlyu0#*<*)^3OVTfn!ktOY1%O?je!&e+W6w#pmF8&~Cs~Vi~Di|0+`qa$G=VNP; z18;2Ut2c@dyA{z-8=qEz*5@{noEua@BjT7|8Pd1HQhMmqGujXm&SiOKiw+oG|4ckv zqXvdqzEARA>7bcM8#kZYjQAPnvxxywHMk#8`}NaL8qEG_j3ST8!PLa0*um3un2O45 z;(KcfmM&kh_)l^WQhT#`A9~J42adEHYYnJAKWK|vm;kx@W9G3}iqJ23J#5%U9=KGz zaOcNb;P+s1Tt$Pxz|BrimTc86^~-IzFAY5~B|XJ;uG|H)%q%jC3xSmj*OX zh7>a%%R$ZyF5P1UO#5rvrD-|2pq~NFVHGVoW7E4WP@1{-asc01j0{`HQ3a zz}%ZKL#j&xR!5Ni{c7BLyz7B zs=;SxpJNqqNKX$XzIj}21rIL|@>lw+L-N?W%+IHhzdRwI${ME%JxY4N8I9U-_GA;8fUla9YLqPLI&MWrlLdfG&mZ=x@p_shR=Ap6%aD=9&=%y>c?bJ+nVTL@Mvdfg! zGEjw9WxJrj0v*uGc%)i&8}T=vLR#;P92B3~&7XQt6Fgl$2bD(BVGUKYEpLSh-O>g_ zJVJUXQSv^@@~Il!$osLca-#wmNE&15x;hZH9?PqHVFH!o*?gH3@^HlI=jE-R6+z1W z!qmJuz-%1b&FV5O;Lojb(&k3-9hD$XuLenQp~>-GtyhBzq37558}*@SF-e zwTyXZm-8Mxc-uqR-ZQ}@P7M&Ne|@!1TLsh(DUL<|Qw6KHB%UEd%#%YJ+w8P5$T^aO zwZiqFU$L`Gs@f3L9ditx$ml}a2|3*GNFAP3+GaI$YXMzg>EIV>1E{(XG=IQM531>Z zH$QjRfvO&*xLy{l!^A}s?g{C^(aJc#0?dc9@8do7KR)rnt?0kf%Q|qCu6CUqs0<6I zJS70aLro@#AdC9bH0FORONJQyY1o}s1)uw_w4E%NN2b~zA+KRv zKe2TEB&RB@hHUGgXd8lXxtsUuDMN5G|J^9_+Yr7Sp|j~#YeIL`h5!erI&6{9zGzD_ zhY20Ixu=u(H^$Sp=g5A(nPd2#?g7SZ^vR44S~+9 zHI);u3kEHZX4x2sAQ)aQ5XztmGgmC`7@x6*OKCq*(y z-%g)=XaX5=Z@1sYyl;qiVy6$EDQp~GIzaTr(q-`U!!TqBO?2vfi-b1xo7pA?%IL#4 zF5_0C@0h3AJKDZkP8&kvpT0dUi0hvMS-wrfO#Y4JaU2rTcei!&Ad0*4hCz z@Z|TS7hoK{=s#JfC#M69zYpkNdZ-4SEJqhBS=E8(WqhvOL(G@UpRw(8)q#P4P1aF@ z+aTJS$J+fbK`{&KN*)ssgPoR(EL<*S)OTIiJn$FdFJ{bu65 z>f0pn%0J-7(BKLNJD!#&n{9@tQhQ2h{+2M1Gpiq8rwpg%-Fe7A%wcr2)0(4F3l81x zQ)(~W29t$ooDausa$@39+*S|V-+L}`ZC8U*T{CL}SqoBC&INJ$szRXyDQ{Xv138n2x?&HO z>_pu^Ur7*DTi7uitO~U~&w9l*)FC}=T!Pn24Z5lhCpxtt2-q#AYyV3X(o9-2M}5`d zTIEu`;eZMp^*t0(wWI@aC$>%DUgZ!+EH0H-1Srt7ElBw13aZ^h`#2vNLa$c8FA?*7 zX7)vP)#IAb${LYPsKvU%|DWyX|43YMj{k#O|Fa$azwiEMJNnOd^q=kMKikoNwj=4^ 
z`3kikmB1t^qgKU29vVI+mL4V&Az!=DtJ#AHaS@*)ybq~@nMt*^7Ox6C8j}1dJ0kN{UchQH(#ZkwV&DFulTYRf8wp&ohVPhR_#F4uAA< zTG6`h8DNcNCNI13)0eQU;ovNxprru2nH4U%<|;snt%$RfwX3^?~yW;A)gV|F4V)MuQSz8U4n9k3!CU7Ey)kCQ%;u zv5iJVIpfT`+c?dKwD47ED1mIjk#U!05-4WKc8I$vK-(+%IcY;BIQ_Tru79jLJbpvc z7BSU;Zx2^A&sypLXTBbX6t?YrOnAKMaIgey_3OBqjcG}Sm(TZ{vCs$C6Q&d784b8v zBSf)OQ-Zb?^H+_WB&hQ6FL{jd`L1(s6~E&9dMg&=_zl~HPOG`aCB>0IX3+Rbt*$ya z*w>osiW`7h#4agkMqFkTReZRkrVLURDpB!X`19r~IwWEVnAVxfnHA6lU2fWimQrQt z-_uY3S*{4l_w)mP-FAjqXJ)Uo?b@JderYccI}x0}iNoS8^`lfoYT1Q46vhwV=TKv_5__L>a!!X_DXUM*X-$4mZn%?VtJf{egvmdqO zu-sMEA#N;qOBH^o1vsx2s=(~XUg-f$uggB^<`^>2fZ^YKX3ELZAk0~M!u5$M{4~uM z_Bf>sMH&>s%6B!`?Ad7*#UchXocwnjnTTMks#sPk7mLv|gn&u5 zNQnh`knM9kGqtV=95dhci@Z~Xz^}mtX$(q`edTySI86hFdRoeQoJnx8XXf!+IaPQv zv3;&dVl$*rtNcdsYbUUi4#jq+XV+FZqFZHQp}$iM+t6U$cxC)k_Wz_3$CW0F`dMB(Pmjg5fUUKwka!Wz@8_SW2O(q zfqI{RK}K91mi58S-T~it|A0#Xma!g8SxDMd8UaK7qy62pVi3R^!f3cj73^;@?w`of z0ZI2$N7a~g;VmiSq|qr7$YpALH>62oJ5+w`D3)_Yv~F(Lz9Rtpt)kHn$0WdqC0Tbw zNe^D#2>ZSl+byil%=NlFSAa?3RlA`3O7K1H_Mrm>I$-4dd!)@x4$9Sd<1akL*Gn+y zXZFTrjry@W*KL$B-TK6^))U|7zw@@c>oGjMm$Bt|f&$oX`x8+nsR$PgTBAus1^8Jr z&dfcb0i@G%GxgK>`W!*!uOD4xDMTuzhh+7LKnMy)d#NVS7%XV$7s6s7)R^oq~vflB=Gz5p<#L z_&cE{Y+L9(MyeU}Qvw6JIcM89HQ1SE95M6_+Zes6{56Y2C_b-CeZL|Dd2V|@PKGLg z$>bERno|+B=Y39H>`;czj&L6L2?99q1VpGlP=_6xj0-ccEhXiel3cQqB7E`taIs*5 z2-61(57pf!z;IIB0YfYs`H|0Q7m^4N6>vAR{Qwa{Jvh0lM2PUNu66#OfHpAg&*K|t z#rC8NuAZjRn5IlU`Q@3UDYWJXRQzF7feWk864p+c!Us=b!O3p~Ec1;x?@U*M7f!BP zFG?`Xmg<$3xr}MeW`nA<4jt&5Q`Kq8VH_bD9T>)ZF z*jgH8sDllE!ENmrHJBNVb6W_LgX!|CFHc%)!Drgu58UBmP{5;RIFhagRkyAWAKa-3 zC3>z2Y-`Fea;qZL)lmuj{qM0Hn$`iu9}Wjk)hPhaQ6;sRR|MEv7bsgbO9F{KWcQZ^ zny}*-HG0rb8Fq7>J0yaCpBSEW{)2)fK;k9YRhzZp;q2&QgAx9{?Rk1j2#O$YZNYM| zOc|bz_>2EdkcT&s{`?8J?2-PF_3KxfGB9^G3ih!p!r3ID-=VvV;3cKU*EEI%`wDlQ z6SCI^@%_`ZZF>mN!NFwtql*Av^0c>}4!|~0d4s=WF zJXau*Kt+3yfqg{*K81K%bMza6ciI6@UkPQH-ZC>}lBNO@3~r_*iXqHS-b_4rod|l{ z^b0$3lyTQ4wi2lcOpAO9xRHBM2TYt!Xh@_I;jfZ5*Jy+exN0AX_fJ!VpC)IuV{txx 
z@wxEtaeW1BSIyF6exMDxwyMN1F?Dc%QZYrQDMOiVtV@x-4qW&obYjPp7PRjBUE_70 z1ZTCcWrUcaa^nIIp{Nh zY13Tg^1Cq!N|3bbeepw;0#xZ*A1jDZ1tLpI0YA1`Jw6<~;2~xJt*33bkt21%$h28P zJw^py8Q&Q=u}2l&e0DiCh1avcF}JDQaRNNd@Km*r(1j$!l*Z~nA|z`*{La6E>5-r^ zUoWHv9-EiuOMWRqr?imWONs&rl)8>o;p^UcZ}U|*F-&vuUXRq*SAn#wi{f!CB>2ca zW!3UV2f7)Gn4)&*!;NeYrol7VmiqYEj;9OCP)D%lXO|{G<}uZT>O~T44(Yxxnk)q@ zh3VrvC?tse`{VFo6Ga%J`)6(A(SqJ6B~!(0MYvibFZzyI9)4_DdEsY31p7ng$Ijt= zZ!D;OcKs{?K6^@%6;iN0ZQ4e8d$cM{iDeBacWOhiwobwm3nJ{=DK`8nM+q!1$NkmZ z&;n(y_}Q5|s_@lwzR3%eLDOn}Xco(VzopOSbw4A(<8;#FBiR}-G1h)50Mng&VviU6 zOj7`>6HW}lshU7E5l;}=rvt~HXit9MhUHi-v)`!|2C#PjtZ^dbl}ciy-oWD4B^s+ zckwbiRpFM2S)0TVmL0wXw*(9+!%ZPI$>sfeuyHC*=g4RLxe4Ejd&?Cdq=6}kElUB0 z#-55CY{htQi-xOekv#P7-O2A*q6onU<_Dd)NsvBWU%R0#51SreFxe?44@@GT7RGUY z>tOP2u#zPLt4_t6PdING-4M!Buuy>eel|lDLuzo{ul>*dR5>_s+}9#Vo&YaGM4i}Q zYeT?1ugu|IW#DgnaT-;rgGP{4#0;h%e&zJEN5rYXSElt{)^T{9f8PFp_LTtqm#o)r z2jO_~$ANI2OBGlL8Le*T;OBhCiI)M}j`^*anE%czL3dA+uXni_ywu=j*IU#E@5~8_ z88#F6_%xP2Ye|GZ`;JwnjVZx{)idRF*jBE(N*ZFRB*JL{XcOmF0?p5UJEO>Y;F@3` zB|1%neUkj=n@Gw)DH*$a8{3F`*jT$fgA`%8!6Iz+7rw6jrXOAENswwsYo~sf2c-yu z3zCsyFf11QRvXhvKUhoecw!q@S~1t?sIU&)=pM-TIi(5`)Hkn3U#UZ;lm0^vyziYW ziyw|m8iS+F&j$`4u$H>YQux)FI4y^4?wQQflam_z5qu&MF ztD^SwK4VmZqhCdO9`7f?qROGG7PA`Q@FZDY5zD}Qa?WK9>M}4YnRWlRvNCYFTzcqG zs{zTPU55AZzENQl5WUe!0JcysUF#E8z|xT#W7DMsm!0cg_6tctw2t(?f7llB^(p(u zq(LIgfA|oeRe){PnTI<%qjiB)vgkkEfNi7lhAS@(m4Rr@Y<`kK1~ONvB`g?Eey#U7 z`7VG2y?QR&&qQOn!%<~#wwE%nT@^ps@=_6I8_SHu@b69ec*jV;OC65fAJi&a)`ZX$ ze@5{p5=6Uki0O1GLRL`0mGJYjpnke!>TZh;%rprkoP17#yJ>aW`^)5^v-F12sc==; zS@z9hZ9*RGdef#TCF(HCnxZmSMFcv1Y38~_%Blf5@=tui1T{d|h`sZ*ks6o>@|4&eRD_WN!yh((rGRsHV`P}L z7MS(3nXic|!2ZI$?i%B|P$w6rxb?3YydG*=~DmvI|==ar#kBD(G;t|Qz^ z34QFlm0&e4e$1Is4Y;CG2)z*@j)@CB~-{sug1yPZaaLOJX3r`R@c zc$MMl#oy9Ut7FtQi1VUB(EaGq^NMhC?TXf6BBqUBGl|SdE5LbyT$v9F+Aw3Jw`T*} z7IfYyjNfx4K+Egxt0m8|-7VRcVu1JQ=#}xOY)*#2Y7(?}PmCfwcFBAg`4V4eh-m)f zK@v1|vpi|l*Mom&eD#m%5`c3{(J|8kJ-GAPk8KhEU1Z?VH#%OII;A^(HmNJam8UaO 
z=?9eH?eK^3-|je$ee+&@-zW_#>hkZ-?N@;!Q-zd2>pE~CDbOJP3kiZFBQ8DkR|HY> z8+UZ(G{I`R@%A8DEE_)6}qMq9R;hFlM4)d&^<1+60?H0>o@f$Q#D{H}Rm`(Q`@i zu$OyaG=oVQw9M42guZIPdD2i`LmCO#@7nG>hmYg@v7Msi=_-Nv^Tb!3^ZM|;-nDJ} zemQugRBV-FrwEUj##$_}%|9{xXSTjD&Ue4c1-D|FebbeqNrI0w{NbGuSMMW0SN)M! zS2I+goA2A2#i}Z7^hb2qgsJAyJwgxK!QTnk5vza<@X3D(3t*QiB$HG-E?NfU=#1Yn=KfV8|_M+I2t; z28X@PbAD)ocv==Ml3bZ-&F$6>`gq$igKW>wX@<*6cOSLyoW0>-@((UPnX2? z0zHcDT-vxiuy-^&m=aWAq_lnZOaPX#JD(lp|0Iu(b9j-NPAGv#t=U6eTzA&=S}m17 z*8=mnM%kE98`xGly)2B66_~!zcg?bBAT+a4*W!Oxvsk#g3Q83Tvd`us~<0`4rC6M(3N^tyJLG z!KjklbOJ0fb}A^U6Ja~6ga7sG zg33>8cM_Z=!SwYr>cb0IX1r&f=UAZux2Miq>u(}}P_{{YmW~YYTMi!J#C8FJrVeAj zG&T6FP{K603*+^Sh;vWyv5egKY3`q7MOc4*_`qdNec1K$vB7zd&DhS@-lah%VflCZ z+qP;IV5YyI?awDd)oRi=o^8s&6E&_~hvR~x<*v!=J;vZ|k#A^+Wr;l=Un>+Z;=DFJ zee8gjJgDy|SdL63K*7wNn@=$xMdW%&bHMhm;62Tyum9lZUCtGHCCv;@?fDhCU?d5o zr-T^&Fb!aPRX18_p$_pwHl1{HRcM$A_jrHK0M6EX{5$#* zstm(U+OakHO5o)tSFZ6x9sCKnO@Sg2u1CkUXqgdU{keA{-CG-i4$nl?x8Y-kMDpN|ZXUc}&3uKf zuxmm85w$K=K~-R)zV5hjQ30HOYiu>e@=c(2;C?d;Rrt*;V#L9w2p!1*0=ZcJsgJvN zT5pX6$6nufSluK8#<#7ebou1r!rv5+vqTwS9v5W0h~xJU8CkxG3R#dF9ucKICBX5O zVHSsG9bi5EYb9#8Ds10u>zdlA2)n#2G#@JJ!4C>e^gHI2Qk>j*MHX~n+{~e*^OG|C z_#I7ldVy`97yE>-G+}<}-!acmmLyQpPqr63hmWVVWp2#A(gUyVzKT*?6%cGKx774d z2K}nPc2?L<#pPN>e8ND23vq$rJi#PLjA*>=d0Y{G&-_T7!ga=C#UTlX@lYPTI zpaaR4uWTF-DxY+Q<_Ddv^u!o82~)iEZqc_f&Nj{xS< zSo`JfAZ>lPA9_ju6I}^LbiB+7d(^=v@C8c`em(~|uHGffNx}$kQOAo}1IT~tM7{J< z7f#!z?9|73NUlU8W$z*a&C8nYznKW|&9Qp(h@S%7`la`qrmPPwcQ&2$jV8f**22Yl zcbvy_#%{;$Qid0w&V9GUGNOcS!nuczhWHqWL$46#U0egQ-aq)J4YG#C5mXSdNe83W+xEk3L;ECG|hPF<WzO_jIXCB@HsTtC?yJR(CLVRpIT}Tc*?LMVU z$18#d@1ymTcT|A8%!2kvRvCBSEI!?V^Tx%hA0C?6b~)0kS0PlW2_>s5JIAR8a9<$3 zPp(DEuKvz=F3H#dR&0RFg$Shb_6=Va zB-`g>2Gt>(dVeM=L+qiGMl3B#aB06vAuCk_nxEHtw=|gow}hHx1m@|=s>u1LB2}SF z(VuREc~RoTf)3>mKGvfCqEW1Y2(B;K{54--e&wD|bA1T5cM{OCI367TI6v3EZX-ar zbl~x~N@~!(w_R}1R2v#LaY-6%R)F%WK1cLI@qHN;bt`j*L1j0GRrPpN}B^+gfOfC4bi8M9VD z{U0BWoA`wi;e5s8!(O=FW#kH3_~oYtbJt4Ook^H4;xQcS{zHIU*BBTWkup4+i@TDD zZF;6!d0V^4__)*D@9bFSdt48N 
zyWWs+R|4AUzM@-8@*u#C8UJ}RsF*e-aX-R1CI7E#){;8xcDHChqNoOO4S^nq+@vv1 zDBkxMAOE&}k#)us$JJ0{O;Z}SBX+tyGOqa`3zd@#g7+77pv_?iHM1MXXJYDj-jW)G zT+{Qh#Q1Qa@n$rei3afR$jh89P=a*wNm3#{-hYGiM9Tg$*1>N19uvcSlvK4}AFmCz z>CLIX;X5k<*`isiR*5>`6vEcG*;yS9TB_G>Ji*VMmvCY!mjIIQ7NY8(;9#i~986pJAvUXb#;KbzIc}uz;oU-NfiN|?VtMsl^s@^xyZn^|K1*&(cD& zvA($={Jra_jRLs8T;;um^LAfa^tS)mhj$bOy2}tT|0_+{_Qnj`x3emhOtkRlXppR4 z5ecw4$?o6(j+uYB@ioq}Sph=(oVI=m6NB|r$+16HF<%*5xh{(1LYPdhY`3~1NYi?? z_}sKWeI)9{XIwXJ=&Ia|-YX5%Eax9AnD21Bm-pKn9|LOFJsx{ITNJ*#HS=_DQUKQA zEdjIV3E(+7dYQtj3MWe5W6KR*-#k0CK4LrRO^pNSDuoD}-?X%UWtV}^r>v72H{&jJLi;vA$Qg$xJ?ua2boKEma9P31pm)Rf>;;+|Fa$apO`An@qhBn|7=J9 z@4Nrmj{dV9{bxJ+&vx{m?TF8irooa!LqV2-o4qra(0~z#g0dkqnTVz+vu~Q{sw9&-gkM0osOK{Kd!udi?yEzKo=L2ad*>H8w7Xx;P+C;kY!GesoWjCIyKSM#X*B)=|I)aC? z#7EL@Xi<>*a^b^WMe~Srci_hLgB0X_i(_MsZyCKkCf(^5PebJzv{29YR78+Xq!csL z(H>jF%ZYC(Xos^USBUT;I<9!doWMatA@43V_GQi^0h1)=QQS1#*KhAjw=G;CD`nDg zaDsxY*Vt(smAKjAw*0i*$YsFJE}Ap#l@`;PG3`U{pQm6!(#eI)%@XZ{C_m9eHY>;`fkI5X)i7b}J?z za2odZ-rPXaOh&=_Uuei&sXv}Ig@OvT3T=@YGw`rW4NG;dBS{j6d#1xYVk#vQUQ|(0 z$I_8|l-^~O&KcUObZi+-Iq--UvrrJ%p15|=e{{qgU7vLoW`<{W6D0rbFeSDO99qs8{yu z?J^ReJ|qSw2>?0olW^YgMMR9b*!JFv5jq*ld)PS^kW~Hinx;S|h>EN32w+%4lWUG! 
zBJnVl>CtVH5*tgXrh(yJZ_hGX(qZrV!b?S4FNx+Z3oIieF^CV8qoN-}NzIF1^QbnV zC7@~=ucwultbbSM(R=SZw)cV-P|g0I3)(5naCU%xtsFNEKk5Cbn0J{SI&)9v=SY*$ zq~va!<&#UOb$PDf$Bj9p;(fqT%48MA)mg_sa#=t-L*KqN+P#dj?@Rpgb(}-Xvt7J- zc)j26QY^o+GKa2}G0ySN(UG0}C2o{RMf%-FpZ>ToK~p4;LU{`vO?|LchD<6-{Cv{= zx-k_^gsfeQ6InpQhFd>I_|Z^sWNwH1{slC~bvN-H9tyJ^>}N`&C}@7(#aaA+>%wB} z(*9~H>bdz@_|MxFL_-(6#hd0)tITMtUk7fQY_6MMdxVO7@9$B|u*0tpJo>CWw21EY zI_jmRF$3Q{({K9eOQ^i!eFU2{73p&Awmz6cMFF|4Augpfq;SbKkmy6qAxmH47ZR`lobEYEjEIXm)o-Ui4H4d2Bvpxy0U=gjed8^%`BlW*RsOZ zG*?jt|F)DWg-sBzE7JWA$Hl!bAx8Nh9>%deIH@GDitx-loB57qv?C{RMcIvtT7yO> zUnDcZ363q}v9}q(**`R8o5vDTr9JWOd%y{vzbc%K|qySs>Lo!(cBb5W70 zkF`9LAr+lYmm~~HETWILU%y>TSU~(f_t=-)=h2P$7TWCHHDs&f^o6Z{1!d23pRs;V zLEAqT7N~^L&?`0gVsvy74NXS|^?#$H#sF=xLaTXH*?GkAUG55!oO!)jFPDxIfADQo z99cngyOcs0(kMvSUtd!1!U8HT(i(EbO10y&USQixPAet*$Kp2Yb>Heq=rqOasE|t%>GqMSisGRW8zuf(h-|#ZcDl+1$hWm z8Z_a2xd@f&^n={6b->Tg0xhD$I+@$oKQKZGo2W@Km5gjo1_!1*qoR_iV`Wsl-zyo{ zYJZ3Ig#uemI^6jHW^#k4D-U1W7}Qy;dPU6M=lEm(a@D>rw6xI zsOZY+5Awb%OX&KUI}#7Z=_oAk)&7;s6vQv$AU?Ci2?uifBvo+ysmRDYX?_9^Pgn{a zZxY@>?e8;>EXm?HCDL+2(*@@@ulq5h=cR!EUX)>#%sl$Vvx%+X5)-gErCpCLqN3xI zEj>n-RCH9tn3V+&qf50JhzbnaKthMLPUqdGpy0#w{_oEzC{J7Jv{%OhI#2xe`iuz$ zoiRh!_uXjdYFer2hM+N_j%5iw>8*OyQ#X(HhdLSe56`3I?i{5J z$^u$U@Jx8ZI*(Qe@*0{u>B!#AE%4Y0I-1}f4V_b5#3^RKuxJ=3sB7sxsX9zUdD*$M zDY&VAxcvCG{5m#xXOppA59jME-0R9$-B-}c{9*b0UI;y-LUF`9Zf=>HW8r4|l(Qv@6v9k|x9JXx`>9|TpJ)IHR&rhwQt_N1e z-!0bBy`tp<2My_n(Z--!JarXSgT&jsDGunmXHq_!$pFt}SB-kSF}@yAy|LSw4S-`_ z&t8@Ra@uTN8>{F@ZTa72tv5^%K7J%ZFa+lfgY3w{Yy99df9_J60yo$>lgG6Q!-WwiG> z4hZ1nl2B16qt$CQ8a*$Tkg>qk+d|fJh_;t%{d$6oM5>M$CwG(4@LtbowH*tNrYz4p&><$g> zEF68uMx&#aq>hTMcz;Ls`OwoiSzuG5NM!7FW)OU@_A}xf4V_C?Q}|TC3}^oCH8>eV zM(XvB`nq_X9Tz<(bnx&hV!!`4nk0hP*C|cmi(6=jWApvi-KuyPBK`1=ZQ;yt)%n@& zrf3SH#LtR6*H}SU{B|TIic*o_jg5-S#*1jme#GSN6rSUw;BGGUXc?JQE6c6oA;h8< zt1cOr=TPVki{u^K@jgjhy9{efXi0xE^8y}j5mH$%8`?~T-zhaOojs4%q#yKl{Gp)VM!x*w;WT8D;|C_ib0~}Xr<|uF#+Cn$Tj>YU zk%Y;%rbF2@6gyIp_1=ksct1Ta<*1%R$;0T`iz9TT>Qmg|bCHbtcFklvm{5`I2RFUc 
z@Ktnaxb&Yz7Yp2S4PU+DOFV^5aa=Vi2;(K@Po)|=Sn<$M zb+xWG4hpJI(U*SCv4|!d&$>TlT18TuKN+n%LuU%b|CZdH zM>Z!4(>F|(5qF*%c`}C?guZf$tkmGTc5l7e<=O>g5F~hSc!7*2E=sv(Yb~K-!ys!v z20Vm`bD=3+6yp)8e{A#5nZc>AoT&MC5&5Y0)3u3YbdD@(IwiY^G!rKdT69y(+C3U_s}*{#?L0~kwe~jUFBy5h(M~Zc zO<6n|=sH6~`m!ZOOJ+2r=g5BP;2;HMI9xqy5zP*kdtz_8yDX#YCCgj8k26Cu#4<6| zal$&AR^&BYXEqC5s~JEPq`_PBBfp#t#F8BrYnCZ!LHW*+&{bTAKFKZ({w@M#msdYX z*l>VH8SCSoJ9FrrQ-OQvJQpxA`Cce)UPbYgKD`87S3A9$D4fe&Md@t|(uVD9P<&Fh ze9Dm-CT$LV=C@l!eOg)_4Qe!$5T1XnESQd1(&-n)@$l#e+45cdTzE*sP5&QX2^;8P zbV6bAW=1G8ZF%=hgaIP2mHds^jPa#4@u9Qo98ylZv~MYH9^>ALjIDV8$h=A&XU6L- zW^`k`1>-aQr2Ssj(o7Jz_wuf;4v^T`V(5%B9R`I_~pIf!QE78%lXU`9b6Da8M#gzR5 zoD9$^^~Bx`^8jTJs8Wsjb0{u3r|?YSGOAczO1>%119^RxUghR2V9MrOt#+4&YXA1l z5vA!!gDr@wB^~1dweg{vhumN&>MC`3gpAhM*sN4vvBS}FJxatB<`+5|>dD6h;mJZ1 z{oUK|O@d%J|v*PpC2ZQ4Nd z&VGM;x2~dC^Y*)tPDi}=PM(zZ$In6g?w;c!WK=ff#~EC}0er1B3_H@6QB!QE$|F4N zvgPGIi9do=^qGU}b&ne6AFPxoDGU5irr`Z2hJ69?dco0@7=ADzG^y$4V*E4xRbJ4G z0gk{cx zmlf1cQqFOiAtSwl#Fx>p@i478H~%MkG?cpgo6-3v^N8Rfdftwcj$U6>jSTgeL&BUN zo5J+w(W|AaUu3?p!kdxH?)Ih&=m2w?aH9tssLLk)?Aaj<4!55*_aibI;pnvbZ9k8$ z%(&%O)@&fZ9eHz)_!!_?U5?VeX(rfJ+{bNXIFFt^Vu^D1Wrf%m=%>#wD#}yZmEm`A z0qJTf`@3(MN2$MQ#l`CL$V)2hPvbRCSeeFSw#)(>i*>=v?`2b^$dSyjiY#!HzxIV)CmaH1zk= zfr~K=6qJ@KCVfv}9`QxgDL%Tkh{9SeZW(qmgI;Ax8nYOVV+Y=GO35=og~UqJZ8K5W zmMNUIz8m9@!Q5s8O-?X)ej}jj+5+0ew$=N#$sEd~igeR_D2UAjw}%N}1{VJ#3(H3s z!KJL}7|S^kSf6a{aB9Fq-Bb7W|D@u2y~gnD_YxYiQo6C{QNI{C$efANPgq92)MV~P z1tyTy=l&j!hvt4}HUE1jn2N-W4^G!4t)X7I=li^GlaYa&+TbA^w-tke!a0A;BZjZ< zj>Zddz~F)JULk@DD2%C0IY?#|waI7-Xr15!AL~HtMn)Pc8f2fYA>%s6|K`Yd%;UYW zw%V2+#12x8|3<=PSm3Im^yv+Wbu?dbgC>b_@fU8Xw2PdK5FzN0?J`eA;Z@nE?Tjg? 
z=I(4$ZUGZ+1F|Dwom~|EbuleQV0`gf_qs%y!xFNr6FMh!a}kj~Sq}7I+%)rOr;u2{ zI+|2JCm?G{MuOrmJ1sH(-IO+Nd?0~_{FqTct#bh>garp=UZ9>e> zrcaLh4Kl(hyT2`?R%A4*vCG`8nE{3suIXPEC!?_h`>BLORFrnTx%SWJ1=QVrC&2tY z8+e;9Y){QvM6sV+ucj1Xc11VO@6Iy@pmwQ%NdmqPUi+cuVMgd@uMxHATSmw^O?60M z5t*W0M?>!|AVwywj1-||GQ0NRknerEQS+_dKF{^XQ8m z!TW9HX@Dr^Ew*Pija|94ikheT1xI)p;mq$y@zicy|J>w_{fPHdvD%UFlsoK@M-OK^ zrMQlS(A}=yxdmjGukj>ekcM=vSfAD4IIXW%eSKjM3pk$on)>m^D&nxdc>OeW9?8wk zv=;7IM>!d7NuG!egbtn`*ZZ@8SQ#_|rpm~Om2cBw!2pc6cD(61&=G3bcQ*M-_NGoMY>#CK~a39_l1-d}Y6Z++#sHorbvY3<*c4 zFQV6e$w&8Ol994U(A$|4?C>mfZ`6d;JTfiGvZRl&!p&pNi9(5s=(0G>T+d-Ylkd`>@TgM ziGxYQg;5N+tKRez6G=w!ZXU>uX=Vf4wt9nt9~|Jf6m@L0kqJ~MeCU0cH;S|od25cJ z=TwMn-!zUR{*1RytK*@gSLOBg#R$%!niF4oy9s2J7^&uMN?$|u)|)zOH!xq)IW+$b z4+)Y8zu6WrwTkY%+L+^<#(d+c1c8yBO~7|o^x=9K3!FCg{e0MR5y>Ttz97`%{hr{z zBl#IC?mjM@9gx3(xO5T?Hw)5GB&PfLHZPz-iS^HU+nAvJ7mrCD=I2xDic_~MF+yE@ zjA1$E&64f&A7(gk!!`EWinahM3i+J)MF{IAkCyz9Y%m%9xii1{86KML`B%>KA&x(- zHJLV^8+gd1jNC!K&P7CZYuhXDIfo2Nlmey%DTt@wt=VHOE)c%;q$4`PZfgeuIY{T@fzs?0<&q{Ra8m!08HDDd!YLLfzEd@DDg#BL5 zokPv_+KN>YONjb&<)icR9BQ;tu{a^NhL+2Mehs~52Aw=_fjTX$r@e6et8crCrX6WT zyf5+dzFAhNf$^7R^OugEE}XA~bVcc#=&0MXX7`82b)>mvXm}@DK*!`?e~!RBx}a3+ z^ZFVxs-oQ%FLolM^Owr*ZGFcH-o0O=S#Uk8>CM<2l)8@0jG3gbOHdHQF7<;ff@`Sd z`LO;0oQDQ(U%YI&nFZV;Z&_Z(`%y!~`zxCmj_W`bn<=It(l-m;5Dq%3W!HLj@FD}; z&th-w`AR{4A57d->K0JniLi2l{T%Yz_H!fOgBheL1$PoCWHey5wq?68D?BHSeF{ya zp}1$$Qu0`ra@45Xbl-rCS{Yc+H-yrVSFdk+YZWU9ym??EiTS29e|0Y2$Cy{t9>~qZ z^`KsE{pJ7h0^+}e_WjFX1Qyl4COrZkie{6@Yb44F5t+x1WGHT+nCXfbf!bpLFfOWo(fw^=%M7!z+?|O;pA_^$FrkIF%?n_rA zL{v-`(UUE%YmaK#Aa(KbOmosaB3BMO=GrcxCp0&PV}^h)uW;mukGr?GBniLDzh?n z81G*XW22ZWWYkM>T)t`{31bGmi%)T!yqe^E{~P9y?zts63U*P@L)Pq}A9;+>e$JP0 zeSm^G0uI%_xxoky3=e942{6LS(5|3LZ8AD?hCR#~>k;+FW1+|98DVV;-{jW`nysj+aU!S<9H8w+P`bK7eta z4%?Z$1Q$kl=usQAb&8DK#;it%yXVje5ov+48&nj1=pyfWG#T;4x>TISdR#-uUkRs! zOGtdx`^RCNU%T0>^ZFWSDExKp=Jt;mZ*9KivUqC+7430bu*Ca5`Q-1e{aQ?r@?=G! 
z-(ela*adnQ)U!eSK>oWIfmEcz_3Yvf^%eC0YdiWs5?7q#|H9J$Y)Aj^yZ_ma{<9tZ zXFK}OcJ!a^C}GMi?+=D=dMn!-WSKFH=8KJ!mLMYvp#>Dg$cWJ;=Srm!8L4EnsoIHR zdLr+!N|n?cx-fOLH~i%?if)+~FViF=ax3{V9iO{poKnXSv8o$Zth7+KY^~9J`}i zDu8AFyDvLaxUrm~(%2}(fa%XYgzQpmzYtEkQ@;h%(!Jju%0nXOP_}!Dv8)go`RqHw z)51nZNsC1~Z1}!{--WULY$YRwD{GAZ*vaU7ZtM7V7BYHaUMw-eO-3Qc+G^5RCQ%Da z8qYMxpX1*=ZmK~>Imho+THx==y}w%b|Fw7K@lbaE{~sb0(LySfgcQc?OxtDOvS)8$ zhQTm~7<;8f`=)(gl=ieM?W9#Il}c&jmP(r~EmFVNHB^f3KHu->{(k=XeLU``%XO}E z&U-oUv(7c$15~PszVy81FOc8PZ}`E@(DTgRnTOwNRTD3gbR`OqduY7YjLWEh%uafU zCFJ)#CK;_D3wu^Oj?L1kCd5m&rDoyvor|?Sym-d7gs#d_=LG0+c;os>6qFBHbRa|f z4z?N3{cpN!)DUN*Kj+&*-iNP?Ui!^}zqYHHXh1G6FmQAOwVGIH+bMoN{C8v65LKR1 zH8I6!+N5Xj@144;p-bIyEcmoh`5^4i(Qum90e*WvcJ`rJ^3ea`c(%)cY9fWX>XGsc z8T0z^8(VMosV4U9|M1lp?dB?*9~6*UOMI?fb1MqvU&P!kJC9=m-|{K*-#XS2S{g+o zwvVkPcxtL2N8>oh?)du)_j*(l{Vut!rp~O!vi^`!4E%L}$Ry_rI^@Tqv~>@lXXmu3 zu1zZSgdj7k^11=^4_FZ%iH{eu-`%6gMphHKE7PXCeyAoUztO+afc%{fL`4|Fe{}xP z;xza{=XgIB(+_esM93Y6A3P6#-Y^Q!4_c)fY#jEE_^M=Q^#I0?&m-Gc_J z;oq?#N1n+cj^7TPolz@pASOvN^N%j5CGsv$P?mw-S^oS24<73e^zt`~N zISnuK82CG^VL;N;DK&(`g_{?~;Mi21wxZk)?L_OoaOx{D`itN4`_?EQb@)xMwMc)s z>CxS@mUu0o_6k=WdcQg;8FUf-@Z6YpKBb7KyOj?z?xKD7DHOT0^r{Ju_1O;fi2F-| zyu*fQr_qf&FPh_;-NSg11nuwDad5Y#*d85TW3uB7%0C^i6EoMWhDhp_+xH~ouQvJ+ zw-wI~M^z1<3_m3GKB5-^{bhY)`MZ?rh`-7@pIfX`O;`~<_2}q_UyMIFD;ri5_&WUB z&9HZOyn^j8lwY%arZWwCj_*c0G-F{k@hSI_T|#*^F{@Oma`7a5Y}q)#>nZduT{&v@ zcpR4wJ9G8SHTb#sLz2Qw_$jZ>se2PXPAFWIlW`jDI^v88S8WiErLtBvg+i}45_tu0 z_+zTd>jLX;(D%)d1Nj(-_VH&o@zCDGy)_M|AwC}H6|>S%pDz2sKzG<1Rgo>Ps$EUY zzG3ruAnH{f5FYCQJw}x>)N+vj;0xal1-LI+Y;slw&pSO$rPoicC3+WkzJ3;Q{qEAJ zd$oA}?ql|Tiy#~)>{)j&1LHx-wX`v73Tuch8ad$+h>Oh2Ig{?<5GQ)}lYfnr; zyU97_9g4v9+G%IidR%+tir=$`;$um_u7hi5Rue-gC#O3=&&}I)K4qXC?mT`yU?TMV zIyjG?fRAg&>pRD-gq>$jnTI%1E+?{br`HlPSLvZ{%c}9(-IAT-;m?H`2hUEqg^wSe zJ}i_hts@HVEL0wgdP5%>&+ArELwvchX2_5oh}#n#WmEKI@oi_tE=+a}QGRpBT?P1O zXNjp#HvF)Efv7SDx6h5|7N7us0lU1$(X1u{ijqD z?8WnJd?9Deim{DT&GGuyjPV_eU{_AhxfAAOz9Ui=7MCdDx!mr<=eebbpG{dZjj%6& 
zXP~Y*KK59u*;rtUesW+=zk+S(r?w4SZTiCBvawNivbc`Fp>3Q6`H$Z(E+2xpo78>U zr>m&%`hW4-Wyw3^bvT;OUr(Bl|NexcH{qx7@uJUs zn^{30h#s`%c{dyIv2M{@$57Po@yUAqs)3m2S7nr0-p05&;tDHPtCkRLvuJY0IH~c- z@ji1~;&=&#Vl)x?IW`-i@O-U~N8QCW!f&N!lcL|-3#j5G6& z-b&|cqDJRV{kp-`L`JFnV=vfIoOSq4tv&93vzoZ_StnK2K}sD+oCC{n)pa;^WF}<3#+I8^?6_&aryqKUF)IeE{^l% zop_BkRvcr(gB`;{8n10bJ6?Xkzcs@T$3C8m_Q}Hj2aU(%D(+Mh5|8Ee=S;p56FO=GvD=~NT$$I|cjL>%sXAV{@Fy!E06nsacko|bB`cnMwuRMqqFNB`e-f9aSl zh=Xx&&R)8VaZ_#f&gFZTU|n>$R7)G{kPTHeYUDcL?bCfm_V9=H#O;pWV)(o2RNyR> z>oPIk+jP)-;_a&;b{h>47w;b?+{F4}xv>AkKJbHj=S#(T7>5exZDGtu9Qi%&#wx|U ztvg(pJ{|qXOzB7=8oIw9=HAc8nUM(*u9sGp|AvRr}jq-vq?>~%! z-mmOUvR|gw5VPI4@f!Rvt{z{|B^&xHWe!=yMLhc`I=zj?xU=`dZ7(@n%!^NK-^fKZ z5z}N8CmNxA$(VzQYY|r(z1`RLpMiCmL)gN1@Jo}S@wG(kCwPuK_a1=l=&As&Ul^xc7cDUTagPw&)7 z7+S3F;fa1#aX!8_1M%n)JivJ=;$CaPcy;TiHN?5Tb&1xff8To@Wl!j*(UfQN3gg`7 z+F8p4v+z1XmF8kU$a&#yv4h@%7AGFg6PS>jh?8zg|dLcl;g=`=vC=dRMf=c_MRD2fb=yai_jtC(XusZvH`2%9lFAH00U2Y>caG z2Psy~gMZ}vJ7n>XBaZmuyq=$gU z7=iX!98svXoz!1h?nWr$r0WJg)e-F_>XUM%aR|x@2h{}OHQ<@*hvX(ZVjf-GSgZlL zz4{Iu;dH5vX!sDgWa=zHRiz@03Kh2(hX!NMenl!!-VNadAmCF)y>|&eYe9#P0H?w)*hf*~HmRyt#1|9mC3$ak3)|O zTQ+1JgS>Fd85-6p*vH*<7(4;-I(bAVgCNM8G5W{{7x-6ELS07g54$?8oS=kunzLqO zC4u&se@}*5h5E-#5pL~(Yu2UgS3$`CY2)>C;RkJ0Y%$5Mt}uoL8|-%j6ke@-oNDcksX9>%i{&)2OR zd#aAOZ5O*oU$24K@KTbxA{FgtvT%8Vehrafxo)%-%J1bU&z*{K$M%M1Rz(UvUVmd5 zHW}?`efO2z3#@aUDG1o{QJ`I$~CC_o27ZA6^Oi7hE2L zuUA=4yRNnx<3!VwvMkusIagzj9PF39zvj?F>_1K_gp1^Hui&P|@`Rjv+pc$D&<~}e z-8XM@R7!$8``5iJk6}DqWDxM~^TIlUbMu1W%m%Cni`UE_KD>c2{D7|)LGQ(q@+;-= z$KelSdXwu+GxvEjH5WqvB}bBIu;ad<_VElqjAKhTtP-HV+#jiZqzQ2xWQEgx1Bmb?6&Yiuyz zn7otKeu8t2GmQPq;fLORufz;)#CT9q!Y{^prQlUVqX*U%nF^}g>@jX@isq=FM8BYH zsf_-Nc)D$|TEzdW9FNhUcx$D`V(2&RLxIs?)EB4w zxrZ_-Cvd#$N$8thdg-es{JJFf!JG-0H*c#RZ{*^-aBKeH~!r zj(Eu)Q9J?ucpBG#{ocXw13y0_5q_Ra9er1X_N-VyXi)L|XzK9oE3FW(E@4k8;!ni! 
zypRHNo$P8a$yVC7Hc)h8EujU~1uM+op$2z_zQaV00egOTJP^Mm*H@5~~ zha8dLQS}Q!N#Hx?!(Mz}aBMv01^l}1G+`6+XO-)2?s*XQ3)PDqQGY?Y z#O*Wu_IU2Li9NO9w{9}YOQB!JYrB!%V1MQMgTpN|vH1uM8Xf61G4Aq4IDvdGj` zY>DyP=wW;!>Ma>;5U>>GO?m6M{a|;(Nctmx)y%G`6ZhDXV8cnPrn7u}?Eri~c9vaRHuWKQyE!TBewevG^sLRNVP=z|;olfesu|wYxbC+da+^JVf z%yZs6anADhgwDQGhFti6^?Fk)svqKJ;Ep>BU)B*l@A0GgklS&{mE~c_zY;2g@)ZNT zFg`{Uxt>qJzT$*i;S%^`bN7@($GbEU!hDK+=?=W6WAVA(H5TLU^afjZ_~%La^MFRQ z&wXn{f0=+fV!uV=^Py;;kfXCEI5KJotJ9~Hs}O$+AK!X4#2$LcjlKO~Q7z$YRMPb9 zGtvuBSA7V2N8CI4itmYbP+4$W=IEJsL~v-4VgJ3gc)hIXN;LFec4c!@8D5Kxp^B{YP~%!an_NS5@-7a=&^{S#rMGWxwE_ zAMBCM%Fz|$_4KZz^%~_-FMG#R-Z(s8%zx2kFXlt$(vW%CXvgQ?Gfhe$XO{OPQ=T3^ z_ZYL5l~(kQpxt()c0;+7=jbc9qrGHiltn3^yq)}tG7-L>y(GEok~QcjgK|D`RP^v# z<@LXerVqe;qhau38p;WhBMe_6zLu>TXLJJbqY@~3apPViF>-FPelgK;@ zCh~7B+j;gn^s`@eKD>YZ7-WL%-e+$_&VJ>#@(u93~+@JMyLj z9sTgV<8zS+<4zdYVa#+U${n1fAMRyS4u>Vfu1Tq~`j_C(4Gl8dW|QE*++#)pq;m(x zGfSYC!$e1QjVR1J`=)(iq5fNs)17BQevdrH$KT(l$%)<3rg51V+xJFy=ULZmFEYrEVc8JF; z_pafcQBPWv=gz!g&?~2IS1|mqv$;NB5B3POcXxk|a$Y`nLkGbwmpP_~Q!$=b$oKD9 z3j0cAE^}hUwSmhI1-jwKg+eRYN zYJZG7`sFmvjn1mKYly1Jp^=fK{VPJ>FNeRHw8E;EPC(o^@w!LAE}>_>#vSNYa@T&I zDZUPVqvJx(5cokgEp8S8{rB?@ye-3h@>x%BYvk|qShOmwe+{8AJMZOjE9?)LUR{=; z-GqBn&+dktalC6Y%J?{U*d98LaT@E6s(?*L;AbB1!?TxIN0v_S>Zu5O?;T9to`-o= zV}j;PvL9T(Kk0rU>M3&NKWoI-w4Kf6Zn=;jOnQT(rhm;R z%x7m(V>N6YU&-;J8Eglkq>G2{&fnf z(tsWm86!%EX)^s(QijV?EGXLA5W7TEi@p3P?1cq7CiLC><;*6p_3o#a6x}g(`2)(j;w@ z(S~}YHR^KePbR9L8|g&)igfAzkravwGN>q0WI+cY2joFVpa2ws66gdv17)BBx&T$6 z2D*Z7pgZUR)PV-*2{b`3&>QptTA(lJ2l@kTFaYR)fj}4N0evtC7=XcG2rvXjz!*@0 z37`QwU;rjy0XE;{hJz7cBp`q}umG083RnXhU<>SkJ#YYyU=(ly&cFq@ z0yp3eJiut+3A}(e@BzNS5BP&IU@RC1#)AM52zY=Gf^_m=4mw3@{VS0<*y!Fc-`N^T7hJ z5G(@eU@=$%mV#wqIamQ!f>mHOSOeCAbznW%05*b6AOma$TR!1kS05`!cPz-K^JK!!T0r$XtPzoM^hu{%-44!~8P!9eA72qkT1kb>8 z@B&nUm*5q64c>sapc>SGT2Ke-!8_0Z-h&U|BlrXw!Dr9}zJRYPiZvhQWMq`O{*#}OaKkg0Ru1r3$OtPm;y7v1w+9wFdU2kBLM--fd#MxR=^tA09#-O z?12Mt1fzfxa0V{G6}SO+-~mPhPv8Z-fe-Kne!w4$0b{{9FdhVeK)?fh5CnpO0EB>0 
z5C()G97F&Shy-E~1){+O5CdX?1jK=OkN^@v5=aIqU?P|VCWBNk1xy9gz;uuXW`LPs z7MKm@fVp5Em=6|!ggK@Kv`n0YC=~1iC;E=z~GP01O60fFUpf#()Y;01eOq126##umJ~{0yDq`L%}dG9E<=X z0RhZ`1+WBGz#7;9TVMz5fdg;^qkt1|1}?xAxB++I0Y(E);03&a5AX$kz#ohOW5GBu z9t40uzyo{`1cHG8gn&>G2819SL;w+p1Y!^cqQL|Z17d*$#DRE_01`nGNCqijBA5gw zgH$jDOa;@xbdUySfSF(xm<{HDxnLfc4;FxhU=c_Mi@_4G6f6VF!3wYvtOBdS8n70u z1M9&Cun}wm8DKNm0y4o?unlYnJHSq`3+x7aKo-~w_JRH205}K^gKTgF90fVx7&s12 zfRo@9I1SE#T#yIOf_!ieoCg=cMQ{mR1_j^>C~e1RYE2V=lkFb<3d0U!|Y03QT_U?2b?AQXfFAqWQ%Km;Oz7({_+FagAX zSRes$ARZ)uM34lMK?;}%CV|Ny6-)tB!89-(q=6Y=CYS|ggE?R>m1Tm+ZEWl#XFfI@H; zTm#oZ5x4%+y-~RT~Gq_E1x?IkWVbL$Pftp1xA5e<{~;;L$0SBBz~8k?RqdE2F5pDOXmR^(420nzeSG zTo2E{JbBIFO?e%)Bv0}bw9~ZDD(Wr^Jga1ox#?^tqr*?mcBY-v&R1p?2j;7oRcy-d zGQ9ptzN)$0fOBfrJ$UE3+UsVV>*mBNJJ;RKdcgS}o}Rq(>b}7l=QYMk%Fg!;OdD`P zGk6*ALa(sQj0?R*hs!SXi9R>rqL!qXcd>6`MaIQ`6YI+^_MalBb4fd`2mjK5*}9uA z>C9)9UmBQht#es-nJ53U-m2ium-W|4$}bPvl%`W)khzRsFnCAi=7J%64wn}g?mwq< z#prM`|B7)=#pWy2llA3SOmhEwX8hkX@Cs&{l64mH?vnsOpxtVCV5 zMI(Vo5Q(Z=G(t(W??DkSZf?F1_oTFO#TO7(lG5K&ZAO#6l8S{IT!q-#u+geMUM z2gC@1Bs_8G57&?aR7Fv6p&&R^5FmX+xch-=KJnemKS z{1Cn|FPOnGrZbsAJO(R-V;aQO)TPlFELsqqZcG<2gN#|65Hn*wOTff!2#ps)7nrev z*_2*JI9I2T%}2op5adSyt(Ex}V9ko`kkdFQr(bkkPCxyc9I1#yEOV%dNuO{17bJ-_`5uC?#v&n~8qC-H=c@XH zN$vKJX8R7yX`GhRpII!YfA*%-zES^M`@Z!-7W`J#AFOLVLN%L}*-PV+s{Y|8a{32LrDjbElSrat5t~6GVI&3w zD#p8SV<S*ag^N z7~zS7rH{#bpCn0?m}=ohr7_tY4lTgb*)NgGpwU>y3>w{xim_8OP3HelzJ)kAMvxFI zjV!8#E!5zcv3{sw3N;vXQjNcNquDP*x^x)-HUz`1RjL~0=Q?O?-Y=^^e}RJjrz$x1 zh>AhJjGL>IpPj3Vwt)f1jLD)h7^XB1&5TQDF__vMGd9PJ!KBjJW^_|J*OY67Bo2ql zq0vmabQ+CD=dzkJ(K%Eaony*0rExjZOf)Ln45n~tY*QASBh6%LW=1t-uuzqm8H>!s zFoUBxOj8Dz&83;Kn=^6QRES_=?4vWuOn=mA@Lr?R5qZ=x9yp-xdxiOo#fxhmTQsy>kQtG=sgM=+Z6PR z!IW*JXz~+r@<&q<9|}ut;DYWe>pm~^BQm(1-X@#18`OcpD+p-kM8SHGU zK;Uvs*;F5C7<2?YmyRDY43LS*rJB;1 z3^Owt4dKk?nwe5LbOzFCW@gNmJPZz%$zZW)W~QbH0!X7Fx|uYVDVxcnv&fJqD`HTY zTsG5`j#!aq`jbkC=4-8tFs3tDTsnFsozCH~nRGgnOIFNcp<>v?Cc7BJNSn;YVN&sn zMyH#ZnK3wIZ)(nlEQm=O)9iaTbS)a2WoBlI5N^)KU{LAk16&%Hjh@*e6pmvc0iB3J 
z_94`UZ0M~t27^QA!sdTTxj#j9n`K;c%>sihOOS|9!_Xw9u&`U~%9LePt%-IK#9V<9MU-QE82t^>?T zL2Xn2AgI;C^Jhu3pZ3=O{Hzxwwf+AHl9Jk{{z1}DE83rBH8k|mulm>XDR=~kV~WW3(NZIS8hiA?VM_ncC>TW)mENH+Fb*z{`D0#wl(zaL80^- zuGujf>kwY7W=6LZo#w4XP?RVxQXH!@RC|n0o9%_pI3sPH<{ZsC4}mmeijJ5UDL@f% zoJfQm(uG98&uPtq0whV%WJaAhu`nnqSfGR4*wsWQeSZ+e568}kOpg>uc)>gg57|?6 z1Y%^z5{fMA&r1G&#vnb@Xe+$! z^IznXCm&;4I>$J&{J+pU+C<@ZDJ|zCf1^jM9PMd>(*~Y|!}|9%X+F028%2I(XipEJ z_}|f@&CyC*J=)FkizffGF{@3hd_NRwJ}PRfLpvFM(V(R{$f7^TMXLv`hRxrr@HfKA zNB^+cAID#9<^KHa9~RvDn60hoU!VTN!rO#Ys{_5Z%KRz!Kdju3qr|qV{QT?}!RU!h z23h9k0Q{c(H{nNyBMSaJ9k*R$w2eP$?{8Noheam;DpR^Jz~QMRDn`0^z=2>4FD8jx zJ47e(Vq&5a$Wrlwm{^?Fkg0Sol}4rOU@HF>`mG*fmqTUhOrmIZ2o*$WCa35`&$ntmq9UuV}{8&JxkoC2yhz=n%X_oeNLoLnD{#CnX<)DGGg@=c$&(P+he9ig# zCgcyz!qvjU!X1!5qbw{~7M7tFwiXtl7FMCsG)v2$o+prrHs7DSSvWS|<2RXql(Z^5 zx3J*5p?;RM66w8w{L?;G7-a7l&G)fQ5_ogN`HVz5--ji16^&xal1?;95KlfJgGLSH&J8uCN}TW+ksufK?I zYnI?kHxv4fPJlix@qBx4318Hb$Id&M)We4vE96JoO8iH|K07v@Zfv=_XnS(~VSFDiXx5UqfO+(v}`ibD5N9Q)%VJ|KF&9(&87vbmY(z@;G{74b{orkEoZ4hI=mh9F(=QA89 z`1`ofnxE4}!FIN&!z1i_+}YD&9ng4Ey_348`y1YJVVw-f%L_uuCKPx;&5*ODoS z6#Yq8QILHwhv#eAI%fV;evR|@5s7(rj`2YvF8rIwZW+7UiCMOY=Mx;|7s*ZXWm;lv z5hY98uf6dhGBG-c>B0Zrx4dcpn{fhtF)=m-W7*IBzm*=0FsV+#_RiA!+lk-S_C-l; zwfFd=-u{t^@qWJU3>Vw~E4}?=`3!E-zvOGiPkGymi&&no3(bciiVL>(qxt@W<5*B6 zmxg{R!n}|~_E+?E@wYLn9bYH$ndBJn9pTG##MtkFxiI)YJx;){Y_TuHI|6fS$RG6m zZQl0Ahwm}bJP))pPmt|oOLGZtch37wEGNd3^OL`ihxqq%MVmbB#XuzAjvGd{|L?}! 
zU-bW%=CL3~SbPx1+rQOM>=MOiI?|eb{`>jt7y18?|7??k>@YVYJ`tzU{N{C6J7ZV7 z`Tlc$Y_}gFK7G+oy#JuP*fAdSBMog3g7s%O{*Lo!B#L|)w&;_#NVCO!ZYCZ7{&R7Q zIo+E}#vJ0~_hT>CQkc^azf8x!8FR^T%Q@+Haf>w~%^9W1wK=(VZ?%qWw@-avx3;q; z{M~w)th+tmk+Q${=eBFyXucS8#NVt7Th_8|FhsgF^1~*=vL&{i@uzfCkzj#s zmBa$~_MzBrAQAUALj*6AV6NF#HOax-l%Pi_bCBmSj6dugh) zYUy9n{UW(%`4M{-7PiTTCMIU&b4v&Ew~Mt|^Sw>8($bw4X-LbDC8@Aw`*W2yH;mll zVgABACc@qfKi2$~`W-B6-Iy)*I!l$3%KN^xZQ0aefAQ0pM2fSpoL~}fVuC-cENsHL zKj|mcr0S*F^qy!6FpneJ^kSr}Ee{T3EK4+q`gZJ?eY;QNNT&>*ME# ze=JB}wl4pD9mZX5sXdmK9@6rp(B@E+-dadgf3to-4r?#6yv?`vcTXUd zlx}=m;zt^_&FwGMr}aAS+qh$4MV6;|wzQ9pmDGqf_K;0w8I5&`gp|>;-$v5~k%8ax z(;R;xzvzR*3j9c$TB?tB0mJ{Sk>%~k^ATzP^paL$ zC#_xjr}QtWK6WC^Ewtu)GC$24zeu0k*h(d}iO*O(v;29_;% Date: Fri, 13 Mar 2026 10:14:27 -0400 Subject: [PATCH 31/31] make b*9ucket prefix configurable in sst deploy --- infra/sst.config.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/infra/sst.config.ts b/infra/sst.config.ts index 8398979..b344c22 100644 --- a/infra/sst.config.ts +++ b/infra/sst.config.ts @@ -49,6 +49,8 @@ export default $config({ ? aws.s3.BucketV2.get("Storage", process.env.EXISTING_BUCKET) : new sst.aws.Bucket("Storage"); + const bucketPrefix = process.env.BUCKET_PREFIX || "globi"; + const bucketName = bucket instanceof aws.s3.BucketV2 ? bucket.bucket : bucket.name; @@ -88,7 +90,7 @@ export default $config({ // SCYTHE_WORKER_MAX_RUNS: "1", SCYTHE_WORKER_SLOTS: "1", SCYTHE_STORAGE_BUCKET: bucketName, - SCYTHE_STORAGE_BUCKET_PREFIX: "globi", + SCYTHE_STORAGE_BUCKET_PREFIX: bucketPrefix, SCYTHE_TIMEOUT_SCATTER_GATHER_SCHEDULE: "10h", SCYTHE_TIMEOUT_SCATTER_GATHER_EXECUTION: "10h", SCYTHE_TIMEOUT_EXPERIMENT_SCHEDULE: "10h",