From 7fabae81683771b5aa77c4ee66ca87f07afa2faa Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Fri, 24 May 2024 14:20:03 +0200
Subject: [PATCH 01/24] First commit

---
 models/README.md                           |  1 +
 models/black_lodge/README.md               | 18 +++++++++++++++
 models/black_lodge/configs/config_model.py | 26 ++++++++++++++++++++++
 3 files changed, 45 insertions(+)
 create mode 100644 models/black_lodge/README.md
 create mode 100644 models/black_lodge/configs/config_model.py

diff --git a/models/README.md b/models/README.md
index b727d8c9..1fa58320 100644
--- a/models/README.md
+++ b/models/README.md
@@ -1,5 +1,6 @@
 | model | algorithm | queryset |
 | -------------------------------------------------- | ------ | ------ |
+| [black_lodge](https://github.com/prio-data/views_pipeline/tree/main/models/black_lodge) | XGBRFRegressor | qs_baseline |
 | [blank_space](https://github.com/prio-data/views_pipeline/tree/main/models/blank_space) | HurdleRegression | fatalities003_pgm_natsoc |
 | [electric_relaxation](https://github.com/prio-data/views_pipeline/tree/main/models/electric_relaxation) | RandomForestClassifier | escwa001_cflong |
 | [lavender_haze](https://github.com/prio-data/views_pipeline/tree/main/models/lavender_haze) | HurdleRegression | fatalities003_pgm_broad |
diff --git a/models/black_lodge/README.md b/models/black_lodge/README.md
new file mode 100644
index 00000000..87093df2
--- /dev/null
+++ b/models/black_lodge/README.md
@@ -0,0 +1,18 @@
+# black_lodge
+## Overview
+This folder contains code for the black_lodge model, a baseline random forest for predicting fatalities, also known as fatalities002_baseline_rf in the [documentation paper of fatalities002](https://viewsforecasting.org/wp-content/uploads/VIEWS_documentation_models_Fatalities002.pdf).
+
+The model input data is the qs_baseline, and its algorithm is a random forest regressor with an XGBoost implementation (XGBRFRegressor).
+
+This is a very simple model with only five data columns (each column representing one feature): The number of fatalities in the same country at $t-1$, three decay functions of time since there were at least five fatalities in a single month, for each of the UCDP conflict types -- state-based, one-sided, or non-state conflict -- and log population size (Hegre2020RP, Pettersson2021JPR). The features in the baseline are included in all the models described below. This ensures that all models in the ensemble provide at least moderately good predictions, while guaranteeing diversity in feature sets and modelling approaches.
+
+## To-Dos
+[x] Take over model configs from [viewsforecasting](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/SystemUpdates/ModelDefinitions.py#L36)
+[] Tidy config files (currently everything is in config_model)
+[] Dataloader: Rewrite queryset for vimur
+[] Training script
+[] Forecasting script
+[] Evaluation script
+[] Orchestration script
+[] Log on wandb
+[] Log on Prefect with tasks
\ No newline at end of file
diff --git a/models/black_lodge/configs/config_model.py b/models/black_lodge/configs/config_model.py
new file mode 100644
index 00000000..696cc750
--- /dev/null
+++ b/models/black_lodge/configs/config_model.py
@@ -0,0 +1,26 @@
+from xgboost import XGBRFRegressor
+
+def get_model_config():
+    """
+    Contains the common configuration settings for the model (model architecture, name, target variable, level of analysis and deployment status).
+
+    Returns:
+    - model_config (dict): A dictionary containing model configuration settings.
+ """ + model_config = { + "name": "black_lodge", + "algorithm": XGBRFRegressor, + "depvar": "ln_ged_sb_dep", + "queryset": "fatalities002_baseline", + "level": "cm", + "sweep": False, + "force_retrain": False, + "steps": [*range(1, 36 + 1, 1)], + "deployment_status": "production", + "creator": "Sara", + "preprocessing": "float_it", #new + "data_train": "baseline002", #new + "n_jobs": 12, #new, move to hyperparameters + "n_estimators": 300 #new, move to hyperparameters + } + return model_config \ No newline at end of file From fb4c959beb334531c98408b7ac7af445a4daf43d Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Fri, 24 May 2024 14:21:47 +0200 Subject: [PATCH 02/24] Fix formatting --- models/black_lodge/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/models/black_lodge/README.md b/models/black_lodge/README.md index 87093df2..1fc7066f 100644 --- a/models/black_lodge/README.md +++ b/models/black_lodge/README.md @@ -7,12 +7,12 @@ The model input data is the qs_baseline, and its algorithm is a random forest re This is very simple model with only five data columns (each column representing one feature): The number of fatalities in the same country at $t-1$, three decay functions of time since there was at least five fatalities in a single month, for each of the UCDP conflict types -- state-based, one-sided, or non-state conflict -- and log population size (Hegre2020RP,Pettersson2021JPR).The features in the baseline are included in all the models described below. This ensures that all models in the ensemble provides at least moderately good predictions, while guaranteeing diversity in feature sets and modelling approaches. ## To-Dos -[x] Take over model configs from [viewsforecasting](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/SystemUpdates/ModelDefinitions.py#L36) -[] Tidy config files (currently everything is in config_model) -[] Dataloader: Rewrite queryset for vimur -[] Training script -[] Forecasting script -[] Evaluation script -[] Orchestration script -[] Log on wandb -[] Log on Prefect with tasks \ No newline at end of file +- [x] Take over model configs from [viewsforecasting](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/SystemUpdates/ModelDefinitions.py#L36) +- [ ] Tidy config files (currently everything is in config_model) +- [ ] Dataloader: Rewrite queryset for vimur +- [ ] Training script +- [ ] Forecasting script +- [ ] Evaluation script +- [ ] Orchestration script +- [ ] Log on wandb +- [ ] Log on Prefect with tasks \ No newline at end of file From 3867624b513a43044c0ca0541b427e4e35ee6f08 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 28 May 2024 17:06:42 +0200 Subject: [PATCH 03/24] Create utils_input_data.py Copy pasted from src/utils in @xiaolong0728 blank_space. This script might be a good place to also do input data checks. --- common_utils/utils_input_data.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 common_utils/utils_input_data.py diff --git a/common_utils/utils_input_data.py b/common_utils/utils_input_data.py new file mode 100644 index 00000000..b86b675d --- /dev/null +++ b/common_utils/utils_input_data.py @@ -0,0 +1,15 @@ +def ensure_float64(df): + """ + Check if the DataFrame only contains np.float64 types. If not, raise a warning + and convert the DataFrame to use np.float64 for all its numeric columns. 
+ """ + + non_float64_cols = df.select_dtypes(include=['number']).columns[df.select_dtypes(include=['number']).dtypes != np.float64] + + if len(non_float64_cols) > 0: + print(f"Warning: DataFrame contains non-np.float64 numeric columns. Converting the following columns: {', '.join(non_float64_cols)}") + + for col in non_float64_cols: + df[col] = df[col].astype(np.float64) + + return df \ No newline at end of file From d80bc94e604d3799a06acbf530d6eecb1028b790 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 28 May 2024 17:19:35 +0200 Subject: [PATCH 04/24] Dataloaders Getting error though, probably having issues with views6: TypeError: __init__() got an unexpected keyword argument 'from_loa' --- .../black_lodge/configs/config_input_data.py | 73 +++++++++++++++++++ .../black_lodge/src/dataloaders/get_data.py | 71 ++++++++++++++++++ 2 files changed, 144 insertions(+) create mode 100644 models/black_lodge/configs/config_input_data.py create mode 100644 models/black_lodge/src/dataloaders/get_data.py diff --git a/models/black_lodge/configs/config_input_data.py b/models/black_lodge/configs/config_input_data.py new file mode 100644 index 00000000..bb8f1714 --- /dev/null +++ b/models/black_lodge/configs/config_input_data.py @@ -0,0 +1,73 @@ +import numpy as np +from viewser import Queryset, Column + +def get_input_data(): + """" + Contains viewser queryset to fetch input data (queryset name, target variable, level of analysis, transformations, and theme). + + Returns: + - qs_baseline (Queryset): Fatalities conflict history, cm level. Predicting ln(fatalities) using conflict predictors, ultrashort. + + Note: + - Queryset taken from [viewsforecasting/Tools/cm_querysets.py](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/Tools/cm_querysets.py#L16) + - Queryset will be used in src/dataloaders/get_data.py to fetch data. 
+ """ + + qs_baseline = (Queryset("fatalities002_baseline", "country_month") + + # target variable + .with_column(Column("ln_ged_sb_dep", from_loa="country_month", from_column="ged_sb_best_sum_nokgi") + .transform.ops.ln() + .transform.missing.fill() + ) + + # timelag 0 of target variable + .with_column(Column("ln_ged_sb", from_loa="country_month", from_column="ged_sb_best_sum_nokgi") + .transform.ops.ln() + .transform.missing.fill() + ) + # Decay functions + # state-based (sb) + .with_column(Column("decay_ged_sb_5", from_loa="country_month", from_column="ged_sb_best_sum_nokgi") + .transform.missing.replace_na() + .transform.bool.gte(5) + .transform.temporal.time_since() + .transform.temporal.decay(24) + .transform.missing.replace_na() + ) + # one-sided (os) + .with_column(Column("decay_ged_os_5", from_loa="country_month", from_column="ged_os_best_sum_nokgi") + .transform.missing.replace_na() + .transform.bool.gte(5) + .transform.temporal.time_since() + .transform.temporal.decay(24) + .transform.missing.replace_na() + ) + # Spatial lag decay + .with_column(Column("splag_1_decay_ged_sb_5", from_loa="country_month", + from_column="ged_sb_best_sum_nokgi") + .transform.missing.replace_na() + .transform.bool.gte(5) + .transform.temporal.time_since() + .transform.temporal.decay(24) + .transform.spatial.countrylag(1, 1, 0, 0) + .transform.missing.replace_na() + ) + + # From WDI + + .with_column(Column("wdi_sp_pop_totl", from_loa="country_year", from_column="wdi_sp_pop_totl") + .transform.missing.fill() + .transform.temporal.tlag(12) + .transform.missing.fill() + .transform.missing.replace_na() + ) + + .with_theme("fatalities") + .describe("""Fatalities conflict history, cm level + + Predicting ln(fatalities) using conflict predictors, ultrashort + + """) + ) + return qs_baseline \ No newline at end of file diff --git a/models/black_lodge/src/dataloaders/get_data.py b/models/black_lodge/src/dataloaders/get_data.py new file mode 100644 index 00000000..9065b318 --- /dev/null +++ b/models/black_lodge/src/dataloaders/get_data.py @@ -0,0 +1,71 @@ +import sys +from pathlib import Path +import pandas as pd +import numpy as np + +PATH = Path(__file__) +sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) +from set_path import setup_project_paths, setup_data_paths +setup_project_paths(PATH) + +from utils_input_data import ensure_float64 +from set_partition import get_partitioner_dict +from config_input_data import get_input_data + +def get_data(): + """ + Fetches the input data from the viewser queryset defined in get_input_data function of config_input_data.py + Checks if the data is already saved as a parquet file, if not, fetches the data and saves it as a parquet file. + Also ensures that all numeric columns are of type np.float64. + + Returns: + - data (pd.DataFrame): Input data fetched from the viewser queryset. + """ + print("Getting data...") + PATH_RAW, _, _ = setup_data_paths(PATH) + parquet_path = PATH_RAW / 'raw.parquet' + # print('PARQUET PATH', parquet_path) + if not parquet_path.exists(): + qs = get_input_data() + data = qs.publish().fetch() + data = ensure_float64(data) + data.to_parquet(parquet_path) + else: + data = pd.read_parquet(parquet_path) + + return data + +def get_partition_data(df, partition): + """ + Temporally subsets the input data based on the partition. + + Args: + - df (pd.DataFrame): Input data fetched from the viewser queryset. + - partition (str): The partition of the data (calibration, testing, forecasting). 
+ + Returns: + - df (pd.DataFrame): Temporally subsetted data based on the partition. + """ + + partitioner_dict = get_partitioner_dict(partition) + + month_first = partitioner_dict['train'][0] + + if partition == 'forecasting': + month_last = partitioner_dict['train'][1] + 1 # no need to get the predict months as these are empty + + elif partition == 'calibration' or partition == 'testing': + month_last = partitioner_dict['predict'][1] + 1 # predict[1] is the last month to predict, so we need to add 1 to include it. + + else: + raise ValueError('partition should be either "calibration", "testing" or "forecasting"') + + month_range = np.arange(month_first, month_last,1) # predict[1] is the last month to predict, so we need to add 1 to include it. + + df = df[df.index.get_level_values("month_id").isin(month_range)].copy() # temporal subset + + return df + +if __name__ == "__main__": + get_data() + print("Data fetched successfully.") \ No newline at end of file From 4e6a57c0d1d61bb6262ff59f5d5ac4c4d561f409 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 28 May 2024 17:29:24 +0200 Subject: [PATCH 05/24] Update README.md --- models/black_lodge/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/black_lodge/README.md b/models/black_lodge/README.md index 1fc7066f..49b5bbdf 100644 --- a/models/black_lodge/README.md +++ b/models/black_lodge/README.md @@ -9,7 +9,7 @@ This is very simple model with only five data columns (each column representing ## To-Dos - [x] Take over model configs from [viewsforecasting](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/SystemUpdates/ModelDefinitions.py#L36) - [ ] Tidy config files (currently everything is in config_model) -- [ ] Dataloader: Rewrite queryset for vimur +- [x] Dataloader: Rewrite queryset for vimur - [ ] Training script - [ ] Forecasting script - [ ] Evaluation script From 5b82f02975e05b6148babfffd8eefd17ad711d25 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Wed, 29 May 2024 14:53:22 +0200 Subject: [PATCH 06/24] Add data check Logic taken from viewsforecasting notebook --- models/black_lodge/src/dataloaders/get_data.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/models/black_lodge/src/dataloaders/get_data.py b/models/black_lodge/src/dataloaders/get_data.py index 9065b318..6cc883bd 100644 --- a/models/black_lodge/src/dataloaders/get_data.py +++ b/models/black_lodge/src/dataloaders/get_data.py @@ -66,6 +66,19 @@ def get_partition_data(df, partition): return df + +def check_data(): + """ + Check missingness and infinity values in the input data + """ + data = get_data() + print("Checking missingness and infinity values in the input data...") + print("Missing values in the input data:") + print(data.isnull().sum()) + print("Infinity values in the input data:") + print(data.isin([np.inf, -np.inf]).sum()) + + if __name__ == "__main__": get_data() print("Data fetched successfully.") \ No newline at end of file From 215faa77025d6bc9d286dd7724aa91fb9c073bfc Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Wed, 29 May 2024 18:00:34 +0200 Subject: [PATCH 07/24] New config --- models/black_lodge/configs/config_hyperparameters.py | 12 ++++++++++++ models/black_lodge/configs/config_model.py | 10 +++------- 2 files changed, 15 insertions(+), 7 deletions(-) create mode 100644 models/black_lodge/configs/config_hyperparameters.py diff --git a/models/black_lodge/configs/config_hyperparameters.py b/models/black_lodge/configs/config_hyperparameters.py new file mode 
100644 index 00000000..660867b7 --- /dev/null +++ b/models/black_lodge/configs/config_hyperparameters.py @@ -0,0 +1,12 @@ +def get_hp_config(): + hp_config = { + "clf":{ + "n_estimators": 300, + "n_jobs": 12 + }, + "reg":{ + "n_estimators": 300, + "n_jobs": 12 + } + } + return hp_config \ No newline at end of file diff --git a/models/black_lodge/configs/config_model.py b/models/black_lodge/configs/config_model.py index 696cc750..55305d47 100644 --- a/models/black_lodge/configs/config_model.py +++ b/models/black_lodge/configs/config_model.py @@ -1,5 +1,3 @@ -from xgboost import XGBRFRegressor - def get_model_config(): """ Contains the common configuration settings for the model (model architecture, name, target variable, level of analysis and deployment status). @@ -9,7 +7,7 @@ def get_model_config(): """ model_config = { "name": "black_lodge", - "algorithm": XGBRFRegressor, + "algorithm": "XGBRFRegressor", "depvar": "ln_ged_sb_dep", "queryset": "fatalities002_baseline", "level": "cm", @@ -19,8 +17,6 @@ def get_model_config(): "deployment_status": "production", "creator": "Sara", "preprocessing": "float_it", #new - "data_train": "baseline002", #new - "n_jobs": 12, #new, move to hyperparameters - "n_estimators": 300 #new, move to hyperparameters - } + "data_train": "baseline002", #new + } return model_config \ No newline at end of file From d2b17326d9de01ade6ed37f59f0c604a6074d676 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Wed, 29 May 2024 18:00:49 +0200 Subject: [PATCH 08/24] Create train_model.py --- .../black_lodge/src/training/train_model.py | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 models/black_lodge/src/training/train_model.py diff --git a/models/black_lodge/src/training/train_model.py b/models/black_lodge/src/training/train_model.py new file mode 100644 index 00000000..feba17fd --- /dev/null +++ b/models/black_lodge/src/training/train_model.py @@ -0,0 +1,69 @@ +import sys +from pathlib import Path +import pandas as pd + +PATH = Path(__file__) +sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) +from set_path import setup_project_paths, setup_data_paths, setup_artifacts_paths +setup_project_paths(PATH) + +from xgboost import XGBRFRegressor + +from views_partitioning.data_partitioner import DataPartitioner +from views_forecasts.extensions import * +from stepshift.views import StepshiftedModels +from views_stepshift.run import ViewsRun +from hurdle_model import HurdleRegression +from get_data import get_partition_data + +from config_model import get_model_config +from config_hyperparameters import get_hp_config + + +def train(model_config, para_config): + print("Training...") + if not model_config["sweep"]: + PATH_RAW, _, _ = setup_data_paths(PATH) + PATH_ARTIFACTS = setup_artifacts_paths(PATH) + dataset = pd.read_parquet(PATH_RAW / 'raw.parquet') + if model_config["algorithm"] == "HurdleRegression": + model = HurdleRegression(clf_name=model_config["clf_name"], reg_name=model_config["reg_name"], clf_params=para_config["clf"], reg_params=para_config["reg"]) + else: + model = globals()[model_config["algorithm"]](**para_config) + # print(model) + + # Train partition + try: + stepshifter_model_calib = pd.read_pickle(PATH_ARTIFACTS / "model_calib_partition.pkl") + except: + stepshifter_model_calib = stepshift_training(model_config, "calib", model, get_partition_data(dataset, "calibration")) + stepshifter_model_calib.save(PATH_ARTIFACTS /"model_calib_partition.pkl") + + # Test partition + try: 
+ stepshifter_model_test = pd.read_pickle(PATH_ARTIFACTS / "model_test_partition.pkl") + except: + stepshifter_model_test = stepshift_training(model_config, "test", model, get_partition_data(dataset, "testing")) + stepshifter_model_test.save(PATH_ARTIFACTS / "model_test_partition.pkl") + + # Future partition + try: + stepshifter_model_future = pd.read_pickle(PATH_ARTIFACTS / "model_forecast_partition.pkl") + except: + stepshifter_model_future = stepshift_training(model_config, "forecast", model, get_partition_data(dataset, "forecasting")) + stepshifter_model_future.save(PATH_ARTIFACTS / "model_forecast_partition.pkl") + + +def stepshift_training(model_config, partition_name, model, dataset): + steps = model_config["steps"] + target = model_config["depvar"] + partition = DataPartitioner({partition_name: model_config[f"{partition_name}_partitioner_dict"]}) + stepshifter_def = StepshiftedModels(model, steps, target) + stepshifter_model = ViewsRun(partition, stepshifter_def) + stepshifter_model.fit(partition_name, "train", dataset) + return stepshifter_model + +if __name__ == "__main__": + model_config = get_model_config() + para_config = get_hp_config() + train(model_config, para_config) \ No newline at end of file From d015d7515039bdc6c6afc1e330a397292e2c5c35 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Wed, 29 May 2024 18:02:07 +0200 Subject: [PATCH 09/24] Update README.md --- models/black_lodge/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/black_lodge/README.md b/models/black_lodge/README.md index 49b5bbdf..298141a6 100644 --- a/models/black_lodge/README.md +++ b/models/black_lodge/README.md @@ -8,9 +8,9 @@ This is very simple model with only five data columns (each column representing ## To-Dos - [x] Take over model configs from [viewsforecasting](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/SystemUpdates/ModelDefinitions.py#L36) -- [ ] Tidy config files (currently everything is in config_model) +- [x] Tidy config files (currently everything is in config_model) - [x] Dataloader: Rewrite queryset for vimur -- [ ] Training script +- [x] Training script - [ ] Forecasting script - [ ] Evaluation script - [ ] Orchestration script From 01753641787b50f4dc63f99d008b126fc4b1be15 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Wed, 29 May 2024 18:06:05 +0200 Subject: [PATCH 10/24] Typo --- documentation/model.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/model.md b/documentation/model.md index 5c9a4315..37d8ecfb 100644 --- a/documentation/model.md +++ b/documentation/model.md @@ -4,7 +4,7 @@ In the context of the VIEWS pipeline, a model should be understood as: 1) A specific instantiation of a machine learning algorithm, -2) Trained using a predetermined and unique set of hyperpara.meters, +2) Trained using a predetermined and unique set of hyperparameters, 3) On a well-defined set of input features, 4) And targeting a specific outcome target. 5) In the case of stepshift models, a model is understood as **all** code and **all** artifacts necessary to generate a comprehensive 36 month forecast for the specified target. 
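A note on point 5 above: "stepshift" means that a separate estimator is fitted for each forecast step s = 1..36, each trained on features at month t against the target at month t+s, and the 36 fitted estimators together constitute one model. The sketch below is illustrative only -- it is not the StepshiftedModels/ViewsRun implementation that these patches import from stepshift.views, and the function name, index names, and choice of RandomForestRegressor are assumptions, not the pipeline's actual code:

    import pandas as pd
    from sklearn.ensemble import RandomForestRegressor

    def fit_stepshift(df: pd.DataFrame, target: str, steps=range(1, 37)):
        # Assumes df carries a (month_id, country_id) MultiIndex,
        # sorted by month within each country.
        models = {}
        for s in steps:
            shifted = df.copy()
            # Shift the target s months back, so the features at month t
            # line up with the outcome at month t + s.
            shifted[target] = shifted.groupby(level="country_id")[target].shift(-s)
            shifted = shifted.dropna(subset=[target])
            models[s] = RandomForestRegressor(n_estimators=100).fit(
                shifted.drop(columns=[target]), shifted[target]
            )
        return models  # one fitted estimator per step; jointly they yield the 36-month forecast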
From 5492817954f7d7af944110904ff5def8607ebc66 Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Wed, 29 May 2024 18:06:11 +0200
Subject: [PATCH 11/24] Update README.md

---
 models/README.md | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/models/README.md b/models/README.md
index 1fa58320..90b18d4c 100644
--- a/models/README.md
+++ b/models/README.md
@@ -1,6 +1,8 @@
+# Model Overview
+
 | model | algorithm | queryset |
 | -------------------------------------------------- | ------ | ------ |
-| [black_lodge](https://github.com/prio-data/views_pipeline/tree/main/models/black_lodge) | XGBRFRegressor | qs_baseline |
+| [black_lodge](https://github.com/prio-data/views_pipeline/tree/main/models/black_lodge) | XGBRFRegressor | fatalities002_baseline |
 | [blank_space](https://github.com/prio-data/views_pipeline/tree/main/models/blank_space) | HurdleRegression | fatalities003_pgm_natsoc |
 | [electric_relaxation](https://github.com/prio-data/views_pipeline/tree/main/models/electric_relaxation) | RandomForestClassifier | escwa001_cflong |
 | [lavender_haze](https://github.com/prio-data/views_pipeline/tree/main/models/lavender_haze) | HurdleRegression | fatalities003_pgm_broad |
@@ -8,3 +10,13 @@
 | [purple_alien](https://github.com/prio-data/views_pipeline/tree/main/models/purple_alien) | HydraNet | simon_tests |
 | [wildest_dream](https://github.com/prio-data/views_pipeline/tree/main/models/wildest_dream) | HurdleRegression | fatalities003_pgm_conflict_sptime_dist |
 | [yellow_pikachu](https://github.com/prio-data/views_pipeline/tree/main/models/yellow_pikachu) | XGBRegressor | fatalities003_pgm_conflict_treelag |
+
+# Model Definition
+In the context of the VIEWS pipeline, a model should be understood as:
+
+1) A specific instantiation of a machine learning algorithm,
+2) Trained using a predetermined and unique set of hyperparameters,
+3) On a well-defined set of input features,
+4) And targeting a specific outcome target.
+5) In the case of stepshift models, a model is understood as **all** code and **all** artifacts necessary to generate a comprehensive 36 month forecast for the specified target.
+6) Note that two models, identical in all other aspects, will be deemed distinct if varying post-processing techniques are applied to their generated predictions, for instance if one model's predictions undergo calibration or normalization while the other's do not.
\ No newline at end of file

From 179a77bbed2328b23ef54e2ae0e78f8fc0e3e781 Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Mon, 17 Jun 2024 16:41:06 +0200
Subject: [PATCH 12/24] Update utils_input_data.py

---
 common_utils/utils_input_data.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/common_utils/utils_input_data.py b/common_utils/utils_input_data.py
index b86b675d..96737efc 100644
--- a/common_utils/utils_input_data.py
+++ b/common_utils/utils_input_data.py
@@ -1,3 +1,5 @@
+import numpy as np
+
 def ensure_float64(df):
     """
     Check if the DataFrame only contains np.float64 types. If not, raise a warning

From a5d5023d535b3c8adcdde1c2f10d70fa36859743 Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Tue, 18 Jun 2024 12:04:42 +0200
Subject: [PATCH 13/24] Update configs

---
 .../black_lodge/configs/config_deployment.py | 15 ++++++++
 .../{config_model.py => config_meta.py}      |  4 +--
 models/black_lodge/configs/config_sweep.py   | 34 +++++++++++++++++++
 3 files changed, 51 insertions(+), 2 deletions(-)
 create mode 100644 models/black_lodge/configs/config_deployment.py
 rename models/black_lodge/configs/{config_model.py => config_meta.py} (91%)
 create mode 100644 models/black_lodge/configs/config_sweep.py

diff --git a/models/black_lodge/configs/config_deployment.py b/models/black_lodge/configs/config_deployment.py
new file mode 100644
index 00000000..44bd8457
--- /dev/null
+++ b/models/black_lodge/configs/config_deployment.py
@@ -0,0 +1,15 @@
+def get_deployment_config():
+
+    """
+    Contains the configuration for deploying the model into different environments.
+    This configuration is "behavioral" so modifying it will affect the model's runtime behavior and integration into the deployment system.
+
+    Returns:
+    - deployment_config (dict): A dictionary containing deployment settings, determining how the model is deployed, including status, endpoints, and resource allocation.
+    """
+
+    deployment_config = {
+        "deployment_status": "baseline", # shadow, deployed, baseline, or deprecated
+    }
+
+    return deployment_config
\ No newline at end of file
diff --git a/models/black_lodge/configs/config_model.py b/models/black_lodge/configs/config_meta.py
similarity index 91%
rename from models/black_lodge/configs/config_model.py
rename to models/black_lodge/configs/config_meta.py
index 55305d47..36436621 100644
--- a/models/black_lodge/configs/config_model.py
+++ b/models/black_lodge/configs/config_meta.py
@@ -1,4 +1,4 @@
-def get_model_config():
+def get_meta_config():
     """
     Contains the common configuration settings for the model (model architecture, name, target variable, level of analysis and deployment status).
 
@@ -19,4 +19,4 @@ def get_meta_config():
         "preprocessing": "float_it", #new
         "data_train": "baseline002", #new
     }
-    return model_config
\ No newline at end of file
+    return meta_config #formerly model_config
\ No newline at end of file
diff --git a/models/black_lodge/configs/config_sweep.py b/models/black_lodge/configs/config_sweep.py
new file mode 100644
index 00000000..27ce72c3
--- /dev/null
+++ b/models/black_lodge/configs/config_sweep.py
@@ -0,0 +1,34 @@
+def get_swep_config():
+
+    """
+    Contains the configuration for hyperparameter sweeps using WandB.
+    This configuration is "operational" so modifying it will change the search strategy, parameter ranges, and other settings for hyperparameter tuning aimed at optimizing model performance.
+
+    Returns:
+    - sweep_config (dict): A dictionary containing the configuration for hyperparameter sweeps, defining the methods and parameter ranges used to search for optimal hyperparameters.
+ """ + + sweep_config = { + 'name': 'black_lodge', + 'method': 'country' #or grid + } + + metric = { + 'name': '36month_mean_squared_error', + 'goal': 'minimize' + } + + sweep_config['metric'] = metric + + parameters_dict = { + "cls_n_estimators": {"values": [100, 200]}, + "cls_learning_rate": {"values": [0.05]}, + "cls_n_jobs": {"values": [12]}, + "reg_n_estimators": {"values": [100, 200]}, + "reg_learning_rate": {"values": [0.05]}, + "reg_n_jobs": {"values": [12]} + } #taken from xiaolong's code + + sweep_config['parameters'] = parameters_dict + + return sweep_config \ No newline at end of file From 2ad7d9a5c894b61bd54729513b6dd0f52b5aae4d Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 18 Jun 2024 12:04:47 +0200 Subject: [PATCH 14/24] New config name --- models/black_lodge/src/training/train_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/black_lodge/src/training/train_model.py b/models/black_lodge/src/training/train_model.py index feba17fd..4af8746d 100644 --- a/models/black_lodge/src/training/train_model.py +++ b/models/black_lodge/src/training/train_model.py @@ -16,7 +16,7 @@ from hurdle_model import HurdleRegression from get_data import get_partition_data -from config_model import get_model_config +from config_meta import get_meta_config from config_hyperparameters import get_hp_config From 469aa34c38bc2dcf2beef6beb8598838fb155cb2 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 18 Jun 2024 16:17:57 +0200 Subject: [PATCH 15/24] Typo and small fixes --- models/black_lodge/configs/config_sweep.py | 4 ++-- models/black_lodge/src/dataloaders/get_data.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/models/black_lodge/configs/config_sweep.py b/models/black_lodge/configs/config_sweep.py index 27ce72c3..ee22a298 100644 --- a/models/black_lodge/configs/config_sweep.py +++ b/models/black_lodge/configs/config_sweep.py @@ -1,4 +1,4 @@ -def get_swep_config(): +def get_sweep_config(): """ Contains the configuration for hyperparameter sweeps using WandB. @@ -9,7 +9,7 @@ def get_swep_config(): """ sweep_config = { - 'name': 'black_lodge', + 'name': 'black_lodge_sweep', 'method': 'country' #or grid } diff --git a/models/black_lodge/src/dataloaders/get_data.py b/models/black_lodge/src/dataloaders/get_data.py index 6cc883bd..5b2697f7 100644 --- a/models/black_lodge/src/dataloaders/get_data.py +++ b/models/black_lodge/src/dataloaders/get_data.py @@ -24,7 +24,7 @@ def get_data(): print("Getting data...") PATH_RAW, _, _ = setup_data_paths(PATH) parquet_path = PATH_RAW / 'raw.parquet' - # print('PARQUET PATH', parquet_path) + # print('PARQUET PATH', parquet_path) # use this to debug if the parquet path is correct if not parquet_path.exists(): qs = get_input_data() data = qs.publish().fetch() @@ -70,6 +70,8 @@ def get_partition_data(df, partition): def check_data(): """ Check missingness and infinity values in the input data + + Taken from viewsforecasting repo, to be further developed. 
""" data = get_data() print("Checking missingness and infinity values in the input data...") From a0d73a9b23c0c766a24b8affa9e03e708b96d67d Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 18 Jun 2024 16:18:21 +0200 Subject: [PATCH 16/24] Create main.py Adaoted from purple_alien --- models/black_lodge/main.py | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 models/black_lodge/main.py diff --git a/models/black_lodge/main.py b/models/black_lodge/main.py new file mode 100644 index 00000000..40a43109 --- /dev/null +++ b/models/black_lodge/main.py @@ -0,0 +1,41 @@ +import time + +import wandb + +import sys +from pathlib import Path + +PATH = Path(__file__) +sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths, setup_artifacts_paths +setup_project_paths(PATH) + +from cli_parser_utils import parse_args, validate_arguments #change once PR30 is merged + +from execute_model_runs import execute_sweep_run, execute_single_run + +#from mode_run_manager import model_run_manager + +if __name__ == "__main__": + + args = parse_args() + #print(args) #use for debugging + + validate_arguments(args) + + wandb.login() + + start_t = time.time() + + + if args.sweep == True: + # need to check if you are running a sweep or not, because the sweep will overwrite the train and evaluate flags + execute_sweep_run(args) + + elif args.sweep == False: + execute_single_run(args) + + + end_t = time.time() + minutes = (end_t - start_t)/60 + print(f'Done. Runtime: {minutes:.3f} minutes') \ No newline at end of file From c15209fddfeca00134515b44d9ad09fe412acee0 Mon Sep 17 00:00:00 2001 From: Sara Kallis Date: Tue, 18 Jun 2024 18:02:18 +0200 Subject: [PATCH 17/24] Management scripts Not yet tested --- .../management/execute_model_runs.py | 85 +++++++++++++++++++ .../management/execute_model_tasks.py | 75 ++++++++++++++++ 2 files changed, 160 insertions(+) create mode 100644 models/black_lodge/management/execute_model_runs.py create mode 100644 models/black_lodge/management/execute_model_tasks.py diff --git a/models/black_lodge/management/execute_model_runs.py b/models/black_lodge/management/execute_model_runs.py new file mode 100644 index 00000000..2f1e12f5 --- /dev/null +++ b/models/black_lodge/management/execute_model_runs.py @@ -0,0 +1,85 @@ +import wandb + +import sys +from pathlib import Path + +PATH = Path(__file__) +sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths, setup_artifacts_paths +setup_project_paths(PATH) + +from config_sweep import get_sweep_config +from config_hyperparameters import get_hp_config +from execute_model_tasks import execute_model_tasks + + +def execute_sweep_run(args): + """ + Function to execute a hyperparameter sweep of the model. This function is called when the user specifies a sweep in the command line. + + Args: + - args (argparse.Namespace): Arguments parsed from the command line. 
+
+    Returns:
+    - None
+
+    Example:
+    ```
+    python main.py --sweep
+
+    ```
+    """
+    print('Running sweep...')
+
+    sweep_config = get_sweep_config()
+    project = sweep_config['name']
+    sweep_config['parameters']['run_type'] = {'value' : "calibration"}
+    sweep_config['parameters']['sweep'] = {'value' : True}
+
+    sweep_id = wandb.sweep(sweep_config, project=project, entity='views_pipeline') # entity is the team name
+
+    wandb.agent(sweep_id, execute_model_tasks, entity='views_pipeline') # entity is the team name - seems like it needs to be both in sweep_id and agent
+
+
+def execute_single_run(args):
+    """
+    Function to execute a single run of the model. This function is called when the user specifies a single run in the command line.
+
+    Args:
+    - args (argparse.Namespace): Arguments parsed from the command line.
+
+    Returns:
+    - None
+
+    Examples:
+    ```
+    python main.py --run_type calibration --train --evaluate --artifact_name my_model
+
+    python main.py --run_type forecasting --artifact_name my_model
+
+    python main.py --run_type testing --train --evaluate --artifact_name my_model
+    ```
+
+    """
+
+    # get hyperparameters. IS THE ISSUE UP HERE?
+    hyperparameters = get_hp_config()
+    hyperparameters['run_type'] = args.run_type
+    hyperparameters['sweep'] = False
+
+    # get run type and denoting project name - check convention!
+    project = f"black_lodge_{args.run_type}"
+
+    if args.run_type == 'calibration' or args.run_type == 'testing':
+
+        #model_run_manager(config = hyperparameters, project = project, train = args.train, eval = args.evaluate, forecast = False, artifact_name = args.artifact_name)
+        execute_model_tasks(config = hyperparameters, project = project, train = args.train, eval = args.evaluate, forecast = False, artifact_name = args.artifact_name)
+
+    elif args.run_type == 'forecasting':
+
+        #print('True forecasting ->->->->')
+        #model_run_manager(config = hyperparameters, project = project, train = False, eval = False, forecast=True, artifact_name = args.artifact_name)
+        execute_model_tasks(config = hyperparameters, project = project, train = False, eval = False, forecast=True, artifact_name = args.artifact_name)
+
+    else:
+        raise ValueError(f"Invalid run type: {args.run_type}")
\ No newline at end of file
diff --git a/models/black_lodge/management/execute_model_tasks.py b/models/black_lodge/management/execute_model_tasks.py
new file mode 100644
index 00000000..128d588f
--- /dev/null
+++ b/models/black_lodge/management/execute_model_tasks.py
@@ -0,0 +1,75 @@
+import wandb
+
+import sys
+from pathlib import Path
+
+PATH = Path(__file__)
+sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) # PATH_COMMON_UTILS
+from set_path import setup_project_paths, setup_artifacts_paths
+setup_project_paths(PATH)
+
+from utils import get_data
+from utils_wandb import add_wandb_monthly_metrics
+from train_model import make, training_loop, train_model_artifact #handle_training
+from evaluate_model import evaluate_posterior, evaluate_model_artifact #handle_evaluation
+from generate_forecast import forecast_with_model_artifact #handle_forecasting
+
+
+def execute_model_tasks(config = None, project = None, train = None, eval = None, forecast = None, artifact_name = None):
+
+    """
+    Executes various model-related tasks including training, evaluation, and forecasting.
+
+    This function manages the execution of different tasks such as training the model,
+    evaluating an existing model, or performing forecasting.
+    It also initializes the WandB project.
+
+    Args:
+        config: Configuration object containing parameters and settings.
+        project: The WandB project name.
+        train: Flag to indicate if the model should be trained.
+        eval: Flag to indicate if the model should be evaluated.
+        forecast: Flag to indicate if forecasting should be performed.
+        artifact_name (optional): Specific name of the model artifact to load for evaluation or forecasting.
+    """
+
+    # Define the path for the artifacts
+    PATH_ARTIFACTS = setup_artifacts_paths(PATH)
+
+    #device = setup_device()
+
+    # Initialize WandB
+    with wandb.init(project=project, entity="views_pipeline", config=config): # project and config ignored when running a sweep
+
+        # add the monthly metrics to WandB
+        add_wandb_monthly_metrics()
+
+        # Update config from WandB initialization above
+        config = wandb.config
+
+        # Retrieve data (partition) based on the configuration
+        views_vol = get_data(config) # a bit HydraNet specific, but it is fine for now. If statement or move to handle_training, handle_evaluation, and handle_forecasting?
+
+        # Handle the sweep runs
+        if config.sweep: # If we are running a sweep, always train and evaluate
+
+            model, criterion, optimizer, scheduler = make(config, device)
+            training_loop(config, model, criterion, optimizer, scheduler, views_vol, device)
+            print('Done training')
+
+            evaluate_posterior(model, views_vol, config, device)
+            print('Done testing')
+
+        # Handle the single model runs: train and save the model as an artifact
+        if train:
+            #handle_training(config, device, views_vol, PATH_ARTIFACTS)
+            train_model_artifact(config, device, views_vol, PATH_ARTIFACTS)
+
+        # Handle the single model runs: evaluate a trained model (artifact)
+        if eval:
+            #handle_evaluation(config, device, views_vol, PATH_ARTIFACTS, artifact_name)
+            evaluate_model_artifact(config, device, views_vol, PATH_ARTIFACTS, artifact_name)
+
+        if forecast:
+            #handle_forecasting(config, device, views_vol, PATH_ARTIFACTS, artifact_name)
+            forecast_with_model_artifact(config, device, views_vol, PATH_ARTIFACTS, artifact_name)
\ No newline at end of file

From 22220f5deedc8851ad716f94b37636b2c63b1f3c Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Thu, 20 Jun 2024 14:37:03 +0200
Subject: [PATCH 18/24] Correct script location

---
 models/black_lodge/{ => src}/management/execute_model_runs.py  | 0
 models/black_lodge/{ => src}/management/execute_model_tasks.py | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename models/black_lodge/{ => src}/management/execute_model_runs.py (100%)
 rename models/black_lodge/{ => src}/management/execute_model_tasks.py (100%)

diff --git a/models/black_lodge/management/execute_model_runs.py b/models/black_lodge/src/management/execute_model_runs.py
similarity index 100%
rename from models/black_lodge/management/execute_model_runs.py
rename to models/black_lodge/src/management/execute_model_runs.py
diff --git a/models/black_lodge/management/execute_model_tasks.py b/models/black_lodge/src/management/execute_model_tasks.py
similarity index 100%
rename from models/black_lodge/management/execute_model_tasks.py
rename to models/black_lodge/src/management/execute_model_tasks.py

From b922a64dccb4d46edc42bd37bb79a901f359cba2 Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Thu, 20 Jun 2024 14:37:12 +0200
Subject: [PATCH 19/24] Update to-dos

---
 models/black_lodge/README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/models/black_lodge/README.md b/models/black_lodge/README.md
index 298141a6..c639675e 100644
--- a/models/black_lodge/README.md
+++ b/models/black_lodge/README.md
@@ -8,11 +8,11 @@ This is a very simple model with only five data columns (each column representing
 ## To-Dos
 - [x] Take over model configs from [viewsforecasting](https://github.com/prio-data/viewsforecasting/blob/4dbc2cd2b6edb3169fc585f7dbb868b65fab0e2c/SystemUpdates/ModelDefinitions.py#L36)
-- [x] Tidy config files (currently everything is in config_model)
+- [x] Tidy config files
 - [x] Dataloader: Rewrite queryset for vimur
 - [x] Training script
 - [ ] Forecasting script
 - [ ] Evaluation script
-- [ ] Orchestration script
-- [ ] Log on wandb
-- [ ] Log on Prefect with tasks
\ No newline at end of file
+- [ ] Test management script
+- [ ] Test main.py
+- [ ] Log on wandb
\ No newline at end of file

From 61c09c217e99a119149dcf86b2c2754418400ec6 Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Thu, 20 Jun 2024 14:37:42 +0200
Subject: [PATCH 20/24] Update main.py

---
 models/black_lodge/main.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/models/black_lodge/main.py b/models/black_lodge/main.py
index 40a43109..9723bc43 100644
--- a/models/black_lodge/main.py
+++ b/models/black_lodge/main.py
@@ -10,11 +10,10 @@
 from set_path import setup_project_paths, setup_artifacts_paths
 setup_project_paths(PATH)
 
-from cli_parser_utils import parse_args, validate_arguments #change once PR30 is merged
-
+from utils_cli_parser import parse_args, validate_arguments
 from execute_model_runs import execute_sweep_run, execute_single_run
 
-#from mode_run_manager import model_run_manager
+print("Current working directory: ", Path.cwd())
 
 if __name__ == "__main__":
 

From c7a6ab8b28cba256db65ed803f05d3bd344b63ef Mon Sep 17 00:00:00 2001
From: Sara Kallis
Date: Mon, 24 Jun 2024 13:10:42 +0200
Subject: [PATCH 21/24] Add .gitkeep for folders

---
 models/black_lodge/artifacts/.gitkeep      | 0
 models/black_lodge/data/generated/.gitkeep | 0
 models/black_lodge/data/processed/.gitkeep | 0
 models/black_lodge/data/raw/.gitkeep       | 0
 models/black_lodge/notebooks/.gitkeep      | 0
 models/black_lodge/reports/.gitkeep        | 0
 6 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 models/black_lodge/artifacts/.gitkeep
 create mode 100644 models/black_lodge/data/generated/.gitkeep
 create mode 100644 models/black_lodge/data/processed/.gitkeep
 create mode 100644 models/black_lodge/data/raw/.gitkeep
 create mode 100644 models/black_lodge/notebooks/.gitkeep
 create mode 100644 models/black_lodge/reports/.gitkeep

diff --git a/models/black_lodge/artifacts/.gitkeep b/models/black_lodge/artifacts/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/models/black_lodge/data/generated/.gitkeep b/models/black_lodge/data/generated/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/models/black_lodge/data/processed/.gitkeep b/models/black_lodge/data/processed/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/models/black_lodge/data/raw/.gitkeep b/models/black_lodge/data/raw/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/models/black_lodge/notebooks/.gitkeep b/models/black_lodge/notebooks/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/models/black_lodge/reports/.gitkeep b/models/black_lodge/reports/.gitkeep
new file mode 100644
index 00000000..e69de29b

From 834bf1c89a24ca12a9958e14076410ab6b5739b6 Mon Sep 17 00:00:00 2001
From: xiaolongsun <95378566+xiaolong0728@users.noreply.github.com>
Date: Wed, 31 Jul 2024 14:35:10 +0200
Subject: [PATCH 22/24] latest template

---
 common_utils/hurdle_model.py | 12 +-
 common_utils/utils_cli_parser.py              |  37 ++--
 common_utils/utils_evaluation_metrics.py      |  75 +++++++-
 common_utils/utils_model_outputs.py           |  45 ++++-
 .../views_stepshifter_darts/__init__.py       |   2 +
 .../stepshifter_darts.py                      | 173 ++++++++++++++++++
 .../views_stepshifter_darts/validation.py     |  24 +++
 .../configs/config_hyperparameters.py         |   7 +-
 models/black_lodge/configs/config_meta.py     |   6 +-
 models/black_lodge/configs/config_sweep.py    |  13 +-
 models/black_lodge/main.py                    |  20 +-
 .../src/forecasting/generate_forecast.py      |  47 +++++
 .../src/management/execute_model_runs.py      |  82 +++------
 .../src/management/execute_model_tasks.py     |  67 ++++---
 .../black_lodge/src/training/train_model.py   |  82 +++------
 models/black_lodge/src/utils/utils.py         | 141 ++++++++++++++
 models/black_lodge/src/utils/utils_wandb.py   |  92 ++++++++++
 17 files changed, 718 insertions(+), 207 deletions(-)
 create mode 100644 common_utils/views_stepshifter_darts/__init__.py
 create mode 100644 common_utils/views_stepshifter_darts/stepshifter_darts.py
 create mode 100644 common_utils/views_stepshifter_darts/validation.py
 create mode 100644 models/black_lodge/src/forecasting/generate_forecast.py
 create mode 100644 models/black_lodge/src/utils/utils.py
 create mode 100644 models/black_lodge/src/utils/utils_wandb.py

diff --git a/common_utils/hurdle_model.py b/common_utils/hurdle_model.py
index 3e14f19f..03785e68 100644
--- a/common_utils/hurdle_model.py
+++ b/common_utils/hurdle_model.py
@@ -7,12 +7,9 @@
 from sklearn.utils.estimator_checks import check_estimator
 from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
 from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
-from sklearn.ensemble import RandomForestRegressor
-from sklearn.ensemble import RandomForestClassifier
-from sklearn.ensemble import HistGradientBoostingRegressor
-from sklearn.ensemble import HistGradientBoostingClassifier
-from xgboost import XGBRegressor
-from xgboost import XGBClassifier
+from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
+from sklearn.ensemble import HistGradientBoostingRegressor, HistGradientBoostingClassifier
+from xgboost import XGBRegressor, XGBClassifier
 from xgboost import XGBRFRegressor, XGBRFClassifier
 from lightgbm import LGBMClassifier, LGBMRegressor
 
@@ -105,5 +102,4 @@ def predict(self, X: Union[np.ndarray, pd.DataFrame]):
 
         return self.clf_.predict_proba(X)[:, 1] * self.reg_.predict(X)
-
-
+ 
\ No newline at end of file
diff --git a/common_utils/utils_cli_parser.py b/common_utils/utils_cli_parser.py
index cbc8f261..8775b783 100644
--- a/common_utils/utils_cli_parser.py
+++ b/common_utils/utils_cli_parser.py
@@ -1,3 +1,4 @@
+
 import sys
 import argparse
 
@@ -32,6 +33,13 @@ def parse_args():
                         'Note: If --sweep is specified, --evaluate will also automatically be flagged. '
                         'Cannot be used with --run_type forecasting.')
 
+    parser.add_argument('-f', '--forecast',
+                        action='store_true',
+                        help='Flag to indicate if the model should produce predictions. '
+                             'Note: If --sweep is specified, --forecast will also automatically be flagged. '
+                             'Can only be used with --run_type forecasting.')
+
+
     parser.add_argument('-a', '--artifact_name',
                         type=str,
                         help='Specify the name of the model artifact to be used for evaluation. '
@@ -43,27 +51,32 @@ def parse_args():
     return parser.parse_args()
 
 def validate_arguments(args):
-    if args.sweep:
-        if args.run_type != 'calibration':
-            print("Error: Sweep runs must have --run_type set to 'calibration'. Exiting.")
-            print("To fix: Use --run_type calibration when --sweep is flagged.")
-            sys.exit(1)
-
-    if args.run_type in ['testing', 'forecasting'] and args.sweep:
-        print("Error: Sweep cannot be performed with testing or forecasting run types. Exiting.")
-        print("To fix: Remove --sweep flag or set --run_type to 'calibration'.")
+    if args.sweep and args.run_type != 'calibration':
+        print("Error: Sweep runs must have --run_type set to 'calibration'. Exiting.")
+        print("To fix: Use --run_type calibration when --sweep is flagged.")
         sys.exit(1)
 
-    if args.run_type == 'forecasting' and args.evaluate:
+    if args.evaluate and args.run_type == 'forecasting':
         print("Error: Forecasting runs cannot evaluate. Exiting.")
         print("To fix: Remove --evaluate flag when --run_type is 'forecasting'.")
         sys.exit(1)
 
-    if args.run_type in ['calibration', 'testing'] and not args.train and not args.evaluate and not args.sweep:
+    if (args.run_type in ['calibration', 'testing', 'forecasting']
+        and not args.train and not args.evaluate and not args.forecast and not args.sweep):
         print(f"Error: Run type is {args.run_type} but neither --train, --evaluate, nor --sweep flag is set. Nothing to do... Exiting.")
         print("To fix: Add --train and/or --evaluate flag. Or use --sweep to run both training and evaluation in a WandB sweep loop.")
         sys.exit(1)
 
+    if args.train and args.artifact_name:
+        print("Error: Both --train and --artifact_name flags are set. Exiting.")
+        print("To fix: Remove --artifact_name if --train is set, or vice versa.")
+        sys.exit(1)
+
+    if args.forecast and args.run_type != 'forecasting':
+        print("Error: --forecast flag can only be used with --run_type forecasting. Exiting.")
+        print("To fix: Set --run_type to forecasting if --forecast is flagged.")
+        sys.exit(1)
+
 # notes on stepshifted models:
 # There will be some thinking here in regards to how we store, denote (naming convention), and retrieve the model artifacts from stepshifted models.
 # It is not a big issue, but it is something to consider so we don't do something headless.
 # A possible format could be: <run_type>_model_<step>.pt example: calibration_model_00.pt, calibration_model_01.pt, ... calibration_model_36.pt
 # And the rest of the code made in a way to handle this naming convention without any issues. Could be a simple fix.
 # Alternatively, we could store the model artifacts in a subfolder for each stepshifted model. This would make it easier to handle the artifacts, but it would also make it harder to retrieve the latest artifact for a given run type.
 # Lastly, the solution Xiaolong is working on might allow us to store multiple models (steps) in one artifact, which would make this whole discussion obsolete and be the best solution.
-
-
diff --git a/common_utils/utils_evaluation_metrics.py b/common_utils/utils_evaluation_metrics.py
index 579b7d0d..46c9be84 100644
--- a/common_utils/utils_evaluation_metrics.py
+++ b/common_utils/utils_evaluation_metrics.py
@@ -1,6 +1,16 @@
+
 from dataclasses import dataclass
 from typing import Optional
 import pandas as pd
+from statistics import mean, stdev, median
+
+import properscoring as ps
+from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_squared_log_error, brier_score_loss, average_precision_score, roc_auc_score
+from views_forecasts.extensions import *
+
+
+
+# MUST BE ALIGNED WITH THE METRICS WE DECIDE TO USE IN THE WORKSHOP!!!!
@dataclass class EvaluationMetrics: @@ -75,4 +85,67 @@ def evaluation_dict_to_dataframe(evaluation_dict: dict) -> pd.DataFrame: """ return pd.DataFrame.from_dict(evaluation_dict, orient='index') -# TBD: Align with metrics discussed in workshop + @staticmethod + def calculate_aggregate_metrics(evaluation_dict: dict) -> dict: + metrics_aggregate = { + 'mean': {}, + 'std': {}, + 'median': {} + } + + for metric in EvaluationMetrics.__annotations__.keys(): + metric_values = [getattr(evaluation, metric) for evaluation in evaluation_dict.values() if getattr(evaluation, metric) is not None] + if metric_values: + metrics_aggregate['mean'][metric] = mean(metric_values) + metrics_aggregate['std'][metric] = stdev(metric_values) + metrics_aggregate['median'][metric] = median(metric_values) + else: + metrics_aggregate['mean'][metric] = None + metrics_aggregate['std'][metric] = None + metrics_aggregate['median'][metric] = None + + return metrics_aggregate + + @staticmethod + def output_metrics(evaluation_dict): + aggregate = EvaluationMetrics.calculate_aggregate_metrics(evaluation_dict) + step_metrics_dict = {step: vars(metrics) for step, metrics in evaluation_dict.items()} + step_metrics_dict['mean'] = aggregate['mean'] + step_metrics_dict['std'] = aggregate['std'] + step_metrics_dict['median'] = aggregate['median'] + return step_metrics_dict + + +def generate_metric_dict(df, config): + """ + Generates a dictionary of evaluation metrics for a given forecasting configuration and dataset. + + Args: + df (pd.DataFrame): A pandas DataFrame containing the forecasted values and ground truth. + config (dict): A dictionary containing the forecasting configuration parameters. + + Returns: + evaluation_dict (dict): A dictionary of EvaluationMetrics instances for each forecasting step. + df_evaluation_dict (pd.DataFrame): A pandas DataFrame containing the evaluation metrics for each forecasting step. + + Note: + ! This function is temporary for the stepshifter model. + ! Change the metrics to those discussed previously. + For logged targets, calculating MSE is actually MSLE. + KLD and Jeffreys divergence are measures used to quantify the difference between two probability distributions. Why do we calculate these metrics in the context of forecasting? 
+    Brier score is used for binary and categorical outcomes that can be structured as true or false
+    There are no classes in data, so we cannot calculate roc_auc_score, ap_score
+    """
+
+    evaluation_dict = EvaluationMetrics.make_evaluation_dict(steps=config.steps[-1])
+    for step in config.steps:
+        evaluation_dict[f"step{str(step).zfill(2)}"].MSE = mean_squared_error(df[config.depvar], df[f"step_pred_{step}"])
+        evaluation_dict[f"step{str(step).zfill(2)}"].MAE = mean_absolute_error(df[config.depvar], df[f"step_pred_{step}"])
+        # evaluation_dict[f"step{str(step).zfill(2)}"].MSLE = mean_squared_log_error(df[config.depvar], df[f"step_pred_{step}"])
+        evaluation_dict[f"step{str(step).zfill(2)}"].CRPS = ps.crps_ensemble(df[config.depvar], df[f"step_pred_{step}"]).mean()
+        # evaluation_dict[f"step{str(step).zfill(2)}"].Brier = brier_score_loss(df[config.depvar], df[f"step_pred_{step}"])
+        # evaluation_dict[f"step{str(step).zfill(2)}"].AUC = roc_auc_score(df[config.depvar], df[f"step_pred_{step}"])
+        # evaluation_dict[f"step{str(step).zfill(2)}"].AP = average_precision_score(df[config.depvar], df[f"step_pred_{step}"])
+    evaluation_dict = EvaluationMetrics.output_metrics(evaluation_dict)
+    df_evaluation_dict = EvaluationMetrics.evaluation_dict_to_dataframe(evaluation_dict)
+    return evaluation_dict, df_evaluation_dict
diff --git a/common_utils/utils_model_outputs.py b/common_utils/utils_model_outputs.py
index dc79a454..4b142bbe 100644
--- a/common_utils/utils_model_outputs.py
+++ b/common_utils/utils_model_outputs.py
@@ -2,6 +2,11 @@
 from typing import List, Optional
 import pandas as pd
 
+
+# we need to figure out if we are storing logged fatalities or not
+# And this is also a good place to decide on the uncertainty quantification. Right now var, but maybe HDI or something else.
+# you might also want a non-step specific list of pgm? So you can recreate the full df from here? Otherwise this could turn into a mess
+
 @dataclass
 class ModelOutputs:
     """
@@ -17,7 +22,7 @@ class ModelOutputs:
     pg_id (Optional[List[int]]): The priogrid id.
     c_id (Optional[List[int]]): The country id.
     month_id (Optional[List[int]]): The month id.
-    step (Optional[List[int]]): The step ahead forecast.
+    out_sample_month (Optional[List[int]]): The step ahead forecast.
     """
 
     y_score: Optional[List[float]] = field(default_factory=list)
@@ -29,7 +34,7 @@ class ModelOutputs:
     pg_id: Optional[List[int]] = field(default_factory=list)
     c_id: Optional[List[int]] = field(default_factory=list)
     month_id: Optional[List[int]] = field(default_factory=list)
-    step: Optional[List[int]] = field(default_factory=list)
+    out_sample_month: Optional[List[int]] = field(default_factory=list)
 
     @classmethod
     def make_output_dict(cls, steps=36) -> dict:
@@ -103,6 +108,36 @@ def output_dict_to_dataframe(dict_of_outputs) -> pd.DataFrame:
     return df
 
-
-# we need to figure out if we are storing logged fatalities or not
-# And this is also a good place to decide on the uncertainty quantification. Right now var, but maybe HDI or something else.
-# you might also want the a non-step specific list of pgm? So you can recreate the full df from here? Otherwise this could turn into a mess
+
+def generate_output_dict(df, config):
+    """
+    Generate a dictionary of ModelOutputs instances and a DataFrame from a DataFrame of model predictions.
+
+    This function takes a DataFrame of model predictions and a configuration object, and generates a dictionary of ModelOutputs instances
+
+    Args:
+    df (pd.DataFrame): A DataFrame containing model predictions.
+        config (dict): A configuration object containing model settings.
+
+    Returns:
+        output_dict (dict): A dictionary where each key is a step label and each value is an instance of ModelOutputs.
+        df_output_dict (pd.DataFrame): A DataFrame of model outputs.
+
+    Note:
+        ! This is temporary for the stepshifter model
+    """
+    output_dict = ModelOutputs.make_output_dict(steps=config.steps[-1])
+    for step in config.steps:
+        df_step = df[[config.depvar, f"step_pred_{step}"]]
+        output_dict[f"step{str(step).zfill(2)}"].y_true = df_step[config.depvar].to_list()
+        output_dict[f"step{str(step).zfill(2)}"].y_score = df_step[f"step_pred_{step}"].to_list()
+        output_dict[f"step{str(step).zfill(2)}"].month_id = df_step.index.get_level_values("month_id").to_list()
+        if df.index.names[1] == "priogrid_gid":
+            output_dict[f"step{str(step).zfill(2)}"].pg_id = df_step.index.get_level_values("priogrid_gid").to_list()
+        elif df.index.names[1] == "country_id":
+            output_dict[f"step{str(step).zfill(2)}"].c_id = df_step.index.get_level_values("country_id").to_list()
+        output_dict[f"step{str(step).zfill(2)}"].out_sample_month = step
+    df_output_dict = ModelOutputs.output_dict_to_dataframe(output_dict)
+    df_output_dict = df_output_dict.reset_index()
+    df_output_dict = df_output_dict.drop(columns=df_output_dict.columns[0])
+    return output_dict, df_output_dict
\ No newline at end of file
diff --git a/common_utils/views_stepshifter_darts/__init__.py b/common_utils/views_stepshifter_darts/__init__.py
new file mode 100644
index 00000000..e8550abf
--- /dev/null
+++ b/common_utils/views_stepshifter_darts/__init__.py
@@ -0,0 +1,2 @@
+from .stepshifter_darts import StepshifterModel
+from darts.models import LightGBMModel, XGBModel, RandomForest
\ No newline at end of file
diff --git a/common_utils/views_stepshifter_darts/stepshifter_darts.py b/common_utils/views_stepshifter_darts/stepshifter_darts.py
new file mode 100644
index 00000000..a1b5f504
--- /dev/null
+++ b/common_utils/views_stepshifter_darts/stepshifter_darts.py
@@ -0,0 +1,173 @@
+import pickle
+import numpy as np
+import pandas as pd
+from darts import TimeSeries
+from darts.models import LightGBMModel, XGBModel
+from darts.models.forecasting.forecasting_model import ModelMeta
+import warnings
+warnings.filterwarnings("ignore")
+import time
+from typing import List, Dict
+
+from views_forecasts.extensions import *
+from .validation import views_validate
+from utils import get_parameters
+
+
+class StepshifterModel:
+    def __init__(self, config: Dict, partitioner_dict: Dict[str, List[int]]):
+        self._initialize_model(config)
+
+        self.steps = config['steps']
+        self.target = config['depvar']
+
+        self._params = get_parameters(config)
+        self._steps_extent = max(self.steps)
+        self._train_start, self._train_end = partitioner_dict['train']
+        self._test_start, self._test_end = partitioner_dict['predict']
+
+        self._models = {}
+        self._independent_variables = None
+        self._time = None
+        self._level = None
+        self._series = None
+
+
+    @views_validate
+    def fit(self, df: pd.DataFrame):
+        self._setup(df)
+        self._prepare_time_series(df)
+        self._fit_models()
+
+
+    @views_validate
+    def predict(self, df: pd.DataFrame) -> pd.DataFrame:
+        pred = self._predict_models()
+        pred[self.target] = df.loc[(slice(self._test_start, self._test_end),), :][self.target]
+        return pred
+
+
+    def _initialize_model(self, config: Dict):
+        self.clf = globals()[config['algorithm']]
+        if not isinstance(self.clf, ModelMeta):
+            raise ValueError(f"Model {config['algorithm']} is not a valid Darts forecasting model. Change the model in the config file.")
+
+
+    def _setup(self, df: pd.DataFrame):
+        self._time = df.index.names[0]
+        self._level = df.index.names[1]
+        self._independent_variables = [c for c in df.columns if c != self.target]
+
+
+    def _prepare_time_series(self, df: pd.DataFrame):
+        df_reset = df.reset_index(level=[1])
+        self._series = TimeSeries.from_group_dataframe(df_reset, group_cols=self._level,
+                                                       value_cols=self._independent_variables + [self.target])
+
+
+    def _fit_models(self):
+        target = [series.slice(self._train_start, self._train_end + 1)[self.target]
+                  for series in self._series]  # ts.slice is different from df.slice
+        past_cov = [series.slice(self._train_start, self._train_end + 1)[self._independent_variables]
+                    for series in self._series]
+        for step in self.steps:
+            model = self.clf(lags_past_covariates=[-step], **self._params)
+            model.fit(target, past_covariates=past_cov)
+            self._models[step] = model
+
+
+    def _predict_models(self):
+        target = [series.slice(self._train_start, self._train_end + 1)[self.target]
+                  for series in self._series]
+
+        preds_by_step = [self._predict_for_step(step, target) for step in self.steps]
+        return pd.concat(preds_by_step, axis=1)
+
+
+    def _predict_for_step(self, step, target):
+        model = self._models[step]
+        horizon = self._test_end - self._test_start + 1
+        ts_pred = model.predict(n=horizon,
+                                series=target,
+                                # darts automatically locates the time period of past_covariates
+                                past_covariates=[series[self._independent_variables] for series in self._series],
+                                show_warnings=False)
+        return self._process_predictions(ts_pred, step)
+
+
+    def _process_predictions(self, ts_pred, step):
+        preds = []
+        for pred in ts_pred:
+            df_pred = pred.pd_dataframe()
+            df_pred.index = pd.MultiIndex.from_product([df_pred.index, [pred.static_covariates.iloc[0, 0]]])
+            df_pred.index.names = [self._time, self._level]
+            df_pred.columns = [f"step_pred_{step}"]
+            df_pred = df_pred.loc[slice(self._test_start, self._test_end), :]
+            preds.append(df_pred)
+        return pd.concat(preds).sort_index()
+
+
+    def save(self, path: str):
+        try:
+            with open(path, "wb") as file:
+                pickle.dump(self, file)
+            print(f"Model successfully saved to {path}")
+        except Exception as e:
+            print(f"Failed to save model: {e}")
+
+
+    @property
+    def models(self):
+        return self._models.values()
+
+'''
+if __name__ == "__main__":
+    month = [*range(1, 600)]
+    pg = [123, 456]
+    idx = pd.MultiIndex.from_product([month, pg], names=['month_id', 'priogrid_gid'])
+    df = pd.DataFrame(index=idx)
+    df['ged_sb_dep'] = df.index.get_level_values(0).astype(float)
+    df['ln_ged_sb'] = df.index.get_level_values(0) + df.index.get_level_values(1) / 1000
+    df['ln_pop_gpw_sum'] = df.index.get_level_values(0) * 10 + df.index.get_level_values(1) / 1000
+    steps = [*range(1, 3 + 1, 1)]
+    partitioner_dict = {"train": (121, 131), "predict": (132, 135)}
+    target = 'ged_sb_dep'
+
+    # df = pd.read_parquet('raw.parquet')
+    # steps = [*range(1, 36 + 1, 1)]
+    # partitioner_dict = {"train": (121, 444), "predict": (445, 492)}
+    # target = df.forecasts.target
+    #
+    start_t = time.time()
+
+    hp_config = {
+        "name": "orange_pasta",
+        "algorithm": "LightGBMModel",
+        "depvar": "ged_sb_dep",
+        "steps": [*range(1, 36 + 1, 1)],
+        "parameters": {
+            "learning_rate": 0.01,
+            "n_estimators": 100,
+            "num_leaves": 31,
+        }
+    }
+
+    stepshifter = StepshifterModel(hp_config, partitioner_dict)
+    stepshifter.fit(df)
+    stepshifter.save('./model.pkl')
+
+    train_t = time.time()
+    minutes = (train_t - start_t) / 60
+    print(f'Done training. Runtime: {minutes:.3f} minutes')
+
+    # stepshift = pd.read_pickle('./model.pkl')
+    pred = stepshifter.predict(df)
+    pred.to_parquet('pred.parquet')
+
+    end_t = time.time()
+    minutes = (end_t - train_t) / 60
+    print(f'Done predicting. Runtime: {minutes:.3f} minutes')
'''
+
+
+
diff --git a/common_utils/views_stepshifter_darts/validation.py b/common_utils/views_stepshifter_darts/validation.py
new file mode 100644
index 00000000..8426ecfa
--- /dev/null
+++ b/common_utils/views_stepshifter_darts/validation.py
@@ -0,0 +1,24 @@
+import functools
+import numpy as np
+import pandas as pd
+
+class ValidationError(Exception):
+    pass
+
+def dataframe_is_right_format(dataframe: pd.DataFrame):
+    try:
+        assert len(dataframe.index.levels) == 2
+    except AssertionError:
+        raise ValidationError("Dataframe must have a two-level index")
+
+    try:
+        assert set(dataframe.dtypes) == {np.dtype(float)}
+    except AssertionError:
+        raise ValidationError("The dataframe must contain only np.float64 floats")
+
+def views_validate(fn):
+    @functools.wraps(fn)
+    def inner(*args, **kwargs):
+        dataframe_is_right_format(args[-1])
+        return fn(*args, **kwargs)
+    return inner
diff --git a/models/black_lodge/configs/config_hyperparameters.py b/models/black_lodge/configs/config_hyperparameters.py
index 660867b7..fa330ccc 100644
--- a/models/black_lodge/configs/config_hyperparameters.py
+++ b/models/black_lodge/configs/config_hyperparameters.py
@@ -1,10 +1,7 @@
 def get_hp_config():
     hp_config = {
-        "clf":{
-            "n_estimators": 300,
-            "n_jobs": 12
-        },
-        "reg":{
+        "steps": [*range(1, 36 + 1, 1)],
+        "parameters": {
             "n_estimators": 300,
             "n_jobs": 12
         }
diff --git a/models/black_lodge/configs/config_meta.py b/models/black_lodge/configs/config_meta.py
index 36436621..f5953592 100644
--- a/models/black_lodge/configs/config_meta.py
+++ b/models/black_lodge/configs/config_meta.py
@@ -5,16 +5,12 @@ def get_meta_config():
     Returns:
     - model_config (dict): A dictionary containing model configuration settings.
""" - model_config = { + meta_config = { "name": "black_lodge", "algorithm": "XGBRFRegressor", "depvar": "ln_ged_sb_dep", "queryset": "fatalities002_baseline", "level": "cm", - "sweep": False, - "force_retrain": False, - "steps": [*range(1, 36 + 1, 1)], - "deployment_status": "production", "creator": "Sara", "preprocessing": "float_it", #new "data_train": "baseline002", #new diff --git a/models/black_lodge/configs/config_sweep.py b/models/black_lodge/configs/config_sweep.py index ee22a298..e57e174a 100644 --- a/models/black_lodge/configs/config_sweep.py +++ b/models/black_lodge/configs/config_sweep.py @@ -9,8 +9,8 @@ def get_sweep_config(): """ sweep_config = { - 'name': 'black_lodge_sweep', - 'method': 'country' #or grid + 'name': 'black_lodge', + 'method': 'grid' } metric = { @@ -21,12 +21,9 @@ def get_sweep_config(): sweep_config['metric'] = metric parameters_dict = { - "cls_n_estimators": {"values": [100, 200]}, - "cls_learning_rate": {"values": [0.05]}, - "cls_n_jobs": {"values": [12]}, - "reg_n_estimators": {"values": [100, 200]}, - "reg_learning_rate": {"values": [0.05]}, - "reg_n_jobs": {"values": [12]} + "n_estimators": {"values": [100, 200]}, + "learning_rate": {"values": [0.05]}, + "n_jobs": {"values": [12]}, } #taken from xiaolong's code sweep_config['parameters'] = parameters_dict diff --git a/models/black_lodge/main.py b/models/black_lodge/main.py index 9723bc43..4196939d 100644 --- a/models/black_lodge/main.py +++ b/models/black_lodge/main.py @@ -1,40 +1,32 @@ import time - import wandb - import sys from pathlib import Path PATH = Path(__file__) -sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) # PATH_COMMON_UTILS -from set_path import setup_project_paths, setup_artifacts_paths +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths setup_project_paths(PATH) -from utils_cli_parser import parse_args, validate_arguments +from utils_cli_parser import parse_args, validate_arguments from execute_model_runs import execute_sweep_run, execute_single_run -print("Current working directory: ", Path.cwd()) if __name__ == "__main__": - args = parse_args() - #print(args) #use for debugging - validate_arguments(args) + # wandb login wandb.login() start_t = time.time() - if args.sweep == True: - # need to check if you are running a sweep or not, because the sweep will overwrite the train and evaluate flags execute_sweep_run(args) - elif args.sweep == False: execute_single_run(args) - end_t = time.time() - minutes = (end_t - start_t)/60 + minutes = (end_t - start_t) / 60 print(f'Done. 
Runtime: {minutes:.3f} minutes') \ No newline at end of file diff --git a/models/black_lodge/src/forecasting/generate_forecast.py b/models/black_lodge/src/forecasting/generate_forecast.py new file mode 100644 index 00000000..893deba0 --- /dev/null +++ b/models/black_lodge/src/forecasting/generate_forecast.py @@ -0,0 +1,47 @@ + +import sys +import pandas as pd +from pathlib import Path +import pickle + +PATH = Path(__file__) +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths, setup_data_paths, setup_artifacts_paths +setup_project_paths(PATH) + +from utils import get_partition_data, get_standardized_df +from utils_artifacts import get_latest_model_artifact + + +def forecast_model_artifact(config, artifact_name): + PATH_RAW, _, PATH_GENERATED = setup_data_paths(PATH) + PATH_ARTIFACTS = setup_artifacts_paths(PATH) + + # if an artifact name is provided through the CLI, use it. + # Otherwise, get the latest model artifact based on the run type + if artifact_name: + print(f"Using (non-default) artifact: {artifact_name}") + + if not artifact_name.endswith('.pkl'): + artifact_name += '.pkl' + PATH_ARTIFACT = PATH_ARTIFACTS / artifact_name + else: + # use the latest model artifact based on the run type + print(f"Using latest (default) run type ({config.run_type}) specific artifact") + PATH_ARTIFACT = get_latest_model_artifact(PATH_ARTIFACTS, config.run_type) + + config["timestamp"] = PATH_ARTIFACT.stem[-15:] + dataset = pd.read_parquet(PATH_RAW / f'raw_{config.run_type}.parquet') + + try: + stepshift_model = pd.read_pickle(PATH_ARTIFACT) + except: + raise FileNotFoundError(f"Model artifact not found at {PATH_ARTIFACT}") + + df_predictions = stepshift_model.predict("forecasting", "predict", get_partition_data(dataset, config.run_type)) + df_predictions = get_standardized_df(df_predictions, config.run_type) + + predictions_path = f'{PATH_GENERATED}/predictions_{config.steps[-1]}_{config.run_type}_{config.timestamp}.pkl' + with open(predictions_path, 'wb') as file: + pickle.dump(df_predictions, file) diff --git a/models/black_lodge/src/management/execute_model_runs.py b/models/black_lodge/src/management/execute_model_runs.py index 2f1e12f5..a766aca9 100644 --- a/models/black_lodge/src/management/execute_model_runs.py +++ b/models/black_lodge/src/management/execute_model_runs.py @@ -1,85 +1,49 @@ -import wandb - import sys from pathlib import Path +import wandb PATH = Path(__file__) -sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) # PATH_COMMON_UTILS -from set_path import setup_project_paths, setup_artifacts_paths +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths setup_project_paths(PATH) -from config_sweep import get_sweep_config from config_hyperparameters import get_hp_config +from config_meta import get_meta_config +from config_sweep import get_sweep_config from execute_model_tasks import execute_model_tasks +from get_data import get_data +from utils import update_hp_config, update_sweep_config def execute_sweep_run(args): - """ - Function to execute a hyperparameter sweep of the model. This function is called when the user specifies a sweep in the command line. - - Args: - - args (argparse.Namespace): Arguments parsed from the command line. 
- - Returns: - - None - - Example: - ``` - python main.py --sweep - - ``` - """ - print('Running sweep...') + get_data(args) sweep_config = get_sweep_config() - project = sweep_config['name'] - sweep_config['parameters']['run_type'] = {'value' : "calibration"} - sweep_config['parameters']['sweep'] = {'value' : True} + meta_config = get_meta_config() + update_sweep_config(sweep_config, args, meta_config) - sweep_id = wandb.sweep(sweep_config, project=project, entity='views_pipeline') # entity is the team name - - wandb.agent(sweep_id, execute_model_tasks, entity='views_pipeline') # entity is the team name - Seem like it needs to be botb in sweep_id and agent + project = f"{sweep_config['name']}_sweep" # we can name the sweep in the config file + sweep_id = wandb.sweep(sweep_config, project=project, entity='views_pipeline') + wandb.agent(sweep_id, execute_model_tasks, entity='views_pipeline') def execute_single_run(args): - """ - Function to execute a single run of the model. This function is called when the user specifies a single run in the command line. - - Args: - - args (argparse.Namespace): Arguments parsed from the command line. - - Returns: - - None - - Examples: - ``` - python main.py --run_type calibration --train --evaluate --artifact_name my_model - - python main.py --run_type forecasting --artifact_name my_model + get_data(args) - python main.py --run_type testing --train --evaluate --artifact_name my_model - ``` + hp_config = get_hp_config() + meta_config = get_meta_config() + update_hp_config(hp_config, args, meta_config) - """ - - # get hyperparameters. IS THE ISSUE UP HERE? - hyperparameters = get_hp_config() - hyperparameters['run_type'] = args.run_type - hyperparameters['sweep'] = False - - # get run type and denoting project name - check convention! 
- project = f"purple_alien_{args.run_type}" + project = f"{hp_config['name']}_{args.run_type}" if args.run_type == 'calibration' or args.run_type == 'testing': - - #model_run_manager(config = hyperparameters, project = project, train = args.train, eval = args.evaluate, forecast = False, artifact_name = args.artifact_name) - execute_model_tasks(config = hyperparameters, project = project, train = args.train, eval = args.evaluate, forecast = False, artifact_name = args.artifact_name) + execute_model_tasks(config=hp_config, project=project, train=args.train, eval=args.evaluate, + forecast=False, artifact_name=args.artifact_name) elif args.run_type == 'forecasting': - - #print('True forecasting ->->->->') - #model_run_manager(config = hyperparameters, project = project, train = False, eval = False, forecast=True, artifact_name = args.artifact_name) - execute_model_tasks(config = hyperparameters, project = project, train = False, eval = False, forecast=True, artifact_name = args.artifact_name) + execute_model_tasks(config=hp_config, project=project, train=args.train, eval=False, forecast=args.forecast, + artifact_name=args.artifact_name) else: raise ValueError(f"Invalid run type: {args.run_type}") \ No newline at end of file diff --git a/models/black_lodge/src/management/execute_model_tasks.py b/models/black_lodge/src/management/execute_model_tasks.py index 128d588f..82985eef 100644 --- a/models/black_lodge/src/management/execute_model_tasks.py +++ b/models/black_lodge/src/management/execute_model_tasks.py @@ -1,28 +1,28 @@ -import wandb - import sys from pathlib import Path +import wandb PATH = Path(__file__) -sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) # PATH_COMMON_UTILS -from set_path import setup_project_paths, setup_artifacts_paths +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths setup_project_paths(PATH) -from utils import get_data +from evaluate_model import evaluate_model_artifact +from evaluate_sweep import evaluate_sweep +from generate_forecast import forecast_model_artifact +from train_model import train_model_artifact +from utils import get_model, split_hurdle_parameters from utils_wandb import add_wandb_monthly_metrics -from train_model import make, training_loop, train_model_artifact #handle_training -from evaluate_model import evaluate_posterior, evaluate_model_artifact #handle_evaluation -from generate_forecast import forecast_with_model_artifact #handle_forecasting - -def execute_model_tasks(config = None, project = None, train = None, eval = None, forecast = None, artifact_name = None): +def execute_model_tasks(config=None, project=None, train=None, eval=None, forecast=None, artifact_name=None): """ Executes various model-related tasks including training, evaluation, and forecasting. This function manages the execution of different tasks such as training the model, evaluating an existing model, or performing forecasting. - It also initializes the WandB project. + It also initializes the WandB project. Args: config: Configuration object containing parameters and settings. @@ -33,43 +33,42 @@ def execute_model_tasks(config = None, project = None, train = None, eval = None artifact_name (optional): Specific name of the model artifact to load for evaluation or forecasting. 
""" - # Define the path for the artifacts - PATH_ARTIFACTS = setup_artifacts_paths(PATH) - - #device = setup_device() - # Initialize WandB - with wandb.init(project=project, entity="views_pipeline", config=config): # project and config ignored when running a sweep - + with wandb.init(project=project, entity="views_pipeline", + config=config): # project and config ignored when running a sweep + # add the monthly metrics to WandB - add_wandb_monthly_metrics() + add_wandb_monthly_metrics() # Update config from WandB initialization above config = wandb.config - # Retrieve data (partition) based on the configuration - views_vol = get_data(config) # a bit HydraNet specific, but it is fine for now. If statment or move to handle_training, handle_evaluation, and handle_forecasting? + # W&B does not directly support nested dictionaries for hyperparameters + # This will make the sweep config super ugly, but we don't have to distinguish between sweep and single runs + if config['sweep'] and config['algorithm'] == "HurdleRegression": + config['parameters'] = {} + config['parameters']['clf'], config['parameters']['reg'] = split_hurdle_parameters(config) - # Handle the sweep runs - if config.sweep: # If we are running a sweep, always train and evaluate + model = get_model(config) + print(model) - model, criterion, optimizer, scheduler = make(config, device) - training_loop(config, model, criterion, optimizer, scheduler, views_vol, device) - print('Done training') + if config['sweep']: + print("Sweeping...") + stepshift_model = train_model_artifact(config, model) + print("Evaluating...") + evaluate_sweep(config, stepshift_model) - evaluate_posterior(model, views_vol, config, device) - print('Done testing') # Handle the single model runs: train and save the model as an artifact if train: - #handle_training(config, device, views_vol, PATH_ARTIFACTS) - train_model_artifact(config, device, views_vol, PATH_ARTIFACTS) + print("Training...") + train_model_artifact(config, model) # Handle the single model runs: evaluate a trained model (artifact) if eval: - #handle_evaluation(config, device, views_vol, PATH_ARTIFACTS, artifact_name) - evaluate_model_artifact(config, device, views_vol, PATH_ARTIFACTS, artifact_name) + print("Evaluating...") + evaluate_model_artifact(config, artifact_name) if forecast: - #handle_forecasting(config, device, views_vol, PATH_ARTIFACTS, artifact_name) - forecast_with_model_artifact(config, device, views_vol, PATH_ARTIFACTS, artifact_name) \ No newline at end of file + print("Forecasting...") + forecast_model_artifact(config, artifact_name) \ No newline at end of file diff --git a/models/black_lodge/src/training/train_model.py b/models/black_lodge/src/training/train_model.py index 4af8746d..02cc3408 100644 --- a/models/black_lodge/src/training/train_model.py +++ b/models/black_lodge/src/training/train_model.py @@ -1,69 +1,41 @@ -import sys -from pathlib import Path +from datetime import datetime import pandas as pd +from pathlib import Path PATH = Path(__file__) -sys.path.insert(0, str(Path(*[i for i in PATH.parts[:PATH.parts.index("views_pipeline")+1]]) / "common_utils")) from set_path import setup_project_paths, setup_data_paths, setup_artifacts_paths setup_project_paths(PATH) -from xgboost import XGBRFRegressor - -from views_partitioning.data_partitioner import DataPartitioner -from views_forecasts.extensions import * +from set_partition import get_partitioner_dict from stepshift.views import StepshiftedModels +from utils import get_partition_data +from views_forecasts.extensions 
import * +from views_partitioning.data_partitioner import DataPartitioner from views_stepshift.run import ViewsRun -from hurdle_model import HurdleRegression -from get_data import get_partition_data - -from config_meta import get_meta_config -from config_hyperparameters import get_hp_config - -def train(model_config, para_config): - print("Training...") - if not model_config["sweep"]: - PATH_RAW, _, _ = setup_data_paths(PATH) - PATH_ARTIFACTS = setup_artifacts_paths(PATH) - dataset = pd.read_parquet(PATH_RAW / 'raw.parquet') - if model_config["algorithm"] == "HurdleRegression": - model = HurdleRegression(clf_name=model_config["clf_name"], reg_name=model_config["reg_name"], clf_params=para_config["clf"], reg_params=para_config["reg"]) - else: - model = globals()[model_config["algorithm"]](**para_config) - # print(model) - - # Train partition - try: - stepshifter_model_calib = pd.read_pickle(PATH_ARTIFACTS / "model_calib_partition.pkl") - except: - stepshifter_model_calib = stepshift_training(model_config, "calib", model, get_partition_data(dataset, "calibration")) - stepshifter_model_calib.save(PATH_ARTIFACTS /"model_calib_partition.pkl") - # Test partition - try: - stepshifter_model_test = pd.read_pickle(PATH_ARTIFACTS / "model_test_partition.pkl") - except: - stepshifter_model_test = stepshift_training(model_config, "test", model, get_partition_data(dataset, "testing")) - stepshifter_model_test.save(PATH_ARTIFACTS / "model_test_partition.pkl") +def train_model_artifact(config, model): + # print(config) + PATH_RAW, _, _ = setup_data_paths(PATH) + PATH_ARTIFACTS = setup_artifacts_paths(PATH) - # Future partition - try: - stepshifter_model_future = pd.read_pickle(PATH_ARTIFACTS / "model_forecast_partition.pkl") - except: - stepshifter_model_future = stepshift_training(model_config, "forecast", model, get_partition_data(dataset, "forecasting")) - stepshifter_model_future.save(PATH_ARTIFACTS / "model_forecast_partition.pkl") + run_type = config['run_type'] + dataset = pd.read_parquet(PATH_RAW / f'raw_{run_type}.parquet') + stepshift_model = stepshift_training(config, run_type, model, get_partition_data(dataset, run_type)) + if not config["sweep"]: + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + model_filename = f"{config.run_type}_model_{timestamp}.pkl" + stepshift_model.save(PATH_ARTIFACTS / model_filename) + return stepshift_model -def stepshift_training(model_config, partition_name, model, dataset): - steps = model_config["steps"] - target = model_config["depvar"] - partition = DataPartitioner({partition_name: model_config[f"{partition_name}_partitioner_dict"]}) - stepshifter_def = StepshiftedModels(model, steps, target) - stepshifter_model = ViewsRun(partition, stepshifter_def) - stepshifter_model.fit(partition_name, "train", dataset) - return stepshifter_model -if __name__ == "__main__": - model_config = get_model_config() - para_config = get_hp_config() - train(model_config, para_config) \ No newline at end of file +def stepshift_training(config, partition_name, model, dataset): + steps = config["steps"] + target = config["depvar"] + partitioner_dict = get_partitioner_dict(partition_name) + partition = DataPartitioner({partition_name: partitioner_dict}) + stepshift_def = StepshiftedModels(model, steps, target) + stepshift_model = ViewsRun(partition, stepshift_def) + stepshift_model.fit(partition_name, "train", dataset) + return stepshift_model \ No newline at end of file diff --git a/models/black_lodge/src/utils/utils.py b/models/black_lodge/src/utils/utils.py new file mode 
100644 index 00000000..72b46a4d --- /dev/null +++ b/models/black_lodge/src/utils/utils.py @@ -0,0 +1,141 @@ +import sys +import numpy as np +from lightgbm import LGBMRegressor +from xgboost import XGBRegressor +from pathlib import Path +import pickle + +PATH = Path(__file__) +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths +setup_project_paths(PATH) + +from hurdle_model import HurdleRegression +from set_partition import get_partitioner_dict +from views_forecasts.extensions import * + + +def ensure_float64(df): + """ + Check if the DataFrame only contains np.float64 types. If not, raise a warning + and convert the DataFrame to use np.float64 for all its numeric columns. + """ + + non_float64_cols = df.select_dtypes(include=['number']).columns[df.select_dtypes(include=['number']).dtypes != np.float64] + + if len(non_float64_cols) > 0: + print(f"Warning: DataFrame contains non-np.float64 numeric columns. Converting the following columns: {', '.join(non_float64_cols)}") + + for col in non_float64_cols: + df[col] = df[col].astype(np.float64) + + return df + + +def get_model(config): + if config["algorithm"] == "HurdleRegression": + model = HurdleRegression(clf_name=config["model_clf"], reg_name=config["model_reg"], + clf_params=config["parameters"]["clf"], reg_params=config["parameters"]["reg"]) + else: + parameters = get_parameters(config) + model = globals()[config["algorithm"]](**parameters) + return model + + +def get_parameters(config): + ''' + Get the parameters from the config file. + If not sweep, then get directly from the config file, otherwise have to remove some parameters. + ''' + + if config["sweep"]: + keys_to_remove = ["algorithm", "depvar", "steps", "sweep", "run_type", "model_cls", "model_reg"] + parameters = {k: v for k, v in config.items() if k not in keys_to_remove} + else: + parameters = config["parameters"] + + return parameters + + +def get_partition_data(df, run_type): + partitioner_dict = get_partitioner_dict(run_type) + + month_first = partitioner_dict['train'][0] + + if run_type in ['calibration', 'testing', 'forecasting']: + month_last = partitioner_dict['predict'][1] + 1 # forecasting also needs to get predict months even if they are empty + else: + raise ValueError('partition should be either "calibration", "testing" or "forecasting"') + + month_range = np.arange(month_first, month_last, 1) # predict[1] is the last month to predict, so we need to add 1 to include it. 
+ + df = df[df.index.get_level_values("month_id").isin(month_range)].copy() # temporal subset + + return df + + +def get_standardized_df(df, run_type): + if run_type in ['calibration', 'testing']: + cols = [df.forecasts.target] + df.forecasts.prediction_columns + elif run_type == "forecasting": + cols = df.forecasts.prediction_columns + df = df.replace([np.inf, -np.inf], 0)[cols] + df = df.mask(df < 0, 0) + return df + + +def save_model_outputs(df_evaluation, df_output, PATH_GENERATED, config): + Path(PATH_GENERATED).mkdir(parents=True, exist_ok=True) + print(f'PATH to generated data: {PATH_GENERATED}') + + # Save the DataFrame of model outputs + outputs_path = f'{PATH_GENERATED}/df_output_{config.steps[-1]}_{config.run_type}_{config.timestamp}.pkl' + with open(outputs_path, 'wb') as file: + pickle.dump(df_output, file) + + # Save the DataFrame of evaluation metrics + evaluation_path = f'{PATH_GENERATED}/df_evaluation_{config.steps[-1]}_{config.run_type}_{config.timestamp}.pkl' + with open(evaluation_path, 'wb') as file: + pickle.dump(df_evaluation, file) + + +def split_hurdle_parameters(parameters_dict): + """ + Split the parameters dictionary into two separate dictionaries, one for the + classification model and one for the regression model. + """ + + cls_dict = {} + reg_dict = {} + + for key, value in parameters_dict.items(): + if key.startswith('cls_'): + cls_key = key.replace('cls_', '') + cls_dict[cls_key] = value + elif key.startswith('reg_'): + reg_key = key.replace('reg_', '') + reg_dict[reg_key] = value + + return cls_dict, reg_dict + + +def update_hp_config(hp_config, args, meta_config): + hp_config['run_type'] = args.run_type + hp_config['sweep'] = False + hp_config['name'] = meta_config['name'] + hp_config['depvar'] = meta_config['depvar'] + hp_config['algorithm'] = meta_config['algorithm'] + if meta_config['algorithm'] == 'HurdleRegression': + hp_config['model_clf'] = meta_config['model_clf'] + hp_config['model_reg'] = meta_config['model_reg'] + + +def update_sweep_config(sweep_config, args, meta_config): + sweep_config['parameters']['run_type'] = {'value': args.run_type} + sweep_config['parameters']['sweep'] = {'value': True} + sweep_config['parameters']['depvar'] = {'value': meta_config['depvar']} + sweep_config['parameters']['algorithm'] = {'value': meta_config['algorithm']} + if meta_config['algorithm'] == 'HurdleRegression': + sweep_config['parameters']['model_clf'] = {'value': meta_config['model_clf']} + sweep_config['parameters']['model_reg'] = {'value': meta_config['model_reg']} \ No newline at end of file diff --git a/models/black_lodge/src/utils/utils_wandb.py b/models/black_lodge/src/utils/utils_wandb.py new file mode 100644 index 00000000..5ba07f61 --- /dev/null +++ b/models/black_lodge/src/utils/utils_wandb.py @@ -0,0 +1,92 @@ +from pathlib import Path +import sys +import wandb + +PATH = Path(__file__) +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths +setup_project_paths(PATH) + + +# there are things in other utils that should be here... + +def add_wandb_monthly_metrics(): + """ + Defines the WandB metrics for monthly evaluation. + + This function sets up the metrics for logging monthly evaluation metrics in WandB. + It defines a step metric called "monthly/out_sample_month" and specifies that any + metric under the "monthly" namespace will use "monthly/out_sample_month" as its step metric. 
+
+    Usage:
+        This function should be called at the start of a WandB run to configure
+        how metrics are tracked over time steps.
+
+    Example:
+        >>> wandb.init(project="example_project")
+        >>> add_wandb_monthly_metrics()
+        >>> wandb.log({"monthly/mean_squared_error": 0.02, "monthly/out_sample_month": 1})
+
+    Notes:
+        - The step metric "monthly/out_sample_month" will be used to log metrics for each time step (i.e. forecasted month).
+        - Any metric prefixed with "monthly/" will follow the "monthly/out_sample_month" step metric.
+
+    See Also:
+        - `wandb.define_metric`: WandB API for defining metrics and their step relationships.
+    """
+
+    # Define "new" monthly metrics for WandB logging
+    wandb.define_metric("monthly/out_sample_month")
+    wandb.define_metric("monthly/*", step_metric="monthly/out_sample_month")
+
+
+def generate_wandb_log_dict(log_dict, dict_of_eval_dicts, step):
+    """
+    ---Adapted version from Simon's code---
+
+    Adds evaluation metrics to a WandB log dictionary for a specific time step (i.e. forecasted month).
+
+    This function updates the provided log dictionary with the evaluation metrics for
+    a specified step, formatted for WandB logging. It appends the metrics
+    to the log dictionary using the "monthly/{metric_name}" format.
+
+    Args:
+        log_dict (dict): The log dictionary to be updated with new metrics.
+        dict_of_eval_dicts (Dict[str, Dict[str, float]]): A dictionary of evaluation metrics,
+            where the keys are step labels (e.g., 'step01') and the values are dictionaries
+            mapping metric names to values.
+        step (str): The specific time step (forecasted month) for which metrics are logged (e.g., 'step01').
+
+    Returns:
+        dict: The updated log dictionary with the evaluation metrics for the specified step.
+
+    Example:
+        >>> log_dict = {}
+        >>> dict_of_eval_dicts = {
+        ...     'step01': {'MSE': 0.1, 'AP': 0.2, 'AUC': 0.3, 'Brier': 0.4},
+        ...     'step02': {'MSE': 0.2, 'AP': 0.3, 'AUC': 0.4, 'Brier': 0.5},
+        ... }
+        >>> log_dict = generate_wandb_log_dict(log_dict, dict_of_eval_dicts, 'step01')
+        >>> print(log_dict)
+        {'monthly/MSE': 0.1, 'monthly/AP': 0.2, 'monthly/AUC': 0.3, 'monthly/Brier': 0.4}
+
+    Notes:
+        - Only non-None metric values are added to the log dictionary.
+        - The metrics are formatted with the "monthly/{metric_name}" naming convention for WandB logging.
+
+    See Also:
+        - `wandb.log`: WandB API for logging metrics.
+ """ + + for key, value in dict_of_eval_dicts[step].items(): + if value is not None: + log_dict[f"monthly/{key}"] = value + + return log_dict \ No newline at end of file From 6bed9a01d4d92d59df48bf0dd8bd41fc0c0a44da Mon Sep 17 00:00:00 2001 From: xiaolongsun <95378566+xiaolong0728@users.noreply.github.com> Date: Fri, 2 Aug 2024 10:14:13 +0200 Subject: [PATCH 23/24] fix forecasting problems --- common_utils/views_stepshift/run.py | 4 +- .../src/forecasting/generate_forecast.py | 9 ++- .../src/offline_evaluation/evaluate_model.py | 58 +++++++++++++++++++ .../src/offline_evaluation/evaluate_sweep.py | 36 ++++++++++++ models/black_lodge/src/utils/utils.py | 7 ++- 5 files changed, 107 insertions(+), 7 deletions(-) create mode 100644 models/black_lodge/src/offline_evaluation/evaluate_model.py create mode 100644 models/black_lodge/src/offline_evaluation/evaluate_sweep.py diff --git a/common_utils/views_stepshift/run.py b/common_utils/views_stepshift/run.py index f27dcca2..094d1692 100644 --- a/common_utils/views_stepshift/run.py +++ b/common_utils/views_stepshift/run.py @@ -132,12 +132,12 @@ def future_point_predict(self, time: int, data: pd.DataFrame, keep_specific: boo if proba: predictions = self._models.predict_proba( data.loc[time - self._models._steps_extent: time], - combine=True + combine=False ) else: predictions = self._models.predict( data.loc[time - self._models._steps_extent: time], - combine = True + combine =False ) if not keep_specific: diff --git a/models/black_lodge/src/forecasting/generate_forecast.py b/models/black_lodge/src/forecasting/generate_forecast.py index 893deba0..62519d20 100644 --- a/models/black_lodge/src/forecasting/generate_forecast.py +++ b/models/black_lodge/src/forecasting/generate_forecast.py @@ -1,4 +1,3 @@ - import sys import pandas as pd from pathlib import Path @@ -10,6 +9,7 @@ from set_path import setup_project_paths, setup_data_paths, setup_artifacts_paths setup_project_paths(PATH) +from set_partition import get_partitioner_dict from utils import get_partition_data, get_standardized_df from utils_artifacts import get_latest_model_artifact @@ -39,9 +39,12 @@ def forecast_model_artifact(config, artifact_name): except: raise FileNotFoundError(f"Model artifact not found at {PATH_ARTIFACT}") - df_predictions = stepshift_model.predict("forecasting", "predict", get_partition_data(dataset, config.run_type)) + partition = get_partitioner_dict(config.run_type)['predict'] + df_predictions = stepshift_model.future_point_predict(partition[0]-1, + get_partition_data(dataset, config.run_type), + keep_specific=True) df_predictions = get_standardized_df(df_predictions, config.run_type) predictions_path = f'{PATH_GENERATED}/predictions_{config.steps[-1]}_{config.run_type}_{config.timestamp}.pkl' with open(predictions_path, 'wb') as file: - pickle.dump(df_predictions, file) + pickle.dump(df_predictions, file) \ No newline at end of file diff --git a/models/black_lodge/src/offline_evaluation/evaluate_model.py b/models/black_lodge/src/offline_evaluation/evaluate_model.py new file mode 100644 index 00000000..a6f5374c --- /dev/null +++ b/models/black_lodge/src/offline_evaluation/evaluate_model.py @@ -0,0 +1,58 @@ +import sys +from pathlib import Path +import warnings +warnings.filterwarnings("ignore") +import wandb + +PATH = Path(__file__) +sys.path.insert(0, str(Path( + *[i for i in PATH.parts[:PATH.parts.index("views_pipeline") + 1]]) / "common_utils")) # PATH_COMMON_UTILS +from set_path import setup_project_paths, setup_data_paths, setup_artifacts_paths 
+setup_project_paths(PATH) + +from utils import save_model_outputs, get_partition_data, get_standardized_df +from utils_artifacts import get_latest_model_artifact +from utils_evaluation_metrics import generate_metric_dict +from utils_model_outputs import generate_output_dict +from utils_wandb import generate_wandb_log_dict +from views_forecasts.extensions import * + + +def evaluate_model_artifact(config, artifact_name): + PATH_RAW, _, PATH_GENERATED = setup_data_paths(PATH) + PATH_ARTIFACTS = setup_artifacts_paths(PATH) + + # if an artifact name is provided through the CLI, use it. + # Otherwise, get the latest model artifact based on the run type + if artifact_name: + print(f"Using (non-default) artifact: {artifact_name}") + + if not artifact_name.endswith('.pkl'): + artifact_name += '.pkl' + PATH_ARTIFACT = PATH_ARTIFACTS / artifact_name + else: + # use the latest model artifact based on the run type + print(f"Using latest (default) run type ({config.run_type}) specific artifact") + PATH_ARTIFACT = get_latest_model_artifact(PATH_ARTIFACTS, config.run_type) + + config["timestamp"] = PATH_ARTIFACT.stem[-15:] + dataset = pd.read_parquet(PATH_RAW / f'raw_{config.run_type}.parquet') + + try: + stepshift_model = pd.read_pickle(PATH_ARTIFACT) + except: + raise FileNotFoundError(f"Model artifact not found at {PATH_ARTIFACT}") + + df = stepshift_model.predict(config.run_type, "predict", get_partition_data(dataset, config.run_type)) + df = get_standardized_df(df, config.run_type) + + evaluation, df_evaluation = generate_metric_dict(df, config) + output, df_output = generate_output_dict(df, config) + for t in config.steps: + log_dict = {} + log_dict["monthly/out_sample_month"] = t + step = f"step{str(t).zfill(2)}" + log_dict = generate_wandb_log_dict(log_dict, evaluation, step) + wandb.log(log_dict) + + save_model_outputs(df_evaluation, df_output, PATH_GENERATED, config) \ No newline at end of file diff --git a/models/black_lodge/src/offline_evaluation/evaluate_sweep.py b/models/black_lodge/src/offline_evaluation/evaluate_sweep.py new file mode 100644 index 00000000..86f8f231 --- /dev/null +++ b/models/black_lodge/src/offline_evaluation/evaluate_sweep.py @@ -0,0 +1,36 @@ +from pathlib import Path +import pandas as pd +import wandb +from sklearn.metrics import mean_squared_error + +PATH = Path(__file__) +from set_path import setup_project_paths, setup_data_paths +setup_project_paths(PATH) + +from utils import get_partition_data, get_standardized_df +from utils_wandb import generate_wandb_log_dict +from utils_evaluation_metrics import generate_metric_dict + + +def evaluate_sweep(config, stepshift_model): + PATH_RAW, _, _ = setup_data_paths(PATH) + + dataset = pd.read_parquet(PATH_RAW / f'raw_{config.run_type}.parquet') + + df = stepshift_model.predict(config.run_type, "predict", get_partition_data(dataset, config.run_type)) + df = get_standardized_df(df, config.run_type) + + # Temporarily keep this because the metric to minimize is MSE + pred_cols = [f"step_pred_{str(i)}" for i in config.steps] + df["mse"] = df.apply(lambda row: mean_squared_error([row[config.depvar]] * 36, + [row[col] for col in pred_cols]), axis=1) + + wandb.log({'MSE': df['mse'].mean()}) + + evaluation, df_evaluation = generate_metric_dict(df, config) + for t in config.steps: + log_dict = {} + log_dict["monthly/out_sample_month"] = t + step = f"step{str(t).zfill(2)}" + log_dict = generate_wandb_log_dict(log_dict, evaluation, step) + wandb.log(log_dict) \ No newline at end of file diff --git a/models/black_lodge/src/utils/utils.py 
b/models/black_lodge/src/utils/utils.py index 72b46a4d..3b361c21 100644 --- a/models/black_lodge/src/utils/utils.py +++ b/models/black_lodge/src/utils/utils.py @@ -2,6 +2,7 @@ import numpy as np from lightgbm import LGBMRegressor from xgboost import XGBRegressor +from sklearn.ensemble import RandomForestClassifier from pathlib import Path import pickle @@ -63,8 +64,10 @@ def get_partition_data(df, run_type): month_first = partitioner_dict['train'][0] - if run_type in ['calibration', 'testing', 'forecasting']: - month_last = partitioner_dict['predict'][1] + 1 # forecasting also needs to get predict months even if they are empty + if run_type in ['calibration', 'testing']: + month_last = partitioner_dict['predict'][1] + 1 + elif run_type == 'forecasting': + month_last = partitioner_dict['predict'][0] else: raise ValueError('partition should be either "calibration", "testing" or "forecasting"') From 05d959aca92d3f8ff9728f39192d8b362df6a8dd Mon Sep 17 00:00:00 2001 From: Marina Date: Mon, 5 Aug 2024 19:48:18 +0200 Subject: [PATCH 24/24] forecasting script --- models/black_lodge/src/forecasting/generate_forecast.py | 0 models/black_lodge/src/offline_evaluation/evaluate_model.py | 0 models/black_lodge/src/offline_evaluation/evaluate_sweep.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 models/black_lodge/src/forecasting/generate_forecast.py create mode 100644 models/black_lodge/src/offline_evaluation/evaluate_model.py create mode 100644 models/black_lodge/src/offline_evaluation/evaluate_sweep.py diff --git a/models/black_lodge/src/forecasting/generate_forecast.py b/models/black_lodge/src/forecasting/generate_forecast.py new file mode 100644 index 00000000..e69de29b diff --git a/models/black_lodge/src/offline_evaluation/evaluate_model.py b/models/black_lodge/src/offline_evaluation/evaluate_model.py new file mode 100644 index 00000000..e69de29b diff --git a/models/black_lodge/src/offline_evaluation/evaluate_sweep.py b/models/black_lodge/src/offline_evaluation/evaluate_sweep.py new file mode 100644 index 00000000..e69de29b
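
For reference, a minimal sketch of the partition arithmetic that the get_partition_data change in the last code patch implements. The partitioner_dict bounds below are illustrative only (borrowed from the commented-out example in stepshifter_darts.py); the real bounds come from set_partition.get_partitioner_dict:

    import numpy as np

    # Illustrative bounds only -- the real values come from set_partition.get_partitioner_dict
    partitioner_dict = {"train": (121, 444), "predict": (445, 492)}

    month_first = partitioner_dict["train"][0]

    # calibration / testing: the predict months are evaluated against observed data,
    # so the exclusive bound is predict[1] + 1
    month_last = partitioner_dict["predict"][1] + 1
    print(np.arange(month_first, month_last)[-1])  # 492, the last month with ground truth

    # forecasting: stop before the first predict month -- those rows do not exist in the
    # raw data and are generated by ViewsRun.future_point_predict from the last observed month
    month_last = partitioner_dict["predict"][0]
    print(np.arange(month_first, month_last)[-1])  # 444, the last observed input month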