5 changes: 3 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "views_stepshifter"
version = "0.4.0"
version = "1.0.0"
description = ""
authors = [
"Xiaolong Sun <xiaolong.sun@pcr.uu.se>",
@@ -11,13 +11,14 @@ readme = "README.md"

[tool.poetry.dependencies]
python = ">=3.11,<3.15"
views_pipeline_core = ">=1.0.0,<2.0.0"
views_pipeline_core = ">=2.0.0,<3.0.0"
scikit-learn = "^1.6.0"
pandas = "^1.5.3"
numpy = "^1.25.2"
darts = "^0.30.0"
lightgbm = "4.6.0"
views_forecasts = "^0.5.5"
scipy = "1.15.1" # error with latest scipy 1.16.0. see https://github.com/statsmodels/statsmodels/issues?q=_lazywhere



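The exact scipy pin is a workaround: per the issue search linked in the comment above, the breakage appears to stem from statsmodels' use of scipy's private `_lazywhere` helper, which changed in scipy 1.16.0. A minimal, illustrative sketch for checking a resolved environment against the exact pins in this file:

```python
# Illustrative sanity check: compare installed versions against the exact
# pins above (scipy 1.15.1, lightgbm 4.6.0) before running the pipeline.
import importlib.metadata as metadata

for package, pinned in {"scipy": "1.15.1", "lightgbm": "4.6.0"}.items():
    installed = metadata.version(package)
    flag = "OK" if installed == pinned else f"MISMATCH (pin is {pinned})"
    print(f"{package} {installed}: {flag}")
```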
2 changes: 1 addition & 1 deletion tests/test_hurdle_model.py
@@ -125,7 +125,7 @@ def test_predict(sample_config, sample_partitioner_dict, sample_dataframe):
patch("views_stepshifter.models.hurdle_model.as_completed") as mock_as_completed, \
patch("views_stepshifter.models.hurdle_model.tqdm.tqdm") as mock_tqdm, \
patch("views_stepshifter.models.hurdle_model.ProcessPoolExecutor") as mock_ProcessPoolExecutor, \
patch("views_stepshifter.models.hurdle_model.ModelManager._resolve_evaluation_sequence_number") as mock_sequence_number:
patch("views_stepshifter.models.hurdle_model.ForecastingModelManager._resolve_evaluation_sequence_number") as mock_sequence_number:


# the else branch
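The updated patch target follows this PR's switch from `ModelManager` to `ForecastingModelManager`: the test must patch the dotted path through the module under test, where the class is imported. A minimal sketch of the pattern; the return value is a hypothetical sequence count, not what the real test configures:

```python
# Sketch only: the patch path resolves through
# views_stepshifter.models.hurdle_model, which imports ForecastingModelManager.
from unittest.mock import patch

with patch(
    "views_stepshifter.models.hurdle_model.ForecastingModelManager"
    "._resolve_evaluation_sequence_number",
    return_value=12,  # hypothetical
) as mock_sequence_number:
    pass  # exercise HurdleModel.predict(...) against the mocked resolver
```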
4 changes: 2 additions & 2 deletions tests/test_stepshifter.py
@@ -3,7 +3,7 @@
import numpy as np
from unittest.mock import patch, MagicMock, call
from views_stepshifter.models.stepshifter import StepshifterModel
from views_pipeline_core.managers.model import ModelManager
from views_pipeline_core.managers.model import ModelManager, ForecastingModelManager

@pytest.fixture
def config():
@@ -182,7 +182,7 @@ def test_predict(config, partitioner_dict, sample_dataframe):
patch("views_stepshifter.models.stepshifter.as_completed") as mock_as_completed, \
patch("views_stepshifter.models.stepshifter.tqdm.tqdm") as mock_tqdm, \
patch("views_stepshifter.models.stepshifter.ProcessPoolExecutor") as mock_ProcessPoolExecutor, \
patch("views_stepshifter.models.stepshifter.ModelManager._resolve_evaluation_sequence_number") as mock_sequence_number:
patch("views_stepshifter.models.stepshifter.ForecastingModelManager._resolve_evaluation_sequence_number") as mock_sequence_number:


# the else branch
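Both predict tests also stub out `ProcessPoolExecutor`. One way to make such a stub useful (a hypothetical helper, not part of this repository) is to run submissions inline, so the futures-based branch executes deterministically without real subprocesses:

```python
# Hypothetical helper: a MagicMock standing in for ProcessPoolExecutor
# that executes submit() calls eagerly in the test process.
from unittest.mock import MagicMock

def make_inline_executor() -> MagicMock:
    executor = MagicMock()
    executor.__enter__.return_value = executor

    def submit(fn, *args, **kwargs):
        future = MagicMock()
        future.result.return_value = fn(*args, **kwargs)  # run inline
        return future

    executor.submit.side_effect = submit
    return executor
```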
36 changes: 18 additions & 18 deletions tests/test_stepshifter_manager.py
@@ -118,18 +118,30 @@ def test_get_standardized_df():
"""
Test the _get_standardized_df method to ensure it correctly standardizes the DataFrame.
"""
df = pd.DataFrame({
df1 = pd.DataFrame({
"a": [1.0, -1.0, np.inf, -np.inf, 3.0],
"b": [4.0, 5.0, -6.0, 7.0, -8.0]
})
expected_df = pd.DataFrame({
expected_df1 = pd.DataFrame({
"a": [1.0, 0.0, 0.0, 0.0, 3.0],
"b": [4.0, 5.0, 0.0, 7.0, 0.0]
})
result_df = StepshifterManager._get_standardized_df(df)
pd.testing.assert_frame_equal(result_df, expected_df)


df2 = pd.DataFrame({
"a": [[1.0, -1.0, np.inf],
[-np.inf, 3.0, 4.0]],
"b": [[4.0, 5.0, -6.0],
[7.0, -8.0, 9.0]],
})
expected_df2 = pd.DataFrame({
"a": [[1.0, 0.0, 0.0],
[0.0, 3.0, 4.0]],
"b": [[4.0, 5.0, 0.0],
[7.0, 0.0, 9.0]],
})
result_df1 = StepshifterManager._get_standardized_df(df1)
result_df2 = StepshifterManager._get_standardized_df(df2)
pd.testing.assert_frame_equal(result_df1, expected_df1)
pd.testing.assert_frame_equal(result_df2, expected_df2)

def test_split_hurdle_parameters(stepshifter_manager_hurdle):
"""
@@ -164,9 +176,6 @@ def test_get_model(stepshifter_manager, stepshifter_manager_hurdle, mock_partitioner_dict):
stepshifter_manager._get_model(mock_partitioner_dict)
mock_stepshifter_model.assert_called_once_with(stepshifter_manager.config, mock_partitioner_dict)
mock_hurdle_model.assert_not_called()




def test_train_model_artifact(stepshifter_manager, stepshifter_manager_hurdle):
"""
@@ -194,8 +203,6 @@ def test_train_model_artifact(stepshifter_manager, stepshifter_manager_hurdle):

mock_split_hurdle.assert_called_once()



def test_evaluate_model_artifact(stepshifter_manager):
"""
Test the _evaluate_model_artifact method to ensure it correctly evaluates the model artifact.
@@ -233,8 +240,6 @@ def test_evaluate_model_artifact(stepshifter_manager):
path_artifact = stepshifter_manager._model_path.artifacts / artifact_name
assert path_artifact == Path("predictions_test_run_202401011200000/non_default_artifact.pkl")



def test_forecast_model_artifact(stepshifter_manager):
"""
Test the _forecast_model_artifact method to ensure it correctly forecasts the model artifact.
@@ -278,7 +283,6 @@ def test_forecast_model_artifact(stepshifter_manager):
assert path_artifact == Path("predictions_test_run_202401011200000/non_default_artifact.pkl")
mock_logger.exception.assert_called_once_with(f"Model artifact not found at {path_artifact}")


def test_evaluate_sweep(stepshifter_manager):
"""
Test the _evaluate_sweep method.
@@ -297,7 +301,3 @@ def test_evaluate_sweep(stepshifter_manager):
# mock_read_dataframe.assert_called_once()
mock_model.predict.assert_called_once_with("test_run_type", eval_type)
mock_get_standardized_df.assert_called_once()




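The extended test now covers both scalar cells and list-valued cells (e.g. per-sample prediction arrays). A short usage sketch of the method under test, assuming the package is importable as laid out in this PR:

```python
# Usage sketch for StepshifterManager._get_standardized_df: inf, -inf, NaN,
# and negative values become 0, inside lists as well as in scalar cells.
import numpy as np
import pandas as pd
from views_stepshifter.manager.stepshifter_manager import StepshifterManager

df = pd.DataFrame({"pred": [[1.0, -2.0, np.inf], [np.nan, 0.5, 3.0]]})
print(StepshifterManager._get_standardized_df(df))
# expected cells: [1.0, 0.0, 0.0] and [0.0, 0.5, 3.0]
```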
45 changes: 24 additions & 21 deletions views_stepshifter/manager/stepshifter_manager.py
@@ -1,28 +1,28 @@
from views_pipeline_core.managers.model import ModelPathManager, ModelManager
from views_pipeline_core.managers.model import ModelPathManager, ForecastingModelManager
from views_pipeline_core.configs.pipeline import PipelineConfig
from views_pipeline_core.files.utils import read_dataframe
from views_pipeline_core.files.utils import read_dataframe, generate_model_file_name
from views_stepshifter.models.stepshifter import StepshifterModel
from views_stepshifter.models.hurdle_model import HurdleModel
from views_stepshifter.models.shurf_model import ShurfModel
import logging
import pickle
import pandas as pd
import numpy as np
from typing import Union, Optional, List, Dict
# from views_stepshifter.models.shurf import StepShiftedHurdleUncertainRF

logger = logging.getLogger(__name__)


class StepshifterManager(ModelManager):
class StepshifterManager(ForecastingModelManager):
def __init__(
self,
model_path: ModelPathManager,
wandb_notifications: bool = False,
wandb_notifications: bool = True,
use_prediction_store: bool = True,
) -> None:
super().__init__(model_path, wandb_notifications, use_prediction_store)
self._is_hurdle = self._config_meta["algorithm"] == "HurdleModel"
self._is_shurf = self._config_meta["algorithm"] == "SHURF"
self._is_shurf = self._config_meta["algorithm"] == "ShurfModel"

@staticmethod
def _get_standardized_df(df: pd.DataFrame) -> pd.DataFrame:
@@ -36,9 +36,16 @@ def _get_standardized_df(df: pd.DataFrame) -> pd.DataFrame:
The standardized DataFrame
"""

# post-process: replace inf and -inf with 0
df = df.replace([np.inf, -np.inf], 0)
df = df.mask(df < 0, 0)
def standardize_value(value):
# 1) Replace inf, -inf, nan with 0;
# 2) Replace negative values with 0
if isinstance(value, list):
return [0 if (v == np.inf or v == -np.inf or v < 0 or np.isnan(v)) else v for v in value]
else:
return 0 if (value == np.inf or value == -np.inf or value < 0 or np.isnan(value)) else value

df = df.applymap(standardize_value)

return df

def _split_hurdle_parameters(self):
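One portability note on the new implementation: `DataFrame.applymap` is the pandas 1.x spelling, consistent with the `pandas = "^1.5.3"` pin in pyproject.toml; pandas 2.1 renamed it to `DataFrame.map`. A hedged, version-agnostic sketch:

```python
# Sketch: elementwise apply that works on pandas 1.x and on pandas >= 2.1,
# where DataFrame.applymap was renamed to DataFrame.map.
import pandas as pd

def elementwise(df: pd.DataFrame, fn) -> pd.DataFrame:
    return df.map(fn) if hasattr(pd.DataFrame, "map") else df.applymap(fn)
```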
@@ -78,8 +85,8 @@ def _get_model(self, partitioner_dict: dict):
"""
if self._is_hurdle:
model = HurdleModel(self.config, partitioner_dict)
# elif self._is_shurf:
# model = StepShiftedHurdleUncertainRF(self.config, partitioner_dict)
elif self._is_shurf:
model = ShurfModel(self.config, partitioner_dict)
else:
self.config["model_reg"] = self.config["algorithm"]
model = StepshifterModel(self.config, partitioner_dict)
@@ -96,7 +103,7 @@ def _train_model_artifact(self):
path_raw = self._model_path.data_raw
path_artifacts = self._model_path.artifacts
# W&B does not directly support nested dictionaries for hyperparameters
if self.config["sweep"] and self._is_hurdle:
if self.config["sweep"] and (self._is_hurdle or self._is_shurf):
self.config = self._split_hurdle_parameters()

run_type = self.config["run_type"]
@@ -109,7 +116,7 @@
stepshift_model.fit(df_viewser)

if not self.config["sweep"]:
model_filename = ModelManager.generate_model_file_name(
model_filename = generate_model_file_name(
run_type, file_extension=".pkl"
)
stepshift_model.save(path_artifacts / model_filename)
@@ -128,7 +135,6 @@ def _evaluate_model_artifact(
Returns:
A list of DataFrames containing the evaluation results
"""
path_raw = self._model_path.data_raw
path_artifacts = self._model_path.artifacts
run_type = self.config["run_type"]

@@ -157,10 +163,9 @@
raise

df_predictions = stepshift_model.predict(run_type, eval_type)
if not self._is_shurf:
df_predictions = [
StepshifterManager._get_standardized_df(df) for df in df_predictions
]
df_predictions = [
StepshifterManager._get_standardized_df(df) for df in df_predictions
]
return df_predictions

def _forecast_model_artifact(self, artifact_name: str) -> pd.DataFrame:
@@ -173,7 +178,6 @@ def _forecast_model_artifact(self, artifact_name: str) -> pd.DataFrame:
Returns:
The forecasted DataFrame
"""
path_raw = self._model_path.data_raw
path_artifacts = self._model_path.artifacts
run_type = self.config["run_type"]

@@ -207,12 +211,11 @@ def _forecast_model_artifact(self, artifact_name: str) -> pd.DataFrame:
return df_prediction

def _evaluate_sweep(self, eval_type: str, model: any) -> List[pd.DataFrame]:
path_raw = self._model_path.data_raw
run_type = self.config["run_type"]

df_predictions = model.predict(run_type, eval_type)
df_predictions = [
StepshifterManager._get_standardized_df(df) for df in df_predictions
]

return df_predictions
return df_predictions
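As the import change at the top of this file reflects, `generate_model_file_name` is now a module-level helper in `views_pipeline_core.files.utils` rather than a `ModelManager` static method. A minimal sketch mirroring the call in `_train_model_artifact`; the run type string is illustrative:

```python
# Illustrative call, mirroring _train_model_artifact above; "calibration"
# is a hypothetical run_type value.
from views_pipeline_core.files.utils import generate_model_file_name

model_filename = generate_model_file_name("calibration", file_extension=".pkl")
```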
41 changes: 16 additions & 25 deletions views_stepshifter/models/hurdle_model.py
@@ -1,4 +1,4 @@
from views_pipeline_core.managers.model import ModelManager
from views_pipeline_core.managers.model import ForecastingModelManager
from views_stepshifter.models.stepshifter import StepshifterModel
from views_stepshifter.models.validation import views_validate
from sklearn.utils.validation import check_is_fitted
@@ -7,8 +7,6 @@
import logging
import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
# import multiprocessing
# multiprocessing.set_start_method('spawn')
from functools import partial
logger = logging.getLogger(__name__)

@@ -149,29 +147,19 @@ def fit(self, df: pd.DataFrame):
self._models = models
self.is_fitted_ = True

# for step in tqdm.tqdm(self._steps, desc="Fitting model for step", leave=True):
# # Fit binary-like stage using a classification model, but the target is binary (0 or 1)
# binary_model = self._clf(lags_past_covariates=[-step], **self._clf_params)
# binary_model.fit(target_binary, past_covariates=self._past_cov)

# # Fit positive stage using the regression model
# positive_model = self._reg(lags_past_covariates=[-step], **self._reg_params)
# positive_model.fit(target_pos, past_covariates=past_cov_pos)
# self._models[step] = (binary_model, positive_model)
# self.is_fitted_ = True

def predict(self, run_type: str, eval_type: str = "standard") -> pd.DataFrame:
check_is_fitted(self, "is_fitted_")

if run_type != "forecasting":
final_preds = []

if eval_type == "standard":
total_sequence_number = (
ModelManager._resolve_evaluation_sequence_number(eval_type)
ForecastingModelManager._resolve_evaluation_sequence_number(eval_type)
)
if self.get_device_params().get("device") == "cuda":
pred = []
for sequence_number in tqdm.tqdm(
range(ModelManager._resolve_evaluation_sequence_number(eval_type)),
range(ForecastingModelManager._resolve_evaluation_sequence_number(eval_type)),
desc="Predicting for sequence number",
):
pred_by_step_binary = [
@@ -186,15 +174,15 @@ def predict(self, run_type: str, eval_type: str = "standard") -> pd.DataFrame:
)
for step in self._steps
]
final_pred = pd.concat(pred_by_step_binary, axis=0) * pd.concat(pred_by_step_positive, axis=0)
final_preds.append(final_pred)
return final_preds
pred = pd.concat(pred_by_step_binary, axis=0) * pd.concat(pred_by_step_positive, axis=0)
preds.append(pred)

else:
preds = [None] * total_sequence_number
with ProcessPoolExecutor() as executor:
futures = {
executor.submit(self._predict_by_sequence, sequence_number): sequence_number
for sequence_number in range(ModelManager._resolve_evaluation_sequence_number(eval_type))
for sequence_number in range(ForecastingModelManager._resolve_evaluation_sequence_number(eval_type))
}
for future in tqdm.tqdm(
as_completed(futures.keys()),
@@ -203,7 +191,10 @@
):
sequence_number = futures[future]
preds[sequence_number] = future.result()
return preds
else:
raise ValueError(
f"{eval_type} is not supported now. Please use 'standard' evaluation type."
)

else:
if self.get_device_params().get("device") == "cuda":
@@ -217,10 +208,10 @@ def predict(self, run_type: str, eval_type: str = "standard") -> pd.DataFrame:
self._predict_by_step(self._models[step][1], step, 0)
)

final_preds = pd.concat(pred_by_step_binary, axis=0) * pd.concat(
preds = pd.concat(pred_by_step_binary, axis=0) * pd.concat(
pred_by_step_positive, axis=0
)
return final_preds

else:
with ProcessPoolExecutor() as executor:
futures_binary = {
@@ -257,4 +248,4 @@ def predict(self, run_type: str, eval_type: str = "standard") -> pd.DataFrame:
pd.concat(pred_by_step_binary, axis=0).sort_index()
* pd.concat(pred_by_step_positive, axis=0).sort_index()
)
return preds
return preds
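Throughout `predict`, the final estimate is the elementwise product of the two stages: the binary stage's probability of exceeding the hurdle and the positive stage's conditional magnitude. A toy illustration of that composition with made-up numbers:

```python
# Toy illustration of the hurdle composition used in predict():
# final = P(outcome > 0) * E[outcome | outcome > 0], elementwise.
import pandas as pd

p_positive = pd.Series([0.1, 0.8, 0.5])  # binary-stage probabilities
magnitude = pd.Series([2.0, 10.0, 4.0])  # positive-stage magnitudes
print((p_positive * magnitude).tolist())  # [0.2, 8.0, 2.0]
```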