Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions explainableai.egg-info/requires.txt
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,5 @@ google-generativeai
python-dotenv
scipy
pillow


494 changes: 338 additions & 156 deletions explainableai/core.py

Large diffs are not rendered by default.

625 changes: 550 additions & 75 deletions explainableai/model_interpretability.py

Large diffs are not rendered by default.

76 changes: 37 additions & 39 deletions explainableai/utils.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,42 @@
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, f1_score
from sklearn.inspection import permutation_importance
# utils.py

# Import colorama and its components
import colorama
from colorama import Fore, Style

# Initialize colorama
colorama.init(autoreset=True)

import pandas as pd
import numpy as np
import logging

logger=logging.getLogger(__name__)
# Configure logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

def explain_model(model, X_train, y_train, X_test, y_test, feature_names):
    """Explain a fitted model via permutation feature importance.

    Parameters
    ----------
    model : fitted scikit-learn compatible estimator (must support ``score``).
    X_train, y_train : training data; currently unused, kept so the public
        signature stays compatible with existing callers.
    X_test, y_test : held-out data on which importances are computed.
    feature_names : sequence of names aligned with the columns of ``X_test``.

    Returns
    -------
    dict | None
        ``{"feature_importance": {...}, "model_type": str}`` with features
        ordered by absolute mean importance (largest first), or ``None``
        if the computation fails.
    """
    logger.debug("Explaining model...")
    try:
        result = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=42, n_jobs=-1)
        feature_importance = dict(zip(feature_names, result.importances_mean))

        # Rank by magnitude of impact so the most influential features come first.
        feature_importance = dict(
            sorted(feature_importance.items(), key=lambda item: abs(item[1]), reverse=True)
        )

        logger.info("Model explained...")
        return {
            "feature_importance": feature_importance,
            "model_type": str(type(model)),
        }
    except Exception as e:
        # logger.exception records the traceback, not just the message text.
        logger.exception(f"Some error occurred in explaining model...{str(e)}")
        # Explicit None: previous version fell off the end and returned None implicitly.
        return None

def calculate_metrics(model, X_test, y_test):
    """Score ``model`` on held-out data with task-appropriate metrics.

    Parameters
    ----------
    model : fitted estimator exposing ``predict``.
    X_test, y_test : evaluation features and ground-truth targets.

    Returns
    -------
    dict | None
        For binary targets (exactly two unique values in ``y_test``):
        ``{"accuracy", "f1_score"}``. Otherwise ``{"mse", "r2"}``.
        ``None`` if scoring raises.

    NOTE(review): the fallback branch applies regression metrics (mse/r2)
    even when ``y_test`` holds multi-class labels, where accuracy/macro-F1
    would be more meaningful — confirm intent with callers before changing
    the returned keys.
    """
    logger.debug("Calculation of metrics...")
    try:
        y_pred = model.predict(X_test)

        if len(np.unique(y_test)) == 2:  # Binary classification
            logger.info("Binary classification... ")
            return {
                "accuracy": accuracy_score(y_test, y_pred),
                "f1_score": f1_score(y_test, y_pred, average='weighted')
            }
        else:  # Regression or multi-class classification
            logger.info("Multiclass classification...")
            return {
                "mse": mean_squared_error(y_test, y_pred),
                "r2": r2_score(y_test, y_pred)
            }
    except Exception as e:
        # logger.exception captures the traceback for easier debugging.
        logger.exception(f"Some error occurred in metric calculation...{str(e)}")
        # Explicit None: previous version returned None implicitly on failure.
        return None
# Example utility function using colorama for colored logs
def log_data_processing_step(step_description):
    """Write *step_description* to the log, rendered in blue."""
    message = f"{Fore.BLUE}{step_description}{Style.RESET_ALL}"
    logger.info(message)

# Example utility class
class DataProcessor:
    """Demonstration processor that logs the start and end of a run."""

    def process_data(self, data):
        """Run the (placeholder) processing pipeline over *data*."""
        start_msg = f"{Fore.YELLOW}Starting data processing...{Style.RESET_ALL}"
        logger.info(start_msg)
        # Implement data processing logic here
        done_msg = f"{Fore.YELLOW}Data processing completed.{Style.RESET_ALL}"
        logger.info(done_msg)

# Add your actual utility functions and classes below
# Ensure that any function or class using Fore or Style includes the imports above

def some_utility_function():
    """Example showing how to colour a log line green with colorama."""
    text = f"{Fore.GREEN}This is a green message.{Style.RESET_ALL}"
    logger.info(text)
    # Rest of the function...

class SomeUtilityClass:
    """Example class whose method emits a red-coloured log message."""

    def example_method(self):
        """Log a demonstration message in red."""
        red_message = f"{Fore.RED}This is a red message.{Style.RESET_ALL}"
        logger.info(red_message)
        # Rest of the method...


6 changes: 6 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,9 @@ scipy
pillow
xgboost
colorama
scikeras
tensorflow

pytest


22 changes: 20 additions & 2 deletions setup.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
# setup.py

from setuptools import setup, find_packages
import os

# Read the long description from README.md
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
Expand All @@ -23,7 +26,11 @@
'google-generativeai',
'python-dotenv',
'scipy',
'pillow'
'pillow',
'colorama', # Added missing dependency
'scikeras', # Added missing dependency
'tensorflow', # Added missing dependency
# Removed 'model_interpretability' assuming it's part of this package
],
entry_points={
'console_scripts': [
Expand Down Expand Up @@ -60,4 +67,15 @@
package_data={
'explainableai': ['data/*.csv', 'templates/*.html'],
},
)
# Optional: Add a test suite
# test_suite='tests',
# Optional: Specify development dependencies
extras_require={
'dev': [
'pytest',
'flake8',
'black',
# Add other development dependencies here
],
},
)
15 changes: 12 additions & 3 deletions tests/test_utils.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,19 @@
# tests/test_utils.py

import sys
import os
import pytest
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
from explainableai.utils import explain_model, calculate_metrics
from dotenv import load_dotenv
import os

# Add the project root directory to sys.path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from explainableai.utils import explain_model, calculate_metrics

# Load environment variables
load_dotenv()

def test_explain_model_regression():
Expand Down Expand Up @@ -58,4 +67,4 @@ def test_calculate_metrics_classification():
assert "f1_score" in metrics

if __name__ == "__main__":
pytest.main()
pytest.main()