-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
83 lines (65 loc) · 2.86 KB
/
app.py
File metadata and controls
83 lines (65 loc) · 2.86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy import stats
import missingno as msno
# Build a small synthetic dataset for exercising the preprocessing pipeline.
np.random.seed(0)  # fixed seed so every run produces identical data
raw_columns = {
    # 100 draws from N(100, 10), then one missing value and one outlier (200)
    'Feature1': np.random.normal(100, 10, 100).tolist() + [np.nan, 200],
    # 102 uniform random integers in [0, 100)
    'Feature2': np.random.randint(0, 100, 102).tolist(),
    # repeating A-D pattern plus one missing entry and a final 'A'
    'Category': ['A', 'B', 'C', 'D'] * 25 + [np.nan, 'A'],
    # binary 0/1 labels
    'Target': np.random.choice([0, 1], 102).tolist()
}
# Materialize the columns as a DataFrame
df_dummy = pd.DataFrame(raw_columns)
# Quick visual sanity check of the generated data
print(df_dummy.head())
def load_data(df):
    """Identity pass-through, kept as a hook for future file-based loading."""
    return df
def handle_missing_values(df):
    """Return a copy of *df* with missing values imputed.

    Numeric columns are filled with their column mean; object/category
    columns are filled with their mode, or 'Unknown' when a column is
    entirely missing (empty mode).

    Parameters
    ----------
    df : pandas.DataFrame

    Returns
    -------
    pandas.DataFrame
        A new frame; the caller's DataFrame is left unmodified.
        (The original implementation wrote through to the input, which
        silently mutated the module-level df_dummy in this script.)
    """
    df = df.copy()  # fix: never mutate the caller's frame
    # Numeric columns -> column mean
    num_means = df.select_dtypes(include='number').mean()
    df[num_means.index] = df[num_means.index].fillna(num_means)
    # Categorical columns -> most frequent value; compute mode() only once
    cat_cols = df.select_dtypes(include=['object', 'category']).columns
    for col in cat_cols:
        modes = df[col].mode()
        fill_value = modes.iloc[0] if not modes.empty else 'Unknown'
        df[col] = df[col].fillna(fill_value)
    return df
def remove_outliers(df, z_thresh=3.0):
    """Drop rows where any numeric column is a z-score outlier.

    Parameters
    ----------
    df : pandas.DataFrame
    z_thresh : float, default 3.0
        Rows with |z| >= z_thresh in any numeric column are removed.
        (Generalized from the previously hard-coded constant 3; the
        default preserves the original behavior.)

    Returns
    -------
    pandas.DataFrame
        The surviving rows of *df* (original index preserved).

    Notes
    -----
    NaNs in numeric columns propagate through zscore, so rows containing
    NaN are dropped — call handle_missing_values first.
    """
    numeric = df.select_dtypes(include=[np.number])
    z_scores = np.abs(stats.zscore(numeric))
    return df[(z_scores < z_thresh).all(axis=1)]
def scale_data(df, exclude_cols=None):
    """Standardize numeric columns to zero mean / unit variance.

    Columns listed in *exclude_cols* (e.g. a 0/1 target) are left
    untouched. Returns a new DataFrame; the input is not modified.
    """
    excluded = [] if exclude_cols is None else exclude_cols
    # Copy first so assignment below never writes through a view/slice
    result = df.copy()
    numeric_cols = result.select_dtypes(include=[np.number]).columns.difference(excluded)
    result.loc[:, numeric_cols] = StandardScaler().fit_transform(result.loc[:, numeric_cols])
    return result
def encode_categorical(df, categorical_columns):
    """One-hot encode *categorical_columns*, producing 0/1 integer dummies.

    Parameters
    ----------
    df : pandas.DataFrame
    categorical_columns : list of str
        Column names to expand into dummy columns named '<col>_<value>'.

    Returns
    -------
    pandas.DataFrame
        *df* with the listed columns replaced by integer dummy columns.

    Notes
    -----
    Uses get_dummies(dtype=int) so dummies are created as ints directly.
    The previous two-step bool->int conversion scanned all columns for a
    '<col>_' prefix, which could wrongly re-cast unrelated pre-existing
    columns that happened to share the prefix.
    """
    return pd.get_dummies(df, columns=categorical_columns, dtype=int)
def save_data(df, output_filepath):
    """Write *df* to a CSV file at *output_filepath*, omitting the index."""
    df.to_csv(path_or_buf=output_filepath, index=False)
# Run the full preprocessing pipeline on the synthetic dataset:
# load -> impute missing values -> drop outliers -> scale -> one-hot encode.
df_encoded = load_data(df_dummy)
df_encoded = handle_missing_values(df_encoded)
df_encoded = remove_outliers(df_encoded)
# 'Target' is excluded from scaling so the 0/1 labels survive intact
df_encoded = scale_data(df_encoded, exclude_cols=['Target'])
df_encoded = encode_categorical(df_encoded, ['Category'])
# Show the result and persist it for downstream use
print(df_encoded.head())
save_data(df_encoded, 'preprocessed_dummy_data.csv')
print('Preprocessing complete. Preprocessed data saved as preprocessed_dummy_data.csv')