from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import OneHotEncoder

import warnings
warnings.filterwarnings('ignore')

X, y = fetch_california_housing(return_X_y = True)

print(X.shape)
(20640, 8)
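
The array form hides the column names; an optional sketch to inspect them, assuming a scikit-learn version that supports as_frame (`housing` is a hypothetical name):

housing = fetch_california_housing(as_frame=True)
print(housing.frame.columns.tolist())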

model1 = DecisionTreeRegressor()
model2 = LinearRegression()
model3 = KNeighborsRegressor()

model_pipeline = [model1, model2, model3]
model_names = ['Tree', 'Linear', 'KNN']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33)   # unseeded split: exact scores below vary per run
cross_val_score(model1, X_train, y_train, cv = 10)   # e.g. the decision tree

array([0.5971567 , 0.61437534, 0.62928232, 0.631017  , 0.56024677,
       0.59049424, 0.54686426, 0.58641863, 0.60625477, 0.5571165 ])
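
These are R² values, the default scorer for regressors. A sketch of the same check with RMSE instead (sklearn negates error metrics so that higher is always better; `rmse_scores` is a hypothetical name):

rmse_scores = -cross_val_score(model1, X_train, y_train, cv=10,
                               scoring='neg_root_mean_squared_error')
print(rmse_scores.mean(), rmse_scores.std())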

scores = {}

for name, model in zip(model_names, model_pipeline):
    scores[name] = np.mean(cross_val_score(model, X_train, y_train, cv=10))

print(scores)
{'Tree': 0.5873210431033433, 'Linear': 0.5256752159256888, 'KNN': 0.12634391462380018}
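
The tree comes out ahead, and KNN trails badly, most likely because the eight features sit on very different scales and were never standardized. A minimal follow-up sketch, refitting the winner and scoring it on the held-out test set (`best_model` is a hypothetical name):

best_model = DecisionTreeRegressor()
best_model.fit(X_train, y_train)
print(best_model.score(X_test, y_test))   # R² on unseen data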

# -------------------------------------------------------------------------------------------------------

**# Random Forest**

numerical = pd.read_csv('numerical.csv')
numerical = numerical.drop(columns=['Unnamed: 0.1', 'Unnamed: 0'])   # leftover index columns
categorical = pd.read_csv('categorical.csv')
categorical = categorical.drop(columns=['Unnamed: 0.1', 'Unnamed: 0'])
targets = pd.read_csv('target.csv')

data = pd.concat([numerical, categorical, targets], axis = 1)
data['TARGET_B'].value_counts()

0    90569
1     4843
Name: TARGET_B, dtype: int64

# Downsample the majority class to match the 4843 positives
category_0 = data[data.TARGET_B == 0].sample(4843)
category_1 = data[data.TARGET_B == 1]

data = pd.concat([category_0, category_1], axis = 0)
data.shape
(9686, 338)

data = data.dropna()
y = data['TARGET_B']
X = data.drop(['TARGET_B','ZIP'], axis=1)

numericalX = X.select_dtypes(np.number).reset_index(drop=True)
categoricalX = X.select_dtypes(object).reset_index(drop=True)

encoder = OneHotEncoder().fit(categoricalX)
encoded_categorical = pd.DataFrame(encoder.transform(categoricalX).toarray())

# drop=True keeps the old row index out of both frames; without it, the
# index column itself gets one-hot encoded into thousands of extra columns
X = pd.concat([numericalX, encoded_categorical], axis = 1)
X.columns = X.columns.astype(str)
X.shape
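
Not part of the original flow, but the same preprocessing can be sketched with sklearn's ColumnTransformer, which splits, encodes, and recombines in one object (`X_raw`, `preprocessor`, and `X_alt` are hypothetical names):

from sklearn.compose import ColumnTransformer

X_raw = data.drop(['TARGET_B', 'ZIP'], axis=1)   # frame before the manual split
preprocessor = ColumnTransformer(
    [('onehot', OneHotEncoder(handle_unknown='ignore'),
      X_raw.select_dtypes(object).columns.tolist())],
    remainder='passthrough'   # numeric columns pass through unchanged
)
X_alt = preprocessor.fit_transform(X_raw)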

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(max_depth = 2, random_state = 0)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)

0.8947151114781172
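
Accuracy alone can hide class-level errors even on this balanced sample; a sketch of a fuller report (`y_pred` is a hypothetical name):

from sklearn.metrics import classification_report, confusion_matrix

y_pred = clf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))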

# -------------------------------------------------------------------------------------------------------

**# Hyperparameter Tuning for Random Forest**

# Candidate parameter values

n_estimators = [200, 500, 1000, 2000, 4000]
min_samples_split = [2, 4, 8, 16, 32]
min_samples_leaf = [1, 2, 3, 4, 5]
max_features = ['sqrt', 'log2']
max_samples = [None, 0.5, 0.8]   # None (not the string 'None') uses the full bootstrap sample

from sklearn.model_selection import GridSearchCV

# A reduced grid keeps the search time manageable
param_grid = {
    'n_estimators': [50, 100],
    'min_samples_split': [2, 4],
    'min_samples_leaf': [1, 2]
}

grid_search = GridSearchCV(clf, param_grid, cv = 5, return_train_score = True)
grid_search.fit(X_train, y_train)
grid_search.best_params_

{'min_samples_leaf': 1, 'min_samples_split': 2, 'n_estimators': 100}
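
A sketch of pulling the refitted best model out of the search and checking it on the held-out test set (`best_rf` is a hypothetical name; GridSearchCV refits the best combination on the full training set by default):

best_rf = grid_search.best_estimator_
print(grid_search.best_score_)          # mean CV accuracy of the best combination
print(best_rf.score(X_test, y_test))    # accuracy on the held-out test set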

# -------------------------------------------------------------------------------------------------------

**# Feature Importance**

# Refit the depth-2 baseline forest; its feature_importances_ are inspected below
clf.fit(X_train, y_train)
X_train.head()

feature_names = list(X_train.columns)

# Use a new name so the original `data` frame is not overwritten
importances = pd.DataFrame({
    'column_name': feature_names,
    'score_feature_importance': clf.feature_importances_
})
importances_sorted = importances.sort_values(by = ['score_feature_importance'], ascending = False)

importances_sorted.score_feature_importance.sum()
1.0
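
A sketch of listing the ten most important features and the share of the total importance they capture:

print(importances_sorted.head(10))
print(importances_sorted.score_feature_importance.head(10).sum())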