# # Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
ConfusionMatrixDisplay,
classification_report,
)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import warnings
warnings.filterwarnings("ignore")
# # Import Dataset
train_df = pd.read_csv("/kaggle/input/customer-segmentation/Train.csv")
test_df = pd.read_csv("/kaggle/input/customer-segmentation/Test.csv")
df = pd.concat([train_df, test_df])
df.head(30)
# # Data Cleaning
# dropping the rows having NaN values
df = df.dropna()
df.info()
df["Segmentation"].value_counts()
print("Gender: ", df["Gender"].unique())
print("Ever_Married: ", df["Ever_Married"].unique())
print("Graduated: ", df["Graduated"].unique())
df["Gender"] = df["Gender"].apply(lambda x: 1 if x == "Male" else 0)
df["Ever_Married"] = df["Ever_Married"].apply(lambda x: 1 if x == "Yes" else 0)
df["Graduated"] = df["Graduated"].apply(lambda x: 1 if x == "Yes" else 0)
df["Spending_Score"].unique()
df["Spending_Score"].replace(to_replace="Low", value=0, inplace=True)
df["Spending_Score"].replace(to_replace="Average", value=1, inplace=True)
df["Spending_Score"].replace(to_replace="High", value=2, inplace=True)
df["Var_1"].unique()
df["Var_1"].replace(to_replace="Cat_1", value=1, inplace=True)
df["Var_1"].replace(to_replace="Cat_2", value=2, inplace=True)
df["Var_1"].replace(to_replace="Cat_3", value=3, inplace=True)
df["Var_1"].replace(to_replace="Cat_4", value=4, inplace=True)
df["Var_1"].replace(to_replace="Cat_5", value=5, inplace=True)
df["Var_1"].replace(to_replace="Cat_6", value=6, inplace=True)
df["Var_1"].replace(to_replace="Cat_7", value=7, inplace=True)
df["Segmentation"].unique()
df["Segmentation"].replace(to_replace="A", value=0, inplace=True)
df["Segmentation"].replace(to_replace="B", value=1, inplace=True)
df["Segmentation"].replace(to_replace="C", value=2, inplace=True)
df["Segmentation"].replace(to_replace="D", value=3, inplace=True)
df
label = {0: "A", 1: "B", 2: "C", 3: "D"}
plotdata = sns.pairplot(df.replace({"Segmentation": label}), hue="Segmentation")
plotdata.fig.suptitle("Pair Plot Analysis", y=1.08)
x = df[
[
"Gender",
"Ever_Married",
"Age",
"Graduated",
"Work_Experience",
"Spending_Score",
"Family_Size",
"Var_1",
]
].values
x
y = df.iloc[:, 10].values  # "Segmentation" column (the target)
y
# # Train & Test Splitting
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
print(x_train.shape)
# # Logistic Regression
model = LogisticRegression(max_iter=600)
model.fit(x_train, y_train)
y_pred_lr = model.predict(x_test)
print("Accuracy : ", accuracy_score(y_test, y_pred_lr))
cr = classification_report(y_test, y_pred_lr)
print("\t\tClassification Report\n" + "--" * 28 + "\n", cr)
cm = confusion_matrix(y_test, y_pred_lr)
print(cm)
cm = confusion_matrix(y_test, y_pred_lr)
sns.heatmap(cm, annot=True)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=confusion_matrix(y_test, y_pred_lr),
display_labels=["A", "B", "C", "D"],
)
cm_display.plot()
plt.show()
# # Random Forest
model_rf = RandomForestClassifier(n_estimators=30, criterion="entropy", random_state=0)
model_rf.fit(x_train, y_train)
y_pred_rf = model_rf.predict(x_test)
print("Accuracy : ", accuracy_score(y_test, y_pred_rf))
cr = classification_report(y_test, y_pred_rf)
print("\t\tClassification Report\n" + "--" * 28 + "\n", cr)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=confusion_matrix(y_test, y_pred_rf),
display_labels=["A", "B", "C", "D"],
)
cm_display.plot()
plt.show()
# # KNN
iteration = 25
error_rate = []
acc = []
scores = {}
for i in range(1, iteration):
model_knn = KNeighborsClassifier(n_neighbors=i)
model_knn.fit(x_train, y_train)
y_pred_knn = model_knn.predict(x_test)
error_rate.append(np.mean(y_pred_knn != y_test))
scores[i] = metrics.accuracy_score(y_test, y_pred_knn)
acc.append(metrics.accuracy_score(y_test, y_pred_knn))
scores
plt.figure(figsize=(10, 6))
plt.plot(
range(1, iteration),
error_rate,
color="blue",
linestyle="dashed",
marker="o",
markerfacecolor="red",
markersize=10,
)
plt.title("Error Rate vs. K Value")
plt.xlabel("K")
plt.ylabel("Error Rate")
print("Minimum error:-", min(error_rate), "at K =", error_rate.index(min(error_rate)))
plt.figure(figsize=(10, 6))
plt.plot(
range(1, iteration),
acc,
color="blue",
linestyle="dashed",
marker="o",
markerfacecolor="red",
markersize=10,
)
plt.title("Accuracy vs. K Value")
plt.xlabel("K")
plt.ylabel("Accuracy")
print("Maximum accuracy:-", max(acc), "at K =", acc.index(max(acc)))
model_knn = KNeighborsClassifier(n_neighbors=23)
model_knn.fit(x_train, y_train)
y_pred_knn = model_knn.predict(x_test)
print("Accuracy : ", accuracy_score(y_test, y_pred_knn))
cr = classification_report(y_test, y_pred_knn)
print("\t\tClassification Report\n" + "--" * 28 + "\n", cr)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=confusion_matrix(y_test, y_pred_knn),
display_labels=["A", "B", "C", "D"],
)
cm_display.plot()
plt.show()
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# # import data
dataset_raw = pd.read_csv(
"/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv"
)
print(dataset_raw.shape)
dataset_raw.head().T
# # data description
# > Marital status - The marital status of the student. (Categorical)
# > Application mode - The method of application used by the student. (Categorical)
# > Application order - The order in which the student applied. (Numerical)
# > Course - The course taken by the student. (Categorical)
# > Daytime/evening attendance - Whether the student attends classes during the day or in the evening. (Categorical)
# > Previous qualification - The qualification obtained by the student before enrolling in higher education. (Categorical)
# > Nacionality - The nationality of the student. (Categorical)
# > Mother's qualification - The qualification of the student's mother. (Categorical)
# > Father's qualification - The qualification of the student's father. (Categorical)
# > Mother's occupation - The occupation of the student's mother. (Categorical)
# > Father's occupation - The occupation of the student's father. (Categorical)
# > Displaced - Whether the student is a displaced person. (Categorical)
# > Educational special needs - Whether the student has any special educational needs. (Categorical)
# > Debtor - Whether the student is a debtor. (Categorical)
# > Tuition fees up to date - Whether the student's tuition fees are up to date. (Categorical)
# > Gender - The gender of the student. (Categorical)
# > Scholarship holder - Whether the student is a scholarship holder. (Categorical)
# > Age at enrollment - The age of the student at the time of enrollment. (Numerical)
# > International - Whether the student is an international student. (Categorical)
# > Curricular units 1st sem (credited) - The number of curricular units credited by the student in the first semester. (Numerical)
# > Curricular units 1st sem (enrolled) - The number of curricular units enrolled by the student in the first semester. (Numerical)
# > Curricular units 1st sem (evaluations) - The number of curricular units evaluated by the student in the first semester. (Numerical)
# > Curricular units 1st sem (approved) - The number of curricular units approved by the student in the first semester. (Numerical)
dataset_raw.info()
dataset_raw.describe(include="all").T
dataset = dataset_raw.copy()
dataset.shape
dataset["Target"].value_counts()
# # target and features
target = dataset["Target"]
features = dataset.drop(["Target"], axis=1)
target.shape, features.shape
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression()
print(cross_val_score(logit, features, target, scoring="accuracy"))
print(cross_val_score(logit, features, target, scoring="f1_macro"))
from sklearn.model_selection import train_test_split, KFold
kf = KFold(n_splits=30, shuffle=True, random_state=2304)
# NOTE: the split is overwritten on each iteration, so only the final fold is kept as the train/test split
for tr_idx, te_idx in kf.split(features):
X_train, X_test = features.iloc[tr_idx], features.iloc[te_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[te_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
print(y_train)
y_test = le.transform(y_test)  # reuse the encoder fitted on y_train
y_test
# 0: Dropout, 1: Enrolled, 2: Graduate
# # modeling
from xgboost import XGBClassifier
model = XGBClassifier(
n_estimators=100, random_state=2304, eval_metric="mlogloss"
) # use_label_encoder=False
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
pred_proba = model.predict_proba(X_test)[:, 1]  # probability of class 1 ("Enrolled")
pred_proba[:10]
pred_label = model.predict(X_test)
pred_label[:100]
y_test[:100]
classes = np.unique(y_train)
classes
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(model, X_train, y_train, X_test, y_test, classes=classes)
plt.show()
import seaborn as sns
XGBClassifier_importances_values = model.feature_importances_
XGBClassifier_importances = pd.Series(
XGBClassifier_importances_values, index=X_train.columns
)
XGBClassifier_top34 = XGBClassifier_importances.sort_values(ascending=False)[:34]
plt.figure(figsize=(8, 6))
plt.title("Feature importances Top 34")
sns.barplot(x=XGBClassifier_top34, y=XGBClassifier_top34.index)
plt.show()
XGBClassifier_top34[:10]
dataset_important = dataset[
[
"Target",
"Curricular units 2nd sem (approved)",
"Tuition fees up to date",
"Curricular units 1st sem (enrolled)",
"Curricular units 2nd sem (enrolled)",
"Curricular units 1st sem (approved)",
"Scholarship holder",
"Curricular units 1st sem (evaluations)",
"Debtor",
"Curricular units 2nd sem (evaluations)",
]
]
dataset_important.head()
g = sns.pairplot(dataset_important, hue="Target")
plt.show()
# # update to increase accuracy
corr_rate_threshold = 0.85
cor_matrix = features.corr().abs()
cor_matrix
# remove mirror and diagonal values
upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
upper_tri
# Drop columns with higher correlation than corr_rate_threshold
to_drop = [
column
for column in upper_tri.columns
if any(upper_tri[column] >= corr_rate_threshold)
]
print(to_drop)
selected_features = features.drop(features[to_drop], axis=1)
selected_features.head()
features = selected_features.copy()
kf = KFold(n_splits=30, shuffle=True, random_state=2304)
for tr_idx, te_idx in kf.split(features):
X_train, X_test = features.iloc[tr_idx], features.iloc[te_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[te_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
print(y_train)
y_test = le.transform(y_test)  # reuse the encoder fitted on y_train
y_test
# 0: Dropout, 1: Enrolled, 2: Graduate
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
pred_proba = model.predict_proba(X_test)[:, 1]
pred_proba[:10]
pred_label = model.predict(X_test)
pred_label[:100]
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(model, X_train, y_train, X_test, y_test, classes=classes)
plt.show()
# ---
from pathlib import Path
from functools import partial
import json
from time import time
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.model_selection import (
StratifiedKFold,
RepeatedStratifiedKFold,
GridSearchCV,
train_test_split,
)
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.linear_model import Lasso, Ridge, LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error, log_loss, roc_auc_score
rmse = partial(mean_squared_error, squared=False)
from sklearn.preprocessing import (
StandardScaler,
OneHotEncoder,
OrdinalEncoder,
MinMaxScaler,
LabelEncoder,
)
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from category_encoders import TargetEncoder, LeaveOneOutEncoder, WOEEncoder
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor, LGBMClassifier, log_evaluation, early_stopping
from catboost import CatBoostRegressor, CatBoostClassifier
import torch
DEVICE = "gpu" if torch.cuda.is_available() else "cpu"
DEVICE_XGB = "gpu_hist" if torch.cuda.is_available() else "auto"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.1)
# AutoML and hyperparameter tuning libraries
import flaml
from autogluon.tabular import TabularPredictor
import optuna
#
#
# This notebook contains a framework for classification analysis that I built over a number of playground episodes.
# ## The Code
# Use Optuna to find optimal weights instead of mean.
# See https://www.kaggle.com/code/tetsutani/ps3e11-eda-xgb-lgbm-cat-ensemble-lb-0-29267#Define-Model
class OptunaWeights:
def __init__(self, random_state):
self.study = None
self.weights = None
self.random_state = random_state
def _objective(self, trial, y_true, y_preds):
# Define the weights for the predictions from each model
weights = [trial.suggest_float(f"weight{n}", 0, 1) for n in range(len(y_preds))]
# Calculate the weighted prediction
weighted_pred = np.average(np.array(y_preds).T, axis=1, weights=weights)
# Calculate the score for the weighted prediction
        score = roc_auc_score(y_true, weighted_pred)
return score
def fit(self, y_true, y_preds, n_trials=300):
optuna.logging.set_verbosity(optuna.logging.ERROR)
sampler = optuna.samplers.CmaEsSampler(seed=self.random_state)
self.study = optuna.create_study(
            sampler=sampler, study_name="OptunaWeights", direction="maximize"
)
objective_partial = partial(self._objective, y_true=y_true, y_preds=y_preds)
self.study.optimize(objective_partial, n_trials=n_trials)
self.weights = [
self.study.best_params[f"weight{n}"] for n in range(len(y_preds))
]
def predict(self, y_preds):
assert (
self.weights is not None
), "OptunaWeights error, must be fitted before predict"
weighted_pred = np.average(np.array(y_preds).T, axis=1, weights=self.weights)
return weighted_pred
def fit_predict(self, y_true, y_preds, n_trials=300):
self.fit(y_true, y_preds, n_trials=n_trials)
return self.predict(y_preds)
    def get_weights(self):
        return self.weights
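# A minimal, hypothetical usage sketch of OptunaWeights (the variable names below
# are illustrative; actual usage appears later in build_weighted_ensemble):
# ow = OptunaWeights(random_state=0)
# blended_oof = ow.fit_predict(y_true, [oof_preds_model_a, oof_preds_model_b], n_trials=100)
# print(ow.weights)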
METRICS = {"logloss": log_loss, "rocauc": roc_auc_score}
LOWER_IS_BETTER = {"logloss": True, "rocauc": False}
class ClassificationPlayer:
"""The main class to simplify EDA and modelling."""
def __init__(self, dataset_name, original_filename, target, metric="logloss"):
self.df_train = pd.read_csv(
f"/kaggle/input/{dataset_name}/train.csv", index_col=0
)
self.df_test = pd.read_csv(
f"/kaggle/input/{dataset_name}/test.csv", index_col=0
)
self.df_original = pd.read_csv(original_filename)
self.target = target
self.y_train = self.df_train[target]
self.y_original = self.df_original[target]
# self.cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
self.cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=0)
self.metric_fn = METRICS[metric]
self.lower_is_better = LOWER_IS_BETTER[metric]
self.leaderboard = {}
self.models = {}
self.oof_preds = {}
self.test_preds = {}
self._view_data()
def perform_eda(self, num_features):
"""Perform basic EDA."""
self.num_features = num_features
self._check_missing()
self._plot_target()
self._plot_feature_distribution()
self._plot_correlation()
def train_model(
self,
model_fn=None,
num_features=None,
feature_fn=None,
use_original_data=False,
model_name=None,
early_stopping_rounds=200,
return_models=False,
verbose=False,
):
"""Train `model_fn` with self.cv, optinally with `feature_fn` to create addtional features and `use_original_data`.
Can save test predictions for submission.
"""
self.num_features = num_features
df_train = self.df_train.copy()
df_original = self.df_original.copy()
df_test = self.df_test.copy()
if feature_fn is not None:
feature_fn(df_train)
feature_fn(df_test)
if use_original_data:
feature_fn(df_original)
oof_preds = np.zeros(len(df_train))
pipelines = []
for fold, (idx_tr, idx_vl) in enumerate(self.cv.split(df_train, self.y_train)):
# Fold train: add the entire original data
df_tr, y_tr = df_train.iloc[idx_tr], self.y_train[idx_tr]
if use_original_data:
df_tr = pd.concat([df_tr, df_original])
y_tr = np.hstack([y_tr, self.y_original])
# Fold validation: just synthetic data
df_vl, y_vl = df_train.iloc[idx_vl], self.y_train[idx_vl]
# eval_set for early stopping
pipeline = self._build_pipeline(model_fn)
pipeline["proc"].fit(df_tr, y_tr)
X_vl = pipeline["proc"].transform(df_vl)
eval_set = [(X_vl, y_vl)]
if type(pipeline["model"]) == CatBoostClassifier:
pipeline.fit(
df_tr,
y_tr,
model__eval_set=eval_set,
model__early_stopping_rounds=early_stopping_rounds,
model__verbose=verbose,
)
elif type(pipeline["model"]) == XGBClassifier:
pipeline["model"].early_stopping_rounds = early_stopping_rounds
pipeline.fit(
df_tr, y_tr, model__eval_set=eval_set, model__verbose=verbose
)
elif type(pipeline["model"]) == LGBMClassifier:
callbacks = [early_stopping(early_stopping_rounds), log_evaluation(-1)]
pipeline.fit(
df_tr, y_tr, model__eval_set=eval_set, model__callbacks=callbacks
)
else:
pipeline.fit(df_tr, y_tr)
oof_preds[idx_vl] = pipeline.predict_proba(df_vl)[:, 1]
score = self.metric_fn(y_vl, oof_preds[idx_vl])
pipelines.append(pipeline)
if verbose:
print(f"Fold {fold} score = {score:.4f}")
score = self.metric_fn(self.y_train, oof_preds)
print(f" OOF score={score:.4f}")
if model_name is not None:
df = pd.DataFrame(data={"id": df_train.index, self.target: oof_preds})
df.to_csv(f"{model_name}_oof_preds.csv", index=None)
y_pred = np.mean(
[p.predict_proba(df_test)[:, 1] for p in pipelines], axis=0
)
df = pd.DataFrame(data={"id": df_test.index, self.target: y_pred})
df.to_csv(f"{model_name}_test_preds.csv", index=None)
self.leaderboard[model_name] = score
self.models[model_name] = pipelines
self.oof_preds[model_name] = oof_preds
self.test_preds[model_name] = y_pred
if return_models:
return pipelines
def show_leaderboard(self):
display(
pd.DataFrame(
self.leaderboard.values(),
index=self.leaderboard.keys(),
columns=["CV score"],
).sort_values("CV score", ascending=self.lower_is_better)
)
def build_mean_ensemble(self, model_names, ensemble_name):
"""Create an ensemble of provided model names by taking average of predictions.
Save oof_preds and test_preds.
"""
preds = np.mean([self.oof_preds[m] for m in model_names], axis=0)
df = pd.DataFrame(data={"id": self.df_train.index, self.target: preds})
df.to_csv(f"{ensemble_name}_oof_preds.csv", index=None)
score = self.metric_fn(self.y_train, preds)
print(f"Ensemble score={score:.4f}")
preds = np.mean([self.test_preds[m] for m in model_names], axis=0)
df = pd.DataFrame(data={"id": self.df_test.index, self.target: preds})
df.to_csv(f"{ensemble_name}_test_preds.csv", index=None)
def build_weighted_ensemble(self, model_names, ensemble_name, random_state=0):
"""Create an ensemble of provided model names by using weights optimized with Optuna.
Save oof_preds and test_preds.
"""
optweights = OptunaWeights(random_state=random_state)
preds = [self.oof_preds[m] for m in model_names]
preds = optweights.fit_predict(self.y_train, preds)
print("Weights")
print(list(zip(model_names, optweights.weights)))
df = pd.DataFrame(data={"id": self.df_train.index, self.target: preds})
df.to_csv(f"{ensemble_name}_oof_preds.csv", index=None)
score = self.metric_fn(self.y_train, preds)
print(f"Ensemble score={score:.4f}")
preds = np.mean([self.test_preds[m] for m in model_names], axis=0)
df = pd.DataFrame(data={"id": self.df_test.index, self.target: preds})
df.to_csv(f"{ensemble_name}_test_preds.csv", index=None)
def _view_data(self):
"""Glance at the data."""
df = pd.DataFrame(
[len(self.df_train), len(self.df_test), len(self.df_original)],
index=["train", "test", "original"],
columns=["count"],
)
display(df)
print("Train data")
print(self.df_train.info())
display(self.df_train.head())
def _check_missing(self):
"""Count missing data in train and test sets."""
df = pd.concat(
[
pd.DataFrame(
self.df_train.drop(columns=[self.target]).isnull().sum(),
columns=["missing train"],
),
pd.DataFrame(self.df_test.isnull().sum(), columns=["missing test"]),
pd.DataFrame(
self.df_original.drop(columns=[self.target]).isnull().sum(),
columns=["missing original"],
),
],
axis=1,
)
display(df)
def _plot_target(self):
"""Plot distribution of the target feature synthetic train vs. original dataset."""
df = pd.concat(
[
pd.DataFrame(self.df_train[self.target].value_counts()),
pd.DataFrame(self.df_original[self.target].value_counts()),
pd.DataFrame(
self.df_train[self.target].value_counts(normalize=True) * 100
).round(1),
pd.DataFrame(
self.df_original[self.target].value_counts(normalize=True) * 100
).round(1),
],
axis=1,
)
df.columns = ["train", "test", "train (%)", "test (%)"]
df.index.name = self.target
display(df)
def _plot_feature_distribution(self):
"""Plot feature distribution grouped by the 3 sets."""
features = self.df_test.columns
df_train = self.df_train.copy()
df_test = self.df_test.copy()
df_original = self.df_original.copy()
df_train["set"] = "train"
df_original["set"] = "original"
df_test["set"] = "test"
df_combined = pd.concat([df_train, df_test, df_original])
ncols = 2
nrows = np.ceil(len(features) / ncols).astype(int)
fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(15, nrows * 3))
for c, ax in zip(features, axs.flatten()):
if c in self.num_features:
sns.boxplot(data=df_combined, x=c, ax=ax, y="set")
else:
sns.countplot(data=df_combined, x="set", ax=ax, hue=c)
fig.suptitle("Distribution of features by set")
plt.tight_layout(rect=[0, 0, 1, 0.98])
plt.show()
def _plot_correlation(self):
"""Plot correlation between numerical features and the target feature."""
plt.figure(figsize=(8, 8))
features = self.num_features + [self.target]
corr = self.df_train[features].corr()
annot_labels = np.where(corr.abs() > 0.5, corr.round(1).astype(str), "")
upper_triangle = np.triu(np.ones_like(corr, dtype=bool))
sns.heatmap(
corr,
mask=upper_triangle,
vmin=-1,
vmax=1,
center=0,
square=True,
annot=annot_labels,
cmap="coolwarm",
linewidths=0.5,
fmt="",
)
plt.title("Correlation between numerical features and the target feature")
plt.show()
def _build_pipeline(self, model_fn):
num_proc = make_pipeline(SimpleImputer(strategy="mean"), StandardScaler())
processing = ColumnTransformer([("num", num_proc, self.num_features)])
return Pipeline([("proc", processing), ("model", model_fn())])
# ## Episode 12: Kidney Stone Prediction
# [Source: Kaggle](https://www.kaggle.com/datasets/vuppalaadithyasairam/kidney-stone-prediction-based-on-urine-analysis)
# ## Data at a glance
# Original dataset is [Kidney Stone Prediction based on Urine Analysis](https://www.kaggle.com/datasets/vuppalaadithyasairam/kidney-stone-prediction-based-on-urine-analysis). Metric is ROC AUC.
# This dataset can be used to predict the presence of kidney stones based on urine analysis.
# The 79 urine specimens were analyzed in an effort to determine if certain physical characteristics of the urine might be related to the formation of calcium oxalate crystals.
# The six physical characteristics of the urine are:
# 1. specific gravity, the density of the urine relative to water;
# 2. pH, the negative logarithm of the hydrogen ion concentration;
# 3. osmolarity (mOsm), a unit used in biology and medicine but not in physical chemistry. Osmolarity is proportional to the concentration of molecules in solution;
# 4. conductivity (mMho, milliMho). One mho is one reciprocal ohm. Conductivity is proportional to the concentration of charged ions in solution;
# 5. urea concentration in millimoles per litre;
# 6. calcium concentration (CALC) in millimoles per litre.
# ## Initialize the player
player = ClassificationPlayer(
dataset_name="playground-series-s3e12",
original_filename="/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv",
target="target",
metric="rocauc",
)
# Notes:
#
# - All features are numerical.
# - A binary classification 0-1.
# - The dataset sizes are absolutely tiny!
# ## Basic EDA
# I will check
# - missing values
# - distribution of the target feature
# - distribution of features grouped by different datasets
# - correlation of numerical features and the target feature
# Use all features as numerical features
num_features = player.df_test.columns.tolist()
player.perform_eda(num_features)
# Notes:
#
# - This is a pretty balanced dataset.
# - Synthetic and original have similar ratio.
# - There are no missing values.
# - The three datasets are pretty similar.
# ## Baseline models
# For a classification task, I will train one model with default logistic regression and 3 GBDT models.
models = [
("logit", partial(LogisticRegression, random_state=0)),
("lgbm", partial(LGBMClassifier, random_state=0)),
("xgb", partial(XGBClassifier, random_state=0)),
("cb", partial(CatBoostClassifier, random_state=0)),
]
for model_name, model_fn in models:
print(model_name)
player.train_model(
model_fn=model_fn, num_features=num_features, model_name=model_name
)
print()
player.show_leaderboard()
# ## Which features are the most important?
# using CatBoost model
df = pd.DataFrame({"feature": num_features})
df["importance"] = np.array(
[p["model"].feature_importances_ for p in player.models["cb"]]
).mean(axis=0)
plt.figure(figsize=(8, 8))
sns.barplot(data=df.sort_values("importance"), x="importance", y="feature")
# Notes:
#
# `calc` is dominant.
# ## Adding original data
# by simply passing `use_original_data=True` to the `train_model` method.
prefix = "extra_data_"
for model_name, model_fn in models:
print(model_name)
player.train_model(
model_fn=model_fn,
num_features=num_features,
use_original_data=True,
model_name=prefix + model_name,
)
print()
player.show_leaderboard()
# Notes:
#
# - Adding extra data improves performance.
# ## Adding features
def add_features(df):
# Ratio of calcium concentration to urea concentration:
df["calc_urea_ratio"] = df["calc"] / df["urea"]
# Ratio of specific gravity to osmolarity:
df["gravity_osm_ratio"] = df["gravity"] / df["osmo"]
# Product of calcium concentration and osmolarity:
df["calc_osm_product"] = df["calc"] * df["osmo"]
# Product of specific gravity and conductivity:
df["gravity_cond_product"] = df["gravity"] * df["cond"]
# Ratio of calcium concentration to specific gravity:
df["calc_gravity_ratio"] = df["calc"] / df["gravity"]
# Ratio of urea concentration to specific gravity:
df["urea_gravity_ratio"] = df["urea"] / df["gravity"]
# Product of osmolarity and conductivity:
df["osm_cond_product"] = df["osmo"] * df["cond"]
# Ratio of calcium concentration to osmolarity:
df["calc_osm_ratio"] = df["calc"] / df["osmo"]
# Ratio of urea concentration to osmolarity:
df["urea_osm_ratio"] = df["urea"] / df["osmo"]
# Product of specific gravity and urea concentration:
df["gravity_urea_product"] = df["gravity"] * df["urea"]
prefix = "extra_features_"
new_features = [
"calc_urea_ratio",
# 'gravity_osm_ratio',
"calc_osm_product",
"gravity_cond_product",
"calc_gravity_ratio",
# 'urea_gravity_ratio',
"osm_cond_product",
"calc_osm_ratio",
# 'urea_osm_ratio',
# 'gravity_urea_product'
]
extra_features = num_features + new_features
for model_name, model_fn in models:
print(model_name)
player.train_model(
model_fn=model_fn,
num_features=extra_features,
feature_fn=add_features,
model_name=prefix + model_name,
)
print()
player.show_leaderboard()
df = pd.DataFrame({"feature": extra_features})
df["importance"] = np.array(
[p["model"].feature_importances_ for p in player.models["extra_features_cb"]]
).mean(axis=0)
plt.figure(figsize=(8, 8))
sns.barplot(data=df.sort_values("importance"), x="importance", y="feature")
prefix = "extra_features_extra_data_"
for model_name, model_fn in models:
print(model_name)
player.train_model(
model_fn=model_fn,
use_original_data=True,
num_features=extra_features,
feature_fn=add_features,
model_name=prefix + model_name,
)
print()
player.show_leaderboard()
# ## AutoML with FLAML
# Get transformed data for auto ML
player.num_features = extra_features
num_processing = player._build_pipeline(LogisticRegression)["proc"]
df = pd.concat([player.df_train, player.df_original])
add_features(df)
X = num_processing.fit_transform(df)
y = df[player.target]
TIME_BUDGET = 60 * 60 * 2
EARLY_STOPPING_ROUNDS = 200
tuned = True
if not tuned:
for model in ["lgbm", "xgboost", "catboost"]:
auto_flaml = flaml.AutoML()
auto_flaml.fit(
X,
y,
task="classification",
metric="roc_auc",
estimator_list=[model],
time_budget=TIME_BUDGET,
early_stop=EARLY_STOPPING_ROUNDS,
verbose=0,
)
print(model)
print(auto_flaml.best_config)
else:
lgbm_params = {
"n_estimators": 27,
"num_leaves": 5,
"min_child_samples": 11,
"learning_rate": 0.11829523417382827,
"log_max_bin": 5,
"colsample_bytree": 0.08246842146207267,
"reg_alpha": 1.654704291470382,
"reg_lambda": 0.008061086792332105,
}
xgb_params = {
"n_estimators": 17,
"max_leaves": 6,
"min_child_weight": 24.197965349022034,
"learning_rate": 0.2535908861429132,
"subsample": 0.9299408964701006,
"colsample_bylevel": 1.0,
"colsample_bytree": 1.0,
"reg_alpha": 0.7136202942152321,
"reg_lambda": 0.0014127090088201812,
}
cb_params = {
"early_stopping_rounds": 26,
"learning_rate": 0.014212146103441932,
"n_estimators": 80,
}
prefix = "extra_features_extra_data_tuned_"
models = [
("lgbm", partial(LGBMClassifier, random_state=0, **lgbm_params)),
("xgb", partial(XGBClassifier, random_state=0, **xgb_params)),
("cb", partial(CatBoostClassifier, random_state=0, **cb_params)),
]
for model_name, model_fn in models:
print(model_name)
player.train_model(
model_fn=model_fn,
use_original_data=True,
num_features=extra_features,
feature_fn=add_features,
model_name=prefix + model_name,
)
print()
player.show_leaderboard()
# ## Ensembling
# Simply taking average of predictions from models
player.build_mean_ensemble(
[
"extra_features_extra_data_tuned_lgbm",
"extra_features_extra_data_tuned_xgb",
"extra_features_extra_data_tuned_cb",
],
"tuned_mean_ensemble",
)
player.build_weighted_ensemble(
[
"extra_features_extra_data_tuned_lgbm",
"extra_features_extra_data_tuned_xgb",
"extra_features_extra_data_tuned_cb",
],
"tuned_weighted_ensemble",
)
# ## Submission
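# A minimal, assumed submission step: build_weighted_ensemble above already wrote the
# ensemble test predictions to CSV with `id` and target columns, so the submission file
# can simply re-save that output (the competition's expected format is an assumption here).
submission = pd.read_csv("tuned_weighted_ensemble_test_preds.csv")
submission.to_csv("submission.csv", index=False)
submission.head()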
# ---
# ## Episode 10: Pulsar
# ### Data at a glance
# Original dataset is [Pulsar Classification For Class Prediction](https://www.kaggle.com/datasets/brsdincer/pulsar-classification-for-class-prediction). Metric is logloss.
# COLUMNS: Based on Integrated Profile of Observation
# - Mean_Integrated: Mean of Observations
# - SD: Standard deviation of Observations
# - EK: Excess kurtosis of Observations
# - Skewness: In probability theory and statistics, skewness is a measure of the asymmetry of the probability distribution of a real-valued random variable about its mean. Skewness of Observations.
# - Mean _ DMSNR _ Curve: Mean of DM SNR CURVE of Observations
# - SD _ DMSNR _ Curve: Standard deviation of DM SNR CURVE of Observations
# - EK _ DMSNR _ Curve: Excess kurtosis of DM SNR CURVE of Observations
# - Skewness _ DMSNR _ Curve: Skewness of DM SNR CURVE of Observations
# - Class: 0 - 1
# WHAT IS DM SNR CURVE:
# Radio waves emitted from pulsars reach Earth after traveling long distances through space, which is filled with free electrons. The important point is that pulsars emit a wide range of frequencies, and the amount by which the electrons slow down a wave depends on its frequency: waves with higher frequency are slowed down less than waves with lower frequency. This effect is dispersion.
# ### Initialize the player
# player = ClassificationPlayer(
# dataset_name='playground-series-s3e10',
# original_filename='/kaggle/input/pulsar-classification-for-class-prediction/Pulsar.csv',
# target='Class')
# Notes:
#
# - All features are numerical.
# - A binary classification 0-1.
# - The train and test sets are much bigger than the original dataset.
# ### Basic EDA
# I will check
# - missing values
# - distribution of the target feature
# - distribution of features grouped by different datasets
# - correlation of numerical features and the target feature
# # Use all features as numerical features
# num_features = player.df_test.columns.tolist()
# player.perform_eda(num_features)
# Notes:
#
# - This is an imbalanced dataset.
# - Synthetic and original have similar ratio.
# - There are no missing values.
# - The three datasets are pretty similar
# - There are lots of data points outside of the whiskers
# ### Baseline models
# For a classification task, I will train one model with default logistic regression and 3 GBDT models.
# models = [
# ('logit', partial(LogisticRegression, random_state=0)),
# ('lgbm', partial(LGBMClassifier, random_state=0)),
# ('xgb', partial(XGBClassifier, random_state=0)),
# ('cb', partial(CatBoostClassifier, random_state=0))
# ]
# for model_name, model_fn in models:
# print(model_name)
# player.train_model(model_fn=model_fn, num_features=num_features, model_name=model_name)
# print()
# player.show_leaderboard()
# ### Which features are the most important?
# using CatBoost model
# df = pd.DataFrame({'feature': num_features})
# df['importance'] = np.array([p['model'].feature_importances_ for p in player.models['cb']]).mean(axis=0)
# plt.figure(figsize=(8,8))
# sns.barplot(data=df.sort_values('importance'), x='importance', y='feature')
# Notes:
#
# `EK` is dominant.
# ### Adding original data
# prefix = 'extra_data_'
# for model_name, model_fn in models:
# print(model_name)
# player.train_model(model_fn=model_fn, num_features=num_features, use_original_data=True, model_name=prefix+model_name)
# print()
# player.show_leaderboard()
# Notes:
#
# Adding extra data reduces performance.
# ### Hyperparameter tuning with FLAML
# X_train = player.df_train.drop(columns=[player.target]).values
# flaml_tuned = True
# TIME_BUDGET = 60 * 60
# EARLY_STOPPING_ROUNDS = 500
# if not flaml_tuned:
# for model_name in ['lgbm', 'xgboost', 'catboost']:
# auto_flaml = flaml.AutoML()
# auto_flaml.fit(X_train, player.y_train, task='classification', estimator_list=[model_name], time_budget=TIME_BUDGET, early_stop=EARLY_STOPPING_ROUNDS, verbose=0)
# print(model_name)
# print(auto_flaml.best_config)
# lgbm_params = {'n_estimators': 1638, 'num_leaves': 11, 'min_child_samples': 3, 'learning_rate': 0.07882443919605875, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.13230350130113055, 'reg_lambda': 0.06434703980686605}
# xgb_params = {'n_estimators': 1439, 'max_leaves': 1768, 'min_child_weight': 128.0, 'learning_rate': 0.03036795903376639, 'subsample': 0.7309168526251735, 'colsample_bylevel': 0.9813842382888714, 'colsample_bytree': 0.9900179111373784, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.7380418720213944}
# cb_params = {'early_stopping_rounds': 13, 'learning_rate': 0.18671482954894983, 'n_estimators': 100}
# models = [
# ('lgbm', partial(LGBMClassifier, random_state=0, **lgbm_params)),
# ('xgb', partial(XGBClassifier, random_state=0, **xgb_params)),
# ('cb', partial(CatBoostClassifier, random_state=0, **cb_params))
# ]
# prefix = 'tuned_'
# for model_name, model_fn in models:
# print(model_name)
# player.train_model(model_fn=model_fn, num_features=num_features, model_name=prefix+model_name)
# print()
# player.show_leaderboard()
# ### Mean Ensembling
# player.build_mean_ensemble(['tuned_lgbm', 'tuned_xgb', 'tuned_cb'], 'mean_ensemble')
# ---
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("../input/tabular-playground-series-mar-2021/train.csv")
test = pd.read_csv("../input/tabular-playground-series-mar-2021/test.csv")
train.head()
train.describe()
train.info()
test.info()
corr_matrix = train.corr(numeric_only=True)  # correlations over the numeric columns only
corr_matrix["target"].sort_values(ascending=False)
labels = train.pop("target")
train = train.drop(["cat10"], axis=1)
test = test.drop(["cat10"], axis=1)
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
train_num = train.select_dtypes(include=numerics)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline(
[
("std_scaler", StandardScaler()),
]
)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder
cat_attribs = list(train.select_dtypes(include=object))
cat_attribs
from sklearn.compose import ColumnTransformer
num_attribs = list(train_num)
full_pipeline = ColumnTransformer(
[("num", num_pipeline, num_attribs), ("cat", OneHotEncoder(), cat_attribs)]
)
train_prepared = full_pipeline.fit_transform(train)
test_prepared = full_pipeline.transform(test)
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
pca = TruncatedSVD()  # TruncatedSVD handles the sparse one-hot output; the step keeps the name "pca" for the param grid
logistic = LogisticRegression(max_iter=10000, tol=0.1)
pipe = Pipeline(steps=[("pca", pca), ("logistic", logistic)])
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
"pca__n_components": [60, 75, 90, 105, 120],
"logistic__C": np.logspace(-4, 4, 4),
}
search = GridSearchCV(pipe, param_grid, n_jobs=-1)
search.fit(train_prepared, labels)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
y_pred = search.predict(test_prepared)
output = pd.DataFrame({"PassengerId": test.id, "Survived": y_pred})
output.to_csv("my_submission.csv", index=False)
print("Your pipeline submission was successfully saved!")
# ---
# # Packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ***
# ***
# # Reading Data
fuel = pd.read_table("/kaggle/input/gas-prices-in-brazil/2004-2021.tsv")
fuel.head()
fuel.info()
# The date columns need type transformation.
# ## Data transformation
fuel["DATA INICIAL"] = pd.to_datetime(fuel["DATA INICIAL"])
fuel["DATA FINAL"] = pd.to_datetime(fuel["DATA FINAL"])
fuel["year"] = fuel["DATA FINAL"].dt.year
fuel["month"] = fuel["DATA FINAL"].dt.month
fuel = fuel.reindex(
columns=[
"DATA INICIAL",
"DATA FINAL",
"year",
"month",
"REGIÃO",
"ESTADO",
"PRODUTO",
"NÚMERO DE POSTOS PESQUISADOS",
"UNIDADE DE MEDIDA",
"PREÇO MÉDIO REVENDA",
"DESVIO PADRÃO REVENDA",
"PREÇO MÍNIMO REVENDA",
"PREÇO MÁXIMO REVENDA",
"MARGEM MÉDIA REVENDA",
"COEF DE VARIAÇÃO REVENDA",
"PREÇO MÉDIO DISTRIBUIÇÃO",
"DESVIO PADRÃO DISTRIBUIÇÃO",
"PREÇO MÍNIMO DISTRIBUIÇÃO",
"PREÇO MÁXIMO DISTRIBUIÇÃO",
"COEF DE VARIAÇÃO DISTRIBUIÇÃO",
]
)
fuel.dtypes
fuel = fuel.drop(
[
"DESVIO PADRÃO REVENDA",
"PREÇO MÍNIMO REVENDA",
"PREÇO MÁXIMO REVENDA",
"MARGEM MÉDIA REVENDA",
"COEF DE VARIAÇÃO REVENDA",
"PREÇO MÉDIO DISTRIBUIÇÃO",
"DESVIO PADRÃO DISTRIBUIÇÃO",
"PREÇO MÍNIMO DISTRIBUIÇÃO",
"PREÇO MÁXIMO DISTRIBUIÇÃO",
"COEF DE VARIAÇÃO DISTRIBUIÇÃO",
],
axis="columns",
)
fuel = fuel.rename(
columns={
"DATA INICIAL": "first_date",
"DATA FINAL": "last_date",
"REGIÃO": "region",
"ESTADO": "state",
"PRODUTO": "product",
"NÚMERO DE POSTOS PESQUISADOS": "gas_stations_researched",
"UNIDADE DE MEDIDA": "measure_unit",
"PREÇO MÉDIO REVENDA": "sale_avg_price",
}
)
fuel.head()
# ***
# ***
# # Exploratory Analysis
# Setting graph style
sns.set_palette("Accent")
sns.set_style("darkgrid")
fuel["product"].unique()
# Gasoline, Diesel and Ethanol are the most common fuel types in Brazil.
plt.figure(figsize=(20, 15))
ax = plt.subplot(3, 1, 1)
ax.set_title("Gasoline Price Evolution from 2004 to 2021", fontsize=18, loc="left")
ax.set_xlabel("Year", fontsize=14)
ax.set_ylabel("Average price (R$)", fontsize=14)
ax = sns.lineplot(
x="year",
y="sale_avg_price",
hue="region",
data=fuel.query('product == "GASOLINA COMUM"'),
errorbar=None,
)
ax = plt.subplot(3, 1, 2)
ax.set_title("Ethanol Price Evolution from 2004 to 2021", fontsize=18, loc="left")
ax.set_xlabel("Year", fontsize=14)
ax.set_ylabel("Average price (R$)", fontsize=14)
ax = sns.lineplot(
x="year",
y="sale_avg_price",
hue="region",
data=fuel.query('product == "ETANOL HIDRATADO"'),
errorbar=None,
)
ax = plt.subplot(3, 1, 3)
ax.set_title("Diesel Price Evolution from 2004 to 2021", fontsize=18, loc="left")
ax.set_xlabel("Year", fontsize=14)
ax.set_ylabel("Average price (R$)", fontsize=14)
ax = sns.lineplot(
x="year",
y="sale_avg_price",
hue="region",
data=fuel.query('product == "ÓLEO DIESEL"'),
errorbar=None,
)
# All the fuel types showed similar price growth over the years in all regions.
# The South and Southeast regions showed the lowest prices for Diesel and Ethanol, but the Southeast saw an increase in Gasoline prices after 2015.
# The North region had the highest average prices for all fuel types.
# We can take a deeper look into the states of these three regions.
# ### **North region**
fuel_north = fuel.copy()
fuel_north = fuel_north.query('region == "NORTE"')
fuel_north["growth"] = fuel_north.sale_avg_price.diff()
fuel_north["accel"] = fuel_north.growth.diff()
fig, axs = plt.subplots(3, 3, figsize=(20, 20))
# Plots
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_north.query('product == "GASOLINA COMUM"'),
ax=axs[0, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_north.query('product == "GASOLINA COMUM"'),
ax=axs[0, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_north.query('product == "GASOLINA COMUM"'),
ax=axs[0, 2],
errorbar=None,
)
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_north.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_north.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_north.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 2],
errorbar=None,
)
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_north.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_north.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_north.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 2],
errorbar=None,
)
# Plot titles
axs[0, 0].set_title(
"Gasoline Prices in North Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[0, 1].set_title(
"Gasoline Price Growth in North Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[0, 2].set_title(
"Gasoline Price Acceleration in North Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[1, 0].set_title(
"Ethanol Prices in North Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[1, 1].set_title(
"Ethanol Price Growth in North Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[1, 2].set_title(
"Ethanol Price Acceleration in North Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[2, 0].set_title(
"Diesel Prices in North Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[2, 1].set_title(
    "Diesel Price Growth in North Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[2, 2].set_title(
    "Diesel Price Acceleration in North Region from 2004 to 2021", fontsize=14, loc="left"
)
# Labels
axs[0, 0].set_xlabel("Year", fontsize=12)
axs[0, 0].set_ylabel("Price (R$)", fontsize=12)
axs[0, 1].set_xlabel("Year", fontsize=12)
axs[0, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[0, 2].set_xlabel("Year", fontsize=12)
axs[0, 2].set_ylabel("Acceleration (R$)", fontsize=12)
axs[1, 0].set_xlabel("Year", fontsize=12)
axs[1, 0].set_ylabel("Price (R$)", fontsize=12)
axs[1, 1].set_xlabel("Year", fontsize=12)
axs[1, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[1, 2].set_xlabel("Year", fontsize=12)
axs[1, 2].set_ylabel("Acceleration (R$)", fontsize=12)
axs[2, 0].set_xlabel("Year", fontsize=12)
axs[2, 0].set_ylabel("Price (R$)", fontsize=12)
axs[2, 1].set_xlabel("Year", fontsize=12)
axs[2, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[2, 2].set_xlabel("Year", fontsize=12)
axs[2, 2].set_ylabel("Acceleration (R$)", fontsize=12)
# Reducing space between plots
fig.tight_layout()
plt.show()
# The state of Acre had the highest prices for Diesel and Gasoline. Ethanol price growth was less consistent and no single state predominated with higher prices.
# Tocantins showed the lowest prices for Diesel (throughout the whole period) and for Ethanol (until late 2015). That was not the case for Gasoline, where Amazonas had the lowest prices until 2011 and Amapá thereafter.
# Until 2012 price changes were fairly steady, but afterwards they became more unstable, especially for Gasoline and Ethanol.
# ### **Southeast region**
fuel_southeast = fuel.copy()
fuel_southeast = fuel_southeast.query('region == "SUDESTE"')
fuel_southeast["growth"] = fuel_southeast.sale_avg_price.diff()
fuel_southeast["accel"] = fuel_southeast.growth.diff()
fig, axs = plt.subplots(3, 3, figsize=(20, 20))
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_southeast.query('product == "GASOLINA COMUM"'),
ax=axs[0, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_southeast.query('product == "GASOLINA COMUM"'),
ax=axs[0, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_southeast.query('product == "GASOLINA COMUM"'),
ax=axs[0, 2],
errorbar=None,
)
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_southeast.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_southeast.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_southeast.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 2],
errorbar=None,
)
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_southeast.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_southeast.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_southeast.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 2],
errorbar=None,
)
axs[0, 0].set_title(
"Gasoline Prices in Southeast Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[0, 1].set_title(
"Gasoline Price Growth in Southeast Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[0, 2].set_title(
"Gasoline Price Acceleration in Southeast Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[1, 0].set_title(
"Ethanol Prices in Southeast Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[1, 1].set_title(
"Ethanol Price Growth in Southeast Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[1, 2].set_title(
"Ethanol Price Acceleration in Southeast Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[2, 0].set_title(
"Diesel Prices in Southeast Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[2, 1].set_title(
    "Diesel Price Growth in Southeast Region from 2004 to 2021",
    fontsize=14,
    loc="left",
)
axs[2, 2].set_title(
    "Diesel Price Acceleration in Southeast Region from 2004 to 2021",
    fontsize=14,
    loc="left",
)
axs[0, 0].set_xlabel("Year", fontsize=12)
axs[0, 0].set_ylabel("Price (R$)", fontsize=12)
axs[0, 1].set_xlabel("Year", fontsize=12)
axs[0, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[0, 2].set_xlabel("Year", fontsize=12)
axs[0, 2].set_ylabel("Acceleration (R$)", fontsize=12)
axs[1, 0].set_xlabel("Year", fontsize=12)
axs[1, 0].set_ylabel("Price (R$)", fontsize=12)
axs[1, 1].set_xlabel("Year", fontsize=12)
axs[1, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[1, 2].set_xlabel("Year", fontsize=12)
axs[1, 2].set_ylabel("Acceleration (R$)", fontsize=12)
axs[2, 0].set_xlabel("Year", fontsize=12)
axs[2, 0].set_ylabel("Price (R$)", fontsize=12)
axs[2, 1].set_xlabel("Year", fontsize=12)
axs[2, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[2, 2].set_xlabel("Year", fontsize=12)
axs[2, 2].set_ylabel("Acceleration (R$)", fontsize=12)
fig.tight_layout()
plt.show()
# São Paulo state had the lowest prices for Gasoline and Ethanol, and, after 2012, for Diesel as well.
# The highest Gasoline prices were in Espírito Santo until 2011 and then in Rio de Janeiro. Ethanol followed a similar pattern: Espírito Santo up to 2017, then Rio de Janeiro.
# Espírito Santo had the highest Diesel prices until 2012.
# As in the North region, prices fluctuated more after 2012, mostly for Diesel in Espírito Santo.
# ### **South region**
fuel_south = fuel.copy()
fuel_south = fuel_south.query('region == "SUL"')
fuel_south["growth"] = fuel_south.sale_avg_price.diff()
fuel_south["accel"] = fuel_south.growth.diff()
fig, axs = plt.subplots(3, 3, figsize=(20, 20))
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_south.query('product == "GASOLINA COMUM"'),
ax=axs[0, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_south.query('product == "GASOLINA COMUM"'),
ax=axs[0, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_south.query('product == "GASOLINA COMUM"'),
ax=axs[0, 2],
errorbar=None,
)
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_south.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_south.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_south.query('product == "ETANOL HIDRATADO"'),
ax=axs[1, 2],
errorbar=None,
)
sns.lineplot(
x="year",
y="sale_avg_price",
hue="state",
data=fuel_south.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 0],
errorbar=None,
)
sns.lineplot(
x="year",
y="growth",
hue="state",
data=fuel_south.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 1],
errorbar=None,
)
sns.lineplot(
x="year",
y="accel",
hue="state",
data=fuel_south.query('product == "ÓLEO DIESEL"'),
ax=axs[2, 2],
errorbar=None,
)
axs[0, 0].set_title(
"Gasoline Prices in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[0, 1].set_title(
"Gasoline Price Growth in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[0, 2].set_title(
"Gasoline Price Acceleration in South Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[1, 0].set_title(
"Ethanol Prices in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[1, 1].set_title(
"Ethanol Price Growth in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[1, 2].set_title(
"Ethanol Price Acceleration in South Region from 2004 to 2021",
fontsize=14,
loc="left",
)
axs[2, 0].set_title(
"Diesel Prices in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[2, 1].set_title(
    "Diesel Price Growth in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[2, 2].set_title(
    "Diesel Price Acceleration in South Region from 2004 to 2021", fontsize=14, loc="left"
)
axs[0, 0].set_xlabel("Year", fontsize=12)
axs[0, 0].set_ylabel("Price (R$)", fontsize=12)
axs[0, 1].set_xlabel("Year", fontsize=12)
axs[0, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[0, 2].set_xlabel("Year", fontsize=12)
axs[0, 2].set_ylabel("Acceleration (R$)", fontsize=12)
axs[1, 0].set_xlabel("Year", fontsize=12)
axs[1, 0].set_ylabel("Price (R$)", fontsize=12)
axs[1, 1].set_xlabel("Year", fontsize=12)
axs[1, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[1, 2].set_xlabel("Year", fontsize=12)
axs[1, 2].set_ylabel("Acceleration (R$)", fontsize=12)
axs[2, 0].set_xlabel("Year", fontsize=12)
axs[2, 0].set_ylabel("Price (R$)", fontsize=12)
axs[2, 1].set_xlabel("Year", fontsize=12)
axs[2, 1].set_ylabel("Growth (R$)", fontsize=12)
axs[2, 2].set_xlabel("Year", fontsize=12)
axs[2, 2].set_ylabel("Acceleration (R$)", fontsize=12)
fig.tight_layout()
plt.show()
# The state of Paraná showed the lowest prices for Diesel and Ethanol (over the whole period) and for Gasoline until 2012.
# Rio Grande do Sul had the highest prices for all fuel types.
# Again, there was a major change in price growth after 2012, especially for Gasoline.
# ***
# ***
# # Hypothesis Test
# The samples collected for this dataset suggest that prices in the North are higher than in the South, but a statistical test is needed to confirm it.
# As prices evolve similarly for all fuel types, Gasoline was chosen for this test.
# **Hnull:**
# > North gas prices are not higher than South gas prices.
# **Halt:**
# > North gas prices are significantly higher than South gas prices.
north_prices = fuel.query(
'region == "NORTE" & product == "GASOLINA COMUM"'
).sale_avg_price
south_prices = fuel.query(
'region == "SUL" & product == "GASOLINA COMUM"'
).sale_avg_price
stat, p = mannwhitneyu(north_prices, south_prices, alternative="greater")
if p < 0.05:
print(
"The North gas prices are significantly higher than the South gas prices, at a confidence level of 95%."
)
else:
print(
"There is not enough evidence to suggest that the North gas prices are significantly higher than the South gas prices, at a confidence level of 95%."
)
# ---
# # Abstract
# This experiment presents sentiment analysis using a convolutional neural network (CNN) applied to financial news. The objective was to classify news articles as carrying either positive or negative sentiment based on their content. The dataset used in the experiment consisted of a collection of financial news articles. The CNN model was trained on this dataset and achieved an accuracy of 80.20%, precision of 78.35%, recall of 74.80%, and an F1-score of 76.05%. These results demonstrate the effectiveness of the CNN approach for sentiment analysis on financial news and suggest that it could be a valuable tool for financial decision-making and trading strategies. Overall, this experiment provides insights into the potential applications of CNNs in sentiment analysis and contributes to this area.
# # Introduction
# Sentiment analysis is the process of determining the emotional tone of a piece of text, and it has become increasingly important in today's fast-paced digital world. One area where sentiment analysis is particularly relevant is in financial news, where accurate and timely information can mean the difference between success and failure. In recent years, convolutional neural networks (CNNs) have emerged as a powerful tool for sentiment analysis, thanks to their ability to learn complex patterns and relationships in large datasets. In this context, using CNNs for sentiment analysis on financial news has become a promising area of research, with the potential to improve financial decision-making and create more effective trading strategies. In this experiment, we will explore the basics of sentiment analysis and how it can be applied to financial news using CNNs.
# # Literature review
# Convolutional Neural Networks (CNNs) are a popular type of deep learning model used for image and video processing, natural language processing, and speech recognition. Here are some of the pros and cons of using CNNs:
# Pros:
# * Highly effective at image recognition: CNNs can accurately classify and segment images, even when the images are complex and contain multiple objects or backgrounds.
# * Efficient at processing large datasets: CNNs can be trained on large datasets without overfitting, making them useful for big data applications.
# * Robust to image variation: CNNs can recognize images even when they are rotated, scaled, or partially occluded, making them useful for real-world applications.
# * Can be fine-tuned: CNNs can be fine-tuned to work well on specific tasks, such as object detection, face recognition, or speech recognition.
# * Automates feature extraction: CNNs can automatically extract relevant features from images, reducing the need for manual feature engineering.
# Cons:
# * Requires large datasets: CNNs require large amounts of labeled data to be trained effectively, which can be expensive and time-consuming to obtain.
# * Computationally expensive: CNNs can be computationally expensive to train and require powerful hardware, such as GPUs or TPUs.
# * Difficult to interpret: CNNs are often considered as "black box" models, as it is difficult to understand how they make their predictions, making them unsuitable for some applications where interpretability is important.
# * Sensitivity to hyperparameters: CNNs require a lot of tuning of hyperparameters to obtain optimal performance, which can be challenging and time-consuming.
# * Limited to structured data: CNNs are best suited for structured data, such as images or speech signals, and may not be suitable for other types of data, such as unstructured text data.
# # Experiment
# ## Install library
import warnings
warnings.filterwarnings("ignore")
# ## Load data
import pandas as pd
df = pd.read_csv(
"/kaggle/input/sentiment-analysis-for-financial-news/all-data.csv",
delimiter=",",
encoding="latin-1",
header=None,
)
df = df.rename(columns=lambda x: ["Sentiment", "Sentence"][x])
df.info()
df = df[["Sentence", "Sentiment"]]
df.head()
# ## Exploratory data analysis
df = df[df.Sentiment != "neutral"]
df.head()
df.info()
# ### Sentiment distribution
import seaborn as sns
import matplotlib.pyplot as plt
sentiment = df["Sentiment"].value_counts()
plt.figure(figsize=(12, 4))
sns.barplot(x=sentiment.index, y=sentiment.values, alpha=0.8)
plt.ylabel("Number of Occurrences", fontsize=12)
plt.xlabel("sentiment", fontsize=12)
plt.xticks(rotation=90)
plt.show()
# ## Data preparation
# ### Data cleaning
from bs4 import BeautifulSoup
def strip_html_tags(text):
soup = BeautifulSoup(text, "html.parser")
[s.extract() for s in soup(["iframe", "script"])]
stripped_text = soup.get_text()
stripped_text = re.sub(r"[\r|\n|\r\n]+", "\n", stripped_text)
return stripped_text
def remove_accented_chars(text):
text = (
unicodedata.normalize("NFKD", text)
.encode("ascii", "ignore")
.decode("utf-8", "ignore")
)
return text
def stopwords_removal(words):
list_stopwords = nltk.corpus.stopwords.words("english")
return [word for word in words if word not in list_stopwords]
import re
import nltk
import tqdm
import unicodedata
import contractions
from nltk.tokenize import word_tokenize
def pre_process_corpus(docs):
norm_docs = []
for doc in tqdm.tqdm(docs):
# case folding
doc = doc.lower()
# remove special characters\whitespaces
doc = strip_html_tags(doc)
doc = doc.translate(doc.maketrans("\n\t\r", " "))
doc = remove_accented_chars(doc)
doc = contractions.fix(doc)
doc = re.sub(r"[^a-zA-Z0-9\s]", "", doc, re.I | re.A)
doc = re.sub(" +", " ", doc)
doc = doc.strip()
# tokenize
doc = word_tokenize(doc)
# filtering
doc = stopwords_removal(doc)
norm_docs.append(doc)
norm_docs = [" ".join(word) for word in norm_docs]
return norm_docs
df.Sentence = pre_process_corpus(df.Sentence)
df.head()
# ### Handling imbalance (oversampling)
from sklearn.utils import resample
# Separate majority and minority classes in training data for upsampling
data_majority = df[df["Sentiment"] == "positive"]
data_minority = df[df["Sentiment"] == "negative"]
print("majority class before upsample:", data_majority.shape)
print("minority class before upsample:", data_minority.shape)
# Upsample minority class
data_minority_upsampled = resample(
data_minority,
replace=True, # sample with replacement
n_samples=data_majority.shape[0], # to match majority class
random_state=123,
) # reproducible results
# Combine majority class with upsampled minority class
df_balance = pd.concat([data_majority, data_minority_upsampled])
# Display new class counts
print("After upsampling\n", df_balance.Sentiment.value_counts(), sep="")
# ### Data splitting
from sklearn.model_selection import train_test_split
# Note: the upsampled df_balance from the step above is not used here; the split is done on the
# original (imbalanced) df. If the balanced data were used instead, the oversampling should be
# applied only to the training split so duplicated samples do not leak into the test set.
X_train, X_test, y_train, y_test = train_test_split(
    df.Sentence, df.Sentiment, test_size=0.1, random_state=42
)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# ### Tokenizer
from tensorflow.keras.preprocessing.text import Tokenizer
token = Tokenizer()
token.fit_on_texts(X_train)
vocab = len(token.index_word) + 1
print("Vocabulary size={}".format(len(token.word_index)))
print("Number of Documents={}".format(token.document_count))
# ### Sequence
X_train = token.texts_to_sequences(X_train)
X_test = token.texts_to_sequences(X_test)
train_lens = [len(s) for s in X_train]
test_lens = [len(s) for s in X_test]
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
h1 = ax[0].hist(train_lens)
h2 = ax[1].hist(test_lens)
from tensorflow.keras.preprocessing.sequence import pad_sequences
# padding
MAX_SEQUENCE_LENGTH = 30
X_train = pad_sequences(X_train, maxlen=MAX_SEQUENCE_LENGTH, padding="post")
X_test = pad_sequences(X_test, maxlen=MAX_SEQUENCE_LENGTH, padding="post")
X_train.shape, X_test.shape
# ### Encoding Labels
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
num_classes = 2 # positive -> 1, negative -> 0
y_train = le.fit_transform(y_train)
y_test = le.transform(y_test)
# ## Modelling
# ### Build model
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Activation, Dropout
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D
vec_size = 300
model = Sequential()
model.add(Embedding(vocab, vec_size, input_length=MAX_SEQUENCE_LENGTH))
model.add(Conv1D(64, 8, activation="relu"))
model.add(MaxPooling1D(2))
model.add(Dropout(0.1))
model.add(Dense(8, activation="relu"))
model.add(Dropout(0.1))
model.add(Dense(4, activation="relu"))
model.add(Dropout(0.1))
model.add(GlobalMaxPooling1D())
model.add(Dense(1, activation="sigmoid"))
model.compile(
loss="binary_crossentropy",
optimizer=tf.optimizers.Adam(learning_rate=0.0001),
metrics=["accuracy"],
)
model.summary()
# ### Train model
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
epochs = 100
batch_size = 4
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=5)
mc = ModelCheckpoint(
"./best_model/best_model_cnn1d.h5",
monitor="val_accuracy",
mode="max",
verbose=1,
save_best_only=True,
)
history = model.fit(
X_train,
y_train,
batch_size=batch_size,
shuffle=True,
validation_split=0.1,
epochs=epochs,
verbose=1,
callbacks=[es, mc],
)
# ## Evaluation
# ### Model Accuracy
from keras.models import load_model
saved_model = load_model("./best_model/best_model_cnn1d.h5")
train_acc = saved_model.evaluate(X_train, y_train, verbose=1)
test_acc = saved_model.evaluate(X_test, y_test, verbose=1)
print("Train: %.2f%%, Test: %.2f%%" % (train_acc[1] * 100, test_acc[1] * 100))
# ### Identify Overfitting
# summarize history for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# ### Confusion Matrix
def predictions(x):
    # use the checkpointed best model loaded above so the reported metrics match the saved weights
    prediction_probs = saved_model.predict(x)
    predictions = [1 if prob > 0.5 else 0 for prob in prediction_probs]
    return predictions
from sklearn.metrics import (
confusion_matrix,
classification_report,
roc_auc_score,
roc_curve,
)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
labels = ["positive", "negative"]
print("CNN 1D Accuracy: %.2f%%" % (accuracy_score(y_test, predictions(X_test)) * 100))
print(
"CNN 1D Precision: %.2f%%"
% (precision_score(y_test, predictions(X_test), average="macro") * 100)
)
print(
"CNN 1D Recall: %.2f%%"
% (recall_score(y_test, predictions(X_test), average="macro") * 100)
)
print(
"CNN 1D f1_score: %.2f%%"
% (f1_score(y_test, predictions(X_test), average="macro") * 100)
)
print("================================================\n")
print(classification_report(y_test, predictions(X_test)))
pd.DataFrame(
confusion_matrix(y_test, predictions(X_test)), index=labels, columns=labels
)
# ### ROC AUC
def plot_roc_curve(y_test, y_pred):
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plot_roc_curve(y_test, predictions(X_test))
print("model AUC score: %.2f%%" % (roc_auc_score(y_test, predictions(X_test)) * 100))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pathlib import Path
import matplotlib.pyplot as plt
import scipy.optimize as so
from scipy.special import erfc
df = pd.read_csv("/kaggle/input/rpc-event-charge/rpc_data_kaggle.csv", index_col=0)
df["voltage"] = df.voltage.astype(int)
df.head()
axs = (
df.query("is_detected and event_type == 1")
.groupby("voltage")
.event_charge.plot.hist(bins=np.arange(0, 5, 0.2), histtype="step", legend=True)
)
def expomodgaus(x, h, m, s, t):
"""Parameters definition:
from: https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
h = height
m = mean
s = sigma
t = tau
"""
return (
h
* s
/ t
* np.sqrt(np.pi / 2)
* np.exp(1 / 2 * (s / t) ** 2 - (x - m) / t)
* erfc(1 / np.sqrt(2) * (s / t - (x - m) / s))
)
# # Are these distributions following some particular pdf?
# Check the plot below
for ix, group in df.query("is_detected and event_type == 1").groupby("voltage"):
bins, edges = np.histogram(group.event_charge, bins=np.arange(0, 100, 0.1))
plt.plot(edges[:-1], bins)
plt.xlim(0, 20)
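# A minimal sketch (an added illustration) of fitting the exponentially modified Gaussian defined
# above to one voltage group with scipy.optimize.curve_fit; the initial guess
# p0 = (height, mean, sigma, tau) is a rough assumption, not a tuned value.
vmax = df.voltage.max()
group = df.query("is_detected and event_type == 1 and voltage == @vmax")
counts, edges = np.histogram(group.event_charge, bins=np.arange(0, 20, 0.1))
centers = 0.5 * (edges[:-1] + edges[1:])
popt, pcov = so.curve_fit(expomodgaus, centers, counts, p0=(counts.max(), 2.0, 0.5, 1.0))
plt.plot(centers, counts, ".", label="data")
plt.plot(centers, expomodgaus(centers, *popt), label="exp. mod. Gaussian fit")
plt.legend()
plt.show()
print("fitted (h, m, s, t):", popt)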
|
# # Data Preprocessing
# COMP20121 Machine Learning for Data Analytics
# Author: [Jun He](https://sites.google.com/site/hejunhomepage/)
# ## Learning objectives
# * Understand the necessity of data preprocessing
# * Introduce data preprocessing methods: 1. Data cleansing 2. Data scaling 3. Data transformation
# * Implement preprocessing with Sklearn and Pandas
# ## Part 1: Data cleaning
# ### Why data pre-processing?
# Raw data often are incomplete, noisy and may contain:
# * Obsolete fields
# * Missing values
# * Outliers
# * Erroneous values
# * Data format is not suitable for machine learning models
# **Data cleansing or cleaning** is the process of detecting and correcting missing values, incorrect values, inaccurate values or irrelevant values in data
# ### Example: missing or incorrect data
#
# * Example: a customer data set
# * Check whether there are missing or incorrect data
import pandas as pd
df = pd.read_csv("/kaggle/input/aimldata/data-cleaning-customer.csv")
print(df.to_latex())
# ### Understand data type: categorical and numerical
# * A column takes different values, also called a **variable, feature, attribute**.
# * Columns `Zip`, `Gender` and `Marital status` belong to **categorical variables**, which represent a category, class or type. A categorical variable can be encoded as numbers, but these numbers do not have the same meaning as a numerical value.
# * Categorical variables have no arithmetic operations. For example: what is the meaning of Zip code $$10048 + 90210=?$$
# * Categorical variables can be further classified into two types.
# * **Nominal**: no default or natural order. Examples: town of residence, colour of car, male or female.
# * **Ordinal**: with an order. Example: Questionnaire responses coded: 1 = strongly disagree, 2 = disagree, 3 = indifferent, 4 = agree, 5 = strongly agree.
# * Columns `Age`, `Income` and `Transaction Amount` are **numerical variables**, which represent a measurement or number. Numerical variables support arithmetic operations. For example, Income $$ 75000 + 50000 = 125000$$
# ### Check zip code column
#
df.iloc[:, 1:2]
df_T = df.iloc[:, 1:2].T
print(df_T.to_latex())
# U.S. Zip Code is five digits. Why J2S7K7 or four digits?
# * Not all countries use same zip code format, 90210 (U.S.) vs. J2S7K7 (Canada)
# * We should expect unusual values in some fields, for example, in global commerce
# * 06269 (New England states): leading zero is truncated
# * In a database numeric field, the leading zero is often chopped off
# ### Check Gender Column
df.iloc[:, 2:3]
df_T = df.iloc[:, 2:3].T
print(df_T.to_latex())
# * Is there any missing value?
# * Discuss anomaly with database administrator
# ### Income Field
df.iloc[:, 3:4]
df_T = df.iloc[:, 3:4].T
print(df_T.to_latex())
# **$10,000,000**
# * Assumed to measure gross annual income
# * Possibly valid
# * Still considered outlier (extreme data value)
# * Some statistical and data mining methods affected by outliers
# **-\$40,000?**
# * Income less than $0?
# * Value beyond bounds for expected income, therefore an error
# * Caused by data entry error?
# * Discuss anomaly with database administrator
# **\$99,999**
# * Other values appear rounded to nearest \$5,000
# * Value may be completely valid
# * Value represents database code used to denote missing value?
# * Confirm values in expected unit of measure, such as U.S. dollars
# * Which unit of measure for income?
# * Customer with zip code J2S7K7 in Canadian dollars?
# * Discuss anomaly with database administrator
# ### Check age column
df.iloc[:, 4:5]
df_T = df.iloc[:, 4:5].T
print(df_T.to_latex())
# Age Field Contains “C”?
# * Other records have numeric values for field
# * Record categorized into group labelled “C”
# * Value must be resolved
# * Data mining software expects numeric values for field
# Age Field Contains 0?
# * Zero-value used to indicate missing/unknown value?
# * Customer refused to provide their age?
# ### Check Martial status
df.iloc[:, 5:6]
#
# Marital Status Field Contains “S”?
# * What does this symbol mean?
# * Does “S” imply single or separated?
# * Discuss anomaly with database administrator
print(df.iloc[:, 5:6].to_latex())
# ## A big data cleaning task: missing values
# Sources of Missing Values
# * User forgot to fill in a field.
# * Data lost
# * Programming error.
# * Users chose not to fill out a field
# Missing Value types
# * Clearly labelled by `NA, n/a, NAN`
# * Others such as `--, -1, 99,999`
# ### Handle missing values
# * Missing values pose problems to data analysis methods
# * More common in databases containing large number of fields
# * Absence of information rarely beneficial to task of analysis
# * In contrast, having more data almost always better
# * Careful analysis required to handle issue
# **Example:** Load a house data set and check missing or incorrect data
# Note: in the Kaggle Code settings, you must turn on Internet access to reach `https://` links
import pandas as pd
# url = "https://raw.githubusercontent.com/dataoptimal/posts/master/data%20cleaning%20with%20python%20and%20pandas/property%20data.csv"
url = "/kaggle/input/aimldata/data-cleaning-property.csv"
df2 = pd.read_csv(url)
df2
print(df2.to_latex())
# ### Check standard missing values
# * **Standard missing values** are either `NaN` values or blanks
# * The `dataframe.isnull()` method checks both of these standard missing values and returns Boolean values: `False` or `True`
# * Example: check missing values in the ST_NUM and NUM_BEDROOMS columns with the method `dataframe.isna()`
# Looking at the ST_NUM column
df2["ST_NUM_NA"] = df2["ST_NUM"].isna()
print(df2[["ST_NUM", "ST_NUM_NA"]].to_latex())
# ### Detect non-standard missing values
# * Missing values sometimes are denoted by `na`, `?`, `--` and other notation.
# * These are non-standard missing values and cannot be detected by the methods `dataframe.isna()` or `isnull()`
# * For example, the 8th value `na` is a missing value but is flagged as `False`.
df2["NUM_BEDROOMS_NA"] = df2["NUM_BEDROOMS"].isna()
print(df2[["NUM_BEDROOMS", "NUM_BEDROOMS_NA"]].to_latex())
# ### Import non-standard missing values as standard missing values
# * Manually check the notation of non-standard missing values
# * Manually create a list of missing values: `missing_values = ["n.a.","?","NA","n/a", "na", "--"]`
# * Need to read data using user-defined missing values `na_values = missing_values`
# * Example: the 8th value `na` is replaced by a standard missing value `NAN`
# Making a list of missing value types
missing_values = ["n.a.", "?", "NA", "n/a", "na", "--"]
# url = "https://raw.githubusercontent.com/dataoptimal/posts/master/data%20cleaning%20with%20python%20and%20pandas/property%20data.csv"
url = "/kaggle/input/aimldata/data-cleaning-property.csv"
df = pd.read_csv(url, na_values=missing_values)
df2 = pd.read_csv(url, na_values=missing_values)
# Looking at the NUM_BEDROOMS column
df2
print(df2.to_latex())
# ### Calculate total missing values for each feature
# * use combined method `isna().sum()` or `isnull().sum()`
# * return the total number of standard missing values in each feature, but non-standard (unexpected or incorrect) values are probably not counted
print(df2.isna().sum().to_latex())
df2["NUM_BEDROOMS_NA"] = df2["NUM_BEDROOMS"].isna()
print(df2[["NUM_BEDROOMS", "NUM_BEDROOMS_NA"]].to_latex())
# ### Handle missing data: Drop missing values
# * In a Pandas dataframe, use the method `df.dropna()`
# * Simple but dangerous! Not recommended
# * Assume that only 5% of data values are missing from a data set of 30 features, and the missing values are spread evenly throughout the data
# * Then almost 80% of the records would have at least one missing value, since $1 - 0.95^{30} \approx 0.79$ (a quick check is shown in the code below)
# * Example: drop all rows with missing values and keep the outcome in dataframe `df3`
# * Only one sample is left.
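# A quick, illustrative check of the "almost 80%" figure above (an added calculation):
# with 30 features and an independent 5% chance that any single value is missing,
p_missing = 1 - 0.95**30
print(f"P(a record has at least one missing value) = {p_missing:.2%}")  # about 78.5%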
df3 = df2.dropna()
df3
print(df3.to_latex())
# ### Replace missing values with user-defined constant
# * In Pandas dataframe, use method `fillna`
# * Example: replace the NAN value in ST_NUM column by 125
df2["ST_NUM"].fillna(125, inplace=True)
print(df2["ST_NUM"])
print(df2["ST_NUM"].to_latex())
# ### Replace missing values with median or mean
# * First calculate the median/mean of the feature column, then replace the missing value with the median
# * **Median** = the value separating the higher half from the lower half of a data sample, e.g. median = 3 for `[1,1,3,5,6]`
# * **Mean** = the sum of the sampled data divided by the number of items in the data sample, e.g. mean = 3.2 for `[1,1,3,5,6]`
# * Domain experts should be consulted regarding approach to replace missing values
# * Mean only works in numerical data
# Example: replace the missing value in the NUM_BEDROOMS column with the median using the method `dataframe['NUM_BEDROOMS'].median()`
# Replace using median
df2["NUM_BEDROOMS"].fillna(df2["NUM_BEDROOMS"].median(), inplace=True)
df2["NUM_BEDROOMS"]
print(df["NUM_BEDROOMS"].to_latex(), df2["NUM_BEDROOMS"].to_latex())
# ### Replace missing Values with mode
# * **Mode** = the value that appears most often in a data sample, e.g. mode = Y for `[Y, Y, N, Y]`
# * Mode works in both numerical and categorical data
# * Calculate the mode of the feature column, then replace the missing value with the mode
# * Example: the OWN_OCCUPIED column takes values Y and N and Mode = Y
# * Method `dataframe['OWN_OCCUPIED'].mode()[0]`
#
# Replace using mode
df2["OWN_OCCUPIED"].fillna(df2["OWN_OCCUPIED"].mode()[0], inplace=True)
df2["OWN_OCCUPIED"]
print(df["OWN_OCCUPIED"].to_latex(), df2["OWN_OCCUPIED"].to_latex())
# ### Replace a missing value in a specific location
# * Manually replace a missing value in a specific location
# * Example: replace the 4th value in the PID column with `100005000`
df2.loc[4, "PID"] = 100005000
df2["PID"]
print(df["PID"].to_latex(), df2["PID"].to_latex())
# ## Unexpected or incorrect values
# ### Detect unexpected or incorrect values
# * Unexpected or incorrect values need specific checks
# * Example: the `OWN_OCCUPIED` column takes `Y` or `N` values, but the value `12` is wrong. The method `isna` cannot detect this value
# * A solution is to exclude any value other than `Y` and `N` at the data input stage (a small sketch follows below)
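# A minimal sketch of that idea (an added illustration using the column and values from this example):
# keep only the expected categories and turn anything else (such as the value 12) into NaN
allowed_values = {"Y", "N"}
own_occupied_clean = df2["OWN_OCCUPIED"].where(df2["OWN_OCCUPIED"].isin(allowed_values))
print(own_occupied_clean)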
# Looking at the OWN_OCCUPIED column
print(df2["OWN_OCCUPIED"])
print(df2["OWN_OCCUPIED"].isna())
df2.loc[3, "OWN_OCCUPIED"] = "Y"
df2["OWN_OCCUPIED"]
print(df["OWN_OCCUPIED"].to_latex(), df2["OWN_OCCUPIED"].to_latex())
# Making a list of missing value types
# missing_values = ["n.a.","?","NA","n/a", "na", "--"]
# url = "https://raw.githubusercontent.com/dataoptimal/posts/master/data%20cleaning%20with%20python%20and%20pandas/property%20data.csv"
# df2 = pd.read_csv(url, na_values = missing_values)
# df2
# ## Duplicate data
df3 = pd.DataFrame(
{
"brand": ["Yum Yum", "Yum Yum", "Indomie", "Indomie", "Indomie"],
"style": ["cup", "cup", "cup", "pack", "pack"],
"rating": [4, 4, 3.5, 15, 5],
}
)
df3
print(df3.to_latex())
df3.duplicated()
print(df3.to_latex(), df3.duplicated().to_latex())
df3.drop_duplicates()
print(df3.drop_duplicates().to_latex())
# ## Part 2 Data normalization
# ### Why scale numerical features to a range?
# * Numerical features might have different ranges
# * Example: in building a machine learning model for predicting the performance of a player in baseball, two features have ranges as
# (1) Batting average: `[ 0.0, 0.400 ]`, (2) Number of home runs: `[ 0, 70 ]`
# * `Number of home runs` with greater ranges tend to have larger influence on machine learning model’s results than `Batting average`
# * Therefore, numeric feature values should be normalized
# * Standardizes scale of effect of each feature on machine learning model’s results
# ### Method 1: Min-Max scaler
# * Min-max scaler scales the value of a numerical variable to the interval, often between 0 and 1
# $$ X' = \frac{X-\min}{\max -\min}$$
# * Example: min-max normalization of a column (1,2,3,4,5). $\max=5, \min =1$.
# * After min_max normalization, (0, 0.25, 0.5, 0.75, 1)
# * In Sklean library, `MinMaxScaler` transforms features by scaling each feature to a given range (default at (0,1)).
from sklearn import preprocessing
import numpy as np
# A feature is a column: (1,2,3,4,5) must be represented as a column, not a row!
X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
print("before min-max normalization \n", X)
scaler = preprocessing.MinMaxScaler() # create a scaler
X_new = scaler.fit_transform(X) # fit and transform data
print("after min-max normalization \n", X_new)
# ### Outliers
# * MIN-MAX scaler is sensitive to **outliers** (extreme values)
# * Example: (1,2,3, 4,100). 100 is an outlier, extremely large compared with other values. After min_max scaling, (0., 0.01010101, 0.02020202, 0.03030303,1.)
# A feature is a column: (1,2,3,4,100) must be represented as a column, not a row!
from sklearn.preprocessing import RobustScaler
X = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])
scaler = preprocessing.MinMaxScaler() # create a scaler
X_new = scaler.fit_transform(X) # fit and transform data
print("before min-max normalization \n", X)
print("after min-max normalization \n", X_new)
# ### Method 2: Standard Scaler
# * Standard Scaler scales a numerical feature to mean =0 and standard deviation =1
# * The standard score of a sample $X$ is calculated as:
# $$
# Z = \frac{X- \mu}{std}
# $$
# where $\mu$ is the mean of $X$ and $std$ its the standard deviation
# * **Mean $\mu$** = the sum of the sampled data divided by the number of items in the data samples $X$
# * **Standard deviation $std$** = a measure of the amount of variation or dispersion of a data samples $X$
# Example: Given the feature column X = (1,2,3,4,5), $\mu=3, std \approx 1.41$.
# Use the Sklearn method `preprocessing.StandardScaler()` to create a standard scaler
# After scaling, $\mu=0, std =1$.
from sklearn import preprocessing
import numpy as np
X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
print("before min-max normalization \n", X)
print("mean", np.mean(X, axis=0))
print("std", np.std(X, axis=0))
scaler = preprocessing.StandardScaler() # create a scaler
X_new = scaler.fit_transform(X) # fit and transform data
print("after min-max normalization \n", X_new)
print("mean", np.mean(X_new, axis=0))
print("std", np.std(X_new, axis=0))
# ## Part 3 Data transformation
# ### Why convert categorical features into numerical?
# * In most data analysis problems, datasets contain categorical features such as `Gender` and `Marital status`
# * However, many machine learning models, such as artificial neural networks, cannot handle categorical features
# * These categorical features must be encoded into numerical ones before being fed to a model
# * There are no silver bullets
# ### Method 1: OrdinalEncoder (Label Encoder)
# * Encode categorical **feature values** as an integer array
# * **Feature names** are NOT encoded
# * Example: `No = 0`, `Yes =1`
# create a data frame
import pandas as pd
# import numpy as np
df = pd.DataFrame(
[
["M", "O-", "medium"],
["M", "O-", "high"],
["F", "O+", "high"],
["F", "AB", "low"],
["F", "B+", "NA"],
]
) # create a data frame
df.columns = ["gender", "blood_type", "edu_level"] # add columns name to data frame
df
print(df.to_latex())
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder() # create an encoder with order
df["edu_level"] = encoder.fit_transform(
df["edu_level"].values.reshape(-1, 1)
) # fit encoder with data and transfer data
df
# ## Order, order
# * We are not happy with the order: medium = 3, high = 1
# * We want to encode in the order: low =1, medium = 2, high = 3, and NA =0
# * In `OrdinalEncoder`, we can do it by setting `categories=[['NA', 'low', 'medium', 'high']]`
import pandas as pd
import numpy as np
df = pd.DataFrame(
[
["M", "O-", "medium"],
["M", "O-", "high"],
["F", "O+", "high"],
["F", "AB", "low"],
["F", "B+", "NA"],
]
) # create a data frame
df.columns = ["gender", "blood_type", "edu_level"] # add columns name to data frame
df
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder(
categories=[["NA", "low", "medium", "high"]]
) # create an encoder with order
df["edu_level"] = encoder.fit_transform(
df["edu_level"].values.reshape(-1, 1)
) # fit encoder with the column edu_level but only this column
df.head()
# ### Method 2: OneHotEncoder
# * Example: `'gender', 'blood_type'` are nominal variables without an order.
# * They are encoded into integers `2, 1,0` with an order $2>1>0$.
# * This may be interpreted as being ordered. We don't want this order.
# * Nominal data are NOT suitable for ordinal encoding
# * **OneHotEncoder** transforms each categorical feature with `n_categories` possible values into `n_categories` binary features, with one of them 1, and all others 0.
# * `Male` is encoded to `[0, 1]`, `Female` to `[1, 0]` without order
# * `OneHotEncoder`: the input is an array-like of integers or strings
# * It creates a binary column for each category and returns an array
import pandas as pd
import numpy as np
df = pd.DataFrame(
[
["M", "O-", "medium"],
["M", "O-", "high"],
["F", "O+", "high"],
["F", "AB", "low"],
["F", "B+", "NA"],
]
) # create a data frame
df.columns = ["gender", "blood_type", "edu_level"] # add columns name to data frame
df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder() # create an encoder
X_gender = encoder.fit_transform(
df["gender"].values.reshape(-1, 1)
).toarray() # fit encoder with data and transfer data
print("gender", X_gender)
X_blood = encoder.fit_transform(
df["blood_type"].values.reshape(-1, 1)
).toarray() # fit encoder with data and transfer data
print("blood_type", X_blood)
# * `O-` = 0. 0. 0. 1., `O+` = 0. 0. 1. 0.
# ### Put the encoded data into a data frame
# * So far, we have encoded `['gender', 'blood_type', 'edu_level']` separately
# * We can concatenate the results into one array using `numpy.concatenate` with the parameter `axis=1`
# * Then we create a new data frame with 7 columns (2 gender + 4 blood type + 1 edu_level) and add the feature names
#
# Encode edu_level ordinally (same order as before) so it can be concatenated with the one-hot arrays
encoder_ord = OrdinalEncoder(categories=[["NA", "low", "medium", "high"]])
X_edu = encoder_ord.fit_transform(df["edu_level"].values.reshape(-1, 1))
X_Encode = np.concatenate((X_gender, X_blood, X_edu), axis=1)
df_Encode = pd.DataFrame(X_Encode)
df_Encode.columns = [
"gender_F",
"gender_M",
"blood_B+",
"blood_AB",
"blood_O+",
"blood_O-",
"edu_level",
] # add columns name to data frame
df_Encode
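# As a design note, the same table can usually be built in one step. A minimal sketch (an added
# alternative, not the method above) using `pandas.get_dummies` for the nominal columns plus the
# ordinal mapping used earlier:
df_alt = pd.get_dummies(df, columns=["gender", "blood_type"])
encoder_ord2 = OrdinalEncoder(categories=[["NA", "low", "medium", "high"]])
df_alt["edu_level"] = encoder_ord2.fit_transform(df_alt[["edu_level"]]).ravel()
df_alt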
|
# ## Video Game Recommendation System
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
warnings.filterwarnings("ignore")
# ### Importing and Transforming Dataset
# The dataset was obtained from Video Game Sales with Ratings on Kaggle, which was web scraped by Gregory Smith from VGChartz Video Games Sales. The data includes details such as the game's title, genre, the platform it runs on, the company that published it, and other relevant information. The dataset covers video game releases spanning four decades, from 1980 to 2020.
video_games_df = pd.read_csv(
"/kaggle/input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv"
)
print(f"No. of records: {video_games_df.shape[0]}")
video_games_df.head(5)
# We removed certain features from the dataset as they are not significant for our recommendation system such as the release year, developer and the sales for each region.
video_games_filtered_df = video_games_df[
["Name", "Platform", "Genre", "Critic_Score", "User_Score", "Rating"]
]
video_games_filtered_df.info()
# ### Exploratory Data Analysis
# Check the total number of missing values for each feature in the dataset
video_games_filtered_df.isna().sum().sort_values(ascending=False)
# Remove the records with missing data in the `Name` and `Genre` features. Substitute the term `Unknown` for any missing information in the `Rating` field.
# Remove missing values
video_games_filtered_df.dropna(subset=["Name", "Genre"], axis=0, inplace=True)
video_games_filtered_df = video_games_filtered_df.reset_index(drop=True)
# Value substitution
video_games_filtered_df.fillna({"Rating": "Unknown"}, inplace=True)
video_games_filtered_df[["Name", "Genre", "Rating"]].isna().sum()
# Examine the frequency of data types for each categorical feature: `Genre`, `Platform`, and `Rating`.
features = video_games_filtered_df[["Genre", "Platform", "Rating"]].columns
for idx, feature in enumerate(features):
plt.figure(figsize=(14, 4))
sns.countplot(data=video_games_filtered_df, x=feature)
plt.show()
# From the charts above, we can say that:
# - There is a scarcity of data available for certain platforms such as SCD, WS, and GG, and ratings such as 'K-A', 'AO', and 'RP'.
# - Almost half of the dataset has an undefined rating value.
# We will only consider video games with a defined rating value. Therefore, all ratings marked as 'Unknown' will be dropped from the dataset
video_games_filtered_df = video_games_filtered_df.query("Rating != 'Unknown'")
plt.figure(figsize=(14, 4))
sns.countplot(data=video_games_filtered_df, x="Rating")
plt.show()
# Create additional features that correspond to `User_Score` and `Critic_score` variables. Replace all missing and 'tbd' values with a specific value -- the imputed data is calculated as the mean value of the respective feature within a particular genre, e.g. the average of all scores under the 'Action' category.
# Replace 'tbd' value to NaN
video_games_filtered_df["User_Score"] = np.where(
video_games_filtered_df["User_Score"] == "tbd",
np.nan,
video_games_filtered_df["User_Score"],
).astype(float)
# Group the records by Genre, then aggregate them calculating the average of both Critic Score and User Score
video_game_grpby_genre = video_games_filtered_df[
["Genre", "Critic_Score", "User_Score"]
].groupby("Genre", as_index=False)
video_game_score_mean = video_game_grpby_genre.agg(
Ave_Critic_Score=("Critic_Score", "mean"), Ave_User_Score=("User_Score", "mean")
)
# Merge the average scores with the main dataframe
video_games_filtered_df = video_games_filtered_df.merge(
video_game_score_mean, on="Genre"
)
video_games_filtered_df
video_games_filtered_df["Critic_Score_Imputed"] = np.where(
video_games_filtered_df["Critic_Score"].isna(),
video_games_filtered_df["Ave_Critic_Score"],
video_games_filtered_df["Critic_Score"],
)
video_games_filtered_df["User_Score_Imputed"] = np.where(
video_games_filtered_df["User_Score"].isna(),
video_games_filtered_df["Ave_User_Score"],
video_games_filtered_df["User_Score"],
)
video_games_filtered_df
# Compare the summary statistics of `User_Score` and `Critic_Score` and the new feature with imputed values, i.e.`User_Score_Imputed` and `Critic_Score_Imputed`. The results below show that filling in missing values has no significant impact on the average and the standard deviation.
video_games_filtered_df[
["Critic_Score", "Critic_Score_Imputed", "User_Score", "User_Score_Imputed"]
].describe()
# Drop all the fields related to critic and user scores except for the new features with imputed values.
video_games_final_df = video_games_filtered_df.drop(
columns=["User_Score", "Critic_Score", "Ave_Critic_Score", "Ave_User_Score"], axis=1
)
video_games_final_df = video_games_final_df.reset_index(drop=True)
video_games_final_df = video_games_final_df.rename(
columns={"Critic_Score_Imputed": "Critic_Score", "User_Score_Imputed": "User_Score"}
)
video_games_final_df.info()
# Analyze the data distribution for `Critic_Score` and `User_Score`, and assess the correlation between these two features.
hist, bins = np.histogram(video_games_final_df["Critic_Score"], bins=10, range=(0, 100))
plt.bar(bins[:-1], hist, width=(bins[1] - bins[0]), align="edge")
plt.xlabel("Critic Score")
plt.ylabel("Frequency")
plt.title("Critic Score Distribution for all Video Games")
plt.show()
hist, bins = np.histogram(video_games_final_df["User_Score"], bins=10, range=(0, 10))
plt.bar(bins[:-1], hist, width=(bins[1] - bins[0]), align="edge")
plt.xlabel("User Score")
plt.ylabel("Frequency")
plt.title("User Score Distribution for all Video Games")
plt.show()
plt.figure(figsize=(8, 8))
ax = sns.regplot(
x=video_games_final_df["User_Score"],
y=video_games_final_df["Critic_Score"],
line_kws={"color": "black"},
scatter_kws={"s": 4},
)
ax.set(
xlabel="User Score", ylabel="Critic Score", title="User Scores vs. Critic Scores"
)
# Display the dataframe information to quickly understand its structure and characteristics.
video_games_final_df.info()
# ### Converting Categorical Features to Dummy Indicators
# Obtain all categorical features, except for the title of the game.
categorical_columns = [
name
for name in video_games_final_df.columns
if video_games_final_df[name].dtype == "O"
]
categorical_columns = categorical_columns[1:]
print(f"There are {len(categorical_columns)} categorical features:\n")
print(", ".join(categorical_columns))
# Transform all categorical attributes into binary dummy variables where the value is 0 (representing No) or 1 (representing Yes).
video_games_df_dummy = pd.get_dummies(
data=video_games_final_df, columns=categorical_columns
)
video_games_df_dummy.head(5)
# After the conversion, the variables have expanded from the original 6 columns to a total of 40 columns.
video_games_df_dummy.info()
# ### Standardizing the Numerical Features
# Transform numerical data to a standardized form by scaling them to have a mean of 0 and a standard deviation of 1. The purpose of standardization is to ensure that all features are on a similar scale and have equal importance in determining the output variable.
features = video_games_df_dummy.drop(columns=["Name"], axis=1)
scale = StandardScaler()
scaled_features = scale.fit_transform(features)
scaled_features = pd.DataFrame(scaled_features, columns=features.columns)
scaled_features.head(5)
# ### Creating a Model
# The machine learning algorithm `NearestNeighbors` will be utilized to identify the data points nearest to a given input, with the aid of the `cosine similarity` measurement to determine the similarity or dissimilarity between data points.
model = NearestNeighbors(n_neighbors=11, metric="cosine", algorithm="brute").fit(
scaled_features
)
print(model)
# As we included `n_neighbors=11` as a parameter for our model, it will generate 11 indices and distances of games that are similar to the user input, including the input itself.
vg_distances, vg_indices = model.kneighbors(scaled_features)
print("List of indexes and distances for the first 5 game:\n")
print(vg_indices[:5], "\n")
print(vg_distances[:5])
# `TfidfVectorizer` is a feature extraction method commonly used in natural language processing and information retrieval tasks. In this case, it is used to suggest a video game title based on the user input (i.e. game that doesn't exist in the records) by evaluating the importance of words in the input relative to the existing records.
game_names = video_games_df_dummy["Name"].drop_duplicates()
game_names = game_names.reset_index(drop=True)
vectorizer = TfidfVectorizer(use_idf=True)
vectorizer.fit(game_names)
print(vectorizer)
game_title_vectors = vectorizer.transform(game_names)
game_title_vectors
# ### Testing the Model
# The program utilizes the above-mentioned model to provide video game recommendations to users. It asks the user to enter the game's name and, optionally, the platform to filter the results. The list of recommended games is arranged in ascending order based on the calculated distances. On the other hand, if the game's name is not in the records, the program suggests the game title that is the closest match to the input.
def VideoGameTitleRecommender(video_game_name):
"""
This function will recommend a game title that has the closest match to the input
"""
query_vector = vectorizer.transform([video_game_name])
similarity_scores = cosine_similarity(query_vector, game_title_vectors)
closest_match_index = similarity_scores.argmax()
closest_match_game_name = game_names[closest_match_index]
return closest_match_game_name
def VideoGameRecommender(video_game_name, video_game_platform="Any"):
"""
This function will provide game recommendations based on various features of the game
"""
default_platform = "Any"
# User input: Game Title and Platform
if video_game_platform != default_platform:
video_game_idx = video_games_final_df.query(
"Name == @video_game_name & Platform == @video_game_platform"
).index
if video_game_idx.empty:
video_game_idx = video_games_final_df.query(
"Name == @video_game_name"
).index
if not video_game_idx.empty:
print(
f"Note: The game is not available on the specified platform. Recommendations will be based only on the game's title.\n"
)
video_game_platform = default_platform
# User input: Game Title only
else:
video_game_idx = video_games_final_df.query("Name == @video_game_name").index
if video_game_idx.empty:
# If the game entered by the user doesn't exist in the records, the program will recommend a new game similar to the input
closest_match_game_name = VideoGameTitleRecommender(video_game_name)
print(f"'{video_game_name}' doesn't exist in the records.\n")
print(
f"You may want to try '{closest_match_game_name}', which is the closest match to the input."
)
else:
# User input: Game Title only
if video_game_platform == default_platform:
# Place in a separate dataframe the indices and distances, then sort the record by distance in ascending order
vg_combined_dist_idx_df = pd.DataFrame()
for idx in video_game_idx:
# Remove from the list any game that shares the same name as the input
vg_dist_idx_df = pd.concat(
[
pd.DataFrame(vg_indices[idx][1:]),
pd.DataFrame(vg_distances[idx][1:]),
],
axis=1,
)
vg_combined_dist_idx_df = pd.concat(
[vg_combined_dist_idx_df, vg_dist_idx_df]
)
vg_combined_dist_idx_df = vg_combined_dist_idx_df.set_axis(
["Index", "Distance"], axis=1, inplace=False
).reset_index(drop=True)
vg_combined_dist_idx_df = vg_combined_dist_idx_df.sort_values(
by="Distance", ascending=True
)
video_game_list = video_games_final_df.iloc[
vg_combined_dist_idx_df["Index"]
]
# Remove any duplicate game names to provide the user with a diverse selection of recommended games
video_game_list = video_game_list.drop_duplicates(
subset=["Name"], keep="first"
)
# Get the first 10 games in the list
video_game_list = video_game_list.head(10)
# Get the distance of the games similar to the input
recommended_distances = np.array(
vg_combined_dist_idx_df["Distance"].head(10)
)
# User input: Game Title and Platform
else:
# Remove from the list any game that shares the same name as the input
recommended_idx = vg_indices[video_game_idx[0]][1:]
video_game_list = video_games_final_df.iloc[recommended_idx]
# Get the distance of the games similar to the input
recommended_distances = np.array(vg_distances[video_game_idx[0]][1:])
print(
f"Top 10 Recommended Video Games for '{video_game_name}' [platform:{video_game_platform}]"
)
video_game_list = video_game_list.reset_index(drop=True)
recommended_video_game_list = pd.concat(
[
video_game_list,
pd.DataFrame(recommended_distances, columns=["Similarity_Distance"]),
],
axis=1,
)
display(recommended_video_game_list.style.hide_index())
# __TEST DATA #1__
# __Input:__ Video Game Title
# __Expected Result:__ The program merges recommendations from all platforms of the game, arranges the similarity distances in ascending order, then displays only the first 10 games that have the smallest calculated distances.
VideoGameRecommender("Call of Duty: World at War")
# __TEST DATA #2__
# __Input:__ Video Game Title and Platform
# __Expected Result:__ The platform helps to limit the results and display only recommended games based on the specified game and platform.
# NOTE: If a platform has limited data like GG and PCFX, the program might suggest games from other platforms based on various factors when calculating the features similarity.
VideoGameRecommender("Call of Duty: World at War", "PC")
# __TEST DATA #3__
# __Input:__ Video Game Title and Platform
# __Constraint:__ Video game is not available on the specified platform
# __Expected Result:__ Since the video game is not available on the specified platform, the recommendation is based solely on the game title and ignores the platform.
VideoGameRecommender("Call of Duty: World at War", "XB")
# __TEST DATA #4__
# __Input:__ Video Game Title
# __Constraint:__ Video game is not available in the records
# __Expected Result:__ No recommendation is shown but the program provides the user with the game title that has closest match to the input.
VideoGameRecommender("Call of Duty")
|
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.models import Sequential, Model
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.applications import VGG19, VGG16
import cv2
import os
import random
import tensorflow as tf
import numpy as np
import pandas as pd
from keras.models import load_model
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from sklearn.metrics.pairwise import cosine_similarity
image_array = np.load("/kaggle/input/tu-n-5-vgg19-37494b/images.npy")
label_array = np.load("/kaggle/input/tu-n-5-vgg19-37494b/labels_onehot.npy")
categories = [
"Automotive",
"Baby",
"Home Improvement",
"Kitchen & Dining",
"Pet Supplies",
]
# Load the model from the file "my_model.h5"
# feat_extractor = load_model('/kaggle/input/tu-n-5-vgg19-37494b/my_model.h5')
# feat_extractor.summary()
# load the model
vgg_model = VGG19(weights="imagenet")
# remove the last layers in order to get features instead of predictions
feat_extractor = Model(
inputs=vgg_model.input, outputs=vgg_model.get_layer("fc2").output
)
# print the layers of the CNN
feat_extractor.summary()
from tensorflow.keras.applications.imagenet_utils import preprocess_input
importedImages = []
for img in image_array:
# Convert the images to array
image_batch = np.expand_dims(img, axis=0)
importedImages.append(image_batch)
images = np.vstack(importedImages)
processed_imgs = preprocess_input(images.copy())
# extract the images features
imgs_features = feat_extractor.predict(processed_imgs)
print("features successfully extracted!")
imgs_features.shape
files = []
for i in range(0, len(image_array)):
files.append(str(i) + ".jpeg")
# compute cosine similarities between images
cosSimilarities = cosine_similarity(imgs_features)
# store the results into a pandas dataframe
cos_similarities_df = pd.DataFrame(cosSimilarities, columns=files, index=files)
cos_similarities_df
# function to retrieve the most similar products for a given one
import re
def image_recommend(pid, num_recommend=5):
"""
PID: Product ID of the original item in our dataset
num_recommend : Number of most similar images to retrieve
"""
# Displaying the original product- Image, PID, Name, Brand
print("-----------------------------------------------------------------------")
print("Original product:")
print("-----------------------------------------------------------------------")
print("\nProduct ID : ", pid)
plt.imshow(image_array[pid])
plt.show()
# getting the indexes and scores of the N most similar products
closest_imgs = (
cos_similarities_df[files[pid]]
.sort_values(ascending=False)[1 : num_recommend + 1]
.index
)
closest_imgs_scores = cos_similarities_df[files[pid]].sort_values(ascending=False)[
1 : num_recommend + 1
]
_re_digits = re.compile(
r"\d+"
) # We use regex to extract only the pids from file names
closest_imgs_pid = []
for element in closest_imgs:
closest_imgs_pid += [int(n) for n in _re_digits.findall(element)]
# Displaying the recommended products- Image, PID, Name, Brand and Similarity Score
print("-----------------------------------------------------------------------")
print("Most similar products:")
print("-----------------------------------------------------------------------")
for i in closest_imgs:
print("\nProduct ID : ", int(i[:-5]))
print("Similarity score : ", closest_imgs_scores[i])
plt.imshow(image_array[int(i[:-5])])
plt.show()
image_recommend(1000, 5)
def image_recommend(pid, num_recommend=5):
    """
    pid: Product ID of the original item in our dataset
    num_recommend: number of similar products to recommend
    """
closest_imgs = (
cos_similarities_df[files[pid]]
.sort_values(ascending=False)[1 : num_recommend + 1]
.index
)
closest_imgs_scores = cos_similarities_df[files[pid]].sort_values(ascending=False)[
1 : num_recommend + 1
]
_re_digits = re.compile(r"\d+")
closest_imgs_pid = []
for element in closest_imgs:
closest_imgs_pid += [int(n) for n in _re_digits.findall(element)]
fig, axs = plt.subplots(1, num_recommend + 1, figsize=(15, 5))
axs[0].imshow(image_array[pid])
    axs[0].set_title("Original product")
axs[0].axis("off")
for i, img in enumerate(closest_imgs):
img_id = int(img[:-5])
axs[i + 1].imshow(image_array[img_id])
        axs[i + 1].set_title(
            f"#{i+1}: Product ID {img_id}\nSimilarity score: {closest_imgs_scores[img]:.2f}"
        )
axs[i + 1].axis("off")
plt.show()
from faker import Faker
import random
# Create a faker object to generate random user names
fake = Faker()
# Create a DataFrame with 5 rows and 6 columns, containing the user name and image names
df = pd.DataFrame(columns=["User", "Image1", "Image2", "Image3", "Image4", "Image5"])
# Generate random data for the DataFrame
for i in range(5):
user = fake.name()
images = [str(random.randint(0, 1249)) for i in range(5)]
df.loc[i] = [user] + images
print(df)
def recommend_images_for_user(user, num_recommend=5):
"""
    user: the user's name in the DataFrame df
    num_recommend: number of similar products to recommend
"""
    # Get the IDs of all the images selected for recommendation
img_ids = [int(df[df["User"] == user][f"Image{i}"].values[0]) for i in range(1, 6)]
    # Run the recommendation for each image
    for i, img_id in enumerate(img_ids):
        print(f"Recommendations for {user} - Image {i+1}:")
image_recommend(img_id, num_recommend)
recommend_images_for_user(df["User"][0], num_recommend=5)
|
import warnings
import numpy as np
import pandas as pd
from scipy.stats import probplot
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
from matplotlib import cycler
import seaborn as sns
import gc
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.plotting.register_matplotlib_converters()
warnings.filterwarnings("ignore")
med = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/med.csv")
df = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/train.csv")
sample_submission = pd.read_csv(
"/kaggle/input/gdz-elektrik-datathon-2023/sample_submission.csv"
)
# The following plots and methods were adapted from Kaggle's Time Series course.
# # Trend Analysis
df["Tarih"] = pd.to_datetime(df["Tarih"])
df = df.set_index("Tarih")
moving_average = df.rolling(
window=365 * 24, # 365-day window
center=True, # puts the average at the center of the window
min_periods=183 * 24, # choose about half the window size
).mean() # compute the mean (could also do median, std, min, max, ...)
ax = df.plot(style=".", color="0.5")
moving_average.plot(
ax=ax,
linewidth=3,
title="Enerji Tuketimi - 365-Day Moving Average",
legend=False,
)
from statsmodels.tsa.deterministic import DeterministicProcess
dp = DeterministicProcess(
index=df.index, # dates from the training data
constant=True, # dummy feature for the bias (y_intercept)
order=1, # the time dummy (trend)
drop=True, # drop terms if necessary to avoid collinearity
)
# `in_sample` creates features for the dates given in the `index` argument
X = dp.in_sample()
X.head()
x_index = X.index
from sklearn.linear_model import LinearRegression
y = df["Dağıtılan Enerji (MWh)"] # the target
# The intercept is the same as the `const` feature from
# DeterministicProcess. LinearRegression behaves badly with duplicated
# features, so we need to be sure to exclude it here.
model = LinearRegression(fit_intercept=False)
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=x_index)
from datetime import datetime
date_range = pd.date_range(
datetime(2022, 8, 1, 0, 0, 0), datetime(2022, 8, 31, 23, 0, 0), freq="H"
)
X = dp.out_of_sample(steps=744)
y_fore = pd.Series(model.predict(X), index=date_range)
y_fore.head()
plot_params = dict(
color="0.75",
style=".-",
markeredgecolor="0.25",
markerfacecolor="0.25",
legend=False,
)
ax = df.plot(title="Enerji Tuketimi - Linear Trend Forecast", **plot_params)
ax = y_pred.plot(ax=ax, linewidth=3, label="Trend")
ax = y_fore.plot(ax=ax, linewidth=3, label="Trend Forecast", color="C3")
_ = ax.legend()
monthly_trend = y_fore[-1] - y_fore[0]
monthly_trend
# # Seasonality Analysis
from pathlib import Path
from warnings import simplefilter
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
from statsmodels.tsa.deterministic import CalendarFourier, DeterministicProcess
simplefilter("ignore")
# Set Matplotlib defaults
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True, figsize=(11, 5))
plt.rc(
"axes",
labelweight="bold",
labelsize="large",
titleweight="bold",
titlesize=16,
titlepad=10,
)
plot_params = dict(
color="0.75",
style=".-",
markeredgecolor="0.25",
markerfacecolor="0.25",
legend=False,
)
# annotations: https://stackoverflow.com/a/49238256/5769929
def seasonal_plot(X, y, period, freq, ax=None):
if ax is None:
_, ax = plt.subplots()
palette = sns.color_palette(
"husl",
n_colors=X[period].nunique(),
)
ax = sns.lineplot(
x=freq,
y=y,
hue=period,
data=X,
ci=False,
ax=ax,
palette=palette,
legend=False,
)
ax.set_title(f"Seasonal Plot ({period}/{freq})")
for line, name in zip(ax.lines, X[period].unique()):
y_ = line.get_ydata()[-1]
ax.annotate(
name,
xy=(1, y_),
xytext=(6, 0),
color=line.get_color(),
xycoords=ax.get_yaxis_transform(),
textcoords="offset points",
size=14,
va="center",
)
return ax
def plot_periodogram(ts, detrend="linear", ax=None):
from scipy.signal import periodogram
fs = pd.Timedelta("1Y") / pd.Timedelta("1H")
freqencies, spectrum = periodogram(
ts,
fs=fs,
detrend=detrend,
window="boxcar",
scaling="spectrum",
)
if ax is None:
_, ax = plt.subplots()
ax.step(freqencies, spectrum, color="purple")
ax.set_xscale("log")
ax.set_xticks([1, 2, 4, 6, 12, 26, 52, 104, 52 * 7])
ax.set_xticklabels(
[
"Annual (1)",
"Semiannual (2)",
"Quarterly (4)",
"Bimonthly (6)",
"Monthly (12)",
"Biweekly (26)",
"Weekly (52)",
"Semiweekly (104)",
"Daily",
],
rotation=90,
)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
ax.set_ylabel("Variance")
ax.set_title("Periodogram")
return ax
X = df.copy()
# days within a week
X["day"] = X.index.dayofweek # the x-axis (freq)
X["week"] = X.index.week # the seasonal period (period)
X["hour"] = X.index.hour # the seasonal period (period)
# days within a year
X["dayofyear"] = X.index.dayofyear
X["year"] = X.index.year
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(15, 11))
seasonal_plot(X, y="Dağıtılan Enerji (MWh)", period="week", freq="day", ax=ax0)
seasonal_plot(X, y="Dağıtılan Enerji (MWh)", period="year", freq="dayofyear", ax=ax1)
seasonal_plot(X, y="Dağıtılan Enerji (MWh)", period="day", freq="hour", ax=ax2)
plot_periodogram(df["Dağıtılan Enerji (MWh)"])
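# A minimal sketch (an added illustration) of modelling the seasonality suggested by the plots above
# with Fourier features; the annual/weekly orders here are assumptions, not tuned values.
fourier_year = CalendarFourier(freq="A", order=4)
fourier_week = CalendarFourier(freq="W", order=3)
dp_season = DeterministicProcess(
    index=df.index,
    constant=True,
    order=1,  # keep the linear trend
    additional_terms=[fourier_year, fourier_week],
    drop=True,
)
X_season = dp_season.in_sample()
season_model = LinearRegression(fit_intercept=False)
season_model.fit(X_season, df["Dağıtılan Enerji (MWh)"])
y_season = pd.Series(season_model.predict(X_season), index=X_season.index)
ax = df["Dağıtılan Enerji (MWh)"].plot(**plot_params, title="Trend + Seasonal Fit (sketch)")
y_season.plot(ax=ax, linewidth=1, color="C1")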
|
# ## Cyclistic membership analysis
# #### This case study is from the Google Data Analytics Professional Certificate provided on coursera.org. The project focuses on analyzing a fictional company named "Cyclistic" based in Chicago. The data source for this project is provided by the real bike-sharing company "Divvy" [here](https://divvy-tripdata.s3.amazonaws.com/index.html). The objective is to analyze the two types of memberships for this service, labeled as casual riders and annual members.
# ### 1. The business objective and preparing the data
# The scenario is as follows "a junior data analyst working in the marketing analyst team at Cyclistic, a bike-share company in Chicago. The director of marketing believes the company’s future success depends on maximizing the number of annual memberships. Therefore, your team wants to understand how casual riders and annual members use Cyclistic bikes differently. From these insights, your team will design a new marketing strategy to convert casual riders into annual members. But first, Cyclistic executives must approve your recommendations, so they must be backed up with compelling data insights and professional data visualizations".
# The first step is to establish the business question. "Cyclistic" is trying to increase its revenue, and to do this it wants to know how to convert daily and weekly riders into annual members. The working theory is that annual passes generate more revenue, either because the year-long commitment guarantees a steady revenue stream or because casual pricing makes short-term use comparatively cheap, so casual riders never commit. With the data we first need to check whether this theory holds, and then analyze how casual riders could be converted into annual members. Answering these questions lets us inform the executive team on how to proceed with a marketing campaign aimed at turning casual riders into annual members, which addresses the business question of how the company can generate more revenue from the service.
# As the file sizes are too large to do a quick analysis using sheets I will organize the data by using BigQuery. To use Bigquery first we have to download the following package and create a Client object:
# Importing the BQ API client
from google.cloud import bigquery
# Constructing the BigQuery client for this project
client = bigquery.Client(project="ultra-depot-382223")
# The next step is to reference the dataset:
# Constructing the reference for the Cyclistic trip data
dataset_ref = client.dataset("cyclistic_trip_data", project="ultra-depot-382223")
# ### 2. Processing the data
# For the processing of the data I have taken the following steps to clean and order it:
# 1. After downloading and renaming the files, I separated the original data into its own folder.
# 2. I uploaded this original data into BigQuery and stored it all in the same dataset in ascending order.
# 3. I will now clean the data using BigQuery, following this checklist:
# - Remove duplicates: using COUNT(DISTINCT ride_id) I can see how many unique rows are returned compared with the total, and then repeat this check for every month:
#
query = """
SELECT
DISTINCT COUNT(ride_id) AS unique_rides,
COUNT(ride_id) AS total_ids
FROM `ultra-depot-382223.cyclistic_trip_data.23-02`;
"""
|
# Business Problem
# The goal is to develop a machine learning model that can predict which customers will leave
# the company.
# Dataset Story
# The Telco churn data contains information about a fictional telecom company that provided home
# phone and Internet services to 7043 customers in California during the third quarter. It shows
# which customers left the service, stayed, or signed up.
# CustomerId        Customer ID
# Gender            Gender
# SeniorCitizen     Whether the customer is a senior citizen (1, 0)
# Partner           Whether the customer has a partner (Yes, No)
# Dependents        Whether the customer has dependents (Yes, No)
# tenure            Number of months the customer has stayed with the company
# PhoneService      Whether the customer has phone service (Yes, No)
# MultipleLines     Whether the customer has multiple lines (Yes, No, No phone service)
# InternetService   The customer's internet service provider (DSL, Fiber optic, No)
# OnlineSecurity    Whether the customer has online security (Yes, No, No internet service)
# OnlineBackup      Whether the customer has online backup (Yes, No, No internet service)
# DeviceProtection  Whether the customer has device protection (Yes, No, No internet service)
# TechSupport       Whether the customer receives technical support (Yes, No, No internet service)
# StreamingTV       Whether the customer has streaming TV (Yes, No, No internet service)
# StreamingMovies   Whether the customer has streaming movies (Yes, No, No internet service)
# Contract          The customer's contract term (Month-to-month, One year, Two years)
# PaperlessBilling  Whether the customer has paperless billing (Yes, No)
# PaymentMethod     The customer's payment method (Electronic check, Mailed check, Bank transfer (automatic), Credit card (automatic))
# MonthlyCharges    The amount charged to the customer monthly
# TotalCharges      The total amount charged to the customer
# Churn             Whether the customer churned (Yes or No)
# 2. Data Preparation
import pandas as pd
import numpy as np
df = pd.read_csv("/kaggle/input/telecom-dataset/telco.csv")
df.head()
df["TotalCharges"][0]
df.shape
# Analyzing variables
# Step 1: Identify the numerical and categorical variables.
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
# 1- Categorical variables
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
# 2- Numeric but actually categorical (class)
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
    # 3 - Categorical in type but actually cardinal, i.e. nearly every value is unique
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
# 4 - Collect the cat_cols and num_but_cat variables
cat_cols = cat_cols + num_but_cat
# 5- Subtract the cardinal variable from cat_cols
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
# Step 2: Make the necessary corrections (e.g. variables with incorrect data types).
cat_cols
num_cols
cat_but_car
df.dtypes
df.SeniorCitizen = df.SeniorCitizen.astype("object")
df["MonthlyCharges"].nunique()
df.isnull().sum()
df.TotalCharges = pd.to_numeric(df.TotalCharges, errors="coerce")
df.isnull().sum()
df.TotalCharges = df.TotalCharges.astype("float")
# Step 3: Observe the distribution of the numerical and categorical variables in the data.
# Step 4: Analyze the categorical variables against the target variable.
df.groupby("Churn")["gender"].value_counts()
# Step 5: Check whether there are any outliers.
def outlier_thresholds(dataframe, col_name, q1=0.5, q3=0.95):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(col, check_outlier(df, col))
# Step 6: Check whether there are any missing observations.
df.isnull().sum()
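# The coercion above turned the blank TotalCharges strings into NaN. A common way to handle them
# is median imputation; this is a sketch of one option, not necessarily what the original author chose.
df["TotalCharges"] = df["TotalCharges"].fillna(df["TotalCharges"].median())
df.isnull().sum()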
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
sample_csv = pd.read_csv(
"/kaggle/input/mkn-ml2-2021-competition-1-timeseries/sample.csv"
)
test_csv = pd.read_csv("/kaggle/input/mkn-ml2-2021-competition-1-timeseries/test.csv")
train_csv = pd.read_csv("/kaggle/input/mkn-ml2-2021-competition-1-timeseries/train.csv")
train_csv.head()
from datetime import datetime
import matplotlib.pylab as plt # for visualization
# for making sure matplotlib plots are generated in Jupyter notebook itself
from matplotlib.pylab import rcParams
rcParams["figure.figsize"] = 10, 6
df = train_csv.copy()
df = df.set_index(["ds"])
df.head()
print(len(df))
## plot graph
plt.xlabel("Date & Time")
plt.ylabel("y value")
plt.plot(df[:96])
plt.show()
# ## Preprocessing and assessing stationarity:
# Determine rolling statistics
rolmean = df.rolling(
window=24
).mean()  # rolling mean over a 24-observation window
rolstd = df.rolling(window=24).std()
# Plot rolling statistics
plt.figure()
plt.plot(df, color="blue", label="Original")
plt.plot(rolmean, color="red", label="Rolling Mean")
plt.plot(rolstd, color="black", label="Rolling Std")
plt.legend(loc="best")
plt.title("Rolling Mean & Standard Deviation")
plt.show(block=False)
# Stationarity:
# - constant mean and variance
# - Dickey-Fuller test
from statsmodels.tsa.stattools import adfuller
# Perform Augmented Dickey–Fuller test:
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(df.y, autolag="AIC")
print("Results of Dickey Fuller Test: ")
print("p-value:", pvalue)
print("ADF statistic:", adf)
print(critical_values)
# The Augmented Dickey-Fuller (ADF) test is a type of statistical test called a unit root test. A unit root is a cause of non-stationarity.
# **Null Hypothesis (H0)**: Time series has a unit root. (Time series is not stationary).
# **Alternate Hypothesis (H1)**: Time series has no unit root (Time series is stationary).
# If the null hypothesis can be rejected, we can conclude that the time series is stationary.
# There are two ways to reject the null hypothesis:
# On the one hand, the null hypothesis can be rejected if the p-value is below a set significance level. The default significance level is 5%.
# - **p-value > significance level (default: 0.05)**: Fail to reject the null hypothesis (H0); the data has a unit root and is non-stationary.
# - **p-value <= significance level (default: 0.05)**: Reject the null hypothesis (H0); the data does not have a unit root and is stationary.
# On the other hand, the null hypothesis can be rejected if the test statistic is less than the critical value.
# - **ADF statistic > critical value**: Fail to reject the null hypothesis (H0); the data has a unit root and is non-stationary.
# - **ADF statistic < critical value**: Reject the null hypothesis (H0); the data does not have a unit root and is stationary.
# A small helper that applies this decision rule is sketched below.
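# A small helper (a sketch, not part of the original notebook) that applies the p-value rule above:
# the series is treated as stationary when the ADF p-value is at or below the significance level.
def is_stationary(series, significance=0.05):
    adf_stat, p_value, *_ = adfuller(series, autolag="AIC")
    return p_value <= significance

print("Raw series stationary?", is_stationary(df.y))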
# Ways to make the series stationary:
# - transformation (log, BoxCox)
# - differencing
from scipy import stats
# LOG
df_log = df.copy()
df_log.y = np.log(df.y)
plt.plot(df_log)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_log.y, autolag="AIC")
print("LOG p-value:", pvalue)
# BoxCox
df_boxcox = df.copy()
df_boxcox.y, lmbda_boxcox = stats.boxcox(df.y)
plt.plot(df_boxcox)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_boxcox.y, autolag="AIC")
print("BoxCox p-value:", pvalue)
print("Lambda: ", lmbda_boxcox)
# diff
df_diff = df.copy()
df_diff.y = np.append([0], np.diff(df.y))
plt.plot(df_diff)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff.y, autolag="AIC")
print("Diff p-value:", pvalue)
# boxcox + diff
df_diff_boxcox = df.copy()
df_diff_boxcox.y = np.append([0], np.diff(df_boxcox.y))
plt.plot(df_diff_boxcox)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff_boxcox.y, autolag="AIC")
print("BoxCox + Diff p-value:", pvalue)
# log + diff
df_diff_log = df.copy()
df_diff_log.y = np.append([0], np.diff(df_log.y))
plt.plot(df_diff_log)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff_log.y, autolag="AIC")
print("Log + Diff p-value:", pvalue)
# boxcox + diff^2
df_diff2_boxcox = df.copy()
df_diff2_boxcox.y = np.append([0], np.diff(df_diff_boxcox.y))
plt.plot(df_diff2_boxcox)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff2_boxcox.y, autolag="AIC")
print("Log + Diff^2 p-value:", pvalue)
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
print("RAW")
f, ax = plt.subplots(nrows=2, ncols=1, figsize=(16, 8))
plot_acf(df.y, lags=24, ax=ax[0])
plot_pacf(df.y, lags=24, ax=ax[1])
plt.show()
print("BoxCox + diff")
f, ax = plt.subplots(nrows=2, ncols=1, figsize=(16, 8))
plot_acf(df_diff_log.y, lags=12, ax=ax[0])
plot_pacf(df_diff_log.y, lags=12, ax=ax[1])
plt.show()
|
# # Predict Vehicle Price using 'car data.csv'
# ## Introduction
# The **objective** of this Homework is to use Random Forests to predict vehicle sell price.
# This week the focus is on fitting a random forest model to the data and getting some initial results. Next week we will continue with this exercise and try to improve the result.
# It is recommended to use Mean Squared Error (MSE) as your evaluation metric.
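# A tiny illustration (not part of the homework solution) of what Mean Squared Error computes:
# the average of the squared differences between the true and predicted values.
import numpy as np

y_true = np.array([3.0, 5.0, 2.5])
y_pred = np.array([2.5, 5.0, 4.0])
print(np.mean((y_true - y_pred) ** 2))  # (0.25 + 0.0 + 2.25) / 3 = 0.8333...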
# ## About the dataset
# You may use any or all of the provided [datasets](https://www.kaggle.com/nehalbirla/vehicle-dataset-from-cardekho), but we recommend starting with ‘car_data.csv’.
# This dataset contains information about used cars listed on www.cardekho.com. This data can be used for a lot of purposes, such as price prediction to exemplify the use of linear regression in Machine Learning. The columns in the given dataset are as follows:
# - Car Name/Model
# - Year
# - Selling Price (100k Indian rupee)
# - Present Price (100k Indian rupee)
# - Mileage (kms)
# - Fuel Type
# - Seller Type: Defines whether the seller is a dealer or an individual.
# - Transmission: Defines whether the car has a manual or automatic transmission.
# - (Number of Previous) Owners
# ## Imports
# Import all libraries we are using in our notebook.
# We also need to get our input data available inside the notebook.
# 1. Imports recommended by Kaggle
# Kaggle's default Python 3 environment comes with many helpful analytics libraries installed
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # visualizations
import seaborn as sns # data visualization library based on matplotlib, in general easier to use than matplotlib.
# 2. Imports needed to get our input data available in the notebook.
# Input data files are available in the read-only "../input/" directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# 3. Imports needed to Use Random Forest.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
# ## Loading the Dataset
# 1. Load the data from CSV file
car_filepath = "../input/vehicle-dataset-from-cardekho/car data.csv"
car_data = pd.read_csv(car_filepath)
# 2. Clean up column names
car_data = car_data.rename(
columns={
"Selling_Price": "Selling Price (100k rupee)",
"Present_Price": "Present Price (100k rupee)",
"Kms_Driven": "Mileage (km)",
"Fuel_Type": "Fuel Type",
"Seller_Type": "Seller Type",
"Owner": "Number of Previous Owners",
"Car_Name": "Car Model",
}
)
# ## Quick Inspection of the Data
# Show the first 5 rows of data, so that we get a rough understanding about the dataset.
car_data.head()
# Show basic information about the data set, so that we can inspect data types and non-null counts.
car_data.info()
# Let's double-check if we have any null values...
car_data.isnull().sum()
# -> There is no null data present in this data set.
# Now, let's have a quick look at some statistics about our data...
car_data.describe()
# The above statistics give us a little bit of insight into how our dataset is distributed.
# However, we'll need to do some Exploratory Data Analysis to better understand our data.
# ## Exploratory Data Analysis (EDA)
# ### 1. Number of Previous Owners
# **Question:** What is the relationship between number of previous owners and selling price?
# **Expectation:** More previous owners relates to a lower (median) selling prices.
# **Observation**: This expectation is not fulfilled for the car(s) that have 3 previous owners. (see below)
sns.boxplot(
data=car_data, x="Number of Previous Owners", y="Selling Price (100k rupee)"
)
# **Question:** Why does the category with 3 owners have a higher (median) selling price?
# **Observation:** There is only 1 car in our dataset that had 3 previous owners, so I would not consider it statistically significant. (see below)
car_data["Number of Previous Owners"].value_counts()
# ### 2. Car Model
# **Question:** Do we actually have multiple cars with the same Car Model?
# **Observation:** Yes, we do have multiple cars with the same Car Model. (see below)
car_data["Car Model"].value_counts()
# Let's isolate the most common car "city" and see if we can find something interesting...
car_data[car_data["Car Model"] == "city"].Year.value_counts()
# 2015 is the most common year for the Car Model "city", so let's only focus on that data...
city_cars_from_2015 = car_data[
(car_data["Car Model"] == "city") & (car_data["Year"] == 2015)
]
city_cars_from_2015
# **Question:** What is the relationship between Selling Price and Mileage (for the most common Car Model and Year)?
# **Expectation:** Higher Mileage relates to lower Selling Price.
# **Observation:** Based on this data, there does not appear to be an obvious relationship between Selling Price and Mileage. However, there might be a stronger relationship between Selling Price and Present Price.
sns.scatterplot(
city_cars_from_2015,
x="Mileage (km)",
y="Selling Price (100k rupee)",
hue="Present Price (100k rupee)",
)
# ### 3. Transmission Type
# **Question:** How many cars do we have for each Transmission Type?
car_data["Transmission"].value_counts()
# **Question:** Is there a relationship between Transmission Type and Selling Price?
# **Observation:** The majority of cars with Manual Transmissions appear to have lower Selling Prices.
sns.kdeplot(
data=car_data, x="Selling Price (100k rupee)", hue="Transmission", fill=True, cut=0
)
# ### 4. Seller Type
# **Question:** How many cars do we have for each Seller Type?
car_data["Seller Type"].value_counts()
# **Question:** Is there a relationship between Seller Type and Selling Price?
# **Observation:** Cars sold by individuals have a lower Selling Price than cars sold by dealerships.
sns.kdeplot(
data=car_data, x="Selling Price (100k rupee)", hue="Seller Type", fill=True, cut=0
)
# ### 5. Year
# **Question:** How many cars do we have for each Year?
(car_data["Year"]).value_counts()
# **Warning** - we should not make a kdeplot for categories with 1 entry, so let's remove data entries with year 2004 and 2018.
# **Question:** Is there a relationship between Year and Selling Price?
# **Expectation:** Newer cars have higher Selling Prices.
# **Observation:** Things are not as simple as 'expected'.
# 1. Remove the years of 2004 and 2018
car_data_subset = car_data[(car_data["Year"] != 2018) & (car_data["Year"] != 2004)]
car_data_subset["Year"].value_counts()
# 2. Create the plot
sns.kdeplot(
data=car_data_subset, x="Selling Price (100k rupee)", hue="Year", fill=True, cut=0
)
# This plot is a little too crowded to visually inspect. Let's just look at the statistics per year...
car_data_subset[["Year", "Selling Price (100k rupee)"]].groupby("Year").describe()
fig = plt.figure(figsize=(12, 6))
sns.boxplot(
data=car_data_subset[["Year", "Selling Price (100k rupee)"]],
x="Year",
y="Selling Price (100k rupee)",
)
fig = plt.figure(figsize=(12, 6))
car_data_subset[["Year", "Selling Price (100k rupee)"]].groupby("Year").median()
sns_plot = sns.lineplot(
car_data_subset[["Year", "Selling Price (100k rupee)"]].groupby("Year").mean()
)
# Remove Legend and use Y axis label instead.
sns_plot.set_ylabel("Median Selling Price (100k rupee)")
sns_plot.get_legend().remove()
# ### 6. Fuel Type
# **Question:** Is there a relationship between Fuel Type and Selling Price?
# **Observation:** Diesel Cars show a tendency to have higher Selling Price than Petrol Cars.
sns.kdeplot(
data=car_data, x="Selling Price (100k rupee)", hue="Fuel Type", fill=True, cut=0
)
# ### 7. Mileage
# **Question:** Is there a relationship between Mileage and Selling Price?
# **Observation:** No clear relationship in the scatter plot.
sns.scatterplot(
car_data,
x="Mileage (km)",
y="Selling Price (100k rupee)",
hue="Number of Previous Owners",
)
# ### 8. Present Price
# **Question:** Is there a relationship between Selling Price and Present Price?
# **Observation:** Yes, there appears to be a relationship.
sns.scatterplot(
car_data,
x="Present Price (100k rupee)",
y="Selling Price (100k rupee)",
hue="Number of Previous Owners",
)
# Let's now look at additional features that might impact the selling price...
# **Observation:** Cars with Manual Transmissions generally do not have the highest Selling Prices. (except for that one outlier)
sns.scatterplot(
car_data,
x="Present Price (100k rupee)",
y="Selling Price (100k rupee)",
hue="Transmission",
)
# **Observation:** Individual Sellers generally have lower Selling Prices than Dealerships.
sns.scatterplot(
car_data,
x="Present Price (100k rupee)",
y="Selling Price (100k rupee)",
hue="Seller Type",
)
# **Observation:** Older Cars tend to have a lower 'Selling Prices / Present Price' ratio.
sns.scatterplot(
car_data, x="Present Price (100k rupee)", y="Selling Price (100k rupee)", hue="Year"
)
car_data_subset = car_data.copy()  # copy, so the ratio column added below does not leak into car_data itself
car_data_subset["Ratio of Selling Price over Present Price"] = (
car_data_subset["Selling Price (100k rupee)"]
/ car_data_subset["Present Price (100k rupee)"]
)
sns.scatterplot(
car_data_subset,
x="Present Price (100k rupee)",
y="Ratio of Selling Price over Present Price",
hue="Year",
)
# **Observation:** If you only care about the most recent years, then it might be possible to use a linear regression model.
sns.lmplot(
data=car_data,
x="Present Price (100k rupee)",
y="Selling Price (100k rupee)",
hue="Year",
)
sns.scatterplot(
data=car_data,
x="Present Price (100k rupee)",
y="Selling Price (100k rupee)",
hue="Mileage (km)",
)
# ## Random Forest without any optimizations
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
# ### 1. Map Categories to Numerical Values.
# **Question:** Which Columns do we have?
car_data.columns
# **Question:** What are the data types of each column?
car_data.dtypes
# **Apply One Hot Encoding to all Categorical Values...**
car_data_one_hot_encoded = pd.get_dummies(car_data)
car_data_one_hot_encoded.head()
# ### 2. Use Random Forest to Predict Car Price.
# 1. Separate Prediction Target y from Features X
y = car_data_one_hot_encoded["Selling Price (100k rupee)"]
X = car_data_one_hot_encoded.drop(columns=["Selling Price (100k rupee)"])
# 2. Split data into Training and Validation data for both features and target.
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X, validation_X, train_y, validation_y = train_test_split(X, y, random_state=0)
# 3. Define the model
forest_model = RandomForestRegressor(random_state=1)
# 4. Fit the model
forest_model.fit(train_X, train_y)
# 5. Get predicted prices using validation dataset
car_data_price_predictions = forest_model.predict(validation_X)
mse_without_optimizations = mean_squared_error(validation_y, car_data_price_predictions)
print(f"MSE (without optimizations) = {mse_without_optimizations}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Predicting the diameter of an asteroid
# ***Part A - Data Set Preparation***
# Missing values
# # Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# # Load Data Set
df = pd.read_csv("../input/prediction-of-asteroid-diameter/Asteroid_Updated.csv")
# # Data Set Exploration
# Exploring the data set to understand better
df.head()
df.shape
df.columns
len(df.columns)
# # Part A - Data Set Preparation
# **A1. Renaming Columns**
# Renaming columns for better understanding
new_columns_dict = {
"name": "obj_name",
"a": "semi-major_axis(au)",
"e": "eccentricity",
"i": "x-y_inclination(deg)",
"om": "longitude_asc_node",
"w": "argument_perihelion",
"q": "perihelion_dist(au)",
"ad": "aphelion_dist(au)",
"per_y": "orbital_period",
"data_arc": "data_arc(d)",
"condition_code": "condition_code",
"n_obs_used": "n_obs_used",
"H": "abs_mag_para",
"neo": "near_earth_obj",
"pha": "physically_hazardous_asteroid",
"diameter": "diameter",
"extent": "axial_ellipsoid_dim(Km)",
"albedo": "geo_albedo",
"rot_per": "rot_per(h)",
"GM": "std_gravitational_para",
"BV": "bv_color_mag_diff",
"UB": "ub_color_mag_diff",
"IR": "ir_color_mag_diff",
"spec_B": "SMASSII_spec_tax_type",
"spec_T": "Tholen_spec_tax_type",
"G": "mag_slope_para",
"moid": "earth_min_oribit_inter_dist(au)",
"class": "class",
"n": "mean_motion(deg/d)",
"per": "orbital_period(d)",
"ma": "mean_anomaly(deg)",
}
df = df.rename(columns=new_columns_dict)
df.columns
# **A2. Missing Values**
# count of missing values
missing = pd.concat([pd.isnull(df).sum(), 100 * pd.isnull(df).mean()], axis=1)
missing.columns = ["count", "%"]
missing.sort_values("count")
# The target feature 'diameter' has a significant number of missing values
df["diameter"].notnull().sum()
# It still has a considerable number of rows left to work with after removing the empty ones
# removing rows where diameter values are missing
df = df.dropna(axis=0, subset=["diameter"])
len(df.index)
# The total number of rows matches the number of rows with non-null diameter values
#
# New percentage of missing values
missing2 = pd.concat([pd.isnull(df).sum(), 100 * pd.isnull(df).mean()], axis=1)
missing2.columns = ["count", "%"]
missing2.sort_values("count")
# It's confirmed that the missing values in the 'diameter' column have been removed. The data set still has input variables with a high percentage of missing values; using those in an ML model may produce misleading results, so the columns with a high percentage of missing values are removed.
# columns to drop
drop_list = [
"abs_mag_para",
"geo_albedo",
"obj_name",
"rot_per(h)",
"SMASSII_spec_tax_type",
"bv_color_mag_diff",
"Tholen_spec_tax_type",
"ub_color_mag_diff",
"mag_slope_para",
"axial_ellipsoid_dim(Km)",
"std_gravitational_para",
"ir_color_mag_diff",
]
df.drop(drop_list, axis=1, inplace=True)
len(df.columns)
# Initially there were 31 columns; now the count is down to 19
# **A3. Data type corrections**
# data types
df.dtypes
# Checking object features
df.select_dtypes("object").head()
# diameter should not be an object; it's a numerical value
# converting diameter to numerical value
# df['diameter']=pd.to_numeric(df['diameter'])
# Error in conversion at position 15
# checking the value at position 15
df["diameter"].iloc[15]
# it's a string; converting it to a number
df.at[15, "diameter"] = 226
# checking the value again
df["diameter"].iloc[15]
df["diameter"] = pd.to_numeric(df["diameter"])
# Now the conversion is successful
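# An alternative, more general sketch (an assumption, not what was done above): coerce every
# non-numeric diameter entry to NaN in one step and drop the affected rows, instead of patching
# individual positions by hand. Running it here is a no-op because the column is already numeric.
df["diameter"] = pd.to_numeric(df["diameter"], errors="coerce")
df = df.dropna(subset=["diameter"])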
# **A4. Unique Values**
# unique values of object features
df["condition_code"].unique()
df["near_earth_obj"].unique()
df["physically_hazardous_asteroid"].unique()
df["class"].unique()
# 'condition_code' has a mix of data types. Convert it to integers
df["condition_code"] = df["condition_code"].astype("int")
df["condition_code"].unique()
# Now all the values seem fine
# **A5.Value Counts**
# Check whether each feature has enough variation to build a model; if one value in a column dominates the others by a huge margin, the ML model may produce biased results.
df["near_earth_obj"].value_counts()
df["physically_hazardous_asteroid"].value_counts()
df["class"].value_counts()
# Enough variations are there to keep the features
# Checking out the integer features
df.select_dtypes("int64")
# the number of observations should be of integer type, so keeping it as is
# Checking out float features
df.select_dtypes("float64")
# All the above features suit float type
df.describe().T
# Data Arc still has missing values. Looking at the distribution of each feature
df.hist(figsize=(20, 20))
plt.show()
# Data Arc seems to have high variation; a boxplot will give a better idea
sns.boxplot(df["data_arc(d)"])
# Data arc is heavily skewed, so it's better to fill the missing values with the median rather than the mean
df["data_arc(d)"] = df["data_arc(d)"].fillna(df["data_arc(d)"].median())
df.describe().T
df.info()
# **Further checking on Object Features**
# Deep dive into object variables for checking whether they are interconnected
data_type = df.dtypes
data_type[(data_type == "object")].index.tolist()
# A common unit in astronomy is the astronomical unit (au), and is roughly equal to the distance from the Earth to the Sun or 150 million kilometers.
# **Near Earth Object (NEO)** : An asteroid or comet that is less than 1.3 au from the sun.
# **Physically Hazardous**: Determined by whether it is an NEO and its size.
# **Class**: This is the orbit class, such as if it is part of the main asteroid belt, orbits a larger planet, or is near earth.
# **Physically hazardous** classifier is based out of **diameter**(target), so it cannot be used for modelling.
# **Near Earth Object** classifier is already explained in class, so it is not needed for further exploration
drop_list = ["near_earth_obj", "physically_hazardous_asteroid"]
df.drop(drop_list, axis=1, inplace=True)
# **checking how the target (diameter) is distributed over orbit class**
# Set Plot Colors
sns.set_palette("inferno", 11)
sns.boxplot(x="class", y="diameter", data=df)
plt.yscale("log")
# There does appear to be some relationship between the diameter of an asteroid and where it orbits. Orbits are determined by the strength of gravity, which is determined by the size of the two objects and how close they are to each other. So, it makes sense that larger asteroids are going to be closer to the larger planets than to Earth
# **Explore the target variable**
df["diameter"].describe()
# **How the target variable distributed over the class classifier**
class_group = df.groupby("class").agg(
{"diameter": ["mean", "median", "min", "max", "std", "var", "count"]}
)
print(class_group)
# **Distribution of Condition code over diameter**
sns.boxplot(x="condition_code", y="diameter", data=df)
plt.yscale("log")
# It is evident that diameter doesn't change much across different condition codes, so there is no need to keep it for modelling
df.drop("condition_code", axis=1, inplace=True)
# **Explore the numerical features- Correlation**
plt.figure(figsize=(20, 10))
sns.set(font_scale=1.4)
sns.heatmap(df.corr(), annot=True, cmap="inferno", fmt=".2f", annot_kws={"size": 16})
# A few features are strongly correlated with each other. Removing them
# orbital_period(d) and orbital_period
df.drop("orbital_period", axis=1, inplace=True)
# perihelion_distance and earth_min_orbit_inter_dist(au)
df.drop("earth_min_oribit_inter_dist(au)", axis=1, inplace=True)
df.shape
# copying the current data frame into another variable for future purposes
df2 = df
# assign dummy values to categorical variable
df = pd.get_dummies(df, columns=["class"])
df.columns
df.head()
df.shape
# # Part B - Model building
# Split data into features and target
y = df["diameter"] # target
x = df.drop(columns="diameter") # independent features
# Standardize the features
# apply a standardized scaler to the data
SS_scaler = StandardScaler()
# Fit the standard scaler to the data
x_std = SS_scaler.fit_transform(x)
# Create Training and Testing data
X_train, X_test, Y_train, Y_test = train_test_split(
x_std, y, test_size=0.2, random_state=42
)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# # Linear Regression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, Y_train)
diameter_prediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameter_prediction)
rmse = np.sqrt(mse)
print("root mean squared error:" + str(rmse))
r2 = r2_score(Y_test, diameter_prediction)
print("r2:", r2)
# the r2 score needs to be improved
print(f"constant={model.intercept_}")
print(f"coefficients={model.coef_}")
# **Adjusted r2**
n = 137636 # number of observation
p = 23 # number of independent variables
R2 = 0.3008495396647336
adj_r2 = 1 - (1 - R2) * (n - 1) / (n - p - 1)
print(f"Adjusted r2 score={adj_r2}")
# **Ridge Regression**
from sklearn.linear_model import Ridge
model = Ridge()
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# The r2 score still has not improved, so introduce hyperparameter tuning for ridge regression
from sklearn.model_selection import GridSearchCV # iteration tool
ridge = Ridge()
parameters = {
"alpha": [
1e-15,
1e-10,
1e-8,
1e-3,
1e-2,
1,
5,
10,
20,
25,
30,
35,
40,
45,
50,
60,
70,
85,
100,
105,
125,
145,
150,
160,
168,
170,
172,
171,
180,
200,
210,
]
}
ridge_regressor = GridSearchCV(
ridge, parameters, scoring="neg_mean_squared_error", cv=5
)
ridge_regressor.fit(X_train, Y_train)
print(ridge_regressor.best_params_) # best alpha value to use
# applying the alpha chosen from the grid search results instead of the default value of 1
model = Ridge(alpha=210)
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# We can see a minute improvement in the r2 value
# **Lasso**
from sklearn import linear_model
model = linear_model.Lasso(alpha=0.1)
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# r2 score is better than ridge regression even before hyperparameter tuning
# **Lasso with hyper parameter tuning**
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
lasso = Lasso()
parameters = {
"alpha": [
1e-15,
1e-10,
1e-8,
1e-3,
1e-2,
1,
5,
10,
20,
25,
30,
35,
40,
45,
50,
60,
70,
85,
100,
105,
125,
145,
150,
160,
168,
170,
172,
171,
180,
200,
]
}
lasso_regressor = GridSearchCV(
lasso, parameters, scoring="neg_mean_squared_error", cv=5
)
lasso_regressor.fit(X_train, Y_train)
print(lasso_regressor.best_params_) # best alpha value to use
# using this alpha in lasso
from sklearn import linear_model
model = linear_model.Lasso(alpha=1e-15)
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# This hyperparameter tuning reduced the r2 value, so we may need to switch from grid search to randomized search, k-fold cross-validation, etc.; a sketch of a randomized search is shown below.
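# A hedged sketch of one of the alternatives mentioned above: RandomizedSearchCV over a
# log-uniform alpha range for Lasso. The search space and n_iter are assumptions and may need
# adjusting; this can take a while on the full training set.
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import loguniform

lasso_random = RandomizedSearchCV(
    Lasso(),
    param_distributions={"alpha": loguniform(1e-4, 1e2)},
    n_iter=20,
    scoring="neg_mean_squared_error",
    cv=5,
    random_state=42,
)
lasso_random.fit(X_train, Y_train)
print(lasso_random.best_params_)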
# **KNN**
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor()
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# r2 has improved; now it's more than 0.5
# KNN hyper parameter tuning
"""
from sklearn.model_selection import KFold
KNN=KNeighborsRegressor()
seed=13
kfold=KFold(n_splits=3,shuffle=True,random_state=seed)
#Define candidate hyperparameters
hp_candidates=[{'n_neighbors':[4,5,6,7],'weights':['uniform','distance']}]
#search for best parameters
grid=GridSearchCV(estimator=KNN,param_grid=hp_candidates,cv=kfold,scoring='r2')
grid.fit(X_train,Y_train)
"""
# grid.best_params_ #finding best hyper parameters
"""
model=KNeighborsRegressor(n_neighbors=6,weights='distance')
model.fit(X_train,Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : "+str(rmse))
r2 = r2_score(Y_test,diameterPrediction)
print("R2 Score : ",r2)
"""
# **SVR**
"""
from sklearn.svm import SVR
model = SVR()
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : "+str(rmse))
r2 = r2_score(Y_test,diameterPrediction)
print("R2 Score : ",r2)
"""
# Here the r2 score is less than KNN
# Hyper parameter tuning for improving r2 score
"""
param_grid={'C':[0.1,1,10,100,1000],
# 'gama':[1,0.1,0.01,0.001,0.0001],
# 'kernel':['linear','rbf','poly','sigmoid']}
#grid=GridSearchCV(SVR(),param_grid,refit=True,verbose=3)
#grid.fit(x_train,y_train)
#time consuming processing
"""
# **Decision Tree**
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor()
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# **Random Forest**
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(X_train, Y_train)
model.predict(X_test)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# **Ada Boost**
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error # for getting the mean squared error
from sklearn.metrics import r2_score # to get the accuracy of each model
# Create an AdaBoost regressor object
AdaModel = AdaBoostRegressor(n_estimators=100, learning_rate=1)
# Train the AdaBoost regressor
model = AdaModel.fit(X_train, Y_train)
# Predict the response for test dataset
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# #Time taken : 10 sec
# AdaBoost - With customized Base Model = linear regression
# Import LinearRegression to use as the base estimator
from sklearn.linear_model import LinearRegression
LR = LinearRegression()
# Create an AdaBoost regressor object with a LinearRegression base estimator
abc = AdaBoostRegressor(n_estimators=200, base_estimator=LR, learning_rate=1)
model = abc.fit(X_train, Y_train)
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# AdaBoost - With customized Base Model = knn
"""
from sklearn.neighbors import KNeighborsRegressor
KNN = KNeighborsRegressor()
# Create adaboost classifer object
abc =AdaBoostRegressor(n_estimators=200, base_estimator=KNN,learning_rate=1)
model = abc.fit(X_train, Y_train)
"""
"""
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : "+str(rmse))
r2 = r2_score(Y_test,diameterPrediction)
print("R2 Score : ",r2)
"""
# Gradient Boost
# Create gradientboost REGRESSOR object
from sklearn.ensemble import GradientBoostingRegressor
gradientregressor = GradientBoostingRegressor(
max_depth=2, n_estimators=3, learning_rate=1.0
)
# Train gradientboost REGRESSOR
model = gradientregressor.fit(X_train, Y_train)
# Predict the response for test dataset
diameterPrediction = model.predict(X_test)
mse = mean_squared_error(Y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
"""
#Hyper Parameter tuning (Will take more time)
from sklearn.model_selection import GridSearchCV
LR = {'learning_rate':[0.15,0.1,0.10,0.05], 'n_estimators':[100,150,200,250]}
tuning = GridSearchCV(estimator =GradientBoostingRegressor(),
param_grid = LR, scoring='r2')
tuning.fit(X_train, Y_train)
tuning.best_params_, tuning.best_score_
"""
# I am choosing 'n_estimators=200,learning_rate=0.15'
gradientregressor = GradientBoostingRegressor(
max_depth=2, n_estimators=200, learning_rate=0.15
)
model = gradientregressor.fit(X_train, Y_train)
diameterPrediction = model.predict(X_test)
r2 = r2_score(Y_test, diameterPrediction)
print("R2 Score : ", r2)
# XG Boost
from xgboost import XGBRegressor
from sklearn.svm import SVR
# Running various models
models = []
models.append(("SVM", SVR()))
models.append(("XGB", XGBRegressor(eta=0.01, gamma=10))) # eta = 0.01,gamma = 10
"""'
import time
# evaluate each model in turn
results = []
names = []
for name, model in models:
start_time = time.time()
model.fit(X_train, Y_train)
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
R2 = r2_score(Y_test, predictions)
print("R2 Score", (R2),name)
print("Time_Taken", (time.time() - start_time))
"""
# CAT Boost
from catboost import CatBoostRegressor, Pool
df2["class"]
# Split data into features and target.
y = df2["diameter"]
X = df2.drop(columns="diameter")
from catboost import CatBoostRegressor, Pool
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=42
)
pool_train = Pool(X_train, y_train, cat_features=["class"])
pool_test = Pool(X_test, cat_features=["class"])
import time
start = time.time()
cbr = CatBoostRegressor(iterations=500, max_depth=2)
cbr.fit(pool_train)
diameterPrediction = cbr.predict(pool_test)
mse = mean_squared_error(y_test, diameterPrediction)
rmse = np.sqrt(mse)
print("root mean square error : " + str(rmse))
r2 = r2_score(y_test, diameterPrediction)
print("R2 Score : ", r2)
end = time.time()
diff = end - start
print("Execution time:", diff)
# Light GBM
import lightgbm
"""
start = time.time()
lgbmr = lightgbm.LGBMRegressor()
lgbmr.fit(X_train, Y_train)
y_pred = lgbmr.predict(X_test)
mse = mean_squared_error(Y_test, y_pred)
rmse = np.sqrt(mse)
print("root mean square error : "+str(rmse))
r2 = r2_score(Y_test, y_pred)
print("R2 Score : ",r2)
end = time.time()
diff = end - start
print('Execution time:', diff)
"""
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
break
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
import os, sys, json, cv2, random, torchvision
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from torchvision import transforms
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
auc,
f1_score,
roc_curve,
classification_report,
confusion_matrix,
)
import seaborn as sns
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import torch.nn as nn
from itertools import cycle
from numpy import interp
net = torchvision.models.vit_b_16(pretrained=True)
net.heads = nn.Sequential(nn.Linear(768, 4))
def read_split_data(root, plot_image=False):
filepaths = []
labels = []
bad_images = []
random.seed(0)
    assert os.path.exists(root), "dataset root path does not exist!"
classes = [
cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))
]
classes.sort()
class_indices = {k: v for v, k in enumerate(classes)}
json_str = json.dumps({v: k for k, v in class_indices.items()}, indent=4)
with open("classes_indices.json", "w") as json_file:
json_file.write(json_str)
every_class_num = []
supported = [".jpg", ".png", ".jpeg", ".PNG", ".JPG", ".JPEG"]
for klass in classes:
classpath = os.path.join(root, klass)
images = [
os.path.join(root, klass, i)
for i in os.listdir(classpath)
if os.path.splitext(i)[-1] in supported
]
every_class_num.append(len(images))
flist = sorted(os.listdir(classpath))
desc = f"{klass:23s}"
for f in tqdm(flist, ncols=110, desc=desc, unit="file", colour="blue"):
fpath = os.path.join(classpath, f)
fl = f.lower()
index = fl.rfind(".")
ext = fl[index:]
if ext in supported:
                img = cv2.imread(fpath)
                if img is None:
                    bad_images.append(fpath)
                    print("defective image file: ", fpath)
                else:
                    filepaths.append(fpath)
                    labels.append(klass)
else:
bad_images.append(fpath)
Fseries = pd.Series(filepaths, name="filepaths")
Lseries = pd.Series(labels, name="labels")
df = pd.concat([Fseries, Lseries], axis=1)
print(f"{len(df.labels.unique())} kind of images were found in the dataset")
train_df, test_df = train_test_split(
df, train_size=0.8, shuffle=True, random_state=123, stratify=df["labels"]
)
train_image_path = train_df["filepaths"].tolist()
val_image_path = test_df["filepaths"].tolist()
train_image_label = [class_indices[i] for i in train_df["labels"].tolist()]
val_image_label = [class_indices[i] for i in test_df["labels"].tolist()]
sample_df = train_df.sample(n=50, replace=False)
ht, wt, count = 0, 0, 0
for i in range(len(sample_df)):
fpath = sample_df["filepaths"].iloc[i]
try:
img = cv2.imread(fpath)
h = img.shape[0]
w = img.shape[1]
ht += h
wt += w
count += 1
except:
pass
have = int(ht / count)
wave = int(wt / count)
aspect_ratio = have / wave
print(
"{} images were found in the dataset.\n{} for training, {} for validation".format(
sum(every_class_num), len(train_image_path), len(val_image_path)
)
)
print(
"average image height= ",
have,
" average image width= ",
wave,
" aspect ratio h/w= ",
aspect_ratio,
)
if plot_image:
plt.bar(range(len(classes)), every_class_num, align="center")
plt.xticks(range(len(classes)), classes)
for i, v in enumerate(every_class_num):
plt.text(x=i, y=v + 5, s=str(v), ha="center")
plt.xlabel("image class")
plt.ylabel("number of images")
plt.title("class distribution")
plt.show()
return (
train_image_path,
train_image_label,
val_image_path,
val_image_label,
class_indices,
)
def train_one_epoch(model, train_loader, optimizer, device, epoch, scalar=None):
model.train()
loss_function = nn.CrossEntropyLoss()
sample_num, train_acc, train_loss = 0, 0, 0
optimizer.zero_grad()
train_bar = tqdm(train_loader, file=sys.stdout, colour="red")
for step, data in enumerate(train_bar):
optimizer.zero_grad()
images, labels = data
sample_num += images.shape[0]
images = images.to(device)
labels = labels.to(device)
if scalar is not None:
with torch.cuda.amp.autocast():
outputs = model(images)
loss = loss_function(outputs, labels)
else:
outputs = model(images)
loss = loss_function(outputs, labels)
train_acc += (torch.argmax(outputs, dim=1) == labels).sum().item()
train_loss += loss.item()
if scalar is not None:
scalar.scale(loss).backward()
scalar.step(optimizer)
scalar.update()
else:
loss.backward()
optimizer.step()
train_bar.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}".format(
epoch, train_loss / (step + 1), train_acc / sample_num
)
return round(train_loss / (step + 1), 3), round(train_acc / sample_num, 3)
@torch.no_grad()
def val_step(model, valid_loader, device, epoch):
model.eval()
loss_function = nn.CrossEntropyLoss()
sample_num, valid_acc, valid_loss = 0, 0, 0
valid_bar = tqdm(valid_loader, file=sys.stdout, colour="red")
for step, data in enumerate(valid_bar):
images, labels = data
sample_num += images.shape[0]
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
loss = loss_function(outputs, labels)
valid_loss += loss.item()
valid_acc += (torch.argmax(outputs, dim=1) == labels).sum().item()
valid_bar.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(
epoch, valid_loss / (step + 1), valid_acc / sample_num
)
return round(valid_loss / (step + 1), 3), round(valid_acc / sample_num, 3)
def Plot_ROC(net, val_loader, save_name, device):
try:
json_file = open("./classes_indices.json", "r")
class_indict = json.load(json_file)
except Exception as e:
print(e)
exit(-1)
score_list = []
label_list = []
net.load_state_dict(torch.load(save_name))
for i, data in enumerate(val_loader):
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = torch.softmax(net(images), dim=1)
score_tmp = outputs
score_list.extend(score_tmp.detach().cpu().numpy())
label_list.extend(labels.cpu().numpy())
score_array = np.array(score_list)
    # Convert the labels to one-hot form
label_tensor = torch.tensor(label_list)
label_tensor = label_tensor.reshape((label_tensor.shape[0], 1))
label_onehot = torch.zeros(label_tensor.shape[0], len(class_indict.keys()))
label_onehot.scatter_(dim=1, index=label_tensor, value=1)
label_onehot = np.array(label_onehot)
print("score_array:", score_array.shape) # (batchsize, classnum)
print("label_onehot:", label_onehot.shape) # torch.Size([batchsize, classnum])
    # Use sklearn to compute the fpr and tpr for each class
fpr_dict = dict()
tpr_dict = dict()
roc_auc_dict = dict()
for i in range(len(class_indict.keys())):
fpr_dict[i], tpr_dict[i], _ = roc_curve(label_onehot[:, i], score_array[:, i])
roc_auc_dict[i] = auc(fpr_dict[i], tpr_dict[i])
# micro
fpr_dict["micro"], tpr_dict["micro"], _ = roc_curve(
label_onehot.ravel(), score_array.ravel()
)
roc_auc_dict["micro"] = auc(fpr_dict["micro"], tpr_dict["micro"])
# macro
# First aggregate all false positive rates
all_fpr = np.unique(
np.concatenate([fpr_dict[i] for i in range(len(class_indict.keys()))])
)
    # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(len(set(label_list))):
mean_tpr += interp(all_fpr, fpr_dict[i], tpr_dict[i])
# Finally average it and compute AUC
mean_tpr /= len(class_indict.keys())
fpr_dict["macro"] = all_fpr
tpr_dict["macro"] = mean_tpr
roc_auc_dict["macro"] = auc(fpr_dict["macro"], tpr_dict["macro"])
    # Plot the micro- and macro-averaged ROC curves over all classes
plt.figure(figsize=(12, 12))
lw = 2
plt.plot(
fpr_dict["micro"],
tpr_dict["micro"],
label="micro-average ROC curve (area = {0:0.2f})"
"".format(roc_auc_dict["micro"]),
color="deeppink",
linestyle=":",
linewidth=4,
)
plt.plot(
fpr_dict["macro"],
tpr_dict["macro"],
label="macro-average ROC curve (area = {0:0.2f})"
"".format(roc_auc_dict["macro"]),
color="navy",
linestyle=":",
linewidth=4,
)
colors = cycle(["aqua", "darkorange", "cornflowerblue"])
for i, color in zip(range(len(class_indict.keys())), colors):
plt.plot(
fpr_dict[i],
tpr_dict[i],
color=color,
lw=lw,
label="ROC curve of class {0} (area = {1:0.2f})"
"".format(class_indict[str(i)], roc_auc_dict[i]),
)
plt.plot([0, 1], [0, 1], "k--", lw=lw, label="Chance", color="red")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic to multi-class")
plt.legend(loc="lower right")
plt.savefig("./save_images/multi_classes_roc.png")
plt.show()
def Predictor(net, test_loader, save_name, device):
try:
json_file = open("./classes_indices.json", "r")
class_indict = json.load(json_file)
except Exception as e:
print(e)
exit(-1)
errors = 0
y_pred, y_true = [], []
net.load_state_dict(torch.load(save_name))
net.eval()
with torch.no_grad():
for data in test_loader:
images, labels = data
images, labels = images.to(device), labels.to(device)
preds = torch.argmax(torch.softmax(net(images), dim=1), dim=1)
for i in range(len(preds)):
y_pred.append(preds[i].cpu())
y_true.append(labels[i].cpu())
tests = len(y_pred)
for i in range(tests):
pred_index = y_pred[i]
true_index = y_true[i]
if pred_index != true_index:
errors += 1
acc = (1 - errors / tests) * 100
print(f"there were {errors} errors in {tests} tests for an accuracy of {acc:6.2f}%")
ypred = np.array(y_pred)
ytrue = np.array(y_true)
f1score = f1_score(ytrue, ypred, average="weighted") * 100
print(f"The F1-score was {f1score:.3f}")
class_count = len(list(class_indict.values()))
classes = list(class_indict.values())
cm = confusion_matrix(ytrue, ypred)
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
sns.heatmap(cm, annot=True, vmin=0, fmt="g", cmap="Blues", cbar=False)
plt.xticks(np.arange(class_count) + 0.5, classes, rotation=45, fontsize=14)
plt.yticks(np.arange(class_count) + 0.5, classes, rotation=0, fontsize=14)
plt.xlabel("Predicted", fontsize=14)
plt.ylabel("True", fontsize=14)
plt.title("Confusion Matrix")
plt.subplot(1, 2, 2)
sns.heatmap(cm / np.sum(cm), annot=True, fmt=".1%")
plt.xticks(np.arange(class_count) + 0.5, classes, rotation=45, fontsize=14)
plt.yticks(np.arange(class_count) + 0.5, classes, rotation=0, fontsize=14)
plt.xlabel("Predicted", fontsize=14)
plt.ylabel("True", fontsize=14)
plt.savefig("./save_images/confusion_matrix.png")
plt.show()
clr = classification_report(y_true, y_pred, target_names=classes, digits=4)
print("Classification Report:\n----------------------\n", clr)
class MyDataset(Dataset):
def __init__(self, image_path, image_labels, transforms=None):
self.image_path = image_path
self.image_labels = image_labels
self.transforms = transforms
def __getitem__(self, item):
image = Image.open(self.image_path[item]).convert("RGB")
label = self.image_labels[item]
if self.transforms:
image = self.transforms(image)
return image, label
def __len__(self):
return len(self.image_path)
@staticmethod
def collate_fn(batch):
image, label = tuple(zip(*batch))
image = torch.stack(image, dim=0)
label = torch.as_tensor(label)
return image, label
root = r"/kaggle/input/big-cats-image-classification-dataset/animals/"
batch_size = 32
device = "cuda" if torch.cuda.is_available() else "cpu"
epochs = 1
lr = 0.0001
weight_decay = 0.00001
data_transform = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.ToTensor(),
transforms.RandomHorizontalFlip(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
),
"valid": transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
),
}
def main(root, batch_size, model, epochs, lr):
if os.path.exists("./weights") is False:
os.makedirs("./weights")
if os.path.exists("./save_images") is False:
os.makedirs("./save_images")
(
train_image_path,
train_image_label,
val_image_path,
val_image_label,
class_indices,
) = read_split_data(root)
    # Instantiate the training dataset
train_dataset = MyDataset(
train_image_path, train_image_label, data_transform["train"]
)
    # Instantiate the validation dataset
val_dataset = MyDataset(val_image_path, val_image_label, data_transform["valid"])
sys_name = sys.platform
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=0,
collate_fn=train_dataset.collate_fn,
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=0,
collate_fn=val_dataset.collate_fn,
)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epochs):
train_loss, train_accuracy = train_one_epoch(
model, train_loader, optimizer, device, epoch, scalar=None
)
valid_loss, valid_accuracy = val_step(model, val_loader, device, epoch)
torch.save(model.state_dict(), "./weights/model.pth".format(epoch))
print("Finished Training!!!")
Predictor(model, val_loader, "./weights/model.pth", device)
Plot_ROC(model, val_loader, "./weights/model.pth", device)
if __name__ == "__main__":
main(root, batch_size, net.to(device), epochs, lr)
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Define the parameters for image preprocessing and augmentation
datagen = ImageDataGenerator(
rescale=1.0 / 255, # Rescale pixel values to [0, 1]
rotation_range=20, # Randomly rotate images up to 20 degrees
width_shift_range=0.1, # Randomly shift images horizontally up to 10%
height_shift_range=0.1, # Randomly shift images vertically up to 10%
shear_range=0.1, # Randomly shear images up to 10%
zoom_range=0.1, # Randomly zoom images up to 10%
horizontal_flip=True, # Randomly flip images horizontally
fill_mode="nearest", # Fill any gaps created by rotation or shifting with the nearest pixel value
validation_split=0.2, # Split the dataset into training and validation with an 80-20 ratio
)
# Set the path to your image dataset and the batch size
data_dir = "/kaggle/input/medical-tests-multi-class-image-dataset/CNN_dataset"
batch_size = 32
# Use the image data generator to generate batches of augmented images for training and validation
train_generator = datagen.flow_from_directory(
data_dir,
target_size=(224, 224), # Resize images to 224x224 pixels
batch_size=batch_size,
class_mode="categorical", # Set class mode to categorical for multi-class classification tasks
subset="training", # Specify that we want to use the training set
)
val_generator = datagen.flow_from_directory(
data_dir,
target_size=(224, 224),
batch_size=batch_size,
class_mode="categorical",
subset="validation", # Specify that we want to use the validation set
)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
# Define the CNN architecture
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(224, 224, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.5))
model.add(
Dense(5, activation="softmax")
)  # one output unit per class (5 classes in this dataset)
model.summary()
# Compile the model
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# Train the model on the training set and validate on the validation set
history = model.fit(train_generator, epochs=10, validation_data=val_generator)
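# A small sketch (not in the original notebook): visualize the training history returned by fit()
# to compare training and validation accuracy per epoch.
import matplotlib.pyplot as plt

plt.plot(history.history["accuracy"], label="train accuracy")
plt.plot(history.history["val_accuracy"], label="validation accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()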
# Evaluate the model on the validation set (no separate test generator was defined in this notebook)
val_loss, val_acc = model.evaluate(val_generator)
print("Validation accuracy:", val_acc)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Importing Libraries**
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random
from sklearn.model_selection import train_test_split
from sklearn import metrics
import keras
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
df = pd.read_csv("/kaggle/input/mushroom-classification/mushrooms.csv")
df
df.info()
df.shape
# **Check for the Missing Values in the Dataset**
df.isnull().sum()
# No Missing Values
df
df["Label"] = df["class"].map({"e": 0, "p": 1})
df
y_train = df["Label"]
# We need Numerical data for training the Random Forest Classifier Model.
# To do the necessary conversion, we can use Label Encoding.
# **What is Label Encoding?**
# Label Encoding refers to converting the labels into numeric form so as to convert it into the machine-readable form. Machine learning algorithms can then decide in a better way on how those labels must be operated. It is an important pre-processing step for the structured dataset in supervised learning.
def Label_encode(feat):
LabelE = LabelEncoder()
LabelE.fit(feat)
print(feat.name, LabelE.classes_)
return LabelE.transform(feat)
for col in df.columns:
df[str(col)] = Label_encode(df[str(col)])
df
x_df = df.drop("class", 1)
x_df = df.drop("Label", 1)
x_train = x_df
x_train
x_train.shape
y_train.shape
X_train, X_test, y_train, y_test = train_test_split(
x_train, y_train, test_size=0.20, random_state=42
)
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
# **Function to evaluate the performance of the model -**
def evaluate(model, test_features, test_labels):
    predictions = model.predict(test_features)
    errors = abs(predictions - test_labels)
    accuracy = metrics.accuracy_score(test_labels, predictions)
    print("Model Performance")
    print("Average Error (misclassification rate): {:0.4f}".format(np.mean(errors)))
    print("Accuracy = {:0.2f}%.".format(accuracy * 100))
    return accuracy
evaluate(rfc, X_test, y_test)
y_predict = rfc.predict(X_test)
print("accuracy: {}%".format(round(accuracy_score(y_test, y_predict) * 100, 4)))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing the data
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, KFold
from xgboost import XGBClassifier
from xgboost import XGBRegressor
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_squared_log_error
data = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/train.csv")
test = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
data = data.drop(columns=["Unnamed: 0"], axis=1)
test = test.drop(columns=["Unnamed: 0"], axis=1)
print(data.shape, test.shape)
data.dtypes
def summary(df):
print(f"data shape: {df.shape}")
sum = pd.DataFrame(df.dtypes, columns=["data type"])
sum["missing"] = df.isnull().sum().values * 100
sum["%missing"] = df.isnull().sum().values / df.shape[0]
sum["unique"] = df.nunique().values
desc = pd.DataFrame(df.describe().transpose())
sum["mean"] = desc["mean"].values
sum["std"] = desc["std"].values
sum["min"] = desc["min"].values
sum["max"] = desc["max"].values
return sum
summary(data)
summary(test)
sns.displot(data, x="Completion_rate", kde=True)
import math
features = test.columns
n_bins = 50
histplot_hyperparams = {"kde": True, "alpha": 0.4, "stat": "percent", "bins": n_bins}
columns = features
n_cols = 4
n_rows = math.ceil(len(columns) / n_cols)
fig, ax = plt.subplots(n_rows, n_cols, figsize=(20, n_rows * 4), dpi=300)
ax = ax.flatten()
for i, column in enumerate(columns):
plot_axes = [ax[i]]
sns.kdeplot(data[column], label="Train", ax=ax[i])
sns.kdeplot(test[column], label="Test", ax=ax[i])
for i in range(i + 1, len(ax)):
ax[i].axis("off")
handles, labels = ax[0].get_legend_handles_labels()
fig.legend(
    handles, labels, loc="upper center", bbox_to_anchor=(0.5, 1.05), fontsize=25, ncol=3
)
plt.tight_layout()
columns = features
n_cols = 4
n_rows = math.ceil(len(columns) / n_cols)
fig, ax = plt.subplots(n_rows, n_cols, figsize=(20, n_rows * 4), dpi=300)
ax = ax.flatten()
for i, column in enumerate(columns):
plot_axes = [ax[i]]
sns.boxplot(data[column], ax=ax[i])
for i in range(i + 1, len(ax)):
ax[i].axis("off")
corr = data.iloc[:, :].corr()
plt.subplots(figsize=(9, 9), dpi=300)
sns.heatmap(corr, annot=True, cmap="Blues")
plt.title("Correlation Matrix", size=16)
train = data.drop("Completion_rate", axis=1)
train["istrain"] = 1
test["istrain"] = 0
X = pd.concat([train, test], axis=0)
y = X["istrain"]
X = X.drop("istrain", axis=1)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=37)
clf = XGBClassifier(seed=37)
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
x0, x1 = X.iloc[train_index], X.iloc[test_index]
y0, y1 = y.iloc[train_index], y.iloc[test_index]
clf.fit(x0, y0)
pred = clf.predict_proba(x1)[:, 1]
print(f"Fold {i+1} AUC Score:", roc_auc_score(y1, pred))
data["Completion_rate"] = np.log(data["Completion_rate"])
xgb_params = {
"booster": "gbtree",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"learning_rate": 0.1,
"max_depth": 8,
"n_estimators": 9999,
"early_stopping_rounds": 200,
"subsample": 1.0,
"colsample_bytree": 1.0,
"seed": 42,
}
data
X = data.drop("Completion_rate", axis=1)
y = data["Completion_rate"]
kf = KFold(n_splits=5, shuffle=True, random_state=37)
best_iteration_xgb = []
scores = []
MODELS = []
for i, (train_index, valid_index) in enumerate(kf.split(X, y)):
print("#" * 25)
print("### Fold", i + 1)
print("#" * 25)
X_train = X.iloc[train_index]
y_train = y.iloc[train_index]
X_valid = X.iloc[valid_index]
y_valid = y.iloc[valid_index]
model = XGBRegressor(**xgb_params)
model.fit(
X_train, y_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], verbose=0
)
MODELS.append(model)
fold_score = mean_squared_log_error(
np.exp(y_valid), np.exp(model.predict(X_valid)), squared=False
)
print(f"Fold RMSLE Score:", fold_score)
scores.append(fold_score)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = X_train.columns
fold_importance_df["importance"] = model.feature_importances_
best_iteration_xgb.append(model.best_ntree_limit)
print("Fold Feature Importance:")
display(fold_importance_df.sort_values(by="importance", ascending=False).head(10))
print()
print(f"Average Vaildation RMSLE Score:", sum(scores) / 5)
from sklearn.model_selection import train_test_split
train, dev = train_test_split(data, test_size=0.2, random_state=3)
x_train, x_dev = train.iloc[:, 1:], dev.iloc[:, 1:]
# x_train,x_dev = train.iloc[:,2:],dev.iloc[:,2:]
y_train, y_dev = train["Completion_rate"], dev["Completion_rate"]
x_train
# r2_score(y_pred_auto,y_dev)
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn import svm
from xgboost import XGBRegressor
lr = LinearRegression()
ridge = Ridge()
lasso = Lasso()
dt = DecisionTreeRegressor(max_depth=7)
svr = svm.SVR()
rf = RandomForestRegressor(max_depth=19, n_estimators=55)
gb = GradientBoostingRegressor(max_depth=6, n_estimators=250, learning_rate=0.1)
xgb = XGBRegressor(
n_estimators=250, max_depth=8, learning_rate=0.2031, n_jobs=-1, random_state=3
)
lr.fit(x_train, y_train)
ridge.fit(x_train, y_train)
lasso.fit(x_train, y_train)
dt.fit(x_train, y_train)
svr.fit(x_train, y_train)
rf.fit(x_train, y_train)
gb.fit(x_train, y_train)
xgb.fit(x_train, y_train)
# Refit the tuned XGBoost model on the full training data for the final submission;
# it is named xgb_full so it is not shadowed by "import xgboost as xgb" below
xgb_full = XGBRegressor(
    n_estimators=250, max_depth=8, learning_rate=0.2031, n_jobs=-1, random_state=3
)
xgb_full.fit(X, y)
y_pred_lr = lr.predict(x_dev)
y_pred_ridge = ridge.predict(x_dev)
y_pred_lasso = lasso.predict(x_dev)
y_pred_dt = dt.predict(x_dev)
y_pred_svr = svr.predict(x_dev)
y_pred_rf = rf.predict(x_dev)
y_pred_gb = gb.predict(x_dev)
y_pred_xgb = xgb.predict(x_dev)
r2_score(y_train, gb.predict(x_train)), r2_score(y_dev, y_pred_gb)
from sklearn.model_selection import GridSearchCV
rf = RandomForestRegressor(random_state=3)
paras = [{"max_depth": [18, 19, 20, 21, 22], "n_estimators": [45, 50, 55, 60]}]
grid_search_rf = GridSearchCV(
estimator=rf, param_grid=paras, cv=5, scoring="neg_mean_squared_error"
)
grid_search_rf.fit(x_train, y_train)
# grid_search_rf.best_params_
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
xgb_model = xgb.XGBRegressor()
param_grid = {
# 'learning_rate': [0.01, 0.05, 0.1, 0.5],
"max_depth": [3, 5, 7, 10],
"n_estimators": [50, 100, 150, 200, 300, 500],
# 'subsample': [0.6, 0.8, 1.0],
# 'colsample_bytree': [0.6, 0.8, 1.0],
}
grid_search = GridSearchCV(
estimator=xgb_model,
param_grid=param_grid,
cv=5,
n_jobs=-1,
scoring="neg_mean_squared_error",
)
grid_search.fit(x_train, y_train)
print("Best parameters found: ", grid_search.best_params_)
print("Lowest RMSE found: ", np.sqrt(np.abs(grid_search.best_score_)))
best_xgb = grid_search.best_estimator_
from sklearn.metrics import mean_squared_error

y_pred_best = best_xgb.predict(x_dev)
mse_best_xgb = mean_squared_error(y_pred_best, y_dev)
mse_best_xgb
# gb = GradientBoostingRegressor(random_state=3)
# paras = [{"max_depth":[5,6,7],"n_estimators":[200,250],"learning_rate":[0.1,0.05]}]
# grid_search_gb = GridSearchCV(estimator=gb,param_grid=paras,cv=5,scoring="neg_mean_squared_error")
# grid_search_gb.fit(x_train,y_train)
# grid_search_gb.best_params_
from sklearn.metrics import mean_squared_error
mse_lr = mean_squared_error(y_pred_lr, y_dev)
mse_ridge = mean_squared_error(y_pred_ridge, y_dev)
mse_lasso = mean_squared_error(y_pred_lasso, y_dev)
mse_dt = mean_squared_error(y_pred_dt, y_dev)
mse_svr = mean_squared_error(y_pred_svr, y_dev)
mse_rf = mean_squared_error(y_pred_rf, y_dev)
mse_gb = mean_squared_error(y_pred_gb, y_dev)
mse_xgb = mean_squared_error(y_pred_xgb, y_dev)
# mse_automl=mean_squared_error(y_pred_auto,y_dev)
mse_lr, mse_ridge, mse_lasso, mse_dt, mse_svr, mse_rf
data_V = {
"Models": [
"Linear Regression",
"Ridge Regression",
"Lasso Regression",
"Decision Tree",
"Support Vector Regression",
"Random Forest",
"gb",
"xgb",
],
"MSE": [mse_lr, mse_ridge, mse_lasso, mse_dt, mse_svr, mse_rf, mse_gb, mse_xgb],
}
# data_V = {'Models': ['Linear Regression', 'Ridge Regression', 'Lasso Regression', 'Decision Tree', 'Support Vector Regression', 'Random Forest',"gb","xgb","auto"],
# 'MSE': [mse_lr, mse_ridge, mse_lasso, mse_dt, mse_svr, mse_rf,mse_gb,mse_xgb,mse_automl]}
df = pd.DataFrame(data_V)
df.sort_values(by="MSE", ascending=True, inplace=True)
df.reset_index(drop=True, inplace=True)
df
test = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
x_test = test.iloc[:, 1:]
x_test
x_train
# Predict on the test features with the full-data model; the target was log-transformed above,
# so invert the transform with np.exp before building the submission
y_pred = np.exp(xgb_full.predict(x_test))
y_pred
# ### Creating our submission
submission = pd.DataFrame.from_dict({"Completion_rate": y_pred})
submission
# submission['Completion_rate']
submission.to_csv("submission.csv", index=True, index_label="id")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **TASK 1 : CLASSIFICATION**
# USING ONE HOT ENCODING
import pandas as pd
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
df
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from imblearn.over_sampling import SMOTE
# Load the dataset
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
# Select relevant columns and encode categorical variables using One-Hot encoding
cat_cols = [
"OCCUPATION_TYPE",
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
]
df = pd.get_dummies(df, columns=cat_cols)
from sklearn.preprocessing import StandardScaler
num_cols = ["AMT_INCOME_TOTAL", "CNT_FAM_MEMBERS", "YEARS_BIRTH", "YEARS_EMPLOYED"]
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
df.drop(["STATUS"], axis=1), df["STATUS"], test_size=0.3, random_state=42
)
# Use SMOTE to over-sample the minority class
smote = SMOTE(random_state=42)
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
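# Quick sanity check (not in the original notebook): class balance before and after SMOTE.
print("Class counts before SMOTE:", y_train.value_counts().to_dict())
print("Class counts after SMOTE: ", pd.Series(y_train_smote).value_counts().to_dict())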
# create list of models to try
models = [
{"name": "Logistic Regression", "model": LogisticRegression(max_iter=200)},
{"name": "Decision Tree", "model": DecisionTreeClassifier()},
{
"name": "Random Forest - Tuned",
"model": RandomForestClassifier(
n_estimators=300, max_depth=16, min_samples_leaf=10, max_features=8
),
},
{
"name": "XGBoost - Tuned",
"model": XGBClassifier(n_estimators=300, learning_rate=0.1, max_depth=5),
},
]
# train and evaluate each model
for model in models:
clf = model["model"]
clf.fit(X_train_smote, y_train_smote)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
print(f"{model['name']} results:")
print(f"Accuracy: {accuracy}")
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1 Score: {f1}")
print()
y_train
# ### ***If the goal is to identify customers who are likely to default on their credit card bills in order to take proactive measures to prevent this from happening, you may want to prioritize recall over precision. This is because missing a customer who is likely to default could be more costly than incorrectly identifying a customer as likely to default when they actually won't.***
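# A minimal sketch (not in the original notebook) of one way to favour recall: lower the decision
# threshold applied to predicted probabilities instead of relying on the default 0.5 cut-off used
# by clf.predict(). Here `clf` is the last classifier fitted in the loop above.
from sklearn.metrics import precision_score, recall_score

proba = clf.predict_proba(X_test)[:, 1]
for threshold in (0.5, 0.3, 0.2):
    preds_at_threshold = (proba >= threshold).astype(int)
    print(
        f"threshold={threshold:.1f}  "
        f"precision={precision_score(y_test, preds_at_threshold):.3f}  "
        f"recall={recall_score(y_test, preds_at_threshold):.3f}"
    )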
df
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from imblearn.under_sampling import RandomUnderSampler
# Load the dataset
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
# Select relevant columns and encode categorical variables
cat_cols = [
"OCCUPATION_TYPE",
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
]
le = LabelEncoder()
for col in cat_cols:
df[col] = le.fit_transform(df[col])
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
df.drop(["STATUS"], axis=1), df["STATUS"], test_size=0.3, random_state=42
)
# undersample training set
undersampler = RandomUnderSampler(random_state=42)
X_train_under, y_train_under = undersampler.fit_resample(X_train, y_train)
# create list of models to try
models = [
{"name": "Logistic Regression", "model": LogisticRegression()},
{"name": "Decision Tree", "model": DecisionTreeClassifier()},
{
"name": "Random Forest - Tuned",
"model": RandomForestClassifier(
n_estimators=300, max_depth=16, min_samples_leaf=10, max_features=8
),
},
{
"name": "XGBoost - Tuned",
"model": XGBClassifier(n_estimators=300, learning_rate=0.1, max_depth=5),
},
]
# train and evaluate each model
for model in models:
clf = model["model"]
clf.fit(X_train_under, y_train_under)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
print(f"{model['name']} results:")
print(f"Accuracy: {accuracy}")
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1 Score: {f1}")
print()
# If logistic regression gives only 36% accuracy, it could be due to several reasons:
# - **The data may not be well separated by a linear boundary**, in which case logistic regression is not a suitable algorithm (see the quick check sketched below).
# - The data may be imbalanced, with one class having significantly fewer examples than the other, leading to low accuracy (*which is not the case here, since undersampling is applied*).
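# A hedged sketch (not in the original notebook): if the classes are not linearly separable,
# giving logistic regression a quadratic feature space is one quick way to test that hypothesis.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler

poly_lr = make_pipeline(
    PolynomialFeatures(degree=2, include_bias=False),
    StandardScaler(),
    LogisticRegression(max_iter=500),
)
poly_lr.fit(X_train_under, y_train_under)
print("Accuracy with quadratic features:", accuracy_score(y_test, poly_lr.predict(X_test)))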
# # **TASK 2: CLUSTERING**
# # RISK ASSESSMENT
# **Finding Most Important Features**
# Split the data into features and STATUS
X = df.drop(["STATUS"], axis=1)
y = df["STATUS"]
# Train a Random Forest classifier on the data
rfc = RandomForestClassifier(n_estimators=300, random_state=42)
rfc.fit(X, y)
# Get the feature importances
importances = rfc.feature_importances_
# Get the column names of the features
feature_names = X.columns.tolist()
# Combine the two into a dictionary
feature_importances = dict(zip(feature_names, importances))
# Sort the dictionary by the importance score
sorted_features = sorted(feature_importances.items(), key=lambda x: x[1], reverse=True)
# Print the top 10 features
print("Top 10 features:")
for feature, importance in sorted_features[:10]:
print(f"{feature}: {importance}")
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
# Select the relevant columns
cols = [
"YEARS_EMPLOYED",
"YEARS_BIRTH",
"AMT_INCOME_TOTAL",
"OCCUPATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_INCOME_TYPE",
"CNT_FAM_MEMBERS",
"NAME_EDUCATION_TYPE",
"FLAG_PHONE",
"CNT_CHILDREN",
"STATUS",
]
df = df[cols]
# One-hot encode OCCUPATION_TYPE; the other categorical columns listed in cat_cols below are left
# as-is here because only the occupation dummies appear in the top-feature list used later
cat_cols = [
"OCCUPATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
]
df = pd.get_dummies(df, prefix="OCCUPATION_TYPE_", columns=["OCCUPATION_TYPE"])
# Scale the numerical features
num_cols = [
"YEARS_EMPLOYED",
"YEARS_BIRTH",
"AMT_INCOME_TOTAL",
"CNT_FAM_MEMBERS",
"CNT_CHILDREN",
]
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
# Select the top 10 contributing features
top_features = [
"YEARS_EMPLOYED",
"YEARS_BIRTH",
"AMT_INCOME_TOTAL",
"OCCUPATION_TYPE__Laborers",
"OCCUPATION_TYPE__Sales staff",
"OCCUPATION_TYPE__Core staff",
"OCCUPATION_TYPE__Managers",
"OCCUPATION_TYPE__Drivers",
"OCCUPATION_TYPE__High skill tech staff",
"OCCUPATION_TYPE__Accountants",
"STATUS",
]
df = df[top_features]
# Fit KMeans clustering model
kmeans = KMeans(n_clusters=3, random_state=0).fit(df.drop(["STATUS"], axis=1))
# Visualize clusters
colors = {0: "red", 1: "blue", 2: "green"}
df["cluster"] = kmeans.labels_
df["color"] = df["cluster"].map(colors)
ax = df.plot.scatter(x="YEARS_EMPLOYED", y="AMT_INCOME_TOTAL", c=df["color"], alpha=0.5)
ax.set_xlabel("YEARS_EMPLOYED")
ax.set_ylabel("AMT_INCOME_TOTAL")
plt.show()
import pandas as pd
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# Drop irrelevant features
df.drop(
["FLAG_MOBIL", "FLAG_WORK_PHONE", "FLAG_PHONE", "FLAG_EMAIL"], axis=1, inplace=True
)
# Convert categorical features to numerical using Label Encoding
cat_cols = df.select_dtypes(include="object").columns.tolist()
le = LabelEncoder()
for col in cat_cols:
df[col] = le.fit_transform(df[col].astype(str))
# Scale the features
scaler = MinMaxScaler()
df[df.columns] = scaler.fit_transform(df[df.columns])
# Elbow method to find optimal number of clusters
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", random_state=42)
kmeans.fit(df)
wcss.append(kmeans.inertia_)
plt.figure(figsize=(8, 6))
sns.lineplot(x=range(1, 11), y=wcss, marker="o", color="blue")
plt.title("Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
# KMeans clustering with optimal number of clusters
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=42)
pred_clusters = kmeans.fit_predict(df)
# Visualize the clusters in 2D using PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
pca_df = pd.DataFrame(pca.transform(df), columns=["pca1", "pca2"])
pca_df["Cluster"] = pred_clusters
sns.scatterplot(x="pca1", y="pca2", hue="Cluster", data=pca_df, palette="Set1")
plt.title("Clustering Results")
plt.show()
df.dtypes
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# Load the dataset
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df.drop(
[
"Unnamed: 0",
"MONTHS_BALANCE",
"FLAG_MOBIL",
"FLAG_WORK_PHONE",
"FLAG_PHONE",
"FLAG_EMAIL",
],
axis=1,
inplace=True,
)
# Convert categorical features to numerical using Label Encoding
cat_cols = df.select_dtypes(include="object").columns.tolist()
le = LabelEncoder()
for col in cat_cols:
df[col] = le.fit_transform(df[col].astype(str))
# Scale the features
scaler = MinMaxScaler()
df[df.columns] = scaler.fit_transform(df[df.columns])
# Elbow method to find optimal number of clusters
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", random_state=42)
kmeans.fit(df)
wcss.append(kmeans.inertia_)
plt.figure(figsize=(8, 6))
sns.lineplot(x=range(1, 11), y=wcss, marker="o", color="blue")
plt.title("Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
# KMeans clustering with optimal number of clusters
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=42)
pred_clusters = kmeans.fit_predict(df)
df["Cluster"] = pred_clusters
# 3D scatter plot of clusters
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
xs = df["YEARS_EMPLOYED"]
ys = df["AMT_INCOME_TOTAL"]
zs = df["NAME_HOUSING_TYPE"]
c = df["Cluster"]
ax.scatter(xs, ys, zs, c=c, cmap="Set1")
ax.set_xlabel("YEARS_EMPLOYED")
ax.set_ylabel("AMT_INCOME_TOTAL")
ax.set_zlabel("NAME_HOUSING_TYPE")
plt.title("Clustering Results")
plt.show()
df
import pandas as pd
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# Drop irrelevant features
df.drop(
["FLAG_MOBIL", "FLAG_WORK_PHONE", "FLAG_PHONE", "FLAG_EMAIL"], axis=1, inplace=True
)
# Convert categorical features to numerical using Label Encoding
cat_cols = df.select_dtypes(include="object").columns.tolist()
le = LabelEncoder()
for col in cat_cols:
df[col] = le.fit_transform(df[col].astype(str))
# Scale the features
scaler = MinMaxScaler()
df[df.columns] = scaler.fit_transform(df[df.columns])
df = df[["YEARS_EMPLOYED", "AMT_INCOME_TOTAL"]]
# Elbow method to find optimal number of clusters
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", random_state=42)
kmeans.fit(df)
wcss.append(kmeans.inertia_)
plt.figure(figsize=(8, 6))
sns.lineplot(x=range(1, 11), y=wcss, marker="o", color="blue")
plt.title("Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
# KMeans clustering with optimal number of clusters
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=42)
pred_clusters = kmeans.fit_predict(df)
# Visualize the clusters in 2D using PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
pca_df = pd.DataFrame(pca.transform(df), columns=["pca1", "pca2"])
pca_df["Cluster"] = pred_clusters
sns.scatterplot(x="pca1", y="pca2", hue="Cluster", data=pca_df, palette="Set1")
plt.title("Clustering Results")
plt.show()
cluster_df = pd.DataFrame(df[["YEARS_EMPLOYED", "AMT_INCOME_TOTAL"]])
cluster_df["Cluster"] = pred_clusters
cluster_means = cluster_df.groupby("Cluster").mean()
print(cluster_means)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# Load data
df = pd.read_csv("/kaggle/input/cleaned-dataset/Data-Cleaned.csv")
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("MONTHS_BALANCE", axis=1)
# Drop irrelevant features
df.drop(
["FLAG_MOBIL", "FLAG_WORK_PHONE", "FLAG_PHONE", "FLAG_EMAIL"], axis=1, inplace=True
)
# Convert categorical features to numerical using Label Encoding
cat_cols = df.select_dtypes(include="object").columns.tolist()
le = LabelEncoder()
for col in cat_cols:
df[col] = le.fit_transform(df[col].astype(str))
# Extract relevant features
df = df[["YEARS_EMPLOYED", "AMT_INCOME_TOTAL", "NAME_HOUSING_TYPE"]]
# Fit scaler on original data and transform data
scaler = MinMaxScaler()
df_scaled = scaler.fit_transform(df)
# Elbow method to find optimal number of clusters
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", random_state=42)
kmeans.fit(df_scaled)
wcss.append(kmeans.inertia_)
plt.figure(figsize=(8, 6))
sns.lineplot(x=range(1, 11), y=wcss, marker="o", color="blue")
plt.title("Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
# KMeans clustering with optimal number of clusters
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=42)
pred_clusters = kmeans.fit_predict(df_scaled)
# Inverse transform scaled data to get original data
df_orig = scaler.inverse_transform(df_scaled)
# Compute mean of original data for each cluster
df_means = pd.DataFrame(df_orig, columns=df.columns)
df_means["Cluster"] = pred_clusters
df_means = df_means.groupby("Cluster").mean()
# df_means is already on the original scale (df_orig was obtained with scaler.inverse_transform
# above), so no further rescaling is needed before printing
# Print the results
print(df_means)
df
# Print the mean of each cluster for the original data
cluster_means = df.groupby(pred_clusters).mean()
print(cluster_means)
df["Cluster"] = pred_clusters
# Group the data by cluster and find the mode of each categorical variable
cat_cols = ["NAME_HOUSING_TYPE"]
cat_mode = df.groupby("Cluster")[cat_cols].apply(lambda x: x.mode().iloc[0])
# Print the mode of each categorical variable for each cluster
print(cat_mode)
|
def attendance(attendance):
if attendance > 80:
return "Good"
elif attendance > 60:
return "Ok"
else:
return "Need improvement"
s = int(input("Enter attendance"))
attendance(s)
l1 = [23, 67, 34, 90, 95]
for i in l1:
print(attendance(i))
list(map(attendance, l1))
# set(map(attendance, l1))
l2 = [23, 64, 78, 45, 78]
l3 = [34, 7, 36, 9, 45]
result = list(map(lambda x, y: x - y, l2, l3))
result
|
# Ml modules
import sklearn
# common modules imports
import numpy as np
import pandas as pd
# for visualizations
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
mpl.rc("axes", labelsize=14)
mpl.rc("xtick", labelsize=12)
mpl.rc("ytick", labelsize=12)
# DATASET PATH
TRAIN_PATH = r"/kaggle/input/playground-series-s3e12/train.csv"
TEST_PATH = r"/kaggle/input/playground-series-s3e12/test.csv"
SAMPLE_SUB = r"/kaggle/input/playground-series-s3e12/sample_submission.csv"
# # Load data
train_df = pd.read_csv(TRAIN_PATH, index_col=["id"])
test_df = pd.read_csv(TEST_PATH, index_col=["id"])
sample_sub_df = pd.read_csv(SAMPLE_SUB, index_col=["id"])
features = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
target_feature = ["target"]
# # Exploratory Data Analysis
# ## About The Data and features
# The goal is to predict the presence of kidney stones from urine analysis: the urine specimens were analyzed to determine whether certain physical characteristics of the urine are related to the formation of calcium oxalate crystals.
# - The `six physical` characteristics/`features` of the urine are:
#
#     - (1) `specific gravity`, the density of the urine relative to water.
#     - (2) `pH`, the negative logarithm of the hydrogen ion concentration.
#     - (3) `osmolarity (mOsm)`, a unit used in biology and medicine but not in physical chemistry. Osmolarity is proportional to the concentration of molecules in solution.
#     - (4) `conductivity (mMho, milliMho)`, where one Mho is one reciprocal Ohm. Conductivity is proportional to the concentration of charged ions in solution.
#     - (5) `urea concentration in millimoles per litre.`
#     - (6) `calcium concentration (CALC) in millimoles per litre.`
# # More information about features
# - `Specific gravity` is a measure of the density of a substance compared to the density of water.
# In the context of urine and kidney stones, specific gravity is used as a diagnostic tool to evaluate the concentration of solutes in the urine.
# When a person has kidney stones, the concentration of solutes in their urine can be high, leading to a higher specific gravity.
# A specific gravity value above 1.020 is considered high and may indicate the presence of kidney stones or other urinary tract problems.
# - `pH` of urine is a measure of its acidity or alkalinity. In the context of kidney stones, urine pH is an important factor as it can affect the formation of different types of kidney stones.
# Most kidney stones are formed from calcium oxalate, which tends to form in acidic urine. Therefore, if the urine pH is too acidic (less than 5.5), it can increase the risk of calcium oxalate stone formation. On the other hand, if the urine pH is too alkaline (greater than 7.2), it can increase the risk of calcium phosphate stone formation.
# Urinary tract infections (UTIs) can also affect urine pH. UTIs can increase the pH of urine, making it more alkaline, which can increase the risk of struvite stone formation. Therefore, measuring urine pH can be helpful in determining the type of kidney stone a person is likely to form and can help in devising preventive strategies.
# - `Osmolarity` is a measure of the concentration of solutes in a solution. It can provide information about the concentration of solutes that can contribute to stone formation. High osmolarity in urine means that there are higher amounts of solutes, such as calcium, oxalate, and uric acid, which can lead to the formation of kidney stones. In contrast, low osmolarity indicates that the urine is more dilute and contains fewer solutes, which may reduce the risk of stone formation.
# - `conductivity` of urine refers to the concentration of dissolved ions in the urine. Conductivity can be used as a diagnostic tool to determine the presence of certain types of stones. For example, calcium-based stones tend to be highly conductive, while other types of stones, such as uric acid stones, are less conductive.
# - `Urea` is a waste product that is produced by the liver during the breakdown of proteins and is excreted in the urine. Measuring the concentration of urea in the urine can provide information about the solute concentration, which can contribute to the formation of kidney stones. High urea concentration in urine can indicate dehydration or a high protein diet, both of which can increase the risk of stone formation. However, low urea concentration may also indicate certain medical conditions, such as liver disease or low protein intake, which can affect the formation of kidney stones.
# - `concentration of calcium` in the urine can provide information about the risk of stone formation. Most kidney stones are made up of calcium oxalate, and high levels of calcium in the urine can increase the risk of stone formation. However, low levels of calcium in the urine can also increase the risk of stone formation, as it can lead to an increase in oxalate levels, which can contribute to stone formation.
# Taking a look at data
train_df.head(5)
train_df.isnull().any()
train_df.info()
# The data doesn't contain any null values and all features are numeric
#
train_df.describe()
train_df[train_df.target == 0].describe()
train_df[train_df.target == 1].describe()
def plot_numerical_data(X, hue):
fig, axes = plt.subplots(1, 2, figsize=(15, 4))
sns.histplot(ax=axes[0], x=X, hue=hue, data=train_df, element="step", kde=True)
sns.boxplot(ax=axes[1], x=hue, y=X, hue=hue, data=train_df)
plot_numerical_data("gravity", "target")
# From the density and box plots we can see that `specific gravity` above 1.020 shows an increase in positives for kidney stones and a drop in negatives, which suggests that `high specific gravity (above 1.020) can indicate the presence of a kidney stone, but on its own it cannot confirm it`
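# Quick numeric check of the claim above (not in the original notebook):
# positive rate on either side of the 1.020 specific-gravity cut-off.
high_gravity = train_df["gravity"] > 1.020
print("Positive rate, gravity >  1.020:", round(train_df.loc[high_gravity, "target"].mean(), 3))
print("Positive rate, gravity <= 1.020:", round(train_df.loc[~high_gravity, "target"].mean(), 3))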
plot_numerical_data("ph", "target")
def print_mean(col):
    # Mean and standard deviation of the column for the negative and positive classes
    print(
        "-ve mean & std:",
        train_df[col][train_df.target == 0].mean(),
        "+-",
        train_df[col][train_df.target == 0].std(),
    )
    print(
        "+ve mean & std:",
        train_df[col][train_df.target == 1].mean(),
        "+-",
        train_df[col][train_df.target == 1].std(),
    )
print_mean("ph")
# The distributions of negatives and positives are similar in this dataset
# The `normal pH` of urine should ideally be `around 6.0 to 7.5`; if the urine pH is `too acidic` (`less than 5.5`) `or` if it is `too alkaline` `(greater than 7.2)`, then there is a higher probability of kidney stones being present
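# Quick numeric check of the pH ranges mentioned above (not in the original notebook).
ph_bands = pd.cut(
    train_df["ph"],
    bins=[0, 5.5, 7.2, 14],
    labels=["acidic (<5.5)", "normal (5.5-7.2)", "alkaline (>7.2)"],
)
print(train_df.groupby(ph_bands)["target"].mean())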
plot_numerical_data("osmo", "target")
print_mean("osmo")
# `High osmolarity` of around 800 or above can `indicate the presence of kidney stones`, whereas `low osmolarity` `reduces the chances of kidney stones`
plot_numerical_data("cond", "target")
# Low conductivity reduces the chances of kidney stones, whereas high conductivity can point to the presence of kidney stones
plot_numerical_data("urea", "target")
print_mean("urea")
plot_numerical_data("calc", "target")
print_mean("calc")
# With a `high concentration of calcium` there is a `higher probability of kidney stones` being present, whereas a `low concentration of calcium reduces the risk of kidney stones`
# # Correlation
corr = train_df.corr(method="pearson")
matrix = np.triu(corr)
sns.heatmap(corr, annot=True, cmap="Blues", mask=matrix)
# - osmo and urea show a very strong correlation
# - osmo & gravity and osmo & cond show a fairly strong correlation.
# - Apart from that, ph shows a negative correlation to the target (sorted values below)
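# Correlations with the target, sorted, to back up the notes above.
print(corr["target"].drop("target").sort_values(ascending=False))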
# # Scaling and Model Training
tr = train_df.copy()
data = tr.drop(columns=["target"])
target = tr["target"]
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
import xgboost
from sklearn.metrics import roc_auc_score
from colorama import Fore
from sklearn.metrics import confusion_matrix
data_pipline = Pipeline([("scaler", StandardScaler())])
X = data
Y = target
split = StratifiedKFold(random_state=42, shuffle=True)
Fold = 0
# # Logistic Regression
from sklearn.linear_model import LogisticRegression
lg_train_score = []
lg_val_score = []
lg_model_list = []
for train_index, eval_index in split.split(X, Y):
Fold = Fold + 1
x_train, y_train = X.iloc[train_index], Y.iloc[train_index]
x_eval, y_eval = X.iloc[eval_index], Y.iloc[eval_index]
model = LogisticRegression()
X_train_prepared = data_pipline.fit_transform(x_train)
    X_eval_prepared = data_pipline.transform(x_eval)  # transform only: the scaler is fitted on the training fold
print(f"Fold :{Fold}")
model.fit(X_train_prepared, y_train)
train_pred = roc_auc_score(y_train, model.predict_proba(X_train_prepared)[:, 1])
eval_pred = roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1])
lg_train_score.append(train_pred)
lg_val_score.append(eval_pred)
lg_model_list.append([data_pipline, model])
print(f"Val roc AUC score: {Fore.BLUE} {eval_pred}")
print()
print(f"{Fore.BLACK} Mean Val roc scores : {Fore.GREEN}{np.mean(lg_val_score)}")
from sklearn.metrics import classification_report
pred = model.predict(X_eval_prepared)
print("confusion matrix :\n ", confusion_matrix(y_eval, pred))
print(
"roc auc score : ",
roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1]),
)
print("classification report : \n", classification_report(y_eval, pred))
# # SVC
from sklearn.svm import SVC
svc_train_score = []
svc_val_score = []
svc_model_list = []
Fold = 0
for train_index, eval_index in split.split(X, Y):
Fold = Fold + 1
x_train, y_train = X.iloc[train_index], Y.iloc[train_index]
x_eval, y_eval = X.iloc[eval_index], Y.iloc[eval_index]
model = SVC(probability=True)
X_train_prepared = data_pipline.fit_transform(x_train)
    X_eval_prepared = data_pipline.transform(x_eval)
print(f"Fold :{Fold}")
model.fit(X_train_prepared, y_train)
train_pred = roc_auc_score(y_train, model.predict_proba(X_train_prepared)[:, 1])
eval_pred = roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1])
svc_train_score.append(train_pred)
svc_val_score.append(eval_pred)
svc_model_list.append([data_pipline, model])
print(f"{Fore.BLACK}Val roc AUC score: {Fore.BLUE} {eval_pred}")
print()
print(f"{Fore.BLACK}Mean Val roc scores : {Fore.GREEN}{np.mean(svc_val_score)}")
from sklearn.metrics import classification_report
pred = model.predict(X_eval_prepared)
print("confusion matrix :\n ", confusion_matrix(y_eval, pred))
print(
"roc auc score : ",
roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1]),
)
print("classification report : \n", classification_report(y_eval, pred))
# # Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
rf_train_score = []
rf_val_score = []
rf_model_list = []
Fold = 0
for train_index, eval_index in split.split(X, Y):
Fold = Fold + 1
x_train, y_train = X.iloc[train_index], Y.iloc[train_index]
x_eval, y_eval = X.iloc[eval_index], Y.iloc[eval_index]
model = RandomForestClassifier()
X_train_prepared = data_pipline.fit_transform(x_train)
    X_eval_prepared = data_pipline.transform(x_eval)
print(f"Fold :{Fold}")
model.fit(X_train_prepared, y_train)
train_pred = roc_auc_score(y_train, model.predict_proba(X_train_prepared)[:, 1])
eval_pred = roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1])
rf_train_score.append(train_pred)
rf_val_score.append(eval_pred)
rf_model_list.append([data_pipline, model])
print(f"{Fore.BLACK} Val roc AUC score: {Fore.BLUE} {eval_pred}")
print()
print(f"{Fore.BLACK} Mean Val roc scores : {Fore.GREEN}{np.mean(rf_val_score)}")
from sklearn.metrics import classification_report
pred = model.predict(X_eval_prepared)
print("confusion matrix :\n ", confusion_matrix(y_eval, pred))
print(
"roc auc score : ",
roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1]),
)
print("classification report : \n", classification_report(y_eval, pred))
# # AdaBoostClassifier
from sklearn.ensemble import AdaBoostClassifier
ab_train_score = []
ab_val_score = []
ab_model_list = []
Fold = 0
for train_index, eval_index in split.split(X, Y):
Fold = Fold + 1
x_train, y_train = X.iloc[train_index], Y.iloc[train_index]
x_eval, y_eval = X.iloc[eval_index], Y.iloc[eval_index]
model = AdaBoostClassifier()
X_train_prepared = data_pipline.fit_transform(x_train)
    X_eval_prepared = data_pipline.transform(x_eval)
print(f"Fold :{Fold}")
model.fit(X_train_prepared, y_train)
train_pred = roc_auc_score(y_train, model.predict_proba(X_train_prepared)[:, 1])
eval_pred = roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1])
ab_train_score.append(train_pred)
ab_val_score.append(eval_pred)
ab_model_list.append([data_pipline, model])
print(f"{Fore.BLACK} Val roc AUC score: {Fore.BLUE} {eval_pred}")
print()
print(f"{Fore.BLACK} Mean Val roc scores : {Fore.GREEN}{np.mean(ab_val_score)}")
from sklearn.metrics import classification_report
pred = model.predict(X_eval_prepared)
print("confusion matrix :\n ", confusion_matrix(y_eval, pred))
print(
"roc auc score : ",
roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1]),
)
print("classification report : \n", classification_report(y_eval, pred))
# # xgboost
import xgboost
xg_train_score = []
xg_val_score = []
xg_model_list = []
Fold = 0
for train_index, eval_index in split.split(X, Y):
Fold = Fold + 1
x_train, y_train = X.iloc[train_index], Y.iloc[train_index]
x_eval, y_eval = X.iloc[eval_index], Y.iloc[eval_index]
model = xgboost.XGBClassifier()
X_train_prepared = data_pipline.fit_transform(x_train)
    X_eval_prepared = data_pipline.transform(x_eval)
print(f"Fold :{Fold}")
model.fit(X_train_prepared, y_train)
train_pred = roc_auc_score(y_train, model.predict_proba(X_train_prepared)[:, 1])
eval_pred = roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1])
xg_train_score.append(train_pred)
xg_val_score.append(eval_pred)
xg_model_list.append([data_pipline, model])
print(f"{Fore.BLACK} Val roc AUC score: {Fore.BLUE} {eval_pred}")
print("---")
print(f"{Fore.BLACK} Mean Val roc scores : {Fore.GREEN}{np.mean(xg_val_score)}")
from sklearn.metrics import classification_report
pred = model.predict(X_eval_prepared)
print("confusion matrix :\n ", confusion_matrix(y_eval, pred))
print(
"roc auc score : ",
roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1]),
)
print("classification report : \n", classification_report(y_eval, pred))
# # Voting Ensemble
from sklearn.ensemble import VotingClassifier
classifiers = [
("lr", lg_model_list[4][1]),
("svc", svc_model_list[4][1]),
("rf_clf", rf_model_list[4][1]),
("ada", ab_model_list[4][1]),
("xg", xg_model_list[4][1]),
]
vot_train_score = []
vot_val_score = []
vot_model_list = []
Fold = 0
for train_index, eval_index in split.split(X, Y):
Fold = Fold + 1
x_train, y_train = X.iloc[train_index], Y.iloc[train_index]
x_eval, y_eval = X.iloc[eval_index], Y.iloc[eval_index]
model = voting_clf = VotingClassifier(
estimators=classifiers, verbose=True, n_jobs=-1, voting="soft"
)
X_train_prepared = data_pipline.fit_transform(x_train)
    X_eval_prepared = data_pipline.transform(x_eval)
print(f"Fold :{Fold}")
model.fit(X_train_prepared, y_train)
train_pred = roc_auc_score(y_train, model.predict_proba(X_train_prepared)[:, 1])
eval_pred = roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1])
vot_train_score.append(train_pred)
vot_val_score.append(eval_pred)
vot_model_list.append([data_pipline, model])
print(f"{Fore.BLACK} Val roc AUC score: {Fore.BLUE} {eval_pred}")
print("---")
print(f"{Fore.BLACK} Mean Val roc scores : {Fore.GREEN}{np.mean(vot_val_score)}")
from sklearn.metrics import classification_report
pred = model.predict(X_eval_prepared)
print("confusion matrix :\n ", confusion_matrix(y_eval, pred))
print(
"roc auc score : ",
roc_auc_score(y_eval, model.predict_proba(X_eval_prepared)[:, 1]),
)
print("classification report : \n", classification_report(y_eval, pred))
# # Test Data Predictions and submission
test_data = data_pipline.transform(test_df)
sample_sub_df.target = vot_model_list[4][1].predict_proba(test_data)[0:, 1]
sample_sub_df.to_csv("submission.csv")
|
# # **Imports**
# Bellow are all the imports used in the **Notebook**.
# Common
import keras
from glob import glob
from tqdm import tqdm
import tensorflow as tf
from numpy import zeros, random
# Data
from tensorflow.image import resize
from keras.preprocessing.image import load_img, img_to_array
# Data viz
import matplotlib.pyplot as plt
# Model
from keras.models import Model, Sequential, load_model
from keras.layers import (
Conv2D,
Conv2DTranspose,
concatenate,
MaxPool2D,
Dropout,
BatchNormalization,
Layer,
Input,
add,
multiply,
UpSampling2D,
)
# Model Viz
from tensorflow.keras.utils import plot_model
# Callback
from keras.callbacks import Callback
# Torch
import torch
import torch.nn as nn
import torch.nn.functional as F
# # **Data**
# The **foremost** thing that we need to accomplish is to **load the data**.
def load_image(path):
img = resize(img_to_array(load_img(path)) / 255.0, (256, 256))
return img
# This function takes in the **path of the image** and loads it using **Keras functions**.
root_path = "../input/butterfly-dataset/leedsbutterfly/images/"
image_paths = sorted(glob(root_path + f"*.png"))
mask_paths = []
for path in image_paths:
mask_path = path.replace("images", "segmentations")
mask_path = mask_path.replace(".png", "_seg0.png")
mask_paths.append(mask_path)
print(f"Total Number of Images : {len(image_paths)}")
# This way, by **replacing the text** from the **path** we can easily get the **exact segmentation mask** for a **particular image**.
images = zeros(shape=(len(image_paths), 256, 256, 3))
masks = zeros(shape=(len(image_paths), 256, 256, 3))
for n, (img_path, mask_path) in tqdm(
enumerate(zip(image_paths, mask_paths)), desc="Loading"
):
images[n] = load_image(img_path)
masks[n] = load_image(mask_path)
# Now, our **images and masks** are loaded. It's time to **visualize them** so that we can gain some insights about the data.
# # **Data Visualization**
def show_image(image, title=None, alpha=1):
plt.imshow(image, alpha=alpha)
plt.title(title)
plt.axis("off")
# The **function below** will **plot the masks** for us **in several variations**, and we can also use it inside a **callback**.
def show_mask(GRID, fig_size=(8, 20), model=None, join=False, alpha=0.5):
# Config the GRID
n_rows, n_cols = GRID
n_images = n_rows * n_cols
n = 1
plt.figure(figsize=fig_size)
for i in range(1, n_images + 1):
if model is None:
if join:
                # Select a random image and mask
id = random.randint(len(images))
image, mask = images[id], masks[id]
# plot the Mask over the Image
plt.subplot(n_rows, n_cols, i)
show_image(image)
show_image(mask, alpha=alpha)
else:
if i % 2 == 0:
plt.subplot(n_rows, n_cols, i)
show_image(mask)
else:
                    # Select a random image and mask
id = random.randint(len(images))
image, mask = images[id], masks[id]
# Plot Image
plt.subplot(n_rows, n_cols, i)
show_image(image)
else:
if join:
if i % 2 == 0:
# plot the Mask over the Image
plt.subplot(n_rows, n_cols, i)
show_image(image)
show_image(pred_mask, alpha=alpha, title="Predicted Mask")
else:
                    # Select a random image and mask
                    id = random.randint(len(images))
                    image, mask = images[id], masks[id]
                    pred_mask = model.predict(tf.expand_dims(image, axis=0))[0]
                    # Binarize the predicted mask (assignment, not comparison)
                    pred_mask[pred_mask > 0.5] = 1
                    pred_mask[pred_mask <= 0.5] = 0
# plot the Mask over the Image
plt.subplot(n_rows, n_cols, i)
show_image(image)
show_image(mask, alpha=alpha, title="Original Mask")
else:
if n == 1:
                    # Select a random image and mask
                    id = random.randint(len(images))
                    image, mask = images[id], masks[id]
                    pred_mask = model.predict(tf.expand_dims(image, axis=0))[0]
                    # Binarize the predicted mask (assignment, not comparison)
                    pred_mask[pred_mask > 0.5] = 1
                    pred_mask[pred_mask <= 0.5] = 0
# plot the Mask over the Image
plt.subplot(n_rows, n_cols, i)
show_image(image, title="Original Image")
n += 1
elif n == 2:
# plot the Mask over the Image
plt.subplot(n_rows, n_cols, i)
show_image(mask, title="Original Mask")
n += 1
elif n == 3:
# plot the Mask over the Image
plt.subplot(n_rows, n_cols, i)
show_image(pred_mask, title="Predicted Mask")
n = 1
plt.show()
GRID = [5, 4]
show_mask(GRID, fig_size=(15, 20))
# This can be a **tough task** for the model because the **image background** contains **a lot of objects**. Thus, using an **Attention UNet** would be a good idea.
GRID = [5, 4]
show_mask(GRID, fig_size=(15, 20), join=True)
# Plotting the mask over the image gives us a **better visualization**.
# # **Attention UNet - Encoder**
# * The **main task** of the **Encoder** is to **downsample the images** by a **factor of 2**, but at the same time **learn the features** present in the image.
# * The idea behind encoder is that it will gradually learn all the **useful features** and preserve them in a **latent representation**, which is present in the **last encoding layer**.
# * A **small amount of dropout** is also added between the **convolutional layers** in the encoder so that each **layer is forced to learn the most useful features**.
class Encoder(Layer):
def __init__(self, filters, rate, pooling=True, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.filters = filters
self.rate = rate
self.pooling = pooling
self.c1 = Conv2D(
filters,
kernel_size=3,
strides=1,
padding="same",
kernel_initializer="he_normal",
activation="relu",
)
self.drop = Dropout(rate)
self.c2 = Conv2D(
filters,
kernel_size=3,
strides=1,
padding="same",
kernel_initializer="he_normal",
activation="relu",
)
self.pool = MaxPool2D()
def call(self, X):
x = self.c2(self.drop(self.c1(X)))
if self.pooling:
y = self.pool(x)
return y, x
else:
return x
def get_config(self):
base_config = super().get_config()
return {
**base_config,
"filters": self.filters,
"rate": self.rate,
"pooling": self.pooling,
}
# class Encoder(nn.Module):
# def __init__(self, filters, rate, pooling=True):
# super(Encoder, self).__init__()
# self.filters = filters
# self.rate = rate
# self.pooling = pooling
# self.c1 = nn.Conv2d(3, filters, kernel_size=3, stride=1, padding=1)
# self.drop = nn.Dropout(rate)
# self.c2 = nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)
# self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
# def call(self, x):
# x = F.relu(self.c1(x))
# x = self.drop(x)
# x = F.relu(self.c2(x))
# if self.pooling:
# y = self.pool(x)
# return y, x
# else:
# return x
# def get_config(self):
# return {
# "filters":self.filters,
# "rate":self.rate,
# "pooling":self.pooling,
# }
# # **Attention UNet - Decoder**
# * The **decoder** is just the **opposite** of the **encoder** in terms of **functioning** because it **Upsamples** the **input images** or the **input feature maps** by a **factor of 2**.
# * The input to **the decoder** are the **latent representations** learned by the encoder. This means the **decoder** has access only to the **most useful features** and it **tries to replicate the segmentation mask** from these features.
# * One **major reason** behind the **success of the UNet architecture** is the **skip connections** from the **encoder to the decoder layers**. They allow the **decoder to recover** the **spatial information** present in the original image.
class Decoder(Layer):
def __init__(self, filters, rate, **kwargs):
super(Decoder, self).__init__(**kwargs)
self.filters = filters
self.rate = rate
self.cT = Conv2DTranspose(
filters,
kernel_size=3,
strides=2,
padding="same",
kernel_initializer="he_normal",
activation="relu",
)
self.net = Encoder(filters, rate, pooling=False)
def call(self, X):
x, skip_x = X
x = self.cT(x)
c = concatenate([x, skip_x])
f = self.net(c)
return f
def get_config(self):
base_config = super().get_config()
return {**base_config, "filters": self.filters, "rate": self.rate}
# class Decoder(nn.Module):
# def __init__(self, filters, rate):
# super(Decoder, self).__init__()
# self.filters = filters
# self.rate = rate
# self.cT = nn.ConvTranspose2d(filters, filters, kernel_size=3, stride=2, padding=1, output_padding=1)
# self.net = Encoder(filters, rate, pooling=False)
# def call(self, x):
# x = self.c2(self.drop(self.c1(x)))
# if self.pooling:
# y = self.pool(x)
# return y, x
# else:
# return x
# def get_config(self):
# return {
# "filters": self.filters,
# "rate": self.rate
# }
# # **Attention UNet - Attention Gate**
# The **idea behind the attention gate** is to add a **particular gate or a layer** between the **skip connections** so that the **skip connections can be refined** and only the **most important spatial information is fed to the decoder**.
class AttentionGate(Layer):
def __init__(self, filters, **kwargs):
super(AttentionGate, self).__init__(**kwargs)
self.filters = filters
self.normal = Conv2D(
filters,
kernel_size=3,
strides=1,
padding="same",
kernel_initializer="he_normal",
activation="relu",
)
self.down = Conv2D(
filters,
kernel_size=3,
strides=2,
padding="same",
kernel_initializer="he_normal",
activation="relu",
)
self.learn = Conv2D(1, kernel_size=1, strides=1, activation="sigmoid")
self.resample = UpSampling2D()
def call(self, X):
x, skip_x = X
x = self.normal(x)
skip = self.down(skip_x)
a = add([x, skip])
l = self.learn(a)
l = self.resample(l)
f = multiply([l, skip_x])
return f
def get_config(self):
base_config = super().get_config()
return {**base_config, "filters": self.filters}
# class AttentionGate(nn.Module):
# def __init__(self, filters):
# super(AttentionGate, self).__init__()
# self.filters = filters
# self.normal = nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)
# self.down = nn.Conv2d(filters, filters, kernel_size=3, stride=2, padding=1)
# self.learn = nn.Conv2d(filters, 1, kernel_size=1, stride=1)
# self.resample = nn.Upsample(scale_factor=2, mode='nearest')
# def call(self, X):
# x, skip_x = X
# x = self.normal(x)
# skip = self.down(skip_x)
# a = x + skip
# l = torch.sigmoid(self.learn(a))
# l = self.resample(l)
# f = l * skip_x
# return f
# def get_config(self):
# return {"filters": self.filters}
# # **Attention UNet**
# So the **Encoder, Decoder and Attention Gate** are ready. It's time to combine them to complete our **Attention UNet** architecture.
# Inputs
image_input = Input(shape=(256, 256, 3), name="InputImage")
# Encoder Phase
p1, c1 = Encoder(32, 0.1, name="EncoderBlock1")(image_input)
p2, c2 = Encoder(64, 0.1, name="EncoderBlock2")(p1)
p3, c3 = Encoder(128, 0.2, name="EncoderBlock3")(p2)
p4, c4 = Encoder(256, 0.2, name="EncoderBlock4")(p3)
# Latent Representation
encoding = Encoder(512, 0.3, pooling=False, name="Encoding")(p4)
# Decoder + Attention Phase
a1 = AttentionGate(256, name="Attention1")([encoding, c4])
d1 = Decoder(256, 0.2, name="DecoderBlock1")([encoding, a1])
a2 = AttentionGate(128, name="Attention2")([d1, c3])
d2 = Decoder(128, 0.2, name="DecoderBlock2")([d1, a2])
a3 = AttentionGate(64, name="Attention3")([d2, c2])
d3 = Decoder(64, 0.2, name="DecoderBlock3")([d2, a3])
a4 = AttentionGate(32, name="Attention4")([d3, c1])
d4 = Decoder(32, 0.1, name="DecoderBlock4")([d3, a4])
# Output Layer
mask_out = Conv2D(
3, kernel_size=1, strides=1, activation="sigmoid", padding="same", name="MaskOut"
)(d4)
# Model
att_unet = Model(inputs=[image_input], outputs=[mask_out], name="AttentionUNet")
# Compile
att_unet.compile(loss="binary_crossentropy", optimizer="adam")
# # **Attention UNet - Visualization**
# plot_model(att_unet, "AttentionUNet.png", show_shapes=True)
# # **Custom Callback**
# It will be a **good idea to visualize the model's performance after each epoch.**
class ShowProgress(Callback):
def on_epoch_end(self, epoch, logs=None):
show_mask(GRID=[1, 1], model=self.model, join=False, fig_size=(20, 8))
self.model.save("AttentionUnet.h5")
# # **Training**
# **Training Attention UNet** is simple. Just train it like we train other models.
att_unet.fit(images, masks, validation_split=0.1, epochs=20, callbacks=[ShowProgress()])
# # **Evaluation**
att_unet = load_model(
"../input/attention-unet-butterfly-segmentation/AttentionUnet.h5",
custom_objects={
"Encoder": Encoder,
"Decoder": Decoder,
"AttentionGate": AttentionGate,
},
)
show_mask(GRID=[10, 6], model=att_unet, join=False, fig_size=(20, 30))
show_mask(GRID=[10, 6], model=att_unet, join=True, fig_size=(20, 30), alpha=0.8)
|
# # Using a Pretrained VGG16 to classify retinal damage from OCT Scans
# This notebook is inspired by these pages:
# - [VGG16 Transfer Learning \- Pytorch \| Kaggle](https://www.kaggle.com/code/carloalbertobarbano/vgg16-transfer-learning-pytorch)
# - [IgorSusmelj/pytorch\-styleguide: An unofficial styleguide and best practices summary for PyTorch](https://github.com/IgorSusmelj/pytorch-styleguide)
# ## Import
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
import torchvision
from torchvision import datasets, models, transforms
from torchinfo import summary
import matplotlib.pyplot as plt
import time
import os
import copy
from pathlib import Path
plt.ion()
use_gpu = torch.cuda.is_available()
if use_gpu:
print("Using CUDA")
# - `torch.backends.cudnn.benchmark = True` enables cudnn auto-tuner to find the best algorithm to use for your hardware configuration
# - `np.random.seed(1)` sets the seed of the NumPy pseudo-random number generator to 1, which makes the same sequences of random numbers reproducible across runs.
# - `torch.manual_seed(1)` sets the seed for generating random tensors in the CPU by PyTorch.
# - `torch.cuda.manual_seed(1)` sets the seed for generating random tensors in the GPU by PyTorch.
# set flags / seeds
torch.backends.cudnn.benchmark = True
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# ## Prepare Dataset and Dataloader
# ### Setting data directory
# - `data_dir` holds a path to the data directory.
# - `TRAIN`, `VAL` and `TEST` hold string values for three different subsets of data.
data_dir = "../input/kermany2018/oct2017/OCT2017 "
TRAIN = "train"
VAL = "val"
TEST = "test"
# ### Setting transforms
# For train:
# - augmenting by randomly cropping and flipping the images to improve the model's ability to generalize to new data
# - converting images to pytorch tensors
# For val and test:
# - resizing the images to 224x224 pixels to fit the VGG input size
# - converting images to pytorch tensors
# VGG-16 Takes 224x224 images as input, so we resize all of them
data_transforms = {
TRAIN: transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
),
VAL: transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
]
),
TEST: transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
]
),
}
# ### Setting datasets and dataloaders
# - datasets
# - `datasets.ImageFolder()` expects a directory containing subdirectories of images (one subdirectory per class), and creates a dataset where each image is paired with its corresponding label.
# - `transform` argument is passed to specify image transformations to be applied defined above.
# - The three datasets (TRAIN, VAL and TEST) are stored in a dictionary.
# - dataloaders
#   - For each dataset, we create a data loader by passing the corresponding `Dataset` object.
# - `shuffle` parameter is set to `True` to shuffle the elements in each batch.
# - `num_workers` is set to 2 to use 2 subprocesses to load in the background.
# - The three dataloaders (TRAIN, VAL and TEST) are stored in a dictionary.
# - Finally we store the sizes of the three datasets in a dictionary `dataset_sizes`.
image_datasets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), transform=data_transforms[x])
for x in [TRAIN, VAL, TEST]
}
dataloaders = {
x: torch.utils.data.DataLoader(
image_datasets[x], batch_size=32, shuffle=True, num_workers=2
)
for x in [TRAIN, VAL, TEST]
}
dataset_sizes = {x: len(image_datasets[x]) for x in [TRAIN, VAL, TEST]}
for x in [TRAIN, VAL, TEST]:
print("Loaded {} images under {}".format(dataset_sizes[x], x))
print("Classes: ")
class_names = image_datasets[TRAIN].classes
print(image_datasets[TRAIN].classes)
# ## Utils
def imshow(inp, title=None):
inp = inp.numpy().transpose((1, 2, 0))
# plt.figure(figsize=(10, 10))
plt.axis("off")
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001)
def show_databatch(inputs, classes):
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
# Get a batch of training data
inputs, classes = next(iter(dataloaders[TRAIN]))
show_databatch(inputs, classes)
def visualize_model(model, data_loader, num_images=6):
was_training = model.training
# Set model for evaluation
model.eval()
images_so_far = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
inputs, labels = data
inputs_size = inputs.size(0)
inputs = inputs.to(device)
labels = labels.to(device)
model.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
predicted_labels = [preds[j] for j in range(inputs_size)]
print("Ground truth:")
show_databatch(inputs.detach().cpu(), labels.detach().cpu())
print("Prediction:")
show_databatch(inputs.detach().cpu(), predicted_labels)
del inputs, labels, outputs, preds, predicted_labels
torch.cuda.empty_cache()
images_so_far += inputs_size
if images_so_far >= num_images:
break
model.train(mode=was_training) # Revert model back to original training state
# This helper function will give us the accuracy of our model on the test set.
def eval_model(model, data_loader, criterion, max_iters=None):
since = time.time()
avg_loss = 0
avg_acc = 0
loss_test = 0
acc_test = 0
total_size = 0
test_batches = len(data_loader)
print("Evaluating model")
print("-" * 10)
model.eval()
with torch.no_grad():
for i, data in enumerate(data_loader):
if max_iters is None:
pass
elif i > max_iters:
break
if i % 5 == 0:
message = (
f"\rTest batch {i}/{test_batches}"
+ " |"
+ f"Avg loss (test): {avg_loss:.4f}"
+ " |"
+ f"Avg acc (test): {avg_acc:.4f}"
)
print(message, end="", flush=True)
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
inputs, labels = data
inputs_size = inputs.size(0)
inputs = inputs.to(device)
labels = labels.to(device)
model.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
loss_test += loss.item()
acc_test += torch.sum(preds == labels)
total_size += inputs_size
avg_loss = loss_test / total_size
avg_acc = acc_test / total_size
del inputs, labels, outputs, preds
torch.cuda.empty_cache()
avg_loss = loss_test / total_size
avg_acc = acc_test / total_size
elapsed_time = time.time() - since
print()
print("=" * 10)
print(
"Evaluation completed in {:.0f}m {:.0f}s".format(
elapsed_time // 60, elapsed_time % 60
)
)
print("Avg loss (test): {:.4f}".format(avg_loss))
print("Avg acc (test): {:.4f}".format(avg_acc))
print("-" * 10)
def model_summary(model):
batch_size = 2
return summary(
model,
input_size=(batch_size, 3, 224, 224),
col_names=[
"output_size",
"params_percent",
"trainable",
],
)
# ## Model creation
class VGG16TL(nn.Module):
def __init__(self, n_classes=4) -> None:
super().__init__()
self.n_classes = n_classes
self.vgg = self._setup_vgg()
def forward(self, x):
x = self.vgg(x)
return x
def _setup_vgg(self):
# load pretrained model
vgg = models.vgg16_bn(
weights="DEFAULT",
)
# freeze weights of feature extractor
for param in vgg.features.parameters():
param.requires_grad = False
# replace last layer
in_features = vgg.classifier[-1].in_features
vgg.classifier[-1] = nn.Linear(in_features, self.n_classes)
return vgg
class ResNet50TL(nn.Module):
def __init__(self, n_classes=4) -> None:
super().__init__()
self.n_classes = n_classes
self.resnet = self._setup_resnet()
def forward(self, x):
x = self.resnet(x)
return x
def _setup_resnet(self):
# load pretrained model
resnet = models.resnet50(
weights="DEFAULT",
)
# freeze weights of feature extractor
for param in resnet.parameters():
param.requires_grad = False
# replace last layer
in_features = resnet.fc.in_features
resnet.fc = nn.Sequential(
nn.Linear(in_features, 128),
nn.ReLU(inplace=True),
nn.Dropout(inplace=True),
nn.Linear(128, self.n_classes),
)
return resnet
class EfficientNetTL(nn.Module):
def __init__(self, n_classes=4) -> None:
super().__init__()
self.n_classes = n_classes
self.efficientnet = self._setup_efficientnet()
def forward(self, x):
x = self.efficientnet(x)
return x
def _setup_efficientnet(self):
# load pretrained model
efficientnet = models.efficientnet_v2_s(
weights="DEFAULT",
)
# freeze weights of feature extractor
for param in efficientnet.parameters():
param.requires_grad = False
# replace last layer
in_features = efficientnet.classifier[-1].in_features
efficientnet.classifier[-1] = nn.Linear(in_features, self.n_classes)
return efficientnet
# ### Model Summaries
vgg16 = VGG16TL()
model_summary(vgg16)
resnet50 = ResNet50TL()
model_summary(resnet50)
efficientnet = EfficientNetTL()
model_summary(efficientnet)
# ## Training
class Trainer:
def __init__(
self,
model,
train_data_loader,
val_data_loader,
criterion,
optimizer,
num_epochs: int = 10,
resume: bool = False,
path_to_checkpoint=None,
checkpoint_dir=None,
):
self.model = model
self.train_data_loader = train_data_loader
self.val_data_loader = val_data_loader
self.criterion = criterion
self.optimizer = optimizer
self.num_epochs = num_epochs
self.resume = resume
self.path_to_checkpoint = path_to_checkpoint
self.n_iter = 0
self.start_epoch = 0
self.epoch = 0
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.checkpoint_dir = checkpoint_dir
self.train_losses = []
self.train_acc = []
if self.resume:
ckpt = self.load_checkpoint(self.path_to_checkpoint)
self.model.load_state_dict(ckpt["net"])
self.start_epoch = ckpt["epoch"]
self.n_iter = ckpt["n_iter"]
self.optimizer.load_state_dict(ckpt["optim"])
print("Last checkpoint restored")
self.model.to(self.device)
def train_step(self):
self.model.train()
correct = 0
total = 0
# use prefetch_generator and tqdm for iterating through data
pbar = tqdm(
enumerate(self.train_data_loader), total=len(self.train_data_loader)
)
start_time = time.time()
# for loop going through dataset
for i, data in pbar:
# data preparation
img, label = data
img = img.to(self.device)
label = label.to(self.device)
# It's very good practice to keep track of preparation time and computation time using tqdm to find any issues in your dataloader
prepare_time = start_time - time.time()
# forward and backward pass
out = self.model(img)
loss = self.criterion(out, label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
_, predicted = torch.max(out.detach(), 1)
total += label.size(0)
correct += (predicted == label).sum().item()
accuracy = 100 * correct / total
# compute computation time and *compute_efficiency*
# If compute_efficiency is nearly 1, prepare_time is negligible, that's good.
process_time = start_time - time.time() - prepare_time
compute_efficiency = process_time / (process_time + prepare_time)
pbar.set_description(
f"Compute efficiency: {compute_efficiency:.2f}, "
f"loss: {loss.item():.2f}, acc: {accuracy:.2f}, epoch: {self.epoch}/{self.num_epochs}"
)
start_time = time.time()
self.train_losses.append(loss.item())
self.train_acc.append(accuracy)
self.n_iter += 1
def validation_step(self):
# bring model to evaluation mode
self.model.eval()
correct = 0
total = 0
pbar = tqdm(
enumerate(self.val_data_loader),
total=len(self.val_data_loader),
)
with torch.no_grad():
for i, data in pbar:
# data preparation
img, label = data
img = img.to(self.device)
label = label.to(self.device)
out = self.model(img)
_, predicted = torch.max(out.detach(), 1)
total += label.size(0)
correct += (predicted == label).sum().item()
accuracy = 100 * correct / total
print(f"Accuracy on validation set: {accuracy:.2f}")
def load_checkpoint(self, path_to_checkpoint):
ckpt = torch.load(path_to_checkpoint)
return ckpt
def save_checkpoint(self):
ckpt = {
"net": self.model.state_dict(),
"epoch": self.epoch,
"n_iter": self.n_iter,
"optim": self.optimizer.state_dict(),
}
ckpt_file_name = f"ckpt.pt"
ckpt_path = os.path.join(self.checkpoint_dir, ckpt_file_name)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
torch.save(ckpt, ckpt_path)
print(f"checkpoint is saved at {ckpt_path}!")
def plot_train_progress(self):
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(self.train_losses, label="train_loss", color="tab:blue")
ax2.plot(self.train_acc, label="train_acc", color="tab:orange")
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1 + h2, l1 + l2, loc="lower right")
fig.show()
def train(self):
for epoch in range(self.start_epoch, self.num_epochs):
# train step
self.train_step()
# validation step
if epoch % 1 == 0:
self.validation_step()
self.epoch += 1
# save checkpoint
self.save_checkpoint()
# ### VGG16
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(vgg16.parameters(), lr=0.001, momentum=0.9)
trainer_vgg = Trainer(
model=vgg16,
train_data_loader=dataloaders[TRAIN],
val_data_loader=dataloaders[VAL],
criterion=criterion,
optimizer=optimizer,
num_epochs=1,
resume=False,
checkpoint_dir="/kaggle/working/ckpt/vgg/",
)
start_time = time.time()
trainer_vgg.train()
train_time = time.time() - start_time
trainer_vgg.plot_train_progress()
print("Train time:\t", train_time, "[sec]")
vgg16 = trainer_vgg.model
# ### ResNet50
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(resnet50.parameters(), lr=0.001, momentum=0.9)
trainer_resnet = Trainer(
model=resnet50,
train_data_loader=dataloaders[TRAIN],
val_data_loader=dataloaders[VAL],
criterion=criterion,
optimizer=optimizer,
num_epochs=1,
resume=False,
checkpoint_dir="/kaggle/working/ckpt/resnet/",
)
start_time = time.time()
trainer_resnet.train()
train_time = time.time() - start_time
trainer_resnet.plot_train_progress()
print("Train time:\t", train_time, "[sec]")
resnet50 = trainer_resnet.model
# ### EfficientNet
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(efficientnet.parameters(), lr=0.001, momentum=0.9)
trainer_efficientnet = Trainer(
model=efficientnet,
train_data_loader=dataloaders[TRAIN],
val_data_loader=dataloaders[VAL],
criterion=criterion,
optimizer=optimizer,
num_epochs=1,
resume=False,
checkpoint_dir="/kaggle/working/ckpt/efficientnet/",
)
start_time = time.time()
trainer_efficientnet.train()
train_time = time.time() - start_time
trainer_efficientnet.plot_train_progress()
print("Train time:\t", train_time, "[sec]")
efficientnet = trainer_efficientnet.model
# ## Model evaluation and visualization (after training)
# Let's evaluate each model on the test set after training.
eval_model(vgg16, dataloaders[TEST], criterion)
eval_model(resnet50, dataloaders[TEST], criterion)
eval_model(efficientnet, dataloaders[TEST], criterion)
|
# ### Sources
# * https://www.kaggle.com/code/startupsci/titanic-data-science-solutions
# * https://www.kaggle.com/code/arthurtok/introduction-to-ensembling-stacking-in-python
# * https://www.kaggle.com/code/alexisbcook/titanic-tutorial
# # Define Steps
# The 7 steps of a Kaggle data competition are:
# 1. Question or problem definition.
# 2. Acquire training and testing data.
# 3. Wrangle, prepare, cleanse the data.
# 4. Analyze, identify patterns, and explore the data.
# 5. Model, predict and solve the problem.
# 6. Visualize, report, and present the problem solving steps and final solution.
# 7. Supply or submit the results.
# Luckily, the first four steps are fairly easy and straightforward.
# ## 1. Problem Definition
# Let's define the question or problem for this data set. From the Kaggle competition-
# "Knowing from a training set of samples listing passengers who survived or did not survive the Titanic disaster, can our model determine based on a given test dataset not containing the survival information, if these passengers in the test dataset survived or not."
# Here are some additional facts about the Titanic incident.
# * Sank on April 15, 1912, after colliding with an iceberg.
# * Killed 1502 out of the 2224 passengers and crew. 32% survival rate.
# * A large contributor to the loss of life was the less than optimal number of life boats.
# * While some luck did play a part in the survival of a passenger, there were other attributes as well that signaled a greater chance of survival.
# ## 2. Acquire Data
import pandas as pd
train_df = pd.read_csv("../input/titanic-data/train.csv")
test_df = pd.read_csv("../input/titanic-data/test.csv")
# ## 3. Wrangle, prepare, cleanse the data
# Let's take a look at the data columns we're working with.
train_df.head()
# From the view above, we are able to gather a few bits of information.
# * PassengerID - numeric, unique ID.
# * Survived - numeric, 0 for dead, 1 for survived.
# * Pclass - numeric, 1-3 for the ticket class of that passenger.
# * Name - string, unique to the passenger (could have duplicates in rare circumstances).
# * Sex - string, male or female.
# * Age - numeric, age of passenger in whole number.
# * SibSp - numeric, number of siblings and spouses of the passenger who were also onboard.
# * Parch - numeric, number of parents and children who were also onboard.
# * Ticket - string, the ticket number (could be unique, unsure).
# * Fare - numeric, the amount of money paid for the ticket.
# * Cabin - string, the cabin number of the passenger.
# * Embarked - string, C = Cherbourg, Q = Queenstown, S = Southampton.
print(train_df.info())
print(test_df.info())
# Our training data contains null values in the age, cabin, and embarked columns while the testing data contains nulls in age, fare, and cabin columns.
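# One common way to deal with these gaps is simple imputation; the sketch below is a hedged illustration (not part of this notebook's analysis) that works on copies, so nothing downstream changes. The names train_imp, test_imp, and HasCabin are made up for the example.
train_imp = train_df.copy()
test_imp = test_df.copy()
for frame in (train_imp, test_imp):
    # numeric column -> fill with the median
    frame["Age"] = frame["Age"].fillna(frame["Age"].median())
    # categorical column -> fill with the most frequent value
    frame["Embarked"] = frame["Embarked"].fillna(frame["Embarked"].mode()[0])
    # Cabin is mostly missing, so keep only a presence flag instead of imputing a value
    frame["HasCabin"] = frame["Cabin"].notnull().astype(int)
# the lone missing Fare is in the test set only
test_imp["Fare"] = test_imp["Fare"].fillna(test_imp["Fare"].median())
print(train_imp[["Age", "Embarked"]].isnull().sum())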
print(train_df.describe())
print(train_df.describe(include=["O"]))
# The above info lets us know that:
# * The training dataset has a 38% survival rate (compared to the 32% actual survival rate).
# * Average passenger age was 29.
# * Average SibSp count was .5 and average Parch count was .4.
# * Names are unique.
# * Only 'male' and 'female' in training data set. Males make up 65% of the data.
# ## 4. Data Exploration
# "This is where the fun begins" - Anakin Skywalker. Lets bring in some plotting libraries to help us.
import seaborn as sns
import matplotlib.pyplot as plt
# Now let's do some transformation and feature mapping to help with visualizations.
drop_elements = ["PassengerId", "Name", "Ticket", "Cabin", "Embarked"]
train_viz = train_df.drop(drop_elements, axis=1)
train_viz["Sex"] = train_df["Sex"].map({"female": 0, "male": 1}).astype(int)
# If you noticed, we dropped a few columns (PassengerId, Name, Ticket, Cabin, and Embarked) that logically would not make sense to visualize.
# We also mapped the sexes, 0 to female and 1 to male.
colormap = plt.cm.RdBu
plt.figure(figsize=(14, 12))
plt.title("Pearson Correlation of Features", y=1.05, size=15)
sns.heatmap(
train_viz.astype(float).corr(),
linewidths=0.1,
vmax=1.0,
square=True,
cmap=colormap,
linecolor="white",
annot=True,
)
# Here is a correlation plot using some code from ANISOTROPIC's notebook.
# Looking at the Survived column, we see a strong correlation between sex and class and the survival rate. Being female correlates with surviving, and as a passenger's class number increases (1 being first class, 2 being second class, etc.), their chance of surviving decreases.
# Other notes: a person's fare is strongly correlated with their class (which makes sense), and age is correlated with class as well. Age and sibling/spouse count were also slightly correlated, and parent/child counts were strongly correlated with sibling/spouse counts.
# Of note, age currently shows no correlation with survival rate, though we will continue our analysis to see if this changes.
import math
age_hold = [age for age in list(set(train_viz["Age"])) if not (math.isnan(age))]
age_hold.sort()
survive_rate = []
for age in age_hold:
survive_rate.append(
len(train_viz.loc[(train_viz["Age"] == age) & (train_viz["Survived"] == 1)])
/ len(train_viz.loc[train_viz["Age"] == age])
)
# The above code is going to help us visualize the survival rate associated with age. We drop all NA values, sort our list, then calculate the survival rate for each discrete age.
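# For reference, the same per-age survival rates can be computed with a single pandas groupby (a sketch; the loop above stays as this notebook's approach, and survival_by_age is an illustrative name).
survival_by_age = train_viz.groupby("Age")["Survived"].mean()
# groupby drops NaN ages by default, so this should match age_hold / survive_rate from above
print(survival_by_age.head())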
d = {"Age": age_hold, "Survival_Rate": survive_rate}
df = pd.DataFrame(data=d, dtype=float)
sns.barplot(data=df, x="Age", y="Survival_Rate", color="steelblue")
plt.xticks([10, 20, 30, 40, 50, 60, 70, 80])
plt.show()
|
# Import files
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
# Get files
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Read files
df = pd.read_csv(
"/kaggle/input/in-class-competition-data-clustering-2023/Data4cluster.csv"
)
submission = pd.read_csv(
"/kaggle/input/in-class-competition-data-clustering-2023/SubData.csv"
)
df
submission
# Analyse df
df.describe()
df.info()
df.isna().sum()
corr = df.corr()
sns.heatmap(corr)
corr
# Prepare data
df = df.drop("ID", axis=1)
df
X = df
# Clustering
from sklearn.cluster import KMeans
model = KMeans(n_clusters=3, random_state=42)
model.fit(X)
prediction = model.labels_
prediction
cluster_centres = model.cluster_centers_
cluster_centres
unique, counts = np.unique(prediction, return_counts=True)
dict(zip(unique, counts))
plt.scatter(X.iloc[:, 0], X.iloc[:, 1], c=prediction, s=50, cmap="viridis")
centers = cluster_centres
plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, alpha=0.5)
# Prepare submission
submission["Expected"] = prediction
submission.to_csv("submission.csv", index=False)
submission = pd.read_csv("submission.csv")
submission
|
import pandas as pd
import re
import string
from wordcloud import WordCloud
from collections import Counter
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import nltk
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import pandas as pd
df = pd.read_csv("/kaggle/input/korean-preprocessed/cleaned_Mental_Health.csv")
df.sample()
df = df.dropna(how="any")
indexmentalhealth = df[(df["Subreddit"] == "mentalhealth")].index
df.drop(indexmentalhealth, inplace=True)
df["Subreddit"].value_counts()
def remove_url(text):
    re_url = re.compile(r"https?://\S+|www\.\S+")
return re_url.sub("", str(text))
def remove_stopwords(text):
new_list = []
words = word_tokenize(text)
stopwrds = stopwords.words("english")
for word in words:
if word not in stopwrds:
new_list.append(word)
return " ".join(new_list)
def remove_newline(text):
return text.replace("\n", " ").replace("\r", "")
def remove_emojis(text):
emoj = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002500-\U00002BEF" # chinese char
"\U00002702-\U000027B0"
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"\U0001f926-\U0001f937"
"\U00010000-\U0010ffff"
"\u2640-\u2642"
"\u2600-\u2B55"
"\u200d"
"\u23cf"
"\u23e9"
"\u231a"
"\ufe0f" # dingbats
"\u3030"
"]+",
re.UNICODE,
)
return re.sub(emoj, "", text)
def convert_lowercase(text):
text = text.lower()
return str(text)
# Removes HTML tags
def remove_html(data):
html_tag = re.compile(r"<.*?>")
data = html_tag.sub(r"", data)
return data
# Note: despite its name, this also replaces punctuation (except apostrophes) with spaces before collapsing repeated whitespace
def remove_whitespaces(text):
text = re.sub(r"[^\w\s\']", " ", text)
text = re.sub(" +", " ", text)
return text
def remove_brackets(text):
text = re.sub(r"\[|\]|\(|\)|\{|\}|\<|\>", "", text)
return text
import re
# Define the abbreviations dictionary
abbr_dict = {
"what's": "what is",
"what're": "what are",
"who's": "who is",
"who're": "who are",
"where's": "where is",
"where're": "where are",
"when's": "when is",
"when're": "when are",
"how's": "how is",
"how're": "how are",
"i'm": "i am",
"we're": "we are",
"you're": "you are",
"they're": "they are",
"it's": "it is",
"he's": "he is",
"she's": "she is",
"that's": "that is",
"there's": "there is",
"there're": "there are",
"i've": "i have",
"we've": "we have",
"you've": "you have",
"they've": "they have",
"who've": "who have",
"would've": "would have",
"not've": "not have",
"i'll": "i will",
"we'll": "we will",
"you'll": "you will",
"he'll": "he will",
"she'll": "she will",
"it'll": "it will",
"they'll": "they will",
"isn't": "is not",
"wasn't": "was not",
"aren't": "are not",
"weren't": "were not",
"can't": "can not",
"couldn't": "could not",
"don't": "do not",
"didn't": "did not",
"shouldn't": "should not",
"wouldn't": "would not",
"doesn't": "does not",
"haven't": "have not",
"hasn't": "has not",
"hadn't": "had not",
"won't": "will not",
"gotta": "got to",
"wanna": "want to",
"imma": "i am going to",
"lemme": "let me",
"let's": "let us",
"here's": "here is",
"y'all": "you all",
"gimme": "give me",
"ain't": "am not",
"aint": "am not",
}
# Define the function to replace the abbreviations
def replace_abbreviations(text):
    text = re.sub("’", "'", text)  # normalize curly apostrophes to straight ones
for word in text.split():
if word.lower() in abbr_dict:
text = re.sub(
r"\b{}\b".format(word),
abbr_dict[word.lower()],
text,
flags=re.IGNORECASE,
)
return text
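# Quick sanity check of the helper above (the example sentence is made up for illustration):
print(replace_abbreviations("I'm tired and I can't sleep, what's wrong with me?"))
# expected: the contractions are expanded, e.g. "i am ... can not ... what is ..."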
import texthero as hero
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df["Subreddit"] = label_encoder.fit_transform(df["Subreddit"])
labels = list(label_encoder.classes_)
df.sample()
df["Sentence"][0]
from sklearn.model_selection import train_test_split
X = df["Sentence"].values
y = df["Subreddit"].values
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42, stratify=df[["Subreddit"]]
)
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(
class_weight="balanced", classes=np.unique(y_train), y=y_train
)
class_weights = dict(zip(np.unique(y_train), class_weights))
class_weights
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score
clf = Pipeline([("vectorizer_tfidf", TfidfVectorizer()), ("LR", LogisticRegression())])
clf.get_params().keys()
from sklearn.model_selection import StratifiedKFold, cross_val_score, GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import joblib
params = [
{
"LR__solver": ["saga"],
"LR__penalty": ["elasticnet", "l1", "l2", "none"],
"LR__max_iter": [50, 100, 200, 500, 1000, 2500],
"LR__C": [0.001, 0.01, 0.1, 1, 10, 100, 1000],
"LR__multi_class": ["auto", "ovr", "multinomial"],
"LR__class_weight": [class_weights],
},
{
"LR__solver": ["newton-cg", "lbfgs"],
"LR__penalty": ["l2", "none"],
"LR__max_iter": [50, 100, 200, 500, 1000, 2500],
"LR__C": [0.001, 0.01, 0.1, 1, 10, 100, 1000],
"LR__multi_class": ["auto", "ovr", "multinomial"],
"LR__class_weight": [class_weights],
},
]
params2 = [
{
"LR__solver": ["saga"],
"LR__penalty": ["l2"],
"LR__max_iter": [1000],
"LR__C": [0.01, 0.1],
"LR__multi_class": ["auto", "ovr", "multinomial"],
"LR__class_weight": [class_weights],
},
]
cv = StratifiedKFold(n_splits=5)
search = RandomizedSearchCV(
clf,
scoring="balanced_accuracy",
cv=cv,
n_iter=100,
param_distributions=params2,
refit=True,
n_jobs=-1,
verbose=2,
)
search.fit(X_train, y_train)
clf = search.best_estimator_
print("Best parameters: ", search.best_params_)
print("Best score: ", search.best_score_)
joblib.dump(clf, "LR_best_model.pkl")
y_pred = clf.predict(X_test)
print("y_pred:", y_pred)
print("y_test:", y_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred, target_names=labels))
print(search.cv_results_)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Read dataset
stk = pd.read_csv("/kaggle/input/reliance-stock-price-dataset/reliance_data.csv")
stk.head()
stk.shape
stk.columns = stk.columns.str.lower().str.replace("%", "")
stk.columns = stk.columns.str.replace(" ", "_")
stk.info()
# check missing values
print(stk.isnull().sum())
print(stk.shape)
# Exploratory Data
stk.describe()
import seaborn as sns
from matplotlib import pyplot as plt
# check the correlation between the variables
corr = stk.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(corr, linewidths=0, linecolor="black", cbar=True)
# Create a list of the variables
variables = ["prev_close", "open", "high", "low", "close", "volume", "turnover"]
# Create a boxplot for each variable to check the distribution of data
for variable in variables:
sns.boxplot(x=variable, data=stk)
plt.title(variable)
plt.show()
# ### Splitting data into train and test sets
# import model selection for splitting the data
from sklearn import model_selection
from sklearn.preprocessing import StandardScaler
# split data
train, test = model_selection.train_test_split(stk, test_size=0.2, random_state=42)
# create the feature sets (train_var, test_var) by dropping unused columns and the target column 'close'
train_var = train.drop(
[
"date",
"symbol",
"series",
"last",
"trades",
"deliverable_volume",
"deliverble",
"close",
],
axis=1,
)
test_var = test.drop(
[
"date",
"symbol",
"series",
"last",
"trades",
"deliverable_volume",
"deliverble",
"close",
],
axis=1,
)
# Normalize the data using StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(train_var)
X_test = scaler.transform(test_var)
y_train = train.close
y_test = test.close
len(X_train), len(y_train), len(X_test), len(y_test)
# ### Training and testing
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
# Fit linear regression to the training dataset
lin = LinearRegression()
lin.fit(X_train, y_train)
# Get the coefficient and intercept of the line
print(lin.coef_)
lin.intercept_
# Predict on the test set
y_pred = lin.predict(X_test)
y_pred
# create dataframe for the prediction
test_predictions = pd.DataFrame(
{
"Date": test["date"],
"Series": test["series"],
"Actual_value": test["close"],
"Predicted_value": y_pred,
},
columns=["Date", "Series", "Actual_value", "Predicted_value"],
)
test_predictions
# Evaluate the model on the testing set
mse = mean_squared_error(y_test, y_pred)
rmse = mean_squared_error(y_test, y_pred, squared=False)
r2 = r2_score(y_test, y_pred)
print("MSE:", mse)
print("RMSE:", rmse)
print("R-squared:", r2)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
test_data.head()
test_data["Age"].mean()
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error
from xgboost import XGBClassifier
def get_mae(n_estimators, learning_rate, max_depth, train_X, val_X, train_y, val_y):
model = XGBClassifier(
n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
random_state=0,
early_stopping_rounds=5,
)
model.fit(train_X, train_y, eval_set=[(val_X, val_y)], verbose=False)
preds_val = model.predict(val_X)
mae = mean_absolute_error(val_y, preds_val)
return mae
for col in train_data.columns:
train_data[col + "_was_missing"] = train_data[col].isnull()
for col in test_data.columns:
test_data[col + "_was_missing"] = test_data[col].isnull()
s = train_data.dtypes == "object"
object_cols = list(s[s].index)
train_data["Age"] = train_data["Age"].fillna(train_data["Age"].mean())
test_data["Age"] = test_data["Age"].fillna(test_data["Age"].mean())
# train_data['Transported'] = train_data['Transported'].replace(True, 1)
# train_data['Transported'] = train_data['Transported'].replace(False, 0)
train_data["Cabin"] = train_data["Cabin"].fillna("F/T")
train_data["Deck"] = train_data["Cabin"].apply(lambda x: x[0])
train_data["Cabin"] = train_data["Cabin"].apply(lambda x: x[-1])
train_data["Cabin"] = train_data["Cabin"].replace("P", 0)
train_data["Cabin"] = train_data["Cabin"].replace("S", 1)
train_data["Cabin"] = train_data["Cabin"].replace("T", 0.5)
test_data["Cabin"] = test_data["Cabin"].fillna("F/T")
test_data["Deck"] = test_data["Cabin"].apply(lambda x: x[0])
test_data["Cabin"] = test_data["Cabin"].apply(lambda x: x[-1])
test_data["Cabin"] = test_data["Cabin"].replace("P", 0)
test_data["Cabin"] = test_data["Cabin"].replace("S", 1)
test_data["Cabin"] = test_data["Cabin"].replace("T", 0.5)
test_data["Destination"] = test_data["Destination"].fillna(
test_data["Destination"].mode()
)
train_data["Destination"] = train_data["Destination"].fillna(
train_data["Destination"].mode()
)
test_data["VIP"] = test_data["VIP"].fillna(False)
train_data["VIP"] = train_data["VIP"].fillna(False)
test_data["VRDeck"] = test_data["VRDeck"].fillna(test_data["VRDeck"].mean())
train_data["VRDeck"] = train_data["VRDeck"].fillna(train_data["VRDeck"].mean())
test_data["Spa"] = test_data["Spa"].fillna(test_data["Spa"].mean())
train_data["Spa"] = train_data["Spa"].fillna(train_data["Spa"].mean())
test_data["ShoppingMall"] = test_data["ShoppingMall"].fillna(
test_data["ShoppingMall"].mean()
)
train_data["ShoppingMall"] = train_data["ShoppingMall"].fillna(
train_data["ShoppingMall"].mean()
)
test_data["FoodCourt"] = test_data["FoodCourt"].fillna(test_data["FoodCourt"].mean())
train_data["FoodCourt"] = train_data["FoodCourt"].fillna(train_data["FoodCourt"].mean())
test_data["RoomService"] = test_data["RoomService"].fillna(
test_data["RoomService"].mean()
)
train_data["RoomService"] = train_data["RoomService"].fillna(
train_data["RoomService"].mean()
)
test_data["HomePlanet"] = test_data["HomePlanet"].fillna(test_data["HomePlanet"].mode())
train_data["HomePlanet"] = train_data["HomePlanet"].fillna(
train_data["HomePlanet"].mode()
)
train_data.head()
parch_dict = {}
for ID in train_data["PassengerId"]:
group_num = ID[:4]
if group_num in parch_dict.keys():
parch_dict[group_num] += 1
else:
parch_dict[group_num] = 1
ind = 0
for ID in train_data["PassengerId"]:
group_num = ID[:4]
train_data.loc[ind, "Parch"] = parch_dict[group_num]
ind += 1
parch_dict = {}
for ID in test_data["PassengerId"]:
group_num = ID[:4]
if group_num in parch_dict.keys():
parch_dict[group_num] += 1
else:
parch_dict[group_num] = 1
ind = 0
for ID in test_data["PassengerId"]:
group_num = ID[:4]
test_data.loc[ind, "Parch"] = parch_dict[group_num]
ind += 1
s = train_data.dtypes == "object"
object_cols = object_cols + list(s[s].index)
# test_data['FoodCourt'] = test_data['FoodCourt'].apply(lambda x: x = 0 for x < 19000 )
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
features = [
"Deck",
"VRDeck",
"RoomService",
"FoodCourt",
"ShoppingMall",
"Spa",
"CryoSleep",
"Cabin",
"Age",
"Parch",
"VIP",
]
X = pd.get_dummies(train_data[features])
y = train_data["Transported"]
train_X, val_X, train_y, val_y = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=13
)
train_y = train_y.replace(True, 1)
train_y = train_y.replace(False, 0)
test_n = [100, 300, 500]
test_rate = [0.01]
test_depth = [4, 5, 6]
small = 1
best_n = 5
best_depth = 5
best_rate = 0
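# A hedged sketch (not executed in the original notebook) of how the unused lists above could be
# combined with the get_mae() helper to pick hyperparameters on the validation split.
for n in test_n:
    for rate in test_rate:
        for depth in test_depth:
            mae = get_mae(n, rate, depth, train_X, val_X, train_y, val_y)
            if mae < small:
                small, best_n, best_rate, best_depth = mae, n, rate, depth
print("best n_estimators:", best_n, "learning_rate:", best_rate, "max_depth:", best_depth, "MAE:", small)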
print(object_cols)
n_estimators = 300
max_depth = 6
learn_rate = 0.02
from sklearn.model_selection import cross_val_score
y = train_data["Transported"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = XGBClassifier(
n_estimators=n_estimators,
learning_rate=learn_rate,
max_depth=max_depth,
random_state=0,
)
scores = -1 * cross_val_score(model, X, y, cv=5, scoring="neg_mean_absolute_error")
print(scores)
y = train_data["Transported"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = XGBClassifier(
n_estimators=n_estimators,
learning_rate=learn_rate,
max_depth=max_depth,
random_state=0,
)
model.fit(X, y, verbose=False)
predictions = model.predict(X_test)
arr = np.array([])
for val in predictions:
if val == 1:
arr = np.append(arr, "True")
else:
arr = np.append(arr, "False")
print(arr)
# use the "True"/"False" strings built above so the submission matches the expected format
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Transported": arr})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
|
# **Linear regression with one variable from scratch**
# While studying machine learning basics I've decided to implement the linear regression algorithm from scratch, so that I know I've understood the topic properly.
# First, let's import all the libraries we need for this task:
import numpy as np
import matplotlib.pyplot as plt
# Now let's recall the linear regression formula:
# $$f_{w,b}(x^{(i)}) = wx^{(i)} + b$$
# Here we have:
# $x$ - our variable (feature);
# $y$ - the target;
# $w$ and $b$ - the parameters to be adjusted during training.
# Now let's generate $x$ and $y$ data for training and visualize it:
x = np.arange(50)
delta = np.random.uniform(-10, 10, size=(50,))
y = x + delta
plt.figure(figsize=(16, 6))
plt.xlabel("x values")
plt.ylabel("y values")
plt.ylim([-20, 70])
plt.scatter(x, y)
# Training our linear regression model means finding the minimum of a cost function. We will use the squared error cost function for this purpose:
# $$J(w,b) = \frac{1}{2m} \sum\limits_{i = 0}^{m-1} (f_{w,b}(x^{(i)}) - y^{(i)})^2$$
# Let's write a function for it:
def cost_func(x_arr, y_arr, bias, weight):
m = x_arr.shape[0]
total_cost = 0
for i in range(m):
pred = bias + weight * x_arr[i]
y = y_arr[i]
cost = (pred - y) ** 2
total_cost += cost
return total_cost / (2 * m)
# To find the minimum of $J(w,b)$ we will use gradient descent to find appropriate values for our parameters $w$ and $b$.
# To implement gradient descent we create a loop that runs until the cost function reaches its minimum. One iteration is:
# $$\begin{align*}
# w &= w - \alpha \frac{\partial J(w,b)}{\partial w} \\
# b &= b - \alpha \frac{\partial J(w,b)}{\partial b}
# \end{align*}$$
# Here $\alpha$ is the learning rate; for now we will hardcode it. We also have the partial derivatives
# $$\frac{\partial J(w,b)}{\partial w}, \qquad \frac{\partial J(w,b)}{\partial b}$$
# These characterize the direction and size of each step (they describe the slope of the cost function at the current point). They are defined as:
# $$\begin{align*}
# \frac{\partial J(w,b)}{\partial w} &= \frac{1}{m} \sum\limits_{i = 0}^{m-1} (f_{w,b}(x^{(i)}) - y^{(i)})x^{(i)}\\
# \frac{\partial J(w,b)}{\partial b} &= \frac{1}{m} \sum\limits_{i = 0}^{m-1} (f_{w,b}(x^{(i)}) - y^{(i)})
# \end{align*}$$
# Let's write the corresponding functions for the partial derivatives and for gradient descent itself:
def derivative_J_b(x_arr, y_arr, bias, weight):
m = x_arr.shape[0]
total_cost = 0
for i in range(m):
pred = bias + weight * x_arr[i]
y = y_arr[i]
cost = pred - y
total_cost += cost
return total_cost / m
def derivative_J_w(x_arr, y_arr, bias, weight):
m = x_arr.shape[0]
total_cost = 0
for i in range(m):
pred = bias + weight * x_arr[i]
y = y_arr[i]
cost = (pred - y) * x_arr[i]
total_cost += cost
return total_cost / m
# gradient descent
def grad_descent(x_array, y_array, bias_start, weight_start):
old_cost = cost_func(x_array, y_array, bias_start, weight_start)
alpha = 0.0001
convergence = False
bias = bias_start
weight = weight_start
while convergence == False:
temp_bias = bias - alpha * derivative_J_b(x_array, y_array, bias, weight)
temp_weight = weight - alpha * derivative_J_w(x_array, y_array, bias, weight)
new_cost = cost_func(x_array, y_array, temp_bias, temp_weight)
if new_cost >= old_cost:
convergence = True
else:
weight = temp_weight
bias = temp_bias
old_cost = new_cost
return bias, weight
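# For reference: the same two partial derivatives can be computed without Python loops. This is a hedged, vectorized sketch (gradients_vectorized is an illustrative helper, not used by grad_descent above); it should agree with derivative_J_w and derivative_J_b.
def gradients_vectorized(x_arr, y_arr, bias, weight):
    errors = bias + weight * x_arr - y_arr  # residuals f_wb(x) - y for all samples at once
    dj_dw = np.mean(errors * x_arr)  # (1/m) * sum((f - y) * x)
    dj_db = np.mean(errors)  # (1/m) * sum(f - y)
    return dj_db, dj_dw
# quick check against the loop-based version on the training data
print(np.isclose(gradients_vectorized(x, y, 0.0, 0.0)[1], derivative_J_w(x, y, 0, 0)))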
# And now let's define our parameters $w$ and $b$, train our model, and visualize the resulting line:
w = 0
b = 0
b, w = grad_descent(x, y, b, w)
m = x.shape[0]
pred_arr = []
for i in range(m):
pred = b + w * x[i]
pred_arr.append(pred)
plt.figure(figsize=(16, 6))
plt.xlabel("x values")
plt.ylabel("y values")
plt.scatter(x, y)
plt.plot(x, pred_arr)
plt.show()
# Seems like a good result. Let's try reversing the $y$ values and look at the result:
y = np.flip(y)
b, w = grad_descent(x, y, b, w)
m = x.shape[0]
pred_arr = []
for i in range(m):
pred = b + w * x[i]
pred_arr.append(pred)
plt.figure(figsize=(16, 6))
plt.xlabel("x values")
plt.ylabel("y values")
plt.scatter(x, y)
plt.plot(x, pred_arr)
plt.show()
|
import cupy as cp
import cuml, cudf
from sklearn.model_selection import train_test_split
from cuml.linear_model import Ridge
from cuml.neighbors import KNeighborsRegressor
from cuml.svm import SVR
from cuml.ensemble import RandomForestRegressor
from cuml.preprocessing.TargetEncoder import TargetEncoder
from sklearn.model_selection import GroupKFold, KFold
from cuml.metrics import log_loss
train_cr = cudf.read_csv(
"../input/ncaaw-march-mania-2021/WNCAATourneyCompactResults.csv"
)
train_seeds = cudf.read_csv("../input/ncaaw-march-mania-2021/WNCAATourneySeeds.csv")
submission = cudf.read_csv(
"../input/ncaaw-march-mania-2021/WSampleSubmissionStage1.csv"
)
train_cr.head()
train_seeds.head()
submission.head()
train_seeds["seed_int"] = [
int(train_seeds["Seed"][x][1:3]) for x in range(len(train_seeds))
]
drop_lbls = ["DayNum", "WScore", "LScore", "WLoc", "NumOT"]
train_seeds.drop(labels=["Seed"], inplace=True, axis=1)
train_cr.drop(labels=drop_lbls, inplace=True, axis=1)
train_cr.head()
train_seeds.head()
ren1 = {"TeamID": "WTeamID", "seed_int": "WS"}
ren2 = {"TeamID": "LTeamID", "seed_int": "LS"}
df1 = cudf.merge(
left=train_cr,
right=train_seeds.rename(columns=ren1),
how="left",
on=["Season", "WTeamID"],
)
df2 = cudf.merge(
left=df1, right=train_seeds.rename(columns=ren2), on=["Season", "LTeamID"]
)
df_w = cudf.DataFrame()
df_w["dff"] = df2.WS - df2.LS
df_w["rsl"] = 1
df_l = cudf.DataFrame()
df_l["dff"] = -df_w["dff"]
df_l["rsl"] = 0
df_prd = cudf.concat((df_w, df_l))
X = df_prd.dff.values.reshape(-1, 1)
y = df_prd.rsl.values
X_test = cp.zeros(shape=(len(submission), 1))
for ind, row in submission.to_pandas().iterrows():
yr, o, t = [int(x) for x in row.ID.split("_")]
X_test[ind, 0] = (
train_seeds[
(train_seeds.TeamID == o) & (train_seeds.Season == yr)
].seed_int.values[0]
- train_seeds[
(train_seeds.TeamID == t) & (train_seeds.Season == yr)
].seed_int.values[0]
)
|
# # Titanic competition
# We are given a dataset containing various pieces of information about the passengers aboard the Titanic, and we will see whether that information can be used to predict who survived.
# The Kaggle competition provides two files: a training set and a test set.
# The **training set** contains the data we can use to train our model. It has several feature columns with various descriptive data, and in this case it also contains the target column we want to predict: **Survived**.
# The **test set** contains all of the same feature columns, but the target column is missing. The test set also usually has fewer observations (rows) than the training set.
# The two files are named **test.csv** and **train.csv**.
# First, let's load the training and test sets and look at their sizes.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
print("Dimensions of train: {}".format(train.shape))
print("Dimensions of test: {}".format(test.shape))
# # Data exploration
# The features in the data are as follows:
# * **PassengerId**: a unique identifier for each passenger
# * **Survived**: the target value; 0 means the passenger died and 1 means they survived
# * **Pclass**: passenger class
# * **Name**, **Sex**, **Age**: name, sex, and age
# * **SibSp**: the number of siblings/spouses the passenger had aboard the Titanic
# * **Parch**: the number of parents/children the passenger had aboard the Titanic
# * **Ticket**: ticket ID
# * **Fare**: the price paid (in pounds)
# * **Cabin**: the passenger's cabin number
# * **Embarked**: where the passenger boarded the Titanic
# Let's look at the first five rows of the training set.
train.head()
# In this case, it is important to understand the Titanic disaster and, specifically, to identify the variables that might have influenced survival.
# Anyone who has seen the Titanic movie will remember that women and children were given priority for the lifeboats (which really did happen), and that there was a large class gap between passengers.
# This suggests that **Age**, **Sex**, and **Pclass** may be good predictors of survival. We will start by exploring **Sex** and **Pclass** while visualizing them.
# First, let's add the visualization packages and display the most important piece of information: survival. We can get it from the **Survived** column of the training dataframe (**train**).
import seaborn as sns
import matplotlib.pyplot as plt
f, ax = plt.subplots(1, 2, figsize=(18, 8))
train["Survived"].value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.1f%%", ax=ax[0], shadow=True
)
ax[0].set_title("Survived")
ax[0].set_ylabel("")
sns.countplot(x="Survived", data=train, ax=ax[1]) # "Survived가 x축인지 y 인지 알려주어야"
ax[1].set_title("Survived")
plt.show()
# 0 means died and 1 means survived, so we can conclude that more than 60% of the passengers died.
# Next, let's check the survival ratio for men and women.
# The following code filters the data in **train['Survived']** by sex and compares the two groups.
f, ax = plt.subplots(1, 2, figsize=(18, 8))
train["Survived"][train["Sex"] == "male"].value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.1f%%", ax=ax[0], shadow=True
)
train["Survived"][train["Sex"] == "female"].value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.1f%%", ax=ax[1], shadow=True
)
ax[0].set_title("Survived (male)")
ax[1].set_title("Survived (female)")
plt.show()
sex_pivot = train.pivot_table(index="Sex", values="Survived")
sex_pivot.plot.bar()
plt.show()
# Because the **Survived** column contains 0 when a passenger did not survive and 1 when they did, we can split the data by sex and compute the mean of this column.
# The charts above show that the death rate for men was over 80%, while for women it was only about 25%.
# Let's do the same for the **Pclass** column.
class_pivot = train.pivot_table(index="Pclass", values="Survived")
class_pivot.plot.bar()
plt.show()
# This time, instead of a chart, let's use pandas' own **table** functionality to explore the cabin-class data in **Pclass**.
pd.crosstab(
[train["Sex"], train["Survived"]], train["Pclass"], margins=True
).style.background_gradient(cmap="summer_r")
# From this table we can see the following:
# * The survival rate for women in 1st class was 91/94 = 97%, while for women in 3rd class it was 50%.
# * For men, the survival rate was 37% in 1st class and 13% in 3rd class.
# In other words, the death rate was higher in the lower cabin classes; a good seat was worth its price.
# # Exploring and transforming the Age column
# The **Sex** and **Pclass** columns are called categorical features: their values represent a few discrete options (for example, whether the passenger is male or female).
train["Age"].describe()
# The **Age** column contains numbers from **0.42** to **80.0**. Another thing to note is that it has **714** values, fewer than the **891** rows we found in the training set earlier, which means some values are missing.
# We can create two histograms to visually compare, across age ranges, those who survived with those who died.
survived = train[train["Survived"] == 1]
died = train[train["Survived"] == 0]
survived["Age"].plot.hist(alpha=0.5, color="red", bins=50)
died["Age"].plot.hist(alpha=0.5, color="blue", bins=50)
plt.legend(["Survived", "Died"])
plt.show()
# We shouldn't judge too quickly, but in some age ranges more passengers survived; these are the ranges where the red bars are taller than the blue bars.
# To make this useful for a machine learning model, we can use the **pandas.cut()** function to split the continuous feature into bins, turning it into a categorical feature.
# As noted above, the **Age** column contains missing values that have to be handled first, and any changes must also be applied to the test data.
# * Use the **pandas.fillna()** method to fill all missing values with -0.5
# * Cut the **Age** column into six age bins (plus one category for missing values):
# * **Missing**, between -1 and 0
# * **Infant**, between 0 and 5
# * **Child**, between 5 and 12
# * **Teenager**, between 12 and 18
# * **Young Adult**, between 18 and 35
# * **Adult**, between 35 and 60
# * **Senior**, between 60 and 100
# Figure: how the process_age() function transforms the Age data into the categories above.
# The code below uses the **process_age()** function to convert the **Age** column of the **train** and **test** dataframes into categories, then builds a **pivot table** from the transformed data and visualizes it.
# Here, **process_age()** takes a dataframe (df), processes its "**Age**" column, and adds an "**Age_categories**" column.
def process_age(df, cut_points, label_names):
df["Age"] = df["Age"].fillna(-0.5)
df["Age_categories"] = pd.cut(df["Age"], cut_points, labels=label_names)
return df
cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = [
"Missing",
"Infant",
"Child",
"Teenager",
"Young Adult",
"Adult",
"Senior",
]
train = process_age(train, cut_points, label_names)
test = process_age(test, cut_points, label_names)
pivot = train.pivot_table(index="Age_categories", values="Survived")
pivot.plot.bar()
plt.show()
# # Preparing the data for machine learning
# We have identified three columns that may be useful for predicting survival:
# * **Sex**
# * **Pclass**
# * **Age**, or more specifically the newly created **Age_categories**
# Before building a model, we need to prepare these columns for machine learning. Most machine learning algorithms cannot understand text labels, so the values have to be converted into numbers.
# We also have to be careful not to imply a numeric relationship where there is none. The **data dictionary** tells us that the values in the **Pclass** column are 1, 2, and 3.
train["Pclass"].value_counts()
# Although the passenger classes have an order, that order is not the same as the relationship between the numbers 1, 2, and 3.
# To remove this relationship, we can create a dummy column for each unique value of **Pclass**:
# the **pandas.get_dummies()** function generates these dummy columns for us.
# Below we write a function that creates dummy columns for a given column and concatenates them back onto the original dataframe, and then apply it to the **Pclass, Sex, and Age_categories** columns of both the **train** and **test** dataframes.
def create_dummies(df, column_name):
dummies = pd.get_dummies(df[column_name], prefix=column_name)
df = pd.concat([df, dummies], axis=1)
return df
for column in ["Pclass", "Sex", "Age_categories"]:
train = create_dummies(train, column)
test = create_dummies(test, column)
# # Building our first machine learning model
# Now that the data is ready, we can train our first model. We will use **Logistic Regression**, which is usually the first model tried for a classification task.
# For this we use the **scikit-learn** library, which provides many tools for machine learning. The **scikit-learn** workflow consists of four main steps:
# * Create the machine learning model you want to use.
# * Fit the model to the training data.
# * Use the model to make predictions.
# * Evaluate the accuracy of the predictions.
# In **scikit-learn**, every model is implemented as its own class, so the first step is to identify the class we want to create. Here we will use the **LogisticRegression** class.
# We import the class and create a LogisticRegression object.
# Finally, we train the model with the LogisticRegression.fit() method. The .fit() method takes two arguments, X and y: X must be a two-dimensional array (similar to a dataframe) of the features to train on, and y must be a one-dimensional array (similar to a series) of the target we want to predict.
# Now let's train the model using all of the columns created by the create_dummies() function.
from sklearn.linear_model import LogisticRegression
columns = [
"Pclass_1",
"Pclass_2",
"Pclass_3",
"Sex_female",
"Sex_male",
"Age_categories_Missing",
"Age_categories_Infant",
"Age_categories_Child",
"Age_categories_Teenager",
"Age_categories_Young Adult",
"Age_categories_Adult",
"Age_categories_Senior",
]
lr = LogisticRegression()
lr.fit(train[columns], train["Survived"])
# We have trained a machine learning model. Now we need to make some predictions to see how accurate it is.
# We have a test dataframe we could predict on, but it has no Survived column, so to measure accuracy on it we would have to submit to Kaggle.
# We could also fit and predict on the training dataframe, but that tends to **overfit**: because the model is tested on the same data it was trained on, it performs well there but often does much worse on new data.
# Instead, we can split the training dataframe into two parts:
# * a part used to train the model (usually about 80% of the observations)
# * a part used to make predictions and test the model (usually about 20% of the observations)
# In machine learning these two parts are usually called train and test. To avoid confusion with the test dataframe we will use for the predictions submitted to Kaggle, from here on we will call the data used for those final predictions the holdout data.
# The **scikit-learn** library has a handy **model_selection.train_test_split()** function for splitting the data. **train_test_split()** takes two parameters, **X** and **y**, containing all the data to be split for training and testing, and returns four objects: **train_X, train_y, test_X, test_y**.
# We also pass the additional parameters **test_size** and **random_state** to **train_test_split()**.
holdout = test # from now on we will refer to this
# dataframe as the holdout data
from sklearn.model_selection import train_test_split
all_X = train[columns]
all_y = train["Survived"]
train_X, test_X, train_y, test_y = train_test_split(
all_X, all_y, test_size=0.20, random_state=0
)
# # Making predictions and measuring their accuracy
# Now that we have split the data into training and test sets, we can fit the model again on the training set and use it to make predictions on the test set.
# After fitting the model, we can make predictions with the **LogisticRegression.predict()** method.
# The **predict()** method takes a single parameter X, a two-dimensional array of the features of the observations we want to predict for. X must have exactly the **same features** as the array we used to fit the model. The method **returns** a **one-dimensional array** of predictions.
# The evaluation section of Kaggle's Titanic competition scores submissions by "the percentage of passengers correctly predicted", and that is the accuracy metric we compute here.
# Let's calculate our first accuracy score.
from sklearn.metrics import accuracy_score
lr = LogisticRegression()
lr.fit(train_X, train_y)
predictions = lr.predict(test_X)
accuracy = accuracy_score(test_y, predictions)
print(accuracy)
# # Measuring error more accurately with cross-validation
# The model's accuracy score on the 20% test set is 81.0%. Because this dataset is quite small, there is a good chance the model is overfitting and will not perform as well on completely unseen data.
# To address this, we can use cross-validation: train and test on different splits of the data and then average the accuracy scores.
# The most common cross-validation technique is **k-fold cross-validation**, where a **fold** is one iteration of training the model and **k** is the number of folds.
# The **cross_val_score()** function returns the **accuracy scores** of each **fold** as a **numpy ndarray**. **cross_val_score()** can use various cross-validation techniques and evaluation metrics, but by default it uses **k-fold validation** and the **accuracy score**.
# The code below uses **cross_val_score()** to perform **cross-validation** and then **averages** the resulting scores.
from sklearn.model_selection import cross_val_score
lr = LogisticRegression()
scores = cross_val_score(lr, all_X, all_y, cv=10)
scores.sort()
accuracy = scores.mean()
print(scores)
print(accuracy)
# # Making predictions on new data
# Our k-fold validation results range from **76.4%** to **87.6%**, so the accuracy varies from fold to fold.
# The **average accuracy score** was **80.2%**, which is not far from the **81.0%** we got from the simple **train/test split**; still, we should always use cross-validation to make sure the error metric we get from a model is reliable.
lr = LogisticRegression()
lr.fit(all_X, all_y)
holdout_predictions = lr.predict(holdout[columns])
# # Creating a submission file
# The evaluation page of the Titanic competition specifies the requirements for the **submission** file as follows:
# it should contain only the two columns "**PassengerId**" and "**Survived**", with a header row and exactly 418 entries. "**PassengerId**" may be sorted in any order.
# The file must have exactly 2 columns:
# * PassengerId (sorted in any order)
# * Survived (1 for survived, 0 for died)
# We need to create a new dataframe containing the **holdout_predictions** we just made and the **PassengerId** column of the **holdout dataframe**. Both keep their original order, so we don't need to worry about matching the data up.
# To do this, we can pass a **dictionary** to the **pandas.DataFrame()** function.
holdout_ids = holdout["PassengerId"]
sublesson_df = {"PassengerId": holdout_ids, "Survived": holdout_predictions}
sublesson = pd.DataFrame(sublesson_df)
# Finally, we save the dataframe to a **CSV file** using the **DataFrame.to_csv()** method. Make sure the index parameter is set to False, otherwise an extra column will be added to the CSV.
sublesson.to_csv("sublesson.csv", index=False)
|
# ***1D ARRAY***
import numpy as np
a = np.array([2, 3, 8, 5])
a
# ***IMPLICIT CONVERSION***
b = np.array([1, 2, 3, 4.5])
b
# ***USING ARANGE METHOD***
c = np.arange(10)
c
c = np.arange(10, 20, 2)
c
# ***MEMORY MANAGEMENT***
help(np.array)
d = np.array([1, 2, 3, 4], dtype=np.int8)
d
d = np.array([1, 2, 300, 4], dtype=np.int16)
d
# ***USING LINSPACE METHODS***
e = np.linspace(10, 20, 5)
e
# ***ATTRIBUTES***
a.ndim
a.shape
# **2D ARRAY**
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
a
# ***ATTRIBUTES***
a.ndim
a.shape
# ***ONES METHOD***
import numpy as np
a = np.ones([3, 5])
a
# ***ZEROS METHODS***
b = np.zeros([2, 3])
b
# ***EYE METHODS***
c = np.eye(3)
c
# ***DIAGONAL METHODS***
d = np.diag([1, 2, 3])
d
# ***EXTRACTING DIAGONALS***
np.diag(d)
# **3D ARRAY**
import numpy as np
a = np.array([[[311, 312, 313], [321, 322, 323], [331, 332, 333]]])  # extra pair of brackets makes this a true 3-D array, shape (1, 3, 3)
a
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from io import StringIO
import os
import os.path
import glob as glob
import warnings
# Now we will do EDA (Exploratory Data Analysis)
def get_sensor_names_from_features(feature_names):
feat_sensor_names = np.array([None for feat in feature_names])
for fi, feat in enumerate(feature_names):
if feat.startswith("raw_acc"):
feat_sensor_names[fi] = "Acc"
pass
elif feat.startswith("proc_gyro"):
feat_sensor_names[fi] = "Gyro"
pass
elif feat.startswith("raw_magnet"):
feat_sensor_names[fi] = "Magnet"
pass
elif feat.startswith("watch_acceleration"):
feat_sensor_names[fi] = "WAcc"
pass
elif feat.startswith("watch_heading"):
feat_sensor_names[fi] = "Compass"
pass
elif feat.startswith("location"):
feat_sensor_names[fi] = "Loc"
pass
elif feat.startswith("location_quick_features"):
feat_sensor_names[fi] = "Loc"
pass
elif feat.startswith("audio_naive"):
feat_sensor_names[fi] = "Aud"
pass
elif feat.startswith("audio_properties"):
feat_sensor_names[fi] = "AP"
pass
elif feat.startswith("discrete"):
feat_sensor_names[fi] = "PS"
pass
elif feat.startswith("lf_measurements"):
feat_sensor_names[fi] = "LF"
pass
else:
raise ValueError("!!! Unsupported feature name: %s" % feat)
pass
return feat_sensor_names
def validate_column_names_are_consistent(old_column_names, new_column_names):
if len(old_column_names) != len(new_column_names):
raise ValueError("!!! Inconsistent number of columns.")
for ci in range(len(old_column_names)):
if old_column_names[ci] != new_column_names[ci]:
raise ValueError(
"!!! Inconsistent column %d) %s != %s"
% (ci, old_column_names[ci], new_column_names[ci])
)
pass
return
def get_label_pretty_name(label):
if "FIX_walking" in label:
return "Walking"
if "FIX_running" in label:
return "Running"
if "LOC_main_workplace" in label:
return "At main workplace"
if "OR_indoors" in label:
return "Indoors"
if "OR_outside" in label:
return "Outside"
if "LOC_home" in label:
return "At home"
if "FIX_restaurant" in label:
return "At a restaurant"
if "OR_exercise" in label:
return "Exercise"
if "LOC_beach" in label:
return "At the beach"
if "OR_standing" in label:
return "Standing"
if "WATCHING_TV" in label:
return "Watching TV"
else:
        label = label.replace("label:", "")
if label.endswith("_"):
label = label[:-1] + ")"
pass
label = label.replace("__", " (").replace("_", " ")
label = label[0] + label[1:].lower()
label = label.replace("i m", "I'm")
return label
def get_phone_label(label):
if label == "FIX_walking":
return "Walking"
if label == "FIX_running":
return "Running"
if label == "LOC_main_workplace":
return "At main workplace"
if label == "OR_indoors":
return "Indoors"
if label == "OR_outside":
return "Outside"
if label == "LOC_home":
return "At home"
if label == "FIX_restaurant":
return "At a restaurant"
if label == "OR_exercise":
return "Exercise"
if label == "LOC_beach":
return "At the beach"
if label == "OR_standing":
return "Standing"
if label == "WATCHING_TV":
return "Watching TV"
if label.endswith("_"):
label = label[:-1] + ")"
pass
label = label.replace("__", " (").replace("_", " ")
label = label[0] + label[1:].lower()
label = label.replace("i m", "I'm")
    # if the label is not phone-related, return it; otherwise return False
if "Phone" not in label:
return label
else:
return False
def get_features_from_data(users_df):
for ci, col in enumerate(users_df.columns):
if col.startswith("label:"):
first_label_ind = ci
break
pass
feature_names = users_df.columns[1:first_label_ind]
return np.array(feature_names)
def project_features_to_selected_sensors(feature_names, sensors_to_use):
feature_names_arr = []
for sensor in sensors_to_use:
if sensor == "Acc":
for feature in feature_names:
# print (type(feature))
if feature.startswith("raw_acc"):
feature_names_arr.append(feature)
elif sensor == "WAcc":
for feature in feature_names:
if feature.startswith("watch_acceleration"):
feature_names_arr.append(feature)
elif sensor == "Gyro":
for feature in feature_names:
if feature.startswith("proc_gyro"):
feature_names_arr.append(feature)
elif sensor == "Magnet":
for feature in feature_names:
if feature.startswith("raw_magnet"):
feature_names_arr.append(feature)
elif sensor == "Compass":
for feature in feature_names:
if feature.startswith("watch_heading"):
feature_names_arr.append(feature)
elif sensor == "Loc":
for feature in feature_names:
if feature.startswith("location"):
feature_names_arr.append(feature)
elif sensor == "Aud":
for feature in feature_names:
if feature.startswith("audio_naive"):
feature_names_arr.append(feature)
elif sensor == "AP":
for feature in feature_names:
if feature.startswith("audio_properties"):
feature_names_arr.append(feature)
elif sensor == "PS":
for feature in feature_names:
if feature.startswith("discrete"):
feature_names_arr.append(feature)
elif sensor == "LF":
for feature in feature_names:
if feature.startswith("lf_measurements"):
feature_names_arr.append(feature)
return feature_names_arr
def estimate_standardization_params(X):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
mean_vec = np.nanmean(X, axis=0)
std_vec = np.nanstd(X, axis=0)
return (mean_vec, std_vec)
def standardize_features(X, mean_vec, std_vec):
# Subtract the mean, to centralize all features around zero:
X_centralized = X - mean_vec.reshape((1, -1))
# Divide by the standard deviation, to get unit-variance for all features:
# * Avoid dividing by zero, in case some feature had estimate of zero variance
normalizers = np.where(std_vec > 0.0, std_vec, 1.0).reshape((1, -1))
X_standard = X_centralized / normalizers
return X_standard
def get_label_names(users_df):
# Search for the column of the first label:
for ci, col in enumerate(users_df.columns):
if col.startswith("label:"):
first_label_ind = ci
break
pass
label_names = np.array(users_df.columns[first_label_ind:-1])
for li, label in enumerate(label_names):
# In the CSV the label names appear with prefix 'label:', but we don't need it after reading the data:
assert label.startswith("label:")
# label_names[li] = label.replace('label:','');
pass
return list(label_names)
def print_accuracy_repoprt(predictions, y_test):
accuracy = np.mean(predictions == y_test)
    # Count occurrences of true-positive, true-negative, false-positive, and false-negative:
tp = np.sum(np.logical_and(predictions, y_test))
tn = np.sum(np.logical_and(np.logical_not(predictions), np.logical_not(y_test)))
fp = np.sum(np.logical_and(predictions, np.logical_not(y_test)))
fn = np.sum(np.logical_and(np.logical_not(predictions), y_test))
# Sensitivity (=recall=true positive rate) and Specificity (=true negative rate):
sensitivity = float(tp) / (tp + fn)
specificity = float(tn) / (tn + fp)
# Balanced accuracy is a more fair replacement for the naive accuracy:
balanced_accuracy = (sensitivity + specificity) / 2.0
# Precision:
# Beware from this metric, since it may be too sensitive to rare labels.
# In the ExtraSensory Dataset, there is large skew among the positive and negative classes,
# and for each label the pos/neg ratio is different.
# This can cause undesirable and misleading results when averaging precision across different labels.
precision = float(tp) / (tp + fp)
accuracy_list = [accuracy, sensitivity, specificity, balanced_accuracy, precision]
print("-" * 10)
print("Accuracy*: %.2f" % accuracy)
print("Sensitivity (TPR): %.2f" % sensitivity)
print("Specificity (TNR): %.2f" % specificity)
print("Balanced accuracy: %.2f" % balanced_accuracy)
print("Precision**: %.2f" % precision)
print("-" * 10)
return accuracy_list
import pandas as pd
# sample_user=pd.read_csv('/kaggle/input/exrrasensory-datase/user1.features_labels.csv/user1.features_labels.csv')
sample_user = pd.read_csv(
"/kaggle/input/exrrasensory-dataset/1538C99F-BA1E-4EFB-A949-6C7C47701B20.features_labels.csv/1538C99F-BA1E-4EFB-A949-6C7C47701B20.features_labels.csv"
)
def prepare_X_Y_for_ML(users_df):
# prepare data for machine learning
# 1. get all features available
feature_names = get_features_from_data(users_df)
    # 2. get the sensor name corresponding to each feature
feat_sensor_names = get_sensor_names_from_features(feature_names)
# 3. select the sensors to use in the machine learning
# sensors_to_use = ['Acc','WAcc'];
    # 4. get data according to the selected sensors and their features;
# feature_names_arr = []
# feature_names_arr = project_features_to_selected_sensors(feature_names, sensors_to_use)
X = users_df[feature_names]
    # 5. standardize the features by subtracting the mean and dividing by the standard deviation
# so that all their values will be roughly in the same range:
(mean_vec, std_vec) = estimate_standardization_params(X)
X = standardize_features(X, mean_vec, std_vec)
X[np.isnan(X)] = 0.0
# 6. X is ready for training
    # 7. Prepare Y target labels for training
label_names = get_label_names(users_df)
Y = users_df[label_names]
    # 8. clean NaN values and convert to binary labels
# Read the binary label values, and the 'missing label' indicators:
trinary_labels_mat = users_df[label_names]
# This should have values of either 0., 1. or NaN
M = np.isnan(trinary_labels_mat)
# M is the missing label matrix
Y = np.where(M, 0, trinary_labels_mat) > 0.0
# Y is the label matrix
y_df = pd.DataFrame(Y)
y_df.rename(columns=dict(enumerate(label_names, 0)), inplace=True)
return (X, y_df, M, feature_names, label_names)
sample_user.info()
# process the data to get features data and context label data
(X, Y, M, feature_names, label_names) = prepare_X_Y_for_ML(sample_user)
XY = pd.concat([X, Y], axis=1, sort=False)
XY.head()
for column in sample_user.columns:
print(column)
Y
# finding relation between labels
corr = Y[label_names].corr().sort_values(by=label_names, ascending=False)
corr
# removing nan
corr.dropna(how="all")
# showing heatmap for correlation
corr.dropna(how="all").style.background_gradient(cmap="coolwarm", axis=None)
n_examples_per_label = np.sum(np.array(Y), axis=0)
labels_and_counts = zip(label_names, n_examples_per_label)
sorted_labels_and_counts = sorted(
labels_and_counts, reverse=True, key=lambda pair: pair[1]
)
print("number of examples for every context label:")
print("-" * 20)
i = 0
label_x_arr = []
label_y_arr = []
for label, count in sorted_labels_and_counts:
i = i + 1
label_x_arr.append(label)
label_y_arr.append(count)
print(" %i : %s - %d minutes" % (i, label, count))
pass
labels_df = pd.DataFrame(sorted_labels_and_counts)
labels_df.rename(columns={0: "label"}, inplace=True)
labels_df.rename(columns={1: "count"}, inplace=True)
labels_df.plot(
x="label", y="count", kind="bar", legend=False, grid=True, figsize=(20, 8)
)
feat_sensor_names = get_sensor_names_from_features(feature_names)
print(pd.unique(feat_sensor_names))
features_of_selected_sensors = project_features_to_selected_sensors(
feature_names, ["Acc", "WAcc"]
)
features_of_selected_sensors
# We will implement two Machine Learning models:
# 1. Logistic Regression
# 2. KNN Model
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from io import StringIO
import os
import os.path
import glob as glob
import warnings
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from skmultilearn.problem_transform import BinaryRelevance
from sklearn.naive_bayes import GaussianNB
from skmultilearn.problem_transform import ClassifierChain
from sklearn import metrics
X_train, X_test, y_train, y_test = train_test_split(
X[features_of_selected_sensors],
Y["label:FIX_walking"],
test_size=0.30,
random_state=42,
)
logmodel = LogisticRegression(max_iter=200)
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
logmodel_results = print_accuracy_repoprt(predictions, y_test)
# Trying the KNN Model
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(XY[features_of_selected_sensors])
scaled_features = scaler.transform(XY[features_of_selected_sensors])
df_feat = pd.DataFrame(scaled_features, columns=features_of_selected_sensors)
df_feat.head()
X_train, X_test, y_train, y_test = train_test_split(
scaled_features, XY["label:FIX_walking"], test_size=0.30, random_state=42
)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
knn_results = print_accuracy_repoprt(pred, y_test)
# Choosing a good k value using elbow method
error_rate = []
# Will take some time
for i in range(1, 40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10, 6))
plt.plot(
range(1, 40),
error_rate,
color="blue",
linestyle="dashed",
marker="o",
markerfacecolor="red",
markersize=10,
)
plt.title("Error Rate vs. K Value")
plt.xlabel("K")
plt.ylabel("Error Rate")
knn = KNeighborsClassifier(n_neighbors=17)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print("WITH K=17")
print("\n")
print(confusion_matrix(y_test, pred))
print("\n")
print(classification_report(y_test, pred))
knn_results = print_accuracy_repoprt(pred, y_test)
# We have shown for the label "Walking" that KNN gives much better results with k=17
# Now we will do Multilabel Classification
arr_labels = [
"label:FIX_walking",
"label:LYING_DOWN",
"label:LOC_home",
"label:LOC_main_workplace",
"label:SITTING",
"label:OR_standing",
"label:SLEEPING",
]
mlp_logmodel_results = list()
for label in arr_labels:
X_train, X_test, y_train, y_test = train_test_split(
X[features_of_selected_sensors], Y[label], test_size=0.30, random_state=42
)
logmodel = LogisticRegression(max_iter=200)
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
print(classification_report(y_test, predictions))
mlp_logmodel_results.append(print_accuracy_repoprt(predictions, y_test))
|
# # Agenda of the meeting
# * How could recent developments in AI impact us as software engineers?
# * What exactly goes on behind the scenes in a simple AI/ML model? How does it work?
# * Intro to Natural Language Processing: what are LLMs, and how does ChatGPT work?
# # Recent Advancements in NLP
#
# * All of us have been seeing the staggering recent advancements in the Natural Language Processing field with ChatGPT and
# the big LLMs (Large Language Models) that power ChatGPT and similar tools.
#
# * Why should we as software engineers care?
# ## My Own experience With GPT
# ## Writing multithreaded JDBC code for some MySQL analysis.
# 
# ## Generating mock data using sample tables for the same.
# 
# # What would normally have taken 30-40 minutes took me less than 5 minutes.
# Of course, I had to tweak the code here and there, and there was one bug, which I fixed. Still, a huge improvement in productivity.
# ## This is just GPT-3.5. GPT-4 is much, much better at coding and logical reasoning.
# GPT-4 can solve full-fledged problem statements. What would normally take a few days or weeks might be possible in a few hours.
# ## What this means for us (just based on my observation)
# 1. If we are already skilled at something, then tools like ChatGPT might be a big productivity boost.
# 2. These tools allow us to try more things, try more projects, since time taken to implement a new project comes down.
# 3. Many times it generates code with minor bugs/errors, so we have to be cautious while using it.
# ### What ChatGPT and code generation tools are good at:
# * Generating code for simple use cases.
# * Generating boilerplate code.
# * Explaining a given code snippet in plain English.
# * Acting as a chatbot for any API/framework documentation.
# * Generating unit tests.
# * Writing shell scripts for trivial tasks.
# ### What ChatGPT and code generation tools are not good at:
# * Proper designing and structuring of code.
# * Deciding what to build (ultimately we have to decide and start thinking more like PMs).
# # Intro to Machine Learning Basics
# What do we mean by Machine Learning? AI is an umbrella term that covers a lot of topics, and ML is the core part of it, so we will cover ML basics here.
# > Input --> Algorithm --> output
# ### Traditional Programming
# > We provide - Input, algorithm
# > We get - Output
# ### Machine Learning
# > We provide - Input, Output (As training dataset)
# > We get - algorithm (i.e. the computer itself will learn what algorithm to use.)
# Based on the learned algorithm, the computer will act on inputs it has not yet seen.
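# As a small illustration of this idea, the sketch below (using scikit-learn's LinearRegression purely as an example)
# supplies only inputs and outputs generated by the rule y = 2x + 1; the computer "learns" the rule from the data
# and can then act on an input it has not seen.
from sklearn.linear_model import LinearRegression
import numpy as np

example_X = np.array([[1], [2], [3], [4], [5]])  # inputs we provide
example_y = np.array([3, 5, 7, 9, 11])  # outputs we provide (y = 2x + 1)
example_model = LinearRegression()
example_model.fit(example_X, example_y)  # training: the rule is learned from the data
print(example_model.coef_, example_model.intercept_)  # approximately [2.] and 1.0
print(example_model.predict(np.array([[10]])))  # unseen input: approximately [21.]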
from transformers import AutoImageProcessor, ResNetForImageClassification
import torch
from datasets import load_dataset
from matplotlib import pyplot as plt
dataset = load_dataset("huggingface/cats-image")
image = dataset["test"]["image"][0]
plt.imshow(image, interpolation="nearest")
plt.show()
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
logits = model(**inputs).logits
# model predicts one of the 1000 ImageNet classes
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
|
# Since it is getting harder to update this every week and also some people might need it more often than a week, I have put together everything into python code.
# Please use the below code to pull the data from the websites mentioned in the data overview section.
# All the data credits go to the corresponding owners. Please make sure you mention their names if you use it somewhere.
# **Code to get data from coin market cap:**
# Please uncomment the "get_data" function in the last line run it in local
# -*- coding: utf-8 -*-
import re
import sys
import csv
import time
import random
import requests
from datetime import date
from bs4 import BeautifulSoup
end_date = str(date.today()).replace("-", "")
base_url = (
"https://coinmarketcap.com/currencies/{0}/historical-data/?start=20130428&end="
+ end_date
)
currency_name_list = [
"bitcoin",
"ethereum",
"ripple",
"bitcoin-cash",
"nem",
"litecoin",
"dash",
"ethereum-classic",
"iota",
"neo",
"stratis",
"monero",
"waves",
"bitconnect",
"omisego",
"qtum",
"numeraire",
]
def get_data(currency_name):
print("Currency : ", currency_name)
url = base_url.format(currency_name)
html_response = requests.get(url).text.encode("utf-8")
soup = BeautifulSoup(html_response, "html.parser")
table = soup.find_all("table")[0]
elements = table.find_all("tr")
with open("./{0}_price.csv".format(currency_name.replace("-", "_")), "w") as ofile:
writer = csv.writer(ofile)
for element in elements:
writer.writerow(element.get_text().strip().split("\n"))
time.sleep(1)
if __name__ == "__main__":
for currency_name in currency_name_list:
# get_data(currency_name)
pass
# **Code to get bitcoin dataset:**
# Code to get the features from the blockchain.info site. Please uncomment the function call and run it locally.
import time
import requests
import pandas as pd
urls = [
"https://blockchain.info/charts/market-price",
"https://blockchain.info/charts/total-bitcoins",
"https://blockchain.info/charts/market-cap",
"https://blockchain.info/charts/trade-volume",
"https://blockchain.info/charts/blocks-size",
"https://blockchain.info/charts/avg-block-size",
"https://blockchain.info/charts/n-orphaned-blocks",
"https://blockchain.info/charts/n-transactions-per-block",
"https://blockchain.info/charts/median-confirmation-time",
"https://blockchain.info/charts/hash-rate",
"https://blockchain.info/charts/difficulty",
"https://blockchain.info/charts/miners-revenue",
"https://blockchain.info/charts/transaction-fees",
"https://blockchain.info/charts/cost-per-transaction-percent",
"https://blockchain.info/charts/cost-per-transaction",
"https://blockchain.info/charts/n-unique-addresses",
"https://blockchain.info/charts/n-transactions",
"https://blockchain.info/charts/n-transactions-total",
"https://blockchain.info/charts/n-transactions-excluding-popular",
"https://blockchain.info/charts/n-transactions-excluding-chains-longer-than-100",
"https://blockchain.info/charts/output-volume",
"https://blockchain.info/charts/estimated-transaction-volume",
"https://blockchain.info/charts/estimated-transaction-volume-usd",
]
suffix_to_add = "?timespan=8years&format=csv"
def get_btc_data():
counter = 0
for url in urls:
header = ["Date", "btc_" + url.split("/")[-1].replace("-", "_")]
print(header[-1])
temp_df = pd.read_csv(url + suffix_to_add, header=None, names=header)
if counter == 0:
df = temp_df.copy()
else:
df = pd.merge(df, temp_df, on="Date", how="left")
print(temp_df.shape, df.shape)
counter += 1
time.sleep(1)
df.to_csv("../input_v9/bitcoin_dataset.csv", index=False)
# get_btc_data()
# **Code to get Ethereum dataset from EtherScan:**
# Please find below the code to get Ethereum-related info from etherscan.io. Please uncomment the last line and run it locally.
import time
import requests
import pandas as pd
urls = [
"https://etherscan.io/chart/etherprice",
"https://etherscan.io/chart/tx",
"https://etherscan.io/chart/address",
"https://etherscan.io/chart/marketcap",
"https://etherscan.io/chart/hashrate",
"https://etherscan.io/chart/difficulty",
"https://etherscan.io/chart/blocks",
"https://etherscan.io/chart/uncles",
"https://etherscan.io/chart/blocksize",
"https://etherscan.io/chart/blocktime",
"https://etherscan.io/chart/gasprice",
"https://etherscan.io/chart/gaslimit",
"https://etherscan.io/chart/gasused",
"https://etherscan.io/chart/ethersupplygrowth",
"https://etherscan.io/chart/ens-register",
]
suffix_to_add = "?output=csv"
def get_ether_data():
counter = 0
for url in urls:
header = ["Date", "TimeStamp", "eth_" + url.split("/")[-1].replace("-", "_")]
print(header[-1])
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36",
}
with open("temp.csv", "w") as ofile:
response = requests.get(url + suffix_to_add, headers=headers).text
ofile.write(response)
temp_df = pd.read_csv("temp.csv")
col_names = temp_df.columns.tolist()
if col_names[-1] == "Value" or col_names[-1] == "Value (Wei)":
col_names = col_names[:2] + [header[-1]]
temp_df.columns = col_names
else:
temp_df = temp_df[["Date(UTC)", "UnixTimeStamp", "Supply", "MarketCap"]]
temp_df.columns = [
"Date(UTC)",
"UnixTimeStamp",
"eth_supply",
"eth_marketcap",
]
if counter == 0:
df = temp_df.copy()
else:
df = pd.merge(df, temp_df, on=["Date(UTC)", "UnixTimeStamp"], how="left")
print(temp_df.shape, df.shape)
counter += 1
time.sleep(1)
df.to_csv("../input_v9/bitcoin_dataset.csv", index=False)
# get_ether_data()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
x = [5, 7, 8, 7, 2, 17, 2, 9, 4, 11, 12, 9, 6]
y = [99, 86, 87, 88, 111, 86, 103, 87, 94, 78, 77, 85, 86]
plt.scatter(x, y)
plt.show()
import numpy
import matplotlib.pyplot as plt
x = numpy.random.normal(5.0, 1.0, 1000)
y = numpy.random.normal(10.0, 2.0, 1000)
plt.scatter(x, y)
plt.show()
|
# # CUSTOMER BEHAVIOR ANALYSIS
# Customer behavior analysis is a vital process that can provide businesses with valuable insights into their customers' behaviors and preferences. In this project, I analyzed the CDNOW dataset to determine customer buying patterns based on Recency, Frequency, and Monetary Value (RFM).
# **-** Using Python, I performed RFM analysis to determine each customer's Recency, Frequency, and Monetary Value based on their transaction history. This analysis will help us understand how recently and how often customers make purchases, as well as the average amount they spend.
# **-** Next, I used the K-Means algorithm to segment customers into groups based on their RFM scores. This segmentation will help identify distinct customer groups and tailor marketing strategies and promotions to each group's specific needs and preferences.
# **-** I also developed a machine learning model to predict the probability of customer purchase and the likely purchase amount using XGBRegressor and XGBClassifier. By predicting customer behavior, businesses can better understand their customers' needs and preferences and adjust their marketing strategies accordingly.
# **-** Finally, I conducted cohort analysis to determine customer lifetime value (CLV) and measure the effectiveness of our marketing strategies. By analyzing customer behavior over time, we can gain insights into how our customer base changes and adapt our strategies to meet their evolving needs.
# Overall, this project will provide valuable insights into customer behavior, allowing businesses to improve customer engagement, retention, and revenue.
#
# import relevant packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import warnings
warnings.filterwarnings("ignore")
from sklearn import preprocessing
import joblib
import plydata.cat_tools as cat
import plotnine as pn
from xgboost import XGBClassifier, XGBRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
df = pd.read_csv("cdnow.csv")
# ## Customer Segmentation
# We segment the customers into groups with certain similarities to get a broad view of the customer spread. Then we can begin to answer some business questions.
df = df.assign(date=lambda x: pd.to_datetime(x["date"])).dropna()
df.info()
df.drop("Unnamed: 0", axis=1, inplace=True)
# ### RFM Analysis
# RFM analysis is a marketing technique used to analyze customer behavior based on three factors:
# Recency, Frequency, and Monetary value of purchases.
# It helps businesses identify their most valuable customers and design targeted marketing
# strategies.
# Recency: days since each customer's most recent purchase, measured relative to the latest date in the data
max_date = df["date"].max()
recency_features_df = (
df[["customer_id", "date"]]
.groupby("customer_id")
.apply(lambda x: (x["date"].max() - max_date) / pd.to_timedelta(1, "day"))
.to_frame()
.set_axis(["recency"], axis=1)
)
# Monetary value: total amount spent per customer
price_feature_df = (
df[["customer_id", "date", "price"]]
.groupby("customer_id")
.aggregate({"price": "sum"})
.set_axis(["money_value"], axis=1)
)
# Frequency: number of purchases per customer
frequency_features_df = (
df[["customer_id", "date"]]
.groupby("customer_id")
.count()
.set_axis(["frequency"], axis=1)
)
# Combine features
rfm_df = pd.concat(
[recency_features_df, frequency_features_df, price_feature_df], axis=1
)
rfm_df.head(3)
# ### K-Means Clustering
# scale the dataframe
scaler = StandardScaler()
scaled_df = scaler.fit_transform(rfm_df)
# First, we run a range of different K-values to find the optimal K
inertia = []
val_range = range(1, 11)
for i in val_range:
kmean = KMeans(n_clusters=i)
kmean.fit(pd.DataFrame(scaled_df))
inertia.append(kmean.inertia_)
inertia
plt.plot(val_range, inertia, "bo-")
plt.xlabel("K-Values")
plt.ylabel("Inertia")
plt.title("The Elbow Method")
plt.show()
# Initialize a KMeans object with the optimal K-value from the elbow plot (K=4)
kmeans = KMeans(n_clusters=4, random_state=42)
kmeans.fit_predict(scaled_df)
labels = kmeans.labels_
kmeans.inertia_
# Let's plot the clusters using PCA.
# We use PCA to reduce the number of dimensions, i.e., the number of columns, down to two.
pca = PCA(n_components=2)
principal_components = pca.fit_transform(scaled_df)
pca_df = pd.DataFrame(data=principal_components, columns=["PCA1", "PCA2"])
pca_df
pca_df["cluster"] = labels
pca_df
# plot the clusters
plt.figure(figsize=(8, 4))
ax = sns.scatterplot(
x="PCA1",
y="PCA2",
hue="cluster",
data=pca_df,
palette=["red", "green", "blue", "black"],
s=50,
)
plt.title("Clustering using K-Means Algorithm")
plt.show()
# add the labels to rfm_df to see each customer and their corresponding label/group.
clustered_df = rfm_df.copy()
clustered_df["cluster"] = labels
# check the mean of each feature for each group
clustered_df.groupby("cluster").mean()
def get_status(row):
if row["cluster"] == 0:
return "Losing"
elif row["cluster"] == 1:
return "average"
elif row["cluster"] == 2:
return "loyal"
else:
return "VIP"
clustered_df["Cus_status"] = clustered_df.apply(get_status, axis=1)
clustered_df = clustered_df.reset_index()
clustered_df.head(3)
clustered_df["Cus_status"].value_counts()
sns.countplot(x="Cus_status", data=clustered_df)
plt.show()
# **Now we have identified the different groups to which each customer belongs and can therefore develop specific marketing strategies suitable for each group.**
# **However, we can further refine our dataset to obtain specific insights, such as identifying the customers with the highest spend probability, predicting a customer's potential spending amount, recognizing missed opportunities, and identifying customers who have made recent purchases but are unlikely to make more.
# These insights can support further investigations that generate meaningful information, crucial for creating a successful and targeted marketing campaign.**
# ## Machine learning
# ##### - What will customers spend in the next 90 days? (regression)
# ##### - What is the probability that a customer makes a purchase in the next 90 days? (classification)
df.head()
df.info()
# visualize: individual customer purchases
ids = np.random.choice(df["customer_id"], 20)
# selected_ids = ids[0:12]
# selected_ids
ids
cust_id_subset_df = (
df[df["customer_id"].isin(ids)].groupby(["customer_id", "date"]).sum().reset_index()
)
# create the plot
(
pn.ggplot(
data=cust_id_subset_df, mapping=pn.aes(x="date", y="price", group="customer_id")
)
+ pn.geom_line()
+ pn.geom_point()
+ pn.facet_wrap("customer_id")
+ pn.scale_x_date(date_breaks="2 year", date_labels="%Y")
)
n_days = 90
max_order_date = df["date"].max()
cutoff = max_order_date - pd.to_timedelta(n_days, unit="d")
temporal_behind_df = df[df["date"] <= cutoff]
temporal_ahead_df = df[df["date"] >= cutoff]
temporal_ahead_df.head()
# Make the target from the post-cutoff (next 90 days) data
targets_df = (
temporal_ahead_df.drop("quantity", axis=1)
.groupby("customer_id")
.sum()
.rename({"price": "spend_90_total"}, axis=1)
.assign(spend_90_flag=1)
)
targets_df.head()
# Make recency (OrderDate) features from the pre-cutoff (behind) data
max_date = temporal_behind_df["date"].max()
recency_features_df = (
temporal_behind_df[["customer_id", "date"]]
.groupby("customer_id")
.apply(lambda x: (x["date"].max() - max_date) / pd.to_timedelta(1, "day"))
.to_frame()
.set_axis(["recency"], axis=1)
)
# recency_features_df.head()
# Make frequency (count) features from the pre-cutoff data
frequency_features_df = (
temporal_behind_df[["customer_id", "date"]]
.groupby("customer_id")
.count()
.set_axis(["frequency"], axis=1)
)
# Make price (monetary value) features from the pre-cutoff data
price_feature_df = (
temporal_behind_df[["customer_id", "date", "price"]]
.groupby("customer_id")
.aggregate({"price": ["sum", "mean"]})
.set_axis(["Sales_sum", "Sales_mean"], axis=1)
)
# Combine features
Features_df = (
pd.concat([recency_features_df, frequency_features_df, price_feature_df], axis=1)
.merge(targets_df, left_index=True, right_index=True, how="left")
.fillna(0)
)
Features_df.head()
X = Features_df[["recency", "frequency", "Sales_sum", "Sales_mean"]]
# next 90 days
y_spend = Features_df["spend_90_total"]
xgb_reg_spec2 = XGBRegressor(objective="reg:squarederror", random_state=123)
param_grid = {"learning_rate": [0.01, 0.1, 0.3, 0.5]}
xgb_reg_model2 = GridSearchCV(
estimator=xgb_reg_spec2,
param_grid=param_grid,
scoring="neg_mean_absolute_error",
cv=5,
n_jobs=-1,
)
xgb_reg_model2.fit(X, y_spend)
# Print the best parameters and best score
print(f"Best parameters: {xgb_reg_model2.best_params_}")
print(f"Best score: {xgb_reg_model2.best_score_}")
y_pred = xgb_reg_model2.predict(X)
y_pred[:10]
# Next 90-days spend probability
y_prob = Features_df["spend_90_flag"]
# Define the XGBClassifier with some initial hyperparameters
from sklearn.metrics import roc_auc_score
xgb_clf = XGBClassifier(
objective="binary:logistic",
learning_rate=0.1,
max_depth=5,
subsample=0.75,
colsample_bytree=0.75,
gamma=0.1,
reg_alpha=0.1,
reg_lambda=0.1,
random_state=123,
)
# Fit the model on the training data
xgb_clf.fit(X, y_prob)
# Evaluate the model on the test data
y_pred = xgb_clf.predict_proba(X)[:, 1]
auc_score = roc_auc_score(y_prob, y_pred)
print("ROC AUC score: {:.4f}".format(auc_score))
y_pred_prob = xgb_clf.predict_proba(X)
print("ROC AUC score: {:.4f}".format(auc_score))
# ## Feature Importance
# Importance | Spend Amount Model
imp_spend_amount_dict = xgb_reg_model2.best_estimator_.get_booster().get_score(
importance_type="gain"
)
# Create a pandas DataFrame from a dictionary of important spending amounts
imp_spend_amount_df = pd.DataFrame(
{
"features": list(imp_spend_amount_dict.keys()),
"value": list(imp_spend_amount_dict.values()),
}
)
# Reorder the "features" column based on the corresponding "value" column
imp_spend_amount_df = imp_spend_amount_df.assign(
features=lambda df: cat.cat_reorder(df["features"], df["value"])
)
(
pn.ggplot(data=imp_spend_amount_df, mapping=pn.aes("features", "value"))
+ pn.geom_col()
+ pn.coord_flip()
)
importance_scores = xgb_clf.feature_importances_
feature_names = X.columns # Assuming X is a pandas DataFrame
sorted_idx = importance_scores.argsort()
imp_spend_prob_df = pd.DataFrame(
{"features": feature_names[sorted_idx], "value": importance_scores[sorted_idx]}
)
# Reorder the "features" column based on the corresponding "value" column
imp_spend_prob_df = imp_spend_prob_df.assign(
features=lambda df: cat.cat_reorder(df["features"], df["value"])
)
imp_spend_amount_df.value_counts(normalize=True)
(
pn.ggplot(data=imp_spend_prob_df, mapping=pn.aes("features", "value"))
+ pn.geom_col()
+ pn.coord_flip()
)
# Save Predictions
predictions_df = pd.concat(
[
Features_df.reset_index(),
pd.DataFrame(y_pred).set_axis(["spend_pred"], axis=1),
pd.DataFrame(y_pred_prob)[[1]].set_axis(["spend_prob"], axis=1),
],
axis=1,
)
# add each customer's length of stay with the company to predictions_df
df["Customer_Start_Date"] = df.groupby("customer_id")["date"].transform("min")
df["Days_In_Company"] = (
df["date"] - df["Customer_Start_Date"] + pd.Timedelta(days=1)
).dt.days
predictions_df["Days_In_Company"] = df["Days_In_Company"]
# save
# predictions_df.to_pickle("artifacts/predictions_df.pkl")
# pd.read_pickle("folderName/predictions_df.pkl")
# load model
# model = joblib.load("folderName/xgb_clf_model")
# model.predict(X)
# **#Question 1:**
# Which customers have the highest spend probability in the next 90 days?
# - Target them for new products similar to what they have purchased in the past.
Highest_prob_df = predictions_df.sort_values("spend_prob", ascending=False)
Highest_prob_df
# **#Question 2: Which customers have recently purchased but are unlikely to buy?**
# - Incentivize actions to increase probability
# - Provide discounts, encourage referring a friend, and nurture them by letting them know what's coming.
predictions_df[predictions_df["recency"] > -90][predictions_df["spend_prob"] < 0.20][
predictions_df["Days_In_Company"] >= 520
].sort_values("spend_prob", ascending=False)
# **#Question 3:** Missed opportunities: We could unlock Big spenders
# - Send bundle offers encouraging volume purchases
# - Focus on missed opportunities
predictions_df[predictions_df["spend_90_total"] == 0.0].sort_values(
"spend_pred", ascending=False
)
# Investigate clusters
cluster_0 = clustered_df[clustered_df["cluster"] == 0]["customer_id"].unique()
cluster_1 = clustered_df[clustered_df["cluster"] == 1]["customer_id"].unique()
cluster_2 = clustered_df[clustered_df["cluster"] == 2]["customer_id"].unique()
cluster_3 = clustered_df[clustered_df["cluster"] == 3]["customer_id"].unique()
# Customers with the highest chance of buying in cluster_0
cust_id_subset_df = predictions_df[predictions_df["recency"] < -90][
predictions_df["spend_prob"] >= 0.30
][predictions_df["customer_id"].isin(cluster_0)].sort_values(
"spend_prob", ascending=False
)
cust_id_subset_df
# # Cohort Analysis
# Define the function to extract the year and month of a date
def get_year_month(date):
return (date.year, date.month)
# Add a 'Order_Month' column to the dataframe
df["Order_Month"] = df["date"].apply(get_year_month)
# convert 'Order_Month' to datetime format
df["Order_Month"] = pd.to_datetime(
df["Order_Month"].apply(
lambda x: pd.to_datetime("-".join(map(str, x)), format="%Y-%m")
)
)
# extract year-month from 'Order_Month' as 'Order_Month'
df["Order_Month"] = df["Order_Month"].apply(lambda x: x.strftime("%Y-%m"))
df["Customer_Start_Date"] = df.groupby("customer_id")["date"].transform("min")
df["Cohort_Month"] = df["Customer_Start_Date"].apply(lambda x: x.strftime("%Y-%m"))
df.head(3)
cohort_data = df.groupby(["Customer_Start_Date", "Order_Month", "Cohort_Month"]).agg(
{"price": "sum", "customer_id": "count"}
)
cohort_data = cohort_data.rename(
columns={"price": "total_revenue", "customer_id": "total_customers"}
)
cohort_data = cohort_data.reset_index()
cohort_data
cohort_data.head()
df["Order_Month"] = pd.to_datetime(df["Order_Month"])
cohort_data["Cohort_Month"] = pd.to_datetime(cohort_data["Cohort_Month"])
cohort_data["Order_Month"] = pd.to_datetime(
cohort_data["Order_Month"]
) # Convert to datetime format
cohort_data["Cohort_Index"] = (
(cohort_data["Order_Month"].dt.year - cohort_data["Cohort_Month"].dt.year) * 12
+ (cohort_data["Order_Month"].dt.month - cohort_data["Cohort_Month"].dt.month)
+ 1
)
cohort_data.head()
# Pivot the data to create the cohort analysis table
cohort_table = pd.pivot_table(
cohort_data,
values="total_customers",
index="Cohort_Month",
columns="Cohort_Index",
aggfunc=np.mean,
)
# Display the cohort analysis table
cohort_table
# build a cohort-level table, one row per (Cohort_Month, Cohort_Index), aggregated from cohort_data
cohorts = cohort_data.groupby(["Cohort_Month", "Cohort_Index"]).agg(
    total_revenue=("total_revenue", "sum"), Total_Customers=("total_customers", "sum")
)
# calculate the size of each cohort (customers observed in its first month)
cohort_sizes = cohorts["Total_Customers"].groupby(level=0).first()
# calculate the retention rate for each cohort
cohorts["Retention_Rate"] = cohorts["Total_Customers"].div(cohort_sizes, level=0)
# calculate the average customer lifetime value for each cohort
cohorts["Customer_Lifetime_Value"] = (
    cohorts["total_revenue"] / cohorts["Total_Customers"]
)
|
t = ()  # initializing an empty tuple
type(t)
t = ("NMT", 3.23, 20, "BAS", 0.23, 25)
type(t)
# **Indexing**
t[1] # similar to list
# **slicing**
t[0:2]
t[:5] # similar to list and string
a = ("NMT", 3.23, 20, "BAS", 0.23, 25, ["red", "pink"], (20, 36))
a[7][0]
a[6][0] = "green"
a
# **looping**
p = ("NMT", 3.23, 20, "BAS", 0.23, 25, ["red", "pink"], (20, 36))
p
for o in p:
print(o)
l = len(p)
l
o = 0
while l > 0:
print(p[o])
o = o + 1 # increment the value of o to go to next element
l = l - 1
# **Zip function**
l1 = ["Naveen", "Basuraj", "Shivraj"]
l2 = [20, 25, 30]
z = zip(l1, l2)
final_list = list(z)
print(final_list)
for i in final_list:
print(i)
for i, j in final_list:
print(i, j)
l = [["na", 25], ["ma", 26], ["pa", 58]] # assign multiple variable simulataneously
l
for i, j in l:
print(i, j)
# **Methods**
t = (2, 8, 7, 6, 8, 9)
len(t)
# **concatenate/merge 2 tuple**
t2 = (99, 100)
t3 = t + t2
t3
max(t3)
min(t3)
t3.count(8)
sum(t3)
t3.index(8)
|
import pathlib
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Define the dataset path
data_dir = "../input/rice-image-dataset/Rice_Image_Dataset"
data_dir = pathlib.Path(data_dir)
# Define the classes
class_names = ["Arborio", "Basmati", "Ipsala", "Jasmine", "Karacadag"]
arborio = list(data_dir.glob("Arborio/*"))[:600]
basmati = list(data_dir.glob("Basmati/*"))[:600]
ipsala = list(data_dir.glob("Ipsala/*"))[:600]
jasmine = list(data_dir.glob("Jasmine/*"))[:600]
karacadag = list(data_dir.glob("Karacadag/*"))[:600]
def load_images_and_labels(image_paths, class_names):
images = []
labels = []
for label, class_name in enumerate(class_names):
for img_path in image_paths[class_name]:
img = image.load_img(
img_path, target_size=(32, 32)
) # Change the target size here
img_array = image.img_to_array(img)
images.append(img_array)
labels.append(label)
images = np.array(images)
labels = np.array(labels)
return images, labels
def build_nn_model(input_shape, num_layers, num_neurons):
model = Sequential()
model.add(Flatten(input_shape=input_shape))
for _ in range(num_layers):
model.add(Dense(num_neurons, activation="relu"))
model.add(Dense(len(class_names), activation="softmax"))
model.compile(
optimizer=Adam(), loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
return model
def train_and_evaluate_models(
X, y, num_layers_list, num_neurons_list, train_test_splits
):
results = []
for num_layers in num_layers_list:
for num_neurons in num_neurons_list:
for split in train_test_splits:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=split, random_state=42
)
# Preprocess the data
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_gen = datagen.flow(X_train, y_train, batch_size=32)
test_gen = datagen.flow(X_test, y_test, batch_size=32)
# Train MLP Classifier
mlp = MLPClassifier(
hidden_layer_sizes=[num_neurons] * num_layers,
max_iter=1000,
random_state=42,
)
mlp.fit(train_gen.x.reshape(train_gen.x.shape[0], -1), train_gen.y)
mlp_train_acc = accuracy_score(
train_gen.y,
mlp.predict(train_gen.x.reshape(train_gen.x.shape[0], -1)),
)
mlp_test_acc = accuracy_score(
test_gen.y, mlp.predict(test_gen.x.reshape(test_gen.x.shape[0], -1))
)
# Train TensorFlow Neural Network
nn = build_nn_model(X_train.shape[1:], num_layers, num_neurons)
nn.fit(train_gen, epochs=10, verbose=0)
nn_train_acc = nn.evaluate(train_gen, verbose=0)[1]
nn_test_acc = nn.evaluate(test_gen, verbose=0)[1]
result = {
"num_layers": num_layers,
"num_neurons": num_neurons,
"train_test_split": split,
"mlp_train_acc": mlp_train_acc,
"mlp_test_acc": mlp_test_acc,
"nn_train_acc": nn_train_acc,
"nn_test_acc": nn_test_acc,
}
results.append(result)
print(f"Completed training for combination: {result}")
results_df = pd.DataFrame(results)
return results_df
# Load images and their labels
image_paths = {
"Arborio": list(data_dir.glob("Arborio/*"))[:600],
"Basmati": list(data_dir.glob("Basmati/*"))[:600],
"Ipsala": list(data_dir.glob("Ipsala/*"))[:600],
"Jasmine": list(data_dir.glob("Jasmine/*"))[:600],
"Karacadag": list(data_dir.glob("Karacadag/*"))[:600],
}
X, y = load_images_and_labels(image_paths, class_names)
# Set hyperparameters
num_layers_list = [1, 2, 3, 4, 5]
num_neurons_list = [32, 64, 128]
train_test_splits = [0.1, 0.2, 0.3, 0.4, 0.5]
# Train and evaluate the models
results_df = train_and_evaluate_models(
X, y, num_layers_list, num_neurons_list, train_test_splits
)
# Save the results to a CSV file
results_df.to_csv("model_results.csv", index=False)
# Display the results
print(results_df)
results_df
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import os
# os.environ['PYTORCH_CUDA_ALLOC_CONF'] = '0:4096'
# ====================================================
# CFG
# ====================================================
class CFG:
wandb = True
competition = "lecr"
debug = False
apex = True
print_freq = 20
num_workers = 4
# model = "microsoft/deberta-v3-base"
# model = "microsoft/mdeberta-v3-base"
# model = "sentence-transformers/all-MiniLM-L6-v2"
model = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# model = "mpnet_basev2_first_pretrain"
# model = "output_simcse_model_with_pretrain_sep_epo66_945"
# model = "bert-large-multilingual-cased"
gradient_checkpointing = True
scheduler = "cosine" # ['linear', 'cosine']
batch_scheduler = True
num_cycles = 0.5
num_warmup_steps = 0
epochs = 100
encoder_lr = 2e-5
decoder_lr = 2e-5
min_lr = 1e-6
eps = 1e-6
layerwise_learning_rate_decay = 0.9
adam_epsilon = 1e-6
betas = (0.9, 0.999)
batch_size = 32
max_len = 160
weight_decay = 0.01
gradient_accumulation_steps = 1
max_grad_norm = 1000
seed = 42
n_fold = 1
trn_fold = [0]
train = True
if CFG.debug:
CFG.epochs = 2
CFG.trn_fold = [0]
# ====================================================
# Library
# ====================================================
import os
import gc
import re
import ast
import sys
import copy
import json
import time
import math
import shutil
import string
import pickle
import random
import joblib
import itertools
from pathlib import Path
import warnings
warnings.filterwarnings("ignore")
import scipy as sp
import numpy as np
import pandas as pd
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
from tqdm.auto import tqdm
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedGroupKFold
from sklearn.metrics import mean_squared_error
import torch
print(f"torch.__version__: {torch.__version__}")
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
from torch.optim import Adam, SGD, AdamW
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, Dataset
# os.system('pip uninstall -y transformers')
# os.system('pip uninstall -y tokenizers')
# os.system('python -m pip install --no-index --find-links=../input/pppm-pip-wheels transformers')
# os.system('python -m pip install --no-index --find-links=../input/pppm-pip-wheels tokenizers')
import tokenizers
import transformers
print(f"tokenizers.__version__: {tokenizers.__version__}")
print(f"transformers.__version__: {transformers.__version__}")
from transformers import AutoTokenizer, AutoModel, AutoConfig
from transformers import (
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from adv_utils import FGM, PGD, AWP, EMA
from adv_utils import *
device = (
torch.device("cuda:1") if torch.cuda.device_count() > 1 else torch.device("cuda:0")
)
# device = torch.device('cpu')
OUTPUT_DIR = "./output_simcse_model/"
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# ====================================================
# Utils
# ====================================================
def get_score(y_trues, y_preds):
mcrmse_score, scores = MCRMSE(y_trues, y_preds)
return mcrmse_score, scores
def get_logger(filename=OUTPUT_DIR + "train"):
from logging import getLogger, INFO, StreamHandler, FileHandler, Formatter
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=f"{filename}.log")
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
LOGGER = get_logger()
def seed_everything(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(seed=888)
def display(tmp):
print(tmp)
# ====================================================
# Data Loading
# ====================================================
train_df = pd.read_csv("/kaggle/input/lecr-train/train_df_sbert_first.csv")
dev_df = pd.read_csv("/kaggle/input/lecr-train/dev_df_sbert_first.csv")
train_df = train_df[train_df["label"] == 1].copy().reset_index(drop=True)
dev_df_neg = dev_df[dev_df["label"] == 0].copy().reset_index(drop=True)
all_ids_neg = dev_df_neg["topic_id"].values.tolist()
samples = random.choices(all_ids_neg, k=500)
dev_df_neg = dev_df_neg[dev_df_neg["topic_id"].isin(samples)].reset_index(drop=True)
dev_df = dev_df[dev_df["label"] == 1].copy().reset_index(drop=True)
dev_df = pd.concat([dev_df, dev_df_neg])
print(f"train.shape: {train_df.shape}")
display(train_df.head())
print(f"dev.shape: {dev_df.shape}")
display(dev_df.head())
# ====================================================
# tokenizer
# ====================================================
# tokenizer = AutoTokenizer.from_pretrained(CFG.model+'/tokenizer/')
tokenizer = AutoTokenizer.from_pretrained(CFG.model)
tokenizer.save_pretrained(OUTPUT_DIR + "tokenizer/")
CFG.tokenizer = tokenizer
# ====================================================
# Define max_len
# ====================================================
# lengths = []
# for _, row in tqdm(train_df.iterrows(), total=len(train_df)):
# length = len(tokenizer(row['topic_text'], add_special_tokens=False)['input_ids'])
# lengths.append(length)
# length = len(tokenizer(row['content_text'], add_special_tokens=False)['input_ids'])
# lengths.append(length)
#
# pd_tmp = pd.DataFrame()
# pd_tmp['Text_len'] = lengths
# print(pd_tmp['Text_len'].describe([.90, .95, .99, .995]))
# LOGGER.info(f"max_len: {CFG.max_len}")
# ====================================================
# Dataset
# ====================================================
def prepare_input(cfg, text):
# text = text.replace('[SEP]', '</s>')
inputs = cfg.tokenizer.encode_plus(
text,
return_tensors=None,
add_special_tokens=True,
max_length=CFG.max_len,
pad_to_max_length=True,
truncation=True,
)
for k, v in inputs.items():
inputs[k] = torch.tensor(v, dtype=torch.long)
return inputs
class TrainDataset(Dataset):
def __init__(self, cfg, df):
self.cfg = cfg
self.text_topic = df["topic_text"].values
self.text_content = df["content_text"].values
self.labels = df["label"].values
def __len__(self):
return len(self.labels)
def __getitem__(self, item):
inputs = prepare_input(
self.cfg, [self.text_topic[item], self.text_content[item]]
)
return inputs
class DevDataset(Dataset):
def __init__(self, cfg, df):
self.cfg = cfg
self.text_topic = df["topic_text"].values
self.text_content = df["content_text"].values
self.labels = df["label"].values
def __len__(self):
return len(self.labels)
def __getitem__(self, item):
inputs_topic = prepare_input(self.cfg, self.text_topic[item])
inputs_content = prepare_input(self.cfg, self.text_content[item])
label = torch.tensor(self.labels[item], dtype=torch.float)
return inputs_topic, inputs_content, label
def collate(inputs):
mask_len = int(inputs["attention_mask"].sum(axis=1).max())
for k, v in inputs.items():
inputs[k] = inputs[k][:, :mask_len]
return inputs
# ====================================================
# Model
# ====================================================
class MeanPooling(nn.Module):
def __init__(self):
super(MeanPooling, self).__init__()
def forward(self, last_hidden_state, attention_mask):
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
)
sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
mean_embeddings = sum_embeddings / sum_mask
return mean_embeddings
class WeightedLayerPooling(nn.Module):
def __init__(self, num_hidden_layers, layer_start: int = 4, layer_weights=None):
super(WeightedLayerPooling, self).__init__()
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(
torch.tensor(
[1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float
)
)
)
def forward(self, all_hidden_states):
all_layer_embedding = all_hidden_states[self.layer_start :, :, :, :]
weight_factor = (
self.layer_weights.unsqueeze(-1)
.unsqueeze(-1)
.unsqueeze(-1)
.expand(all_layer_embedding.size())
)
weighted_average = (weight_factor * all_layer_embedding).sum(
dim=0
) / self.layer_weights.sum()
return weighted_average
class CustomModel(nn.Module):
def __init__(self, cfg, config_path=None, pretrained=False):
super().__init__()
self.cfg = cfg
if config_path is None:
self.config = AutoConfig.from_pretrained(
cfg.model, output_hidden_states=True
)
# self.config.hidden_dropout = 0.
# self.config.hidden_dropout_prob = 0.
# self.config.attention_dropout = 0.
# self.config.attention_probs_dropout_prob = 0.
LOGGER.info(self.config)
else:
self.config = torch.load(config_path)
if pretrained:
self.model = AutoModel.from_pretrained(cfg.model, config=self.config)
else:
self.model = AutoModel.from_config(self.config)
# if self.cfg.gradient_checkpointing:
# self.model.gradient_checkpointing_enable
self.pool = MeanPooling()
self.fc_dropout = nn.Dropout(0.1)
self.fc = nn.Linear(self.config.hidden_size, 1)
self._init_weights(self.fc)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, inputs):
outputs = self.model(**inputs)
last_hidden_states = outputs[0]
feature = self.pool(last_hidden_states, inputs["attention_mask"])
return feature
# ====================================================
# Loss
# ====================================================
def simcse_sup_loss(feature_topic, feature_content) -> torch.Tensor:
    """
    In-batch contrastive (SimCSE-style) loss: feature_topic and feature_content are the pooled
    encoder outputs of the paired topic/content texts, each of shape [batch_size, hidden_size].
    """
y_true = torch.arange(0, feature_topic.size(0), device=device)
    # Compute pairwise cosine similarities within the batch; matching topic/content pairs lie on the diagonal
sim = F.cosine_similarity(
feature_topic.unsqueeze(1), feature_content.unsqueeze(0), dim=2
)
# # Set the diagonal of the similarity matrix to a very small value, eliminating its own influence
# sim = sim - torch.eye(y_pred.shape[0], device=device) * 1e12
# The similarity matrix is divided by the temperature coefficient
sim = sim / 0.05
# Calculate the cross-entropy loss of the similarity matrix with the y_true
loss = F.cross_entropy(sim, y_true)
loss = torch.mean(loss)
return loss
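# Quick sanity check of the loss (illustrative only): with a batch of 4 paired embeddings the similarity
# matrix is 4x4 and the targets are the diagonal indices 0..3, so near-identical topic/content pairs
# should give a loss close to zero.
_demo_topic = torch.randn(4, 768, device=device)
_demo_content = _demo_topic + 0.01 * torch.randn(4, 768, device=device)
print(simcse_sup_loss(_demo_topic, _demo_content))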
# ====================================================
# Helper functions
# ====================================================
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return "%dm %ds" % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return "%s (remain %s)" % (asMinutes(s), asMinutes(rs))
def train_fn(
fold,
train_loader,
model,
criterion,
optimizer,
epoch,
scheduler,
device,
valid_loader,
valid_labels,
best_score,
fgm,
awp,
ema_inst,
):
model.train()
scaler = torch.cuda.amp.GradScaler(enabled=CFG.apex)
losses = AverageMeter()
start = end = time.time()
global_step = 0
save_step = int(len(train_loader) / 1)
for step, (inputs_topic, inputs_content, labels) in enumerate(train_loader):
inputs_topic = collate(inputs_topic)
for k, v in inputs_topic.items():
inputs_topic[k] = v.to(device)
inputs_content = collate(inputs_content)
for k, v in inputs_content.items():
inputs_content[k] = v.to(device)
batch_size = labels.size(0)
with torch.cuda.amp.autocast(enabled=CFG.apex):
feature_topic = model(inputs_topic)
feature_content = model(inputs_content)
# print(feature.shape)
loss = simcse_sup_loss(feature_topic, feature_content)
# print(loss)
if CFG.gradient_accumulation_steps > 1:
loss = loss / CFG.gradient_accumulation_steps
losses.update(loss.item(), batch_size)
scaler.scale(loss).backward()
# ---------------------fgm-------------
# fgm.attack(epsilon=1.0) # Embedding was modified
# with torch.cuda.amp.autocast(enabled=CFG.apex):
# feature_topic = model(inputs_topic)
# feature_content = model(inputs_content)
# loss_avd = simcse_sup_loss(feature_topic, feature_content)
# if CFG.gradient_accumulation_steps > 1:
# loss_avd = loss_avd / CFG.gradient_accumulation_steps
# losses.update(loss_avd.item(), batch_size)
# scaler.scale(loss_avd).backward()
# fgm.restore() # # Restore the parameters of Embedding
# ---------------------fgm-------------
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), CFG.max_grad_norm
)
if (step + 1) % CFG.gradient_accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
if ema_inst:
ema_inst.update()
optimizer.zero_grad()
global_step += 1
if CFG.batch_scheduler:
scheduler.step()
end = time.time()
if step % CFG.print_freq == 0 or step == (len(train_loader) - 1):
print(
"Epoch: [{0}][{1}/{2}] "
"Elapsed {remain:s} "
"Loss: {loss.val:.4f}({loss.avg:.4f}) "
"Grad: {grad_norm:.4f} "
"LR: {lr:.8f} ".format(
epoch + 1,
step,
len(train_loader),
remain=timeSince(start, float(step + 1) / len(train_loader)),
loss=losses,
grad_norm=grad_norm,
lr=scheduler.get_lr()[0],
)
)
if CFG.wandb and step % 40 == 0:
print(
{
f"[fold{fold}] loss": losses.val,
f"[fold{fold}] lr": scheduler.get_lr()[0],
}
)
if (step + 1) % save_step == 0 and epoch > -1:
if ema_inst:
ema_inst.apply_shadow()
# eval
score = valid_fn(valid_loader, model, criterion, device)
# # scoring
# score, scores = get_score(valid_labels, predictions)
LOGGER.info(f"Epoch {epoch + 1} - step: {step:.4f} score: {score:.4f}")
if CFG.wandb:
print(
{
f"[fold{fold}] epoch": epoch + 1,
f"[fold{fold}] score": score,
f"[fold{fold}] best_score": best_score,
}
)
if score >= best_score:
best_score = score
LOGGER.info(
f"Epoch {epoch + 1} - Save Best loss: {best_score:.4f} Model"
)
torch.save(
{"model": model.state_dict()},
#'predictions': predictions},
OUTPUT_DIR + f"{CFG.model.replace('/', '-')}_fold{fold}_best.pth",
)
if ema_inst:
ema_inst.restore()
return losses.avg, best_score
from scipy import stats
def valid_fn(valid_loader, model, criterion, device):
losses = AverageMeter()
model.eval()
sim_tensor = torch.tensor([], device=device)
label_array = np.array([])
start = time.time()
for step, (inputs_topic, inputs_content, labels) in enumerate(valid_loader):
inputs_topic = collate(inputs_topic)
for k, v in inputs_topic.items():
inputs_topic[k] = v.to(device)
inputs_content = collate(inputs_content)
for k, v in inputs_content.items():
inputs_content[k] = v.to(device)
labels = labels.to("cpu").numpy()
with torch.no_grad():
feature_topic = model(inputs_topic)
feature_content = model(inputs_content)
sim = F.cosine_similarity(feature_topic, feature_content, dim=-1)
sim_tensor = torch.cat((sim_tensor, sim), dim=0)
label_array = np.append(label_array, np.array(labels))
# sim_tmp = sim.cpu().numpy()
# print(labels)
# print(sim_tmp)
# score_tmp = stats.spearmanr(labels, sim_tmp)
end = time.time()
print("Eval cost time : ", end - start)
score = stats.spearmanr(label_array, sim_tensor.cpu().numpy()).correlation
return score
def get_optimizer_grouped_parameters(
model, model_type, learning_rate, weight_decay, layerwise_learning_rate_decay
):
no_decay = ["bias", "LayerNorm.weight"]
# initialize lr for task specific layer
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if "classifier" in n or "pooler" in n
],
"weight_decay": 0.0,
"lr": learning_rate,
},
]
# initialize lrs for every layer
num_layers = model.config.num_hidden_layers
layers = [getattr(model, model_type).embeddings] + list(
getattr(model, model_type).encoder.layer
)
layers.reverse()
lr = learning_rate
for layer in layers:
lr *= layerwise_learning_rate_decay
optimizer_grouped_parameters += [
{
"params": [
p
for n, p in layer.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": weight_decay,
"lr": lr,
},
{
"params": [
p
for n, p in layer.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
"lr": lr,
},
]
return optimizer_grouped_parameters
# ====================================================
# train loop
# ====================================================
def train_loop(folds, fold):
LOGGER.info(f"========== fold: {fold} training ==========")
# ====================================================
# loader
# ====================================================
# train_folds = folds[folds['fold'] != fold].reset_index(drop=True)
# valid_folds = folds[folds['fold'] == fold].reset_index(drop=True)
# valid_labels = valid_folds[CFG.target_cols].values
train_folds = train_df
valid_folds = dev_df
train_dataset = DevDataset(CFG, train_folds)
valid_dataset = DevDataset(CFG, valid_folds)
train_loader = DataLoader(
train_dataset,
batch_size=CFG.batch_size,
shuffle=True,
num_workers=CFG.num_workers,
pin_memory=True,
drop_last=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=CFG.batch_size * 2,
shuffle=False,
num_workers=CFG.num_workers,
pin_memory=True,
drop_last=False,
)
# ====================================================
# model & optimizer
# ====================================================
model = CustomModel(CFG, config_path=None, pretrained=True)
# model = CustomModel(cfg=None, config_path=CFG.model + '/config.pth', pretrained=False)
# state = torch.load(CFG.model + '/mpnet_basev2_first_pretrain_fold0_best.pth',
# map_location=torch.device('cpu'))
# model.load_state_dict(state['model'])
torch.save(model.config, OUTPUT_DIR + "config.pth")
model.to(device)
def get_optimizer_params(model, encoder_lr, decoder_lr, weight_decay=0.0):
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p
for n, p in model.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"lr": encoder_lr,
"weight_decay": weight_decay,
},
{
"params": [
p
for n, p in model.model.named_parameters()
if any(nd in n for nd in no_decay)
],
"lr": encoder_lr,
"weight_decay": 0.0,
},
{
"params": [p for n, p in model.named_parameters() if "model" not in n],
"lr": decoder_lr,
"weight_decay": 0.0,
},
]
return optimizer_parameters
optimizer_parameters = get_optimizer_params(
model,
encoder_lr=CFG.encoder_lr,
decoder_lr=CFG.decoder_lr,
weight_decay=CFG.weight_decay,
)
optimizer = AdamW(
optimizer_parameters, lr=CFG.encoder_lr, eps=CFG.eps, betas=CFG.betas
)
# ====================================================
# scheduler
# ====================================================
def get_scheduler(cfg, optimizer, num_train_steps):
if cfg.scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=cfg.num_warmup_steps,
num_training_steps=num_train_steps,
)
elif cfg.scheduler == "cosine":
cfg.num_warmup_steps = num_train_steps * 0.05
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=cfg.num_warmup_steps,
num_training_steps=num_train_steps,
num_cycles=cfg.num_cycles,
)
return scheduler
num_train_steps = int(len(train_folds) / CFG.batch_size * CFG.epochs)
scheduler = get_scheduler(CFG, optimizer, num_train_steps)
# ====================================================
# loop
# ====================================================
# criterion = nn.SmoothL1Loss(reduction='mean')
# #criterion = RMSELoss(reduction="mean")
criterion = None
# loss = loss_fn(rep_a=rep_a, rep_b=rep_b, label=label)
best_score = 0
fgm = FGM(model)
awp = None
ema_inst = EMA(model, 0.999)
ema_inst.register()
for epoch in range(CFG.epochs):
start_time = time.time()
# train
valid_labels = None
avg_loss, best_score = train_fn(
fold,
train_loader,
model,
criterion,
optimizer,
epoch,
scheduler,
device,
valid_loader,
valid_labels,
best_score,
fgm,
awp,
ema_inst,
)
# predictions = torch.load(OUTPUT_DIR + f"{CFG.model.replace('/', '-')}_fold{fold}_best.pth",
# map_location=torch.device('cpu'))['predictions']
# valid_folds[[f"pred_{c}" for c in CFG.target_cols]] = predictions
torch.cuda.empty_cache()
gc.collect()
return valid_folds
import torch
from GPUtil import showUtilization as gpu_usage
from numba import cuda
def free_gpu_cache():
print("Initial GPU Usage")
gpu_usage()
torch.cuda.empty_cache()
cuda.select_device(0)
cuda.close()
cuda.select_device(0)
print("GPU Usage after emptying the cache")
gpu_usage()
free_gpu_cache()
if __name__ == "__main__":
def get_result(oof_df):
labels = oof_df[CFG.target_cols].values
preds = oof_df[[f"pred_{c}" for c in CFG.target_cols]].values
score, scores = get_score(labels, preds)
LOGGER.info(f"Score: {score:<.4f} Scores: {scores}")
torch.cuda.empty_cache()
if CFG.train:
oof_df = pd.DataFrame()
for fold in range(CFG.n_fold):
if fold in CFG.trn_fold:
_oof_df = train_loop(train_df, fold)
gpu_usage()
torch.cuda.memory_summary(device=None, abbreviated=False)
|
# # Heart Disease Prediction Using Logistic Regression
# ## Table of Contents
# ### 1. What is Logistic Regression?
# ### 2. Importing Libraries
# ### 3. Uploading Dataset
# ### 4. Data PreProcessing
# ### 5. EDA
# ### 6. Data Splitting
# ### 7. Model Selection and Training
# ### 8. Model Evaluation
# ### 9. AUC - ROC Curve
# ### 10. Conclusion
# ## 1. What is Logistic Regression?
# ### Logistic regression is one of the most popular machine learning algorithms and falls under the supervised learning technique. It is used to predict a categorical dependent variable from a given set of independent variables.
# ### Logistic regression predicts the output of a categorical dependent variable, so the outcome must be a categorical or discrete value: Yes or No, 0 or 1, True or False, etc. Instead of returning exactly 0 or 1, it returns a probability that lies between 0 and 1.
# ### Logistic regression is similar to linear regression except in how the two are used: linear regression solves regression problems, whereas logistic regression solves classification problems.
# ### The goal of logistic regression is to estimate the probability of a binary outcome based on one or more predictor variables. Unlike linear regression, which models the relationship between a continuous dependent variable and one or more independent variables, logistic regression is used when the dependent variable is categorical and takes on only two values, such as yes/no, 0/1, or true/false.
# ## Logistic Function (Sigmoid Function):
# ### The sigmoid function is a mathematical function used to map predicted values to probabilities.
# ### It maps any real value to a value within the range 0 to 1.
# ### The output of logistic regression must stay between 0 and 1, so the mapping forms an "S"-shaped curve, called the sigmoid (or logistic) function.
# ### In logistic regression we use a threshold value to decide between the two classes: probabilities above the threshold are mapped to 1, and probabilities below it are mapped to 0.
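# ### A quick numeric sketch of the sigmoid (illustrative only, not part of the modelling pipeline below): any real-valued score z is squashed into (0, 1).
import math


def sigmoid(z):
    # sigmoid(z) = 1 / (1 + e^(-z))
    return 1 / (1 + math.exp(-z))


print(sigmoid(-4), sigmoid(0), sigmoid(4))  # ≈ 0.018, 0.5, 0.982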
# ## 2. Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# ## 3. Uploading dataset
heart_disease_data = pd.read_csv("heart_disease.csv")
heart_disease_data.head()
heart_disease_data.info()
# ## 4. Data Preprocessing
heart_disease_data.isnull().sum()
# remove the null or missing value in Ca feature using mean
heart_disease_data["Ca"] = heart_disease_data["Ca"].fillna(
heart_disease_data["Ca"].mean()
)
# forward-fill the null or missing values in the Thal feature
heart_disease_data["Thal"] = heart_disease_data["Thal"].fillna(method="ffill")
heart_disease_data.isnull().sum()
heart_disease_data.duplicated().sum()
heart_disease_data.describe()
# rename feature (column) name
heart_disease_data.rename(columns={"AHD": "Target"}, inplace=True)
heart_disease_data.columns
# preprocessing on Gender and Thal features
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
heart_disease_data["Gender"] = label_encoder.fit_transform(heart_disease_data["Sex"])
heart_disease_data["ChestPain"] = label_encoder.fit_transform(
heart_disease_data["ChestPain"]
)
heart_disease_data["Thal"] = label_encoder.fit_transform(heart_disease_data["Thal"])
heart_disease_data["Target"] = label_encoder.fit_transform(heart_disease_data["Target"])
# heart_disease_data = heart_disease_data.drop(['Taget'], axis=1)
heart_disease_data.head()
def correlation(dataset, threshold):
col_corr = set()
corr_matrix = dataset.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if abs(corr_matrix.iloc[i, j]) > threshold:
colname = corr_matrix.columns[i]
col_corr.add(colname)
return col_corr
corr_features = correlation(heart_disease_data, 0.5)
corr_features
X = heart_disease_data.drop(
["Age", "ChestPain", "RestECG", "Gender", "Chol", "RestBP", "Target"], axis=1
)
Y = heart_disease_data["Target"]
heart_disease_data.corr()
data_female = [rows for _, rows in heart_disease_data.groupby("Gender")][0]
data_male = [rows for _, rows in heart_disease_data.groupby("Gender")][1]
data_male.head()
data_female = data_female.drop(["Gender"], axis=1)
data_male = data_male.drop(["Gender"], axis=1)
data_male.head()
# ## 5. EDA (Exploratory data analysis)
a = heart_disease_data["Gender"].value_counts()
labels = ["Male", "Female"]
explode = [0.1, 0]
colors = ["#ADD8E6", "g"]
plt.pie(
a,
labels=labels,
autopct="%1.0f%%",
pctdistance=0.4,
labeldistance=1.3,
explode=explode,
colors=colors,
)
plt.legend(title="Gender")
plt.show()
a
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
plt.figure(1, figsize=(12, 10))
the_grid = GridSpec(2, 3)
plt.subplot(the_grid[0, 0], aspect=1, title="Female v/s Chest Pain Type")
data_female.ChestPain.groupby(data_female.ChestPain).sum().plot(
kind="pie", autopct="%.2f"
)
plt.subplot(the_grid[0, 1], aspect=1, title="Male v/s Chest Pain Type")
data_male.ChestPain.groupby(data_male.ChestPain).sum().plot(kind="pie", autopct="%.2f")
plt.subplot(the_grid[0, 2], aspect=1, title="Overall details Chest Pain Type")
heart_disease_data.ChestPain.groupby(heart_disease_data.ChestPain).sum().plot(
kind="pie", autopct="%.2f"
)
plt.suptitle("Pie Chart for Chest Pain Type", fontsize=14)
# ### There are three pie charts for chest pain type: the first for females v/s chest pain, the second for males v/s chest pain, and the third for the overall analysis across both genders.
plt.figure(1, figsize=(12, 10))
plt.subplot(the_grid[0, 0], aspect=1, title="Female v/s Target")
data_female.Age.groupby(data_female.Target).sum().plot(
kind="pie", autopct="%.2f", textprops={"fontsize": 12}
)
plt.subplot(the_grid[0, 1], aspect=1, title="Male v/s Target")
data_male.Age.groupby(data_male.Target).sum().plot(
kind="pie", autopct="%.2f", textprops={"fontsize": 12}
)
plt.subplot(the_grid[0, 2], aspect=1, title="Overall details (Target)")
heart_disease_data.Age.groupby(heart_disease_data.Target).sum().plot(
kind="pie", autopct="%.2f", textprops={"fontsize": 12}
)
plt.suptitle("Pie Chart for Target", fontsize=14)
# ### These pie charts show the heart disease target distribution for females, males, and the overall dataset.
sns.heatmap(heart_disease_data.corr(), annot=True)
x = heart_disease_data["Age"][0:30]
y = heart_disease_data["Chol"][0:30]
Target = heart_disease_data["Target"][0:30]
fig = px.bar(x, y, color=Target)
fig.show()
# ## 6. Data Splitting
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
print("x_train:", x_train.shape)
print("x_test: ", x_test.shape)
print("y_train:", y_train.shape)
print("y_test: ", y_test.shape)
# ## 7. Model Selection and Training
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
# ## 8. Model Evaluation
log_reg.score(x_test, y_test)
predict = log_reg.predict(x_test)
predict
# ## Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, predict)
cm
ax = sns.heatmap(cm, annot=True, cmap="Blues")
ax.set_title("Confusion Matrix with labels\n\n")
ax.set_xlabel("\nPredicted Values")
ax.set_ylabel("Actual Values ")
plt.show()
import numpy as np
ax = sns.heatmap(cm / np.sum(cm), annot=True, fmt=".2%", cmap="Blues")
ax.set_title("Confusion Matrix with labels\n\n")
ax.set_xlabel("\nPredicted Values")
ax.set_ylabel("Actual Values ")
plt.show()
# find accuracy score
from sklearn.metrics import accuracy_score
accuracy_score(y_test, predict)
# find precision score
from sklearn.metrics import precision_score
precision_score(y_test, predict)
# find recall score
from sklearn.metrics import recall_score
recall_score(y_test, predict)
# find f1 score
from sklearn.metrics import f1_score
f1_score(y_test, predict)
from sklearn.metrics import classification_report
print(classification_report(y_test, predict))
# ## 9. AUC - ROC Curve
# ### ROC curve (receiver operating characteristic curve) is a graph showing the performance of a classification model at all classification thresholds. This curve plots two parameters:
# ### True Positive Rate
# ### False Positive Rate
# ### True Positive Rate (TPR) is a synonym for recall and is therefore defined as follows:
# ## TPR = TP / (TP + FN)
# ### False Positive Rate (FPR) is defined as follows:
# ## FPR = FP / (FP + TN)
# ## AUC: Area Under the ROC Curve
# ### AUC stands for "Area Under the ROC Curve." That is, AUC measures the entire two-dimensional area underneath the ROC curve (think integral calculus) from (0,0) to (1,1).
# ### AUC provides an aggregate measure of performance across all possible classification thresholds. One way of interpreting AUC is as the probability that the model ranks a randomly chosen positive example higher than a randomly chosen negative example.
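# ### As a small illustration (a sketch using the confusion matrix `cm` computed above, where rows are actual values and columns are predicted values), the two rates at the default 0.5 threshold can be read off directly:
tn, fp, fn, tp = cm.ravel()
print("TPR (recall) at 0.5 threshold:", tp / (tp + fn))
print("FPR at 0.5 threshold:", fp / (fp + tn))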
# import metrics library from sklearn
from sklearn import metrics
y_pred_proba = log_reg.predict_proba(x_test)[::, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
# plot the ROC curve
plt.plot(fpr, tpr)
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.show()
y_pred_proba = log_reg.predict_proba(x_test)[::, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
# plot the ROC curve
plt.plot(fpr, tpr, label="AUC = " + str(auc))
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc=4)
plt.show()
|
# https://www.youtube.com/watch?v=VC8Jc9_lNoY
SEED = 1984
N_SPLITS = 10
target = "target"
import numpy as np
import pandas as pd
pd.set_option("max_columns", 100)
pd.set_option("max_rows", 200)
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import spearmanr
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform
from sklearn.model_selection import (
StratifiedKFold,
RepeatedStratifiedKFold,
cross_val_score,
)
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import (
StandardScaler,
PolynomialFeatures,
MinMaxScaler,
RobustScaler,
FunctionTransformer,
)
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import (
GradientBoostingClassifier,
HistGradientBoostingClassifier,
RandomForestClassifier,
ExtraTreesClassifier,
)
from sklearn.neighbors import KNeighborsClassifier
from pygam import GAM, LogisticGAM, s, f, te, l
from xgboost import XGBClassifier
from sklearn.calibration import CalibrationDisplay, CalibratedClassifierCV
# # Data & EDA
train = pd.read_csv("../input/playground-series-s3e12/train.csv", index_col="id")
test = pd.read_csv("../input/playground-series-s3e12/test.csv", index_col="id")
print(f"Shape for Train {train.shape} and Test {test.shape}")
print(
f"Nan values in Train : {train[test.columns].isna().sum().sum()} | in Test : {train.isna().sum().sum()}"
)
print(f"Available columns for training : \n {test.columns}")
train.head()
origin = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
print(f"Shape for origin {origin.shape}")
print(f"Nan values in origin : {origin[test.columns].isna().sum().sum()}")
# ## Target
fig, ax = plt.subplots(1, 2, figsize=(15, 2))
ax = ax.flatten()
train[target].value_counts().sort_index().plot.barh(ax=ax[0], color="skyblue").set(
title="Target in Train"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.2d", padding=2)
(train[target].value_counts(normalize=True) * 100).sort_index().plot.barh(
ax=ax[1], color="skyblue"
).set(title="% Class in Train")
ax[1].bar_label(ax[1].containers[0], fmt="%.1f%%", padding=2)
for i in range(2):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
fig, ax = plt.subplots(1, 2, figsize=(15, 2))
ax = ax.flatten()
origin[target].value_counts().sort_index().plot.barh(ax=ax[0], color="gold").set(
title="Target in Origin"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.2d", padding=2)
(origin[target].value_counts(normalize=True) * 100).sort_index().plot.barh(
ax=ax[1], color="gold"
).set(title="% Class in Origin")
ax[1].bar_label(ax[1].containers[0], fmt="%.1f%%", padding=2)
for i in range(2):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
# ## Other columns
fig, ax = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
train[[f for f in test.columns]].nunique().plot.barh(ax=ax[0], color="skyblue").set(
title="Unique values per column in Train"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.1d", padding=2)
test.nunique().plot.barh(ax=ax[1], color="g").set(
title="Unique values per column in Test"
)
ax[1].bar_label(ax[1].containers[0], fmt="%.1d", padding=2)
origin[[f for f in test.columns]].nunique().plot.barh(ax=ax[2], color="gold").set(
title="Unique values per column in Origin"
)
ax[2].bar_label(ax[2].containers[0], fmt="%.1d", padding=2)
for i in range(3):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
for df, name, color in zip(
[train, test, origin], ["Train", "Test", "Origin"], ["skyblue", "green", "gold"]
):
fig, ax = plt.subplots(1, 3, figsize=(15, 3), sharey=True)
plt.suptitle(f"Mix/Max in {name}", y=1.2, fontsize=20)
df[[f for f in test.columns]].min().plot.barh(ax=ax[0], color=color).set(
title=f"Min in {name}"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.2f", padding=2)
df[[f for f in test.columns]].median().plot.barh(ax=ax[1], color=color).set(
title=f"Median in {name}"
)
ax[1].bar_label(ax[1].containers[0], fmt="%.2f", padding=2)
df[[f for f in test.columns]].max().plot.barh(ax=ax[2], color=color).set(
title=f"Max in {name}"
)
ax[2].bar_label(ax[2].containers[0], fmt="%.2f", padding=2)
for i in range(3):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
df_temp1 = pd.concat(
[train.loc[train[target] == 1], train.loc[train[target] == 0]], axis=0
)
df_temp2 = pd.concat(
[test, train[test.columns].sample(frac=test.shape[0] / train.shape[0])], axis=0
)
df_temp2["is_test"] = 0
df_temp2.loc[test.index, "is_test"] = 1
fig, ax = plt.subplots(len(test.columns), 4, figsize=(16, len(test.columns) * 3))
for i, f in enumerate(test.columns):
if i == 0:
legend = True
else:
legend = False
sns.kdeplot(data=df_temp1, hue="target", x=f, legend=legend, ax=ax[i, 0])
sns.boxplot(
data=train, x="target", y=f, ax=ax[i, 1], palette=["skyblue", "lightsalmon"]
)
sns.boxplot(
data=origin, x="target", y=f, ax=ax[i, 2], palette=["skyblue", "lightsalmon"]
)
sns.kdeplot(data=df_temp2, hue="is_test", x=f, legend=legend, ax=ax[i, 3])
ax[i, 1].set_title(f"{f}", loc="right", weight="bold", fontsize=20)
ax[i, 1].set_xlabel("Target in Train", fontsize=10)
ax[i, 2].set_xlabel("Target in Origin", fontsize=10)
for g in range(4):
ax[i, g].spines[["top", "right"]].set_visible(False)
# fig.legend([1, 0], loc='upper left', fontsize = 10, ncol=3, bbox_to_anchor=(0.12, 1))
# fig.legend(["train", "test"], loc='upper right', fontsize = 10, ncol=3, bbox_to_anchor=(0.9, 1))
plt.tight_layout()
plt.show()
# ## Prediction with calc
# Credit: https://www.kaggle.com/code/seascape/target-calc
# AUC on the public leaderboard with **calc** alone is 0.8573
print(
"AUC with train['calc'] : {:.5f}".format(
roc_auc_score(train[target], train["calc"])
)
)
# ## Correlation
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
for i, (df, t) in enumerate(zip([train, origin, test], ["Train", "Origin", "Test"])):
matrix = df[test.columns].corr()
sns.heatmap(
matrix, annot=True, fmt=".1f", cmap="coolwarm", mask=np.triu(matrix), ax=ax[i]
)
ax[i].set_title(f"Correlations in {t}", fontsize=15)
fig, ax = plt.subplots(
len(test.columns), len(test.columns) - 1, figsize=(16, (len(test.columns) - 1) * 2)
)
plt.subplots_adjust(hspace=0.4, wspace=0.3)
for i, c1 in enumerate(test.columns):
for j, c2 in enumerate(test.columns[:-1]):
if j < i:
sns.scatterplot(
data=train, x=c1, y=c2, hue=target, legend=False, ax=ax[i - 1, j]
)
ax[i - 1, j].spines[["top", "right"]].set_visible(False)
ax[i - 1, j].set(xticklabels=[], yticklabels=[])
ax[i - 1, j].set_xlabel(c1, fontsize=9)
ax[i - 1, j].set_ylabel(c2, fontsize=9)
else:
fig.delaxes(ax[i - 1, j])
fig.legend([0, 1], loc="upper center", fontsize=10, ncol=3, bbox_to_anchor=(0.8, 1))
plt.tight_layout()
plt.show()
# ## PCA
from sklearn.decomposition import PCA
scal = StandardScaler()
X = scal.fit_transform(train[test.columns])
pca = PCA()
pca_samples = pca.fit_transform(X)
cum_sum = pca.explained_variance_ratio_.cumsum() * 100
plt.plot(range(1, 1 + len(cum_sum)), cum_sum)
plt.title(
f"{100*pca.explained_variance_[:1].sum()/pca.explained_variance_.sum():.0f}% explained variance with 1 features (/{len(test.columns)})"
)
# # Feature engineering
df = pd.concat([train[test.columns], origin[test.columns], test], axis=0)
stdscal = StandardScaler()
stdscal.fit(df)
df[[f"{f}_stdscal" for f in test.columns]] = stdscal.transform(df)
train[[f"{f}_stdscal" for f in test.columns]] = df[
[f"{f}_stdscal" for f in test.columns]
].iloc[0 : train.shape[0]]
origin[[f"{f}_stdscal" for f in test.columns]] = df[
[f"{f}_stdscal" for f in test.columns]
].iloc[train.shape[0] : train.shape[0] + origin.shape[0]]
test[[f"{f}_stdscal" for f in test.columns]] = df[
[f"{f}_stdscal" for f in test.columns]
].iloc[train.shape[0] + origin.shape[0] :]
print(train.shape, origin.shape, test.shape)
new_features = [f"calc/{c}" for c in ["gravity", "ph", "osmo", "cond", "urea"]] + [
f"cond/{c}" for c in ["gravity", "ph", "osmo", "urea"]
]
for df in [train, origin, test]:
for c in ["calc", "gravity", "ph", "osmo", "cond", "urea"]:
df[f"log_{c}"] = np.log(df[c])
df[f"{c}_2"] = df[c] ** 2
for c in ["gravity", "ph", "osmo", "cond", "urea"]:
df[f"calc/{c}"] = df["calc"] / (1 + df["calc"] + df[c])
df[f"bcalc/{c}"] = df["calc"] / (1 + df[c])
df[f"calc/{c}_stdscal"] = df["calc_stdscal"] / (1 + df[f"{c}_stdscal"])
for c in ["gravity", "ph", "osmo", "urea"]:
df[f"cond/{c}"] = df["cond"] / (1 + df["cond"] + df[c])
df[f"bcond/{c}"] = df["cond"] / (1 + df[c])
df[f"cond/{c}_stdscal"] = df["cond_stdscal"] / (1 + df[f"{c}_stdscal"])
feats = [
"log_calc",
"calc",
"cond",
"cond/gravity",
"calc/cond",
"calc/gravity",
"gravity",
"gravity_2",
]
df_temp1 = pd.concat(
[train.loc[train[target] == 1], train.loc[train[target] == 0]], axis=0
)
df_temp2 = pd.concat(
[test, train[test.columns].sample(frac=test.shape[0] / train.shape[0])], axis=0
)
df_temp2["is_test"] = 0
df_temp2.loc[test.index, "is_test"] = 1
fig, ax = plt.subplots(len(feats), 4, figsize=(16, len(feats) * 3))
for i, f in enumerate(feats):
if i == 0:
legend = True
else:
legend = False
sns.kdeplot(data=df_temp1, hue="target", x=f, legend=legend, ax=ax[i, 0])
sns.boxplot(
data=train, x="target", y=f, ax=ax[i, 1], palette=["skyblue", "lightsalmon"]
)
sns.boxplot(
data=origin, x="target", y=f, ax=ax[i, 2], palette=["skyblue", "lightsalmon"]
)
sns.kdeplot(data=df_temp2, hue="is_test", x=f, legend=legend, ax=ax[i, 3])
ax[i, 1].set_title(f"{f}", loc="right", weight="bold", fontsize=20)
ax[i, 1].set_xlabel("Target in Train", fontsize=10)
ax[i, 2].set_xlabel("Target in Origin", fontsize=10)
for g in range(4):
ax[i, g].spines[["top", "right"]].set_visible(False)
# fig.legend([1, 0], loc='upper left', fontsize = 10, ncol=3, bbox_to_anchor=(0.12, 1))
# fig.legend(["train", "test"], loc='upper right', fontsize = 10, ncol=3, bbox_to_anchor=(0.9, 1))
plt.tight_layout()
plt.show()
def cv_score(model, features, cv, verbose=False, add_origin=False):
trn_scores, val_scores = [], []
for fold, (trn_idx, val_idx) in enumerate(cv.split(train, train[target])):
X_trn, X_val = train.iloc[trn_idx][features], train.iloc[val_idx][features]
y_trn, y_val = train.iloc[trn_idx][target], train.iloc[val_idx][target]
if add_origin:
model.fit(
pd.concat([X_trn, origin[features]], axis=0),
pd.concat([y_trn, origin[target]], axis=0),
)
else:
model.fit(X_trn, y_trn)
use_predict, use_predict_proba1 = False, False
m = model
if type(m) == Pipeline:
m = m.steps[-1][1]
if type(m) == CalibratedClassifierCV:
m = m.calibrated_classifiers_[0].base_estimator
if type(m) == LogisticGAM:
use_predict_proba1 = True
y_trn_pred = (
model.predict(X_trn)
if use_predict
else model.predict_proba(X_trn)
if use_predict_proba1
else model.predict_proba(X_trn)[:, 1]
)
y_val_pred = (
model.predict(X_val)
if use_predict
else model.predict_proba(X_val)
if use_predict_proba1
else model.predict_proba(X_val)[:, 1]
)
trn_scores.append(roc_auc_score(y_trn, y_trn_pred))
val_scores.append(roc_auc_score(y_val, y_val_pred))
if verbose:
print(
f"Fold {fold+1}: AUC = {val_scores[fold]:.5f} Overfitting : {trn_scores[fold] - val_scores[fold]:.5f}"
)
return (
np.mean(val_scores),
np.mean(trn_scores),
np.mean(np.array(trn_scores) - np.array(val_scores)),
)
results = []
def score_model(model, features, label=None, use_original=True, n_splits=N_SPLITS):
"""Cross-validate a model with feature selection"""
trn_scores, val_scores = [], []
oof = np.zeros_like(train[target], dtype=float)
kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED)
for fold, (trn_idx, val_idx) in enumerate(kf.split(train, train[target])):
X_trn, X_val = train.iloc[trn_idx][features], train.iloc[val_idx][features]
y_trn, y_val = train.iloc[trn_idx][target], train.iloc[val_idx][target]
if use_original:
X_trn = pd.concat([X_trn, origin[features]], axis=0)
y_trn = pd.concat([y_trn, origin[target]], axis=0)
model.fit(X_trn, y_trn)
use_predict, use_predict_proba1 = False, False
m = model
if type(m) == Pipeline:
m = m.steps[-1][1]
if type(m) == CalibratedClassifierCV:
m = m.calibrated_classifiers_[0].base_estimator
if type(m) == LogisticGAM:
use_predict_proba1 = True
y_trn_pred = (
model.predict(X_trn)
if use_predict
else model.predict_proba(X_trn)
if use_predict_proba1
else model.predict_proba(X_trn)[:, 1]
)
y_val_pred = (
model.predict(X_val)
if use_predict
else model.predict_proba(X_val)
if use_predict_proba1
else model.predict_proba(X_val)[:, 1]
)
trn_scores.append(roc_auc_score(y_trn, y_trn_pred))
val_scores.append(roc_auc_score(y_val, y_val_pred))
print(
f"Fold {fold+1}: AUC = {val_scores[fold]:.5f} Overfitting : {trn_scores[fold] - val_scores[fold]:.5f}"
)
oof[val_idx] = y_val_pred
_mean_overfit = np.mean(np.array(trn_scores) - np.array(val_scores))
print(
f"Average AUC : {np.mean(val_scores):.5f} Overfitting : {_mean_overfit:.5f} Std : {np.std(val_scores):.5f}"
)
if label is not None:
if label in [f[0] for f in results]:
del results[[f[0] for f in results].index(label)]
results.append(
(
label,
model,
np.mean(val_scores),
_mean_overfit,
np.std(val_scores),
oof,
use_original,
features,
)
)
display_model(label, oof)
def display_model(label, oof):
"""Plot two diagrams with the oof values (calibration and histogram)"""
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
plt.suptitle(label, y=1.0, fontsize=20)
ax[0].set_title("Calibration")
CalibrationDisplay.from_predictions(
train[target], oof, n_bins=50, strategy="quantile", ax=ax[0]
)
ax[1].set_title("Histogram")
ax[1].hist(oof, bins=100)
for i in range(2):
ax[i].spines[["top", "right"]].set_visible(False)
def check_new_features(model, base_features, new_features=new_features, n_repeats=10):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=n_repeats, random_state=SEED)
cvs, cts, cofs = cv_score(model, base_features, cv)
print(
f"With {base_features} : Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}"
)
for f in new_features:
if f not in base_features:
cvs, cts, cofs = cv_score(model, base_features + [f], cv)
print(
f"With {f} : Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}"
)
# # SVC (feature experiments)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=10)
cvs, cts, cofs = cv_score(
SVC(C=0.01, gamma="scale", probability=True, random_state=10, kernel="rbf"),
features=["log_calc"],
cv=cv,
add_origin=False,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
cvs, cts, cofs = cv_score(
make_pipeline(
RobustScaler(),
SVC(C=0.4, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
),
features=["cond/gravity", "calc", "calc/gravity"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
check_new_features(
make_pipeline(
RobustScaler(),
SVC(C=0.4, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
),
["cond", "calc"],
new_features=["calc/gravity", "calc/ph", "calc/cond"],
n_repeats=10,
)
check_new_features(
make_pipeline(
RobustScaler(),
SVC(C=0.5, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
),
["cond", "calc/gravity"],
new_features=[
"calc/gravity",
"calc/ph",
"calc/cond",
"cond/gravity",
"cond/ph",
"calc/osmo",
"calc/urea",
],
n_repeats=10,
)
check_new_features(
make_pipeline(
RobustScaler(),
SVC(C=0.5, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
),
["calc", "cond/gravity"],
new_features=["calc/gravity", "calc/ph", "calc/cond"],
n_repeats=10,
)
check_new_features(
make_pipeline(
RobustScaler(),
SVC(C=0.5, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
),
["calc", "bcond/gravity"],
new_features=["bcalc/gravity", "bcalc/ph", "bcalc/cond"],
n_repeats=10,
)
# # KNeighborsClassifier
for n in [1]:
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=10)
cvs, cts, cofs = cv_score(
make_pipeline(
FunctionTransformer(lambda X: X * np.array([[0.01, 1]])),
KNeighborsClassifier(64),
),
features=["cond", "calc"],
cv=cv,
add_origin=False,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
for n in [75]:
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=10)
cvs, cts, cofs = cv_score(
make_pipeline(
FunctionTransformer(lambda X: X * np.array([[0.01, 1]])),
KNeighborsClassifier(n),
),
features=["cond", "calc"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
for n in [61, 62, 63, 64, 65, 66, 67, 68, 69]:
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=10)
cvs, cts, cofs = cv_score(
KNeighborsClassifier(n), features=["calc"], cv=cv, add_origin=False
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
cvs, cts, cofs = cv_score(
SVC(C=0.3, gamma="scale", probability=True, random_state=SEED, kernel="rbf"),
features=["calc"],
cv=cv,
add_origin=False,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
for c in [44]:
cvs, cts, cofs = cv_score(
SVC(C=0.44, gamma="scale", probability=True, random_state=SEED, kernel="rbf"),
features=["calc"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=10)
model = make_pipeline(
FunctionTransformer(lambda X: X * np.array([[0.01, 1]])), KNeighborsClassifier(64)
)
auc = cross_val_score(
model, train[["cond", "calc"]], train[target], scoring="roc_auc", cv=cv
).mean()
print(f"AUC = {auc:.4f}")
# AUC = 0.819
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=10)
model = SVC(C=0.3, gamma="scale", probability=True, random_state=10, kernel="rbf")
auc = cross_val_score(
model, train[["calc"]], train[target], scoring="roc_auc", cv=cv
).mean()
print(f"AUC = {auc:.4f}")
# # SVC
score_model(
make_pipeline(
RobustScaler(),
CalibratedClassifierCV(
SVC(C=0.5, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
method="isotonic",
),
),
["cond", "calc", "calc/gravity"],
None,
use_original=True,
)
score_model(
make_pipeline(
RobustScaler(),
CalibratedClassifierCV(
SVC(C=0.5, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
method="isotonic",
),
),
# ["cond", "calc", "calc/gravity", "cond/gravity"], "SVC", use_original = True)
# ["cond", "calc", "gravity"], "SVC", use_original = True)
["calc", "cond/gravity", "calc/gravity"],
None,
use_original=True,
)
# # GBM
check_new_features(
make_pipeline(
GradientBoostingClassifier(
learning_rate=0.12,
n_estimators=49,
max_features=1,
min_samples_leaf=23,
max_depth=1,
random_state=SEED,
)
),
["cond", "calc", "calc/ph", "calc/gravity", "cond/ph"],
n_repeats=50,
)
score_model(
make_pipeline(
GradientBoostingClassifier(
learning_rate=0.1,
n_estimators=50,
max_features=1,
min_samples_leaf=23,
max_depth=1,
random_state=SEED,
)
),
# ["cond", "calc"], "GBM", use_original = True)
["calc"],
None,
use_original=False,
)
score_model(
make_pipeline(
CalibratedClassifierCV(
GradientBoostingClassifier(
learning_rate=0.12,
n_estimators=49,
max_features=1,
min_samples_leaf=23,
max_depth=1,
random_state=SEED,
),
method="isotonic",
)
),
# ["cond", "calc"], "GBM", use_original = True)
["cond", "calc", "calc/ph", "calc/gravity", "cond/ph"],
None,
use_original=True,
)
# # HistGradientBoostingClassifier
score_model(
make_pipeline( # MinMaxScaler(),
CalibratedClassifierCV(
HistGradientBoostingClassifier(
learning_rate=0.01,
max_iter=100,
min_samples_leaf=30,
max_leaf_nodes=3,
random_state=SEED,
),
method="isotonic",
)
),
["cond", "calc"],
None,
use_original=True,
)
# # Gaussian Process Classifier
from sklearn.gaussian_process.kernels import RBF
kernel = 1.0 * RBF(1.0)
score_model(
make_pipeline(
RobustScaler(),
CalibratedClassifierCV(
GaussianProcessClassifier(random_state=SEED), method="isotonic"
),
),
["cond", "calc", "calc/gravity"],
"GaussianProcess",
use_original=False,
)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=SEED)
cvs, cts, cofs = cv_score(
make_pipeline(
RobustScaler(),
CalibratedClassifierCV(
GaussianProcessClassifier(random_state=SEED), method="isotonic"
),
),
features=["calc", "calc/gravity"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.5f} Training {cts:.4f} Overfitting {cofs:.4f}")
score_model(
make_pipeline(
RobustScaler(),
CalibratedClassifierCV(
GaussianProcessClassifier(random_state=SEED), method="isotonic"
),
),
["calc", "calc/gravity"],
"GaussianProcess",
use_original=True,
)
# # Logistic
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=SEED)
cvs, cts, cofs = cv_score(
make_pipeline(
StandardScaler(),
Nystroem(gamma=2, n_components=120),
CalibratedClassifierCV(LogisticRegression(C=0.1), method="isotonic"),
),
features=["log_calc"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.5f} Training {cts:.4f} Overfitting {cofs:.4f}")
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=SEED)
cvs, cts, cofs = cv_score(
make_pipeline(
StandardScaler(),
Nystroem(gamma=6, n_components=130),
CalibratedClassifierCV(LogisticRegression(C=0.1), method="isotonic"),
),
features=["calc"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.5f} Training {cts:.4f} Overfitting {cofs:.4f}")
score_model(
make_pipeline(
StandardScaler(),
Nystroem(gamma=2, n_components=120),
CalibratedClassifierCV(LogisticRegression(C=0.1), method="isotonic"),
),
["log_calc"],
f"Logistic2",
use_original=True,
)
score_model(
make_pipeline(
StandardScaler(),
Nystroem(gamma=6, n_components=130),
CalibratedClassifierCV(LogisticRegression(C=0.1), method="isotonic"),
),
["calc"],
f"Logistic",
use_original=True,
)
score_model(
make_pipeline(
StandardScaler(),
PolynomialFeatures(5), # StandardScaler(),
CalibratedClassifierCV(
LogisticRegression(C=0.4, max_iter=4000), method="isotonic"
),
),
["calc"],
None,
use_original=False,
)
# # GAM
SPLINES = 15
gam_params = {
"terms": s(3, n_splines=SPLINES, lam=0.25)
+ s(5, n_splines=SPLINES, lam=0.25)
+ te(0, 5, lam=0.25)
+ te(2, 5, lam=0.25)
+ te(3, 4, lam=0.25)
+ te(3, 5, lam=0.25)
+ te(4, 5, lam=0.25)
# , "lam": 0.25, "fit_intercept":False}),
,
"fit_intercept": False,
}
score_model(
make_pipeline(
MinMaxScaler(),
# CalibratedClassifierCV(LogisticGAM(**gam_params), method="isotonic")),
LogisticGAM(**gam_params),
),
test.columns,
None,
use_original=False,
)
SPLINES = 15
gam_params = {
"terms": s(0, n_splines=SPLINES, lam=0.25)
+ s(1, n_splines=SPLINES, lam=0.25)
+ te(0, 1, lam=0.25)
# , "lam": 0.25, "fit_intercept":False}),
,
"fit_intercept": False,
}
score_model(
make_pipeline(
MinMaxScaler(),
# CalibratedClassifierCV(LogisticGAM(**gam_params), method="isotonic")),
LogisticGAM(**gam_params),
),
["cond", "calc"],
None,
use_original=False,
)
# # RandomForest
score_model(
CalibratedClassifierCV(
RandomForestClassifier(
n_estimators=400, max_features=6, min_samples_leaf=4, random_state=SEED
),
method="isotonic",
),
test.columns,
None,
use_original=True,
)
score_model(
CalibratedClassifierCV(
RandomForestClassifier(
n_estimators=500, max_features=1, min_samples_leaf=15, random_state=SEED
),
method="isotonic",
),
["cond", "calc"],
None,
use_original=True,
)
# # ExtraTrees
check_new_features(
ExtraTreesClassifier(
n_estimators=100,
max_features=1,
min_samples_leaf=14,
random_state=SEED,
criterion="entropy",
),
["calc"],
new_features=["osmo", "cond", "ph", "urea", "gravity", "calc/gravity", "cond/osmo"],
n_repeats=10,
)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=SEED)
cvs, cts, cofs = cv_score(
ExtraTreesClassifier(
n_estimators=100,
max_features=1,
min_samples_leaf=16,
random_state=SEED,
criterion="entropy",
),
features=["calc"],
cv=cv,
add_origin=False,
)
print(f"Valid {cvs:.4f} Training {cts:.4f} Overfitting {cofs:.4f}")
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=SEED)
for f in [17, 18, 19, 20]:
cvs, cts, cofs = cv_score(
ExtraTreesClassifier(
n_estimators=100,
max_features=2,
min_samples_leaf=f,
random_state=SEED,
criterion="entropy",
),
features=["calc", "cond"],
cv=cv,
add_origin=True,
)
print(f"Valid {cvs:.5f} Training {cts:.4f} Overfitting {cofs:.4f}")
score_model(
CalibratedClassifierCV(
ExtraTreesClassifier(
n_estimators=100,
max_features=2,
min_samples_leaf=18,
random_state=SEED,
criterion="entropy",
),
method="isotonic",
),
["cond", "calc"],
f"ExtraTrees",
use_original=True,
)
# ["cond","calc", "calc/gravity", "cond/osmo"], f'ExtraTrees', use_original=True)
# ["cond","calc", "calc/gravity"], f'ExtraTrees', use_original=False)
# # Ensemble
oof = pd.DataFrame(index=train.index)
for m in results:
oof[m[0]] = m[5]
ridge_params = {"alpha": 0, "fit_intercept": False, "positive": True}
corr = spearmanr(oof).correlation
# Ensure the correlation matrix is symmetric
corr = (corr + corr.T) / 2
np.fill_diagonal(corr, 1)
# We convert the correlation matrix to a distance matrix before performing
# hierarchical clustering using Ward's linkage.
distance_matrix = 1 - np.abs(corr)
dist_linkage = hierarchy.ward(squareform(distance_matrix))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
dendro = hierarchy.dendrogram(
dist_linkage, labels=oof.columns, ax=ax1, leaf_rotation=90
)
dendro_idx = np.arange(0, len(dendro["ivl"]))
matrix = corr[dendro["leaves"], :][:, dendro["leaves"]]
sns.heatmap(
matrix, annot=True, fmt=".2f", cmap="coolwarm", mask=np.triu(matrix), ax=ax2
)
ax2.set_xticklabels(dendro["ivl"], rotation="vertical")
ax2.set_yticklabels(dendro["ivl"], rotation="horizontal")
fig.tight_layout()
plt.show()
def blend(oof, seed=SEED):
score_list = []
oof_blend = pd.Series(0, index=oof.index)
folds = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=seed)
for fold, (trn_idx, val_idx) in enumerate(folds.split(oof, train[target])):
y_trn, y_val = train[target][trn_idx], train[target][val_idx]
m = 0
for f in oof.columns:
m = max(m, roc_auc_score(y_val, oof.iloc[val_idx][f]))
blend_model = Ridge(**ridge_params)
blend_model.fit(oof.iloc[trn_idx], y_trn)
y_val_pred = blend_model.predict(oof.iloc[val_idx]) / np.sum(blend_model.coef_)
score_list.append(roc_auc_score(y_val, y_val_pred))
oof_blend.iloc[val_idx] = y_val_pred
ok = "ok" if m < score_list[fold] else "ko"
coefs = [np.round(c, 2) for c in blend_model.coef_]
print(
f"Fold {fold+1:2}: AUC = {score_list[fold]:.5f} "
f"weights = {coefs} "
f"Max original AUC {m:.5f} => {ok}"
)
all_aucs = [m[2] for m in results if m[0] in oof.columns]
libs = [m[0] for m in results if m[0] in oof.columns] + ["blend"]
all_res = {}
for i, f in enumerate(libs[:-1]):
all_res[f] = all_aucs[i]
# all_res[f] = roc_auc_score(train[target], oof[f])
print("AUC in model {} : {:.5f}".format(f, all_res[f]))
all_res["blend"] = np.mean(score_list) # roc_auc_score(train[target], oof_blend)
print(
f"Average AUC : {np.mean(score_list):.5f} (std : {np.std(score_list):.5f}) | OOF : {roc_auc_score(train[target], oof_blend):.5f}"
)
fig, ax = plt.subplots(1, 3, figsize=(10, len(oof.columns)), sharey=True)
res = pd.Series(all_res)
overfits = [m[3] for m in results if m[0] in oof.columns] + [0]
stds = [m[4] for m in results if m[0] in oof.columns] + [np.std(score_list)]
color = ["skyblue" for i in range(len(res))]
color[res.index.get_loc("blend")] = "orange"
res.plot.barh(ax=ax[0], color=color).set(title="AUC")
ax[0].set_xlim(0.75, 0.85)
pd.Series(overfits, index=libs).plot.barh(ax=ax[1], color=color).set(
title="Overfitting"
)
# ax[0].set_xlim(0.75, 0.85)
pd.Series(stds, index=libs).plot.barh(ax=ax[2], color=color).set(title="OOF std")
for i in range(3):
ax[i].bar_label(ax[i].containers[0], fmt="%.5f", padding=2)
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
display_model("OOF blend", oof_blend)
blend(oof)
blend(oof[["Logistic", "Logistic2"]])
blend(oof[["Logistic", "SVC", "ExtraTrees"]])
# # Inference
final = ["Logistic", "SVC", "ExtraTrees"]
opti_blend = Ridge(**ridge_params)
opti_blend.fit(oof[final], train[target])
print(
f"AUC (train) : {roc_auc_score(train[target], opti_blend.predict(oof[final])/np.sum(opti_blend.coef_)):.4f}"
f"\n\nCoef for blend :"
)
display(pd.Series(opti_blend.coef_.round(2), final, name="weight"))
df_params = pd.DataFrame(
[[m[1] for m in results], [m[6] for m in results], list([m[7] for m in results])],
columns=[m[0] for m in results],
index=["model", "use_original", "features"],
).transpose()
df_params = df_params.loc[final]
display(df_params)
def fit_model_grouped(model, train, features):
model.fit(train[features], train[target])
all_preds = pd.DataFrame(0, columns=final, index=test.index)
# df_params['test_pred'] = None
for i in range(len(df_params)):
print(
f"Retraining {df_params.index[i]} {'with original data' if df_params.iloc[i].use_original else ''}"
)
if df_params.index[i] != "Keras":
features = df_params.iloc[i].features
if df_params.iloc[i].use_original:
fit_model_grouped(
df_params.iloc[i].model, pd.concat([train, origin], axis=0), features
)
else:
fit_model_grouped(df_params.iloc[i].model, train, features)
all_preds[df_params.index[i]] = df_params.iloc[i].model.predict_proba(
test[features]
)[:, 1]
else:
all_preds[df_params.index[i]] = keras_preds
all_preds[target] = opti_blend.predict(all_preds) / np.sum(opti_blend.coef_)
all_preds[target].to_csv("submission.csv")
# Final control
sub = pd.read_csv("/kaggle/working/submission.csv")
plt.figure(figsize=(5, 4))
plt.title("Preds")
plt.hist(sub[target], bins=100)
plt.show()
sub.head(10)
|
# BASE
import numpy as np
import pandas as pd
import math
# VISUALS
import seaborn as sns
import matplotlib.pyplot as plt
# From Property Services Regulatory Authority
# Property price records between 19..-2022: https://www.propertypriceregister.ie/
df_all = pd.read_csv(
"/kaggle/input/ireland-property-price-register/PPR-ALL.csv", encoding="ISO-8859-1"
)
df_all.sample(5)
print("Dataset shape:", df_all.shape)
print("=============================================")
print("Dataset dtypes:")
print(df_all.dtypes)
print("=============================================")
print("Dataset describe:")
df_all.describe()
print("Unique County values in Dataset:", df_all["County"].unique())
print("=============================================")
print("Unique ... values in Dataset:", df_all["Not Full Market Price"].unique())
print("=============================================")
print("Unique ... values in Dataset:", df_all["VAT Exclusive"].unique())
print("=============================================")
print("Unique ... values in Dataset:", df_all["Description of Property"].unique())
print("=============================================")
print("Unique ... values in Dataset:", df_all["Property Size Description"].unique())
# extract the sale year from each "dd/mm/yyyy" date string and find the earliest year in the register
yearz = []
for x in df_all["Date of Sale (dd/mm/yyyy)"]:
    year = int(x.split("/")[-1])
    yearz.append(year)
yearz.sort()
yearz[0]
df_all[df_all["Eircode"].apply(lambda eir: str(eir).startswith("D12"))]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import time
import random
import collections
import cv2
import tensorflow as tf
from keras.applications.vgg19 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.applications import (
ResNet50,
ResNet101,
ResNet152,
ResNet50V2,
ResNet101V2,
ResNet152V2,
)
import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
i = 0
h = 2
v = 3
fig, axes = plt.subplots(h, v, figsize=(12, 10))
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames[:4]:
img = cv2.imread(os.path.join(dirname, filename))
if i < h * v:
            # OpenCV loads images as BGR; convert to RGB for matplotlib display
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
ax = list(axes.flatten())[i]
ax.imshow(img)
ax.set_title(dirname.split("/")[6])
# ax.axis("off")
ax.set_xlabel("Image" + str(i + 1), size=15)
i += 1
plt.show()
train_dir = "/kaggle/input/face-mask-12k-images-dataset/Face Mask Dataset/Train/"
test_dir = "/kaggle/input/face-mask-12k-images-dataset/Face Mask Dataset/Test/"
val_dir = "/kaggle/input/face-mask-12k-images-dataset/Face Mask Dataset/Validation/"
print(
"num_of_classes: {} /".format(len(os.listdir(train_dir))),
"name_of_classes: {}".format(os.listdir(train_dir)),
)
print(
"num_of_train_withoutmask {}/".format(len(os.listdir(train_dir + "WithoutMask"))),
"num_of_train_withmask {}".format(len(os.listdir(train_dir + "WithMask"))),
)
print(
"num_of_test_withoutmask {}/".format(len(os.listdir(test_dir + "WithoutMask"))),
"num_of_test_withmask {}".format(len(os.listdir(test_dir + "WithMask"))),
)
print(
"num_of_val_withoutmask {}/".format(len(os.listdir(val_dir + "WithoutMask"))),
"num_of_val_withmask {}".format(len(os.listdir(val_dir + "WithMask"))),
)
# start=time.perf_counter()
img_shape = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
img = cv2.imread(os.path.join(dirname, filename))
img_shape.append(img.shape)
img_shape_df = pd.DataFrame(img_shape, columns=["Length", "Width", "Channel"])
img_shape_df.head()
# stop=time.perf_counter()
# print('{:0.4f} secs elapsed'.format(stop-start))
img_shape_df.describe()
list_counts = [img_shape_df.Length.value_counts()]
print(list_counts)
sns.kdeplot(img_shape_df.Length, bw_adjust=3, fill=True, color="green")
plt.grid()
plt.show()
h = 128
w = 128
train_dir = "../input/face-mask-12k-images-dataset/Face Mask Dataset/Train"
test_dir = "../input/face-mask-12k-images-dataset/Face Mask Dataset/Test"
val_dir = "../input/face-mask-12k-images-dataset/Face Mask Dataset/Validation"
train_datagen = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest",
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_gen = train_datagen.flow_from_directory(
train_dir,
target_size=(h, w),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
)
test_gen = test_datagen.flow_from_directory(
test_dir,
target_size=(h, w),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
)
val_gen = val_datagen.flow_from_directory(
val_dir,
target_size=(h, w),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
)
input_shape = [128, 128, 3]
initializer = tf.keras.initializers.GlorotNormal()
def build_model():
model = Sequential()
model.add(
Conv2D(
32,
(3, 3),
padding="same",
input_shape=input_shape,
activation="relu",
kernel_initializer=initializer,
)
)
model.add(
BatchNormalization(momentum=0.90)
) # axis=-1, momentum=0.99 (0.9-0.99), epsilon=0.001, center=True, scale=True,
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(BatchNormalization(momentum=0.90))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense((128), activation="relu"))
model.add(BatchNormalization(momentum=0.90))
model.add(Dense((64), activation="relu"))
model.add(Dense(2, activation="sigmoid"))
return model
model = build_model()
model.summary()
start = time.perf_counter()
loss_acc_values = []
model = build_model()
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
Checkpoints = ModelCheckpoint(
"model_mask.h5",
monitor="val_loss",
verbose=0,
save_best_only=True,
mode="min",
save_freq="epoch",
)
Earlystop = EarlyStopping(
monitor="val_loss", min_delta=0, patience=20, verbose=1, restore_best_weights=True
)
callbacks = [Earlystop, Checkpoints]
epochs = 10
train_numbers = 10000
valid_numbers = 800
test_numbers = 992
loss_acc_values.append(
    # model.fit accepts generators directly (fit_generator is deprecated in recent Keras)
    model.fit(
        train_gen,
        validation_data=val_gen,
        epochs=epochs,
        callbacks=callbacks,
        steps_per_epoch=train_numbers // 32,
        validation_steps=valid_numbers // 32,
    )
)
model.save("model_mask.h5")
stop = time.perf_counter()
print("{:0.4f} mins elapsed".format((stop - start) / 60))
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
for i, value in enumerate(loss_acc_values[0].history):
ax = axes.flatten()[i]
layer_num = 0
for history in loss_acc_values:
ax.plot(history.history[value], color="g")
if value == "accuracy" or value == "val_accuracy":
ax.axhspan(0.97, 0.99, color="skyblue", alpha=0.3)
elif value == "loss" or value == "val_loss":
ax.axhspan(0.1, 0.01, color="lightgreen")
ax.set_title(value, size=15, color="r", loc="left")
ax.set_xlabel("Number of Epocs")
ax.grid()
plt.show()
evaluation = model.evaluate(test_gen)
print("Accuracy on test set:", evaluation[1])
print("Loss on test set:", evaluation[0])
|
# 
# # TABLE OF CONTENTS
#
# * [1. Introduction](#1)
#
# * [2. Data Importing and Checking](#2)
#
# * [3. Assessing The Categorical Variables](#3)
#
# * [4. Assessing The Numerical Variables](#4)
#
# * [5. Model Building](#5)
#
# * [6. Rescaling the features](#6)
#
# * [7. Prediction](#7)
#
# * [8. Using Random Forest](#8)
# * [9. Feature Selection](#9)
# * [10. Conclusion](#10)
# # 1. Introduction
# #### **Problem Statement**
#
# For this demonstration, you will use the bank marketing data set. Let's first understand the problem statement so that we can make the best use of the available information and proceed in the right direction for the business problem at hand.
#
# So, a bank ran a marketing campaign in the past and has obtained data pertaining to nearly 11,000 customers, which includes variables such as their age, jobs, bank balance, education, loan status and so on. Based on this data, the bank wants to develop its future strategies based on the insights that it drew from the previous campaign and improve for the next campaign so that more customers agree to open term deposits with the bank.
#
# Hence, ‘deposit’ is the target variable here. A ‘Yes’ in the ‘deposit’ column indicates that the campaign was successful and the customer agreed to open a term deposit account with the bank. In contrast, a ‘No’ in the ‘deposit’ column indicates that the campaign was not very successful and the customer could not be convinced to open a term deposit account.
#
# Essentially, the bank wants to:
# * Build a model that quantitatively relates the success of the marketing campaign to variables such as job, marital status, education, bank balance, etc.
# * Identify the features of the data set that affect the successful conversion of customers.
# * Know the accuracy of the model, i.e., how well these variables predict the success of the campaign.
#
# You may download the data set and the Python notebook below. We recommend that you open the file on your computer and follow along with the demonstrations in the videos; this will help you understand the model building process easily and quickly.
# # 2. Data Importing and Checking
import numpy as np, pandas as pd
import matplotlib.pyplot as plt, seaborn as sns
from IPython.core.display import display, HTML
display(HTML("<style>.container {width:100% !important;}</style>"))
import warnings
warnings.filterwarnings("ignore")
bank_data = pd.read_csv("/kaggle/input/bank-marketing-v2/bank marketing v2.csv")
bank_data.head()
bank_data.info()
bank_data.deposit.value_counts()
bank_data.deposit.value_counts(normalize=True)
# * We can see that about 52% of the customers did not open a term deposit (deposit = 'no').
# * The remaining ~47% did agree to open a term deposit (deposit = 'yes').
# #### **Modifying the target variable to have 0/1 values**
bank_data.deposit = bank_data.deposit.map({"yes": 1, "no": 0})
bank_data.deposit.value_counts(normalize=True)
#
# # 3. Assessing the Categorical Variables
bank_data.education.value_counts(normalize=True)
cat_cols = bank_data.select_dtypes("object").columns
cat_cols
plt.figure(figsize=[20, 7])
for ind, col in enumerate(cat_cols):
plt.subplot(2, 5, ind + 1)
bank_data[col].value_counts(normalize=True).plot.barh()
plt.title(col)
plt.show()
bank_data.job.value_counts()
#
# # 4. Assessing the Numerical Variables
num_cols = bank_data.select_dtypes("number").columns
num_cols
# **Dropping `day` and `duration` columns**
bank1 = bank_data.drop(["duration", "day"], axis=1)
bank1.columns
num_cols = bank1.select_dtypes("number").columns
num_cols = num_cols.drop("deposit")
num_cols
plt.figure(figsize=[6, 4])
for ind, col in enumerate(num_cols):
plt.subplot(1, 2, ind + 1)
bank1[col].plot.box()
plt.title(col)
plt.show()
# #### **Creating dummy variables for the categorical variables**
# Handling `default`, `loan`, `housing`
def binary_map(col):
return col.map({"no": 0, "yes": 1})
binary_cols = ["default", "loan", "housing"]
bank1[binary_cols] = bank1[binary_cols].apply(binary_map)
bank1.housing.value_counts()
bank_data.housing.value_counts()
# **Creating dummy features for education, marital, p_recency, poutcome, contact, job, month**
dumm_cols = ["education", "marital", "p_recency", "contact", "poutcome", "job", "month"]
bank_dummies = pd.get_dummies(bank1[dumm_cols], drop_first=True)
bank_dummies.head()
bank_dummies.shape
# **Concatenating the dummies back on and dropping the original categorical columns**
bank1 = pd.concat([bank1, bank_dummies], axis=1)
bank1.drop(dumm_cols, axis=1, inplace=True)
bank1.shape
bank1.columns
#
# # 5. Model Building
# * Dividing into train and test sets
# * MinMax scaling for numeric features
# * Build multiple predictive models
# #### **Dividing into train and test datasets**
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(
bank1, test_size=0.2, random_state=42, stratify=bank1.deposit
)
df_train.shape, df_test.shape
df_train.deposit.value_counts(normalize=True)
df_test.deposit.value_counts(normalize=True)
# #### **MinMax scaling for numeric features**
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
df_train[["age", "balance"]].describe()
df_train[["age", "balance"]] = scaler.fit_transform(df_train[["age", "balance"]])
# #### **Building predictive models**
X_train = df_train.drop("deposit", axis=1)
y_train = df_train["deposit"]
X_test = df_test.drop("deposit", axis=1)
y_test = df_test["deposit"]
X_train.shape
y_train.shape, y_test.shape
# ### **Beginning with Logistic Regression**
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(random_state=42)
# This code creates an instance of a logistic regression model using scikit-learn's LogisticRegression class, and sets the random seed to 42 using the random_state parameter. Logistic regression is a statistical model used to predict the probability of a binary or categorical outcome based on one or more predictor variables.
# By setting the random seed, it ensures that the results of the model will be reproducible, meaning if you run the same code multiple times, you should get the same results. This can be important when working with machine learning models, as it allows for better tracking of changes in performance over time and across different experiments.
logreg.fit(X_train, y_train)
# This code fits the logistic regression model (logreg) to the training data, where X_train is the feature matrix (i.e., a two-dimensional array of shape [n_samples, n_features]) and y_train is the target variable (i.e., a one-dimensional array of length n_samples).
# Fitting the model involves estimating the model parameters that best fit the training data, which is done using an optimization algorithm that minimizes the logistic loss function. This results in a set of weights and biases that define the decision boundary between the two classes in the feature space.
# **Evaluating the model**
y_train_pred = logreg.predict(X_train)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
accuracy_score(y_train, y_train_pred)
# It's important to note that the accuracy score alone may not always be a sufficient metric to evaluate the performance of a classification model, especially when the class distribution is imbalanced or the cost of misclassification is different for different classes. In such cases, other metrics such as **precision, recall, F1-score, and area under the ROC curve (AUC-ROC)** may provide a more comprehensive view of the model's performance.
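# For illustration (a quick sketch using the objects defined above), a few of those additional metrics
# can be computed directly on the training predictions:
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score

print("Precision:", precision_score(y_train, y_train_pred))
print("Recall   :", recall_score(y_train, y_train_pred))
print("F1-score :", f1_score(y_train, y_train_pred))
# ROC-AUC needs predicted probabilities for the positive class
print("ROC-AUC  :", roc_auc_score(y_train, logreg.predict_proba(X_train)[:, 1]))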
confusion_matrix(y_train, y_train_pred)
print(classification_report(y_train, y_train_pred))
# The classification_report() function from scikit-learn's metrics module generates a report that includes metrics such as precision, recall, F1-score, and support for each class, as well as the macro- and micro-averaged scores across all classes. The precision, recall, and F1-score are computed for each class separately, and are weighted by the number of true instances of that class.
# **Performance on test set**
y_test_pred = logreg.predict(X_test)
accuracy_score(y_test, y_test_pred)
# ### **Using RandomForest**
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=42, n_estimators=50, oob_score=True)
# * **random_state=42:** sets the random seed to 42, which ensures that the results of the model will be reproducible.
# * **n_estimators=50:** sets the number of decision trees in the forest to 50. Increasing the number of trees can improve the performance of the model, but may also increase the computational cost and risk overfitting the data.
# * **oob_score=True:** enables out-of-bag (OOB) estimation of the model's accuracy. In random forest, each tree is trained on a bootstrap sample of the training data, which means that some samples are not used in the training of each tree. These OOB samples can be used to estimate the performance of the model without the need for cross-validation or a separate validation set.
rf.fit(X_train, y_train)
# **Performance on the train set**
y_train_pred = rf.predict(X_train)
accuracy_score(y_train, y_train_pred)
# **Performance on unseen data**
y_test_pred = rf.predict(X_test)
accuracy_score(y_test, y_test_pred)
#
# # 6. Model evaluation: Cross validation
from sklearn.model_selection import cross_val_score
cross_val_score(logreg, X_train, y_train, cv=5, n_jobs=-1)
cross_val_score(rf, X_train, y_train, cv=5, n_jobs=-1)
cross_val_score(rf, X_train, y_train, cv=5, n_jobs=-1).mean()
# **Takeaway: Cross validation score gives a far more reliable estimate of the generalized performance on unseen data**
# **Note**: OOB Score in RandomForest is somewhat similar to cross val score
rf.oob_score_
# **Scoring methods in Cross val score**
import sklearn
sklearn.metrics.SCORERS.keys()
cross_val_score(rf, X_train, y_train, cv=5, n_jobs=-1, scoring="recall")
#
# # 7. Feature Selection
X_train.shape
# #### **Recursive Feature Elimination - RFE**
from sklearn.feature_selection import RFE
logreg = LogisticRegression(random_state=42)
rfe = RFE(estimator=logreg, n_features_to_select=10)
# This code creates an instance of scikit-learn's RFE (Recursive Feature Elimination) class, which is used for feature selection in machine learning. The RFE class requires two arguments:
# * **estimator=logreg:** specifies the estimator to be used for the feature selection process. In this case, the LogisticRegression model logreg is used as the estimator.
# * **n_features_to_select=10:** specifies the number of features to select in the final feature subset. In this case, 10 features will be selected.
rfe.fit(X_train, y_train)
rfe.ranking_
X_train.columns[rfe.support_]
X_train2 = X_train.loc[:, rfe.support_]
X_train2.shape
X_train2.columns
# **Evaluation using cross val score**
cross_val_score(logreg, X_train2, y_train, n_jobs=-1)
# ## **Cross validation for feature selection**
num_features = X_train.shape
num_features[1]
cv_scores = []
logreg = LogisticRegression(random_state=42)
logreg.fit(X_train, y_train)
y_train_pred = logreg.predict(X_train)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
accuracy_score(y_train, y_train_pred)
confusion_matrix(y_train, y_train_pred)
print(classification_report(y_train, y_train_pred))
# #### **Performance on test set**
y_test_pred = logreg.predict(X_test)
accuracy_score(y_test, y_test_pred)
#
# # 8. Using RandomForest
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=42, n_estimators=50, oob_score=True)
rf.fit(X_train, y_train)
# #### **Performance On train Set**
y_train_pred = rf.predict(X_train)
accuracy_score(y_train, y_train_pred)
# Performance on unseen data
y_test_pred = rf.predict(X_test)
accuracy_score(y_test, y_test_pred)
# ### **Model evaluation: Cross validation**
from sklearn.model_selection import cross_val_score
cross_val_score(logreg, X_train, y_train, cv=5, n_jobs=-1)
cross_val_score(rf, X_train, y_train, cv=5, n_jobs=-1)
cross_val_score(rf, X_train, y_train, cv=5, n_jobs=-1).mean()
# **Takeaway:** Cross validation score gives a far more reliable estimate of the generalized performance on unseen data.
# **Note:** OOB Score in RandomForest is somewhat similar to cross val score
rf.oob_score_
import sklearn
sklearn.metrics.SCORERS.keys()
cross_val_score(rf, X_train, y_train, cv=5, n_jobs=-1, scoring="recall")
#
# # 9. Feature Selection and Hyper-parameter Tuning
X_train.shape
# **Recursive Feature Elimination - RFE:**
# RFE algorithm works by first training a model on the full set of features and then ranking the features based on their importance score, which is typically obtained from the coefficients of a linear model or the feature importances of a tree-based model. The least important feature(s) are then removed from the feature set, and the process is repeated until a desired number of features is reached. The optimal number of features is often determined by cross-validation.
rfe = RFE(estimator=logreg, n_features_to_select=10)
# The **estimator** parameter specifies the machine learning algorithm that will be used to train the model and estimate the importance of each feature. In this case, the estimator is **logreg**, which is an instance of the logistic regression algorithm from the **sklearn.linear_model** module.
rfe.fit(X_train, y_train)
rfe.ranking_
X_train.columns[rfe.support_]
X_train2 = X_train.loc[:, rfe.support_]
X_train2.shape
X_train2.columns
# Evaluation using cross val score
cross_val_score(logreg, X_train2, y_train, n_jobs=-1)
# ### **Cross validation for feature selection**
num_features = X_train.shape
num_features[1]
cv_scores = []
logreg = LogisticRegression(random_state=42)
for features in range(1, num_features[1] + 1):
rfe = RFE(logreg, n_features_to_select=features)
scores = cross_val_score(rfe, X_train, y_train, cv=4)
cv_scores.append(scores.mean())
plt.figure(figsize=[10, 5])
plt.plot(range(1, num_features[1] + 1), cv_scores)
plt.show()
# Using RFECV
from sklearn.feature_selection import RFECV
rfecv = RFECV(estimator=logreg, cv=4)
rfecv.fit(X_train, y_train)
rfecv.grid_scores_
plt.figure(figsize=[10, 5])
plt.plot(range(1, num_features[1] + 1), rfecv.grid_scores_)
plt.show()
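# Note (an assumption about newer library versions): `grid_scores_` was removed from RFECV in
# recent scikit-learn releases; the equivalent per-feature-count CV scores are available via
# rfecv.cv_results_["mean_test_score"], e.g.:
# mean_scores = rfecv.cv_results_["mean_test_score"]
# plt.plot(range(1, len(mean_scores) + 1), mean_scores)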
rfecv.n_features_
# ### **Hyper-parameter tuning using Cross Validation**
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
rf = RandomForestClassifier(random_state=42, n_jobs=-1)
# **n_jobs=-1:**This sets the number of parallel jobs to run for fitting and predicting. A value of -1 means to use all available processors.
hyper_params = {
"max_depth": [3, 5, 10, 15, 20],
"max_features": [3, 5, 7, 11, 15],
"min_samples_leaf": [20, 50, 100, 200, 400],
"n_estimators": [10, 25, 50, 80, 100],
}
# * **max_depth:** This specifies the maximum depth of each decision tree in the random forest. It takes a list of integers as values, ranging from 3 to 20.
# * **max_features:** This specifies the maximum number of features to consider when splitting a node in a decision tree. It takes a list of integers as values, ranging from 3 to 15.
# * **min_samples_leaf:** This specifies the minimum number of samples required to be at a leaf node in a decision tree. It takes a list of integers as values, ranging from 20 to 400.
# * **n_estimators:** This specifies the number of decision trees to include in the random forest. It takes a list of integers as values, ranging from 10 to 100
model_cv = GridSearchCV(
estimator=rf,
param_grid=hyper_params,
verbose=1,
cv=5,
n_jobs=-1,
return_train_score=True,
)
# * **estimator=rf:** This specifies the estimator or the machine learning model to be tuned, which is a RandomForestClassifier object that was previously defined.
# * **param_grid=hyper_params:** This specifies the grid of hyperparameters to be searched, which is the hyper_params dictionary object that was previously defined.
# * **verbose=1:** This controls the verbosity level of the output during the hyperparameter tuning process. A value of 1 means that progress messages are printed to the console.
# * **cv=5:** This specifies the number of folds to be used in the cross-validation process. In this case, 5-fold cross-validation will be used.
# * **n_jobs=-1:** This specifies the number of parallel jobs to run for fitting and predicting. A value of -1 means to use all available processors.
# * **return_train_score=True:** This specifies whether to return the training scores in addition to the validation scores during the hyperparameter tuning process.
model_cv.fit(X_train, y_train)
model_cv.best_score_
model_cv.best_estimator_
cv_df = pd.DataFrame(model_cv.cv_results_)
cv_df.head()
cv_df.sort_values(by="rank_test_score").head()
sel_cols = [
"param_max_depth",
"param_max_features",
"param_min_samples_leaf",
"param_n_estimators",
"rank_test_score",
"mean_test_score",
]
cv_df.sort_values(by="rank_test_score")[sel_cols].head(20)
# #### **Understand better the effect of Hyper-parameter**
cv_df.columns
cv_df.groupby("param_max_depth")["mean_train_score", "mean_test_score"].mean().plot(
figsize=[8, 5]
)
plt.show()
cv_df.groupby("param_max_depth")["mean_train_score", "mean_test_score"].agg(
np.median
).plot(figsize=[8, 5])
plt.show()
cv_df.groupby("param_n_estimators")["mean_train_score", "mean_test_score"].agg(
np.mean
).plot(figsize=[8, 5])
plt.show()
cv_df.groupby("param_max_features")["mean_train_score", "mean_test_score"].agg(
np.median
).plot(figsize=[8, 5])
plt.show()
cv_df.groupby("param_max_features")["mean_train_score", "mean_test_score"].agg(
np.mean
).plot(figsize=[8, 5])
plt.show()
cv_df.groupby("param_min_samples_leaf")["mean_train_score", "mean_test_score"].agg(
np.median
).plot(figsize=[8, 5])
plt.show()
# ### **Fine-tuning using GridSearch**
hyper_parameters = {
"min_samples_leaf": [5, 10, 20, 50],
"n_estimators": [50, 60, 70],
"max_features": [10, 12, 14, 16],
}
rf = RandomForestClassifier(max_depth=12, random_state=42, n_jobs=-1)
model_cv2 = GridSearchCV(
estimator=rf,
param_grid=hyper_parameters,
verbose=1,
cv=5,
return_train_score=True,
n_jobs=-1,
)
model_cv2.fit(X_train, y_train)
model_cv2.best_score_
model_cv2.best_estimator_
# #### **RandomizedSearchCV**
from sklearn.model_selection import RandomizedSearchCV
hyper_params = {
"max_depth": range(3, 20),
"max_features": range(3, 17),
"min_samples_leaf": range(20, 400, 50),
"n_estimators": range(10, 101, 10),
}
model_rcv = RandomizedSearchCV(
estimator=rf,
param_distributions=hyper_params,
verbose=1,
cv=5,
return_train_score=True,
n_jobs=-1,
n_iter=50,
)
model_rcv.fit(X_train, y_train)
model_rcv.best_score_
model_cv.best_score_
# #### **Extracting the best model and assessing test performance**
model_cv2.best_score_
rf_best = model_cv2.best_estimator_
rf_best
y_test_pred = rf_best.predict(X_test)
accuracy_score(y_test, y_test_pred)
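# A short follow-up sketch: the same report used earlier can summarise the tuned model on the test set.
print(classification_report(y_test, y_test_pred))
print(confusion_matrix(y_test, y_test_pred))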
|
# # Transfer Learning with TPU
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.data.experimental import AUTOTUNE
from tensorflow.keras import Model
from tensorflow.keras.applications import ResNet152V2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization, Dropout
from tensorflow.keras.optimizers import Adam
from kaggle_datasets import KaggleDatasets
# Detect TPU, return appropriate distribution strategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print("Running on TPU ", tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
# Get GCS path and select the file with 224x224 images
gcs_ds_path = KaggleDatasets().get_gcs_path("tpu-getting-started")
gcs_path = gcs_ds_path + "/tfrecords-jpeg-224x224"
# Set parameters
BUFFER_SIZE = 60000
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
# BATCH_SIZE = 4
IMAGE_SIZE = [224, 224]
HEIGHT = 224
WIDTH = 224
NUM_TRAINING_IMAGES = 12753
NUM_TEST_IMAGES = 7382
EPOCHS = 4
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
# Get the path to all the files within the tfrecords-jpeg-224x224 folder
training_filepath = tf.io.gfile.glob(gcs_path + "/train/*.tfrec")
validation_filepath = tf.io.gfile.glob(gcs_path + "/val/*.tfrec")
test_filepath = tf.io.gfile.glob(gcs_path + "/test/*.tfrec")
# Load TFRecord file from the folder as bytes
raw_training_dataset = tf.data.TFRecordDataset(training_filepath)
raw_validation_dataset = tf.data.TFRecordDataset(validation_filepath)
raw_test_dataset = tf.data.TFRecordDataset(test_filepath)
# Create a dictionary describing the features
labeled_feature_description = {
"class": tf.io.FixedLenFeature([], tf.int64),
"image": tf.io.FixedLenFeature([], tf.string),
}
unlabeled_feature_description = {
"id": tf.io.FixedLenFeature([], tf.string),
"image": tf.io.FixedLenFeature([], tf.string),
}
# Class name of flowers
CLASSES = [
"pink primrose",
"hard-leaved pocket orchid",
"canterbury bells",
"sweet pea",
"wild geranium", # 00-04
"tiger lily",
"moon orchid",
"bird of paradise",
"monkshood",
"globe thistle", # 05-09
"snapdragon",
"colt's foot",
"king protea",
"spear thistle",
"yellow iris", # 10-14
"globe-flower",
"purple coneflower",
"peruvian lily",
"balloon flower",
"giant white arum lily", # 15-19
"fire lily",
"pincushion flower",
"fritillary",
"red ginger",
"grape hyacinth", # 20-24
"corn poppy",
"prince of wales feathers",
"stemless gentian",
"artichoke",
"sweet william", # 25-29
"carnation",
"garden phlox",
"love in the mist",
"cosmos",
"alpine sea holly", # 30-34
"ruby-lipped cattleya",
"cape flower",
"great masterwort",
"siam tulip",
"lenten rose", # 35-39
"barberton daisy",
"daffodil",
"sword lily",
"poinsettia",
"bolero deep blue", # 40-44
"wallflower",
"marigold",
"buttercup",
"daisy",
"common dandelion", # 45-49
"petunia",
"wild pansy",
"primula",
"sunflower",
"lilac hibiscus", # 50-54
"bishop of llandaff",
"gaura",
"geranium",
"orange dahlia",
"pink-yellow dahlia", # 55-59
"cautleya spicata",
"japanese anemone",
"black-eyed susan",
"silverbush",
"californian poppy", # 60-64
"osteospermum",
"spring crocus",
"iris",
"windflower",
"tree poppy", # 65-69
"gazania",
"azalea",
"water lily",
"rose",
"thorn apple", # 70-74
"morning glory",
"passion flower",
"lotus",
"toad lily",
"anthurium", # 75-79
"frangipani",
"clematis",
"hibiscus",
"columbine",
"desert-rose", # 80-84
"tree mallow",
"magnolia",
"cyclamen ",
"watercress",
"canna lily", # 85-89
"hippeastrum ",
"bee balm",
"pink quill",
"foxglove",
"bougainvillea", # 90-94
"camellia",
"mallow",
"mexican petunia",
"bromelia",
"blanket flower", # 95-99
"trumpet creeper",
"blackberry lily",
"common tulip",
"wild rose", # 100-103
]
# Create a function to read and extract images from dataset
def _parse_labeled_image_function(example_proto):
example = tf.io.parse_single_example(example_proto, labeled_feature_description)
image = tf.io.decode_jpeg(example["image"])
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, IMAGE_SIZE)
label = tf.cast(example["class"], tf.int32)
return image, label
def _parse_unlabeled_image_function(example_proto):
example = tf.io.parse_single_example(example_proto, unlabeled_feature_description)
image = tf.io.decode_jpeg(example["image"])
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, IMAGE_SIZE)
idnum = example["id"]
return image, idnum
# Parse and extract images
# Parse labeled images, shuffle and batch
training_dataset = (
raw_training_dataset.map(_parse_labeled_image_function)
.repeat()
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
# Parse unlabeled images and batch
validation_dataset = (
raw_validation_dataset.map(_parse_labeled_image_function)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
# Parse unlabeled images and batch
test_dataset = (
raw_test_dataset.map(_parse_unlabeled_image_function)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
# Display images in a 5x5 grid
image_batch, label_batch = next(iter(training_dataset))
def display_images(image_batch, label_batch):
plt.figure(figsize=[20, 12])
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.imshow(image_batch[i])
plt.title(CLASSES[label_batch[i].numpy()])
plt.axis("off")
plt.show()
display_images(image_batch, label_batch)
# Create a function to augment brightness, contrast, flip and crop images
def augment_image(image, label):
# Add 10px padding and random crop
image = tf.image.resize_with_crop_or_pad(image, HEIGHT + 10, WIDTH + 10)
image = tf.image.random_crop(image, size=[*IMAGE_SIZE, 3])
# Random flip
image = tf.image.random_flip_left_right(image)
# Random brightness
image = tf.image.random_brightness(image, 0.2)
# Random contrast
image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
# Random saturation
image = tf.image.random_saturation(image, lower=0.8, upper=1.2)
return image, label
# Parse unlabeled images, augment, shuffle and batch
training_dataset_augmented = (
raw_training_dataset.map(_parse_labeled_image_function)
.map(augment_image)
.repeat()
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
# Display images in a 5x5 grid
image_batch_augmented, label_batch_augmented = next(iter(training_dataset_augmented))
display_images(image_batch_augmented, label_batch_augmented)
# Create a function to build the model
def build_model():
inputs = Input(shape=(HEIGHT, WIDTH, 3))
model = ResNet152V2(include_top=False, input_tensor=inputs, weights="imagenet")
# Freeze the pretrained weights
model.trainable = False
# Rebuild top
x = GlobalAveragePooling2D()(model.output)
x = BatchNormalization()(x)
# x = Dropout(0.2)(x)
x = Dropout(0.3)(x)
outputs = Dense(104, activation="softmax")(x)
# Compile
model = Model(inputs, outputs)
model.compile(
optimizer=Adam(learning_rate=1e-2),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
# Train the model
with strategy.scope():
model = build_model()
hist = model.fit(
training_dataset_augmented,
epochs=EPOCHS * 2,
validation_data=validation_dataset,
steps_per_epoch=STEPS_PER_EPOCH,
)
# Create a function to unfreeze the top 20 layers of the model
# But we'll keep the BatchNormalization layers frozen
def unfreeze_model(model):
for layer in model.layers[-20:]:
if not isinstance(layer, BatchNormalization):
layer.trainable = True
model.compile(
optimizer=Adam(learning_rate=1e-4),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
# Unfreeze and train the model
unfreeze_model(model)
hist = model.fit(
training_dataset_augmented,
epochs=EPOCHS,
validation_data=validation_dataset,
steps_per_epoch=STEPS_PER_EPOCH,
)
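# Optional sketch: visualise the fine-tuning history collected above (the keys follow the
# metric names used when compiling the model).
plt.figure(figsize=(10, 4))
plt.plot(hist.history["sparse_categorical_accuracy"], label="train accuracy")
plt.plot(hist.history["val_sparse_categorical_accuracy"], label="val accuracy")
plt.xlabel("Epoch")
plt.legend()
plt.show()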
# Predict images from test set
test_images = test_dataset.map(lambda image, idnum: image)
prob = model.predict(test_images)
pred = np.argmax(prob, axis=-1)
print(pred)
# Prepare file for submission
test_ids_ds = test_dataset.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype("U")
# np.savetxt(
# '/kaggle/working/submission.csv',
# np.rec.fromarrays([test_ids, pred]),
# fmt=['%s', '%d'],
# delimiter=',',
# header='id,label',
# comments='',
# )
dim2list = [[test_ids[i], pred[i]] for i in range(len(test_ids))]
df = pd.DataFrame(dim2list, columns=["id", "label"])
df.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LinearRegression
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Pre-processing
df = pd.read_csv("/kaggle/input/link-eth-daily-to-16-apr-2023/LINK-ETH.csv")
df = df.rename(columns={"Date": "date", "Close": "price"})
df.date = pd.to_datetime(df.date) # Format date column
df.sort_values(by="date", inplace=True) # Sort chronologically
df.reset_index(inplace=True, drop=True) # Re-index Data column
df["ind"] = [x + 1 for x in range(len(df))] # Add usable index column
df = df.dropna() # Delete N/A rows
print(df)
# # Pre-processing (highs)
# df_high = df_high.rename(columns = {
# 'Date' : 'date',
# 'Value' : 'price'
# })
# df_high.date = pd.to_datetime(df_high.date) # Format date column
# df_high.sort_values(by="date", inplace=True) # Sort chronologically
# df_high.reset_index(inplace=True, drop=True) # Re-index Data column
# df_high["ind"] = [x + 592 for x in range(len(df_high))] # Add usable index column
# df_high = df_high.dropna()
# # Pre-processing (lows)
# df_low = df_low.rename(columns = {
# 'Date' : 'date',
# 'Value' : 'price'
# })
# df_low.date = pd.to_datetime(df_low.date) # Format date column
# df_low.sort_values(by="date", inplace=True) # Sort chronologically
# df_low.reset_index(inplace=True, drop=True) # Re-index Data column
# df_low["ind"] = [x + 592 for x in range(len(df_low))] # Add usable index column
# df_low = df_low.dropna() # Delete N/A rows
# # Pre-processing (under/overvalued)
# df_under_over.date = pd.to_datetime(df_under_over.date) # Format date column
# df_under_over.sort_values(by="date", inplace=True) # Sort chronologically
# df_under_over.reset_index(inplace=True, drop=True) # Re-index Data column
# df_under_over["ind"] = [x + 592 for x in range(len(df_under_over))] # Add usable index column
# # Split df into undervalued and overvalued
# df_under = df_under_over[["date","ind","undervalued"]].copy()
# df_under = df_under.rename(columns = {'undervalued' : 'price'})
# df_under = df_under.dropna() # Delete N/A rows
# df_under.reset_index(inplace=True, drop=True) # Re-index Data column
# df_over = df_under_over[["date","ind","overvalued"]].copy()
# df_over = df_over.rename(columns={"overvalued": "price"})
# df_over = df_over.dropna() # Delete N/A rows
# df_over.reset_index(inplace=True, drop=True) # Re-index Data column
# # Define array of df to be analysed
# dfs = [df,df_high,df_low,df_under,df_over]
# Log-log transform of the day index and price columns
df["log_days"] = np.log10(df["ind"])
df["log_price"] = np.log10(df["price"])
# # Log-log transform (highs)
# df_high['log_days'] = np.log10(df_high['ind'])
# df_high['log_price'] = np.log10(df_high['price'])
# # Log-log transform (lows)
# df_low['log_days'] = np.log10(df_low['ind'])
# df_low['log_price'] = np.log10(df_low['price'])
# # Log-log transform (under)
# df_under['log_days'] = np.log10(df_under['ind'])
# df_under['log_price'] = np.log10(df_under['price'])
# # Log-log transform (over)
# df_over['log_days'] = np.log10(df_over['ind'])
# df_over['log_price'] = np.log10(df_over['price'])
# Linear regression model
days = len(df)
x = df.log_days.values[:days].reshape(-1, 1)
y = df.log_price[:days]
model = LinearRegression()
model.fit(x, y)
x_range = np.linspace(x.min(), x.max(), 100)
y_range = model.predict(x_range.reshape(-1, 1))
z_range = np.power(10, model.fit(x, y).predict(x))
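# A quick sketch to inspect the fitted power law: since log10(price) ≈ a * log10(days) + b,
# the slope, intercept and R^2 of the regression summarise the long-term trend.
print("Slope (a)    :", model.coef_[0])
print("Intercept (b):", model.intercept_)
print("R^2          :", model.score(x, y))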
# # Linear regression model (highs)
# x_high = df_high.log_days.values[:days].reshape(-1,1)
# y_high = df_high.log_price[:days]
# model.fit(x_high,y_high)
# x_range_high = np.linspace(x_high.min(), x_high.max(), 100)
# y_range_high = model.predict(x_range_high.reshape(-1, 1))
# z_range_high = np.power(10,model.fit(x_high,y_high).predict(x))
# # Linear regression model (lows)
# x_low = df_low.log_days.values[:days].reshape(-1,1)
# y_low = df_low.log_price[:days]
# model.fit(x_low,y_low)
# x_range_low = np.linspace(x_low.min(), x_low.max(), 100)
# y_range_low = model.predict(x_range_low.reshape(-1, 1))
# z_range_low = np.power(10,model.fit(x_low,y_low).predict(x))
# # Linear regression model (under)
# x_under = df_under.log_days.values[:days].reshape(-1,1)
# y_under = df_under.log_price[:days]
# model.fit(x_under,y_under)
# x_range_under = np.linspace(x_under.min(), x_under.max(), 100)
# y_range_under = model.predict(x_range_under.reshape(-1, 1))
# z_range_under = np.power(10,model.fit(x_under,y_under).predict(x))
# # Linear regression model (over)
# x_over = df_over.log_days.values[:days].reshape(-1,1)
# y_over = df_over.log_price[:days]
# model.fit(x_over,y_over)
# x_range_over = np.linspace(x_over.min(), x_over.max(), 100)
# y_range_over = model.predict(x_range_over.reshape(-1, 1))
# z_range_over = np.power(10,model.fit(x_over,y_over).predict(x))
# Define fair value and over/undervaluation
df["fair_value"] = z_range
print(df)
"""Working on making this automated..."""
# Create visualisation
fig = make_subplots()
fig.add_trace(
go.Scatter(
x=df["log_days"],
y=df["log_price"],
name="Price",
opacity=1,
hovertemplate="%{y:.4f}",
),
)
# fig.add_trace(
# go.Scatter(
# x=x_range_high,
# y=y_range_high,
# name="Upper",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:.4f}"
# ),
# )
# fig.add_trace(
# go.Scatter(
# x=x_range_over,
# y=y_range_over,
# name="Overvalued",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:.4f}"
# ),
# )
fig.add_trace(
go.Scatter(
x=x_range,
y=y_range,
name="Fair Value",
opacity=1,
hoverinfo="y",
hovertemplate="%{y:.4f}",
),
)
# fig.add_trace(
# go.Scatter(
# x=x_range_under,
# y=y_range_under,
# name="Undervalued",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:.4f}"
# ),
# )
# fig.add_trace(
# go.Scatter(
# x=x_range_low,
# y=y_range_low,
# name="Lower",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:.4f}"
# ),
# )
fig.update_yaxes(
zeroline=False,
# type='log',
nticks=7,
minor=dict(ticks="inside", ticklen=2, showgrid=True),
)
fig.update_layout(
title={
"text": "Log price vs Log days",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "bottom",
},
xaxis_title="Log days",
yaxis_title="Log price ($USD)",
hovermode="x unified",
font_family="Arial",
margin_t=40,
margin_b=0,
margin_l=0,
margin_r=0,
legend=dict(
title="", orientation="h", yanchor="middle", y=0.1, xanchor="center", x=0.5
),
)
fig.show()
# Create visualisation (back transformed)
fig1 = make_subplots()
fig1.add_trace(
go.Scatter(
x=df["ind"], y=df["price"], name="Price", opacity=1, hovertemplate="%{y:$.2f}"
),
)
# fig1.add_trace(
# go.Scatter(
# x=df['ind'],
# y=z_range_high,
# name="Upper",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:$.2f}"
# ),
# )
# fig1.add_trace(
# go.Scatter(
# x=df['ind'],
# y=z_range_over,
# name="Overvalued",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:$.2f}"
# ),
# )
fig1.add_trace(
go.Scatter(
x=df["ind"],
y=z_range,
name="Fair Value",
opacity=1,
hoverinfo="y",
hovertemplate="%{y:$.2f}",
),
)
# fig1.add_trace(
# go.Scatter(
# x=df['ind'],
# y=z_range_under,
# name="Undervalued",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:$.2f}"
# ),
# )
# fig1.add_trace(
# go.Scatter(
# x=df['ind'],
# y=z_range_low,
# name="Lower",
# opacity=1,
# hoverinfo="y",
# hovertemplate="%{y:$.2f}"
# ),
# )
fig1.update_yaxes(
type="log",
zeroline=False,
nticks=7,
minor=dict(ticks="inside", ticklen=2, showgrid=True),
)
fig1.update_layout(
title={
"text": "LINK-ETH Regression",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "bottom",
},
xaxis_title="Days since inception",
yaxis_title="Price ($USD)",
yaxis_tickformat="$",
hovermode="x",
font_family="Arial",
margin_t=40,
margin_b=0,
margin_l=0,
margin_r=0,
legend=dict(
title="", orientation="h", yanchor="middle", y=0.1, xanchor="center", x=0.5
),
)
fig1.show()
|
# Importing the libraries needed for the project
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.io as pio
# ## 1. Loading the data
# Set the path to the folder containing the CSV files
folder_path = "/kaggle/input/synthea"
# Create an empty dictionary to store all data frames (df)
df = {}
# Loop over all files in the folder with the .csv extension
for filename in os.listdir(folder_path):
if filename.endswith(".csv"):
# Read the CSV file into a Pandas data frame and use the filename (without extension) as the key in the dictionary
file_path = os.path.join(folder_path, filename)
key = filename[
:-4
] # Remove the last 4 characters (i.e. ".csv") from the filename
df[key] = pd.read_csv(file_path)
# ## 2.1 Explore the data
df["allergies"].head(1)
df["careplans"].head(1)
df["conditions"].head(1)
df["devices"].head(1)
df["encounters"].head(1)
df["imaging_studies"].head(1)
df["immunizations"].head(1)
df["medications"].head(1)
df["observations"].head(1)
df["organizations"].head(1)
df["patients"].head(1)
df["payer_transitions"].head(1)
df["payers"].head(1)
df["procedures"].head(1)
df["providers"].head(1)
df["supplies"]
# Overall, the data frames that describe the patient's medical condition are: "allergies", "careplans", "conditions", "devices", "encounters", "imaging_studies", "immunizations", "medications", "observations", and "procedures".
# In contrast, the following data frames will not be considered:
# - "organizations": contact information/location data for the medical providers (e.g. hospitals, clinics).
# - "payers": financial data related to the insurance companies.
# - "payer_transitions": the patient's insurance affiliation over time.
# - "supplies": does not contain any data.
# Finally, the data frame "patients" will only be used to select a random Id belonging to a female patient for task 2.2.
# ## 2.2 Make a visualization of a single patient trajectory as she transitions through the medical care system over time
# Creating a user-defined function to pick up a random Id from a female patient
import random
def get_random_female_id(df):
# Filter the DataFrame by the "GENDER" column
female_df = df[df["GENDER"] == "F"]
# Select a random "Id" value from the resulting subset
random_id = random.choice(female_df["Id"].tolist())
return random_id
# Choose a random Id to visualize
patient_id = get_random_female_id(df["patients"])
patient_id
# List of all the data frames with patient medical information
df_names = [
"allergies",
"careplans",
"conditions",
"devices",
"encounters",
"imaging_studies",
"immunizations",
"medications",
"observations",
"procedures",
]
# Create an empty list to store the events associated with the patient
patient_events = []
# Loop over all the data frames and extract the events associated with the patient
for df_name in df_names:
df_patient = df[df_name][df[df_name]["PATIENT"] == patient_id]
patient_events.append(df_patient)
# Combine all the events into a single data frame
patient_events = pd.concat(patient_events)
# Convert the "START" column to datetime type
patient_events["START"] = pd.to_datetime(patient_events["START"], errors="coerce")
# Convert "DATE" column to datetime type
patient_events["DATE"] = pd.to_datetime(patient_events["DATE"], errors="coerce")
# Define a custom function to convert START and DATE columns to timestamp strings
def to_timestamp(row):
if not pd.isna(row["START"]):
return row["START"].strftime("%d/%m/%Y")
elif not pd.isna(row["DATE"]):
return row["DATE"].strftime("%d/%m/%Y")
else:
return None # handle invalid values if any
# Apply the custom function to create the new TIMESTAMP column
patient_events["TIMESTAMP"] = patient_events.apply(to_timestamp, axis=1)
# remove rows with missing values in the TIMESTAMP column
patient_events.dropna(subset=["TIMESTAMP"], inplace=True)
# Convert the TIMESTAMP column to datetime type
patient_events["TIMESTAMP"] = pd.to_datetime(
patient_events["TIMESTAMP"], format="%d/%m/%Y", errors="coerce"
)
# Sort the data frame by the TIMESTAMP column
patient_events_sorted = patient_events.sort_values(by="TIMESTAMP", ascending=True)
# Convert DataFrame to a list of dictionaries
data = patient_events_sorted.to_dict("records")
# Define the layout for the table with patient_id in the title
layout = go.Layout(
title=f"Patient Events for Patient {patient_id}",
height=500,
margin=dict(l=50, r=50, t=50, b=50),
)
# Create the table
column_names = [
"TIMESTAMP",
"ENCOUNTER",
"DESCRIPTION",
"REASONDESCRIPTION",
"VALUE",
"UNITS",
]
table = go.Figure(
data=[
go.Table(
header=dict(
values=list(patient_events_sorted[column_names].columns),
fill_color="lightblue",
align="left",
),
cells=dict(
values=[
[
val.strftime("%Y/%m/%d")
if isinstance(val, pd.Timestamp)
else val
for val in patient_events_sorted[col]
]
if col == "TIMESTAMP"
else [
str(val) if val is not None and not pd.isna(val) else ""
for val in patient_events_sorted[col]
]
for col in column_names
],
fill_color="lavender",
align="left",
),
)
],
layout=layout,
)
# Display the table
pio.show(table)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sqlalchemy import create_engine
import pymysql
import sqlalchemy
import sqlite3
import json
# standard Python library for handling HTTP is requests
import requests
api_key = "qG7e2aOMcV6QP9em5OKbpbBjVeoY1_1fzNzNh-PWjrdxg4rzUoasIuk4XeKidqP5i-joRAL-NaIOzM_pDucQCR79f6PCFedbdtr_No7RVa_gOaIgvHgT_-WiwzT9Y3Yx"
headers = {"Authorization": "Bearer {}".format(api_key)}
# the api endpoint url. We are working with the 'businesses' endpoint
search_api_url = "https://api.yelp.com/v3/businesses/search"
# Insert some parameters. Let's request a list of 50 businesses matching the term "coffee" located in '61820', the main zip code for Champaign.
# Review all parameters and documentation at https://www.yelp.com/developers/documentation/v3/business_search
params = {"term": "coffee", "location": "61820", "limit": 50}
response = requests.get(search_api_url, headers=headers, params=params, timeout=5)
# extract JSON data from the response
data = response.json()
print(data)
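# A small defensive sketch (optional): check the HTTP status code before relying on the JSON body.
if response.status_code != 200:
    print("Request failed with status", response.status_code, response.text[:200])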
# Load data to a data frame
df = pd.DataFrame(data["businesses"])
# Display the top rows; head() shows the first 5 rows by default.
# Documentation for attributes available at https://www.yelp.com/developers/documentation/v3/business_search
# Spend some time understanding all the attributes.
df.head()
headers = {"Authorization": "Bearer {}".format(api_key)}
# the api endpoint url. We are working with the 'businesses' endpoint
search_api_url = "https://api.yelp.com/v3/businesses/search"
# Insert some parameters. This time, request a list of 50 businesses matching the term "restaurants" located in '61820'.
# Review all parameters and documentation at https://www.yelp.com/developers/documentation/v3/business_search
params1 = {"term": "restaurants", "location": "61820", "limit": 50}
response = requests.get(search_api_url, headers=headers, params=params1, timeout=5)
# extract JSON data from the response
data1 = response.json()
print(data1)
# Load data to a data frame
df1 = pd.DataFrame(data1["businesses"])
# Display the top rows; head() shows the first 5 rows by default.
# Documentation for attributes available at https://www.yelp.com/developers/documentation/v3/business_search
# Spend some time understanding all the attributes.
df1.head()
headers = {"Authorization": "Bearer {}".format(api_key)}
# the api endpoint url. We are working with the 'businesses' endpoint
search_api_url = "https://api.yelp.com/v3/businesses/search"
# Insert some parameters. This time, request the top 5 businesses matching the term "Bakeries" in '61820', sorted by rating.
# Review all parameters and documentation at https://www.yelp.com/developers/documentation/v3/business_search
params7 = {"term": "Bakeries", "location": "61820", "sort_by": "rating", "limit": 5}
# We can feed these variables into the "get" function.
# We also set timeout=5 so Requests stops waiting for a response after 5 seconds.
response = requests.get(search_api_url, headers=headers, params=params7, timeout=5)
# extract JSON data from the response
data7 = response.json()
print(data7)
# Load data to a data frame
df7 = pd.DataFrame(data7["businesses"])
# Display the top rows; head() shows the first 5 rows by default.
# Documentation for attributes available at https://www.yelp.com/developers/documentation/v3/business_search
# Spend some time understanding all the attributes.
df7.head()
headers = {"Authorization": "Bearer {}".format(api_key)}
# the api endpoint url. We are working with the 'businesses' endpoint
search_api_url = "https://api.yelp.com/v3/businesses/search"
# Insert some parameters. Request the top 5 businesses matching the term "Restaurants" in '61820', sorted by rating.
# Review all parameters and documentation at https://www.yelp.com/developers/documentation/v3/business_search
params8 = {"term": "Restaurants", "location": "61820", "sort_by": "rating", "limit": 5}
# We can feed these variables into the "get" function.
# We also set timeout=5 so Requests stops waiting for a response after 5 seconds.
response = requests.get(search_api_url, headers=headers, params=params8, timeout=5)
# extract JSON data from the response
data8 = response.json()
print(data8)
# Load data to a data frame
df8 = pd.DataFrame(data8["businesses"])
# Display the top rows; head() shows the first 5 rows by default.
# Documentation for attributes available at https://www.yelp.com/developers/documentation/v3/business_search
# Spend some time understanding all the attributes.
df8.head()
headers = {"Authorization": "Bearer {}".format(api_key)}
# the api endpoint url. We are working with the 'businesses' endpoint
search_api_url = "https://api.yelp.com/v3/businesses/search"
# Insert some parameters. Request the top 5 businesses matching the term "Coffee" in '61820', sorted by rating.
# Review all parameters and documentation at https://www.yelp.com/developers/documentation/v3/business_search
params9 = {"term": "Coffee", "location": "61820", "sort_by": "rating", "limit": 5}
# We can feed these variables into the "get" function.
# We also set timeout=5 so Requests stops waiting for a response after 5 seconds.
response = requests.get(search_api_url, headers=headers, params=params9, timeout=5)
# extract JSON data from the response
data9 = response.json()
print(data9)
# Load data to a data frame
df9 = pd.DataFrame(data9["businesses"])
# Display the top rows; head() shows the first 5 rows by default.
# Documentation for attributes available at https://www.yelp.com/developers/documentation/v3/business_search
# Spend some time understanding all the attributes.
df9.head()
|
# importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings(action="ignore")
# importing application data file and seeing top 5 dataset values
df1 = pd.read_csv("application_data.csv")
df1.head()
# find the shape of the data i.e. number of rows and number of columns
df1.shape
# to find the information about the data
df1.info(max_cols=122, show_counts=True)
# to find the percentage of null values in descending order
print(round((100 * df1.isnull().mean()).sort_values(ascending=False), 2).to_string())
# dropping columns with more than 40% missing values (i.e. keeping columns with at least 60% non-null entries)
df_app = df1.dropna(thresh=len(df1) * 0.6, axis="columns")
# finding the shape of the data after dropping the null values
df_app.shape
# to find the percentage of null values in descending order
print(round((100 * df_app.isnull().mean()).sort_values(ascending=False), 2).to_string())
# imputing the numerical columns with their median values
df_app = df_app.fillna(df_app.median(numeric_only=True))
# imputing the categorical value with mode
mod = df_app["OCCUPATION_TYPE"].mode()[0]
df_app["OCCUPATION_TYPE"] = df_app["OCCUPATION_TYPE"].fillna(mod)
mod = df_app["NAME_TYPE_SUITE"].mode()[0]
df_app["NAME_TYPE_SUITE"] = df_app["NAME_TYPE_SUITE"].fillna(mod)
# to find the percentage of null values in descending order
print(round((100 * df_app.isnull().mean()).sort_values(ascending=False), 2).to_string())
# Segmenting categorical columns for analysis
cat_col = [
"TARGET",
"NAME_CONTRACT_TYPE",
"CODE_GENDER",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"AMT_REQ_CREDIT_BUREAU_HOUR",
"AMT_REQ_CREDIT_BUREAU_DAY",
"AMT_REQ_CREDIT_BUREAU_WEEK",
"AMT_REQ_CREDIT_BUREAU_QRT",
"DEF_30_CNT_SOCIAL_CIRCLE",
"DEF_60_CNT_SOCIAL_CIRCLE",
]
# converting object datatype to category datatype
for col in cat_col:
df_app[col] = pd.Categorical(df_app[col])
# checking statistics for birth and employed column
l = ["DAYS_BIRTH", "DAYS_EMPLOYED"]
df_app[l].describe()
# ### We can see negative values (a data discrepancy) in these two columns
# Converting negative values to positive values
df_app[l] = abs(df_app[l])
# Converting days of birth into years and creating an AGE_GROUP column
df_app["AGE"] = df_app["DAYS_BIRTH"] / 365
bins = [0, 20, 30, 40, 50, 100]
slots = ["18-20", "20-30", "30-40", "40-50", "50 Above"]
df_app["AGE_GROUP"] = pd.cut(df_app["AGE"], bins=bins, labels=slots)
# ## Outliers
# Segmenting continuous variable
cont_col = [
"CNT_CHILDREN",
"AMT_REQ_CREDIT_BUREAU_MON",
"AMT_REQ_CREDIT_BUREAU_YEAR",
"AMT_INCOME_TOTAL",
"AMT_CREDIT",
"AMT_ANNUITY",
"AMT_GOODS_PRICE",
"DAYS_BIRTH",
"EXT_SOURCE_2",
"EXT_SOURCE_3",
"OBS_30_CNT_SOCIAL_CIRCLE",
"OBS_60_CNT_SOCIAL_CIRCLE",
"DAYS_EMPLOYED",
]
# Box plots for checking outliers
for i in cont_col:
sns.boxplot(y=df_app[i])
plt.show()
# ## Data Imbalance
# Pie chart for loan defaulters
y = df_app["TARGET"].value_counts(normalize=True).values
lab = [0, 1]
plt.pie(y, labels=lab, autopct="%0.0f%%")
plt.legend()
plt.title("Imbalance between Non-Defaulters and Defaulter")
plt.show()
# ## Univariate Analysis
# Pie chart for loan type
y = df_app["NAME_CONTRACT_TYPE"].value_counts(normalize=True).values
lab = ["Cash loans", "Revolving loans"]
plt.pie(y, labels=lab, autopct="%0.0f%%")
plt.legend()
plt.title("Imbalance between Cash loans and Revolving loans")
plt.show()
# Pie chart for sex ratio
y = df_app["CODE_GENDER"].value_counts(normalize=True).values
lab = ["F", "M", "XNA"]
plt.pie(y, labels=lab, autopct="%0.0f%%")
plt.title("Ratio of Females and Males taking loan")
plt.legend()
plt.show()
# Histogram to see the distribution of age group taking loan
sns.histplot(df_app["AGE_GROUP"], stat="percent")
plt.show()
# Barplot for Type of occupations taking loan
plt.figure(figsize=[12, 7])
(df_app["OCCUPATION_TYPE"].value_counts()).plot.bar(color="orange", width=0.8)
plt.title("Percentage of Type of Occupations", fontdict={"fontsize": 20}, pad=20)
plt.show()
plt.figure(figsize=(20, 40))
i = 1
for col in cat_col:
plt.subplot(7, 2, i)
sns.countplot(x=col, data=df_app)
i += 1
plt.show()
# ## Bivariate Analysis
# Plotting pairplot between amount variable to draw reference against loan repayment status
amount = df_app[
["AMT_INCOME_TOTAL", "AMT_CREDIT", "AMT_ANNUITY", "AMT_GOODS_PRICE", "TARGET"]
]
amount = amount[
(amount["AMT_GOODS_PRICE"].notnull()) & (amount["AMT_ANNUITY"].notnull())
]
ax = sns.pairplot(amount, hue="TARGET", palette=["b", "r"])
ax.fig.legend(labels=["Repayer (TARGET=0)", "Defaulter (TARGET=1)"])
plt.show()
# countplot to see gender according to target variable
sns.countplot(x=df_app["CODE_GENDER"], hue=df_app["TARGET"])
plt.show()
# ## Multivariate Analysis
# Checking whether there is any correlation between the contact flags (mobile phone, work phone, email, etc.) and the target variable
contact_col = [
"FLAG_MOBIL",
"FLAG_EMP_PHONE",
"FLAG_WORK_PHONE",
"FLAG_CONT_MOBILE",
"FLAG_PHONE",
"FLAG_EMAIL",
"TARGET",
]
Contact_corr = df_app[contact_col].corr()
fig = plt.figure(figsize=(10, 10))
ax = sns.heatmap(
Contact_corr,
xticklabels=Contact_corr.columns,
yticklabels=Contact_corr.columns,
annot=True,
cmap="RdYlGn",
linewidth=1,
)
# ### We can see from above graph that contact columns have no correlation with target variable.
# heat map for showing positive correlation
contact_col = [
"AMT_INCOME_TOTAL",
"AMT_CREDIT",
"AMT_ANNUITY",
"AMT_GOODS_PRICE",
"EXT_SOURCE_2",
"TARGET",
]
sns.heatmap(df_app[contact_col].corr(), annot=True)
plt.show()
# # We divide our data set into two parts: target = 1 and target = 0
# creating new datadrame for target=0
target0 = df_app[df_app["TARGET"] == 0]
target0.head()
# creating new datadrame for target=1
target1 = df_app[df_app["TARGET"] == 1]
target1.head()
# now we need to find top 10 correlations
corr0 = target0.corr()
corr_df0 = corr0.where(np.triu(np.ones(corr0.shape), k=1).astype(bool))
corr_df0 = corr_df0.unstack().reset_index().dropna(subset=[0])
corr_df0.columns = ["VAR1", "VAR2", "Correlation_Value"]
corr_df0["Corr_abs"] = abs(corr_df0["Correlation_Value"])
corr_df0.sort_values(by="Corr_abs", ascending=False, inplace=True)
corr_df0.head(10)
# now we need to find top 10 correlations
corr1 = target1.corr()
corr_df1 = corr1.where(np.triu(np.ones(corr1.shape), k=1).astype(bool))
corr_df1 = corr_df1.unstack().reset_index().dropna(subset=[0])
corr_df1.columns = ["VAR1", "VAR2", "Correlation_Value"]
corr_df1["Corr_abs"] = abs(corr_df1["Correlation_Value"])
corr_df1.sort_values(by="Corr_abs", ascending=False, inplace=True)
corr_df1.head(10)
# importing previous application file and seeing top 5 dataset values
df = pd.read_csv("previous_application.csv")
df.head()
# find the shape of the data i.e. number of rows and number of columns
df.shape
# to find the information about the data
df.info()
# to find the percentage of null values in descending order
print(round((100 * df.isnull().mean()).sort_values(ascending=False), 2))
# dropping columns with more than 40% missing values (i.e. keeping columns with at least 60% non-null entries)
df_prev = df.dropna(thresh=len(df) * 0.6, axis="columns")
# finding the shape of the data after dropping the null values
df_prev.shape
# imputing the numerical columns with their median values
df_prev = df_prev.fillna(df_prev.median(numeric_only=True))
# imputing the categorical value with mode
mod = df_prev["PRODUCT_COMBINATION"].mode()[0]
df_prev["PRODUCT_COMBINATION"] = df_prev["PRODUCT_COMBINATION"].fillna(mod)
# to find the percentage of null values in descending order
print(round((100 * df_prev.isnull().mean()).sort_values(ascending=False), 2))
# ### We can see that now there are no missing values in the data after imputation
# ## Outliers
# finding outliers using box plot in segmented continuous columns
cont_col = [
"AMT_ANNUITY",
"AMT_APPLICATION",
"AMT_CREDIT",
"AMT_GOODS_PRICE",
"SELLERPLACE_AREA",
]
for i in cont_col:
sns.boxplot(y=df_prev[i])
plt.show()
# ### There are outliers present in AMT_ANNUITY, AMT_APPLICATION, AMT_CREDIT, AMT_GOODS_PRICE, SELLERPLACE_AREA .
# ## Data Imbalance
# finding data imbalance by plotting pie chart
y = df_prev["NAME_CONTRACT_STATUS"].value_counts(normalize=True).values
lab = ["Approved", "Refused", "Canceled", "Unused offer"]
plt.pie(y, labels=lab, autopct="%0.0f%%")
plt.legend()
plt.title("Imbalance between Approved, Refused, Cancelled and Unused offer")
plt.show()
# ### We can see that about 62% of the previous applications were approved, while 19% were refused (rejected by the company), 17% were cancelled by the client, and 2% ended as unused offers.
# to check statistics for DAYS_DECISION column
df_prev["DAYS_DECISION"].describe()
# ### We can see negative values or data discrepancy in the DAYS_DECISION column.
# converting DAYS_DECISION values to positive values
df_prev["DAYS_DECISION"] = abs(df_prev["DAYS_DECISION"])
plt.figure(figsize=(18, 7))
plt.rcParams["axes.labelsize"] = 20
plt.rcParams["axes.titlesize"] = 22
plt.rcParams["axes.titlepad"] = 30
plt.xticks(rotation=90)
plt.yscale("log")
plt.title("Distribution of purposes with contract status ")
sns.countplot(
data=df_prev,
x=df_prev["NAME_CASH_LOAN_PURPOSE"],
order=df_prev["NAME_CASH_LOAN_PURPOSE"].value_counts(normalize=True).index,
hue="NAME_CONTRACT_STATUS",
palette="magma",
)
plt.show()
# ## Merged Dataframe
df = pd.merge(df_app, df_prev, on="SK_ID_CURR", how="inner")
df.head()
df.shape
plt.figure(figsize=(18, 7))
plt.rcParams["axes.labelsize"] = 20
plt.rcParams["axes.titlesize"] = 22
plt.rcParams["axes.titlepad"] = 30
plt.xticks(rotation=90)
plt.yscale("log")
plt.title("Distribution of purposes with target variable")
sns.countplot(
data=df,
x=df["NAME_CASH_LOAN_PURPOSE"],
order=df["NAME_CASH_LOAN_PURPOSE"].value_counts(normalize=True).index,
hue="TARGET",
palette="magma",
)
plt.show()
# ### Loan purpose has a high number of unknown values (XAP, XNA)
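# One possible refinement (a sketch, not in the original analysis): exclude the XAP/XNA
# placeholder purposes before plotting so the remaining purposes are easier to compare.
known_purpose = df[~df["NAME_CASH_LOAN_PURPOSE"].isin(["XAP", "XNA"])]
plt.figure(figsize=(18, 7))
plt.xticks(rotation=90)
plt.yscale("log")
plt.title("Distribution of known purposes with target variable")
sns.countplot(
    data=known_purpose,
    x="NAME_CASH_LOAN_PURPOSE",
    order=known_purpose["NAME_CASH_LOAN_PURPOSE"].value_counts().index,
    hue="TARGET",
    palette="magma",
)
plt.show()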
plt.figure(figsize=(10, 7))
plt.title("Distribution of purposes with contract status ")
sns.countplot(
data=df,
x=df["NAME_CONTRACT_STATUS"],
order=df["NAME_CONTRACT_STATUS"].value_counts().index,
hue="TARGET",
)
plt.show()
|
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
import os
import tensorflow_datasets as tfds
import tensorflow_addons as tfa
import tensorflow_probability as tfp
# CHANGED FOR TPU 1VM:
# Detect hardware, return appropriate distribution strategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect(
tpu="local"
) # "local" for 1VM TPU
strategy = tf.distribute.TPUStrategy(tpu)
except tf.errors.NotFoundError:
strategy = tf.distribute.MirroredStrategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
height = 512
width = 512
font_size = 20
def apply_visual_attention(path):
img = cv2.imread(path, 0)
resized_img = cv2.resize(img, (height, width))
denoised_img = cv2.medianBlur(resized_img, 5)
th = cv2.adaptiveThreshold(
denoised_img,
maxValue=255,
adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
thresholdType=cv2.THRESH_BINARY,
blockSize=11,
C=2,
)
    # NOTE: the denoised/thresholded result `th` is computed but not used here;
    # the original (unresized) grayscale image is returned unchanged.
    return img
import os
import cv2
import numpy as np
# Set the path to the dataset folders
train_image_path = "/kaggle/input/cod10k/COD10K-v3/Train/Image/"
train_gt_path = "/kaggle/input/cod10k/COD10K-v3/Train/GT_Object/"
test_image_path = "/kaggle/input/cod10k/COD10K-v3/Test/Image/"
test_gt_path = "/kaggle/input/cod10k/COD10K-v3/Test/GT_Object/"
img_size2 = (512, 512)
# Function to load images and ground truth instances
def load_data(image_path, gt_path, maxi):
images = []
gt_instances = []
c = 0
for filename in sorted(os.listdir(image_path)):
# Check if file is a JPG image
c += 1
if c >= maxi:
break
if c % 50 == 0:
print(c)
if filename.endswith(".jpg"):
img = apply_visual_attention(image_path + filename)
img = cv2.resize(img, img_size2)
images.append(img)
# Load ground truth instance and resize to (256,256)
gt = cv2.imread(gt_path + filename[:-4] + ".png")
gt = cv2.resize(gt, img_size2)
gt_instances.append(gt)
return np.array(images), np.array(gt_instances)
# Load training data
train_images, train_gt_instances = load_data(train_image_path, train_gt_path, 1000)
# Load testing data
test_images, test_gt_instances = load_data(test_image_path, test_gt_path, 200)
print("done")
# train_images, train_gt_instances,test_images, test_gt_instances
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend
from tensorflow.keras.losses import binary_crossentropy
backend.clear_session()
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
Dropout,
concatenate,
UpSampling2D,
)
# Define the U-Net model
def unet(input_shape=(512, 512, 3)):
inputs = Input(input_shape)
# Encoder
conv1 = Conv2D(64, 3, activation="relu", padding="same")(inputs)
conv1 = Conv2D(64, 3, activation="relu", padding="same")(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(pool1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(pool2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
# Decoder
up4 = UpSampling2D(size=(2, 2))(pool3)
conv4 = Conv2D(256, 2, activation="relu", padding="same")(up4)
merge4 = concatenate([conv3, conv4], axis=3)
conv4 = Conv2D(256, 3, activation="relu", padding="same")(merge4)
conv4 = Conv2D(256, 3, activation="relu", padding="same")(conv4)
up5 = UpSampling2D(size=(2, 2))(conv4)
conv5 = Conv2D(128, 2, activation="relu", padding="same")(up5)
merge5 = concatenate([conv2, conv5], axis=3)
conv5 = Conv2D(128, 3, activation="relu", padding="same")(merge5)
conv5 = Conv2D(128, 3, activation="relu", padding="same")(conv5)
up6 = UpSampling2D(size=(2, 2))(conv5)
conv6 = Conv2D(64, 2, activation="relu", padding="same")(up6)
merge6 = concatenate([conv1, conv6], axis=3)
conv6 = Conv2D(64, 3, activation="relu", padding="same")(merge6)
conv6 = Conv2D(64, 3, activation="relu", padding="same")(conv6)
# Output layer
outputs = Conv2D(3, 1, activation="sigmoid")(conv6)
# Define the model
model = Model(inputs=inputs, outputs=outputs)
return model
with strategy.scope():
input_shape = (512, 512, 1)
generator = unet(input_shape)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
generator.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=["accuracy", tf.keras.metrics.Precision()],
)
generator.summary()
print(train_images[0].shape)
print(train_gt_instances[0].shape)
num_epochs = 25
batch_size = 64
# Train the model
history = generator.fit(
train_images,
train_gt_instances,
epochs=num_epochs,
batch_size=batch_size,
validation_data=(test_images, test_gt_instances),
# reset_metrics=True
)
generator.save("/kaggle/working/version1_camo.h5")
import matplotlib.pyplot as plt
img_path = (
"/kaggle/input/cod10k/COD10K-v3/Train/Image/COD10K-CAM-1-Aquatic-1-BatFish-1.jpg"
)
otimg = cv2.imread(img_path)
otimg = apply_visual_attention(img_path)
timg = cv2.resize(otimg, img_size2)
plt.imshow(timg)
timg = np.array([timg])
print(timg.shape)
predictions = generator.predict(timg)
plt.imshow(predictions[0])
# timg='/kaggle/input/cod10k/COD10K-v3/Test/Image/COD10K-CAM-1-Aquatic-13-Pipefish-528.jpg'
timg = img_path.split("/")
timgp = timg.pop().split(".")
timgp = [timgp[0], "png"]
timgp = ".".join(timgp)
timg[6] = "GT_Object"
timg = timg + [timgp]
timg = "/".join(timg)
print(timg)
timg = cv2.resize(cv2.imread(timg), img_size2)
# timg=feature(timg)
plt.imshow(timg)
|
# 
# **Did you know that it is possible to capture data from the different search engines?**
# We can obtain almost any information from Google, Yahoo, YouTube... and many more platforms, and in a very simple way.
# [SerpApi](https://serpapi.com/) is a free API (as long as you do not exceed 100 queries per month) that lets developers obtain search results from Google and other major search engines in an easy and efficient way.
# **Let's get started!**
# First of all, you need to create an account to obtain the secret key (API_KEY) required to make requests. To do so, go to this page: https://serpapi.com/users/sign_up. After registering (it is very simple), check the email address you signed up with to confirm it, and you will be ready to use the API.
# I also recommend exploring the documentation and getting familiar with the API: https://serpapi.com/search-api
# Once you have obtained your API_KEY, the next step is to install the **google-search-results** module to be able to access SerpAPI.
# Install the google-search-results module
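# A minimal sketch of the missing install step (my assumption: a Kaggle/Jupyter environment).
# In a notebook cell this is simply:
# !pip install google-search-results
# The equivalent plain-Python form:
import subprocess
import sys

subprocess.run([sys.executable, "-m", "pip", "install", "google-search-results"], check=True)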
# Perfect! Now we can start playing around
# Assign your API_KEY
API_GOOGLE = ""
# Let's search for the best-rated restaurants in Seville (or whichever area you prefer). We will use Google Maps; you can take a look at the documentation here: https://serpapi.com/maps-local-results
# Load the required libraries
import pandas as pd
from serpapi import GoogleSearch
# Set the coordinates of the area where we want to find restaurants. In this case, Seville
location = "37.3754318,-5.9962577"
# Create a list of restaurant types to search for. Add the names you would search for on Google Maps.
restaurant_types = [
"restaurante vegano",
"restaurante asiático",
"restaurante italiano",
"restaurante tapas",
]
# Now let's create a dataframe (a table-like data structure) to store and organise the information. In this case we will store: **Type, Name, Rating and Number of ratings.**
# Initialize an empty DataFrame
columns = ["tipo", "nombre", "valoración", "num_valoraciones"]
df_restaurants = pd.DataFrame(columns=columns)
# And finally, we make the request and retrieve the restaurant data
# Make the request and fetch the data
for rest_type in restaurant_types:
search_params = {
"api_key": API_GOOGLE,
"engine": "google_maps",
"q": rest_type,
"location": location,
}
search = GoogleSearch(search_params)
results = search.get_dict()
    # Build the DataFrame
for result in results["local_results"]:
name = result.get("title", "N/A")
rating = result.get("rating", "N/A")
num_ratings = result.get("reviews", "N/A")
restaurant_data = {
"tipo": rest_type,
"nombre": name,
"valoración": rating,
"num_valoraciones": num_ratings,
}
df_temp = pd.DataFrame([restaurant_data])
df_restaurants = pd.concat([df_restaurants, df_temp], ignore_index=True)
# Show the dataframe
df_restaurants.head()
|
# Data was built using "suicide-watch" by NIKHILESWAR KOMATI under the
# CC BY-SA 4.0 licence: https://creativecommons.org/licenses/by-sa/4.0/
# in this notebook, I will be building a Bayesian Model to help classify the depressed
# data with levels
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# suicide data, originally without sentiment, added to improve
# df_1 = pd.read_csv('/kaggle/input/processed-suicide-data/processedSuicidedata_standard.csv')
df_1 = pd.read_csv(
"/kaggle/input/suicide-processed-with-sentiment/processedSuicidedata_sentiment.csv"
)
# read in the data with the standard features
df_2 = pd.read_csv(
"/kaggle/input/depression-data-tfidf-sentiment-analysis/depression_data_TFIDF_sent.csv"
)
# read in the data with extra features
df_3 = pd.read_csv(
"/kaggle/input/tfidf-sentiment-w-extra-features/depression_data_TFIDF_sent_extra_features (0).csv"
)
# First, need to do sentiment analysis using Vader on the suicide data and to add that as an attribute.
# install Vader
#!pip install vaderSentiment
# getting the sentiment first, will be commented out
"""from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sent_analyser = SentimentIntensityAnalyzer()
sample_num = df_1.shape[0]
score_vector_1 = []
for i in range(sample_num):
score = sent_analyser.polarity_scores(df_1.at[i,'text'])
score_vector_1.append(score['compound'])
df_1.loc[:,'sentiment'] = score_vector_1
df_1
"""
# Train the model using a Bayesian approach and check accuracy and F-score with 10-fold cross-validation, after saving the data for later use. Older code will be left commented out.
# df_1.to_csv('processedSuicidedata_sentiment.csv',index=False)
# df_1
x_col = df_1.columns[1:286]
X = df_1[x_col]
y_col = df_1.columns[286]
Y_true = df_1[y_col]
# 10-fold cross validation for metric refining:
from sklearn.model_selection import KFold
# taken from documentation
kf = KFold(n_splits=10, shuffle=True, random_state=1000)
for train, test in kf.split(X):
    # split X and Y into train/test folds (positional indexing, since X and Y_true are pandas objects)
    X_train, X_test = X.iloc[train], X.iloc[test]
    Y_train, Y_test = Y_true.iloc[train], Y_true.iloc[test]
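# A minimal sketch of the "Bayesian approach" evaluation described above (my assumption:
# Gaussian Naive Bayes; the original notebook stops before the model is actually trained).
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_validate

nb_model = GaussianNB()
cv_results = cross_validate(nb_model, X, Y_true, cv=kf, scoring=["accuracy", "f1_macro"])
print("Mean accuracy:", cv_results["test_accuracy"].mean())
print("Mean macro F1:", cv_results["test_f1_macro"].mean())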
|
# **Our Analysis**
# As the battle of the Indian eCommerce heavyweights continues to accelerate, we have witnessed three separate sale events compressed into the last four weeks of this festive season. Flipkart has come out with all guns blazing following its multi-billion-dollar funding round, leaving Amazon with little choice but to follow suit with its own aggressive promotions. At this stage of a highly competitive eCommerce cycle, market share is a prize worth its weight in gold and neither Flipkart nor Amazon are prepared to blink first.
# At DataWeave, our proprietary data aggregation and analysis platform enables us to seamlessly analyze these sale events, focusing on multiple dimensions, including website, category, sub-category, brand, prices, discounts, and more. Over the past six weeks, we have been consistently monitoring the prices of the top 200 ranked products spread over sub-categories spanning electronics, fashion, and furniture. In total, we amassed data on over 65,000 products during this period.
# The first of these pivotal sale events was held between the 20th and 24th September, which we earlier analyzed in detail. Another major sale soon followed, contested by Amazon, Flipkart and Myntra for varying periods between the 4th and 9th of October. Lastly, there was the Diwali season sale held by Amazon, Flipkart, and Myntra between the 14th and 18th of October, joined by Jabong between the 12th and 15th of October.
# In analyzing these significant sale events for all eCommerce websites, we observed an extensive range of products enjoying high absolute discounts, but with no additional discounts during the sale, i.e. prices remained unchanged between the day before the sale and the first day of the sale. The following infographic highlights some of the sub-categories and products where this phenomenon was more pronounced during the recently concluded Diwali season sale. Here, discount percentages are average absolute discounts of products with unchanged discounts during the sale.
# 
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Libraries
import matplotlib.pyplot as plt
import seaborn as sns
# # Data Processing
# import csv file
df = pd.read_csv(
"/kaggle/input/diwali-sales-dataset/Diwali Sales Data.csv",
encoding="unicode_escape",
)
df.head()
df.tail()
df.shape
df.info()
# # Drop columns
df.drop(["Status", "unnamed1"], axis=1, inplace=True)
df.info()
# # Drop null values
df.isnull().sum()
# drop null values
df.dropna(inplace=True)
df.isnull().sum()
# change data type
df["Amount"] = df["Amount"].astype("int")
df["Amount"].dtypes
df.columns
df.describe()
# use describe() for specific columns
df[["Age", "Orders", "Amount"]].describe()
# # Exploratory Data Analysis
# # Gender
# plotting a bar chart for Gender and it's count
ax = sns.countplot(x="Gender", data=df)
for bars in ax.containers:
ax.bar_label(bars)
# plotting a bar chart for gender vs total amount
sales_gen = (
df.groupby(["Gender"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
sns.barplot(x="Gender", y="Amount", data=sales_gen)
# *From the above graphs we can see that most of the buyers are female, and the purchasing power of women is greater than that of men*
# # Age
a = sns.countplot(data=df, x="Age Group", hue="Gender")
for bars in a.containers:
a.bar_label(bars)
# Total Amount vs Age Group
sales_age = (
df.groupby(["Age Group"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
sns.barplot(x="Age Group", y="Amount", data=sales_age)
# *From the above graphs we can see that most of the buyers are women in the 26-35 age group*
# # State
sales_state = (
df.groupby(["State"], as_index=False)["Orders"]
.sum()
.sort_values(by="Orders", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (15, 5)})
sns.barplot(data=sales_state, x="State", y="Orders")
# From above graphs we can see that most of the orders & total sales/amount are from Uttar Pradesh, Maharashtra and Karnataka respectively
# # Marital Status
a = sns.countplot(data=df, x="Marital_Status")
sns.set(rc={"figure.figsize": (7, 5)})
for bars in a.containers:
    a.bar_label(bars)
sales_state = (
df.groupby(["Marital_Status", "Gender"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
sns.set(rc={"figure.figsize": (6, 5)})
sns.barplot(data=sales_state, x="Marital_Status", y="Amount", hue="Gender")
# *From above graphs we can see that most of the buyers are married (women) and they have high purchasing power*
# # Occupation
sns.set(rc={"figure.figsize": (20, 5)})
ax = sns.countplot(data=df, x="Occupation")
for bars in ax.containers:
ax.bar_label(bars)
sns.set(rc={"figure.figsize": (20, 5)})
ax = sns.countplot(data=df, x="Occupation")
for bars in ax.containers:
ax.bar_label(bars)
# From above graphs we can see that most of the buyers are working in IT, Healthcare and Aviation sector
# # Product Category
sns.set(rc={"figure.figsize": (20, 5)})
a = sns.countplot(data=df, x="Product_Category")
for bars in a.containers:
    a.bar_label(bars)
sales_state = (
df.groupby(["Product_Category"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (20, 5)})
sns.barplot(data=sales_state, x="Product_Category", y="Amount")
# *From above graphs we can see that most of the sold products are from Food, Clothing and Electronics category*
sales_state = (
df.groupby(["Product_ID"], as_index=False)["Orders"]
.sum()
.sort_values(by="Orders", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (20, 5)})
sns.barplot(data=sales_state, x="Product_ID", y="Orders")
# top 10 most sold products (same thing as above)
fig1, ax1 = plt.subplots(figsize=(12, 7))
df.groupby("Product_ID")["Orders"].sum().nlargest(10).sort_values(ascending=False).plot(
kind="bar"
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"../input/indicators-of-anxiety-or-depression/Indicators_of_Anxiety_or_Depression_Based_on_Reported_Frequency_of_Symptoms_During_Last_7_Days.csv"
)
df.head(3)
df.iloc[2]
len(df)
df.isnull().sum()
df = df.dropna()
len(df)
df
df.to_csv("cleaned_data.csv")
newdf = df.reset_index(drop=True)
newdf.to_csv("Data.csv")
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
plt.figure(figsize=(8, 4))
ax1 = sns.countplot(x="Time Period Label", data=newdf, palette="Blues")
legend_labels, _ = ax1.get_legend_handles_labels()
plt.title("Reservation status in different hotels", size=20)
plt.xlabel("hotel")
plt.ylabel("number of reservations")
newdf["Group"].value_counts()[:].plot(kind="bar")
plt.figure(figsize=(14, 7))
sns.countplot(x=newdf["Subgroup"]["Female", "Male"])
sns.barplot(x=newdf["Group"], y=newdf["Indicator"].count)
|
import numpy as np
import pandas as pd
import time
from tqdm import tqdm
import os
import random as rd
import torch
print(torch.__version__)
from torch.utils.data import DataLoader, TensorDataset
from torch import nn, optim
from torchsummary import summary
import torchvision
from torchvision import transforms as T
from torchvision import datasets
from torchvision.io import read_image
print("La version de torch est : ", torch.__version__)
print("Le calcul GPU est disponible ? ", torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# !nvidia-smi
from PIL import Image
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
print(os.getcwd())
# > This dataset contains more than 5000 chest X-ray images of lungs. In this notebook, we try to distinguish diseased lungs from healthy ones using different CNNs.
# ## Getting familiar with the data and loading it
data_dir = "/kaggle/input/chest-xray-pneumonia/chest_xray/"
trainset_wo_transform = datasets.ImageFolder(os.path.join(data_dir, "train"))
print(type(trainset_wo_transform))
print(type(trainset_wo_transform[0])) # tuple=(image sous format PIL, label)
print(type(trainset_wo_transform[0][0]))
print(trainset_wo_transform[0][0].size)
print(trainset_wo_transform[-1][0].size)
# > The images are in PIL format and they are not all the same size. We will use the data_transform() function to load the data, so that it is standardised and augmented: every image will be 224x224, normalised, and randomly rotated.
# NB: mean = 0.4747 and std = 0.2253 were computed for our own dataset (see the appendix for the computation)
def data_transform():
data_transformed = T.Compose(
[
T.ToTensor(),
T.Resize((224, 224)),
T.RandomRotation(degrees=20),
T.Normalize(mean=0.4747, std=0.2253),
]
)
return data_transformed
# Other augmentation ideas that could also have been added, as sketched below:
# RandomResizedCrop(size=224, scale=(0.5, 1.0), ratio=(0.5, 2.0))
# RandomRotation(degrees=30)
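# A sketch (my addition) of what the transform pipeline would look like with the extra
# augmentations quoted above enabled; the parameters are the ones from the comment.
def data_transform_augmented():
    return T.Compose(
        [
            T.ToTensor(),
            T.RandomResizedCrop(size=224, scale=(0.5, 1.0), ratio=(0.5, 2.0)),
            T.RandomRotation(degrees=30),
            T.Normalize(mean=0.4747, std=0.2253),
        ]
    )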
trainset = datasets.ImageFolder(
os.path.join(data_dir, "train"), transform=data_transform()
)
print(type(trainset))
print(type(trainset[0])) # tuple : image sous forme de tensor + le label
print(type(trainset[0][0]))
print(trainset[0][0].size())
print(trainset[-1][0].size())
# --> the images are indeed torch.Tensor objects and all the same size :)
# NB: careful, the "test" and "val" folders are swapped --> see their respective sizes
validationset = datasets.ImageFolder(
os.path.join(data_dir, "test"), transform=data_transform()
)
testset = datasets.ImageFolder(
os.path.join(data_dir, "val"), transform=data_transform()
)
print(len(validationset))
print(len(testset))
# ## Class balance
print(trainset.classes)  # normal (= negative / 0) and pneumonia (= positive / 1)
print(trainset.class_to_idx)
# Consider distinguishing viral vs. bacterial later on
print(validationset.classes)
print(trainset.class_to_idx)
# Let's see whether the classes are balanced
# TRAIN SET
n_train = 0 # normal
p_train = 0 # pneumonia
unknown = 0 # en cas de label manquant
for i, item in enumerate(trainset):
# print(item[1])
if item[1] == 1:
p_train += 1
elif item[1] == 0:
n_train += 1
else:
unknown += 1
print("Il y a {} images de poumons sains dans train".format(n_train))
print("Il y a {} images de poumons malades dans train".format(p_train))
# TEST SET
n_test = 0 # normal
p_test = 0 # pneumonia
unknown = 0 # en cas de label manquant
for item in testset:
# print(item[1])
if item[1] == 1:
p_test += 1
elif item[1] == 0:
n_test += 1
else:
unknown += 1
print("Il y a {} images de poumons sains dans test".format(n_test))
print("Il y a {} images de poumons malades dans test".format(p_test))
# VALIDATION SET
n_val = 0 # normal
p_val = 0 # pneumonia
unknown = 0 # en cas de label manquant
for item in validationset:
if item[1] == 1:
p_val += 1
elif item[1] == 0:
n_val += 1
else:
unknown += 1
print("Il y a {} images de poumons sains dans validation".format(n_val))
print("Il y a {} images de poumons malades dans validation".format(p_val))
# > The dataset is imbalanced (75% pneumonia vs. 25% normal). Healthy lungs are under-represented.
# --> let's oversample the normal-lung images in the training data.
# As a first step, a naive method: we put the normal-lung images into trainset twice
# To load only the images from the train/NORMAL folder, we need to create our own Dataset class
from torch.utils.data import Dataset
from skimage import io
import cv2
class MyDataset(Dataset):
def __init__(self, image_paths, transform):
self.image_paths = image_paths
self.transform = transform
def get_class_label(self):
y = 0
return y
def __getitem__(self, index):
image_path = self.image_paths[index]
# x = Image.open(image_path)
x = io.imread(image_path, as_gray=True)
x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
y = 0
if self.transform is not None:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.image_paths)
# Load all the images labelled 0 into a dataset named trainset2
image_paths = []
for file in os.listdir(data_dir + "train/NORMAL"):
image_paths.append(data_dir + "train/NORMAL/" + file)
print(len(image_paths))
trainset2 = MyDataset(image_paths, transform=data_transform())
print("shape de trainset[0][0]")
print(trainset[0][0].size())
print("shape de trainset2[0][0]")
print(trainset2[0][0].size())
# on vérifie que trainset ainsi créé est cohérent :
print(type(trainset2[0]) == type(trainset[0]))
print(type(trainset2[0][0]) == type(trainset[0][0]))
print(type(trainset2[0][1]) == type(trainset[0][1]))
print(type(trainset2[-1]))
print(type(trainset2[-1][0].shape))
print(type(trainset2[-1][1]))
# Concatenate the two training sets to obtain an enriched trainset
print("original", len(trainset))
l = []
l.append(trainset)
l.append(trainset2)
trainset = torch.utils.data.ConcatDataset(l)
print(len(trainset))
# !! Do not run this cell several times, otherwise the healthy-lung images accumulate
# --> risk of redundancy --> overfitting
# do the same thing for the trainset w/o transform
lwo = []
lwo.append(trainset_wo_transform)
lwo.append(MyDataset(image_paths, transform=None))
trainset_wo_transform = torch.utils.data.ConcatDataset(lwo)
# ## Visualizing the images
plt.figure()
plt.subplot(221)
plt.title("not transformed")
plt.imshow(trainset_wo_transform[0][0])
plt.subplot(222)
plt.title("transformed")
plt.imshow(trainset[0][0].permute(1, 2, 0))
plt.subplot(223)
plt.title("not transformed")
plt.imshow(trainset_wo_transform[-2][0])
plt.subplot(224)
plt.title("transformed")
plt.imshow(trainset[-2][0].permute(1, 2, 0))
plt.tight_layout()
# After the transformation, the images are indeed
# tensors with a single common size
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
testloader = DataLoader(testset, batch_size=32, shuffle=True)
validationloader = DataLoader(validationset, batch_size=32, shuffle=True)
print(type(trainloader))
print(type(iter(trainloader)))
train_features = next(iter(trainloader))
val_features = iter(trainloader)
print(type(train_features))
print(type(train_features[0]))
print(train_features[0].size())
# train_features is a list of tensors of shape 32 * 3 * 224 * 224
# = batch size * channels * height * width
# ## Hand-made CNN
# > In this part, I build a CNN "from scratch".
# Dummy CNN
class CNN(nn.Module):
def __init__(self, num_classes):
super(CNN, self).__init__()
self.conv_1 = nn.Conv2d(
in_channels=3, out_channels=12, kernel_size=3, padding=1
)
self.max_pool_1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv_2 = nn.Conv2d(
in_channels=12, out_channels=64, kernel_size=3, padding=1
)
self.max_pool_2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv_3 = nn.Conv2d(
in_channels=64, out_channels=64, kernel_size=3, padding=1
)
self.max_pool_3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(64 * 28 * 28, 128)
self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(128, num_classes)  # 2 classes: pneumonia or not
def forward(self, x):
out = self.conv_1(x)
out = self.max_pool_1(out)
out = self.conv_2(out)
out = self.max_pool_2(out)
out = self.conv_3(out)
out = self.max_pool_3(out)
out = out.reshape(out.size(0), -1)
out = self.fc1(out)
out = self.relu1(out)
out = self.fc2(out)
return out
summary(
CNN(2).to(device),
(
train_features[0].shape[1],
train_features[0].shape[2],
train_features[0].shape[3],
),
)
# save the model and the optimizer state to a file
def save_state(epoch, model, optim, fichier):
state = {
"epoch": epoch,
"model_state": model.state_dict(),
"optim_state": optim.state_dict(),
}
    torch.save(state, fichier)  # no need to go through pickle
# If the file exists, load the model and the optimizer
def load_state(fichier, model, optimizer):
epoch = 0
if os.path.isfile(fichier):
state = torch.load(fichier)
model.load_state_dict(state["model_state"])
# optimizer.load_state_dict(state['optim_state'])
# optimizer.load_state_dict(state['optim_state'])
epoch = state["epoch"]
return epoch
def train(model, num_epochs, trainloader, validationloader, criterion, optimizer):
print(device)
start_time = time.time()
model = model.to(device)
# print(model.device)
criterion = criterion.to(device)
# writer = SummaryWriter(f"{TB_PATH}/{model.name}")
train_loss = []
train_accuracy = []
train_f1 = []
val_loss = []
val_accuracy = []
val_f1 = []
    # we keep TP, TN, FP, FN only for the validation dataset
    print(f"running {model.name}")
    # For checkpointing
# fichier = "/kaggle/working/{}.pth".format(model.name)
# start_epoch = load_state(fichier,model,optim)
# print("start_epoch = ", start_epoch)
start_epoch = 0
    # For early stopping
# early_stopping_counter = 0
# best_val_loss = + 100000000
# early_stopping_patience = 1
for epoch in tqdm(range(start_epoch, num_epochs)):
        cumul_loss = 0  # the loss for each epoch
correct, total = 0, 0
TP, TN, FP, FN = 0, 0, 0, 0
for images, labels in trainloader:
# model.train()
images, labels = images.to(device), labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
_, predicted = torch.max(outputs.data, 1)
TP += ((predicted == labels) & (predicted == 1)).sum().item()
TN += ((predicted == labels) & (predicted == 0)).sum().item()
FP += ((predicted != labels) & (predicted == 1)).sum().item()
FN += ((predicted != labels) & (predicted == 0)).sum().item()
total += labels.size(0)
correct += (predicted == labels).sum().item()
cumul_loss += loss.item()
# writer.add_scalar("train_loss", cumul_loss, epoch) # cumul_loss/len(trainloader)
# writer.add_scalar("train_accuracy", cumul_acc, epoch)
# writer.add_scalar("train_f1", cumul_f1, epoch)
        # Keep the values for epoch i:
train_loss.append(cumul_loss / len(trainloader))
train_accuracy.append(correct / total)
f1 = (2 * TP) / (2 * TP + FP + FN)
train_f1.append(f1)
# TPs.append(TP) # on ne les conserve que pour la validation, pas d'interet en train
# TNs.append(TN)
# FPs.append(FP)
# FNs.append(FN)
print(
"Epoch [{}/{}], train_loss: {:.4f}".format(
epoch + 1, num_epochs, cumul_loss
)
)
# CHECK POINTING
# if epoch % 2 == 0:
# save_state(epoch,model,optimizer,fichier)
# VALIDATION
if (
epoch % 1 == 0
        ):  # evaluate the validation performance every n epochs
            # model.eval()  # specify that we are in evaluation mode
            with torch.no_grad():  # we only test performance here: the weights will not be updated
cumul_loss = 0
correct, total = 0, 0
TP, TN, FP, FN = 0, 0, 0, 0
for images, labels in validationloader:
images, labels = images.to(device), labels.to(device)
# images, labels = images.cuda(), labels.cuda()
outputs = model(images)
loss = criterion(outputs, labels)
_, predicted = torch.max(outputs.data, 1)
TP += ((predicted == labels) & (predicted == 1)).sum().item()
TN += ((predicted == labels) & (predicted == 0)).sum().item()
FP += ((predicted != labels) & (predicted == 1)).sum().item()
FN += ((predicted != labels) & (predicted == 0)).sum().item()
total += labels.size(0)
correct += (
(predicted == labels).sum().item()
) # sans le item on aurait un tensor(n) au lieu de juste n
cumul_loss += loss.item()
# writer.add_scalar("validation_loss",cumul_loss,epoch)
# writer.add_scalar("validation_accuracy", cumul_acc, epoch)
# writer.add_scalar("validation_f1", cumul_f1, epoch)
val_loss.append(cumul_loss / len(validationloader))
val_accuracy.append(correct / total)
f1 = (2 * TP) / (2 * TP + FP + FN)
val_f1.append(f1)
# EARLY STOPPING
# if val_loss[-1] < best_val_loss:
# best_val_loss = val_loss[-1]
# best_epoch = epoch
# early_stopping_counter = 0
# else:
# early_stopping_counter += 1
# if early_stopping_counter >= early_stopping_patience:
# print('Early stopping')
# print(('best epoch = ', best_epoch))
# break
print(
"Epoch [{}/{}], validation_loss: {:.4f}".format(
epoch + 1, num_epochs, cumul_loss
)
)
print("duree du train : ", (time.time() - start_time) / 60, " minutes.")
return (
train_loss,
train_accuracy,
train_f1,
val_loss,
val_accuracy,
val_f1,
TP,
TN,
FP,
FN,
)
def plot_training_curves(result_train):
x = [i for i in range(len(result_train[0]))]
    # make x_val the same length as x
x_val = np.linspace(x[0], x[-1], len(result_train[3]))
print(x)
print(x_val)
# print(VGG_results)
# Les loss
fig, ax1 = plt.subplots()
# plt.subplot(231)
plt.title("Losses")
color1 = "tab:red"
ax1.plot(x, [item for item in result_train[0]], color=color1, label="train loss")
ax1.set_ylabel("TRAIN", color=color1)
ax1.legend()
# plt.subplot(234)
ax2 = ax1.twinx()
color2 = "tab:green"
ax2.plot(
x_val,
[item for item in result_train[3]],
marker=".",
color=color2,
label="val loss",
)
ax2.set_ylabel("VALIDATION", color=color2)
ax2.legend()
plt.xlabel("Epochs")
plt.xticks(x)
fig.tight_layout()
# Les accuracies
fig, ax1 = plt.subplots()
# plt.subplot(232)
plt.title("Accuracies")
color1 = "tab:red"
ax1.plot(x, result_train[1], color=color1, linestyle="--", label="train accuracy")
ax1.set_ylabel("TRAIN", color=color1)
ax1.legend()
ax2 = ax1.twinx()
color2 = "tab:green"
ax2.plot(
x_val,
result_train[4],
color=color2,
linestyle="--",
marker=".",
label="val accuracy",
)
ax2.set_ylabel("VALIDATION", color=color2)
ax2.legend()
plt.xlabel("Epochs")
plt.xticks(x)
fig.tight_layout()
# Les f1
fig, ax1 = plt.subplots()
# plt.subplot(233)
plt.title("f1 scores")
color1 = "tab:red"
ax1.plot(x, result_train[2], color=color1, linestyle="-.", label="train f1 score")
ax1.set_ylabel("TRAIN", color=color1)
ax1.legend()
ax2 = ax1.twinx()
# plt.subplot(236)
color2 = "tab:green"
ax2.plot(
x_val,
result_train[5],
color=color2,
linestyle="-.",
marker=".",
label="val f1 score",
)
ax2.set_ylabel("VALIDATION", color=color2)
ax2.legend()
plt.xlabel("Epochs")
plt.xticks(x)
fig.tight_layout()
plt.show()
def plot_training_curves_2axes(result_train):
x = [i for i in range(len(result_train[0]))]
# on fait en sorte que x_val soit de la meme taille que x
x_val = np.linspace(x[0], x[-1], len(result_train[3]))
print(x)
print(x_val)
# print(VGG_results)
# Les loss
fig, ax1 = plt.subplots()
# plt.subplot(231)
plt.title("Losses")
color1 = "tab:red"
ax1.plot(x, [item for item in result_train[0]], color=color1, label="train loss")
ax1.set_ylabel("TRAIN", color=color1)
ax1.legend()
# plt.subplot(234)
# ax2 = ax1.twinx()
color2 = "tab:green"
ax1.plot(
x_val,
[item for item in result_train[3]],
marker=".",
color=color2,
label="val loss",
)
ax1.set_ylabel("VALIDATION", color=color2)
ax1.legend()
plt.xlabel("Epochs")
plt.xticks(x)
fig.tight_layout()
# Les accuracies
fig, ax1 = plt.subplots()
# plt.subplot(232)
plt.title("Accuracies")
color1 = "tab:red"
ax1.plot(x, result_train[1], color=color1, linestyle="--", label="train accuracy")
ax1.set_ylabel("TRAIN", color=color1)
ax1.legend()
# ax2 = ax1.twinx()
color2 = "tab:green"
ax1.plot(
x_val,
result_train[4],
color=color2,
linestyle="--",
marker=".",
label="val accuracy",
)
ax1.set_ylabel("VALIDATION", color=color2)
ax1.legend()
plt.xlabel("Epochs")
plt.xticks(x)
fig.tight_layout()
# Les f1
fig, ax1 = plt.subplots()
# plt.subplot(233)
plt.title("f1 scores")
color1 = "tab:red"
ax1.plot(x, result_train[2], color=color1, linestyle="-.", label="train f1 score")
ax1.set_ylabel("TRAIN", color=color1)
ax1.legend()
# ax2 = ax1.twinx()
# plt.subplot(236)
color2 = "tab:green"
ax1.plot(
x_val,
result_train[5],
color=color2,
linestyle="-.",
marker=".",
label="val f1 score",
)
ax1.set_ylabel("VALIDATION", color=color2)
ax1.legend()
plt.xlabel("Epochs")
plt.xticks(x)
fig.tight_layout()
plt.show()
def plot_confusion_matrix(TP, TN, FP, FN):
confusion_matrix = np.array([[TP, FP], [FN, TN]])
# my_colors = [[[TP, FP], [FN, TN]], cmap = plt.cm.Blues]
cmap = ListedColormap(["white", "white"], ["white", "white"])
plt.imshow(confusion_matrix, cmap=cmap)
plt.title("Confusion matrix")
# plt.colorbar()
plt.xlabel("Predicted")
plt.ylabel("Truth")
plt.xticks([0, 1], ["Positive", "Negative"])
plt.yticks([0, 1], ["Positive", "Negative"])
for i in range(2):
for j in range(2):
plt.text(i, j, confusion_matrix[i, j], ha="center", va="center")
plt.show()
#!rm -rf '/kaggle/working/1st_dummy_CNN.pth'
model = CNN(2)
model.name = "1st_dummy_CNN" # +time.asctime()
optimizer = optim.Adam(model.parameters(), lr=0.0001) # defining the optimizer
criterion = nn.CrossEntropyLoss(
weight=torch.Tensor([0.66, 1.5])
) # defining the loss function
num_epochs = 15
# notebook.display()
my_CNN = train(model, num_epochs, trainloader, validationloader, criterion, optimizer)
plot_training_curves(my_CNN)
plot_confusion_matrix(my_CNN[6], my_CNN[7], my_CNN[8], my_CNN[9])
# >
# First plot - Loss: the red curve (the training loss) decreases. Good, that is what we hoped for. However, the green curve is jagged and does not decrease: that is not what we hoped for! I did not manage to solve this generalisation problem.
# Second plot - Accuracy: the training accuracy increases over the epochs, but not the validation accuracy. At least that is consistent with the first plot.
# Third plot - F1 score: same findings as for the previous plots.
#
# > Confusion matrix -
# We can see that the model predicts far more pneumonia cases than there really are (far too many false positives). The model is not specific enough. And yet, with the line "criterion = nn.CrossEntropyLoss(weight = torch.Tensor([0.66, 1.5]))" I penalise the pneumonia prediction more strongly than the healthy-lung one, in order to reduce the false positives...
#
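# A small worked example (my addition) of how the class weights enter nn.CrossEntropyLoss:
# each sample's loss is scaled by weight[target_class], so the larger weight on class 1
# makes errors on "pneumonia" targets cost more than errors on "normal" targets.
example_logits = torch.tensor([[2.0, 0.5], [0.2, 1.5]])  # two toy predictions
example_targets = torch.tensor([0, 1])  # their true classes
unweighted_loss = nn.CrossEntropyLoss()(example_logits, example_targets)
weighted_loss = nn.CrossEntropyLoss(weight=torch.Tensor([0.66, 1.5]))(example_logits, example_targets)
print("unweighted:", unweighted_loss.item(), "weighted:", weighted_loss.item())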
# ## VGG
# > In this part, I use the VGG network and apply it to my prediction task.
import torchvision.models as models
base_VGG = models.vgg16(pretrained=True)
# print(base_VGG)
print(summary(base_VGG.to(device), input_size=(3, 224, 224)))
VGG = models.vgg16(pretrained=True)
VGG.name = "my_VGG"
# We do not want to update the weights of the lower layers:
for param in VGG.parameters():
param.requires_grad = False
# Modify the classifier: keep the same structure and add a final layer
# so that the output has the size of our number of classes (here, 2)
VGG.classifier = nn.Sequential(
nn.Linear(VGG.classifier[0].in_features, 4096),
nn.ReLU(),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Dropout(),
nn.Linear(4096, 1000),
nn.ReLU(),
nn.Dropout(),
nn.Linear(1000, 2),
)
# print(VGG)
print(summary(VGG.to(device), input_size=(3, 224, 224)))
# The first layers are frozen, we will only train the classifier
optimizer = optim.Adam(VGG.parameters(), lr=0.001)
weights = torch.Tensor(
[0.66, 1.5]
)  # since the classes are still imbalanced (more 1s than 0s)
# we penalise the prediction of 0 more than that of 1
criterion = nn.CrossEntropyLoss(weight=weights)
num_epochs = 15
VGG_results = train(
VGG, num_epochs, trainloader, validationloader, criterion, optimizer
)
plot_training_curves(VGG_results)
plot_training_curves_2axes(VGG_results)
plot_confusion_matrix(VGG_results[6], VGG_results[7], VGG_results[8], VGG_results[9])
# > We run into the same problem as before: the validation loss does not show a nice decreasing curve (and the accuracy and the F1 score decrease).
# > The confusion matrix also shows the same trend as before: there are still too many false positives. I penalised the pneumonia prediction more strongly, and indeed there are slightly fewer false positives than before, but it is not enough to fix the specificity problem.
# ## Post-hoc analysis
# ### A - Visualising the effect of the filters
# > The following function visualises the outputs of each convolution layer, to see their respective effects
def visualise_layers(model, image):
outputs = []
names = []
image = image.cpu()
image = image.unsqueeze(0)
for layer in model.features._modules.values():
layer = layer.cpu()
image = layer.forward(image)
if isinstance(layer, nn.Conv2d):
outputs.append(image)
names.append(str(layer))
processed = []
for feature_map in outputs:
feature_map = feature_map.squeeze(0)
gray_scale = torch.sum(feature_map, 0)
gray_scale = gray_scale / feature_map.shape[0]
processed.append(gray_scale.data.cpu().numpy())
fig = plt.figure(figsize=(30, 50))
for i in range(len(processed)):
a = fig.add_subplot(5, 4, i + 1)
imgplot = plt.imshow(processed[i])
a.axis("off")
a.set_title(names[i].split("(")[0], fontsize=30)
visualise_layers(VGG, trainset[0][0])
# > In the second image, the edges stand out quite strongly: this layer seems to perform edge detection. The role of the following layers is less clear. We can also wonder about the pixel intensities: why are the pixels at the edges of the image the most intense, when those are the least relevant regions for our diagnosis?
# ### B - Saliency
def getSaliency(model, img, label):
model.zero_grad()
img = img.cpu()
img.requires_grad = True
img.grad = None
outputs = nn.Softmax(dim=1)(model(img.unsqueeze(0)))
output = outputs[0, label]
output.backward()
sal = img.grad.abs()
if sal.dim() > 2:
sal = torch.max(sal, dim=0)[0]
fig = plt.figure(figsize=(8, 8))
fig.add_subplot(1, 2, 1)
plt.imshow(img.detach().cpu().permute(1, 2, 0), cmap="gray")
fig.add_subplot(1, 2, 2)
plt.imshow(sal.to("cpu"), cmap="seismic", interpolation="bilinear")
plt.show()
return sal
getSaliency(VGG.cpu(), trainset[0][0], trainset[0][1])
getSaliency(VGG.cpu(), trainset[-1][0], trainset[-1][1])
# > I expected the most intense pixels to be the central ones, over the lungs. That is not the case: there is no particularly intense area, and the most intense pixels lie at the border of the image...
# # Appendices
# #### Appendix 1 - Computing the mean and std of our images for normalisation
# Compute the per-colour-channel means and standard deviations
meansR, meansV, meansB = [], [], []
stdsR, stdsV, stdsB = [], [], []
for image in validationset:  # For each colour channel (red, green, blue)
meanR = image[0][0, :, :].mean()
meanV = image[0][1, :, :].mean()
meanB = image[0][2, :, :].mean()
# print(mean)
meansR.append(meanR)
meansV.append(meanV)
meansB.append(meanB)
stdR = image[0][0, :, :].std()
stdV = image[0][1, :, :].std()
stdB = image[0][2, :, :].std()
stdsR.append(stdR)
stdsV.append(stdV)
stdsB.append(stdB)
# Compute the mean of the means and of the standard deviations
# meansR/V/B contain the per-image channel means
meanR = torch.tensor(meansR).mean()
meanV = torch.tensor(meansV).mean()
meanB = torch.tensor(meansB).mean()
stdR = torch.tensor(stdsR).mean()
stdV = torch.tensor(stdsV).mean()
stdB = torch.tensor(stdsB).mean()
print(f"Moyenne des moyennes R, V, B: {meanR}, {meanV}, {meanB}")
print(f"Moyenne des écarts-types: {stdR}, {stdV}, {stdB}")
# ## TensorBoard
# Does not seem to work on Kaggle?? see: https://www.kaggle.com/product-feedback/89671
import tensorboard
from torch.utils.tensorboard import SummaryWriter
from tensorboard import notebook
# root = '/tmp/'
# if not os.path.exists(root):
# os.mkdir(root)
#!rm -rf tmp
TB_PATH = "/kaggle/working/"
# %load_ext tensorboard
# notebook.display()
|
# This notebook presents my solution for the __RFM Analysis Task__ offered by Renat Alimbekov.
# This task is part of the __Task Series__ for Data Analysts/Scientists
# __Task Series__ is a series in which Alimbekov challenges his followers to solve tasks and share their solutions.
# So here I am :)
# Original solution can be found at - https://alimbekov.com/rfm-python/
# The task is to perform RFM Analysis.
# * __olist_orders_dataset.csv__ and __olist_order_payments_dataset.csv__ should be used
# * order_delivered_carrier_date - should be used in this task
# * Since the dataset is not current as of 2021, we should assume that we were asked to perform the RFM analysis the day after the last record
# # Importing the modules
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
import warnings
warnings.filterwarnings("ignore")
# # Loading the data
orders = pd.read_csv("../input/brazilian-ecommerce/olist_orders_dataset.csv")
payments = pd.read_csv("../input/brazilian-ecommerce/olist_order_payments_dataset.csv")
# # Dataframes join
orders["order_delivered_carrier_date"] = pd.to_datetime(
orders["order_delivered_carrier_date"]
) # datetime conversion
payments = payments.set_index("order_id") # preparation before the join
orders = orders.set_index("order_id") # preparation before the join
joined = orders.join(payments) # join on order_id
joined.isna().sum().sort_values(ascending=False)
joined.nunique().sort_values(ascending=False)
# It seems like we have missing values. And unfortunately order_delivered_carrier_date also has missing values. Thus, they should be dropped
last_date = joined["order_delivered_carrier_date"].max() + pd.to_timedelta(1, "D")
RFM = (
joined.dropna(subset=["order_delivered_carrier_date"])
.reset_index()
.groupby("customer_id")
.agg(
Recency=("order_delivered_carrier_date", lambda x: (last_date - x.max()).days),
Frequency=("order_id", "size"),
Monetary=("payment_value", "sum"),
)
)
# Sanity check - do we have NaN values or not?
RFM.isna().sum()
RFM.describe([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]).T
# So, here we can see that we have some outliers in the Frequency and Monetary columns. Thus, they should be dropped and analyzed separately
# # Recency
plt.figure(figsize=(12, 6))
sns.boxplot(x="Recency", data=RFM)
plt.title("Boxplot of Recency")
# # Frequency
RFM["Frequency"].value_counts(normalize=True) * 100
# I guess here we should keep only frequency values that are less than or equal to 5, because by doing this we only drop 0.11% of the records
RFM["Frequency"].apply(
lambda x: "less or equal to 5" if x <= 5 else "greater than 5"
).value_counts(normalize=True) * 100
RFM = RFM[RFM["Frequency"] <= 5]
# # Monetary
RFM["Monetary"].describe([0.25, 0.5, 0.75, 0.9, 0.95, 0.99])
# Here, it seems like the 95th percentile should be used to drop the outliers
plt.figure(figsize=(12, 6))
plt.title("Distribution of Monetary < 95%")
sns.distplot(RFM[RFM["Monetary"] < 447].Monetary)
RFM = RFM[RFM["Monetary"] < 447]
# # RFM groups
# I have used quantiles for assigning scores for Recency and Monetary.
# * groups are 0-33, 33-66, 66-100 quantiles
# For Frequency I have decided to group them by hand
# * score=1 if the frequency value is 1
# * otherwise, the score will be 2
RFM["R_score"] = pd.qcut(RFM["Recency"], 3, labels=[1, 2, 3]).astype(str)
RFM["M_score"] = pd.qcut(RFM["Monetary"], 3, labels=[1, 2, 3]).astype(str)
RFM["F_score"] = RFM["Frequency"].apply(lambda x: "1" if x == 1 else "2")
RFM["RFM_score"] = RFM["R_score"] + RFM["F_score"] + RFM["M_score"]
# 1. CORE - '123' - most recent, frequent, revenue generating
# 2. GONE - '311', '312', '313' - gone, one-timers
# 3. ROOKIE - '111', '112', '113' - just have joined
# 4. WHALES - '323', '213', '223' - most revenue generating
# 5. LOYAL - '221', '222', '321', '322' - loyal users
# 6. REGULAR - '121', '122', '211', '212', - average users
#
RFM[RFM["R_score"] == "1"]["Recency"].mean()
def segment(x):
if x == "123":
return "Core"
elif x in ["311", "312", "313"]:
return "Gone"
elif x in ["111", "112", "113"]:
return "Rookie"
elif x in ["323", "213", "223"]:
return "Whale"
elif x in ["221", "222", "321", "322"]:
return "Loyal"
else:
return "Regular"
RFM["segments"] = RFM["RFM_score"].apply(segment)
RFM["segments"].value_counts(normalize=True) * 100
segmentwise = RFM.groupby("segments").agg(
RecencyMean=("Recency", "mean"),
FrequencyMean=("Frequency", "mean"),
MonetaryMean=("Monetary", "mean"),
GroupSize=("Recency", "size"),
)
segmentwise
font = {"family": "normal", "weight": "normal", "size": 18}
plt.rc("font", **font)
fig = plt.gcf()
ax = fig.add_subplot()
fig.set_size_inches(16, 16)
squarify.plot(
sizes=segmentwise["GroupSize"],
label=segmentwise.index,
color=["gold", "teal", "steelblue", "limegreen", "darkorange", "coral"],
alpha=0.8,
)
plt.title("RFM Segments", fontsize=18, fontweight="bold")
plt.axis("off")
plt.show()
|
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the CSV file
data = pd.read_csv("/kaggle/input/nlp-train/train.csv")
data.columns = ["id", "text", "label"]
# Split the dataset into training, validation and test sets
train_data, val_data = train_test_split(data, test_size=0.1, random_state=42)
train_data, test_data = train_test_split(train_data, test_size=0.1, random_state=42)
# Save the split datasets to CSV files
train_data.to_csv("train.csv", index=False)
val_data.to_csv("val.csv", index=False)
test_data.to_csv("test.csv", index=False)
def preprocess_function(examples):
inputs = [prefix + doc for doc in examples["text"]]
model_inputs = tokenizer(
inputs, max_length=max_input_length, padding=True, truncation=True
)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = examples["label"]
if isinstance(labels, list):
if any(isinstance(l, list) for l in labels):
raise ValueError("label should not have nested lists.")
else:
labels = tokenizer(
labels, max_length=max_target_length, padding=True, truncation=True
)
else:
labels = tokenizer(
[labels], max_length=max_target_length, padding=True, truncation=True
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
TokenModel = "bert-base-chinese"
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(TokenModel)  # not actually used
model_checkpoint = "facebook/bart-large-cnn"  # only this checkpoint is used
if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:  # not used
prefix = "summarize: "
else:
prefix = "" # BART-12-3
max_input_length = 150 # input, source text
max_target_length = 80 # summary, target text
def preprocess_function(examples):
inputs = [prefix + doc for doc in examples["text"]]
model_inputs = tokenizer(
inputs, max_length=max_input_length, padding=True, truncation=True
)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
examples["label"],
max_length=max_target_length,
padding=True,
truncation=True,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
from datasets import dataset_dict
import datasets
train_data = datasets.load_dataset(
"csv", data_files={"train": "/kaggle/working/train.csv"}
)["train"]
val_data = datasets.load_dataset(
"csv", data_files={"validation": "/kaggle/working/val.csv"}
)["validation"]
test_data = datasets.load_dataset(
"csv", data_files={"test": "/kaggle/working/test.csv"}
)["test"]
t = datasets.load_dataset(
"csv", data_files={"t": "/kaggle/input/nlp-dataset/preliminary_a_test.csv"}
)["t"]
dd = datasets.DatasetDict(
{"train": train_data, "validation": val_data, "test": test_data}
)
raw_datasets = dd
# tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
tokenized_datasets = raw_datasets.map(
preprocess_function,
batched=True,
load_from_cache_file=False,
remove_columns=["text", "label", "id"],
)
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=5):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
# Preview the dataset
show_random_elements(raw_datasets["train"])
from transformers import (
AutoModelForSeq2SeqLM,
DataCollatorForSeq2Seq,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
)
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
# Extract a subset of the model's layers for fine-tuning (teacher -> student distillation)
def copy_layers(
src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]
) -> None:
layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
assert len(dest_layers) == len(
layers_to_copy
), f"{len(dest_layers)} != {len(layers_to_copy)}"
dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [
0
], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11], # the first, 7th and 12th decode layers
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def create_student_by_copying_alternating_layers(
teacher: Union[str, PreTrainedModel],
save_path: Union[str, Path] = "student",
e: Union[int, None] = None,
d: Union[int, None] = None,
copy_first_teacher_layers=False,
e_layers_to_copy=None,
d_layers_to_copy=None,
**extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
_msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(teacher, str):
AutoTokenizer.from_pretrained(teacher).save_pretrained(
save_path
) # purely for convenience
teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
else:
assert isinstance(
teacher, PreTrainedModel
), f"teacher must be a model or string got type {type(teacher)}"
init_kwargs = teacher.config.to_diff_dict()
try:
teacher_e, teacher_d = (
teacher.config.encoder_layers,
teacher.config.decoder_layers,
)
if e is None:
e = teacher_e
if d is None:
d = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
except AttributeError: # T5
teacher_e, teacher_d = (
teacher.config.num_layers,
teacher.config.num_decoder_layers,
)
if e is None:
e = teacher_e
if d is None:
d = teacher_d
init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(extra_config_kwargs)
# Copy weights
student_cfg = teacher.config_class(**init_kwargs)
student = AutoModelForSeq2SeqLM.from_config(student_cfg)
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
info = student.load_state_dict(teacher.state_dict(), strict=False)
assert (
info.missing_keys == []
), info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
)
student.save_pretrained(save_path)
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
if d_layers_to_copy is None:
d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)
try:
copy_layers(
teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy
)
copy_layers(
teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy
)
except (
AttributeError
): # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
)
student.config.init_metadata = dict(
teacher_type=teacher.config.model_type,
copied_encoder_layers=e_layers_to_copy,
copied_decoder_layers=d_layers_to_copy,
)
student.save_pretrained(save_path)
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
def pick_layers_to_copy(n_student, n_teacher):
try:
val = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first {n_student}"
)
return list(range(n_student))
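# Illustrative note (an assumption about LAYERS_TO_COPY, the hardcoded mapping defined earlier in
# this script): when a (student, teacher) depth pair is missing from that mapping, the helper
# warns and falls back to the first n_student layers, e.g. pick_layers_to_copy(3, 7) -> [0, 1, 2].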
model, list_en, list_de = create_student_by_copying_alternating_layers(
model, "trian.pth", 12, 3
)
batch_size = 2
args = Seq2SeqTrainingArguments(
output_dir="results",
num_train_epochs=1, # demo
do_train=True,
do_eval=True,
per_device_train_batch_size=batch_size, # demo
per_device_eval_batch_size=batch_size,
# learning_rate=3e-05,
warmup_steps=500,
weight_decay=0.1,
label_smoothing_factor=0.1,
predict_with_generate=True,
logging_dir="logs",
logging_steps=50,
save_total_limit=3,
)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
import jieba
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Rouge expects a newline after each sentence
decoded_preds = ["\n".join(jieba.cut(pred.strip())) for pred in decoded_preds]
decoded_labels = ["\n".join(jieba.cut(label.strip())) for label in decoded_labels]
result = metric.compute(
predictions=decoded_preds, references=decoded_labels, use_stemmer=True
)
# Extract a few results
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
# Add mean generated length
prediction_lens = [
np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions
]
result["gen_len"] = np.mean(prediction_lens)
return {k: round(v, 4) for k, v in result.items()}
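# Note: `metric` used inside compute_metrics is assumed to be the ROUGE metric loaded earlier in
# the notebook, e.g. (assumption):
# from datasets import load_metric
# metric = load_metric("rouge")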
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
trainer.train()
import torch
# Save and load the model
torch.save(model.state_dict(), "/kaggle/working/bart.pth")
import torch
model.load_state_dict(torch.load("/kaggle/working/bart.pth"))
import torch
TokenModel = "bert-base-chinese"
from transformers import AutoTokenizer
model_checkpoint = "fnlp/bart-base-chinese" # 只有这个有用
from transformers import (
AutoModelForSeq2SeqLM,
DataCollatorForSeq2Seq,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
)
# Use the GPU if available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint).to(device)
import csv
import torch
from torch.nn.utils.rnn import pad_sequence
# Assume the input data is in the second column of the CSV and each cell contains multiple space-separated numbers
csv_file = "/kaggle/input/nlp-dataset/preliminary_a_test.csv"  # replace with your own file name if needed
input_tensors = []
# Read the CSV file
with open(csv_file, "r") as file:
reader = csv.reader(file)
for row in reader:
        # Parse the second column into a list of IDs
num_strings = row[1].split(" ")
id_list = list(map(int, num_strings))
        # Pad or truncate the ID list to a fixed length of 100
        fixed_length = 100  # fixed target length
if len(id_list) < fixed_length:
            id_list += [0] * (fixed_length - len(id_list))  # pad with zeros
else:
            id_list = id_list[:fixed_length]  # truncate
        # Convert the ID list to a tensor
input_tensor = torch.tensor(id_list, dtype=torch.long).to(device)
input_tensors.append(input_tensor)
# Add special tokens
# Assume the ID of <s> is 2 and the ID of </s> is 3
start_token = 2
end_token = 3
for i in range(len(input_tensors)):
input_tensors[i] = torch.cat(
[
torch.tensor([start_token], dtype=torch.long).to(device),
input_tensors[i],
torch.tensor([end_token], dtype=torch.long).to(device),
],
dim=0,
)
# Convert the list of input tensors into the format expected by the BART model
input_tensors_padded = pad_sequence(input_tensors, batch_first=True).to(device)
input_ids = input_tensors_padded
input_masks = input_ids != 0  # build the attention mask marking the non-padding positions
def generate_summary(test_samples, model):
inputs = tokenizer(
test_samples,
padding="max_length",
truncation=True,
max_length=max_input_length,
return_tensors="pt",
)
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
outputs = model.generate(input_ids, attention_mask=attention_mask)
output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # Filter out the '[unused2]' token
output_str_filtered = [
" ".join([token for token in summary.split() if token != "[unused2]"])
for summary in output_str
]
return output_str_filtered
data = pd.read_csv("/kaggle/input/nlp-dataset/preliminary_a_test.csv")
data.columns = ["id", "text"]
test_samples = data["text"]
test_samples
test_samples = test_samples.apply(lambda x: " ".join(x.split()))
# Check the data type
print(type(test_samples[0]))  # prints <class 'str'>
import csv
import tqdm
fp = open("pred.csv", "w", newline="")
writer = csv.writer(fp)
tot = 0
for i in range(test_samples.shape[0]):
pred = generate_summary(test_samples[i], model)
writer.writerow([tot, pred])
tot += 1
fp.close()
test_samples = "22 12 74 71 64 56 16 248 14 40 13 83 52 43 44 23 21 25 11 97 147 126 231 10 34 12 68 685 1021 52 43 44 23 21 11 97 147 126 231 11 34 12 12 14 32 93 276 309 14 47 16 90 16 39 36 87 10 24 42"
print(type(test_samples))
a = generate_summary(test_samples, model)
a
from transformers import (
AutoModelForSeq2SeqLM,
DataCollatorForSeq2Seq,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
)
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model and tokenizer names
model_checkpoint = "fnlp/bart-base-chinese"
tokenizer_checkpoint = "bert-base-chinese"
from transformers import AutoTokenizer
# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint).to("cuda")
# Pick 4 samples from the test set
test_examples = test_data["text"][:4]
"""inputs = tokenizer(
test_examples,
padding="max_length",
truncation=True,
max_length=max_input_length,
return_tensors="pt",
)"""
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
# Generate
outputs = model.generate(input_ids, attention_mask=attention_mask, max_length=128)
# Convert tokens back to text
output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
output_str = [s.replace(" ", "") for s in output_str]
print(output_str)
# Pick 5 samples from the test set
test_examples = test_data["text"][:5]
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
# Generate
outputs = model.generate(input_ids, attention_mask=attention_mask, max_length=128)
# Convert tokens back to text
# output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
output_str = [s.replace(" ", "") for s in output_str]
print(output_str)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/bitcoin-and-fear-and-greed/dataset.csv")
df
# # **Bitcoin Fear and greed days split overall**
# Define the colors for each bar
colors = ["red", "blue", "green", "purple", "orange"]
bar_chart = df["Value_Classification"].hist()
bar_chart.set_title("Bitcoin fear and greed index 1 Feb to 31 Mar 2023")
bar_chart.set_ylabel("Number of days")
print(df["Value_Classification"].value_counts())
# # **Bitcoin Fear and greed per month**
df["Date"] = pd.to_datetime(df["Date"])
# Extract the short name of the month from the date column
df["month"] = df["date"].dt.strftime("%b")
df
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# Create the pivot table
pivot = pd.pivot_table(
df, index="month", columns="Value_Classification", values="Value", aggfunc="count"
)
# Define the month order
month_order = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
# Reorder the index
pivot = pivot.reindex(month_order)
# Define the column order
column_order = ["Extreme Fear", "Fear", "Neutral", "Greed", "Extreme Greed"]
# Reorder the columns
pivot = pivot.reindex(column_order, axis=1)
# Change the theme
plt.style.use("ggplot")
# Create a custom color map
cmap = ListedColormap(["red", "#ffa500", "#ffffff", "#20d420", "#119711"])
# Create the bar chart
ax = pivot.plot(
kind="bar",
title="Bitcoin fear and greed index per Month 1 Feb to 31 Mar 2023",
xlabel="Months",
ylabel="Number of days",
figsize=(16, 6),
colormap=cmap,
legend=False,
)
# Create a custom legend
ax.legend(column_order)
# Show the plot
plt.show()
# Group the data by quarters
df["quarter"] = df.groupby(pd.Grouper(key="Date", freq="Q"))["Date"].transform("first")
# Create the pivot table
qt_pivot = pd.pivot_table(
df, index="quarter", columns="Value_Classification", values="Value", aggfunc="count"
)
# Define the column order
column_order = ["Extreme Fear", "Fear", "Neutral", "Greed", "Extreme Greed"]
# Reorder the columns
qt_pivot = qt_pivot.reindex(column_order, axis=1)
# Change the theme
plt.style.use("ggplot")
# Create a custom color map
cmap = ListedColormap(["red", "#ffa500", "#ffffff", "#20d420", "#119711"])
# Create a 2x2 grid of subplots
fig, axs = plt.subplots(2, 2, figsize=(14, 10), sharex=True, sharey=True)
# Plot each quarter's data on a separate subplot
for i, (quarter, data) in enumerate(qt_pivot.iterrows()):
ax = axs[i // 2][i % 2]
data.plot(
kind="bar",
title=quarter.strftime("%b-%Y"),
xlabel="",
ylabel="",
ax=ax,
colormap=cmap,
legend=False,
)
# Create a custom legend
axs[0][0].legend(column_order)
# Show the plot
plt.show()
# Group the data by quarters
df["quarter"] = df.groupby(pd.Grouper(key="Date", freq="Q"))["Date"].transform("first")
# Create the pivot table
pivot = pd.pivot_table(
df, index="quarter", columns="Value_Classification", values="Value", aggfunc="count"
)
# Define the column order
column_order = ["Extreme Fear", "Fear", "Neutral", "Greed", "Extreme Greed"]
# Reorder the columns
pivot = pivot.reindex(column_order, axis=1)
# Change the theme
plt.style.use("ggplot")
# Create a custom color map
cmap = ListedColormap(["red", "#ffa500", "#ffffff", "#00ff00"])
# Create a 2x2 grid of subplots
fig, axs = plt.subplots(2, 2, figsize=(14, 10), sharex=True, sharey=True)
# Plot each quarter's data on a separate subplot
for i, (quarter, data) in enumerate(pivot.iterrows()):
ax = axs[i // 2][i % 2]
data.plot(
kind="bar",
title=quarter.strftime("%b-%Y"),
xlabel="",
ylabel="",
ax=ax,
colormap=cmap,
legend=False,
)
# Create a custom legend
axs[0][0].legend(column_order)
# Show the plot
plt.show()
|
# # Concepts and ideas
# This notebook attempts to create a model that predicts/estimates a given neutrino direction from the set of coordinates measured by several sensors in one event.
# An RNN (LSTM) neural network was defined to solve this problem:
# - 5 linear layers
# - ReLU as activation function
# - L1Loss as loss function
# - Adam as optimizer
# # Merge Data
def get_train_df_from_a_batch(train_batch_df, sensors_df, train_meta_df, batch_number):
"""
Converts train_batch, train_meta and sensor_geometry into a 'train_df' dataframe containing features and targets
It filters 'auxiliary' field to only 'False' values (reduces db in 27%), due to challenge explanation:
' If True, the pulse was not fully digitized, is of lower quality, and was more likely to originate from noise.'
It uses polars dataframes only.
"""
train_batch_df = train_batch_df.filter(pl.col("auxiliary") == False)
sensors_df = sensors_df.with_columns(
pl.col("sensor_id").cast(pl.Int16, strict=False)
)
train_df = train_batch_df.join(sensors_df, how="left", on="sensor_id")
train_meta_batch_df = train_meta_df.filter(pl.col("batch_id") == batch_number)
train_df = train_df.join(train_meta_batch_df, how="left", on="event_id")
train_df = train_df.drop(
columns=["batch_id", "auxiliary"]
) # train_df is filtered for 1 batch_id and auxiliary = False, these columns are useless
train_df = train_df.drop(columns=["first_pulse_index"])
# train_df = train_df.with_columns(xy = pl.col('x') * pl.col('y'))
del train_meta_batch_df # memory
del train_batch_df # memory
print(f"Train dataframe:\n")
print(train_df)
return train_df
# # 3D Plotting
def plot_3D(trn_df, event_num):
"""
Plots x, y, and z from sensors vs azimuth and zenith calculated, per 1 event
"""
# Get x, y, z, azimuth and zenith values from sensors
train_df = trn_df.filter(pl.col("event_id") == event_num)
m = 0
M = len(train_df.collect())
xs = train_df.collect()[m:M, "x"]
ys = train_df.collect()[m:M, "y"]
zs = train_df.collect()[m:M, "z"]
azim = train_df.collect()[m:M, "azimuth"]
zen = train_df.collect()[m:M, "zenith"]
# Calculate the Cartesian coordinates of the vector
xp = np.sin(zen) * np.cos(azim)
yp = np.sin(zen) * np.sin(azim)
zp = np.cos(zen)
# Set figure
fig = plt.figure(figsize=(12, 20))
ax = fig.add_subplot(111, projection="3d")
# Plot the vector as a line from (0,0,0) to (x,y,z)
ax.scatter(xp, yp, zp, color="g")
ax.scatter(xs, ys, zs, color="b")
ax.view_init(-160, 30)
# Add labels for the x, y, and z axes
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title(f"Event {event_num}")
# Show the plot
plt.show()
# # EDA
def EDA_report(data):
"""
Generates an EDA report using sweetviz package. Use "data" as your dataset
"""
import datetime
import sweetviz as sw
from IPython.display import FileLink, display
now = datetime.datetime.now()
report_filename = f"EDA_report{now}.html"
analyze_report = sw.analyze(data)
analyze_report.show_html(report_filename, open_browser=True)
link = FileLink(report_filename)
print("\nClick here to open report:")
display(link)
return None
# ## Correlations
# Utility functions from Tutorial
def make_mi_scores(X, y):
from sklearn.feature_selection import mutual_info_regression
for colname in ["object", "category"]:
if colname in X.dtypes:
X[colname], _ = X[colname].factorize()
# All discrete features should now have integer dtypes
discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
mi_scores = mutual_info_regression(
X, y, discrete_features=discrete_features, random_state=0
)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
# X = train_analysis.to_pandas()
# y_az = X['azimuth']
# X = X.drop (columns = ['azimuth'])
# mi_scores = make_mi_scores(X, y_az)
# del train_analysis #memory
# mi_scores
# # Score Function
def angular_dist_score(az_true, zen_true, az_pred, zen_pred, batch_size=1):
"""
calculate the MAE of the angular distance between two directions.
The two vectors are first converted to cartesian unit vectors,
and then their scalar product is computed, which is equal to
the cosine of the angle between the two vectors. The inverse
cosine (arccos) thereof is then the angle between the two input vectors
Parameters:
-----------
az_true : float (or array thereof)
true azimuth value(s) in radian
zen_true : float (or array thereof)
true zenith value(s) in radian
az_pred : float (or array thereof)
predicted azimuth value(s) in radian
zen_pred : float (or array thereof)
predicted zenith value(s) in radian
Returns:
--------
dist : float
mean over the angular distance(s) in radian
"""
if not (
np.all(np.isfinite(az_true))
and np.all(np.isfinite(zen_true))
and np.all(np.isfinite(az_pred))
and np.all(np.isfinite(zen_pred))
):
raise ValueError("All arguments must be finite")
import numexpr as ne
n = len(az_true)
angle_sum = 0.0
for i in range(0, n, batch_size):
end = min(i + batch_size, n)
sa1 = np.sin(az_true[i:end]).astype(np.float32)
ca1 = np.cos(az_true[i:end]).astype(np.float32)
sz1 = np.sin(zen_true[i:end]).astype(np.float32)
cz1 = np.cos(zen_true[i:end]).astype(np.float32)
sa2 = np.sin(az_pred[i:end]).astype(np.float32)
ca2 = np.cos(az_pred[i:end]).astype(np.float32)
sz2 = np.sin(zen_pred[i:end]).astype(np.float32)
cz2 = np.cos(zen_pred[i:end]).astype(np.float32)
scalar_prod = ne.evaluate("sz1*sz2*(ca1*ca2 + sa1*sa2) + cz1*cz2")
scalar_prod = np.clip(scalar_prod, -1, 1)
angle_sum += np.sum(np.arccos(scalar_prod))
    return angle_sum / n  # mean over all n angular distances
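# A minimal sanity check of angular_dist_score (illustrative values only; run it after the imports
# below): identical directions give ~0 and exactly opposite directions give ~pi, e.g.
# _az = np.array([0.3, 1.2]); _zen = np.array([0.7, 2.1])
# angular_dist_score(_az, _zen, _az, _zen)                 # ~0.0
# angular_dist_score(_az, _zen, _az + np.pi, np.pi - _zen) # ~3.14159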
# # Train and score a RNN LSTM neural network
import torch
from torch.utils.data import Dataset, DataLoader
class CustomRNN(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, output_size):
super(CustomRNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.fc = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
out, _ = self.lstm(x, (h0, c0))
out = self.fc(out[:, -1, :])
return out
def train_model(
train_dataset, batch_size, num_epochs, learning_rate, device, model_path=None
):
scores = []
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
model = CustomRNN(
input_size=train_dataset.features.shape[1],
hidden_size=64,
num_layers=2,
output_size=2,
)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    criterion = torch.nn.L1Loss()  # mean absolute error
    if model_path is not None and os.path.exists(model_path):
model.load_state_dict(torch.load(model_path))
model.to(device)
for epoch in tqdm(range(num_epochs)):
running_loss = 0.0
for i, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
scores.append(running_loss / len(train_loader))
if model_path is not None:
torch.save(model.state_dict(), model_path)
score = np.mean(scores)
return model, score
def predict_model(test_dataset, batch_size, device, model_path):
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
model = CustomRNN(
input_size=test_dataset.features.shape[1],
hidden_size=64,
num_layers=2,
output_size=2,
)
model.load_state_dict(torch.load(model_path))
model.to(device)
predictions = []
with torch.no_grad():
for inputs, labels in test_loader:
inputs = inputs.to(device)
outputs = model(inputs)
predictions.extend(outputs.cpu().numpy())
return np.array(predictions)
# # Model train
# ## Imports
import matplotlib.pyplot as plt
import polars as pl
import numpy as np
import pandas as pd
import os, gc
from sklearn.model_selection import train_test_split
from sklearn.metrics import SCORERS, mean_absolute_error
from tqdm import tqdm
from torch import cuda
print("\nFinished loading imports.\n")
if cuda.is_available():
device_lgbm = "gpu"
device_nn = "cuda"
else:
device_lgbm = "cpu"
device_nn = "cpu"
print(f"Device for training is {device_lgbm}.\n")
input_path = "/kaggle/input/"
work_path = "/kaggle/working/"
scores_nn_path = f"{work_path}scores_nn.csv"
scores_nn_df = pd.DataFrame([])
model_path = f"{work_path}model.pt"
saved_scores_nn_path = f"{input_path}scores_nn.csv"
saved_model_path = f"{input_path}model.pt"
for dirname, _, filenames in os.walk(input_path):
for filename in filenames:
filepath = os.path.join(dirname, filename)
if "sensor" in filepath:
sensors_df = pl.read_csv(filepath).lazy()
print("'sensor_geometry' file loaded.")
elif "score" in filepath:
scores_nn_df = pd.read_csv(filepath)
elif "train_meta" in filepath:
train_meta_filepath = filepath
print("'train_meta' file path found and loaded.")
print("\nAll paths are set.\n")
# ## 3D Plotting and EDA
# train_analysis = train_df.collect().sample(frac=0.001)
# EDA_report (train_analysis.to_pandas())
# 
# 
# %matplotlib inline
# 3D plotting of batch 1
batch_1_path = "/kaggle/input/icecube-neutrinos-in-deep-ice/train/batch_1.parquet"
train_meta_df = pl.read_parquet(train_meta_filepath).lazy()
train_batch_df = pl.read_parquet(batch_1_path).lazy()
train_df = get_train_df_from_a_batch(
train_batch_df.collect(), sensors_df.collect(), train_meta_df.collect(), 1
).lazy()
del train_batch_df # memory
del train_meta_df # memory
plot_3D(train_df, 3266196) # event_id = 3266196
plot_3D(train_df, 3266196) # event_id = 3266196
# 
# ## Train model
y_preds = []
submission_df = pl.DataFrame([]).lazy()
counts = 1
max = 30
for dirname, _, filenames in os.walk(input_path):
for filename in filenames:
filepath = os.path.join(dirname, filename)
if ("batch" in filepath) and ("train" in dirname):
batch_number = int(filename.split("_")[1].split(".")[0])
print(
f"TRAINING BATCH ID {batch_number} - {counts} BATCHES OF {max}\n\ntrain_batch_{batch_number}' file loaded.\n\n"
)
if len(scores_nn_df) != 0 and (
batch_number in scores_nn_df.batch_id.values
):
print("\nBatch already trained. Skipping to next batch.\n")
continue
train_meta_df = pl.read_parquet(train_meta_filepath).lazy()
print("'train_meta' file loaded.")
train_batch_df = pl.read_parquet(filepath).lazy()
print(f"\nLoading 'train_batch' file.\n")
print(train_batch_df.collect())
train_df = get_train_df_from_a_batch(
train_batch_df.collect(),
sensors_df.collect(),
train_meta_df.collect(),
batch_number,
)
del train_meta_df # memory
del train_batch_df
gc.collect()
train_df = train_df.sample(frac=0.05)
trn_df, tst_df = train_test_split(train_df, test_size=0.2, random_state=42)
print("\nTraining model...\n")
print(trn_df)
train_dataset = CustomDataset(trn_df)
if os.path.exists(saved_model_path):
model_filepath = saved_model_path
else:
model_filepath = model_path
model, m_score = train_model(
train_dataset,
batch_size=128,
num_epochs=10,
learning_rate=1e-3,
device=device_nn,
model_path=model_filepath,
)
del train_df # memory
del trn_df
del train_dataset # memory
gc.collect()
print("\nModel score:", m_score)
print("\nPredicting values for model score...\n")
print(tst_df)
test_dataset = CustomDataset(tst_df)
y_preds.append(
predict_model(
test_dataset,
batch_size=128,
device=device_nn,
model_path=model_filepath,
)
)
y_pred = np.array(y_preds).reshape(-1, 2)
y_preds = []
torch.cuda.empty_cache()
az_pred = y_pred[:, 0]
print("\nAzimuth preds:\n", az_pred)
ze_pred = y_pred[:, 1]
print("\nZenith preds:\n", ze_pred)
i = np.random.choice(
list(range(0, len(az_pred))), int(len(az_pred) * 1), replace=False
)
score = angular_dist_score(
tst_df["azimuth"].to_numpy()[i],
tst_df["zenith"].to_numpy()[i],
az_pred[i],
ze_pred[i],
)
print("\nScore:", score)
print("\nCache cleaned.\n")
if len(scores_nn_df) != 0:
scores_nn_df = scores_nn_df.append(
pd.DataFrame([{"batch_id": batch_number, "score": score}])
)
else:
scores_nn_df = pd.DataFrame(
[{"batch_id": batch_number, "score": score}]
)
scores_nn_df.to_csv(scores_nn_path, index=False)
print(scores_nn_df)
batch_results = {
"event_id": tst_df["event_id"],
"azimuth_pred": az_pred,
"zenith_pred": ze_pred,
"azimuth_true": tst_df["azimuth"],
"zenith_true": tst_df["zenith"],
}
del test_dataset # memory
del tst_df
gc.collect()
batch_results_df = pl.DataFrame(batch_results).lazy()
if submission_df.select(pl.count()).collect()[0, 0] == 0:
submission_df = batch_results_df
else:
submission_df = pl.concat([submission_df, batch_results_df])
if counts == max:
break
counts += 1
# | Batch | Score | What changed |
# | :- | :- | :- |
# | 240 | 1.330 | Initial |
# | 240 | 1.233 | epochs: 5 to 10 |
# | 240 | 1.249 | LR: 1e-3 to 1e-2 (changed back to 1e-3)|
# | 240 | 1.219 | Model: added one more layer (32 to 16)|
# | 240 | 1.290 | Model: added one more layer (16 to 8), remove layer |
# | Batch | Score (SGD) | Score (Adam) | Score (delete: first_pulse_index) |
# | :- | :- | :- | :- |
# | 240 | 1.41547 | 1.266042 | 1.24487 |
# | 295 | 1.36850 | 1.198497 | 1.18006 |
# | 158 | 1.41533 | 1.243094 | 1.22356 |
# | 35 | 1.40576 | 1.232627 | 1.22108 |
# | 145 | 1.39520 | 1.201339 | 1.17850 |
print(submission_df.collect().sort("event_id"))
submission_df.groupby("event_id").mean().collect()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import plotly.express as px
import pycomp
from pycomp.viz.insights import *
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/widsdatathon2021/TrainingWiDS2021.csv", index_col=0)
test = pd.read_csv("/kaggle/input/widsdatathon2021/UnlabeledWiDS2021.csv", index_col=0)
data_dictionary_df = pd.read_csv(
"/kaggle/input/widsdatathon2021/DataDictionaryWiDS2021.csv"
)
pd.set_option("display.max_rows", train.shape[0] + 1) # all rows
pd.set_option("display.max_columns", 500) # all columns
pd.set_option("max_colwidth", 400) # display of complete text in a row
data_dictionary_df = data_dictionary_df[
["Category", "Variable Name", "Data Type", "Description"]
]
data_dictionary_df
train = train.drop(
["encounter_id", "hospital_id", "icu_id", "pre_icu_los_days", "readmission_status"],
axis=1,
)
train.head()
train.tail()
train.describe()
test.head()
test.tail()
train.shape, test.shape
train.dtypes.value_counts()
test.dtypes.value_counts()
columns_info = pd.DataFrame()
columns_info["unique values"] = train.nunique()
columns_info["type"] = train.dtypes
columns_info
null_columns = train.columns[train.isnull().any()]
train[null_columns].isnull().sum()
labels = []
values = []
for col in null_columns:
labels.append(col)
values.append(train[col].isnull().sum())
ind = np.arange(len(labels))
width = 0.9
fig, ax = plt.subplots(figsize=(12, 50))
rects = ax.barh(ind, np.array(values), color="violet")
ax.set_yticks(ind + ((width) / 2.0))
ax.set_yticklabels(labels, rotation="horizontal")
ax.set_xlabel("Count of missing values")
ax.set_ylabel("Column Names")
ax.set_title("Variables with missing values")
null_columns = test.columns[test.isnull().any()]
test[null_columns].isnull().sum()
labels = []
values = []
for col in null_columns:
labels.append(col)
values.append(test[col].isnull().sum())
ind = np.arange(len(labels))
width = 0.9
fig, ax = plt.subplots(figsize=(12, 50))
rects = ax.barh(ind, np.array(values), color="violet")
ax.set_yticks(ind + ((width) / 2.0))
ax.set_yticklabels(labels, rotation="horizontal")
ax.set_xlabel("Count of missing values")
ax.set_ylabel("Column Names")
ax.set_title("Variables with missing values")
target = train["diabetes_mellitus"]
diab_positive = len(target[target == 1])
diab_negative = len(target[target == 0])
total_records = len(target)
print(
"Number of records positive (1):",
diab_positive,
"(",
round(diab_positive / total_records, 3) * 100,
"%)",
)
print(
"Number of records negative (0):",
diab_negative,
"(",
round(diab_negative / total_records, 3) * 100,
"%)",
)
fig = px.histogram(
train[["age", "gender", "ethnicity", "diabetes_mellitus", "bmi"]].dropna(),
x="age",
y="diabetes_mellitus",
color="gender",
marginal="box", # or violin, rug
hover_data=train[
["age", "gender", "ethnicity", "diabetes_mellitus", "bmi"]
].columns,
)
fig.show()
def count_plot(col_name, fig_size=(30, 30)):
"Defining function to make all count plot graphs for train and test data together and hence comparable"
fig = plt.figure(figsize=fig_size)
fig.add_subplot(2, 1, 1)
ax1 = sns.countplot(
x=col_name,
data=train,
order=train[col_name].value_counts().index,
palette="Set3",
)
for p in ax1.patches:
        ax1.annotate(
"{:.1f}".format(p.get_height()), (p.get_x() + 0.4, p.get_height() + 100)
)
ax1.set_title("Train data distribution", fontsize="large")
ax1.set_ylabel(col_name)
fig.add_subplot(2, 1, 2)
ax2 = sns.countplot(
x=col_name,
data=test,
order=train[col_name].value_counts().index,
palette="Set3",
)
for p in ax2.patches:
        ax2.annotate(
"{:.1f}".format(p.get_height()), (p.get_x() + 0.4, p.get_height() + 100)
)
ax2.set_title("Test data distribution", fontsize="large")
ax2.set_ylabel(col_name)
plt.show()
sns.catplot(x="diabetes_mellitus", kind="count", data=train)
# Cut the window in 2 parts
f, (ax_box, ax_hist) = plt.subplots(
2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)}
)
# Add a graph in each part
plt.figure(figsize=(30, 30))
sns.boxplot(train["age"], ax=ax_box)
sns.distplot(train["age"], ax=ax_hist)
# Remove x axis name for the boxplot
ax_box.set(xlabel="")
# Cut the window in 2 parts
f, (ax_box, ax_hist) = plt.subplots(
2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)}
)
# Add a graph in each part
plt.figure(figsize=(30, 30))
sns.boxplot(test["age"], ax=ax_box)
sns.distplot(test["age"], ax=ax_hist)
# Remove x axis name for the boxplot
ax_box.set(xlabel="")
plt.hist(test["age"], bins=8) # Here you can play with number of bins
plt.title("Age distribution")
plt.xlabel("Age")
plt.ylabel("Patient")
plt.show()
plt.hist(test["age"], bins=8) # Here you can play with number of bins
plt.title("Age distribution")
plt.xlabel("Age")
plt.ylabel("Patient")
plt.show()
fig, ax = plt.subplots(figsize=(10, 10))
sns.distplot(train["height"], color="y")
fig, ax = plt.subplots(figsize=(10, 10))
sns.distplot(train["weight"], color="r")
plot = sns.catplot(
"diabetes_mellitus",
col="elective_surgery",
data=train,
kind="count",
height=6,
aspect=0.7,
)
plot.fig.suptitle("Elective surgery and Diabetes Mellitus", size=20, y=1.05)
plt.figure(figsize=(30, 30))
sns.catplot(y="bilirubin_apache", x="diabetes_mellitus", data=train)
count_plot("icu_admit_source", fig_size=(30, 30))
count_plot("hospital_admit_source", fig_size=(30, 30))
count_plot("icu_stay_type", fig_size=(30, 30))
count_plot("icu_type", fig_size=(30, 30))
count_plot("hospital_admit_source", fig_size=(30, 30))
count_plot("gender", fig_size=(30, 30))
fig = plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
plt.title("Pie-chart for Ethnicity")
train["ethnicity"].value_counts().plot.pie(autopct="%1.1f%%")
fig, ax = plt.subplots(figsize=(16, 8))
fig.suptitle("Ethnicity Distribution", size=20)
explode = (0.05, 0.05, 0.05, 0.05, 0.3, 0.5)
labels = [
"Caucasian",
"African American",
"Other/Unknown",
"Hispanic",
"Asian",
"Native American",
]
sizes = train["ethnicity"].value_counts()
ax.pie(
sizes,
explode=explode,
startangle=60,
labels=labels,
autopct="%1.0f%%",
pctdistance=0.9,
)
ax.add_artist(plt.Circle((0, 0), 0.4, fc="white"))
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
fig.suptitle(
"ICU Type and Stay Type Distribution",
size=20,
)
axs = [ax1, ax2]
explode = (0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05)
labels = [
"Med-Surg ICU",
"CCU-CTICU",
"MICU",
"Neuro ICU",
"Cardiac ICU",
"SICU",
"CSICU",
"CTICU",
]
sizes = train["icu_type"].value_counts()
ax1.pie(
sizes,
explode=explode,
startangle=60,
labels=labels,
autopct="%1.0f%%",
pctdistance=0.6,
)
ax1.add_artist(plt.Circle((0, 0), 0.4, fc="white"))
explode = (0.05, 0.05, 0.3)
labels = ["admit", "transfer", "readmit"]
sizes = train["icu_stay_type"].value_counts()
ax2.pie(
sizes,
explode=explode,
startangle=60,
labels=labels,
autopct="%1.1f%%",
pctdistance=0.9,
)
ax2.add_artist(plt.Circle((0, 0), 0.4, fc="white"))
plt.show()
plot_countplot(
df=train,
col="intubated_apache",
hue="diabetes_mellitus",
palette="Pastel1",
title="intubated_apache and diabetes mellitus",
)
d_map = {1: "Diabetic", 0: "Not Diabetic"}
plot_double_donut_chart(
df=train,
col1="ventilated_apache",
col2="diabetes_mellitus",
label_names_col1=d_map,
colors1=["pink", "hotpink"],
colors2=["lightskyblue", "dodgerblue"],
title="ventilated_apache and diabetes mellitus",
)
corr_Matrix = train.select_dtypes(exclude=object).corr().abs()
corr_Matrix
corr_triangle = corr_Matrix.where(
    np.triu(np.ones(corr_Matrix.shape), k=1).astype(bool)
)
corr_triangle
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load dataset using NetCDF4
import netCDF4 as nc
fh = nc.Dataset(
"/kaggle/input/geoscience/sst.mon.mean.nc", mode="r"
) # file handle, open in read only mode
fh
# Separate lon,lat,sst and time in different variables
import pandas as pd
lon = fh.variables["lon"][:]
lat = fh.variables["lat"][:]
sst = fh.variables["sst"][:]
time = fh.variables["time"][:]
# Plot data on Basemap taking the Nino 3.4 region ([170:290] for longitude and [84:96] for latitude)
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
def plot_map(lon, lat, data):
m = Basemap(
projection="cyl",
resolution="l",
llcrnrlat=np.min(fh.variables["lat"][:]),
urcrnrlat=np.max(fh.variables["lat"][:]),
llcrnrlon=np.min(fh.variables["lon"][:]),
urcrnrlon=np.max(fh.variables["lon"][:]),
)
m.drawcoastlines()
# plt.show()
lons, lats = np.meshgrid(lon, lat)
x, y = m(lons, lats)
m.drawcoastlines()
levels = np.linspace(min(np.unique(data)), max(np.unique(data)), 21)
# levels=[-30,-20,-12,-9,-6,-2,-1,+1,+2,+6,+9,+12,+20,+30]
temp = m.contourf(x, y, data, levels=levels, cmap="seismic")
cb = m.colorbar(temp, "bottom", size="15%", pad="10%")
# m.drawcoastlines()
plt.title("sst")
cb.set_label("sst")
plt.show()
plt.clf()
# Plot the mean data over the years for the selected region on Basemap
plot_map(lon[170:290], lat[84:96], sst[:, 84:96, 170:290].mean(axis=0))
# Prepare sst anomaly (sst value - mean sst value) for the whole dataset as model input
X = sst[:, :, :].reshape(sst.shape[0], -1)
X = X - X.mean(axis=0)
print(X.shape)
df = pd.DataFrame(X)
df.head()
print(X)
# Calculate sst anomaly for the nino 3.4 region
nino_3_4_sst_anomaly = sst[:, 84:96, 170:290].reshape(
sst.shape[0], (96 - 84) * (290 - 170)
)
# calculate spatial mean nino 3.4 sst anomaly for each year
mean_nino_3_4_sst_anomaly = nino_3_4_sst_anomaly.mean(axis=1)
# Detrend mean nino 3.4 sst anomaly
mean_nino_3_4_sst_anomaly = mean_nino_3_4_sst_anomaly - mean_nino_3_4_sst_anomaly.mean(
axis=0
)
# Plot detrended mean_nino_3_4_sst_anomaly
plt.plot(mean_nino_3_4_sst_anomaly)
plt.show()
# Prepare labels such that if the mean Nino 3.4 SST anomaly is > 0.5 the sample is labelled 'El-Nino', if it is < 0.5 it is labelled 'La-Nina', and the remaining values are discarded. Prepare the corresponding input values as well.
# 1 - El-Nino, 0 - La-Nina
def prepare_model_input(mean_nino_3_4_sst_anomaly, X):
index = []
ENSO_label = []
for i in range(len(mean_nino_3_4_sst_anomaly)):
if mean_nino_3_4_sst_anomaly[i] > 0.5:
index.append(i)
ENSO_label.append(1)
elif mean_nino_3_4_sst_anomaly[i] < 0.5:
index.append(i)
ENSO_label.append(0)
X = X[index]
y = ENSO_label
return (X, y)
X, y = prepare_model_input(mean_nino_3_4_sst_anomaly, X)
import math
math.isnan(X[2038, 64400])
# Transform masked array into numpy array by replacing nan values by 0.
import math
def transform_masked_array(X):
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if math.isnan(X[i, j]):
                X[i, j] = 0
    return X
X = transform_masked_array(X)
df2 = pd.DataFrame(X)
df2
# Split into training and testing set with test set fraction as .33
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
# Plot mean training and testing set on map
plot_map(lon[170:290], lat[84:96], X_train.mean(axis=0).reshape(96 - 84, 290 - 170))
plot_map(lon[170:290], lat[84:96], X_test.mean(axis=0).reshape(96 - 84, 290 - 170))
# Define and fit PCA
from sklearn.decomposition import PCA
n_components = 150
pca = PCA(n_components=n_components, svd_solver="randomized", whiten=True).fit(X_train)
X_train.shape
# Transform training and testing set using PCA
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_train_pca.shape
# Plot mean PCA map
plot_map(lon[170:290], lat[84:96], pca.mean_.reshape(96 - 84, 290 - 170))
# Plot explained variance and variance ratio on graph
plt.plot(pca.explained_variance_ratio_)
plt.show()
from tqdm.notebook import tqdm
import torch
# Create a device variable which will be used to shift model and data to GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Define a PyTorch NN classifier that achieves the highest testing accuracy (around .99) with a minimal number of layers.
class FFN(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(FFN, self).__init__()
self.fc1 = torch.nn.Linear(input_dim, hidden_dim)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc3 = torch.nn.Linear(hidden_dim, output_dim)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
out = self.sigmoid(out)
return out
# Define training function
def train(model, train_loader, optimizer, criterion, device):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
# Define testing function
def test(model, test_loader, criterion, device):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target).item() # sum up batch loss
pred = output.argmax(
dim=1, keepdim=True
) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_accuracy = 100.0 * correct / len(test_loader.dataset)
return test_loss, test_accuracy
# First create a pytorch dataset from the numpy train data
dataset_train = torch.utils.data.TensorDataset(
    torch.from_numpy(X_train_pca).float(), torch.from_numpy(np.array(y_train)).long()
)
# Create a dataloader object which will create batches of data
dataloader_train = torch.utils.data.DataLoader(
dataset_train, batch_size=64, shuffle=True
)
# Train the classifier
model = FFN(n_components, 100, 2).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = torch.nn.CrossEntropyLoss()
for epoch in tqdm(range(100)):
train(model, dataloader_train, optimizer, criterion, device)
test_loss, test_accuracy = test(model, dataloader_train, criterion, device)
print(
"Epoch: {}, Test Loss: {}, Test Accuracy: {}".format(
epoch, test_loss, test_accuracy
)
)
# Report prediction accuracy and confusion matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
import pandas as pd
y_pred = (
model(torch.from_numpy(X_test_pca).float().to(device)).argmax(dim=1).cpu().numpy()
)
print("Test Accuracy: {}".format(np.mean(y_pred == y_test)))
cm = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(
cm,
index=[i for i in ["El Nino", "La Nina"]],
columns=[i for i in ["El Nino", "La Nina"]],
)
# randomly select 5 maps from testing set, plot them on map, predict classes using their PCA data and report them along with their true label.
import random
for i in range(5):
    index = random.randint(0, len(X_test_pca) - 1)  # randint is inclusive on both ends
plot_map(lon[170:290], lat[84:96], X_test[index].reshape(96 - 84, 290 - 170))
print("True label: ", y_test[index])
print(
"Predicted label: ",
model(torch.from_numpy(X_test_pca[index].reshape(1, -1)).float().to(device))
.argmax(dim=1)
.cpu()
.numpy()[0],
)
# Plot confusion matrix
plt.figure(figsize=(10, 7))
sns.heatmap(df_cm, annot=True)
|
import pandas as pd
import rasterio
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset, DataLoader
import flax
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.training import train_state
from flax import struct, jax_utils
from flax.training.common_utils import shard
import optax
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import functools
from typing import Any, List, Type, Union, Optional, Dict
import albumentations as albu
import random
import shutil
# GPU and TPU checkpoint saving didn't work the same way
try:
from flax.training import orbax_utils
from orbax.checkpoint import PyTreeCheckpointer
USE_ORBAX_WITH_FLAX = True
except:
from orbax.checkpoint import (
Checkpointer,
PyTreeCheckpointHandler,
JsonCheckpointHandler,
PyTreeCheckpointer,
)
import nest_asyncio
nest_asyncio.apply()
USE_ORBAX_WITH_FLAX = False
class CFG:
    # if you want to train the model, set inference = False
    # if you want to test the model, set inference = True and point `pretrained` to the actual checkpoint folder
    # the other CFG parameters won't be used if you set inference = True (except seed, test_size and channels)
inference = True
pretrained = "/kaggle/input/deeplabv3-resnet-101/ckpt"
# you can change these parameters, but you don't have to
seed = 42
# specify correct optimizer name for optax (getattr(optax, optimizer_name: str))
# https://optax.readthedocs.io/en/latest/api.html - list of optimizers
optimizer = "adam"
# specify correct parameters dict, you can find them here - https://optax.readthedocs.io/en/latest/api.html
optimizer_params = {"b1": 0.95, "b2": 0.98, "eps": 1e-8}
# scheduler_params with such keys will be set to ttl_iters after calculating of total steps (ttl_iters)
ttl_iters_keys = ["transition_steps", "decay_steps"]
# specify correct scheduler name for optax (getattr(optax, scheduler_name: str))
# https://optax.readthedocs.io/en/latest/api.html#schedules - list of schedulers
scheduler = "cosine_onecycle_schedule"
# specify correct parameters dict, you can find them here - https://optax.readthedocs.io/en/latest/api.html#schedules
scheduler_params = {
"transition_steps": None,
"peak_value": 1e-2,
"pct_start": 0.25,
"div_factor": 25,
"final_div_factor": 100,
}
# hyperparameters
epochs = 50
test_size = 0.1
batch_size = 32
# input image shape, currently using 10 of 11 Landsat-8 channels, excluding channel number 8
# list of Landsat-8 channels - http://magnetometry.ru/study/tables/landsat8.pdf
shape = (1, 256, 256, 10)
# if you want to use specific channels to train the model, specify them in Tuple[int] format and change the shape tuple to the correct format
channels = None
# number of workers for torch DataLoader, don't set too high, I prefer to use 4 workers
num_workers = 4
# path to save checkpoint
ckpt_path = "./ckpt"
# metadata keys
metadata = ["config", "model", "loss"]
# which architecture to use, only when inference = False
model = "PAN_ResNet101"
class DEEPLABV3_RESNET18:
block = "BasicBlock"
layers = [2, 2, 2, 2]
replace_stride_with_dilation = [False, False, False]
strides = [2, 2, 2]
upsampling = 32
decoder_channels = 256
atrous_rates = (6, 12, 24)
classes = 1
activation = ""
class DEEPLABV3_RESNET34:
block = "BasicBlock"
layers = [3, 4, 6, 3]
replace_stride_with_dilation = [False, False, False]
strides = [2, 2, 2]
upsampling = 32
decoder_channels = 256
atrous_rates = (6, 12, 24)
classes = 1
activation = ""
class DEEPLABV3_RESNET50:
block = "Bottleneck"
layers = [3, 4, 6, 3]
replace_stride_with_dilation = [False, True, True]
strides = [2, 2, 4]
upsampling = 8
decoder_channels = 512
atrous_rates = (6, 12, 24)
classes = 1
activation = ""
class DEEPLABV3_RESNET101:
block = "Bottleneck"
layers = [3, 4, 23, 3]
replace_stride_with_dilation = [False, True, True]
strides = [2, 2, 4]
upsampling = 8
decoder_channels = 512
atrous_rates = (6, 12, 24)
classes = 1
activation = ""
class DEEPLABV3_RESNET152:
block = "Bottleneck"
layers = [3, 8, 36, 3]
replace_stride_with_dilation = [False, True, True]
strides = [2, 2, 4]
upsampling = 8
decoder_channels = 512
atrous_rates = (6, 12, 24)
classes = 1
activation = ""
class PAN_RESNET18:
block = "BasicBlock"
layers = [2, 2, 2, 2]
replace_stride_with_dilation = [False, False, False]
strides = [2, 2, 2]
upsampling = 4
decoder_channels = 32
classes = 1
activation = ""
class PAN_RESNET34:
block = "BasicBlock"
layers = [3, 4, 6, 3]
replace_stride_with_dilation = [False, False, False]
strides = [2, 2, 2]
upsampling = 4
decoder_channels = 32
classes = 1
activation = ""
class PAN_RESNET50:
block = "Bottleneck"
layers = [3, 4, 6, 3]
replace_stride_with_dilation = [False, False, True]
strides = [2, 2, 2]
upsampling = 4
decoder_channels = 32
classes = 1
activation = ""
class PAN_RESNET101:
block = "Bottleneck"
layers = [3, 4, 23, 3]
replace_stride_with_dilation = [False, False, True]
strides = [2, 2, 2]
upsampling = 4
decoder_channels = 32
classes = 1
activation = ""
class PAN_RESNET152:
block = "Bottleneck"
layers = [3, 8, 36, 3]
replace_stride_with_dilation = [False, False, True]
strides = [2, 2, 2]
upsampling = 4
decoder_channels = 32
classes = 1
activation = ""
class LOSS:
alpha = 0.3
beta = 0.7
gamma = 1.0
delta = 0.2
theta = 0.8
mu = 0.5
smooth = 1e-8
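# A minimal sketch (an assumption, not the notebook's actual training code) of how the CFG
# optimizer/scheduler names above could be resolved through optax via getattr, filling in the
# scheduler parameters listed in ttl_iters_keys once the total number of steps is known.
def _build_optimizer_sketch(cfg, ttl_iters: int):
    # copy the scheduler params and fill the step-dependent ones (e.g. transition_steps)
    sched_params = dict(cfg.scheduler_params)
    for key in cfg.ttl_iters_keys:
        if key in sched_params and sched_params[key] is None:
            sched_params[key] = ttl_iters
    schedule = getattr(optax, cfg.scheduler)(**sched_params)
    # e.g. optax.adam(learning_rate=schedule, b1=0.95, b2=0.98, eps=1e-8)
    return getattr(optax, cfg.optimizer)(learning_rate=schedule, **cfg.optimizer_params)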
rng = jax.random.PRNGKey(CFG.seed)
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # keep cuDNN deterministic for reproducibility
seed_everything(CFG.seed)
print(flax.__version__)
print(jax.devices())
# read image into np.ndarray
def read_img(path: str, channels: List[int] = None):
if channels is None:
img = rasterio.open(path).read().transpose((1, 2, 0))
else:
img = rasterio.open(path).read(channels).transpose((1, 2, 0))
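    # normalize to [0, 1]; 65535 is the max uint16 value (assuming uint16 Landsat-8 bands)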
img = np.float32(img) / 65535
return img
# read mask into np.ndarray
def read_mask(path: str):
mask = rasterio.open(path).read().transpose((1, 2, 0))
mask = np.int32(mask)
return mask
# full dataset
# df = pd.read_csv('/kaggle/input/fire-segmentation-db/fire-segmentation-db.csv')
# cleaned dataset
df = pd.read_csv("/kaggle/input/fire-segmentation-clean-db/fire-segmentation-db.csv")
# read single sample
iloc = 3402
img = read_img(df.iloc[iloc]["image"], channels=(7, 6, 2))
mask = read_mask(df.iloc[iloc]["mask"])
# print sample shape
print(img.shape)
print(mask.shape)
# visualize image sample
_ = plt.imshow(img)
# visualize mask sample
_ = plt.imshow(mask)
class FireDataset(Dataset):
def __init__(
self,
df: Any,
transform: Any = None,
inference: bool = False,
channels: List[int] = None,
):
self.df = df
self.transform = transform
self.inference = inference
self.channels = channels
def __len__(self):
return len(self.df)
def _read_img(self, path: str, channels: List[int]):
if channels:
img = rasterio.open(path).read(channels).transpose((1, 2, 0))
else:
img = rasterio.open(path).read().transpose((1, 2, 0))
img = np.float32(img) / 65535
return img
def _read_mask(self, path: str):
mask = rasterio.open(path).read().transpose((1, 2, 0))
mask = np.int32(mask)
return mask
def __getitem__(self, idx: int):
row = self.df.iloc[idx]
image = self._read_img(row["image"], self.channels)
if self.inference:
return image
mask = self._read_mask(row["mask"])
if self.transform:
sample = self.transform(image=image, mask=mask)
image, mask = sample["image"], sample["mask"]
return image, mask
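# A minimal usage sketch of FireDataset (illustrative only; the actual train/val datasets and
# augmentations are assembled further below -- the flip transform here is just an assumption).
_sketch_transform = albu.Compose([albu.HorizontalFlip(p=0.5)])
_sketch_ds = FireDataset(df, transform=_sketch_transform, channels=CFG.channels)
_sketch_dl = DataLoader(
    _sketch_ds, batch_size=CFG.batch_size, shuffle=True, num_workers=CFG.num_workers
)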
def conv3x3(out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1):
"""3x3 convolution with padding"""
return nn.Conv(
features=out_planes,
kernel_size=(3, 3),
strides=stride,
padding=dilation,
feature_group_count=groups,
use_bias=False,
kernel_dilation=dilation,
)
def conv1x1(out_planes: int, stride: int = 1):
"""1x1 convolution"""
return nn.Conv(
features=out_planes,
kernel_size=(1, 1),
strides=stride,
padding=0,
use_bias=False,
)
class BasicBlock(nn.Module):
planes: int
stride: int = 1
downsample: Any = None
groups: int = 1
base_width: int = 64
dilation: int = 1
expansion: int = 1
train: bool = True
def setup(self):
if self.groups != 1 or self.base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if self.dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = conv3x3(self.planes, self.stride)
self.conv2 = conv3x3(self.planes)
self.bn1 = nn.BatchNorm(use_running_average=not self.train)
self.bn2 = nn.BatchNorm(use_running_average=not self.train)
self.bn3 = nn.BatchNorm(use_running_average=not self.train)
@nn.compact
def __call__(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = nn.activation.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
identity = self.bn3(identity)
out += identity
out = nn.activation.relu(out)
return out
class Bottleneck(nn.Module):
planes: int
stride: int = 1
downsample: Any = None
groups: int = 1
base_width: int = 64
dilation: int = 1
expansion: int = 4
train: bool = True
def setup(self):
width = int(self.planes * (self.base_width / 64.0)) * self.groups
self.conv1 = conv1x1(width)
self.conv2 = conv3x3(width, self.stride, self.groups, self.dilation)
self.conv3 = conv1x1(self.planes * self.expansion)
self.bn1 = nn.BatchNorm(use_running_average=not self.train)
self.bn2 = nn.BatchNorm(use_running_average=not self.train)
self.bn3 = nn.BatchNorm(use_running_average=not self.train)
self.bn4 = nn.BatchNorm(use_running_average=not self.train)
def __call__(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = nn.activation.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = nn.activation.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
identity = self.bn4(identity)
out += identity
out = nn.activation.relu(out)
return out
class ResNetModule(nn.Module):
block: Type[Union[BasicBlock, Bottleneck]]
layers: List[int]
groups: int = 1
width_per_group: int = 64
strides: Optional[List[int]] = (2, 2, 2)
replace_stride_with_dilation: Optional[List[bool]] = None
train: bool = True
def setup(self):
self.repl = self.replace_stride_with_dilation
if self.repl is None:
self.repl = [False, False, False]
if len(self.repl) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {self.repl}"
)
self.inplanes = 64
self.dilation = 1
self.base_width = self.width_per_group
self.conv1 = nn.Conv(
self.inplanes, kernel_size=(7, 7), strides=2, padding=3, use_bias=False
)
self.norm = nn.BatchNorm(use_running_average=not self.train)
self.layer1 = self._make_layer(self.block, 64, self.layers[0])
self.layer2 = self._make_layer(
self.block, 128, self.layers[1], stride=self.strides[0], dilate=self.repl[0]
)
self.layer3 = self._make_layer(
self.block, 256, self.layers[2], stride=self.strides[1], dilate=self.repl[1]
)
self.layer4 = self._make_layer(
self.block, 512, self.layers[3], stride=self.strides[2], dilate=self.repl[2]
)
def _make_layer(
self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = conv1x1(planes * block.expansion, stride)
layers = []
layers.append(
block(
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
train=self.train,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
train=self.train,
)
)
return layers
def __call__(self, x):
x = self.conv1(x)
x = self.norm(x)
x = nn.activation.relu(x)
x = nn.max_pool(
x, window_shape=(3, 3), strides=(2, 2), padding=((0, 1), (0, 1))
)
for blocks in self.layer1:
x = blocks(x)
f1 = x
for blocks in self.layer2:
x = blocks(x)
f2 = x
for blocks in self.layer3:
x = blocks(x)
f3 = x
for blocks in self.layer4:
x = blocks(x)
f4 = x
return [f1, f2, f3, f4]
class ResNet(nn.Module):
block: Type[Union[BasicBlock, Bottleneck]]
layers: List[int]
groups: int = 1
width_per_group: int = 64
strides: Optional[List[int]] = (2, 2, 2)
replace_stride_with_dilation: Optional[List[bool]] = None
@nn.compact
def __call__(self, x, train: bool):
x = ResNetModule(
block=self.block,
layers=self.layers,
groups=self.groups,
width_per_group=self.width_per_group,
strides=self.strides,
replace_stride_with_dilation=self.replace_stride_with_dilation,
train=train,
)(x)
return x
class ASPPConv(nn.Module):
out_channels: int
dilation: int
train: bool = True
@nn.compact
def __call__(self, x):
x = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=self.dilation,
kernel_dilation=self.dilation,
use_bias=False,
)(x)
x = nn.BatchNorm(use_running_average=not self.train)(x)
x = nn.activation.relu(x)
return x
class ASPPPooling(nn.Module):
out_channels: int
train: bool = True
@nn.compact
def __call__(self, x):
shape = x.shape
size = x.shape[1], x.shape[2]
x = nn.avg_pool(x, window_shape=size, strides=size, padding=((0, 0), (0, 0)))
x = nn.Conv(
self.out_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding=0,
use_bias=False,
)(x)
x = nn.BatchNorm(use_running_average=not self.train)(x)
x = nn.activation.relu(x)
x = jax.image.resize(
x, shape=(shape[0], shape[1], shape[2], x.shape[3]), method="bilinear"
)
return x
class ASPP(nn.Module):
out_channels: int = 256
atrous_rates: List[int] = (12, 24, 36)
separable: bool = False
train: bool = True
def setup(self):
self.mod1 = nn.Sequential(
[
conv1x1(self.out_channels),
nn.BatchNorm(use_running_average=not self.train),
]
)
rate1, rate2, rate3 = self.atrous_rates
self.aspp1 = ASPPConv(
out_channels=self.out_channels, dilation=rate1, train=self.train
)
self.aspp2 = ASPPConv(
out_channels=self.out_channels, dilation=rate2, train=self.train
)
self.aspp3 = ASPPConv(
out_channels=self.out_channels, dilation=rate3, train=self.train
)
self.aspp_pool = ASPPPooling(out_channels=self.out_channels, train=self.train)
self.project = nn.Sequential(
[
conv1x1(self.out_channels),
nn.BatchNorm(use_running_average=not self.train),
]
)
self.modules = [self.aspp1, self.aspp2, self.aspp3, self.aspp_pool]
def __call__(self, x):
res = [nn.activation.relu(self.mod1(x))]
for mod in self.modules:
res.append(mod(x))
out = jnp.concatenate(res, axis=3)
prj = nn.activation.relu(self.project(out))
return prj
class DeepLabV3Decoder(nn.Module):
out_channels: int = 256
atrous_rates: List[int] = (12, 24, 36)
@nn.compact
def __call__(self, features, train: bool):
x = features[-1]
x = ASPP(
out_channels=self.out_channels, atrous_rates=self.atrous_rates, train=train
)(x)
x = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=(1, 1),
use_bias=False,
)(x)
x = nn.BatchNorm(use_running_average=not train)(x)
x = nn.activation.relu(x)
return x
class DeepLabV3(nn.Module):
block: str
layers: List[int]
decoder_channels: int = 256
atrous_rates: List[int] = (12, 24, 36)
classes: int = 1
upsampling: int = 8
activation: str = ""
strides: Optional[List[int]] = (2, 2, 2)
replace_stride_with_dilation: Optional[List[bool]] = None
def setup(self):
block = BasicBlock if self.block == "BasicBlock" else Bottleneck
self.encoder = ResNet(
block=block,
layers=self.layers,
strides=self.strides,
replace_stride_with_dilation=self.replace_stride_with_dilation,
)
self.decoder = DeepLabV3Decoder(
out_channels=self.decoder_channels, atrous_rates=self.atrous_rates
)
self.segmentation_head = SegmentationHead(
out_channels=self.classes,
activation=self.activation,
upsampling=self.upsampling,
)
def __call__(self, x, train: bool):
features = self.encoder(x, train)
decoder_output = self.decoder(features, train)
masks = self.segmentation_head(decoder_output)
return masks
class ConvBnRelu(nn.Module):
out_channels: int
kernel_size: List[int]
stride: int = 1
padding: int = 0
dilation: int = 1
groups: int = 1
bias: bool = True
add_relu: bool = True
interpolate: bool = False
train: bool = True
@nn.compact
def __call__(self, x):
x = nn.Conv(
features=self.out_channels,
kernel_size=self.kernel_size,
strides=self.stride,
padding=self.padding,
kernel_dilation=self.dilation,
feature_group_count=self.groups,
use_bias=self.bias,
)(x)
x = nn.BatchNorm(use_running_average=not self.train)(x)
if self.add_relu:
x = nn.activation.relu(x)
if self.interpolate:
b, h, w, c = x.shape
x = jax.image.resize(x, shape=(b, h * 2, w * 2, c), method="bilinear")
return x
class FPABlock(nn.Module):
out_channels: int
train: bool = True
def setup(self):
# global pooling branch
self.branch1 = ConvBnRelu(
out_channels=self.out_channels,
kernel_size=(1, 1),
stride=1,
padding=0,
train=self.train,
)
# middle branch
self.mid = ConvBnRelu(
out_channels=self.out_channels,
kernel_size=(1, 1),
stride=1,
padding=0,
train=self.train,
)
self.down1 = ConvBnRelu(
out_channels=1, kernel_size=(7, 7), stride=1, padding=3, train=self.train
)
self.down2 = ConvBnRelu(
out_channels=1, kernel_size=(5, 5), stride=1, padding=2, train=self.train
)
self.down3 = nn.Sequential(
[
ConvBnRelu(
out_channels=1,
kernel_size=(3, 3),
stride=1,
padding=1,
train=self.train,
),
ConvBnRelu(
out_channels=1,
kernel_size=(3, 3),
stride=1,
padding=1,
train=self.train,
),
]
)
self.conv2 = ConvBnRelu(
out_channels=1, kernel_size=(5, 5), stride=1, padding=2, train=self.train
)
self.conv1 = ConvBnRelu(
out_channels=1, kernel_size=(7, 7), stride=1, padding=3, train=self.train
)
def __call__(self, x):
size = x.shape[1], x.shape[2]
b, h, w, c = x.shape
b1 = nn.avg_pool(x, window_shape=size, strides=size, padding=((0, 0), (0, 0)))
b1 = self.branch1(b1)
b1 = jax.image.resize(b1, shape=(b, h, w, b1.shape[3]), method="bilinear")
mid = self.mid(x)
x1 = nn.max_pool(
x, window_shape=(2, 2), strides=(2, 2), padding=((0, 0), (0, 0))
)
x1 = self.down1(x1)
x2 = nn.max_pool(
x1, window_shape=(2, 2), strides=(2, 2), padding=((0, 0), (0, 0))
)
x2 = self.down2(x2)
x3 = nn.max_pool(
x2, window_shape=(2, 2), strides=(2, 2), padding=((0, 0), (0, 0))
)
x3 = self.down3(x3)
x3 = jax.image.resize(
x3, shape=(b, h // 4, w // 4, x3.shape[3]), method="bilinear"
)
x2 = self.conv2(x2)
x = x2 + x3
x = jax.image.resize(
x, shape=(b, h // 2, w // 2, x.shape[3]), method="bilinear"
)
x1 = self.conv1(x1)
x = x + x1
x = jax.image.resize(x, shape=(b, h, w, x.shape[3]), method="bilinear")
x = jax.lax.mul(x, mid)
x = x + b1
return x
class GAUBlock(nn.Module):
out_channels: int
train: bool = True
@nn.compact
def __call__(self, x, y):
"""
Args:
x: low level feature
y: high level feature
"""
xsize = x.shape[1], x.shape[2]
bx, hx, wx, cx = x.shape
ysize = y.shape[1], y.shape[2]
by, hy, wy, cy = y.shape
y_up = jax.image.resize(y, shape=(bx, hx, wx, y.shape[3]), method="bilinear")
x = ConvBnRelu(
out_channels=self.out_channels,
kernel_size=(3, 3),
padding=1,
train=self.train,
)(x)
y = nn.avg_pool(y, window_shape=ysize, strides=ysize, padding=((0, 0), (0, 0)))
y = ConvBnRelu(
out_channels=self.out_channels,
kernel_size=(1, 1),
add_relu=False,
train=self.train,
)(y)
y = nn.activation.sigmoid(y)
z = jax.lax.mul(x, y)
return y_up + z
class PANDecoder(nn.Module):
decoder_channels: int
@nn.compact
def __call__(self, features, train: bool):
x5 = FPABlock(out_channels=self.decoder_channels, train=train)(
features[-1]
) # 1/32
x4 = GAUBlock(out_channels=self.decoder_channels, train=train)(
features[-2], x5
) # 1/16
x3 = GAUBlock(out_channels=self.decoder_channels, train=train)(
features[-3], x4
) # 1/8
x2 = GAUBlock(out_channels=self.decoder_channels, train=train)(
features[-4], x3
) # 1/4
return x2
class PAN(nn.Module):
block: str
layers: List[int]
decoder_channels: int = 32
classes: int = 1
upsampling: int = 4
activation: str = ""
strides: Optional[List[int]] = (2, 2, 2)
replace_stride_with_dilation: Optional[List[bool]] = None
def setup(self):
block = BasicBlock if self.block == "BasicBlock" else Bottleneck
self.encoder = ResNet(
block=block,
layers=self.layers,
strides=self.strides,
replace_stride_with_dilation=self.replace_stride_with_dilation,
)
self.decoder = PANDecoder(decoder_channels=self.decoder_channels)
self.segmentation_head = SegmentationHead(
out_channels=self.classes,
activation=self.activation,
upsampling=self.upsampling,
)
def __call__(self, x, train: bool):
features = self.encoder(x, train)
decoder_output = self.decoder(features, train)
masks = self.segmentation_head(decoder_output)
return masks
class SegmentationHead(nn.Module):
out_channels: int
activation: str = ""
upsampling: int = 8
@nn.compact
def __call__(self, x):
ks = 3
x = nn.Conv(
self.out_channels, kernel_size=(ks, ks), strides=(1, 1), padding=ks // 2
)(x)
if self.upsampling > 1:
b, h, w, c = x.shape
x = jax.image.resize(
x,
shape=(b, h * self.upsampling, w * self.upsampling, c),
method="bilinear",
)
if len(self.activation) > 0:
x = getattr(nn.activation, self.activation)(x)
return x
def get_model(name: str, only_dct: bool = False, dct: Dict[str, Any] = None):
    # Map each model name to its (config class, model class) pair.
    # Unknown names yield None; otherwise return [config_dict, model_or_None].
    registry = {
        "DeepLabV3_ResNet18": (DEEPLABV3_RESNET18, DeepLabV3),
        "DeepLabV3_ResNet34": (DEEPLABV3_RESNET34, DeepLabV3),
        "DeepLabV3_ResNet50": (DEEPLABV3_RESNET50, DeepLabV3),
        "DeepLabV3_ResNet101": (DEEPLABV3_RESNET101, DeepLabV3),
        "DeepLabV3_ResNet152": (DEEPLABV3_RESNET152, DeepLabV3),
        "PAN_ResNet18": (PAN_RESNET18, PAN),
        "PAN_ResNet34": (PAN_RESNET34, PAN),
        "PAN_ResNet50": (PAN_RESNET50, PAN),
        "PAN_ResNet101": (PAN_RESNET101, PAN),
        "PAN_ResNet152": (PAN_RESNET152, PAN),
    }
    if name not in registry:
        return None
    config_cls, model_cls = registry[name]
    res = [class_to_dct(config_cls) if not dct else dct, None]
    if not only_dct:
        res[1] = model_cls(**res[0])
    return res
class TrainState(train_state.TrainState):
batch_stats: Any
@functools.partial(jax.pmap, static_broadcasted_argnums=(1, 2))
def create_train_state(rng: Any, lr_function: Any, shape: List[int]):
_, model = get_model(CFG.model)
variables = model.init(rng, jnp.ones(shape), train=True)
params = variables["params"]
batch_stats = variables["batch_stats"]
tx = getattr(optax, CFG.optimizer)(lr_function, **CFG.optimizer_params)
return TrainState.create(
apply_fn=model.apply, params=params, batch_stats=batch_stats, tx=tx
)
def create_learning_rate_fn(ttl_iters: int):
scheduler = getattr(optax, CFG.scheduler)
for key in CFG.ttl_iters_keys:
if key in CFG.scheduler_params.keys():
CFG.scheduler_params[key] = ttl_iters
return scheduler(**CFG.scheduler_params)
@functools.partial(jax.pmap, axis_name="batch")
def train_step(state: Any, image: Any, mask: Any):
def loss_fn(params: Any):
logits, updates = state.apply_fn(
{"params": params, "batch_stats": state.batch_stats},
image,
train=True,
mutable=["batch_stats"],
)
labels = mask
alpha = LOSS.alpha
beta = LOSS.beta
gamma = LOSS.gamma
delta = LOSS.delta
theta = LOSS.theta
mu = LOSS.mu
smooth = LOSS.smooth
preds = nn.activation.sigmoid(logits)
flat_logits = jnp.ravel(preds)
flat_labels = jnp.ravel(labels)
tp = jnp.sum(flat_logits * flat_labels)
fp = jnp.sum(flat_logits * (1 - flat_labels))
fn = jnp.sum((1 - flat_logits) * flat_labels)
union0 = jnp.clip((1 - preds) + (1 - labels), a_min=0, a_max=1)
intersection0 = (1 - preds) * (1 - labels)
iou0 = jnp.sum(intersection0) / (jnp.sum(union0) + smooth)
union1 = jnp.clip(preds + labels, a_min=0, a_max=1)
intersection1 = preds * labels
iou1 = jnp.sum(intersection1) / (jnp.sum(union1) + smooth)
tversky_loss = 1 - (tp + smooth) / (tp + alpha * fp + beta * fn + smooth)
tversky_focal_loss = tversky_loss**gamma
miou_loss = (1 - iou0) * delta + (1 - iou1) * theta
loss = mu * tversky_focal_loss + (1 - mu) * miou_loss
return loss, (logits, updates)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, (logits, updates)), grads = grad_fn(state.params)
state = state.apply_gradients(grads=grads)
state = state.replace(batch_stats=updates["batch_stats"])
return state, loss
@functools.partial(jax.pmap, axis_name="batch")
def eval_step(state: Any, image: Any, mask: Any):
def loss_fn(params: Any):
logits = state.apply_fn(
{"params": params, "batch_stats": state.batch_stats}, image, train=False
)
labels = mask
alpha = LOSS.alpha
beta = LOSS.beta
gamma = LOSS.gamma
delta = LOSS.delta
theta = LOSS.theta
mu = LOSS.mu
smooth = LOSS.smooth
preds = nn.activation.sigmoid(logits)
flat_logits = jnp.ravel(preds)
flat_labels = jnp.ravel(labels)
tp = jnp.sum(flat_logits * flat_labels)
fp = jnp.sum(flat_logits * (1 - flat_labels))
fn = jnp.sum((1 - flat_logits) * flat_labels)
union0 = jnp.clip((1 - preds) + (1 - labels), a_min=0, a_max=1)
intersection0 = (1 - preds) * (1 - labels)
iou0 = jnp.sum(intersection0) / (jnp.sum(union0) + smooth)
union1 = jnp.clip(preds + labels, a_min=0, a_max=1)
intersection1 = preds * labels
iou1 = jnp.sum(intersection1) / (jnp.sum(union1) + smooth)
tversky_loss = 1 - (tp + smooth) / (tp + alpha * fp + beta * fn + smooth)
tversky_focal_loss = tversky_loss**gamma
miou_loss = (1 - iou0) * delta + (1 - iou1) * theta
loss = mu * tversky_focal_loss + (1 - mu) * miou_loss
return loss
loss = loss_fn(state.params)
return loss
@functools.partial(jax.pmap, axis_name="batch")
def compute_metrics(state: Any, image: Any, mask: Any):
logits = state.apply_fn(
{"params": state.params, "batch_stats": state.batch_stats}, image, train=False
)
preds = nn.activation.sigmoid(logits) > 0.5
labels = mask
smooth = LOSS.smooth
tp = jnp.sum((preds == 1) * (labels == 1))
fp = jnp.sum((preds == 1) * (labels == 0))
tn = jnp.sum((preds == 0) * (labels == 0))
fn = jnp.sum((preds == 0) * (labels == 1))
precision = tp / (tp + fp + smooth)
recall = tp / (tp + fn + smooth)
union0 = jnp.clip((1 - preds) + (1 - labels), a_min=0, a_max=1)
intersection0 = (1 - preds) * (1 - labels)
iou0 = jnp.sum(intersection0) / (jnp.sum(union0) + smooth)
union1 = jnp.clip(preds + labels, a_min=0, a_max=1)
intersection1 = preds * labels
iou1 = jnp.sum(intersection1) / (jnp.sum(union1) + smooth)
miou = (iou0 + iou1) / 2
return precision, recall, iou0, iou1, miou
def train_epoch(state: Any, train_loader: Any, epoch: int, lr_fn: Any):
pbar = tqdm(train_loader)
pbar.set_description(f"train epoch: {epoch + 1}")
epoch_loss = 0.0
for step, batch in enumerate(pbar):
image, mask = batch
image = shard(jnp.array(image, dtype=jnp.float32))
mask = shard(jnp.array(mask, dtype=jnp.int32))
state, loss = train_step(state, image, mask)
if USE_ORBAX_WITH_FLAX:
lr = lr_fn(jax_utils.unreplicate(state).step)
else:
lr = lr_fn(state.step)[0]
epoch_loss += jax_utils.unreplicate(loss)
pbar.set_description(
f"train epoch: {epoch + 1} loss: {(epoch_loss / (step + 1)):.3f} lr: {lr:.6f}"
)
return state
def test_epoch(state: Any, test_loader: Any, epoch: int):
pbar = tqdm(test_loader)
pbar.set_description(f"test epoch: {epoch + 1}")
num = len(test_loader)
epoch_loss = 0.0
epoch_precision = 0.0
epoch_recall = 0.0
epoch_iou0 = 0.0
epoch_iou1 = 0.0
epoch_miou = 0.0
for step, batch in enumerate(pbar):
image, mask = batch
image = shard(jnp.array(image, dtype=jnp.float32))
mask = shard(jnp.array(mask, dtype=jnp.int32))
loss = eval_step(state, image, mask)
precision, recall, iou0, iou1, miou = compute_metrics(state, image, mask)
epoch_loss += jax_utils.unreplicate(loss)
epoch_precision += jax_utils.unreplicate(precision)
epoch_recall += jax_utils.unreplicate(recall)
epoch_iou0 += jax_utils.unreplicate(iou0)
epoch_iou1 += jax_utils.unreplicate(iou1)
epoch_miou += jax_utils.unreplicate(miou)
pbar_str = f"test epoch: {epoch + 1} "
pbar_str += f"loss: {(epoch_loss / (step + 1)):.3f} "
pbar_str += f"precision: {(epoch_precision / (step + 1)):.3f} "
pbar_str += f"recall: {(epoch_recall / (step + 1)):.3f} "
pbar_str += f"iou0: {(epoch_iou0 / (step + 1)):.3f} "
pbar_str += f"iou1: {(epoch_iou1 / (step + 1)):.3f} "
pbar_str += f"miou: {(epoch_miou / (step + 1)):.3f}"
pbar.set_description(pbar_str)
epoch_loss /= num
epoch_precision /= num
epoch_recall /= num
epoch_iou0 /= num
epoch_iou1 /= num
epoch_miou /= num
metrics = {
"loss": epoch_loss,
"precision": epoch_precision,
"recall": epoch_recall,
"iou0": epoch_iou0,
"iou1": epoch_iou1,
"miou": epoch_miou,
}
return metrics
def class_to_dct(cls: Any):
dct = {}
for attr in dir(cls):
if attr[:2] != "__" and attr[-2:] != "__":
dct[attr] = getattr(cls, attr)
return dct
def best_fn(metrics: Dict[str, float]):
return metrics["precision"] + metrics["recall"] + metrics["iou1"] + metrics["miou"]
def main(rng: Any, train_df: Any, test_df: Any):
# hyperparameters
epochs = CFG.epochs
test_size = CFG.test_size
batch_size = CFG.batch_size
shape = CFG.shape
channels = CFG.channels
num_workers = CFG.num_workers
# define transformations
transform = albu.Compose(
[
albu.Rotate((-45, 45)),
albu.HorizontalFlip(p=0.5),
albu.VerticalFlip(p=0.5),
albu.RandomBrightnessContrast(0.1, 0.1),
]
)
# create datasets
train_dataset = FireDataset(train_df, channels=channels, transform=transform)
test_dataset = FireDataset(test_df, channels=channels)
# create dataloaders
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
pin_memory=False,
num_workers=num_workers,
)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
pin_memory=False,
num_workers=num_workers,
)
# total steps
ttl_iters = epochs * len(train_loader)
# create lr_function
lr_fn = create_learning_rate_fn(ttl_iters)
# init PRNG and state
rng, init_rng = jax.random.split(rng)
state = create_train_state(
jax.random.split(init_rng, jax.device_count()), lr_fn, shape
)
if os.path.exists(CFG.ckpt_path):
print("ckpt cleaned")
shutil.rmtree(CFG.ckpt_path)
if not USE_ORBAX_WITH_FLAX:
print("metadata cleaned")
for metadata in CFG.metadata:
if os.path.exists(CFG.ckpt_path + "/" + metadata):
shutil.rmtree(CFG.ckpt_path + "/" + metadata)
model_dct, _ = get_model(CFG.model, only_dct=True)
metadata_dct = [class_to_dct(CFG), model_dct, class_to_dct(LOSS)]
if USE_ORBAX_WITH_FLAX:
orbax_checkpointer = PyTreeCheckpointer()
ckpt = {"state": jax_utils.unreplicate(state)}
for metadata_idx, metadata in enumerate(CFG.metadata):
ckpt[metadata] = metadata_dct[metadata_idx]
save_args = orbax_utils.save_args_from_target(ckpt)
save_dct = {"state": None}
for metadata_idx, metadata in enumerate(CFG.metadata):
save_dct[metadata] = metadata_dct[metadata_idx]
else:
metadata_ckptr = Checkpointer(JsonCheckpointHandler())
for metadata_idx, metadata in enumerate(CFG.metadata):
metadata_ckptr.save(
CFG.ckpt_path + "/" + metadata, metadata_dct[metadata_idx], force=True
)
ckptr = Checkpointer(PyTreeCheckpointHandler())
# train cycle
best_metrics = 0.0
for epoch in range(epochs):
state = train_epoch(state, train_loader, epoch, lr_fn)
metrics = test_epoch(state, test_loader, epoch)
save_state = jax_utils.unreplicate(state)
comb_metrics = best_fn(metrics)
if USE_ORBAX_WITH_FLAX:
if comb_metrics > best_metrics:
if os.path.exists(CFG.ckpt_path):
shutil.rmtree(CFG.ckpt_path)
best_metrics = comb_metrics
ckpt["state"] = save_state
save_args = orbax_utils.save_args_from_target(ckpt)
orbax_checkpointer.save(CFG.ckpt_path, ckpt, save_args=save_args)
else:
if comb_metrics > best_metrics:
best_metrics = comb_metrics
ckptr.save(CFG.ckpt_path, save_state, force=True)
return state
def inference(path: str, local: bool = False):
metadata_dct = []
if USE_ORBAX_WITH_FLAX:
orbax_checkpointer = PyTreeCheckpointer()
raw_restored = orbax_checkpointer.restore(path)
restored_state = raw_restored["state"]
for metadata in CFG.metadata:
restored_dct = raw_restored[metadata]
metadata_dct.append(restored_dct)
else:
if len(os.listdir(path)) > 1:
ckptr = Checkpointer(PyTreeCheckpointHandler())
restored_state = ckptr.restore(path)
metadata_ckptr = Checkpointer(JsonCheckpointHandler())
metadata_path = CFG.pretrained if local else CFG.ckpt_path
for metadata in CFG.metadata:
restored_dct = metadata_ckptr.restore(metadata_path + "/" + metadata)
metadata_dct.append(restored_dct)
else:
orbax_checkpointer = PyTreeCheckpointer()
raw_restored = orbax_checkpointer.restore(path)
restored_state = raw_restored["state"]
for metadata in CFG.metadata:
restored_dct = raw_restored[metadata]
metadata_dct.append(restored_dct)
config_dct = metadata_dct[0]
model_dct = metadata_dct[1]
_, model = get_model(config_dct["model"], dct=model_dct)
return model, restored_state, metadata_dct
def predict(model: Any, state: Any, img: Any):
jnp_img = jnp.array(img, dtype=jnp.float32)[jnp.newaxis, :, :, :]
logits = model.apply(
{"params": state["params"], "batch_stats": state["batch_stats"]},
jnp_img,
train=False,
)
preds = nn.activation.sigmoid(logits) > 0.5
return preds
def vizualize(model: Any, state: Any, img: Any, mask: Any):
# img - array from read_img function
preds = predict(model, state, img)
_, axs = plt.subplots(1, 2)
_ = axs[0].imshow(preds[0])
_ = axs[1].imshow(mask)
# split pandas dataframe
train_df, test_df = train_test_split(df, test_size=CFG.test_size, random_state=CFG.seed)
if not CFG.inference:
state = main(rng, train_df, test_df)
model, state, metadata = inference(CFG.ckpt_path, local=CFG.inference)
else:
model, state, metadata = inference(CFG.pretrained, local=CFG.inference)
# visualize sample test images
n = 50
rows = np.random.choice([i for i in range(len(test_df))], size=n)
for row in rows:
img = read_img(test_df.iloc[row]["image"], channels=CFG.channels)
mask = read_mask(test_df.iloc[row]["mask"])
vizualize(model, state, img, mask)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
pd.options.display.float_format = "{:.2f}".format
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
import pickle
from itertools import combinations
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima.model import ARIMA as ARIMA
import statsmodels.api as sm
import statsmodels.tsa.api as smt
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
X_full = pd.read_csv(
"/kaggle/input/godaddy-microbusiness-density-forecasting/train.csv", na_values=True
)
X_test_full = pd.read_csv(
"/kaggle/input/godaddy-microbusiness-density-forecasting/test.csv", na_values=True
)
sample_df = pd.read_csv(
"/kaggle/input/godaddy-microbusiness-density-forecasting/sample_submission.csv"
)
census_df = pd.read_csv(
"/kaggle/input/godaddy-microbusiness-density-forecasting/census_starter.csv"
)
census_df.info()
X_full.info()
X_test_full.info()
X_test_full = X_test_full.convert_dtypes()
X_full = X_full.convert_dtypes()
print(X_test_full.dtypes, "\n", X_full.dtypes)
X_full.isna().sum()
X_full[["microbusiness_density", "active"]] = X_full[
["microbusiness_density", "active"]
].fillna(X_full[["microbusiness_density", "active"]].median())
X_test_full.isna().sum()
X_full["first_day_of_month"] = pd.to_datetime(X_full["first_day_of_month"])
X_test_full["first_day_of_month"] = pd.to_datetime(X_test_full["first_day_of_month"])
df = X_full.copy()
def make_feature(df):
feature = pd.DataFrame()
feature["row_id"] = df["row_id"]
feature["microbusiness_density"] = df["microbusiness_density"]
feature["contry_code"] = df["cfips"] // 100
feature["state_code"] = df["cfips"] % 100
feature["first_day_of_month"] = df["first_day_of_month"]
feature["year"] = df["first_day_of_month"].dt.year
feature["month"] = df["first_day_of_month"].dt.month
feature["week"] = df["first_day_of_month"].dt.dayofweek
return feature
train_feature = make_feature(X_full)
train_feature
data = train_feature[["first_day_of_month", "microbusiness_density"]].copy()
data["Date"] = pd.to_datetime(data["first_day_of_month"])
data = data.drop(columns="first_day_of_month")
data = data.set_index("Date")
data
data = data.groupby(data.index).mean()
data.head()
portion_df = data.microbusiness_density["2019-08-01":"2022-10-01"]
portion_df
# Write the selected slice of the series to a submission-style CSV
output = pd.DataFrame({"Date": portion_df.index, "microbusiness_density": portion_df})
output.to_csv("submission.csv", index=False)
plt.figure(figsize=(15, 5))
data["microbusiness_density"].plot()
dec = sm.tsa.seasonal_decompose(data["microbusiness_density"], period=12).plot()
plt.show()
def test_stationarity(timeseries):
# Determing rolling statistics
MA = timeseries.rolling(window=12).mean()
MSTD = timeseries.rolling(window=12).std()
# Plot rolling statistics:
plt.figure(figsize=(15, 5))
orig = plt.plot(timeseries, color="blue", label="Original")
mean = plt.plot(MA, color="red", label="Rolling Mean")
std = plt.plot(MSTD, color="black", label="Rolling Std")
plt.legend(loc="best")
plt.title("Rolling Mean & Standard Deviation")
plt.show(block=False)
# Perform Dickey-Fuller test:
print("Results of Dickey-Fuller Test:")
dftest = adfuller(timeseries, autolag="AIC")
dfoutput = pd.Series(
dftest[0:4],
index=[
"Test Statistic",
"p-value",
"#Lags Used",
"Number of Observations Used",
],
)
for key, value in dftest[4].items():
dfoutput["Critical Value (%s)" % key] = value
print(dfoutput)
test_stationarity(data["microbusiness_density"])
data_diff = data.diff()
data_diff = data_diff.dropna()
dec = sm.tsa.seasonal_decompose(data_diff, period=12).plot()
plt.show()
test_stationarity(data_diff)
def tsplot(y, lags=None, figsize=(15, 7), style="bmh"):
if not isinstance(y, pd.Series):
y = pd.Series(y)
with plt.style.context(style):
fig = plt.figure(figsize=figsize)
layout = (2, 2)
ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
acf_ax = plt.subplot2grid(layout, (1, 0))
pacf_ax = plt.subplot2grid(layout, (1, 1))
y.plot(ax=ts_ax)
p_value = sm.tsa.stattools.adfuller(y)[1]
ts_ax.set_title(
"Time Series Analysis Plots\n Dickey-Fuller: p={0:.5f}".format(p_value)
)
smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)
smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)
plt.tight_layout()
tsplot(data_diff["microbusiness_density"])
data["microbusiness_density"]
data = data.convert_dtypes()
data.dtypes
np.asarray(data)
import pandas as pd
import numpy as np
from statsmodels.tsa.arima.model import ARIMA
# Convert Pandas data to NumPy data
numpy_data = data["microbusiness_density"].to_numpy(dtype=float)
# Fit ARIMA model to NumPy data
model = sm.tsa.arima.ARIMA(numpy_data, order=(0, 1, 0))
model_fit = model.fit()
# Print model summary
print(model_fit.summary())
# AIC -149.986
# BIC -148.348
p = 0
d = 1
q = 0
# ARIMA: autoregressive integrated moving average
model = sm.tsa.arima.ARIMA(numpy_data, order=(p, d, q))
results_ARIMA = model.fit()
plt.figure(figsize=(30, 6))
plt.plot(numpy_data)
plt.plot(results_ARIMA.fittedvalues, color="red")
# The residual sum of squares (RSS) is the absolute amount of unexplained variation,
# whereas
# R-squared is the explained variation as a proportion of total variation
plt.title("RSS: %.4f" % np.sum(results_ARIMA.resid**2))
plt.show()
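# A small numeric illustration of the comment above, assuming numpy_data and results_ARIMA
# from the cells just before: RSS is the unexplained variation and R^2 equals 1 - RSS/TSS.
rss_demo = np.sum(results_ARIMA.resid**2)
tss_demo = np.sum((numpy_data - numpy_data.mean()) ** 2)
print("RSS: %.4f, R^2: %.4f" % (rss_demo, 1 - rss_demo / tss_demo))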
# **ARIMA model is a combination of 3 models:**
# * AR (p): Auto Regressive
# * I (d): Integrated
# * MA (q): Moving Average
# (p, d, q) is known as the order of the ARIMA model. Values of these parameters are based on the models mentioned above.
# * p: number of auto regressive terms.
# * d: number of differencing orders required to make the time series stationary.
# * q: number of lagged forecast errors in the prediction equation.
# **Selection criteria for the order of the ARIMA model** (a short code sketch follows these notes):
# * p: lag value where the Partial Autocorrelation (PACF) graph cuts off or drops to 0 for the 1st time.
# * d: number of times differencing is carried out to make the time series stationary.
# * q: lag value where the Autocorrelation (ACF) graph crosses the upper confidence interval for the 1st time.
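# A minimal sketch of the selection heuristic described above, applied to the differenced
# series from earlier (data_diff). The ~95% band of 1.96/sqrt(N) and the 10 lags are
# illustrative assumptions; the ACF/PACF plots from tsplot remain the primary tool.
n_obs = len(data_diff)
band = 1.96 / np.sqrt(n_obs)
acf_vals = acf(data_diff["microbusiness_density"], nlags=10)
pacf_vals = pacf(data_diff["microbusiness_density"], nlags=10)
first_insig_pacf = next((lag for lag in range(1, 11) if abs(pacf_vals[lag]) < band), 1)
first_insig_acf = next((lag for lag in range(1, 11) if abs(acf_vals[lag]) < band), 1)
print("heuristic order suggestion: p~%d, d=1, q~%d" % (first_insig_pacf - 1, first_insig_acf - 1))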
# split_date = '2021-02-01'
# ts_test = data.loc[data.index < split_date].copy()
# ts_test
# ## test the ARIMA model on test dataset
# ## test the ARIMA model on test dataset
# # define function to get prediction for forecasting
# def StartARIMAForecasting(Actual, p, d, q):
# model = sm.tsa.arima.ARIMA(Actual, order=(p, d, q))
# model_fit = model.fit(disp=0)
# prediction = model_fit.forecast()[0]
# return prediction
# ts_test.iloc[0]
# ts_test = ts_test.reset_index(drop=True)
# for I in range(len(ts_test)):
# try:
# ActualValue = ts_test[I]
# except KeyError:
# # handle the KeyError exception
# print(f"KeyError: index {I} does not exist")
# continue
# #forecast value
# ForecastedValue = model_fit.forecast()[0]
# #calculate error
# Error = ActualValue - ForecastedValue
# #update the model with the actual value
# model_fit.update(ActualValue)
# output.head()
# # Save test predictions to file
# output = pd.DataFrame({'row_id': ts_test,
# 'microbusiness_density': ts_test})
# output.to_csv('submission.csv', index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
vehicleData = pd.read_csv("/kaggle/input/vehicle-dataset-from-cardekho/car data.csv")
vehicleData.head(25)
vehicleData.info()
vehicleData.describe()
# Select the categorical variables to be encoded
encodingVars = ["Car_Name", "Year", "Fuel_Type", "Seller_Type", "Transmission"]
# Convert the categorical variables to one-hot encoding
vehicleData_encoded = pd.get_dummies(vehicleData, columns=encodingVars)
vehicleData_encoded
# from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
vehicleData_encoded.drop("Selling_Price", axis=1),
vehicleData_encoded["Selling_Price"],
test_size=0.05,
random_state=42,
)
# Initialize the random forest regressor with 10 estimators
forestRegressor = RandomForestRegressor(n_estimators=10, random_state=42)
# Fit the model on the training data
forestRegressor.fit(X_train, y_train)
# Predict on the test data
predictions = forestRegressor.predict(X_test)
# Calculate the mean squared error on the test data
mse = mean_squared_error(y_test, predictions)
# Print the mean squared error
print("Mean Squared Error:", mse)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
from tqdm import tqdm
from torch import nn, optim
from string import punctuation
from collections import Counter
from sklearn.model_selection import train_test_split
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from torch.utils.data import TensorDataset, DataLoader
from sklearn.metrics import confusion_matrix, accuracy_score
class Config:
train_data_path = "../input/sentiment-analysis-on-movie-reviews/train.tsv.zip"
test_data_path = "../input/sentiment-analysis-on-movie-reviews/test.tsv.zip"
batch_size = 50
learning_rate = 0.01
num_epochs = 10
clip_value = 1
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
output_size = 1
embedding_dim = 400
hidden_dim = 256
n_layers = 2
n_classes = 5
dropout = 0.3
eval_every = 1
pad_inputs = 0
model_path = "/kaggle/working/SentimentRNN.pt"
test_batch_size = 4
train_data = pd.read_csv(Config.train_data_path, sep="\t")
test_data = pd.read_csv(Config.test_data_path, sep="\t")
test_data.head(30)
def pre_process(df):
reviews = []
for p in tqdm(df["Phrase"]):
p = p.lower()
p = "".join([c for c in p if c not in punctuation])
reviews_split = p.split()
p = " ".join(reviews_split)
reviews.append(p)
return reviews
train_data_pp = pre_process(train_data)
test_data_pp = pre_process(test_data)
print(train_data["Phrase"][0])
print(train_data_pp[:3])
def encode_words(data_pp):
words = []
for p in data_pp:
words.extend(p.split())
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
return vocab_to_int
encoded_voc = encode_words(train_data_pp + test_data_pp)
def encode_data(data):
reviews_ints = []
for ph in data:
reviews_ints.append([encoded_voc[word] for word in ph.split()])
return reviews_ints
train_reviews_ints = encode_data(train_data_pp)
test_reviews_ints = encode_data(test_data_pp)
print(train_reviews_ints[0])
print(test_reviews_ints[0])
def to_categorical(y, num_classes):
"""1-hot encodes a tensor"""
return np.eye(num_classes, dtype="uint8")[y]
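# Illustrative example: to_categorical(np.array([0, 2, 1]), 3) yields
# array([[1, 0, 0], [0, 0, 1], [0, 1, 0]], dtype=uint8)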
y_target = to_categorical(train_data["Sentiment"], 5)
print(y_target[0])
train_review_lens = Counter([len(x) for x in train_reviews_ints])
print("Zero-length train reviews: {}".format(train_review_lens[0]))
print("Maximum train review length: {}".format(max(train_review_lens)))
test_review_lens = Counter([len(x) for x in test_reviews_ints])
print("Zero-length test reviews: {}".format(test_review_lens[0]))
print("Maximum train test length: {}".format(max(test_review_lens)))
test_zero_idx = [
test_data.iloc[ii]["PhraseId"]
for ii, review in enumerate(test_reviews_ints)
if len(review) == 0
]
print(test_zero_idx)
# TODO update submit csv by this index with sentiment 2
# remove reviews with 0 length
non_zero_idx = [ii for ii, review in enumerate(train_reviews_ints) if len(review) != 0]
train_reviews_ints = [train_reviews_ints[ii] for ii in non_zero_idx]
y_target = np.array([y_target[ii] for ii in non_zero_idx])
print("Number of reviews after removing outliers: ", len(train_reviews_ints))
def pad_features(reviews, seq_length):
features = np.zeros((len(reviews), seq_length), dtype=int)
for i, row in enumerate(reviews):
try:
features[i, -len(row) :] = np.array(row)[:seq_length]
except ValueError:
continue
return features
train_features = pad_features(train_reviews_ints, max(test_review_lens))
X_test = pad_features(test_reviews_ints, max(test_review_lens))
X_train, X_val, y_train, y_val = train_test_split(
train_features, y_target, test_size=0.2
)
print(X_train[0])
print(y_train[0])
X_train = X_train[:124800]
X_val = X_val[:31200]
y_train = y_train[:124800]
y_val = y_val[:31200]
print("X_training shape", X_train.shape)
print("X_validation shape", X_val.shape)
print("X_testing shape", X_test.shape)
ids_test = np.array([t["PhraseId"] for ii, t in test_data.iterrows()])
train_data = TensorDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))
valid_data = TensorDataset(torch.from_numpy(X_val), torch.from_numpy(y_val))
test_data = TensorDataset(torch.from_numpy(X_test), torch.from_numpy(ids_test))
train_loader = DataLoader(train_data, shuffle=True, batch_size=Config.batch_size)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=Config.batch_size)
test_loader = DataLoader(test_data, batch_size=Config.test_batch_size)
class SentimentRNN(nn.Module):
"""
The RNN model that will be used to perform Sentiment analysis.
"""
def __init__(
self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob
):
"""
Initialize the model by setting up the layers.
"""
super(SentimentRNN, self).__init__()
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# embedding and LSTM layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(
embedding_dim, hidden_dim, n_layers, dropout=drop_prob, batch_first=True
)
# dropout layer
self.dropout = nn.Dropout(p=drop_prob)
# linear and sigmoid layers
self.fc = nn.Linear(hidden_dim, output_size)
self.sig = nn.Sigmoid()
def forward(self, x, hidden):
"""
Perform a forward pass of our model on some input and hidden state.
"""
batch_size = x.size(0)
# embeddings and lstm_out
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
out = self.dropout(lstm_out)
out = self.fc(out)
out = self.sig(out)
out = out.view(batch_size, -1)
out = out[:, -5:]
        # return last sigmoid output and hidden state
        return out, hidden
def init_hidden(self, batch_size, device):
"""Initializes hidden state"""
# Create two new tensors with sizes n_layers x batch_size x hidden_dim,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
hidden = (
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device),
)
return hidden
def train_loop(
model,
optimizer,
criterion,
train_loader,
clip_value,
device,
batch_size=Config.batch_size,
):
running_loss = 0
model.train()
h = model.init_hidden(batch_size, device)
for seq, targets in train_loader:
seq = seq.to(device)
targets = targets.to(device)
h = tuple([each.data for each in h])
out, h = model.forward(seq, h)
loss = criterion(out, targets.float())
running_loss += loss.item() * seq.shape[0]
optimizer.zero_grad()
loss.backward()
if clip_value:
nn.utils.clip_grad_norm_(model.parameters(), clip_value)
optimizer.step()
running_loss /= len(train_loader.sampler)
return running_loss
def get_prediction(t):
max_indices = torch.argmax(t, dim=1)
new = torch.zeros_like(t)
new[torch.arange(t.shape[0]), max_indices] = 1
return new
def eval_loop(
model,
criterion,
eval_loader,
device,
batch_size=Config.batch_size,
ignore_index=None,
):
val_h = model.init_hidden(batch_size, device)
val_loss = 0
model.eval()
accuracy = []
for seq, targets in eval_loader:
val_h = tuple([each.data for each in val_h])
seq = seq.to(device)
targets = targets.to(device)
out, val_h = model(seq, val_h)
loss = criterion(out, targets.float())
val_loss += loss.item() * seq.shape[0]
predicted = get_prediction(out).flatten().cpu().numpy()
labels = targets.view(-1).cpu().numpy()
accuracy.append(accuracy_score(labels, predicted))
acc = sum(accuracy) / len(accuracy)
val_loss /= len(eval_loader.sampler)
return {"accuracy": acc, "loss": val_loss}
def train(
model,
optimizer,
criterion,
train_loader,
valid_loader,
eval_every,
num_epochs,
clip_value,
ignore_index=None,
device=Config.device,
valid_loss_min=np.inf,
):
for e in range(num_epochs):
# train for epoch
train_loss = train_loop(
model, optimizer, criterion, train_loader, clip_value, device
)
if (e + 1) % eval_every == 0:
# evaluate on validation set
metrics = eval_loop(model, criterion, valid_loader, device)
# show progress
print_string = f"Epoch: {e+1} "
print_string += f"TrainLoss: {train_loss:.5f} "
print_string += f'ValidLoss: {metrics["loss"]:.5f} '
print_string += f'ACC: {metrics["accuracy"]:.5f} '
print(print_string)
# save the model
if metrics["loss"] <= valid_loss_min:
torch.save(model.state_dict(), Config.model_path)
valid_loss_min = metrics["loss"]
vocab_size = len(encoded_voc) + 1  # +1 for the 0 padding index on top of the word tokens
model = SentimentRNN(
vocab_size,
Config.output_size,
Config.embedding_dim,
Config.hidden_dim,
Config.n_layers,
Config.dropout,
)
model = model.to(Config.device)
optimizer = optim.SGD(model.parameters(), lr=Config.learning_rate)
criterion = nn.BCELoss()
train(
model,
optimizer,
criterion,
train_loader,
valid_loader,
Config.eval_every,
Config.num_epochs,
Config.clip_value,
)
model = SentimentRNN(
vocab_size,
Config.output_size,
Config.embedding_dim,
Config.hidden_dim,
Config.n_layers,
Config.dropout,
)
model.load_state_dict(torch.load(Config.model_path))
model = model.to(Config.device)
@torch.no_grad()
def prediction(
model, test_loader, device=Config.device, batch_size=Config.test_batch_size
):
df = pd.DataFrame(
{"PhraseId": pd.Series(dtype="int"), "Sentiment": pd.Series(dtype="int")}
)
test_h = model.init_hidden(batch_size, device)
model.eval()
for seq, id_ in test_loader:
test_h = tuple([each.data for each in test_h])
seq = seq.to(device)
out, test_h = model(seq, test_h)
out = get_prediction(out)
for ii, row in zip(id_, out):
if ii in test_zero_idx:
predicted = 2
else:
predicted = int(torch.argmax(row) + 1)
subm = {"PhraseId": int(ii), "Sentiment": predicted}
df = df.append(subm, ignore_index=True)
return df
submission = prediction(model, test_loader)
submission.to_csv("submission.csv", index=False)
d = pd.read_csv("submission.csv")
print(d)
|
import numpy as np # linear algebra
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
os.path.join(dirname, filename)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import sys
package_paths = [
"/kaggle/input/fractal/",
]
for pth in package_paths:
sys.path.append(pth)
print(sys.path)
import json
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon.model_zoo import vision
from mxnet.gluon.data.vision import transforms
import numpy as np
from decode.FracTAL_ResUNet.models.semanticsegmentation.FracTAL_ResUNet import *
ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
print(ctx.device_type)
depth = 6
norm_type = "GroupNorm"
norm_groups = 32
ftdepth = 5
NClasses = 2
nfilters_init = 32
psp_depth = 4
nheads_start = 4
net = FracTAL_ResUNet_cmtsk(
depth=depth,
nfilters_init=nfilters_init,
NClasses=NClasses,
norm_groups=norm_groups,
norm_type=norm_type,
psp_depth=psp_depth,
)
net.initialize(mx.initializer.Xavier())
import xarray
from skimage import exposure
import rasterio
from rasterio.plot import show
def getImage(filePath, i):
xds = xarray.open_dataset(filePath)
red_time = xds["B4"][i]
green_time = xds["B3"][i]
blue_time = xds["B2"][i]
merged_array = np.stack([red_time, green_time, blue_time], axis=-1)
merged_array_norm = exposure.rescale_intensity(merged_array, out_range=(0, 1))
return merged_array_norm
def getMask(filePath):
img = rasterio.open(filePath)
return img.read(2)
from mxnet.gluon.data import Dataset
class CustomImageDataset(Dataset):
def __init__(self, data_dir):
self.data_dir = data_dir
self.mask_dir = data_dir.replace("images", "masks")
self.samples = self._make_dataset()
def _make_dataset(self):
samples = []
for filename in os.listdir(self.data_dir):
if filename.endswith(".nc"):
path = os.path.join(self.data_dir, filename)
mask_filename = filename.replace("S2_10m_256.nc", "S2label_10m_256.tif")
mask_path = os.path.join(self.mask_dir, mask_filename)
for i in range(0, 5):
item = (path, mask_path, i)
samples.append(item)
return samples
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
path, mask_path, i = self.samples[idx]
img = getImage(path, i)
mask = getMask(mask_path)
return img, mask
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Imports
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay, roc_curve, auc
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # NOTES
# * The evaluation metric for this hackathon is the ROC_AUC score.
# * LINK: https://www.kaggle.com/datasets/anmolkumar/health-insurance-cross-sell-prediction
# The client is an insurance company that has provided health insurance to its customers; they now need your help to build a predictive model able to predict whether last year's policyholders might also be interested in buying insurance for their vehicle.
# The dataset consists of the following properties:
# * id: unique id of the customer.
# * Gender: gender of the customer.
# * Age: age of the customer.
# * Driving_License: 1 if the user holds a driving license, 0 otherwise.
# * Region_Code: unique code of the customer's region.
# * Previously_Insured: 1 if the user already has an insured vehicle, 0 otherwise.
# * Vehicle_Age: age of the vehicle.
# * Vehicle_Damage: 1 if the user has damaged the vehicle in the past, 0 otherwise.
# * Annual_Premium: the amount the user has to pay as a premium during the year.
# * Policy_Sales_Channel: anonymized code of the channel used for the offer (e.g. by email, by phone, in person, etc.).
# * Vintage: number of days the user has been a customer of the company.
# * Response: 1 if the customer responded positively to the sales offer, 0 otherwise.
# The goal of the model is to predict the value of Response.
# Tip: pay attention to the class distribution; have a look at the reference below. In case of imbalanced classes you can try to:
# https://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset/
# Penalize the most frequent class (remember the class_weight argument); a minimal sketch follows these notes.
# Use oversampling or undersampling.
# https://machinelearningmastery.com/random-oversampling-and-undersampling-for-imbalanced-classification/
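# A minimal, self-contained sketch of the class_weight option mentioned in the tip above,
# shown on synthetic data; it is an alternative to the undersampling used later in this
# notebook. make_classification and the 90/10 imbalance are illustrative assumptions only.
from sklearn.datasets import make_classification
X_demo, y_demo = make_classification(n_samples=2000, weights=[0.9, 0.1], random_state=0)
lr_demo = LogisticRegression(class_weight="balanced", max_iter=1000).fit(X_demo, y_demo)
print("balanced LogisticRegression demo accuracy:", lr_demo.score(X_demo, y_demo))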
# # IMPORTING DATA
# DATA CLEANING LINK: https://www.kaggle.com/datasets/anmolkumar/health-insurance-cross-sell-prediction
BASE_URL = "/kaggle/input/health-insurance-cross-sell-prediction"
FILE_PATH = BASE_URL + "/train.csv"
df = pd.read_csv(FILE_PATH, index_col="id")
df.head(10)
# # LABEL ENCODING
df["Region_Code"] = df["Region_Code"].astype("str")
df.info()
# LabelEncoding
from sklearn.preprocessing import LabelEncoder
LabEnc = LabelEncoder()
df["Gender"] = LabEnc.fit_transform(df["Gender"])
df["Vehicle_Age"] = LabEnc.fit_transform(df["Vehicle_Age"])
df["Vehicle_Damage"] = LabEnc.fit_transform(df["Vehicle_Damage"])
df["Region_Code"] = LabEnc.fit_transform(df["Region_Code"])
df.head()
# Create a function to compare the feature distributions
def dashboard(dataframe):
num_colonne = len(dataframe.columns)
fig, axs = plt.subplots(nrows=1, ncols=num_colonne, figsize=(15, 5))
for i, nome_colonna in enumerate(dataframe.columns):
axs[i].hist(dataframe[nome_colonna], color="blue"),
axs[i].set_title(nome_colonna)
plt.show()
dashboard(df)
# # BALANCING [RESPONSE] features
df["Response"].hist()
plt.show()
print(df["Response"].value_counts())
# group the dataframe by the 'Response' column
groups = df.groupby("Response")
# compute the minimum number of rows across the subgroups
min_rows = groups.size().min()
# sample every subgroup down to the same number of rows
df_balanced = pd.DataFrame()
for name, g in groups:
df_balanced = pd.concat([df_balanced, g.sample(n=min_rows)])
# reset the index of the resulting dataframe
df_balanced = df_balanced.reset_index(drop=True)
df_balanced.head()
# Check that the classes are now balanced
df_balanced["Response"].hist()
plt.show()
dashboard(df_balanced)
# # DATA ANALYSIS
df_balanced.describe()
# # MODEL LOGISTIC REGRESSION
X = df_balanced.drop("Response", axis=1)
y = df_balanced["Response"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# SCALE THE DATA WITH SCIKIT-LEARN
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
# LOGISTIC REGRESSION
from sklearn.metrics import classification_report
lr = LogisticRegression()
lr.fit(X_train, y_train)
# Predict on the train and test splits before reporting the metrics
y_pred_train = lr.predict(X_train)
y_pred_test = lr.predict(X_test)
print(classification_report(y_train, y_pred_train))
print(classification_report(y_test, y_pred_test))
# Get the model's predicted probabilities for the positive class
y_pred = lr.predict_proba(X_test)[:, 1]
# Compute the ROC curve
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
# Create the RocCurveDisplay object and plot the ROC curve
roc_display = RocCurveDisplay.from_predictions(y_test, y_pred)
roc_display.plot()
|
import numpy as np
from dataclasses import dataclass
from time import time
# Convenience functions.
def arr_to_str(a):
return ";".join([str(x) for x in a.reshape(-1)])
# Evaluation metric.
@dataclass
class Camera:
rotmat: np.array
tvec: np.array
def quaternion_from_matrix(matrix):
M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# Symmetric matrix K.
K = np.array(
[
[m00 - m11 - m22, 0.0, 0.0, 0.0],
[m01 + m10, m11 - m00 - m22, 0.0, 0.0],
[m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
[m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
]
)
K /= 3.0
# Quaternion is eigenvector of K that corresponds to largest eigenvalue.
w, V = np.linalg.eigh(K)
q = V[[3, 0, 1, 2], np.argmax(w)]
if q[0] < 0.0:
np.negative(q, q)
return q
def evaluate_R_t(R_gt, t_gt, R, t, eps=1e-15):
t = t.flatten()
t_gt = t_gt.flatten()
q_gt = quaternion_from_matrix(R_gt)
q = quaternion_from_matrix(R)
q = q / (np.linalg.norm(q) + eps)
q_gt = q_gt / (np.linalg.norm(q_gt) + eps)
loss_q = np.maximum(eps, (1.0 - np.sum(q * q_gt) ** 2))
err_q = np.arccos(1 - 2 * loss_q)
GT_SCALE = np.linalg.norm(t_gt)
t = GT_SCALE * (t / (np.linalg.norm(t) + eps))
err_t = min(np.linalg.norm(t_gt - t), np.linalg.norm(t_gt + t))
return np.degrees(err_q), err_t
def compute_dR_dT(R1, T1, R2, T2):
"""Given absolute (R, T) pairs for two cameras, compute the relative pose difference, from the first."""
dR = np.dot(R2, R1.T)
dT = T2 - np.dot(dR, T1)
return dR, dT
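# A tiny sanity-check sketch for compute_dR_dT (identity rotations are an illustrative
# assumption): with R1 = R2 = I, dR is the identity and dT reduces to T2 - T1.
demo_dR, demo_dT = compute_dR_dT(np.eye(3), np.zeros(3), np.eye(3), np.ones(3))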
def compute_mAA(err_q, err_t, ths_q, ths_t):
"""Compute the mean average accuracy over a set of thresholds. Additionally returns the metric only over rotation and translation."""
acc, acc_q, acc_t = [], [], []
for th_q, th_t in zip(ths_q, ths_t):
cur_acc_q = err_q <= th_q
cur_acc_t = err_t <= th_t
cur_acc = cur_acc_q & cur_acc_t
acc.append(cur_acc.astype(np.float32).mean())
acc_q.append(cur_acc_q.astype(np.float32).mean())
acc_t.append(cur_acc_t.astype(np.float32).mean())
return np.array(acc), np.array(acc_q), np.array(acc_t)
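# A brief usage sketch for compute_mAA; the error values and thresholds below are
# illustrative assumptions, not competition data.
demo_acc, demo_acc_q, demo_acc_t = compute_mAA(
    err_q=np.array([0.5, 3.0, 12.0]),
    err_t=np.array([0.02, 0.4, 1.5]),
    ths_q=np.linspace(1, 10, 10),
    ths_t=np.geomspace(0.05, 0.5, 10),
)
print("demo mAA:", demo_acc.mean())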
def dict_from_csv(csv_path, has_header):
csv_dict = {}
with open(csv_path, "r") as f:
for i, l in enumerate(f):
if has_header and i == 0:
continue
if l:
image, dataset, scene, R_str, T_str = l.strip().split(",")
R = np.fromstring(R_str.strip(), sep=";").reshape(3, 3)
T = np.fromstring(T_str.strip(), sep=";")
if dataset not in csv_dict:
csv_dict[dataset] = {}
if scene not in csv_dict[dataset]:
csv_dict[dataset][scene] = {}
csv_dict[dataset][scene][image] = Camera(rotmat=R, tvec=T)
return csv_dict
def eval_submission(
submission_csv_path,
ground_truth_csv_path,
rotation_thresholds_degrees_dict,
translation_thresholds_meters_dict,
verbose=False,
):
"""Compute final metric given submission and ground truth files. Thresholds are specified per dataset."""
submission_dict = dict_from_csv(submission_csv_path, has_header=True)
gt_dict = dict_from_csv(ground_truth_csv_path, has_header=True)
# Check that all necessary keys exist in the submission file
for dataset in gt_dict:
assert dataset in submission_dict, f"Unknown dataset: {dataset}"
for scene in gt_dict[dataset]:
assert (
scene in submission_dict[dataset]
), f"Unknown scene: {dataset}->{scene}"
for image in gt_dict[dataset][scene]:
assert (
image in submission_dict[dataset][scene]
), f"Unknown image: {dataset}->{scene}->{image}"
# Iterate over all the scenes
if verbose:
t = time()
print("*** METRICS ***")
metrics_per_dataset = []
for dataset in gt_dict:
metrics_per_scene = []
for scene in gt_dict[dataset]:
err_q_all = []
err_t_all = []
images = [camera for camera in gt_dict[dataset][scene]]
# Process all pairs in a scene
for i in range(len(images)):
for j in range(i + 1, len(images)):
gt_i = gt_dict[dataset][scene][images[i]]
gt_j = gt_dict[dataset][scene][images[j]]
dR_gt, dT_gt = compute_dR_dT(
gt_i.rotmat, gt_i.tvec, gt_j.rotmat, gt_j.tvec
)
pred_i = submission_dict[dataset][scene][images[i]]
pred_j = submission_dict[dataset][scene][images[j]]
dR_pred, dT_pred = compute_dR_dT(
pred_i.rotmat, pred_i.tvec, pred_j.rotmat, pred_j.tvec
)
err_q, err_t = evaluate_R_t(dR_gt, dT_gt, dR_pred, dT_pred)
err_q_all.append(err_q)
err_t_all.append(err_t)
mAA, mAA_q, mAA_t = compute_mAA(
err_q=err_q_all,
err_t=err_t_all,
ths_q=rotation_thresholds_degrees_dict[(dataset, scene)],
ths_t=translation_thresholds_meters_dict[(dataset, scene)],
)
if verbose:
print(
f"{dataset} / {scene} ({len(images)} images, {len(err_q_all)} pairs) -> mAA={np.mean(mAA):.06f}, mAA_q={np.mean(mAA_q):.06f}, mAA_t={np.mean(mAA_t):.06f}"
)
metrics_per_scene.append(np.mean(mAA))
metrics_per_dataset.append(np.mean(metrics_per_scene))
if verbose:
print(f"{dataset} -> mAA={np.mean(metrics_per_scene):.06f}")
print()
if verbose:
print(
f"Final metric -> mAA={np.mean(metrics_per_dataset):.06f} (t: {time() - t} sec.)"
)
print()
return np.mean(metrics_per_dataset)
# Set rotation thresholds per scene.
# TODO update the thresholds.
rotation_thresholds_degrees_dict = {
**{
("haiper", scene): np.linspace(1, 10, 10)
for scene in ["bike", "chairs", "fountain"]
},
**{("heritage", scene): np.linspace(1, 10, 10) for scene in ["cyprus", "dioscuri"]},
**{("heritage", "wall"): np.linspace(0.2, 10, 10)},
**{("urban", "kyiv-puppet-theater"): np.linspace(1, 10, 10)},
}
translation_thresholds_meters_dict = {
**{
("haiper", scene): np.geomspace(0.05, 0.5, 10)
for scene in ["bike", "chairs", "fountain"]
},
**{
("heritage", scene): np.geomspace(0.1, 2, 10)
for scene in ["cyprus", "dioscuri"]
},
**{("heritage", "wall"): np.geomspace(0.05, 1, 10)},
**{("urban", "kyiv-puppet-theater"): np.geomspace(0.5, 5, 10)},
}
# Generate and evaluate a random submission.
src = "/kaggle/input/image-matching-challenge-2023"
# TODO check the final order of the csv file.
with open(f"{src}/train/train_labels.csv", "r") as fr, open(
"submission.csv", "w"
) as fw:
for i, l in enumerate(fr):
if i == 0:
fw.write("image_path,dataset,scene,rotation_matrix,translation_vector\n")
else:
dataset, scene, image, _, _ = l.strip().split(",")
R = np.random.rand(9)
T = np.random.rand(3)
fw.write(f"{image},{dataset},{scene},{arr_to_str(R)},{arr_to_str(T)}\n")
# Note that the fields were reordered. Here we regenerate the ground truth file.
with open(f"{src}/train/train_labels.csv", "r") as fr, open(
"ground_truth.csv", "w"
) as fw:
for i, l in enumerate(fr):
if i == 0:
fw.write("image_path,dataset,scene,rotation_matrix,translation_vector\n")
else:
dataset, scene, image, R, T = l.strip().split(",")
fw.write(f"{image},{dataset},{scene},{R},{T}\n")
eval_submission(
submission_csv_path="submission.csv",
ground_truth_csv_path="ground_truth.csv",
rotation_thresholds_degrees_dict=rotation_thresholds_degrees_dict,
translation_thresholds_meters_dict=translation_thresholds_meters_dict,
verbose=True,
)
# Now evaluate a perfect submission.
eval_submission(
submission_csv_path="ground_truth.csv",
ground_truth_csv_path="ground_truth.csv",
rotation_thresholds_degrees_dict=rotation_thresholds_degrees_dict,
translation_thresholds_meters_dict=translation_thresholds_meters_dict,
verbose=True,
)
|
# # **Approach**
# * Load the dataset and perform exploratory data analysis (EDA) to understand the features and their distributions, check for missing values, and correlations between features.
# * Prepare the data for machine learning by encoding categorical features and splitting the data into training and validation sets.
# * Train different machine learning models like Decision Trees, Random Forest, XGBoost, and SVM to predict the star type from the given features.
# * Evaluate the models using appropriate evaluation metrics like accuracy, precision, recall, and F1-score.
# * Select the best performing model and fine-tune its hyperparameters using techniques like grid search and cross-validation (a minimal sketch follows this list).
# * Use the tuned model to predict the star type from the test dataset and create a submission file.
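# A minimal, self-contained sketch of the tuning step mentioned above (grid search with
# cross-validation). The iris data and the tiny parameter grid are illustrative assumptions,
# not the competition data or the grids used later in this notebook.
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
X_demo, y_demo = load_iris(return_X_y=True)
demo_search = GridSearchCV(
    RandomForestClassifier(random_state=42),
    param_grid={"n_estimators": [50, 100], "max_depth": [5, 10]},
    cv=5,
    scoring="accuracy",
)
demo_search.fit(X_demo, y_demo)
print("demo best params:", demo_search.best_params_, "cv accuracy:", demo_search.best_score_)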
import os
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import (
train_test_split,
KFold,
cross_val_score,
GridSearchCV,
)
from sklearn.metrics import (
accuracy_score,
roc_auc_score,
roc_curve,
precision_recall_fscore_support as score,
precision_score,
recall_score,
f1_score,
)
from sklearn import metrics
from sklearn.feature_selection import RFECV
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pickle
import keras
from keras.utils.np_utils import (
to_categorical,
) # used for converting labels to one-hot-encoding
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras import backend as K
from tensorflow.keras.layers import BatchNormalization
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
Input,
Dense,
Flatten,
GlobalAveragePooling2D,
concatenate,
)
from tensorflow.keras.layers import (
Conv2D,
MaxPool2D,
Activation,
Dropout,
BatchNormalization,
LeakyReLU,
)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
from tensorflow.keras.optimizers import SGD, Adamax
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras import regularizers
import tensorflow as tf
# CatBoost model
from catboost import CatBoostClassifier, Pool
# To ignore warnings
import warnings
warnings.filterwarnings("ignore")
# # **Data**
# Load the training and testing datasets into pandas dataframe
df_train = pd.read_csv("/kaggle/input/nebulanet/train.csv")
# Rename columns for ease of use
df_train.columns = [
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Type",
"Star Color",
"Spectral Class",
]
df_train.head()
# # **Exploratory Data Analysis**
df_train.info()
df_train.shape
print(df_train["Star Type"].unique())
print(df_train["Star Type"].value_counts())
print(df_train["Star Color"].unique())
print(df_train["Star Color"].value_counts())
print(df_train["Spectral Class"].unique())
print(df_train["Spectral Class"].value_counts())
le = LabelEncoder()
df_train["Star Color"] = le.fit_transform(df_train["Star Color"])
df_train["Spectral Class"] = le.fit_transform(df_train["Spectral Class"])
df_train["Star Type"] = le.fit_transform(df_train["Star Type"])
df_train.head()
print(df_train["Star Type"].unique())
print(df_train["Star Type"].value_counts())
print(df_train["Spectral Class"].unique())
print(df_train["Spectral Class"].value_counts())
scaler = MinMaxScaler()
df_train[
[
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
] = scaler.fit_transform(
df_train[
[
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
]
)
# Remove rows with missing values
df_train.dropna(inplace=True)
df_train.head()
print(df_train["Spectral Class"].unique())
print(df_train["Spectral Class"].value_counts())
print(df_train["Star Color"].unique())
print(df_train["Star Color"].value_counts())
df_train.describe()
columns = list(df_train.columns)
columns
# # **Data Visualization**
# Visualize the distribution of Star Type classes
sns.countplot(data=df_train, x="Star Type")
plt.title("Distribution of Star Types")
plt.show()
# Use heatmap to see correlation between variables
sns.heatmap(df_train.corr(), annot=True)
plt.title("Heatmap of correlation between variables", fontsize=16)
plt.show()
# # **Feature Selection**
X_train = df_train[
[
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
] # X-input features
y_train = df_train["Star Type"]
X_train.head()
print(y_train.unique())
print(y_train.value_counts())
# Train/test split: 80% train, 20% test
X_train_ex, X_test_ex, y_train_ex, y_test_ex = train_test_split(
X_train, y_train, test_size=0.2, random_state=42
)
# Define estimator for feature selection
estimator = RandomForestClassifier(n_estimators=100, random_state=42)
# Define recursive feature elimination with cross-validation
rfecv = RFECV(estimator=estimator, step=1, cv=5, scoring="accuracy")
# Fit RFECV to training data
rfecv.fit(X_train, y_train)
# Print selected features
print("Selected Features: ", X_train.columns[rfecv.support_])
X_train_mod = df_train[["Radius", "Absolute Magnitude"]] # X-input features
y_train_mod = df_train["Star Type"]
# Train/test split: 80% train, 20% test
X_train_mod_ex, X_test_mod_ex, y_train_mod_ex, y_test_mod_ex = train_test_split(
X_train_mod, y_train_mod, test_size=0.2, random_state=42
)
"""
# Define the models to evaluate
models = {
'Logistic Regression': LogisticRegression(random_state=42),
'Random Forest': RandomForestClassifier(random_state=42),
'Gradient Boosting': GradientBoostingClassifier(random_state=42),
'Support Vector Machines': SVC(random_state=42),
'K-Nearest': KNeighborsClassifier(),
'XGB': XGBClassifier(random_state=42),
'Cat': CatBoostClassifier(random_state=42),
'Decision Tree': DecisionTreeClassifier(random_state=42)
}
# Define the hyperparameters to tune for each model
params = {
'Logistic Regression': {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'solver': ['newton-cg', 'lbfgs', 'liblinear']},
'Random Forest': {'n_estimators': [10, 50, 100, 250, 500], 'max_depth': [5, 10, 20]},
'Gradient Boosting': {'n_estimators': [10, 50, 100, 250, 500], 'learning_rate': [0.001, 0.005, 0.0001, 0.0005]},
'Support Vector Machines': {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'kernel': ['linear', 'rbf']},
'K-Nearest': { 'n_neighbors': [3, 5, 7, 11, 21], 'weights': ['uniform', 'distance'], 'metric': ['euclidean', 'manhattan']},
'XGB': {'max_depth': [5, 10, 20], 'n_estimators': [10, 50, 100, 250, 500], 'learning_rate': [ 0.001, 0.005, 0.0001, 0.0005]},
'Cat': {'iterations': [50,500,5000], 'max_depth': [5, 10, 20], 'loss_function': ['Logloss', 'CrossEntropy', 'MultiClass'], 'learning_rate': [ 0.001, 0.005, 0.0001, 0.0005], 'eval_metric': ['MultiClass']},
'Decision Tree': {'max_features': ['auto', 'sqrt', 'log2'],'ccp_alpha': [0.1, .01, .001],'max_depth' : [5, 10, 20],'criterion' :['gini', 'entropy']}
}
# Create a list to store the results of each model
results = []
# Loop through each model and perform RandomizedSearchCV
for name, model in models.items():
clf = RandomizedSearchCV(model, params[name], cv=5, n_jobs=-1, scoring='accuracy')
clf.fit(X_train, y_train)
#clf.fit(X_train_ex, y_train_ex)
#clf.fit(X_train_mod, y_train_mod)
#clf.fit(X_train_mod_ex, y_train_mod_ex)
# Add the model name and best accuracy score to the results list
results.append({'model': name, 'best_score': clf.best_score_, 'best_params': clf.best_params_})
# Print the results for each model
for result in results:
print(f"{result['model']}: Best score = {result['best_score']:.4f}, Best params = {result['best_params']}")
"""
# ## **X_train**
# * Logistic Regression: Best score = 0.9582, Best params = {'solver': 'newton-cg', 'C': 100}
# * Random Forest: Best score = 0.9860, Best params = {'n_estimators': 100, 'max_depth': 20}
# * Gradient Boosting: Best score = 0.9681, Best params = {'n_estimators': 250, 'learning_rate': 0.0005}
# * Support Vector Machines: Best score = 0.9767, Best params = {'kernel': 'rbf', 'C': 1000}
# * K-Nearest: Best score = 0.9674, Best params = {'weights': 'distance', 'n_neighbors': 3, 'metric': 'euclidean'}
# * XGB: Best score = 0.9771, Best params = {'n_estimators': 500, 'max_depth': 5, 'learning_rate': 0.005}
# * Cat: Best score = 0.9168, Best params = {'max_depth': 5, 'loss_function': 'MultiClass', 'learning_rate': 0.001, 'iterations': 50, 'eval_metric': 'MultiClass'}
# * Decision Tree: Best score = 0.9674, Best params = {'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'ccp_alpha': 0.01}
# **Best Score:**
# * Random Forest (0.9860): 'n_estimators': 100, 'max_depth': 20
# ## **X_train_ex**
# * Logistic Regression: Best score = 0.9941, Best params = {'solver': 'lbfgs', 'C': 1000}
# * Random Forest: Best score = 1.0000, Best params = {'n_estimators': 50, 'max_depth': 20}
# * Gradient Boosting: Best score = 0.9647, Best params = {'n_estimators': 250, 'learning_rate': 0.001}
# * Support Vector Machines: Best score = 0.9941, Best params = {'kernel': 'linear', 'C': 100}
# * K-Nearest: Best score = 0.9824, Best params = {'weights': 'distance', 'n_neighbors': 5, 'metric': 'euclidean'}
# * XGB: Best score = 0.9708, Best params = {'n_estimators': 500, 'max_depth': 20, 'learning_rate': 0.005}
# * Cat: Best score = 0.9943, Best params = {'max_depth': 5, 'loss_function': 'MultiClass', 'learning_rate': 0.005, 'iterations': 5000, 'eval_metric': 'MultiClass'}
# * Decision Tree: Best score = 0.9882, Best params = {'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'gini', 'ccp_alpha': 0.01}
# **Best Score:**
# * Random Forest (1.0000): 'n_estimators': 50, 'max_depth': 20
# ## **X_train_mod**
# * Logistic Regression: Best score = 0.8054, Best params = {'solver': 'liblinear', 'C': 100}
# * Random Forest: Best score = 1.0000, Best params = {'n_estimators': 250, 'max_depth': 10}
# * Gradient Boosting: Best score = 0.9907, Best params = {'n_estimators': 100, 'learning_rate': 0.005}
# * Support Vector Machines: Best score = 0.8007, Best params = {'kernel': 'rbf', 'C': 10}
# * K-Nearest: Best score = 0.8517, Best params = {'weights': 'uniform', 'n_neighbors': 3, 'metric': 'euclidean'}
# * XGB: Best score = 0.9860, Best params = {'n_estimators': 500, 'max_depth': 20, 'learning_rate': 0.005}
# * Cat: Best score = 1.0000, Best params = {'max_depth': 10, 'loss_function': 'MultiClass', 'learning_rate': 0.0001, 'iterations': 500, 'eval_metric': 'MultiClass'}
# * Decision Tree: Best score = 0.9953, Best params = {'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'ccp_alpha': 0.001}
# **Best Score:**
# * Random Forest (1.0000): 'n_estimators': 250, 'max_depth': 10
# * Cat (1.0000): 'max_depth': 10, 'loss_function': 'MultiClass', 'learning_rate': 0.0001, 'iterations': 500, 'eval_metric': 'MultiClass'
# ## **X_train_mod_ex**
# * Logistic Regression: Best score = 0.8657, Best params = {'solver': 'liblinear', 'C': 1000}
# * Random Forest: Best score = 1.0000, Best params = {'n_estimators': 100, 'max_depth': 10}
# * Gradient Boosting: Best score = 0.9941, Best params = {'n_estimators': 500, 'learning_rate': 0.001}
# * Support Vector Machines: Best score = 0.8716, Best params = {'kernel': 'rbf', 'C': 100}
# * K-Nearest: Best score = 0.8724, Best params = {'weights': 'distance', 'n_neighbors': 3, 'metric': 'manhattan'}
# * XGB: Best score = 0.9884, Best params = {'n_estimators': 10, 'max_depth': 5, 'learning_rate': 0.001}
# * Cat: Best score = 1.0000, Best params = {'max_depth': 5, 'loss_function': 'MultiClass', 'learning_rate': 0.0005, 'iterations': 500, 'eval_metric': 'MultiClass'}
# * Decision Tree: Best score = 1.0000, Best params = {'max_features': 'auto', 'max_depth': 10, 'criterion': 'entropy', 'ccp_alpha': 0.001}
# **Best Score:**
# * Random Forest (1.0000): 'n_estimators': 100, 'max_depth': 10
# * Cat (1.0000): 'max_depth': 5, 'loss_function': 'MultiClass', 'learning_rate': 0.0005, 'iterations': 500, 'eval_metric': 'MultiClass'
# * Decision Tree (1.0000): 'max_features': 'auto', 'max_depth': 10, 'criterion': 'entropy', 'ccp_alpha': 0.001
# As is evident, feature selection using RFECV has been quite successful (approximately a 2% increase in accuracy score).
# ## **The top 4 supervised models are:**
# * Random Forest Classifier (100%)
# * Cat Boost Classifier (100%)
# * Decision Tree Classifier (99.53%)
# * Gradient Boosting (99.07%)
random_forest = RandomForestClassifier(n_estimators=100, max_depth=10, random_state=42)
cat_boost = CatBoostClassifier(
iterations=500,
max_depth=5,
learning_rate=0.0005,
loss_function="MultiClass",
eval_metric="MultiClass",
random_state=42,
)
decision_tree = DecisionTreeClassifier(
max_features="auto",
max_depth=10,
criterion="entropy",
ccp_alpha=0.001,
random_state=42,
)
gradient_boost = GradientBoostingClassifier(
n_estimators=100, learning_rate=0.005, random_state=42
)
random_forest.fit(X_train_mod, y_train_mod)
cat_boost.fit(X_train_mod, y_train_mod)
decision_tree.fit(X_train_mod, y_train_mod)
gradient_boost.fit(X_train_mod, y_train_mod)
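# A small sketch (not part of the original notebook) comparing the four tuned candidates with
# 5-fold cross-validation on the reduced feature set; note that CatBoost may print its own
# training log for every fold.
for name, clf in [
    ("Random Forest", random_forest),
    ("CatBoost", cat_boost),
    ("Decision Tree", decision_tree),
    ("Gradient Boosting", gradient_boost),
]:
    scores = cross_val_score(clf, X_train_mod, y_train_mod, cv=5, scoring="accuracy")
    print(f"{name}: mean accuracy {scores.mean():.4f} (+/- {scores.std():.4f})")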
# # **RNN**
X_train_mod.shape
y_train_mod.shape
"""
# Define the model architecture
def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate):
input_shape = (2,)
model = Sequential()
model.add(Dense(neurons, activation='relu', input_shape=input_shape))
model.add(Dropout(dropout_rate))
model.add(Dense(neurons//2, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(neurons//4, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer)))
model.add(Dropout(dropout_rate))
model.add(Dense(6, activation='softmax'))
# also try adam optimizer
#adamax = Adamax(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# Create the KerasClassifier wrapper for scikit-learn
model = KerasClassifier(build_fn=create_model, verbose=0)
# Define the hyperparameters search space
neurons = [64, 128, 256, 512]
dropout_rate = [0, 0.25, 0.5, 0.75]
kernel_regularizer = [0.01, 0.001, 0.0001]
learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005]
batch_size = [16, 32, 64]
epochs = [50, 100, 150, 300, 500, 1000]
param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs)
# Perform the randomized search with cross-validation
n_iter_search = 50
random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy')
random_search.fit(X_train, y_train)
# Print the best parameters and score
print("Best parameters: ", random_search.best_params_)
print("Best score: ", random_search.best_score_)
"""
# ## **X_train, y_train**
# * Parameters: {'neurons': 512, 'learning_rate': 0.005, 'kernel_regularizer': 0.0001, 'epochs': 300, 'dropout_rate': 0, 'batch_size': 64}
# * Best Score: 0.9953488372093023
# ## **X_train_mod, y_train_mod**
#
# * Parameters: {'neurons': 128, 'learning_rate': 0.005, 'kernel_regularizer': 0.01, 'epochs': 150, 'dropout_rate': 0.25, 'batch_size': 16}
# * Best Score: 0.8149048625792812
y_train.shape
from keras.utils import to_categorical
y_train_encoded = to_categorical(y_train, num_classes=6)
y_train_encoded
model = Sequential()
model.add(Dense(512, activation="relu", input_shape=(6,)))
model.add(Dense(256, activation="relu"))
model.add(Dense(128, activation="relu", kernel_regularizer=regularizers.l2(0.0001)))
model.add(Dense(6, activation="softmax"))
opt = keras.optimizers.Adam(learning_rate=0.005)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
history = model.fit(X_train, y_train_encoded, epochs=300, batch_size=64)
# plot loss and accuracy during training
plt.figure(figsize=(10, 10))
plt.subplot(211)
plt.title("Training Loss and Accuracy")
plt.plot(history.history["loss"], label="training loss")
plt.plot(history.history["accuracy"], label="training accuracy")
plt.legend()
# # **Predictions**
df_test = pd.read_csv("/kaggle/input/nebulanet/test.csv")
df_test.head()
# Rename columns for ease of use
df_test.columns = [
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
le = LabelEncoder()
df_test["Star Color"] = le.fit_transform(df_test["Star Color"])
df_test["Spectral Class"] = le.fit_transform(df_test["Spectral Class"])
scaler = MinMaxScaler()
df_test[
[
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
] = scaler.fit_transform(
df_test[
[
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
]
)
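# Note (not part of the original notebook): the LabelEncoder and MinMaxScaler above are re-fitted
# on the test set, so category codes and feature scales may differ from those learned on df_train;
# reusing the encoders and scaler fitted on the training data would keep preprocessing consistent.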
# Remove rows with missing values
df_test.dropna(inplace=True)
df_test.head()
print(df_test["Star Color"].unique())
print(df_test["Star Color"].value_counts())
print(df_test["Spectral Class"].unique())
print(df_test["Spectral Class"].value_counts())
X_test = df_test[
[
"Temperature",
"Luminosity",
"Radius",
"Absolute Magnitude",
"Star Color",
"Spectral Class",
]
] # X-input features
X_test_mod = df_test[["Radius", "Absolute Magnitude"]] # X-input features
X_test.shape
y_pred_proba_CNN = model.predict(X_test)
y_pred_proba_RF = random_forest.predict(X_test_mod)
# CatBoost with the MultiClass loss returns a 2-D array of shape (n, 1); flatten it to 1-D
y_pred_proba_CAT = cat_boost.predict(X_test_mod).ravel()
y_pred_proba_DT = decision_tree.predict(X_test_mod)
y_pred_proba_GB = gradient_boost.predict(X_test_mod)
data = {
    "Random Forest": list(y_pred_proba_RF),
    "Cat boost": list(y_pred_proba_CAT),
    "Decision Tree": list(y_pred_proba_DT),
    "Gradient Boosting": list(y_pred_proba_GB),
}
df_pred_data = pd.DataFrame(data)
df_pred_data = df_pred_data.replace(3, "Crimson Dwarfs")
df_pred_data = df_pred_data.replace(0, "Aurelian Mainstays")
df_pred_data = df_pred_data.replace(4, "Pearl Dwarfs")
df_pred_data = df_pred_data.replace(1, "Celestial Sovereigns")
df_pred_data = df_pred_data.replace(5, "Umber Dwarfs")
df_pred_data = df_pred_data.replace(2, "Cosmic Behemoths")
a = list()
y_pred_proba_CNN = list(y_pred_proba_CNN)
for i in range(len(y_pred_proba_CNN)):
maxpos = pd.Series(y_pred_proba_CNN[i]).idxmax()
if maxpos == 0:
a.append("Aurelian Mainstays")
elif maxpos == 1:
a.append("Celestial Sovereigns")
elif maxpos == 2:
a.append("Cosmic Behemoths")
elif maxpos == 3:
a.append("Crimson Dwarfs")
elif maxpos == 4:
a.append("Pearl Dwarfs")
elif maxpos == 5:
a.append("Umber Dwarfs")
df_pred_data["RNN"] = a
df_pred_data
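# A quick sketch (not part of the original notebook) of how often the model columns above agree
# on the predicted star type for a given test row.
agreement = (df_pred_data.nunique(axis=1) == 1).mean()
print(f"All models agree on {agreement:.1%} of the test rows")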
"""
df_pred_data['RNN'].to_csv('NebuleNet_predictions.csv')
df = pd.read_csv('/kaggle/working/NebuleNet_predictions.csv')
df.columns = ['ID','Star Type']
df.head()
df.to_csv('NebulaNet_submission.csv', index=False)
"""
|
import pandas as pd
pd.DataFrame(
{" ": ["17", "19", "12"]},
index=["Punkte Kandidat 1", "Punkte Kandidat 2", "Punkte Kandidat 3"],
)
pd.DataFrame(
{
"KanzlerIn": [
"Konrad Adenauer",
"Ludwig Erhard",
"Willy Brandt",
"Angela Merkel",
],
"Dauer in Jahren": ["14", "3", "5", "16"],
},
index=["1949-1963", "1963-1966", "1969-1974", "2005-2021"],
)
df = pd.read_csv("Gender_Inequality_Index.csv")
df.head()
df.groupby("F_secondary_educ").min()
df.groupby("F_secondary_educ").max()
df.groupby("M_secondary_educ").min()
df.groupby("M_secondary_educ").max()
|
# custorino.it
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
import fastai
from fastai.tabular.all import *
from fastai.text.all import *
from fastai.vision.all import *
from fastai.medical.imaging import *
from fastai import *
import time
from datetime import datetime
print(
f'Notebook last run on {datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d, %H:%M:%S UTC")}'
)
print("Using fastai version ", fastai.__version__)
print("And torch version ", torch.__version__)
from PIL import Image
img = Image.open("../input/prova/Prova_Scalata.png")
img
TensorTypes = (TensorImage, TensorMask, TensorPoint, TensorBBox)
def _add1(x):
return x + 1
dumb_tfm = RandTransform(enc=_add1, p=0.5)
start, d1, d2 = 2, False, False
for _ in range(40):
t = dumb_tfm(start, split_idx=0)
if dumb_tfm.do:
test_eq(t, start + 1)
d1 = True
else:
test_eq(t, start)
d2 = True
assert d1 and d2
dumb_tfm
# #Image.Flip
_, axs = subplots(1, 2)
show_image(img, ctx=axs[0], title="original")
show_image(img.flip_lr(), ctx=axs[1], title="flipped")
_, axs = plt.subplots(1, 3, figsize=(12, 4))
for ax, sz in zip(axs.flatten(), [300, 500, 700]):
show_image(img.crop_pad(sz), ctx=ax, title=f"Size {sz}")
_, axs = plt.subplots(1, 3, figsize=(12, 4))
for ax, mode in zip(axs.flatten(), [PadMode.Zeros, PadMode.Border, PadMode.Reflection]):
show_image(img.crop_pad((600, 700), pad_mode=mode), ctx=ax, title=mode)
import cv2 as cv
import matplotlib.pyplot as plt
IMG_PATH = "../input/prova/Prova_Scalata.png"
imgArray = cv.imread(IMG_PATH)
convertedArray = cv.cvtColor(imgArray, cv.COLOR_BGR2RGB)
plt.subplots(figsize=(15, 10))
plt.imshow(convertedArray)
plt.show()
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.imshow(convertedArray[:, :, 0], cmap="Reds_r")
ax1.set_title("R", size=20)
ax2.imshow(convertedArray[:, :, 1], cmap="Greens_r")
ax2.set_title("G", size=20)
ax3.imshow(convertedArray[:, :, 2], cmap="Blues_r")
ax3.set_title("B", size=20)
ax4.axis("off")
plt.tight_layout()
plt.show()
# #Histograms - Use lower case in "r", "g", "b".
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 4))
ax1.hist(convertedArray[:, :, 0].flatten(), color="r", bins=200)
ax1.set_title("r", size=20)
ax2.hist(convertedArray[:, :, 1].flatten(), color="g", bins=200)
ax2.set_title("g", size=20)
ax3.hist(convertedArray[:, :, 2].flatten(), color="b", bins=200)
ax3.set_title("b", size=20)
plt.tight_layout()
plt.show()
type(convertedArray) # numpy.ndarray
convertedArray.dtype # dtype('uint8')
convertedArray.min() # 0
convertedArray.max() # 255
convertedArray.shape # (256, 196, 3)
# Above, the tuple tells us that the image has 256 rows, 196 columns and 3 channels (RGB). To crop the image we can simply use numpy indexing methods.
# So, in the next snippets choose numbers below 256 and 196.
# Take the first 200 rows and the first 190 columns (of all channels) and write like this below:
# #Crop
crop1 = convertedArray[:200, :190, :]
crop1.shape # (200,190, 3)
plt.imshow(crop1)
plt.show()
# In case you want to select from row 230 to 250, column 190 to 195 and all channels:
crop2 = convertedArray[230:250, 190:195, :]
plt.figure(figsize=(15, 8))
plt.imshow(crop2)
plt.show()
# If you want to crop only one channel (the first one). Just write:
plt.figure(figsize=(15, 8))
plt.imshow(convertedArray[230:250, 190:195, 0], cmap="rainbow")
plt.show()
convertedArray.shape # (256,196,3)
convertedArray.shape[0] * convertedArray.shape[1] # 50176
# #Dissecting an image
# Suppose you want to extract a vertical and a horizontal section from the image.
plt.subplots(figsize=(15, 10))
plt.imshow(convertedArray)
plt.axvline(190, color="yellow")
plt.axhline(250, color="orange")
plt.show()
# #To avoid "IndexError: index 600 is out of bounds for axis 0 with size 256", keep the row/column indices below the image size (max 256 here)
# If you want to extract these two profiles. Proceed this way for the horizontal section at row 200
horSection = convertedArray[200, :, :]
plt.figure(figsize=(16, 5))
plt.plot(horSection[:, 0], label="r", color="#e74c3c")
plt.plot(horSection[:, 1], label="g", color="#16a085")
plt.plot(horSection[:, 2], label="b", color="#3498db")
plt.xlabel("X")
plt.legend()
plt.show()
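# For completeness, a minimal sketch (not part of the original notebook) of the vertical section
# at column 190, i.e. the yellow line drawn above, assuming the same convertedArray.
verSection = convertedArray[:, 190, :]  # all rows, column 190, all channels
plt.figure(figsize=(16, 5))
plt.plot(verSection[:, 0], label="r", color="#e74c3c")
plt.plot(verSection[:, 1], label="g", color="#16a085")
plt.plot(verSection[:, 2], label="b", color="#3498db")
plt.xlabel("Y")
plt.legend()
plt.show()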
# Code by Olga Belitskaya https://www.kaggle.com/olgabelitskaya/sequential-data/comments
from IPython.display import display, HTML
c1, c2, f1, f2, fs1, fs2 = "#eb3434", "#eb3446", "Akronim", "Smokum", 30, 15
def dhtml(string, fontcolor=c1, font=f1, fontsize=fs1):
display(
HTML(
"""<style>
@import 'https://fonts.googleapis.com/css?family="""
+ font
+ """&effect=3d-float';</style>
<h1 class='font-effect-3d-float' style='font-family:"""
+ font
+ """; color:"""
+ fontcolor
+ """; font-size:"""
+ str(fontsize)
+ """px;'>%s</h1>""" % string
)
)
dhtml("Sì, l ho fatto, @mpwolke sono stata qui.")
|
# Importing the packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Loading the dataset
df = pd.read_csv("Customer_Subscription_And_Transaction_Details.csv")
df.head(5)
df.info()
# Making sure that there is no more null values in the dataset
df.isnull().sum()
# Checking that all the errors that were in the previous dataset are now rectified
columns = [
"transaction_type",
"subscription_type",
"customer_gender",
"age_group",
"customer_country",
"referral_type",
]
for col in columns:
unique_values = df[col].unique()
print(f"Unique values in the column {col} are \n {unique_values} ")
# Replacing "initial" with "INITIAL" in the dataset
df.transaction_type.replace("initial", "INITIAL", inplace=True)
# ###### The primary objective of this analysis is to enhance company performance through a comprehensive examination of the provided dataset. To achieve this, we will prioritize the evaluation of three crucial aspects during our analysis.
# Evaluate the current performance of the company
# Conduct a thorough analysis of the customer base
# Identify potential strategies to improve the overall performance
# ### An assessment of the current performance of the company
# ###### Financial performance of the company over the last three years
# Converting the column "transaction_date" to a datetime object
df["transaction_date"] = pd.to_datetime(df["transaction_date"])
# Checking the time period covered by the data
start_date = min(df["transaction_date"]).strftime("%Y-%m-%d")
end_date = max(df["transaction_date"]).strftime("%Y-%m-%d")
print(f"We have the data from {start_date} to {end_date}")
# Here, I am going to analyze the financial performance of the company over the past three years, using data from 2020-01-01 to 2022-12-01. The analysis will be conducted on a quarterly basis, focusing on three key areas. Firstly, an assessment will be made of the revenue generated from new customer acquisitions and upgrades of existing plans. Secondly, a review of quarterly losses will be conducted, analyzing the impact of customer cancellations and downgrades. Finally, the net profit of the company in each quarter will be computed to evaluate the overall financial performance of the company over the last three years.
# group data by year and quarter
grouped = df.groupby([pd.Grouper(key="transaction_date", freq="Q")])
# calculate the total subscription price per quarter and subscription type
mrr_income = grouped.apply(
lambda x: x.loc[
x["transaction_type"].isin(["INITIAL", "UPGRADE"]), "subscription_price"
].sum()
)
# Making a list of each quarter
quarter_list = [
    "2020-01-01 - 2020-03-31",
    "2020-04-01 - 2020-06-30",
    "2020-07-01 - 2020-09-30",
    "2020-10-01 - 2020-12-31",
    "2021-01-01 - 2021-03-31",
    "2021-04-01 - 2021-06-30",
    "2021-07-01 - 2021-09-30",
    "2021-10-01 - 2021-12-31",
    "2022-01-01 - 2022-03-31",
    "2022-04-01 - 2022-06-30",
    "2022-07-01 - 2022-09-30",
    "2022-10-01 - 2022-12-31",
]
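# A sketch (not part of the original notebook) of deriving these labels programmatically instead
# of hand-writing them, assuming mrr_income keeps the quarterly DatetimeIndex produced by the
# Grouper above.
quarter_labels = [str(ts.to_period("Q")) for ts in mrr_income.index]
print(quarter_labels)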
# Revenue generated in each quarter
quarter_incomes = mrr_income.values
# create the bar plot for revenue
# setting the figure size
fig, ax = plt.subplots(figsize=(10, 6))
# plotting the data points
ax.bar(quarter_list, quarter_incomes, color="blue")
# setting the x-axis and y-axis labels and title
ax.set_title("2020-2022 Quarterly Revenue")
ax.set_ylabel("Revenue")
ax.set_xlabel("Quarter")
# rotating the x-axis labels to avoid overlapping
plt.xticks(rotation=90)
# show the plot
plt.show()
# The company's revenue increased consistently from Q1 2020 to Q2 2022. However, the revenue declined in Q3 2022 and continued to decline until Q4 2022. Despite this decline, the company still generated an overall revenue increase of 123.5% over the three-year period. Further analysis would be needed to understand the reasons behind the decline in revenue in the latter part of the period and to identify potential areas for improvement.
# calculating the total loss in each quarter
mrr_loss = grouped.apply(
lambda x: x.loc[
x["transaction_type"].isin(["REDUCTION", "CHURN"]), "subscription_price"
].sum()
)
quarterly_loss = mrr_loss.values
# creating the plot for quarterly loss
# setting the figure size
fig, ax = plt.subplots(figsize=(10, 6))
# plotting the data points
ax.bar(quarter_list, quarterly_loss, color="blue")
# setting the x-axis and y-axis labels and title
ax.set_title("2020-2022 Quarterly Loss")
ax.set_ylabel("Loss")
ax.set_xlabel("Quarter")
# rotating the x-axis labels to avoid overlapping
plt.xticks(rotation=90)
# show the plot
plt.show()
# The company experienced varying levels of losses over the 12 quarters represented in the data. The main aim of this analysis is to identify potential solutions to minimize this loss.
# Calculating the net profit
net_profit = mrr_income.values - mrr_loss.values
# creating the plot showing the net profit
# setting the figure size
fig, ax = plt.subplots(figsize=(10, 6))
# plotting the data points
ax.bar(quarter_list, net_profit, color="blue")
# setting the x-axis and y-axis labels and title
ax.set_title("2020-2022 Quarterly Profit")
ax.set_ylabel("Net profit")
ax.set_xlabel("Quater")
# rotating the x-axis labels to avoid overlapping
plt.xticks(rotation=90)
# show the plot
plt.show()
# The company's profit fluctuated throughout the 12 quarters, ranging from a low of 48,745 in Q4 2020 to a high of 76,453 in Q3 2022. Overall, the company experienced a positive trend in profit, with an average quarterly profit of approximately 61,041. The strongest quarters were Q3 and Q4 of 2021 and Q3 of 2022, which had profits above 62,000. However, the company saw a dip in profit in Q4 of 2020 and Q1 of 2021, with profits falling below 51,000.
# ### Calculating the Revenue and Loss from each country
# ###### Revenue by Country and Year
# Making a copy of the original dataframe
df1 = df.copy()
# Setting the index of the copied dataframe to "transaction_type"
df1 = df1.set_index("transaction_type")
# Select rows from the dataframe where "transaction_type" is either "INITIAL" or "UPGRADE",
# select only the columns "subscription_price", "customer_country", and "transaction_date"
df1_income = df1.loc[
["INITIAL", "UPGRADE"],
["subscription_price", "customer_country", "transaction_date"],
]
# Resetting the index of the selected rows to default and drop the "transaction_type" column
df1_income = df1_income.reset_index(drop=True)
# Group the selected rows by year and customer_country, and calculate the sum of subscription_price for each group
grouped_income = df1_income.groupby(
[pd.Grouper(key="transaction_date", freq="Y"), "customer_country"]
)
cou_grouped_income = grouped_income["subscription_price"].sum()
# output
cou_grouped_income
# From the data, we can see that Sweden consistently generates the highest revenue, followed by Norway, Finland, and Denmark.
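# A sketch (not part of the original notebook) that unstacks the grouped series so the yearly
# country comparison is easier to read.
cou_grouped_income.unstack("customer_country").plot(kind="bar", figsize=(10, 6))
plt.ylabel("Revenue")
plt.title("Yearly revenue by country")
plt.show()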
# ###### Loss by Country and Year
# Select rows from the dataframe where "transaction_type" is either 'REDUCTION','CHURN'
# select only the columns "subscription_price", "customer_country", and "transaction_date"
df1_loss = df1.loc[
["REDUCTION", "CHURN"],
["subscription_price", "customer_country", "transaction_date"],
]
# Resetting the index of the selected rows to default and drop the "transaction_type" column
df1_loss = df1_loss.reset_index(drop=True)
# Group the selected rows by year and customer_country, and calculate the sum of subscription_price for each group
grouped_loss = df1_loss.groupby(
[pd.Grouper(key="transaction_date", freq="Y"), "customer_country"]
)
cou_grouped_loss = grouped_loss["subscription_price"].sum()
cou_grouped_loss
# The report also shows that Sweden generated the highest total loss over the three years, followed by Finland, Denmark, and Norway.
# ###### Number of new customers subscribed each year
# Select rows from the original dataframe where "transaction_type" is "INITIAL"
# and only keep the columns "customer_country" and "transaction_date"
df1_initial = df1.loc[["INITIAL"], ["customer_country", "transaction_date"]]
# Reset the index of the selected rows to default and drop the "transaction_type" column
df1_initial = df1_initial.reset_index(drop=True)
# Group the selected rows by year, based on the "transaction_date" column
df1_initial = df1_initial.groupby([pd.Grouper(key="transaction_date", freq="Y")])
# Calculate the number of new customers in each year
df1_initial_data = df1_initial["customer_country"].count()
# Return the resulting data as output
df1_initial_data
# ###### The annual number of upgrades
# Select rows from the original dataframe where "transaction_type" is 'UPGRADE'
# and only keep the columns "customer_country" and "transaction_date"
df1_upgrade = df1.loc[["UPGRADE"], ["customer_country", "transaction_date"]]
# Reset the index of the selected rows to default and drop the "transaction_type" column
df1_upgrade = df1_upgrade.reset_index(drop=True)
# Group the selected rows by year, based on the "transaction_date" column
df1_upgrade = df1_upgrade.groupby([pd.Grouper(key="transaction_date", freq="Y")])
# Calculate the number of upgraded customer
df1_upgrade_data = df1_upgrade["customer_country"].count()
df1_upgrade_data
# ###### The annual number of Reduction
# Select rows from the original dataframe where "transaction_type" is 'REDUCTION'
# and only keep the columns "customer_country" and "transaction_date"
df1_reduced = df1.loc[["REDUCTION"], ["customer_country", "transaction_date"]]
# Reset the index of the selected rows to default and drop the "transaction_type" column
df1_reduced = df1_reduced.reset_index(drop=True)
# Group the selected rows by year, based on the "transaction_date" column
df1_reduced = df1_reduced.groupby([pd.Grouper(key="transaction_date", freq="Y")])
# Calculate the number of reduced customer
df1_reduced_data = df1_reduced["customer_country"].count()
df1_reduced_data
# ###### The annual number of cancellations
# Select rows from the original dataframe where "transaction_type" is 'CHURN'
# and only keep the columns "customer_country" and "transaction_date"
df1_cancelled = df1.loc[["CHURN"], ["customer_country", "transaction_date"]]
# Reset the index of the selected rows to default and drop the "transaction_type" column
df1_cancelled = df1_cancelled.reset_index(drop=True)
# Group the selected rows by year, based on the "transaction_date" column
df1_cancelled = df1_cancelled.groupby([pd.Grouper(key="transaction_date", freq="Y")])
# Calculate the number of reduced customer
df1_cancelled_data = df1_cancelled["customer_country"].count()
df1_cancelled_data
# From this report, we can see that the number of new customers added each year remained relatively stable, at approximately 3,400-3,500 new customers per year. However, we can also see that the number of upgrades, reductions and cancellations increased significantly from 2020 to 2022.
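# A sketch (not part of the original notebook) that puts the four yearly counts computed above
# side by side in a single table.
yearly_summary = pd.DataFrame(
    {
        "new_customers": df1_initial_data,
        "upgrades": df1_upgrade_data,
        "reductions": df1_reduced_data,
        "cancellations": df1_cancelled_data,
    }
)
yearly_summary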
# Here, I conducted a preliminary analysis of the company's performance based on the available data. The analysis focused on the financial performance of the company, with a particular emphasis on customer acquisition and retention. Specifically, I examined the company's ability to attract new customers and encourage existing customers to upgrade to higher plans. Based on my analysis, I identified two main challenges facing the company: customers downgrading to lower plans and customers canceling their subscriptions. While the available data provides some insights into these issues, a more detailed analysis will be carried out in the third section to fully understand the underlying trends and factors driving these changes.
# ### Customer Analysis and Marketing Strategy Evaluation for Improved Company Performance
# As a professional data analyst, the second part of the analysis would involve delving deeper into the company's customer base and the effectiveness of its current marketing strategies. This phase of the analysis is crucial because a comprehensive understanding of the customer base can help identify potential customers and enhance the company's performance.
# To begin with, I would first examine the customer demographics, such as age, gender, location, and other relevant characteristics. This information would help identify the target market and enable the company to tailor its marketing strategies to meet their specific needs and preferences. The next step is to assess the effectiveness of the company's marketing strategies, which would include reviewing advertising campaigns.
# Copying the original dataframe into a new dataframe named df3
df3 = df.copy()
# Counting the number of customers in each country and store the result in the variable country_counts
country_counts = df3.customer_country.value_counts()
# Creating a color palette with pastel colors for the pie chart
colors = sns.color_palette("pastel")[0:5]
# Setting the figure size
fig = plt.figure(figsize=(8, 8))
# Creating a pie chart with the customer counts for each country as the values
plt.pie(
country_counts.values, labels=country_counts.index, colors=colors, autopct="%.0f%%"
)
plt.show()
# From the above data, it is evident that Sweden has the highest number of customers, followed by Denmark, Finland, and Norway.
# This information is crucial for the company's marketing team to plan their marketing strategies and target potential customers.
# The marketing team can use this data to tailor their promotions and marketing strategies specific to each country.
# Furthermore, this data can also help the company identify trends or patterns in customer behavior across different countries. For example, if the company notices a significant increase in sales from a particular country, it can investigate why this is happening and try to replicate that success elsewhere. In this case, the company can investigate why it is performing well in Sweden and apply those lessons to improve its performance in the other countries as well.
# ### Sweden
# Setting the index of the dataframe to 'customer_country' to make it easier to select data by country
df3 = df3.set_index("customer_country")
# Selecting a subset of data from the dataframe that corresponds to customers from Sweden and includes specific columns
df_sweden = df3.loc[
["Sweden"],
[
"transaction_type",
"subscription_type",
"customer_gender",
"age_group",
"referral_type",
],
]
# Creating a bar plot for the "subscription_type" column
subscription_type_counts = df_sweden.subscription_type.value_counts()
plt.bar(subscription_type_counts.index, subscription_type_counts.values)
plt.title("Preferable subscription type in Sweden")
plt.xlabel("Subscription type")
plt.ylabel("Number of customers")
plt.show()
subscription_type_counts
# Creating a bar plot for the "customer_gender" column
customer_gender_counts = df_sweden.customer_gender.value_counts()
plt.bar(customer_gender_counts.index, customer_gender_counts.values)
plt.title("Amount of each gender in the dataset")
plt.xlabel("Gender")
plt.ylabel("Count")
plt.show()
customer_gender_counts
# group the data by age group and gender, and count the number of customers in each group
grouped = (
df_sweden.groupby(["age_group", "customer_gender"]).size().reset_index(name="count")
)
# pivot the data to create a matrix with age group as rows, gender as columns, and count as values
matrix = grouped.pivot(index="age_group", columns="customer_gender", values="count")
# create a stacked bar plot of the data
ax = matrix.plot(kind="bar", stacked=True, figsize=(10, 6))
# set the axis labels and title
ax.set_xlabel("Age Group")
ax.set_ylabel("Count")
ax.set_title("Customer Age Group and Gender")
# show the plot
plt.show()
# creating a bar chart to represent the participation of customers in different referral programs
referral_data_counts = df_sweden.referral_type.value_counts()
plt.bar(referral_data_counts.index, referral_data_counts.values)
plt.title("Performance of different referral programs in Sweden")
plt.xlabel("Referral Program")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
referral_data_counts
transaction_type_count = df_sweden.transaction_type.value_counts()
transaction_type_count
# filter the dataframe to include only the desired transaction types
df_filtered = df_sweden[df_sweden["transaction_type"].isin(["INITIAL", "UPGRADE"])]
# group the data by transaction type and referral type, and count the number of customers in each group
grouped_type = (
df_filtered.groupby(["transaction_type", "referral_type"])
.size()
.reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, referral type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="referral_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
# set the axis labels and title
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and Referral Program")
# show the plot
plt.show()
# group the data by transaction type and subscription type, and count the number of customers in each group
grouped_type = (
    df_sweden.groupby(["transaction_type", "subscription_type"])
    .size()
    .reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, subscription type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="subscription_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
# set the axis labels and title
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and subscription_type")
# show the plot
plt.show()
# Overall, the above data shows that female customers are more prevalent than male customers in Sweden. The Basic subscription type has the highest number of customers, and Google Ads and Facebook are the most effective referral types. The company can leverage this information to make informed decisions on marketing campaigns and product offerings that better target customers in Sweden.
# ### Denmark
# Selecting a subset of data from the dataframe that corresponds to customers from Denmark and includes specific columns
df_denmark = df3.loc[
["Denmark"],
[
"transaction_type",
"subscription_type",
"customer_gender",
"age_group",
"referral_type",
],
]
# Creating a bar plot for the "subscription_type" column
# Identifying the best performing subscription type in Denmark
subscription_type_counts = df_denmark.subscription_type.value_counts()
plt.bar(subscription_type_counts.index, subscription_type_counts.values)
plt.title("Preferable subscription type in Denmark")
plt.xlabel("Subscription type")
plt.ylabel("Number of customers")
plt.show()
subscription_type_counts
# Creating a bar plot for the "customer_gender" column
customer_gender_counts = df_denmark.customer_gender.value_counts()
plt.bar(customer_gender_counts.index, customer_gender_counts.values)
plt.title("Amount of each gender in the dataset")
plt.xlabel("Gender")
plt.ylabel("Count")
plt.show()
customer_gender_counts
# group the data by age group and gender, and count the number of customers in each group
grouped = (
df_denmark.groupby(["age_group", "customer_gender"])
.size()
.reset_index(name="count")
)
# pivot the data to create a matrix with age group as rows, gender as columns, and count as values
matrix = grouped.pivot(index="age_group", columns="customer_gender", values="count")
# create a stacked bar plot of the data
ax = matrix.plot(kind="bar", stacked=True, figsize=(10, 6))
# set the axis labels and title
ax.set_xlabel("Age Group")
ax.set_ylabel("Count")
ax.set_title("Customer Age Group and Gender")
# show the plot
plt.show()
# creating a bar chart to represent the participation of customers in different referral programs
referral_data_counts = df_denmark.referral_type.value_counts()
plt.bar(referral_data_counts.index, referral_data_counts.values)
plt.title("Performance of different referral programs in Denmark")
plt.xlabel("Referral Program")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
referral_data_counts
# filter the dataframe to include only the desired transaction types
df_filtered = df_denmark[df_denmark["transaction_type"].isin(["INITIAL", "UPGRADE"])]
# group the data by transaction type and referral type, and count the number of customers in each group
grouped_type = (
df_filtered.groupby(["transaction_type", "referral_type"])
.size()
.reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, referral type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="referral_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and Referral Program")
plt.show()
# group the data by transaction type and subscription type, and count the number of customers in each group
grouped_type = (
    df_denmark.groupby(["transaction_type", "subscription_type"])
    .size()
    .reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, subscription type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="subscription_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and subscription_type")
plt.show()
# The analysis shows that in Denmark, too, the company's Basic subscription plan is the most popular among customers, and the Pro plan is also doing well. The company has a larger female customer base, and the majority of customers were acquired through Google Ads and Facebook.
# ### Norway
# Selecting a subset of data from the dataframe that corresponds to customers from Norway and includes specific columns
df_norway = df3.loc[
["Norway"],
[
"transaction_type",
"subscription_type",
"customer_gender",
"age_group",
"referral_type",
],
]
# Creating a bar plot for the "subscription_type" column
subscription_type_counts = df_norway.subscription_type.value_counts()
plt.bar(subscription_type_counts.index, subscription_type_counts.values)
plt.title("Preferable subscription type in Norway")
plt.xlabel("Subscription type")
plt.ylabel("Number of customers")
plt.show()
subscription_type_counts
# Creating a bar plot for the "customer_gender" column
customer_gender_counts = df_norway.customer_gender.value_counts()
plt.bar(customer_gender_counts.index, customer_gender_counts.values)
plt.title("Amount of each gender in the dataset")
plt.xlabel("Gender")
plt.ylabel("Count")
plt.show()
customer_gender_counts
# group the data by age group and gender, and count the number of customers in each group
grouped = (
df_norway.groupby(["age_group", "customer_gender"]).size().reset_index(name="count")
)
# pivot the data to create a matrix with age group as rows, gender as columns, and count as values
matrix = grouped.pivot(index="age_group", columns="customer_gender", values="count")
ax = matrix.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Age Group")
ax.set_ylabel("Count")
ax.set_title("Customer Age Group and Gender")
plt.show()
# creating a bar chart to represent the participation of customers in different referral programs
referral_data_counts = df_norway.referral_type.value_counts()
plt.bar(referral_data_counts.index, referral_data_counts.values)
plt.title("Performance of different referral programs in Norway")
plt.xlabel("Referral Program")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
referral_data_counts
# filter the dataframe to include only the desired transaction types
df_filtered = df_norway[df_norway["transaction_type"].isin(["INITIAL", "UPGRADE"])]
# group the data by transaction type and referral type, and count the number of customers in each group
grouped_type = (
df_filtered.groupby(["transaction_type", "referral_type"])
.size()
.reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, referral type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="referral_type", values="count"
)
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and Referral Program")
plt.show()
# group the data by transaction type and subscription type, and count the number of customers in each group
grouped_type = (
    df_norway.groupby(["transaction_type", "subscription_type"])
    .size()
    .reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, subscription type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="subscription_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and subscription_type")
plt.show()
# ### Finland
# Selecting a subset of data from the dataframe that corresponds to customers from Finland and includes specific columns
df_finland = df3.loc[
["Finland"],
[
"transaction_type",
"subscription_type",
"customer_gender",
"age_group",
"referral_type",
],
]
# Creating a bar plot for the "subscription_type" column
subscription_type_counts = df_finland.subscription_type.value_counts()
plt.bar(subscription_type_counts.index, subscription_type_counts.values)
plt.title("Preferable subscription type in Finland")
plt.xlabel("Subscription type")
plt.ylabel("Number of customers")
plt.show()
subscription_type_counts
# Creating a bar plot for the "customer_gender" column
customer_gender_counts = df_finland.customer_gender.value_counts()
plt.bar(customer_gender_counts.index, customer_gender_counts.values)
plt.title("Amount of each gender in the dataset")
plt.xlabel("Gender")
plt.ylabel("Count")
plt.show()
customer_gender_counts
# group the data by age group and gender, and count the number of customers in each group
grouped = (
df_finland.groupby(["age_group", "customer_gender"])
.size()
.reset_index(name="count")
)
# pivot the data to create a matrix with age group as rows, gender as columns, and count as values
matrix = grouped.pivot(index="age_group", columns="customer_gender", values="count")
# create a stacked bar plot of the data
ax = matrix.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Age Group")
ax.set_ylabel("Count")
ax.set_title("Customer Age Group and Gender")
plt.show()
# creating a bar chart to represent the participation of customers in different referral programs
referral_data_counts = df_finland.referral_type.value_counts()
plt.bar(referral_data_counts.index, referral_data_counts.values)
plt.title("Performance of different referral programs in Finland")
plt.xlabel("Referral Program")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
# filter the dataframe to include only the desired transaction types
df_filtered = df_finland[df_finland["transaction_type"].isin(["INITIAL", "UPGRADE"])]
# group the data by transaction type and referral type, and count the number of customers in each group
grouped_type = (
df_filtered.groupby(["transaction_type", "referral_type"])
.size()
.reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, referral type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="referral_type", values="count"
)
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and Referral Program")
# show the plot
plt.show()
# group the data by transaction type and subscription type, and count the number of customers in each group
grouped_type = (
    df_finland.groupby(["transaction_type", "subscription_type"])
    .size()
    .reset_index(name="count")
)
# pivot the data to create a matrix with transaction type as rows, subscription type as columns, and count as values
matrix_type = grouped_type.pivot(
index="transaction_type", columns="subscription_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Transaction Type")
ax.set_ylabel("Count")
ax.set_title("Transaction type and subscription_type")
plt.show()
# Based on the analysis of the customer data across the four countries, it is evident that there is a clear preference for the basic subscription plan. This indicates that customers in all four countries are looking for a more cost-effective option that meets their basic needs. Additionally, the data also shows that the majority of customers in all four countries are females, which provides a clear target audience for the company's marketing efforts.
# However, it is also important to note that the gap between the number of female and male customers is not very high. This suggests that the company should not solely focus on female customers, but also consider targeting males to attract a more diverse customer base. By targeting both categories, the company can maximize its potential customer base and increase its overall revenue.
# ### What strategies can be implemented to enhance the company's performance?
# The performance of the company can be improved mainly in three ways:
# Reducing unnecessary operational costs
# Retaining current customers by reducing churn
# Increasing the customer base
# ###### How to reduce unnecessary costs?
# By analyzing the data, we can identify the marketing channels that generate the most value for the company, as sketched below. By reallocating the company's budget to these high-performing channels, we can ensure the best possible return on investment. It is also important to identify the channels that do not produce any value and stop using them altogether. This way, the company can eliminate unnecessary costs and focus its efforts on the channels that are most effective in driving business growth.
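# A minimal sketch (not part of the original notebook) of a channel-level revenue comparison,
# assuming the same df with 'transaction_type', 'referral_type' and 'subscription_price' columns.
channel_revenue = (
    df[df["transaction_type"].isin(["INITIAL", "UPGRADE"])]
    .groupby("referral_type")["subscription_price"]
    .sum()
    .sort_values(ascending=False)
)
print(channel_revenue)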
# ### Performance of different marketing channels over the 3 years
df_per = df1
# Select rows with INITIAL and UPGRADE values in 'Transaction Type'
df_per = df_per.loc[
["INITIAL", "UPGRADE"], ["transaction_date", "cust_id", "referral_type"]
]
# Group the selected data by year and referral type and count the number of customer IDs for each group
df_per = (
df_per.groupby([pd.Grouper(key="transaction_date", freq="Y"), "referral_type"])
.count()["cust_id"]
.unstack()
)
ax = df_per.plot(kind="line")
ax.legend(bbox_to_anchor=(1.1, 1.05))
plt.show()
# ### Finding the best referral program for a particular age group
# filter the dataframe to include only the desired transaction types
df_filtered = df[df["transaction_type"].isin(["INITIAL", "UPGRADE"])]
# group the data by age group and referral type, and count the number of customers in each group
grouped_type = (
    df_filtered.groupby(["age_group", "referral_type"]).size().reset_index(name="count")
)
# pivot the data to create a matrix with age group as rows, referral type as columns, and count as values
matrix_type = grouped_type.pivot(
index="age_group", columns="referral_type", values="count"
)
# create a stacked bar plot of the data
ax = matrix_type.plot(kind="bar", stacked=True, figsize=(10, 6))
ax.set_xlabel("Age group")
ax.set_ylabel("Count")
ax.set_title("Transaction type and age group")
ax.legend(bbox_to_anchor=(1.1, 1.05))
plt.show()
# ### Subscription cancellations over the last 3-year period
df5 = df1
df5_yearly = (
df5.loc[["CHURN"]].groupby([pd.Grouper(key="transaction_date", freq="Y")]).count()
)
# create the plot
df5_yearly.plot(
kind="line", y="subscription_type", figsize=(10, 6), marker="o", legend=False
)
# set the axis labels and title
plt.xlabel("Year")
plt.ylabel("Number of Cancelled Subscriptions")
plt.title("Cancelled Subscriptions by Year")
# show the plot
plt.show()
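# A sketch (not part of the original notebook) relating yearly cancellations to yearly
# new-customer counts, reusing the series computed earlier; a rising ratio would signal a
# growing retention problem.
churn_to_acquisition = (df1_cancelled_data / df1_initial_data).round(3)
print(churn_to_acquisition)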
# ### Number of cancellations by country over the last 3 years
df5 = df5.loc[["CHURN"], ["customer_country", "transaction_date"]]
# group the data by country and year, and count the number of cancelled customers in each group
grouped_churn = (
df5.groupby(["customer_country", pd.Grouper(key="transaction_date", freq="Y")])
.size()
.reset_index(name="count")
)
# create a bar plot for country column, with year as hue
ax = sns.barplot(
x="customer_country", y="count", hue="transaction_date", data=grouped_churn
)
# set the axis labels and title
ax.set_ylabel("Number of Cancelled Customers")
ax.set_xlabel("Customer Country")
ax.set_title("Cancelled Customers by Country and Year")
ax.legend(bbox_to_anchor=(1.1, 1.05))
# show the plot
plt.show()
|
# # The Best Cities for a workation
# Looking for a change of scenery while you work? A "workation" might be just what you need! In this analysis, we have gathered data on various factors such as remote connection speed, co-working spaces, cost of living, and tourist attractions to identify the best cities for a workation. Whether you're looking for a cozy, productive, relaxed, or adventurous workation, we've got you covered. Let's dive into the data and find your perfect workation destination!
# # Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# # Import Data
df = pd.read_csv(
"/kaggle/input/the-best-cities-for-a-workation/best cities for a workation.csv"
)
df.head()
# # Data Screening
df.info()
df.columns
# Create Lists
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
sns.pairplot(df, vars=cols, plot_kws={"alpha": 0.4})
# ## Scikit-learn: KMeans Clustering
from sklearn import preprocessing
from sklearn.cluster import KMeans
# ### Z-Score
scaler = preprocessing.StandardScaler()
z = scaler.fit_transform(df[cols])
z[:5].round(4)
df[cols].hist(layout=(1, len(cols)), figsize=(3 * len(cols), 3.5))
# The histograms above show that several of the raw features are strongly skewed, so a plain z-score standardization preserves that skew. A power transformation such as Yeo-Johnson, which is designed to make skewed data more Gaussian-like, is therefore a better preprocessing choice before clustering.
# ### yeo-johnson
pt = preprocessing.PowerTransformer(
    method="yeo-johnson", standardize=True
)  # unlike Box-Cox, Yeo-Johnson also supports zero and negative values
mat = pt.fit_transform(df[cols])
mat[:5].round(4)
dfmat = pd.DataFrame(mat, columns=cols)
dfmat.head()
dfmat.hist(layout=(1, len(cols)), figsize=(3 * len(cols), 3.5))
# After applying Yeo-Johnson, the histograms look much closer to a normal distribution, so we proceed with the transformed features for clustering.
# # Scikit-learn: KMeans Clustering
X = pd.DataFrame(mat, columns=cols)
X.head()
# ## Optimal Number of Clusters
ssd = []
for k in range(2, 8):
model = KMeans(n_clusters=k)
model.fit(X)
ssd.append((k, model.inertia_))
ssd
dfssd = pd.DataFrame(ssd, columns=["k", "ssd"])
dfssd
dfssd["pct_chg"] = dfssd["ssd"].pct_change() * 100
dfssd
plt.plot(dfssd["k"], dfssd["ssd"], linestyle="--", marker="o")
for index, row in dfssd.iterrows():
plt.text(row["k"] + 0.02, row["ssd"] + 0.02, f'{row["pct_chg"]:.2f}', fontsize=10)
kmodel = KMeans(n_clusters=4)
kmodel
# As the plot shows, there is an elbow at around k = 4, where the percentage drop in the within-cluster sum of squared distances (SSD/inertia) starts to level off. Therefore, four clusters is a reasonable choice for this dataset.
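# As a complementary check (a hedged sketch, not part of the original notebook), the silhouette
# score can be compared over the same range of k; higher values indicate better-separated clusters.
from sklearn.metrics import silhouette_score
for k in range(2, 8):
    labels_k = KMeans(n_clusters=k).fit_predict(X)
    print(f"k={k}, silhouette={silhouette_score(X, labels_k):.3f}")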
# ## fit the model
kmodel.fit(X)
kmodel.labels_
# ## Sense Making About Each Cluster
df["cluster"] = kmodel.labels_
df.head()
df.groupby("cluster").describe().T
sns.countplot(x="cluster", data=df)
fig, ax = plt.subplots(nrows=5, ncols=2, figsize=(20, 9))
ax = ax.ravel()
for i, col in enumerate(cols):
sns.violinplot(x="cluster", y=col, data=df, ax=ax[i])
dx = X.copy()  # work on a copy so the feature matrix X is not modified in place
dx["cluster"] = kmodel.labels_
dx.head()
df.groupby("cluster").head(3).sort_values("cluster")
sns.heatmap(
dx.groupby("cluster").median(),
cmap="Blues",
linewidths=1,
square=True,
annot=True,
fmt=".2f",
annot_kws={"fontsize": 6},
)
sns.pairplot(
df,
vars=[
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
],
hue="cluster",
)
plt.show()
# # Summary
# Based on the cluster analysis, the data falls into four distinct clusters, each representing a dominant type of workation according to the cluster-median heatmap above.
# #### Cluster 0: The Productive Workation
# This cluster excels in the number of co-working spaces, tourist attractions, and Instagramability, making it an ideal choice for individuals seeking a balance between work and leisure. Cities in this cluster also have high average costs for after-work drinks, accommodation, and food, implying a high cost of living, so they suit those who have the financial means to support their workation lifestyle.
# #### Cluster 1: The Serene Workation
# An ideal choice for individuals seeking a tranquil workation experience. The cluster shows low scores across various factors, including remote connectivity and availability of co-working spaces, suggesting that the location may be suitable for individuals who do not require high internet speeds or prefer to work in their accommodation. Furthermore, the limited number of tourist attractions and entertainment options in this cluster may indicate a quieter and more relaxed environment, making it perfect for those seeking a peaceful workation. The sunny climate in this cluster can also be a desirable feature for individuals looking to escape colder weather.
# #### Cluster 2: The Relaxing Workation
# This cluster has a lower cost of living yet plenty of co-working spaces, making it ideal for someone who wants a comfortable working environment with ample opportunities for leisure and entertainment. However, it also has a low average number of sunshine hours and few tourist attractions, so these cities may not be the best choice for those who want sunny, warm weather or a wide variety of attractions to visit in their free time.
# #### Cluster 3: The Vibrant Workation
# It is characterized by high costs of living, shorter daylight hours, and fewer tourist attractions. The cities in this cluster offer some co-working spaces, but are more suitable for individuals looking for a lively and bustling atmosphere with plenty of options for food, drinks, and accommodation. This workation type is perfect for those who enjoy the energy of a vibrant city and want to balance work with exploring and experiencing the local culture.
pd.options.display.max_rows = None
df[["City", "Country", "cluster"]].sort_values("cluster")
|
import numpy as np
import pandas as pd
import sklearn as sk
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from pickle import dump, load
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
classification_report,
confusion_matrix,
)
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.svm import SVC
import pickle
# ## EDA & Pre-processing
data = pd.read_csv(
"/kaggle/input/lead-scoring-model-0417/Data_Science_Internship.csv"
).drop(columns="Unnamed: 0")
print(f"Data Shape: {data.shape}")
data.head()
data.describe(
include="all",
).T
# Droping the rows other than "WON", "LOST"
df_mod = data.copy()
df_mod.drop(
df_mod[(df_mod["status"] != "WON") & (df_mod["status"] != "LOST")].index,
inplace=True,
)
df_mod["status"] = df_mod["status"].map({"WON": 1, "LOST": 0})
df_mod.replace(
"9b2d5b4678781e53038e91ea5324530a03f27dc1d0e5f6c9bc9d493a23be9de0",
np.NAN,
inplace=True,
)
df_mod.head(5)
# Checking for duplicate entries
dups = df_mod[df_mod.duplicated()]
print(f"Duplicate Entries: {len(dups)}")
df_mod = df_mod.drop_duplicates(keep="first")
# ### Data cleaning
# Checking for missing values
print("Columns : % of missing vals")
print(np.round(df_mod.isna().sum() / len(df_mod.index) * 100, 2))
print("Rows : % of missing vals")
print(np.round(df_mod.isnull().sum(axis=1) / len(df_mod.columns) * 100))
print(
f""""
Max missing features across rows: {max(np.round(df_mod.isnull().sum(axis=1)/len(df_mod.columns)*100))}\n
Average missing features across rows: {np.round(np.mean(df_mod.isnull().sum(axis=1)/len(df_mod.columns)*100))}"""
)
# Drop the columns with % of missing vals > 30%
dct_c = np.round(df_mod.isna().sum() / len(df_mod.index) * 100, 2).to_dict()
for key in dct_c.keys():
    if dct_c[key] > 30:
        print("Column dropped -", key)
        df_mod = df_mod.drop(key, axis=1)
# Drop the rows with % of missing vals >=60%
thresh = 60.0
min_ = int(((100 - thresh) / 100) * df_mod.shape[1] + 1)
df_mod = df_mod.dropna(axis=0, thresh=min_)
print(df_mod.shape)
df_mod.head(5)
# Here we're not modifying unique categories in the feature columns
# Instead we'll develop several models to evaluate the performance even in minority category class
columns = [
"Agent_id",
"lost_reason",
"budget",
"lease",
"movein",
"source",
"source_city",
"source_country",
"utm_source",
"utm_medium",
"des_city",
"des_country",
"room_type",
"lead_id",
]
feat_vec = []
for col in columns:
encoder = LabelEncoder()
vals = df_mod[col].to_numpy()
encoder.fit(vals)
categorical_values = encoder.transform(vals)
feat_vec.append(categorical_values)
# save the encoder to reuse in future
dump(encoder, open(f"/kaggle/working/{col}.pkl", "wb"))
feat_vec = np.array(feat_vec)
# Checking number of features and number of categories in each feature
for i in range(len(feat_vec)):
print(i, ":", len(np.unique(feat_vec[i, ...])))
# ## Model 1: Random Forest Classifier
# The RandomForestClassifier is an ensemble machine learning model that works well on tabular data. In addition, class_weight='balanced' handles the imbalanced target by assigning higher weights to minority-class samples during training.
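# A quick illustration (hedged sketch) of what class_weight="balanced" does: each class i gets
# the weight n_samples / (n_classes * count_i), so the rarer class is up-weighted during training.
status_counts = df_mod["status"].value_counts().sort_index()
balanced_weights = len(df_mod) / (status_counts.shape[0] * status_counts)
print(balanced_weights)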
X = feat_vec.T
print(f"Input Feature Dimension: {X.shape[0]} X {X.shape[1]}")
# Select categorical features
categorical_features = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13]
# Create one-hot encoder
enc = OneHotEncoder()
# Fit encoder on categorical data
enc.fit(X)
# Transform categorical data
X_encoded = enc.transform(X).toarray()
y = df_mod["status"].to_numpy()
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X_encoded, y, test_size=0.3, random_state=42
)
print(f"X train dimension: {X_train.shape[0]} X {X_train.shape[1]}")
print(f"X train dimension: {X_test.shape[0]} X {X_test.shape[1]}")
# Create and train Random Forest classifier
clf = RandomForestClassifier(n_estimators=100, random_state=42, class_weight="balanced")
clf.fit(X_train, y_train)
# Predict on test set
y_pred = clf.predict(X_test)
# Calculate evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
report = classification_report(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
clf_performance = {
"accuracy": accuracy,
"F1_score": f1,
"Precision": precision,
"Recall": recall,
}
# Print evaluation metrics
print("Accuracy:", accuracy)
print("F1-score:", f1)
print("Precision:", precision)
print("Recall:", recall)
print("Classification Report: \n", report)
sns.heatmap(cm, annot=True, fmt="d", cmap="Reds")
# Add labels and title
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.title("Confusion Matrix")
# Show plot
plt.show()
# Saving RF Model
filename = "/kaggle/working/random_forest.pickle"
# save model
pickle.dump(clf, open(filename, "wb"))
# ## Model 2: PCA+SVM
# Here we've used Principal Component Analysis (PCA) to reduce the dimensionality of the one-hot encoded input. The reduced features are then fed to an SVM (Support Vector Machine) classifier, which separates the two classes with the hyperplane that maximizes the margin to the nearest data points of each class.
#
# Create an IncrementalPCA object with 50 components, fitted in batches to avoid exceeding memory
n_batches = 100
n_components = 50
# pca = PCA(n_components=50, copy=False)
ipca = IncrementalPCA(
copy=False, n_components=n_components, batch_size=(X_train.shape[0] // n_batches)
)
# Fit the PCA object to the training data and transform both training and testing data
X_train_pca = ipca.fit_transform(X_train)
X_test_pca = ipca.transform(X_test)
print(f"Transformed X train dimension: {X_train_pca.shape[0]} X {X_train_pca.shape[1]}")
print(f"Transformed X train dimension: {X_test_pca.shape[0]} X {X_test_pca.shape[1]}")
# Create SVM object with radial basis function kernel and class_weight='balanced' to handle imbalanced data
svm = SVC(kernel="rbf", class_weight="balanced")
# Fit the SVM object to the training data
svm.fit(X_train_pca, y_train)
# Predict the labels of the testing data
y_pred = svm.predict(X_test_pca)
# Calculate evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
report = classification_report(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
svm_performance = {
"accuracy": accuracy,
"F1_score": f1,
"Precision": precision,
"Recall": recall,
}
# Print evaluation metrics
print("Accuracy:", accuracy)
print("F1-score:", f1)
print("Precision:", precision)
print("Recall:", recall)
print("Classification Report: \n", report)
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
# Add labels and title
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.title("Confusion Matrix")
# Show plot
plt.show()
# Saving SVM Model
filename = "/kaggle/working/pcaSVM.pickle"
# save model
pickle.dump(svm, open(filename, "wb"))
# ### Comparing performance of two classifiers
df1 = pd.DataFrame.from_dict(clf_performance, orient="index", columns=["RF Classifier"])
df2 = pd.DataFrame.from_dict(
svm_performance, orient="index", columns=["SVM Classifier"]
)
metrics = list(clf_performance.keys())
values1 = [clf_performance[m] for m in metrics]
values2 = [svm_performance[m] for m in metrics]
df = pd.DataFrame(
{
"metrics": metrics + metrics,
"values": values1 + values2,
"classifier": ["Classifier 1"] * len(metrics) + ["Classifier 2"] * len(metrics),
}
)
# Create a bar plot of the performance of both classifiers
sns.set_style("whitegrid")
sns.barplot(x="metrics", y="values", hue="classifier", data=df)
plt.xlabel("Evaluation Metrics")
plt.ylabel("Performance")
plt.title("Performance Comparison of Two Classifiers")
plt.show()
|
# Python Booleans - Logical Values
# A Boolean has one of two values: True or False. True means correct, False means wrong.
# Boolean Values
# In programming you often need to know whether an expression is True or False.
# In Python, any expression can be evaluated and one of two answers is returned: True or False.
# When you compare two values, the expression is evaluated and Python returns the Boolean answer:
print(7 > 1)
print(8 == 3)
print(15 < 3)
# When a condition is run in an if statement, Python returns True or False:
#
x = 420
y = 56
if y > x:
print("y daha büyük x")
else:
print("y daha büyük değildir x")
# Evaluating Values and Variables
# The bool() function allows you to evaluate any value and get True or False in return.
print(bool("kırmızı"))
print(bool(25))
a = "mavi"
b = 34
print(bool(a))
print(bool(b))
# Most Values are True
# Almost any value evaluates to True if it has some sort of content.
# All strings are True, except empty strings.
# Any number is True, except 0.
# All lists, tuples, sets and dictionaries are True, except empty ones.
#
bool("asdf")
bool(9876)
bool(["eşek", "köpek", "balık"])
# Some Values are False
# Not many values evaluate to False, apart from empty values such as (), [], {}, "", the number 0 and the value None. The value False itself also evaluates to False.
#
bool(False)
bool(None)
bool(0)
bool("")
bool(())
bool([])
bool({})
# Functions can Return a Boolean
# Functions can be created that return a Boolean value.
def myFunction():
return True
print(myFunction())
# Code can be run based on the Boolean answer of a function.
def myFunction():
return True
if myFunction():
print("doğru")
else:
print("yanlış")
# Python also has many built-in functions that return a Boolean value, such as the isinstance() function, which can be used to determine whether an object is of a certain data type:
#
a = 250
print(isinstance(a, int))
a = "en güzel mevsim yazdır"
print(isinstance(a, str))
print(29 > 9)
print(33 == 5)
print(21 < 3)
print(bool("asdfg"))
print(bool(3))
# Python Operators
# Operators are used to perform operations on variables and values.
print(90 + 34)
# Python divides the operators into the following groups:
# Arithmetic operators, Assignment operators, Comparison operators, Logical operators, Identity operators, Membership operators, Bitwise operators
# Python Arithmetic Operators
# Arithmetic operators are used with numeric values to perform common mathematical operations
# Operator   Name             Example
# +          Addition         x + y
# -          Subtraction      x - y
# *          Multiplication   x * y
# /          Division         x / y
# %          Modulus          x % y
# **         Exponentiation   x ** y
# //         Floor division   x // y
a = 11
b = 5
print(a + b)
m = 6
n = 2
print(m - n)
e = 6
f = 5
print(e * f)
x = 80
y = 8
print(x / y)
x = 10
y = 2
print(x % y)
x = 3
y = 6
print(x**y)
x = 18
y = 4
print(x // y)
# Python Assignment Operators
# Assignment operators are used to assign values to variables
s = 10
s
a = 8
a += 2
print(a)
x = 7
x -= 2
print(x)
y = 9
y /= 5
print(y)
x = 9
x %= 4
print(x)
r = 15
r //= 7
print(r)
x = 66
x **= 8
print(x)
# Python Comparison Operators
# Comparison operators are used to compare two values
x = 22
y = 3
print(x == y)
p = 6
r = 2
print(p != r)
a = 11
b = 3
print(a > b)
x = 6
y = 1
print(x < y)
x = 4
y = 2
print(x >= y)
a = 4
b = 3
print(a <= b)
# Python Logical Operators
# Logical operators are used to combine conditional statements: "and, or, not"
a = 3
print(a > 3 and a < 10)
x = 2
print(x > 5 or x < 4)
a = 6
print(not (a > 4 and a < 10))
# Python Identity Operators
# Identity operators are used to compare objects, not to check whether they are equal, but whether they are actually the same object with the same memory location
a = ["kahve", "çay"]
b = ["kahve", "çay"]
c = a
print(a is c)
print(a is b)
print(a == b)
x = ["kivi", "çilek"]
y = ["kivi", "çilek"]
z = x
print(x is not z)
print(x is not y)
print(x != y)
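# The operator groups listed above also include membership and bitwise operators, which are not
# demonstrated elsewhere in this section; a small illustrative sketch of both follows.
fruits = ["kivi", "çilek"]
print("kivi" in fruits)  # membership operator "in" -> True
print("elma" not in fruits)  # membership operator "not in" -> True
print(6 & 3)  # bitwise AND -> 2
print(6 | 3)  # bitwise OR -> 7
print(6 ^ 3)  # bitwise XOR -> 5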
|
# ## Exploratory Data Analysis | Reinforcing the Skills
# 
# **The dataset we will use contains 3 classes of 50 samples each, where each class is a type of iris plant.**
# Let's load the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
# The numpy library is a Python library used for scientific computing and data analysis.
# The seaborn library is a Python library used for data visualization and exploration.
# The pandas library is a Python library used for data analysis and manipulation.
# Let's load our dataset from the current directory, turn it into a data frame and assign it to the variable df. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
# We pass the file path to the read_csv() function.
# Let's display the first 5 observations of the data frame.
df.head()
# The head() function is used to display the first few rows (5 by default) of a DataFrame or a Series object.
# It is a handy tool for quickly inspecting a dataset and understanding the structure of the data.
# Let's display how many features and observations the data frame consists of.
df.shape
# Used to understand the size of the data frame (i.e. how many rows and columns it has).
# Let's display the data types of the variables in the data frame and its memory usage.
df.info()
# The info() function provides information about a pandas DataFrame or Series.
# It returns the name of each column, the total number of entries in each column,
# the data types, and whether each column contains missing values.
# Let's display the basic statistics for the numerical variables in the data frame.
# Using the standard deviation and mean values, let's reason about which variables have how much variance.
df.describe().T
# The describe() function provides a statistical summary of a pandas DataFrame or Series.
# It returns the numerical properties of each column in the dataset, i.e. statistics such as the mean, standard deviation, minimum, maximum and quartiles.
# **Variance is the square of the standard deviation; accordingly, petal_length has the largest variance and sepal_width the smallest.**
# Let's observe how many missing values there are in each feature of the data frame.
df.isnull().sum()
# The df.isnull().sum() expression counts how many missing (NaN) values there are in each column of a pandas data frame.
# The code df.isna().sum() could be used as well.
# Let's draw a correlation matrix to show whether the numerical variables are correlated, and reason about the correlation coefficients.
# Between which two variables is the strongest positive relationship?
df.corr()
# The df.corr() expression computes the correlation matrix between the columns of a pandas data frame.
# The correlation matrix measures the strength and direction of the linear relationship between two variables.
# It shows the relationship of each column with the other columns, and each element is the correlation coefficient between two columns.
# A correlation coefficient close to 1 indicates a strong positive relationship.
# A correlation coefficient close to -1 indicates a strong negative relationship.
# A correlation coefficient close to 0 indicates no relationship, or a very weak one, between the two variables.
# **Positive correlation indicates a linear relationship in which an increase in one variable is associated with an increase in the other. Negative correlation indicates an inverse relationship in which an increase in one variable is associated with a decrease in the other.**
# **According to the table, the relationship closest to 1, i.e. the strongest positive relationship, is between "petal_length" and "petal_width".**
# Let's draw a heat map to read the correlation coefficients more easily.
corr = df.corr()
sns.heatmap(
corr, annot=True, xticklabels=corr.columns.values, yticklabels=corr.columns.values
)
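# A small sketch (not part of the original exercise) to confirm the strongest off-diagonal
# correlation programmatically rather than reading it off the heatmap.
corr_pairs = corr.where(~np.eye(len(corr), dtype=bool)).abs().unstack().dropna()
print(corr_pairs.idxmax(), round(corr_pairs.max(), 3))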
# Let's display the unique values of our data frame's target variable "species".
df["species"].unique()
# The unique() function is used to find the unique (non-repeating) values in a dataset.
# It can be called on a pandas Series or a NumPy array and returns a list of the unique values.
# Let's display how many unique values our data frame's target variable "species" contains.
df["species"].nunique()
# The nunique() function is used to count the number of unique (non-repeating) values in a pandas DataFrame or Series.
# It can be called on a pandas DataFrame or Series and returns the number of unique values (the total number of non-repeating values).
# len(df["species"].unique()) would be an alternative piece of code.
# We see that the sepal_width and sepal_length variables in the data frame are continuous. Let's first use a scatterplot to visualize these two continuous variables.
sns.scatterplot(x="sepal_length", y="sepal_width", data=df, color="green")
# scatterplot() is a type of plot used to visualize the relationship between two variables.
# Let's visualize the same two variables from a different angle, together with their frequencies, using a jointplot.
sns.jointplot(x="sepal_length", y="sepal_width", data=df, color="green")
# The jointplot() function is a seaborn function used to visualize the relationship between two variables.
# Let's visualize the same two variables with a scatterplot again, but this time split them by the target variable using the hue="species" parameter.
# Could a clustering be made with the sepal variables across the 3 different colors? Let's think about how separable they are.
sns.scatterplot(x="sepal_length", y="sepal_width", hue="species", data=df)
# **In the resulting plot, the "setosa" variety is usually more distinguishable from the others, while the difference between the "versicolor" and "virginica" varieties is less pronounced. Because these observations lie close together, a clustering would not be very suitable here.**
# Let's use the value_counts() function to check how balanced our data frame is.
df.value_counts()
# The value_counts() function in pandas counts the unique values of a pandas Series and how many times each value is repeated.
# **In general, a dataset is considered balanced if each class has roughly the same number of observations. Based on the table, we can say the distribution is balanced.**
# Draw a violin plot to examine the distribution of the sepal_width variable.
# What does this distribution tell us; can we say it is a normal distribution?
sns.violinplot(y="sepal_width", data=df, color="green")
# The violinplot() function is a plot type drawn with the seaborn library that shows the distribution of a dataset.
# The maximum of the curve is at the arithmetic mean, and the curve is symmetric about the mean.
# **Looking at the plot, the sepal_width variable appears to follow a normal distribution. Normally distributed datasets are spread symmetrically in a "bell shape", and as can be seen, the values in this dataset are distributed symmetrically in a bell shape.**
# Let's draw a distplot of sepal_width to understand this better.
sns.distplot(df["sepal_width"], bins=13, color="green")
# The distplot() function in seaborn is used to visualize the distribution of a dataset. It shows a histogram together with a kernel density plot.
# The function also lets you analyse outliers in the dataset and visually check whether the data are symmetric.
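# As an additional, hedged check of the visual impression (not part of the original exercise), a
# Shapiro-Wilk test can be applied: a p-value above 0.05 means normality is not rejected.
from scipy import stats
shapiro_stat, shapiro_p = stats.shapiro(df["sepal_width"])
print(f"Shapiro-Wilk statistic: {shapiro_stat:.3f}, p-value: {shapiro_p:.3f}")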
# Let's visualize three violin plots, one for each flower species, of the distribution of the sepal_length variable in a single line.
sns.violinplot(y="sepal_length", x="species", data=df)
# How many observations of each flower species does our data frame contain?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually let's pass the species column to the sns.countplot() function.
sns.countplot(x="species", data=df)
# The countplot() function in seaborn is used to visualize the frequency of categorical data.
# It counts how many data points there are for each category and shows these counts as a bar chart.
# Let's visualize sepal_length and sepal_width with sns.jointplot and examine the distribution and the regions where its frequency is high.
sns.jointplot(x="sepal_length", y="sepal_width", data=df, color="green")
# The jointplot() function in seaborn is used to visualize the relationship between two numerical variables. It combines histograms showing the distribution of each variable with a scatter plot or a hex plot showing the relationship between them.
# The mode is the most frequently repeated value in the data; the number of repetitions gives the frequency.
# **According to the plot, the frequency is also high where the distribution is dense.**
# Let's add the kind="kde" parameter to the previous visualization. This turns the point-based view of the distribution into a density-oriented visualization.
sns.jointplot(x="sepal_length", y="sepal_width", data=df, kind="kde", color="green")
# The kind="kde" parameter is used in the seaborn jointplot() function.
# It determines the type of plot used to show the relationship between the two variables and draws density contours using a kernel density estimate (KDE).
# Let's draw the distributions of the petal_length and petal_width variables with a scatterplot.
sns.scatterplot(x="petal_length", y="petal_width", data=df, color="green")
# Let's add a third dimension to the same visualization with the hue="species" parameter.
sns.scatterplot(x="petal_length", y="petal_width", hue="species", data=df)
# Let's implement the sns.lmplot() visualization with the petal_length and petal_width variables, and answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(x="petal_length", y="petal_width", data=df)
# The lmplot() function is part of the seaborn library and is used to visualize the relationship between two variables in a dataset.
# It draws the data points as a scatter plot and also computes and plots the linear regression line between them.
# **This visualization shows a strong, positive relationship between petal length and petal width: as a flower's petal length increases, its petal width increases as well. This is a relationship with a high correlation, usually measured by the Pearson correlation coefficient.**
# To reinforce this answer, let's print the correlation coefficient between the two variables.
df.corr()["petal_length"]["petal_width"]
# Let's create a new total_length feature by adding petal_length and sepal_length.
df["total_length"] = df["petal_length"] + df["sepal_length"]
# Let's print the mean of total_length.
df["total_length"].mean()
# mean() computes the arithmetic mean of the numbers in a numeric series.
# The arithmetic mean is obtained by dividing the sum of all the numbers in the series by its length.
# Let's print the standard deviation of total_length.
df["total_length"].std()
# std() (standard deviation) is a statistical function used to measure the spread of a numeric series.
# The standard deviation is a measure of how the values in a series are dispersed around the mean.
# Let's print the maximum value of sepal_length.
df["sepal_length"].max()
# max() is a function that returns the largest value in a dataset.
# Let's print the observations whose sepal_length is greater than 5.5 and whose species is setosa.
df[(df["species"] == "Iris-setosa") & (df["sepal_length"] > 5.5)]
# Let's print only the sepal_length and sepal_width variables (and their values) of the observations whose petal_length is smaller than 5 and whose species is virginica.
df[(df["petal_length"] < 5) & (df["species"] == "Iris-virginica")].filter(
["sepal_length", "sepal_width"]
)
# The filter() function in pandas is used to select the columns of a data frame according to the given criteria.
# It is especially useful on large datasets when you only want to work with certain columns or when some columns are unnecessary.
# Let's group by our target variable "species" and display the means of our variables.
df.groupby("species").mean()
# The groupby() function in pandas is used to group the data by one or more columns and perform operations on those groups.
# Grouping by our target variable "species", let's print the standard deviation of the petal_length variable (the describe() summary below includes the std column).
df.groupby(["species"]).describe()["petal_length"]
# It can also be viewed transposed --> df.groupby(["species"]).describe()["petal_length"].T
|
import cv2
import shutil
import os
import time
import argparse
import glob
import unicodedata
import os
import subprocess
import pandas as pd
# def is_english_only(string):
# for s in string:
# cat = unicodedata.category(s)
# if not cat in ['Ll', 'Lu', 'Nd', 'Po', 'Pd', 'Zs']:
# return False
# return True
# df = pd.read_parquet('/kaggle/input/diffusiondb-metadata/metadata-large.parquet')
# print(df.shape)
# df['prompt'] = df['prompt'].str.strip()
# df.drop_duplicates(subset='prompt', inplace=True)
# print(df.shape)
# # df = df[(df['width'] == 512) & (df['height'] == 512)]
# # print(df.shape)
# df['prompt'] = df['prompt'].str.strip()
# print(df.shape)
# df = df[df['prompt'].map(lambda x: len(x.split())) >= 5]
# print(df.shape)
# df = df[~df['prompt'].str.contains('^(?:\s*|NULL|null|NaN)$', na=True)]
# print(df.shape)
# df = df[df['prompt'].apply(is_english_only)]
# print(df.shape)
# df['head'] = df['prompt'].str[:15]
# df['tail'] = df['prompt'].str[-15:]
# print(df.shape)
# df.drop_duplicates(subset='head', inplace=True)
# print(df.shape)
# df.drop_duplicates(subset='prompt', inplace=True)
# print(df.shape)
# df.reset_index(drop=True, inplace=True)
# print(df.shape)
# new_df = pd.DataFrame(columns=df.columns)
# all_img_num = 0
# for idx in range(1, 401):
# print(idx,'.....................................................')
# subprocess.check_output("wget -q https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/diffusiondb-large-part-1/part-{0}.zip -O part-{0}.zip > /dev/null".format(str(idx).zfill(6)), shell=True)
# subprocess.check_output('unzip part-{0}.zip -d /kaggle/working/{0}/ > /dev/null'.format(str(idx).zfill(6)), shell=True)
# subprocess.check_output('rm part-{0}.zip'.format(str(idx).zfill(6)), shell=True)
# for path in glob.glob('./{0}/*.webp'.format(str(idx).zfill(6))):
# file_name = path.split('/')[-1]
# if file_name in df['image_name'].values:
#             # extract the row from the original DataFrame whose image_name matches the file name
# matching_row = df[df['image_name'] == file_name]
# matching_row['image_name'] = path.split('/')[-2]+'/'+file_name
#             # append the extracted row to the new DataFrame
# new_df = new_df.append(matching_row, ignore_index=True)
# else:
# os.unlink(path)
# all_img_num += len(glob.glob('/kaggle/working/{0}/*.webp'.format(str(idx).zfill(6))))
# print(all_img_num)
# subprocess.check_output('zip -r {0}.zip {0} > /dev/null'.format(str(idx).zfill(6)), shell=True)
# subprocess.check_output('rm -rf {0}'.format(str(idx).zfill(6)), shell=True)
# new_df = new_df[['image_name', 'prompt']]
# new_df
# new_df.to_csv('large_data_52W.csv')
|
# Project 3 - Isaak Cesar Bocanegra-Estrada
import numpy as np # linear algebra
import matplotlib.pyplot as plt # plotting
import seaborn as sns
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
dataset = pd.read_csv("/kaggle/input/dataset-kidney-stone/dataset-kidney-stone.csv")
# Identify the numerical features
num_features = dataset.select_dtypes(include=[np.number]).columns.tolist()
# Loop through each numerical feature and create a distribution plot
for feature in num_features:
sns.displot(dataset[feature], kde=False)
# Add vertical lines for mean and median
mean = np.mean(dataset[feature])
median = np.median(dataset[feature])
plt.axvline(mean, color="r", linestyle="dashed", linewidth=2)
plt.axvline(median, color="g", linestyle="dashed", linewidth=2)
# Set the plot labels
plt.xlabel(feature)
plt.ylabel("Frequency")
plt.title("Distribution of " + feature)
if "Unnamed" in feature:
dataset = dataset.drop(feature, axis=1)
# Display the plot
plt.show()
from scipy import stats
# Identify the numerical features
num_features = dataset.select_dtypes(include=[np.number]).columns.tolist()
# Loop through each numerical feature and remove outliers using z-score method
for feature in num_features:
z = np.abs(stats.zscore(dataset[feature]))
threshold = 3
dataset = dataset[(z < threshold)]
# Display the updated dataset without outliers
print(dataset.head())
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
# Prepare the data
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
# Display the dataset again (note: the features have not been standardized here; only the target was label-encoded)
print(dataset.head())
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.metrics import (
accuracy_score,
mean_squared_error,
r2_score,
mean_absolute_error,
)
import xgboost as xgb
# Split the dataset into training and testing sets using an 80:20 ratio
train_data, test_data, train_target, test_target = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Train an XGBoost model on the training data
params = {"objective": "binary:logistic", "eval_metric": "logloss"}
dtrain = xgb.DMatrix(train_data, label=train_target)
bst = xgb.train(params, dtrain)
# Use the XGBoost model to make predictions on the testing data
dtest = xgb.DMatrix(test_data)
y_pred = bst.predict(dtest)
# Evaluate the performance of the XGBoost model on the testing data
accuracy = accuracy_score(test_target, y_pred.round())
mse = mean_squared_error(test_target, y_pred)
r2 = r2_score(test_target, y_pred)
mae = mean_absolute_error(test_target, y_pred)
print("Accuracy: {:.2f}%".format(accuracy * 100))
print("Mean Squared Error: {:.4f}".format(mse))
print("R-Squared Score: {:.4f}".format(r2))
print("Mean Absolute Error: {:.4f}".format(mae))
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.linear_model import LogisticRegression
# Train a logistic regression model
lr_model = LogisticRegression(random_state=42)
lr_model.fit(train_data, train_target)
# Generate predicted probabilities for the testing data
test_pred_prob = lr_model.predict_proba(test_data)[:, 1]
# Calculate ROC curve and AUC score
fpr, tpr, thresholds = roc_curve(test_target, test_pred_prob)
auc_score = roc_auc_score(test_target, test_pred_prob)
# Plot ROC curve
plt.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % auc_score)
plt.plot([0, 1], [0, 1], "k--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver Operating Characteristic (ROC) Curve")
plt.legend(loc="lower right")
plt.show()
from sklearn.model_selection import RandomizedSearchCV
# Define the hyperparameter grid to search over
hyperparameters = {
"max_depth": range(1, 10),
"min_child_weight": [1, 3, 5],
"subsample": [0.6, 0.8, 1.0],
"colsample_bytree": [0.6, 0.8, 1.0],
"gamma": [0, 0.1, 0.2, 0.3],
"learning_rate": np.linspace(0.01, 0.5, 100),
"n_estimators": range(50, 200, 10),
}
xgb_model = xgb.XGBRegressor(objective="reg:squarederror")  # note: the search uses a regressor; the best parameters are reused for the classifier below
# Create a random search object
random_cv = RandomizedSearchCV(
estimator=xgb_model,
param_distributions=hyperparameters,
cv=5,
n_iter=50,
n_jobs=-1,
)
# Fit the random search object to the training data
random_cv.fit(train_data, train_target)
# Print the best hyperparameters found
print(random_cv.best_params_)
best_model = xgb.XGBClassifier(**random_cv.best_params_)
best_model.fit(X, y)
# Make predictions on the entire dataset
y_pred = best_model.predict(X)
# Evaluate the performance of the XGBoost model on the testing data
accuracy = accuracy_score(y, y_pred.round())
mse = mean_squared_error(y, y_pred)
r2 = r2_score(y, y_pred)
mae = mean_absolute_error(y, y_pred)
print("\n")
print("Accuracy: {:.2f}%".format(accuracy * 100))
print("Mean Squared Error: {:.4f}".format(mse))
print("R-Squared Score: {:.4f}".format(r2))
print("Mean Absolute Error: {:.4f}".format(mae))
print("\nAnother ROC Curve\n")
y_pred_proba = best_model.predict_proba(test_data)[:, 1] # predicted probabilities
roc_auc = roc_auc_score(test_target, y_pred_proba)
fpr, tpr, thresholds = roc_curve(test_target, y_pred_proba)
plt.figure()
plt.plot(fpr, tpr, label="XGBoost (area = %0.2f)" % roc_auc)
plt.plot([0, 2], [0, 2], "r--")
plt.xlim([0, 2])
plt.ylim([0, 2])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver Operating Characteristic")
plt.legend(loc="lower right")
plt.show()
# The conclusions drawn here, as shown by the accuracy obtained after applying the
# XGBoost method, are that we can boost accuracy (by up to 20% in this case)
# and obtain even better results when using optimal hyperparameters.
# Essentially, the hyperparameters can make all the difference when predicting,
# as using the optimal ones can significantly improve the model's accuracy.
# Randomized search CV was very helpful for finding good parameters
# and should be used in future analyses, as it can produce a much more accurate model.
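# A hedged sanity-check sketch: the tuned model above was refit and evaluated on the full
# dataset, so a cross-validated accuracy gives a more conservative, leakage-free estimate.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(
    xgb.XGBClassifier(**random_cv.best_params_), X, y, cv=5, scoring="accuracy"
)
print("5-fold CV accuracy: {:.2f}% (+/- {:.2f})".format(cv_scores.mean() * 100, cv_scores.std() * 100))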
|
import os
os.listdir("../input/handwritten-digits")
loc0 = "../input/handwritten-digits/digit_0"
loc1 = "../input/handwritten-digits/digit_1"
loc2 = "../input/handwritten-digits/digit_2"
loc3 = "../input/handwritten-digits/digit_3"
loc4 = "../input/handwritten-digits/digit_4"
loc5 = "../input/handwritten-digits/digit_5"
loc6 = "../input/handwritten-digits/digit_6"
loc7 = "../input/handwritten-digits/digit_7"
loc8 = "../input/handwritten-digits/digit_8"
loc9 = "../input/handwritten-digits/digit_9"
from tqdm import tqdm
import cv2
features = []
# read every digit image in grayscale and resize it to 50x50, looping over the ten class folders
locs = [loc0, loc1, loc2, loc3, loc4, loc5, loc6, loc7, loc8, loc9]
for loc in locs:
    for i in tqdm(os.listdir(loc)):
        f = cv2.imread(os.path.join(loc, i), 0)
        fr = cv2.resize(f, (50, 50))
        features.append(fr)
import numpy as np
X = np.array(features)
X.shape
labels = []
# assign the digit label matching each folder, in the same order the images were read above
for digit, loc in enumerate(locs):
    for i in tqdm(os.listdir(loc)):
        labels.append(digit)
Y = np.array(labels)
Y.shape
import pandas as pd
ft = pd.DataFrame(X.reshape(X.shape[0], -1))
lt = pd.DataFrame(Y.reshape(-1, 1), columns=["Labels"])
digits = pd.concat((ft, lt), axis="columns")
digits.to_csv("digits.csv")
import matplotlib.pyplot as plt
plt.imshow(X[6])
plt.show()
X = ft.values
Y = lt.values
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(X, Y)
from sklearn.ensemble import RandomForestClassifier
rmodel = RandomForestClassifier(max_depth=22)
rmodel.fit(xtrain, ytrain)
print(rmodel.score(xtrain, ytrain))
print(rmodel.score(xtest, ytest))
|
import torch
import matplotlib.pyplot as plt
import numpy as np
# Add path to load context
import sys
sys.path.insert(1, "/kaggle/input/data-wp5-cifar10-context")
# Load data
GROUPS = torch.load("/kaggle/input/data-wp5-cifar10-context/groups.pt")
CONTEXT = torch.load("/kaggle/input/data-wp5-cifar10-context/context.pt")
# Map data, keep training
GROUPS = np.array(GROUPS)
CLASS = np.zeros((4, len(CONTEXT.train_dataset))).astype(bool)
for index, (_, _, id) in enumerate(CONTEXT.train_dataset):
CLASS[:, index] = GROUPS[:, id]
# Display
for i in range(4):
plt.scatter(np.arange(len(CLASS[i])), CLASS[i] * 0.2 + i * 0.5, s=0.0002)
plt.yticks(
[*[i * 0.5 + 0.2 for i in range(4)], *[i * 0.5 for i in range(4)]],
[
*[f"Group{i} \n count: {np.sum(CLASS[i])}" for i in range(4)],
*["not in this group" for i in range(4)],
],
)
plt.show()
scoresList = [(0, 0), (1, 2), (3, 5), (6, 20)]
plt.pie(
np.sum(CLASS, axis=1),
labels=[
f"Group {i}\n count: {np.sum(CLASS[i])}\n prop: {round(np.sum(CLASS[i])/400,2)}%\n forgetscore range: {scoresList[i]}"
for i in range(4)
],
labeldistance=1.4,
)
plt.show()
|
# By Astitva Prakash (20301432)
# B. Tech CSE-G
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import (
accuracy_score,
classification_report,
roc_curve,
confusion_matrix,
)
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = "/kaggle/input/pulsar-star/pulsar_star.csv"
df = pd.read_csv(data)
# ## 1. Exploratory Data Analysis
df.shape
col_names = df.columns
df.columns = df.columns.str.strip()
df.columns = [
"IP Mean",
"IP SD",
"IP Kurtosis",
"IP Skewness",
"DM-SNR Mean",
"DM-SNR SD",
"DM-SNR Kurtosis",
"DM-SNR Skewness",
"target",
]
df["target"].value_counts() / np.float64(len(df))
df.info()
df.isnull().sum()
# **Data Summary**
# * 8 continuous variables, 1 discrete variable
# * Discrete value is `target`
# * There are no missing values
# ### Outliers among numerical values
round(df.describe(), 2)
plt.figure(figsize=(24, 20))
plt.subplot(4, 2, 1)
fig = df.boxplot(column="IP Mean")
fig.set_title("")
fig.set_ylabel("IP Mean")
plt.subplot(4, 2, 2)
fig = df.boxplot(column="IP SD")
fig.set_title("")
fig.set_ylabel("IP SD")
plt.subplot(4, 2, 3)
fig = df.boxplot(column="IP Kurtosis")
fig.set_title("")
fig.set_ylabel("IP Kurtosis")
plt.subplot(4, 2, 4)
fig = df.boxplot(column="IP Skewness")
fig.set_title("")
fig.set_ylabel("IP Skewness")
plt.subplot(4, 2, 5)
fig = df.boxplot(column="DM-SNR Mean")
fig.set_title("")
fig.set_ylabel("DM-SNR Mean")
plt.subplot(4, 2, 6)
fig = df.boxplot(column="DM-SNR SD")
fig.set_title("")
fig.set_ylabel("DM-SNR SD")
plt.subplot(4, 2, 7)
fig = df.boxplot(column="DM-SNR Kurtosis")
fig.set_title("")
fig.set_ylabel("DM-SNR Kurtosis")
plt.subplot(4, 2, 8)
fig = df.boxplot(column="DM-SNR Skewness")
fig.set_title("")
fig.set_ylabel("DM-SNR Skewness")
# **Distribution of Variables**
# We check if the distribution is normal or skewed
plt.figure(figsize=(24, 20))
plt.subplot(4, 2, 1)
fig = df["IP Mean"].hist(bins=20)
fig.set_xlabel("IP Mean")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 2)
fig = df["IP SD"].hist(bins=20)
fig.set_xlabel("IP SD")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 3)
fig = df["IP Kurtosis"].hist(bins=20)
fig.set_xlabel("IP Kurtosis")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 4)
fig = df["IP Skewness"].hist(bins=20)
fig.set_xlabel("IP Skewness")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 5)
fig = df["DM-SNR Mean"].hist(bins=20)
fig.set_xlabel("DM-SNR Mean")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 6)
fig = df["DM-SNR SD"].hist(bins=20)
fig.set_xlabel("DM-SNR SD")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 7)
fig = df["DM-SNR Kurtosis"].hist(bins=20)
fig.set_xlabel("DM-SNR Kurtosis")
fig.set_ylabel("Number of pulsar stars")
plt.subplot(4, 2, 8)
fig = df["DM-SNR Skewness"].hist(bins=20)
fig.set_xlabel("DM-SNR Skewness")
fig.set_ylabel("Number of pulsar stars")
# All continuous variables seem skewed.
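# A small hedged sketch to quantify the visual impression of skewness: pandas' skew() reports the
# sample skewness of each column (values far from 0 indicate strong skew).
print(df.drop("target", axis=1).skew().round(2))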
# ## Declaring vectors and targets for training and testing
X = df.drop("target", axis=1)
y = df["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
X_train.shape, X_test.shape
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
X_train.describe()
# ## Running classification with SVM
svc = SVC()
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
print(
"Model Accuracy score with default parameters: {0:0.4f}".format(
accuracy_score(y_test, y_pred)
)
)
# We should attempt to modify hyperparameters
svc = SVC(C=10.0)
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
print("Model accuracy score with C=10: {0:0.4f}".format(accuracy_score(y_test, y_pred)))
# ## Confusion Matrix and Classification Metrics
cm = confusion_matrix(y_test, y_pred)
cmat = pd.DataFrame(
    data=cm,
    columns=["Predicted Negative", "Predicted Positive"],
    index=["Actual Negative", "Actual Positive"],
)
sns.heatmap(cmat, annot=True, fmt="d", cmap="YlGnBu")
print(classification_report(y_test, y_pred))
# In sklearn's confusion matrix, rows are actual labels and columns are predicted labels,
# ordered [0, 1]; here class 1 (pulsar) is treated as the positive class.
TN = cm[0, 0]
FP = cm[0, 1]
FN = cm[1, 0]
TP = cm[1, 1]
classification_accuracy = (TP + TN) / float(TP + TN + FP + FN)
classification_error = (FP + FN) / float(TP + TN + FP + FN)
print("Classification accuracy: {0:0.4f}".format(classification_accuracy))
print("Classification error: {0:0.4f}".format(classification_error))
precision = TP / float(TP + FP)
recall = TP / float(TP + FN)
true_positive_rate = TP / float(TP + FN)
false_positive_rate = FP / float(FP + TN)
specificity = TN / (TN + FP)
f1 = 2 * (precision * recall) / (precision + recall)
mcc = ((TP * TN) - (FP * FN)) / np.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
print("Precision: {0:0.4f}".format(precision))
print("Recall: {0:0.4f}".format(recall))
print("TRP: {0:0.4f}".format(true_positive_rate))
print("FPR: {0:0.4f}".format(false_positive_rate))
print("Specificity: {0:0.4f}".format(specificity))
print("F1-Score: {0:0.4f}".format(f1))
print("MCC: {0:0.4f}".format(mcc))
# use continuous decision scores (rather than hard 0/1 predictions) so the ROC curve is meaningful
fpr, tpr, thresholds = roc_curve(y_test, svc.decision_function(X_test))
plt.figure(figsize=(6, 4))
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0, 1], [0, 1], "k--")
plt.rcParams["font.size"] = 12
plt.title("ROC curve for Predicting a Pulsar Star classifier")
plt.xlabel("False Positive Rate (1 - Specificity)")
plt.ylabel("True Positive Rate (Sensitivity)")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import copy
from shapely.geometry import shape, GeometryCollection, Polygon, MultiPolygon
from shapely.affinity import affine_transform
from PIL import Image, ImageOps
import nudged
import numpy as np
from skimage.morphology import convex_hull_image
import matplotlib.pyplot as plt
import json
import plotly.graph_objs as go
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
paths = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
site = "5cd56b6ae2acfd2d33b59ccb"
floor = "F4"
geojson_file = (
"/kaggle/input/indoor-location-navigation/metadata/%s/%s/geojson_map.json"
% (site, floor)
)
infos_file = (
"/kaggle/input/indoor-location-navigation/metadata/%s/%s/floor_info.json"
% (site, floor)
)
image_file = (
"/kaggle/input/indoor-location-navigation/metadata/%s/%s/floor_image.png"
% (site, floor)
)
image = Image.open(image_file)
with open(infos_file, "rb") as f:
infos = json.load(f)
with open(geojson_file, "rb") as f:
geojson = json.load(f)
image
def extract_coords_from_polygon(polygon):
coords = []
if type(polygon) == MultiPolygon:
polygons = polygon.geoms
else:
polygons = [polygon]
for polygon in polygons:
x, y = polygon.exterior.xy
coords.append((np.array(x), np.array(y)))
for interior in polygon.interiors:
x, y = interior.xy
coords.append((np.array(x), np.array(y)))
return coords
def get_bounding_box(x, y):
x_min = min(x)
y_min = min(y)
x_max = max(x)
y_max = max(y)
return np.array([[x_min, y_min], [x_min, y_max], [x_max, y_min], [x_max, y_max]])
def plot_shape(shapes):
if type(shapes) == Polygon:
shapes = [shapes]
for shape in shapes:
for interior in shape.interiors:
plt.plot(*interior.xy)
plt.plot(*shape.exterior.xy)
def extract_geometries(geojson):
# Extract floor plan geometry (First geometry)
floor = copy.deepcopy(geojson)
floor["features"] = [floor["features"][0]]
floor_layout = GeometryCollection(
[shape(feature["geometry"]).buffer(0) for feature in floor["features"]]
)[0]
# Extract shops geometry (remaining ones)
shops = copy.deepcopy(geojson)
shops["features"] = shops["features"][1:]
shops_geometry = GeometryCollection(
[shape(feature["geometry"]).buffer(0.1) for feature in shops["features"]]
)
shops_geometry
# Geometry differences to get corridor (floor layout - shops)
corridor = copy.deepcopy(floor_layout)
for shop in shops_geometry:
corridor = corridor.difference(shop)
return floor_layout, corridor
def extract_image_bounding_box(image):
# Flip and convert to black and white
gray_image = ImageOps.flip(image).convert("LA")
bw_image = np.array(gray_image.point(lambda p: p > 251 and 255)) > 0
bw_image = Image.fromarray(bw_image.any(axis=2) == True)
# Get convex hull
ch_image = convex_hull_image(np.array(bw_image))
# Transform to coordinates
image_y, image_x = np.where(ch_image == True)
bounding_box = get_bounding_box(image_x, image_y)
return bounding_box
def extract_geojson_bounding_box(floor_layout):
# Get convex hull
ch_geojson = floor_layout.convex_hull
coords = [coord for coord in ch_geojson.exterior.coords]
geojson_x = [coord[0] for coord in coords]
geojson_y = [coord[1] for coord in coords]
bounding_box = get_bounding_box(geojson_x, geojson_y)
return bounding_box
def find_translation(points_a, points_b):
"""
Find best translation between 2 sets of points
Map right coefficients for:
https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.affine_transform
"""
trans = nudged.estimate(points_a, points_b)
matrix_cooefs = np.ravel(trans.get_matrix())
trans_coeffs = [
matrix_cooefs[0],
matrix_cooefs[1],
matrix_cooefs[3],
matrix_cooefs[4],
matrix_cooefs[2],
matrix_cooefs[5],
]
return trans_coeffs
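# A hedged sanity-check sketch (assumes the nudged Transform exposes a transform() method and the
# shapely affine_transform coefficient order [a, b, d, e, xoff, yoff]): applying the returned
# coefficients to a source point should match nudged's own transform of that point.
from shapely.geometry import Point
def check_translation(points_a, points_b):
    trans = nudged.estimate(points_a, points_b)
    coeffs = find_translation(points_a, points_b)
    via_shapely = affine_transform(Point(points_a[0]), coeffs)
    via_nudged = trans.transform(list(points_a[0]))
    print(list(via_shapely.coords)[0], via_nudged)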
def georeferencing(image, geojson, infos):
"""
:param image: raw PIL image object
:param geojson: dict, geojson format
:param infos: dict, plan infos
"""
# Extract floor layout and corridor geometries from geojson (shapely Polygon/MultiPolygon)
floor_layout, corridor = extract_geometries(geojson)
# Extract bounding boxes both from image and geojson (Using convexhull)
    image_bounding_box = extract_image_bounding_box(image)
    geojson_bounding_box = extract_geojson_bounding_box(floor_layout)
# Find best translation from geojson to image referential
translation_coeffs = find_translation(geojson_bounding_box, image_bounding_box)
# Convert to image size scale
translated_corridor = affine_transform(corridor, translation_coeffs)
# Convert to waypoints scale (using ratio between waypoint scale and image scale)
x_ratio = infos["map_info"]["width"] / image.size[0]
y_ratio = infos["map_info"]["height"] / image.size[1]
waypoint_translation_coeffs = [x_ratio, 0, 0, y_ratio, 0, 0]
translated_corridor = affine_transform(
translated_corridor, waypoint_translation_coeffs
)
return translated_corridor
geometry = georeferencing(image, geojson, infos)
plot_shape(geometry)
coords = extract_coords_from_polygon(geometry)
fig = go.Figure()
fig.update_layout(
images=[
go.layout.Image(
source=image,
xref="x",
yref="y",
x=0,
y=infos["map_info"]["height"],
sizex=infos["map_info"]["width"],
sizey=infos["map_info"]["height"],
sizing="contain",
opacity=1,
layer="below",
)
]
)
for coord in coords:
x, y = coord
fig.add_trace(
go.Scattergl(
x=x,
y=y,
)
)
# configure
fig.update_xaxes(autorange=False, range=[0, infos["map_info"]["width"]])
fig.update_yaxes(
autorange=False,
range=[0, infos["map_info"]["height"]],
scaleanchor="x",
scaleratio=1,
)
fig.update_layout(
title=go.layout.Title(
text="No title.",
xref="paper",
x=0,
),
autosize=True,
width=900,
height=200 + 900 * infos["map_info"]["height"] / infos["map_info"]["width"],
template="plotly_white",
)
fig.show()
|
import pandas as pd
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df
import numpy as np
import matplotlib.pyplot as plt
# Get the pixel values of the first image
pixels = df.iloc[3, 1:].values.reshape(28, 28)
# Display the image using matplotlib
plt.imshow(pixels, cmap="gray")
plt.show()
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
# Split the data into features and target
X = df.drop("label", axis=1) # Features
y = df["label"] # Target
X = X / 255.0
X = X.values.reshape(-1, 28, 28, 1)
y = keras.utils.to_categorical(y, 10)
# Split the data into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=42, shuffle=True
)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
num_classes = 10
input_shape = (28, 28, 1)
model = Sequential()
model.add(
Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
kernel_initializer="he_normal",
input_shape=input_shape,
)
)
model.add(
Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer="he_normal")
)
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
# Train the model
history = model.fit(
X_train, y_train, epochs=30, batch_size=32, validation_data=(X_val, y_val)
)
test_df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
test_df
import matplotlib.pyplot as plt
import numpy as np
# Get the first image as a numpy array
first_image = test_df.iloc[0].to_numpy()
# Reshape the array to a 2D matrix
reshaped_image = np.reshape(first_image, (28, 28))
# Plot the image using matplotlib
plt.imshow(reshaped_image, cmap="gray")
plt.show()
test_df = test_df / 255.0
test_images = test_df.values.reshape(-1, 28, 28, 1)
# Make predictions
predictions = model.predict(test_images)
predicted_labels = np.argmax(predictions, axis=1)
predicted_labels[0]
# Save the predictions to a CSV file
results_df = pd.DataFrame(
{"ImageId": range(1, len(predicted_labels) + 1), "Label": predicted_labels}
)
results_df.to_csv("predictions.csv", index=False)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
df_train.head()
df_train.info()
# # EDA
corr = df_train.corr()
sns.heatmap(corr, annot=True)
cols = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(15, 15))
for i, ax in enumerate(axes.flat):
if i < len(cols):
sns.kdeplot(
df_train[cols[i]][df_train["target"] == 0],
label="kidneystone - No",
fill=True,
ax=ax,
)
sns.kdeplot(
df_train[cols[i]][df_train["target"] == 1],
label="kidneystone - Yes",
fill=True,
ax=ax,
)
plt.legend(["No kidneystone", "kidneystone"], loc="upper right")
plt.xlabel(cols[i])
plt.title(f"{cols[i]} distribution by kidneystone outcome")
plt.show()
# # ML
target = ["target"]
features = df_train.columns.difference(["target", "id"])
X = df_train[features]
y = df_train[target]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=0
)
# ## XGBOOST
param_grid = {
"learning_rate": [0.1, 0.2, 0, 5],
"max_depth": [3, 5, 7],
"min_child_weight": [1, 3, 5],
"gamma": [0.0, 0.1, 0.2],
"subsample": [0.8, 1.0],
"colsample_bytree": [0.8, 1.0],
}
xgb = XGBClassifier(objective="binary:logistic")
grid_search = GridSearchCV(
estimator=xgb, param_grid=param_grid, scoring="roc_auc", cv=5
)
# fit the GridSearchCV object to the data
grid_search.fit(X_train, y_train)
print("Best parameters: ", grid_search.best_params_)
print("Best AUC score: ", grid_search.best_score_)
# implement the best parameters
best_params = grid_search.best_params_
xgb_best = XGBClassifier(objective="binary:logistic", **best_params)
xgb_best.fit(X_train, y_train)
y_pred = xgb_best.predict(X_test)
cm_xgb = confusion_matrix(y_test, y_pred)
sns.heatmap(cm_xgb, annot=True, cmap="Blues", fmt="g")
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.title("Confusion Matrix")
plt.show()
acc_xgb = accuracy_score(y_test, y_pred)
print(acc_xgb)
print(metrics.classification_report(y_test, y_pred))
# ## Decision Tree
param_grid = {
"max_depth": [3, 5, 7],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
"max_features": [None, "sqrt", "log2"],
}
dtree = DecisionTreeClassifier()
grid_search = GridSearchCV(
estimator=dtree, param_grid=param_grid, scoring="roc_auc", cv=3
)
# fit the GridSearchCV object to the data
grid_search.fit(X_train, y_train)
print("Best parameters: ", grid_search.best_params_)
print("Best AUC score: ", grid_search.best_score_)
# implement the best parameters
best_params = grid_search.best_params_
dtree_best = DecisionTreeClassifier(**best_params)
dtree_best.fit(X_train, y_train)
y_pred = dtree_best.predict(X_test)
cm_dt = confusion_matrix(y_test, y_pred)
sns.heatmap(cm_dt, annot=True, cmap="Blues", fmt="g")
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.title("Confusion Matrix")
plt.show()
acc_dt = accuracy_score(y_test, y_pred)
print(acc_dt)
print(metrics.classification_report(y_test, y_pred))
# ## Random Forest
param_grid = {
"n_estimators": [50, 100, 200],
"max_depth": [3, 5, 7],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
"max_features": [None, "sqrt", "log2"],
}
rf = RandomForestClassifier()
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring="roc_auc", cv=3)
# fit the GridSearchCV object to the data
grid_search.fit(X_train, y_train)
print("Best parameters: ", grid_search.best_params_)
print("Best AUC score: ", grid_search.best_score_)
# implement the best parameters
best_params = grid_search.best_params_
rf_best = RandomForestClassifier(**best_params)
rf_best.fit(X_train, y_train)
y_pred = rf_best.predict(X_test)
cm_rf = confusion_matrix(y_test, y_pred)
sns.heatmap(cm_rf, annot=True, cmap="Blues", fmt="g")
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.title("Confusion Matrix")
plt.show()
acc_rf = accuracy_score(y_test, y_pred)
print(acc_rf)
print(metrics.classification_report(y_test, y_pred))
# ## Naive Bayes
param_grid = {"var_smoothing": np.logspace(0, -9, num=100)}
nb = GaussianNB()
grid_search = GridSearchCV(
estimator=nb, param_grid=param_grid, scoring="roc_auc", cv=3, n_jobs=-1
)
# fit the GridSearchCV object to the data
grid_search.fit(X_train, y_train)
print("Best parameters: ", grid_search.best_params_)
print("Best AUC score: ", grid_search.best_score_)
# implement the best parameters
best_params = grid_search.best_params_
nb_best = GaussianNB(**best_params)
nb_best.fit(X_train, y_train)
y_pred = nb_best.predict(X_test)
cm_nb = confusion_matrix(y_test, y_pred)
sns.heatmap(cm_nb, annot=True, cmap="Blues", fmt="g")
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.title("Confusion Matrix")
plt.show()
acc_nb = accuracy_score(y_test, y_pred)
print(acc_nb)
print(metrics.classification_report(y_test, y_pred))
# ## Model comparison
models = pd.DataFrame(
{
"Model": ["Decision Tree", "Random Forest", "Naive Bayes", "XGBoost"],
"Score": [acc_dt, acc_rf, acc_nb, acc_xgb],
}
)
models.sort_values(by="Score", ascending=False, ignore_index=True)
# ## Cross validation
classifiers = []
classifiers.append(xgb_best)
classifiers.append(dtree_best)
classifiers.append(rf_best)
classifiers.append(nb_best)
len(classifiers)
cv_results = []
for classifier in classifiers:
cv_results.append(
cross_val_score(classifier, X_train, y_train, scoring="accuracy", cv=10)
)
cv_mean = []
cv_std = []
for cv_result in cv_results:
cv_mean.append(cv_result.mean())
cv_std.append(cv_result.std())
cv_res = pd.DataFrame(
{
"Cross Validation Mean": cv_mean,
"Cross Validation Std": cv_std,
"Algorithm": ["XGBoost", "Decision Tree", "Random Forest", "Naive Bayes"],
}
)
cv_res.sort_values(by="Cross Validation Mean", ascending=False)
sns.barplot(
x="Cross Validation Mean",
y="Algorithm",
data=cv_res,
order=cv_res.sort_values(by="Cross Validation Mean", ascending=False)["Algorithm"],
palette="Set2",
**{"xerr": cv_std},
)
plt.title("Cross Validation Scores")
# ## Output on df_test
df_test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
df_test_new = df_test[["calc", "cond", "gravity", "osmo", "ph", "urea"]]
y_pred_final = xgb_best.predict_proba(df_test_new)[:, 1]
y_pred_final
data = {"id": df_test["id"], "target": y_pred_final}
df_submission = pd.DataFrame(data)
df_submission
df_submission.to_excel("submission_playgrounds3e12.xlsx", index=False)
|
# # Handwritten digits classification using neural network
# MNIST is a dataset of 60,000 examples of handwritten digits. It is a good database for benchmarking machine learning models.
# All images are 28x28-pixel greyscale images.
# In this notebook we will classify handwritten digits using a simple neural network which has only input and output layers. We will then add a hidden layer and see how the performance of the model improves.
from IPython.display import Image
Image(filename="/kaggle/input/digiii/digi.jpg")
# ## Importing libraries
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
# ## Loading data
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train.shape
plt.matshow(X_train[6])
y_train[6]
# Normalizing pixels by dividing by 255 is a common preprocessing step for image data in neural networks. This is done to scale the pixel values to a range between 0 and 1, which can help the neural network converge faster during training.
# In digital images, each pixel is represented by a numeric value that corresponds to its intensity or color. These values typically range from 0 to 255, with 0 being black and 255 being white (for grayscale images), or various combinations of red, green, and blue values (for color images). However, these pixel values are typically too large for neural networks to handle effectively, which is why normalization is required.
# By dividing each pixel value by 255, we can rescale the values to a range between 0 and 1. This has the effect of making the data more consistent and easier to work with, since all the pixel values will now fall within a narrow range of values.
# Normalization can also help to improve the accuracy of the neural network by making the data less sensitive to variations in lighting conditions or color intensity. This is especially important in image classification tasks, where small variations in pixel values can have a significant impact on the performance of the model.
# In summary, normalizing pixels by dividing by 255 is a common preprocessing step in neural networks for image data. It helps to rescale the pixel values to a consistent range, making the data easier to work with and less sensitive to variations in lighting and color intensity.
# --
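# A quick check (an illustrative addition, not in the original notebook): the raw MNIST
# pixels are integers in [0, 255]; after the division below they fall in [0.0, 1.0].
print("raw pixel range:", X_train.min(), "-", X_train.max())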
# Normalize data
X_train = X_train / 255
X_test = X_test / 255
X_train[0]
# ## Model
from IPython.display import Image
Image(filename="/kaggle/input/digiii/dogi.jpg")
# Flattening data in a neural network refers to the process of transforming multi-dimensional arrays or tensors into one-dimensional arrays. This is typically done before passing the data into a fully connected layer or a neural network model.
# The reason for flattening the data is to convert the input data into a format that can be processed by the neural network's dense layers, which require a one-dimensional array as input. By flattening the data, we can also reduce the size of the input data and make it easier for the model to process.
# Flattening the data is a common pre-processing step for many types of neural network models, such as convolutional neural networks (CNNs) and recurrent neural networks (RNNs). It allows these models to handle inputs of varying sizes and dimensions, while also making the training process more efficient by reducing the number of parameters that need to be learned.
X_train_flattened = X_train.reshape(len(X_train), 28 * 28)
X_test_flattened = X_test.reshape(len(X_test), 28 * 28)
X_train_flattened.shape
# X_train_flattened[0]
# An activation function is a non-linear mathematical function that is applied to the output of a neural network layer. It introduces non-linearity into the network, enabling it to learn more complex relationships between inputs and outputs. Without activation functions, neural networks would simply be a linear regression model, which is limited in its ability to model complex patterns in data.
# There are several types of activation functions used in neural networks, including:
# Sigmoid function: The sigmoid function is a commonly used activation function in neural networks. It maps any real-valued number to a value between 0 and 1, making it useful for tasks such as binary classification.
# ReLU function: The ReLU (rectified linear unit) function is another commonly used activation function. It maps any negative input value to 0, and any positive input value to the same value. ReLU is often used in deep neural networks due to its simplicity and effectiveness.
# Tanh function: The tanh (hyperbolic tangent) function is similar to the sigmoid function, but it maps any real-valued number to a value between -1 and 1. It is useful for tasks such as regression and binary classification.
# Softmax function: The softmax function is used in the output layer of a neural network for multi-class classification tasks. It maps the output of the network to a probability distribution over the possible classes, enabling the network to make predictions for each class.
# The choice of activation function depends on the specific task and the architecture of the neural network. Choosing an appropriate activation function is important for achieving good performance in the network, and is an active area of research in the field of deep learning.
# Here are some general guidelines for choosing activation functions:
# For binary classification tasks, sigmoid activation function is commonly used in the output layer. For multi-class classification tasks, the softmax activation function is often used in the output layer.
# For hidden layers, the ReLU activation function is often a good choice, since it is simple, efficient, and effective. However, it may not be appropriate for all cases, as it can suffer from the "dying ReLU" problem, where some neurons may stop learning due to being stuck in the zero region of the function.
# For recurrent neural networks (RNNs) or Long Short-Term Memory (LSTM) networks, the hyperbolic tangent (tanh) activation function is often used, as it can better capture the long-term dependencies in sequential data.
# If the data being used for training and testing has a large dynamic range or a skewed distribution, it may be useful to consider using activation functions that are more robust to such distributions, such as the LeakyReLU activation function.
# Finally, the choice of activation function can also depend on the specific architecture of the network and the performance metrics being optimized. In some cases, empirical testing of different activation functions may be required to determine the best choice.
# Overall, choosing the right activation function for a neural network is an important step in the model building process, and requires careful consideration of the factors described above.
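# A minimal NumPy sketch (added here for illustration; it is not part of the model code)
# of the four activation functions described above, applied to a small example vector.
import numpy as np

z = np.array([-2.0, -0.5, 0.0, 1.0, 3.0])
sigmoid = 1 / (1 + np.exp(-z))           # squashes values into (0, 1)
relu = np.maximum(0.0, z)                # zeroes out negatives, keeps positives unchanged
tanh = np.tanh(z)                        # squashes values into (-1, 1)
softmax = np.exp(z) / np.exp(z).sum()    # turns the whole vector into a probability distribution
print("sigmoid:", sigmoid.round(3))
print("relu:   ", relu.round(3))
print("tanh:   ", tanh.round(3))
print("softmax:", softmax.round(3), "sum =", softmax.sum().round(3))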
from IPython.display import Image
Image(filename="/kaggle/input/actiiii/activation.jpg")
# The loss function in a neural network is a measure of the difference between the predicted output of the network and the actual output. It is used to evaluate how well the model is performing during training, and to adjust the weights and biases of the network to improve its performance.
# The choice of loss function depends on the specific task and the type of data being used. Here are some common loss functions used in neural networks:
# Mean Squared Error (MSE): This is a common loss function for regression tasks, where the goal is to predict a continuous output. It measures the average squared difference between the predicted output and the actual output.
# Binary Cross-Entropy: This is a common loss function for binary classification tasks, where the output is either 0 or 1. It measures the difference between the predicted output and the actual output, using the logarithmic loss function.
# Categorical Cross-Entropy: This is a common loss function for multi-class classification tasks, where the output can belong to multiple classes. It measures the difference between the predicted output and the actual output, using the logarithmic loss function.
# Hinge loss: This is a loss function that is commonly used for training support vector machines (SVMs) and other models that require margin maximization. It measures the difference between the predicted output and the actual output, using the hinge loss function.
# Kullback-Leibler divergence: This is a loss function that measures the difference between two probability distributions. It is commonly used in generative models such as variational autoencoders.
# The choice of loss function depends on the specific task and the type of data being used. Choosing the appropriate loss function is important for achieving good performance in the network, and is an active area of research in the field of deep learning.
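# A small worked example (an addition for illustration, not from the original notebook) of
# the sparse categorical cross-entropy loss used later in this notebook: for an integer
# class label y and predicted class probabilities p, the loss for one sample is -log(p[y]).
import numpy as np

p = np.array([0.05, 0.10, 0.70, 0.15])  # predicted probabilities over 4 classes
y_true = 2                              # integer ("sparse") class label
loss = -np.log(p[y_true])
print(f"sparse categorical cross-entropy: {loss:.4f}")  # ~0.357; smaller when p[y_true] is larger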
# ## Very simple neural network with no hidden layers
# It's important to note that the number of units in the output layer of a neural network model depends on the specific task you are trying to solve. For example, in a classification task with 10 classes, you would typically set the number of units in the output layer to be equal to the number of classes, which would be 10 in this case. Similarly, for regression tasks, the number of units in the output layer would depend on the desired output dimensionality.
# The sigmoid activation function is commonly used for binary classification tasks, where the goal is to classify an input into one of two classes. For a multi-class classification task with more than two classes, such as the ten digits here, a different activation function such as softmax is the usual choice. The choice of activation function depends on the specific problem you are trying to solve and the desired properties of the output.
#
from IPython.display import Image
Image(filename="/kaggle/input/multiii/multi.jpg")
# The code below uses the Keras API to compile the neural network model with the Adam optimizer, the sparse categorical cross-entropy loss function, and the accuracy metric for evaluation during training. Let's break it down:
# optimizer='adam': Adam is a popular optimization algorithm used for training neural networks. It is an adaptive learning rate optimization algorithm that combines techniques from both AdaGrad and RMSprop. It is known for its ability to handle sparse gradients and perform well on a wide range of deep learning tasks.
# loss='sparse_categorical_crossentropy': Sparse categorical cross-entropy is a loss function commonly used for multi-class classification problems when the target labels are integers. It computes the cross-entropy loss between the predicted probabilities and the true target labels. The "sparse" part indicates that the target labels are represented as integers, rather than one-hot encoded vectors.
# metrics=['accuracy']: During training, the accuracy metric will be used to evaluate the performance of the model. Accuracy is a common metric used in classification tasks that measures the percentage of correctly classified samples out of the total samples.
# The model.compile() function in Keras is used to configure the training process of the neural network model. It takes in various parameters, including the optimizer, loss function, and evaluation metrics, to set up the model for training. After compiling, the model is ready to be trained using the model.fit() function with appropriate training data.
model = keras.Sequential(
[keras.layers.Dense(10, input_shape=(784,), activation="softmax")]
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
model.fit(X_train_flattened, y_train, epochs=5)
model.evaluate(X_test_flattened, y_test)
y_predicted = model.predict(X_test_flattened)
y_predicted[0]
plt.matshow(X_test[0])
# y_predicted_labels = [np.argmax(i) for i in y_predicted]
# In neural networks, np.argmax (short for NumPy argmax) is often used to find the index of the maximum value in an array or tensor along a specific axis.
# This function is commonly used in neural networks for tasks such as prediction and classification. For example, when making predictions on a dataset using a neural network, the output of the last layer is often a probability distribution over the possible classes. The index of the highest probability value in this distribution can be found using np.argmax, which gives the predicted class for a given input.
# Similarly, in the process of training a neural network, np.argmax can be used to calculate the accuracy of the model by comparing the predicted class with the true class label of the input. This allows the model to optimize its parameters to minimize the difference between the predicted and true labels.
# Overall, np.argmax is a useful function in neural networks for tasks such as prediction, classification, and evaluation. It helps to extract meaningful information from the output of a neural network and can aid in the optimization of the model's parameters.
y_predicted_labels = [np.argmax(i) for i in y_predicted]
cm = tf.math.confusion_matrix(labels=y_test, predictions=y_predicted_labels)
import seaborn as sn
plt.figure(figsize=(10, 7))
sn.heatmap(cm, annot=True, fmt="d")
plt.xlabel("Predicted")
plt.ylabel("Truth")
# ## ANN Final model with hidden layers
model = keras.Sequential(
[
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="sigmoid"),
]
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
tb_callback = tf.keras.callbacks.TensorBoard(log_dir="logs/", histogram_freq=1)
model.fit(X_train, y_train, epochs=5, callbacks=[tb_callback])
model.evaluate(X_test, y_test)
y_predicted = model.predict(X_test)
y_predicted[0]
y_predicted_labels2 = [np.argmax(i) for i in y_predicted]
cm2 = tf.math.confusion_matrix(labels=y_test, predictions=y_predicted_labels2)
plt.figure(figsize=(10, 7))
sn.heatmap(cm2, annot=True, fmt="d")
plt.xlabel("Predicted")
plt.ylabel("Truth")
# %load_ext tensorboard
# %tensorboard --logdir logs/fit
|
# # Installing packages, Loading and inspecting dataset to have a sneak peek
import numpy as np
import pandas as pd
# For visualizations
import matplotlib.pyplot as plt
import seaborn as sns
# To avoid encoding error (ENC), using "unicode_escape"
df = pd.read_csv(
"//kaggle/input/diwali-sales-dataset/Diwali Sales Data.csv",
encoding="unicode_escape",
)
# Checking the shape and size of the dataframe
df.shape
# Checking the first 10 rows of the dataframe contents
df.head(10)
# # Data cleaning and preparing for analysis
# Checking the data to analyze the need for cleaning areas
df.info()
# Dropping the blank and/or unrelated columns and saving the dataframe
df.drop(["Status", "unnamed1"], axis=1, inplace=True)
# Rechecking the data shape after dropping the unrelated columns
df.info()
# Checking null values in the dataframe
pd.isnull(df).sum()
# Dropping null values from the dataframe and saving it for further analysis
df.dropna(inplace=True)
# Rechecking the shape of dataframe to see the changes in dataframe after dropping the nulls
pd.isnull(df).sum()
# Changing the data type from float to integer
df["Amount"] = df["Amount"].astype("int")
# Checking data type after the conversion
df["Amount"].dtypes
# Checking the column names to determine the need for further conversion or changes
df.columns
# Renaming column for more clarity
df.rename(
columns={"Cust_name": "Customer_name", "Marital_Status": "Relationship_Status"},
inplace=True,
)
# Rechecking column names after renaming
df.columns
# Using describe() to check the description of the "Orders" and "Amount" columns
df[["Orders", "Amount"]].describe()
# # Exploratory Data Analysis (EDA)
# ## Based on Gender
# Checking order placement data for potential patterns or trends in purchasing behavior across genders
ax = sns.countplot(x="Gender", data=df)
for bars in ax.containers:
ax.bar_label(bars)
# Checking order placement data for potential patterns or trends in purchasing power across genders
sales_gen = (
df.groupby(["Gender"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
ax = sns.barplot(x="Gender", y="Amount", data=sales_gen)
# enumerate gives the bar position, which matches the sorted row order used by the barplot
for pos, (index, row) in enumerate(sales_gen.iterrows()):
    ax.text(pos, row["Amount"], row["Amount"], ha="center")
# Based on the above-mentioned graphs, it is evident that the majority of the purchasers are female and that the purchasing power of females surpasses that of males.
# ## Based on Age
# Checking order placement data for potential patterns or trends in purchasing behavior based on "Age Groups"
ax = sns.countplot(x="Age Group", data=df, hue="Gender")
for bars in ax.containers:
ax.bar_label(bars)
# Checking order placement data for potential patterns or trends in purchasing power across "Age Groups"
sales_age = (
df.groupby(["Age Group"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
ax = sns.barplot(x="Age Group", y="Amount", data=sales_age)
# enumerate gives the bar position, which matches the sorted row order used by the barplot
for pos, (index, row) in enumerate(sales_age.iterrows()):
    ax.text(pos, row["Amount"], row["Amount"], ha="center")
# The chart presented above indicates that the age group of 26-35 made the highest number of purchases, while the age group of 55+ made the least number of purchases. Moreover, the data shows that females are the primary purchasers across all age groups, regardless of age range.
# ## Based on States
# Checking total number of orders from top 10 states
sales_state = (
df.groupby(["State"], as_index=False)["Orders"]
.sum()
.sort_values(by="Orders", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (18, 5)})
ax = sns.barplot(x="State", y="Orders", data=sales_state)
# Checking top 10 states based on Amount spent
sales_state = (
df.groupby(["State"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (18, 5)})
ax = sns.barplot(x="State", y="Amount", data=sales_state)
# Based on the above graphs, it is evident that the states of Uttar Pradesh, Maharashtra, and Karnataka contribute significantly to the majority of the orders and total sales/amount.
# ## Based on Relationship Status
# Checking the order pattern based on relationship status
ax = sns.countplot(x="Relationship_Status", data=df)
sns.set(rc={"figure.figsize": (8, 5)})
for bars in ax.containers:
ax.bar_label(bars)
# Checking the pattern of Amount spent based on relationship status and gender
sales_state = (
df.groupby(["Relationship_Status", "Gender"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
sns.set(rc={"figure.figsize": (8, 5)})
ax = sns.barplot(x="Relationship_Status", y="Amount", data=sales_state, hue="Gender")
# Based on the above graphs, it is evident that the majority of the buyers are married women, and they possess a high purchasing power.
# ## Based on Occupation
# Checking the order pattern based on Occupation
ax = sns.countplot(x="Occupation", data=df)
sns.set(rc={"figure.figsize": (18, 5)})
for bars in ax.containers:
ax.bar_label(bars)
# Checking the pattern of Amount spent based on Occupation
sales_state = (
df.groupby(["Occupation"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
)
sns.set(rc={"figure.figsize": (18, 5)})
ax = sns.barplot(x="Occupation", y="Amount", data=sales_state)
# Based on the graphical data presented, it is evident that a significant proportion of buyers operate within the Information Technology, Healthcare, and Aviation industries.
# ## Based on Product Category
# Checking the order pattern based on Product_Category
ax = sns.countplot(x="Product_Category", data=df)
sns.set(rc={"figure.figsize": (20, 5)})
for bars in ax.containers:
ax.bar_label(bars)
# Checking the pattern of Amount spent based on Product_Category
sales_state = (
df.groupby(["Product_Category"], as_index=False)["Amount"]
.sum()
.sort_values(by="Amount", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (22, 5)})
ax = sns.barplot(x="Product_Category", y="Amount", data=sales_state)
# The aforementioned graphs indicate that the majority of products sold fall under the categories of Food, Clothing, and Electronics.
# Checking the top 10 sold products based on Product_ID
sales_state = (
df.groupby(["Product_ID"], as_index=False)["Orders"]
.sum()
.sort_values(by="Orders", ascending=False)
.head(10)
)
sns.set(rc={"figure.figsize": (22, 5)})
ax = sns.barplot(x="Product_ID", y="Orders", data=sales_state)
|
from duckduckgo_search import ddg_images
from fastcore.all import *
def search_images(search_term, max_results=200):
return L(ddg_images(search_term, max_results=max_results)).itemgot("image")
from fastdownload import download_url
cat_location = "cat.jpg"
download_url(search_images("cat", max_results=1)[0], dest=cat_location)
from fastai.vision.all import *
Image.open(cat_location).to_thumb(256, 256)
dog_location = "dog.jpg"
download_url(search_images("dog", max_results=1)[0], dest=dog_location)
Image.open(dog_location).to_thumb(256, 256)
from time import sleep
searches = ["cat", "dog", "bird", "worm"]
path = Path("task1data")
for search_term in searches:
dest = path / search_term
dest.mkdir(exist_ok=True, parents=True)
download_images(dest, urls=search_images(f"{search_term} photo"))
sleep(10)
download_images(dest, urls=search_images(f"{search_term} sun photo"))
sleep(10)
download_images(dest, urls=search_images(f"{search_term} body photo"))
sleep(10)
    resize_images(dest, max_size=400, dest=dest)
failed = verify_images(get_image_files(path))
failed.map(Path.unlink)
len(failed)
dls = DataBlock(
blocks=(
ImageBlock,
CategoryBlock,
),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=[Resize(192, method="squish")],
).dataloaders(path)
dls.show_batch(max_n=12)
classifier = vision_learner(dls, resnet18, metrics=error_rate)
classifier.fine_tune(5)
download_url(
"https://ukmadcat.com/wp-content/uploads/2019/04/sleepy-cat.jpg",
dest="test.jpg",
show_progress=False,
)
result = classifier.predict(PILImage.create("test.jpg"))
print(result)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from keras.models import Model, load_model
# from keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten,
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.layers import (
Dense,
Input,
Conv2D,
MaxPool2D,
Flatten,
Activation,
Dropout,
)
def load_image(norm_path, label):
norm_files = np.array(os.listdir(norm_path))
norm_labels = np.array([label] * len(norm_files))
norm_images = []
for image in tqdm(norm_files):
image = cv2.imread(norm_path + image)
image = cv2.resize(image, dsize=(200, 200))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
norm_images.append(image)
norm_images = np.array(norm_images)
return norm_images, norm_labels
def load_image_test(norm_path):
norm_files = np.array(os.listdir(norm_path))
# norm_labels = np.array([label]*len(norm_files))
norm_images = []
for image in tqdm(norm_files):
image = cv2.imread(norm_path + image)
image = cv2.resize(image, dsize=(200, 200))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
norm_images.append(image)
norm_images = np.array(norm_images)
return norm_images
norm_images, norm_labels = load_image(
"/kaggle/input/shai-level-2-training-2023/train/normal/", 0
)
covid_images, covid_labels = load_image(
"/kaggle/input/shai-level-2-training-2023/train/covid/", 1
)
virus_images, virus_labels = load_image(
"/kaggle/input/shai-level-2-training-2023/train/virus/", 2
)
X_test = load_image_test("/kaggle/input/shai-level-2-training-2023/test/")
virus_images.shape
X_train = []
X_train.append(norm_images[:])
X_train.append(covid_images[:])
X_train.append(virus_images[:])
y_train = []
y_train.append(norm_labels)
y_train.append(covid_labels)
y_train.append(virus_labels)
len(X_train)
print(y_train)
train_dir = "/kaggle/input/shai-level-2-training-2023/train"
test_dir = "/kaggle/input/shai-level-2-training-2023/test"
train = pd.read_csv("/kaggle/input/shai-level-2-training-2023/train.csv")
train.head()
from sklearn.preprocessing import LabelEncoder
Ln = LabelEncoder().fit(train["Label"])
cases_count = train["Label"].value_counts()
print(cases_count)
# Plot the results
plt.figure(figsize=(6, 4))
sns.barplot(x=cases_count.index, y=cases_count.values)
plt.title("Number of cases", fontsize=14)
plt.xlabel("Case type", fontsize=12)
plt.ylabel("Count", fontsize=12)
plt.xticks(range(len(cases_count.index)), ["Covid(0)", "Normal(1)", "Virus(2)"])
plt.show()
covid_samples = (train[train["Label"] == 0]["Image"].iloc[:10]).tolist()
normal_samples = (train[train["Label"] == 1]["Image"].iloc[:10]).tolist()
virus_samples = (train[train["Label"] == 2]["Image"].iloc[:10]).tolist()
image_size = 224
BATCH_SIZE = 64
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
validation_split=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
rotation_range=15,
fill_mode="nearest",
)
training_set = train_datagen.flow_from_directory(
train_dir,
subset="training",
target_size=(image_size, image_size),
batch_size=BATCH_SIZE,
class_mode="categorical",
seed=42,
shuffle=True,
)
validation_set = train_datagen.flow_from_directory(
train_dir,
subset="validation",
target_size=(image_size, image_size),
batch_size=BATCH_SIZE,
class_mode="categorical",
seed=42,
shuffle=True,
)
y_train = training_set.classes
y_val = validation_set.classes
print(training_set.class_indices)
labels = ["COVID", "NORMAL", "VIRUS"]
sample_data = training_set.__getitem__(0)[0]
sample_label = training_set.__getitem__(0)[1]
plt.figure(figsize=(10, 8))
for i in range(12):
plt.subplot(3, 4, i + 1)
plt.axis("off")
plt.imshow(sample_data[i])
plt.title(labels[np.argmax(sample_label[i])])
from keras.backend import clear_session
clear_session()
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3)))
model.add(layers.Activation(activations.relu))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(layers.Activation(activations.relu))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(layers.Activation(activations.relu))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(3, activation="softmax"))
model.summary()
model.compile(
loss="categorical_crossentropy",
metrics=["accuracy"],
optimizer=tf.keras.optimizers.Adam(),
)
early_stop = tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=2, restore_best_weights=True
)
# model.fit accepts generators directly (fit_generator is deprecated) and can use the early stopping callback defined above
history = model.fit(
    training_set, epochs=7, validation_data=validation_set, callbacks=[early_stop]
)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "Valid"], loc="upper left")
plt.show()
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
|
# NB: Kaggle requires phone verification to use the internet or a GPU. If you haven't done that yet, the cell below will fail
# This code is only here to check that your internet is enabled. It doesn't do anything else.
# Here's a help thread on getting your phone number verified: https://www.kaggle.com/product-feedback/135367
import socket, warnings
try:
socket.setdefaulttimeout(1)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(("1.1.1.1", 53))
except socket.error as ex:
raise Exception(
"STOP: No internet. Click '>|' in top right and set 'Internet' switch to on"
)
from duckduckgo_search import ddg_images
from fastcore.all import *
def search_images(term, max_images=30):
print(f"Searching for '{term}'")
return L(ddg_images(term, max_results=max_images)).itemgot("image")
# NB: `search_images` depends on duckduckgo.com, which doesn't always return correct responses.
# If you get a JSON error, just try running it again (it may take a couple of tries).
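# A minimal retry sketch (a hypothetical helper, not part of the original notebook): one way
# to make the call more robust is to wrap `search_images` and retry a few times when
# duckduckgo_search returns a malformed (e.g. JSON) response.
from time import sleep

def search_images_with_retry(term, max_images=30, retries=3, wait=5):
    for attempt in range(retries):
        try:
            return search_images(term, max_images)
        except Exception as err:  # e.g. a JSON decode error from the search backend
            print(f"Attempt {attempt + 1} failed ({err}); retrying in {wait}s")
            sleep(wait)
    raise RuntimeError(f"search_images failed after {retries} attempts for '{term}'")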
urls = search_images("bird photos", max_images=1)
urls[0]
from fastdownload import download_url
dest = "bird.jpg"
download_url(urls[0], dest, show_progress=False)
from fastai.vision.all import *
im = Image.open(dest)
im.to_thumb(256, 256)
download_url(
search_images("forest photos", max_images=1)[0], "forest.jpg", show_progress=False
)
Image.open("forest.jpg").to_thumb(256, 256)
searches = "forest", "bird"
path = Path("bird_or_not")
from time import sleep
for o in searches:
dest = path / o
dest.mkdir(exist_ok=True, parents=True)
download_images(dest, urls=search_images(f"{o} photo"))
sleep(10) # Pause between searches to avoid over-loading server
download_images(dest, urls=search_images(f"{o} sun photo"))
sleep(10)
download_images(dest, urls=search_images(f"{o} shade photo"))
sleep(10)
resize_images(path / o, max_size=400, dest=path / o)
failed = verify_images(get_image_files(path))
failed.map(Path.unlink)
len(failed)
dls = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=[Resize(192, method="squish")],
).dataloaders(path, bs=32)
dls.show_batch(max_n=6)
learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(3)
is_bird, _, probs = learn.predict(PILImage.create("bird.jpg"))
print(f"This is a: {is_bird}.")
print(f"Probability it's a bird: {probs[0]:.4f}")
|
# 
# # **Business Problem**
# In the telecom industry, customers are able to choose from multiple service providers and actively switch from one operator to another. In this highly competitive market, the telecommunications industry experiences an average of 15-25% annual churn rate. Given the fact that it costs 5-10 times more to acquire a new customer than to retain an existing one, customer retention has now become even more important than customer acquisition.
# For many incumbent operators, retaining highly profitable customers is the number one business goal.
#
# To reduce customer churn, telecom companies need to predict which customers are at high risk of churn.
# In this project, you will analyse customer-level data of a leading telecom firm, build predictive models to identify customers at high risk of churn and identify the main indicators of churn.
# ## **Understanding the Business Objective and the Data**
# The dataset contains customer-level information for a span of four consecutive months - June, July, August and September. The months are encoded as 6, 7, 8 and 9, respectively.
# The business objective is to predict the churn in the last (i.e. the ninth) month using the data (features) from the first three months. To do this task well, understanding the typical customer behaviour during churn will be helpful.
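# A minimal sketch (an assumption about one possible setup, not the notebook's final approach):
# since the months are encoded as 6, 7, 8 and 9 in the column suffixes, candidate features can be
# taken from the month 6-8 columns while the month 9 columns are reserved for deriving churn.
import pandas as pd

churn_df = pd.read_csv("/kaggle/input/telecomm-churn-data/telecom_churn_data.csv")
feature_cols = [c for c in churn_df.columns if c.endswith(("_6", "_7", "_8"))]
ninth_month_cols = [c for c in churn_df.columns if c.endswith("_9")]
print(len(feature_cols), "candidate feature columns from months 6-8")
print(len(ninth_month_cols), "month-9 columns available for defining the churn label")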
# # TABLE OF CONTENTS
#
# * [1. Reading & Understanding Data](#1)
#
# * [2. EDA & Visualizations](#2)
#
# * [3. Preparing The Data For Modelling](#3)
#
# * [4. Evaluate The Model](#4)
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 500)
#
# # 1. Reading & Understanding Data
tel_data = pd.read_csv("/kaggle/input/telecomm-churn-data/telecom_churn_data.csv")
tel_data.head()
tel_data.shape
tel_data.info()
tel_data.describe()
# No of missing values in each column
tel_data.isnull().sum()
# computes the fraction of missing values in each column by dividing the number of missing values by the total number of rows in the dataset.
tel_data.isnull().sum() / len(tel_data.index)
# ## **Handling missing values**
tel_miss_cols = (
round(((tel_data.isnull().sum() / len(tel_data.index)) * 100), 2).to_frame("null")
).sort_values("null", ascending=False)
tel_miss_cols
# **.to_frame('null'):** Converts the resulting pandas Series into a DataFrame with a single column named "null".
# **.sort_values('null', ascending=False):** sorts the DataFrame in descending order based on the values in the "null" column
# **Let's delete the columns that have more than 30% missing values in this dataset.**
# First Find out those columns and total in numbers
cols_with_30_percent_missing_value = list(
tel_miss_cols.index[tel_miss_cols["null"] > 30]
)
print(cols_with_30_percent_missing_value, "\n")
len(cols_with_30_percent_missing_value)
# **There are 40 columns with more than 30% null values.**
# Delete those list of Columns now
tel_data = tel_data.drop(cols_with_30_percent_missing_value, axis=1)
tel_data.head()
tel_data.shape
# **The total number of columns is now reduced from 226 to 186 after dropping 40 columns.**
# Deleting the date columns as the date columns are not required in our analysis
date_cols = []
for col in tel_data.columns:
if "date" in col:
date_cols.append(col)
print(date_cols, "\n")
print(len(date_cols))
# **Now let's drop these 8 date columns and reduce the number of columns again.**
tel_data = tel_data.drop(date_cols, axis=1)
tel_data.shape
tel_data.circle_id.value_counts()
# **circle_id has only one unique value, so this column will have no impact on our analysis.**
tel_data = tel_data.drop("circle_id", axis=1)
tel_data.shape
|
import pandas as pd
import numpy as np
import seaborn as sns
import copy
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
confusion_matrix,
plot_confusion_matrix,
f1_score,
ConfusionMatrixDisplay,
accuracy_score,
precision_score,
recall_score,
roc_auc_score,
)
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from warnings import simplefilter
from sklearn.model_selection import cross_val_score
simplefilter(action="ignore", category=FutureWarning)
# Look at the dataset.
Df = pd.read_csv("/kaggle/input/titaniccsv/titanic.csv")
Df.head()
# Check the class balance of Survived.
print(
"Баланс классов Survived: ",
len(Df[Df["Survived"] == 1]) / len(Df[Df["Survived"] == 0]),
)
# Check the contents.
Df.info()
# The Ticket column is not used for training: it cannot be categorized, and it is unclear what the ticket code encodes.
# The Cabin column is not used for training: too many cells are empty.
# It is unclear what Fare is made up of, but it will not hurt to include it.
# The Sex and Embarked columns need encoding. Sex can be encoded with LabelEncoder.
# The Age column contains NaN values; fill them with the median.
# Copy the dataset.
Df_copy = copy.deepcopy(Df)
# Examine the Embarked column.
print("All ports: ", Df["Embarked"].unique())
print("Port S: ", len(Df[Df["Embarked"] == "S"]))
print("Port C: ", len(Df[Df["Embarked"] == "C"]))
print("Port Q: ", len(Df[Df["Embarked"] == "Q"]))
# NaN values must be filled before the column can be used for training. Fill them with the most frequent port, i.e. port S.
Df_copy["Embarked"].fillna("S", inplace=True)
# Also fill Age with the median value for further training.
Df_copy["Age"].fillna(Df["Age"].median(), inplace=True)
# Encode Sex with LabelEncoder and Embarked with OneHotEncoder.
# le, ohe and scaler are created once here for convenience.
le = LabelEncoder()
ohe = OneHotEncoder()
scaler = MinMaxScaler()
# Normalize the data.
for i in ["Pclass", "Age", "SibSp", "Parch", "Fare"]:
Df_copy[i] = scaler.fit_transform(Df_copy[[i]])
# Encode Sex.
Df_copy["Sex"] = le.fit_transform(Df_copy["Sex"].values.ravel())
# Encode Embarked.
Df_copy = pd.concat(
[
Df_copy,
pd.DataFrame(
ohe.fit_transform(Df_copy[["Embarked"]]).toarray(),
columns=np.ravel(ohe.categories_),
),
],
axis=1,
)
Df_copy = Df_copy.drop(["Embarked", "Cabin", "Ticket", "Name", "PassengerId"], axis=1)
print(Df_copy)
# Training.
X = Df_copy[["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "C", "Q", "S"]]
y = Df_copy["Survived"]
# Tree.
error_accuracy_tree = {}
error_precision_tree = {}
error_recall_tree = {}
error_roc_auc_tree = {}
for i in range(10):
accuracy_tree = []
precision_tree = []
recall_tree = []
roc_auc_tree = []
for j in range(150):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = DecisionTreeClassifier(
criterion="entropy", min_samples_leaf=5, max_depth=i + 1
)
clf.fit(X=X_train, y=y_train)
y_pred_tree = clf.predict(X_test)
accuracy_tree.append(accuracy_score(y_test, y_pred_tree))
precision_tree.append(precision_score(y_test, y_pred_tree))
recall_tree.append(recall_score(y_test, y_pred_tree))
roc_auc_tree.append(roc_auc_score(y_test, y_pred_tree))
error_accuracy_tree[f"i_{i}"] = accuracy_tree
error_precision_tree[f"i_{i}"] = precision_tree
error_recall_tree[f"i_{i}"] = recall_tree
error_roc_auc_tree[f"i_{i}"] = roc_auc_tree
# KNN.
error_accuracy_knn = {}
error_precision_knn = {}
error_recall_knn = {}
error_roc_auc_knn = {}
for g in np.arange(1, 11):
error_rates = []
accuracy_knn = []
precision_knn = []
recall_knn = []
roc_auc_knn = []
for k in np.arange(1, 101):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
knn = KNeighborsClassifier(n_neighbors=k, p=g)
knn.fit(X=X_train, y=y_train)
y_pred_knn = knn.predict(X_test)
error_rates.append(np.mean(y_pred_knn != y_test))
accuracy_knn.append(accuracy_score(y_test, y_pred_knn))
precision_knn.append(precision_score(y_test, y_pred_knn))
recall_knn.append(recall_score(y_test, y_pred_knn))
roc_auc_knn.append(roc_auc_score(y_test, y_pred_knn))
error_accuracy_knn[f"g_{g}"] = accuracy_knn
error_precision_knn[f"g_{g}"] = precision_knn
error_recall_knn[f"g_{g}"] = recall_knn
error_roc_auc_knn[f"g_{g}"] = roc_auc_knn
# Confusion Matrix.
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
# Tree.
cm_tree = confusion_matrix(y_test, y_pred_tree, labels=clf.classes_)
disp_tree = ConfusionMatrixDisplay(
confusion_matrix=cm_tree, display_labels=clf.classes_
)
disp_tree.plot(ax=axs[0], cmap=plt.cm.Blues)
axs[0].set_title("Tree")
# KNN.
cm_knn = confusion_matrix(y_test, y_pred_knn, labels=clf.classes_)
disp_knn = ConfusionMatrixDisplay(confusion_matrix=cm_knn, display_labels=clf.classes_)
disp_knn.plot(ax=axs[1], cmap=plt.cm.Blues)
axs[1].set_title("KNN")
plt.show()
# Boxplot.
# Tree and KNN.
for metric_type in ["accuracy", "precision", "recall", "roc_auc"]:
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
# Tree.
for i in range(10):
axs[0].boxplot(
globals()[f"error_{metric_type}_tree"]["i_" + str(i)], positions=[i]
)
axs[0].set_title(f"{metric_type}_tree")
# KNN.
for g in np.arange(1, 11):
axs[1].boxplot(
globals()[f"error_{metric_type}_knn"]["g_" + str(g)], positions=[g]
)
axs[1].set_title(f"{metric_type}_knn")
plt.show()
# DecisionTreeClassifier.
fn = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "C", "Q", "S"]
cn = ["0", "1"]
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(50, 50), dpi=300)
tree.plot_tree(clf, feature_names=fn, class_names=cn, filled=True)
fig.savefig("/kaggle/working/DT.png")
pred_tree = clf.predict(X)
pred_knn = knn.predict(X)
output_tree = pd.DataFrame({"PassengerId": Df_copy.PassengerId, "Survived": pred_tree})
output_knn = pd.DataFrame({"PassengerId": Df_copy.PassengerId, "Survived": pred_knn})
output_tree.to_csv("/kaggle/working/submission_tree.csv", index=False)
output_knn.to_csv("/kaggle/working/submission_knn.csv", index=False)
print("Результаты сохранены!")
|
# # IMPORTING LIBRARIES
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from pandas.api.types import is_numeric_dtype
from plotly.subplots import make_subplots
# # IMPORTING THE DATA
# ### Importing the train and test data
train_df = pd.read_csv("/kaggle/input/telecom-churn-case-study-hackathon-c46/train.csv")
test_df = pd.read_csv("/kaggle/input/telecom-churn-case-study-hackathon-c46/test.csv")
train_df.head()
test_df.head()
# ### Importing the data dictionary
dictionary_df = pd.read_csv(
"/kaggle/input/telecom-churn-case-study-hackathon-c46/data_dictionary.csv"
)
dictionary_df
# # ANALYZING THE DATA
# ### Printing the columns in the data
list(train_df.columns)
# ### Printing the shape of the dataframe
train_df.shape
test_df.shape
# ### Printing the number of nulls in the column
train_df.isnull().sum()
test_df.isnull().sum()
def percentage(x, size):
"""
Function to calculate the percentage of null in the dataframe
"""
null_count = (x.isnull().sum() / size) * 100
return null_count
def create_null_dataframe(df):
"""
creating dataframe with percentage of null values in each column
"""
null_df = pd.DataFrame()
size = df.shape[0]
column = list()
percent = list()
for col in df.columns:
column.append(col)
percent.append(percentage(df[col], size))
null_df = pd.DataFrame({"Column": column, "Percentage": percent})
return null_df
train_null_df = create_null_dataframe(train_df)
test_null_df = create_null_dataframe(test_df)
# Observing the distribution of the null percentage in the data
# Create histogram using Plotly
fig = px.histogram(
train_null_df,
x="Percentage",
title="Distribution of null values in the train dataframe",
)
# Show the plot
fig.show()
# Create histogram using Plotly
fig = px.histogram(
test_null_df,
x="Percentage",
title="Distribution of null values in the test dataframe",
)
# Show the plot
fig.show()
# Dropping all the columns which have more than 50% null values
# ### Printing the schema of the data
train_df.dtypes
train_df.describe()
test_df.describe()
# # DATA PREPARATION - NULL HANDLING
# ## Dropping the columns that have more than 50% null values
train_df.drop(
train_null_df[train_null_df["Percentage"] > 50]["Column"].values,
axis=1,
inplace=True,
)
test_df.drop(
test_null_df[test_null_df["Percentage"] > 50]["Column"].values, axis=1, inplace=True
)
# Checking the shape of the dataframe
print("Shape of the train dataset {}".format(train_df.shape))
print("Shape of the test dataset {}".format(test_df.shape))
# ### Applying imputation methods to handle rest of the null values
def find_mode(col):
"""
Returning mode of the column passed
"""
return col.mode()[0]
def find_mean(col):
"""
Returning mean of the column passed
"""
return col.mean()
# ### Understanding each of the columns and imputing them
def unique_count_and_null(col, name):
"""
Finding the number of unique values in
the dataframe column and returning False
if there are not.
Finding the number of null columns in the data
"""
null_flag = False
unique_flag = False
print(
"Number of null values in the column {} : {}".format(name, col.isnull().sum())
)
print("Does the column {} have unique values: {}".format(name, col.nunique() > 1))
if col.isnull().sum() > 0:
null_flag = True
if col.nunique() > 1:
unique_flag = True
return null_flag, unique_flag
col_to_drop = list()
for col in train_df.drop("churn_probability", axis=1).columns:
null_flag, unique_flag = unique_count_and_null(train_df[col], col)
if null_flag:
if is_numeric_dtype(train_df[col]):
mean = find_mean(train_df[col])
train_df[col].fillna(mean, inplace=True)
else:
mode = find_mode(train_df[col])
train_df[col].fillna(mode, inplace=True)
if not unique_flag:
col_to_drop.append(col)
# ### Dropping the columns that have only one unique value
train_df.drop(col_to_drop, axis=1, inplace=True)
# ### Handling imputation for Target variable
train_df["churn_probability"].isnull().sum()
# There are no null values in the target value column
# ### For test dataset
col_to_drop = list()
for col in test_df.columns:
null_flag, unique_flag = unique_count_and_null(test_df[col], col)
if null_flag:
if is_numeric_dtype(test_df[col]):
mean = find_mean(test_df[col])
test_df[col].fillna(mean, inplace=True)
else:
mode = find_mode(test_df[col])
test_df[col].fillna(mode, inplace=True)
if not unique_flag:
col_to_drop.append(col)
test_df.drop(col_to_drop, axis=1, inplace=True)
# ### Checking the shape of the dataframes
test_df.shape
train_df.shape
# ### Confirmation if all the columns are the same
import collections
if collections.Counter(
train_df.drop("churn_probability", axis=1).columns
) == collections.Counter(test_df.columns):
print("All Columns are same")
# # DATA PREPARATION - DERIVING NEW VARIABLES
# To understand the object features
# * date_of_last_rech_6
# * date_of_last_rech_7
# * date_of_last_rech_8
#
train_df["date_of_last_rech_6"] = pd.to_datetime(train_df["date_of_last_rech_6"])
train_df["date_of_last_rech_7"] = pd.to_datetime(train_df["date_of_last_rech_7"])
train_df["date_of_last_rech_8"] = pd.to_datetime(train_df["date_of_last_rech_8"])
# **Retrieving only the day from the full mm/dd/yyyy value, since the month is already indicated by the column name and is the same for all records in that column**
#
train_df["date_of_last_rech_6"] = train_df["date_of_last_rech_6"].dt.day
train_df["date_of_last_rech_7"] = train_df["date_of_last_rech_7"].dt.day
train_df["date_of_last_rech_8"] = train_df["date_of_last_rech_8"].dt.day
# Carrying out the same procedure for test dataframe
test_df["date_of_last_rech_6"] = pd.to_datetime(test_df["date_of_last_rech_6"])
test_df["date_of_last_rech_7"] = pd.to_datetime(test_df["date_of_last_rech_7"])
test_df["date_of_last_rech_8"] = pd.to_datetime(test_df["date_of_last_rech_8"])
test_df["date_of_last_rech_6"] = test_df["date_of_last_rech_6"].dt.day
test_df["date_of_last_rech_7"] = test_df["date_of_last_rech_7"].dt.day
test_df["date_of_last_rech_8"] = test_df["date_of_last_rech_8"].dt.day
# # EXPLORATORY DATA ANALYSIS
# writing a util function since there are a lot of columns in format feature_6 , feature_7 and feature_8
def violin_plot(col_june, col_july, col_aug, metric):
# create a trace for each column
trace1 = go.Violin(
y=train_df[col_june], name="{} in month of June".format(metric), opacity=0.5
)
trace2 = go.Violin(
y=train_df[col_july], name="{} in month of July".format(metric), opacity=0.5
)
trace3 = go.Violin(
y=train_df[col_aug], name="{} in month of August".format(metric), opacity=0.5
)
data = [trace1, trace2, trace3]
# create the layout
layout = go.Layout(
title="Violin plot of {} in June, July, August".format(metric),
yaxis=dict(title="Value"),
)
# create the figure
fig = go.Figure(data=data, layout=layout)
return fig
# ## Analyzing average revenue per user in the month of June, July and August
fig = violin_plot("arpu_6", "arpu_7", "arpu_8", "Average revenue per user")
# show the figure
fig.show()
# **Average revenue per user is :**
# * June: With minimum of -2258 and maximum of 27.73k
# * July: With minimum of -1289 and 35.1k
# * August: With minimum of -945 and 33.5k
# **We can see that June has the lowest turnover whereas August has a better turnover.**
# ## Analyzing minutes used by all kinds of calls within the same operator network (ONNET_MOU)
fig = violin_plot(
"onnet_mou_6",
"onnet_mou_7",
"onnet_mou_8",
"Minutes used by all kinds of calls with same network",
)
# show the figure
fig.show()
# **Minutes used by all kinds of calls with same network :**
# * June: With minimum of 0 and max of 7,376 minutes
# * July: With minimum of 0 and max of 8157 minutes
# * August: With minimum of 0 and max of 10.752k minutes
# **We see that users utilize the most minutes in August, with June hitting the minimum and a slight improvement in July.**
# ## Analyzing minutes utilized by all kinds of calls outside the operator - OFFNET_MOU
fig = violin_plot(
"offnet_mou_6",
"offnet_mou_7",
"offnet_mou_8",
"Minutes used by all kinds of calls outside the network",
)
# show the figure
fig.show()
# **Minutes used by all kinds of calls outside the network:**
# * June: With minimum of 0 and max of 8362 minutes
# * July: With minimum of 0 and max of 7043 minutes
# * August: With minimum of 0 and max of 14.007k minutes
# **We see that users utilize the most minutes in August, with utilization taking a dip in July.**
# ## Analyzing minutes utilized by the customer during an incoming call while being in the roaming zone - ROAM_IC_MOU
fig = violin_plot(
"roam_ic_mou_6",
"roam_ic_mou_7",
"roam_ic_mou_8",
"Minutes used by the customer while receving incoming call-roaming",
)
# show the figure
fig.show()
# **Minutes used by the customer while receiving incoming call in roaming zone :**
# * June: With minimum of 0 and max of 2850 minutes
# * July: With minimum of 0 and max of 4155 minutes
# * August: With minimum of 0 and max of 4169 minutes
# **Users utilize the most minutes in August, which is comparable with July, while June sees a steep drop.**
# ## Analyzing minutes utilized by the customer during an outgoing call while being in the roaming zone - ROAM_OG_MOU
fig = violin_plot(
"roam_og_mou_6",
"roam_og_mou_7",
"roam_og_mou_8",
"Minutes used by the customer while receving outgoing call-roaming",
)
# show the figure
fig.show()
# **Minutes used by the customer while making an outgoing call in the roaming zone:**
# * June: With minimum of 0 and max of 3775 minutes
# * July: With minimum of 0 and max of 2812 minutes
# * August: With minimum of 0 and max of 5337 minutes
# **Users utilize the most minutes in August, followed by July and then June.**
# ## Analyzing minutes utilized by the user during local calls within same telecom circle and within same operator (mobile to mobile)
fig = violin_plot(
"loc_og_t2t_mou_6",
"loc_og_t2t_mou_7",
"loc_og_t2t_mou_8",
"Minutes used by the customer during local calls with same operator",
)
# show the figure
fig.show()
# **Minutes used by the customer while making a local call within the same operator and telecom circle:**
# * June: With minimum of 0 and max of 6431 minutes
# * July: With minimum of 0 and max of 7400 minutes
# * August: With minimum of 0 and max of 10.752k minutes
# **Users utilize the most minutes in August, followed by July and then June.**
# ## Analyzing minutes utilized by the user during local calls within the same telecom circle, from the operator to a different mobile operator
fig = violin_plot(
"loc_og_t2m_mou_6",
"loc_og_t2m_mou_7",
"loc_og_t2m_mou_8",
"Minutes used by the customer during local calls with different operator",
)
# show the figure
fig.show()
# **Minutes used by the customer while making a local call to a different operator within the same telecom circle:**
# * June: With minimum of 0 and max of 4696 minutes
# * July: With minimum of 0 and max of 4557 minutes
# * August: With minimum of 0 and max of 4961 minutes
# **Users utilize the most minutes in August, followed by June and then July.**
# ## From the graphs above, we can say that utilization has been higher in August in most cases
# ## EXPLORATORY DATA ANALYSIS - BIVARIATE ANALYSIS
# ## Understanding age on network (number of days with the operator) versus churn probability
fig = px.box(train_df, x="churn_probability", y="aon")
fig.show()
# **People who do not churn have been on the network longer than those who churn**
# ### Grouping features of the same type across the different months
def plot_comparision(feature_1, feature_2, feature_3, train_df, metric):
fig = go.Figure()
fig.add_trace(
go.Box(
# defining y axis in corresponding
# to x-axis
y=list(train_df[train_df["churn_probability"] == 0][feature_1])
+ list(train_df[train_df["churn_probability"] == 0][feature_2])
+ list(train_df[train_df["churn_probability"] == 0][feature_3]),
x=[feature_1] * len(train_df[train_df["churn_probability"] == 0])
+ [feature_2] * len(train_df[train_df["churn_probability"] == 0])
+ [feature_3] * len(train_df[train_df["churn_probability"] == 0]),
name="Not Churned",
marker_color="blue",
)
)
fig.add_trace(
go.Box(
# defining y axis in corresponding
# to x-axis
y=list(train_df[train_df["churn_probability"] == 1][feature_1])
+ list(train_df[train_df["churn_probability"] == 1][feature_2])
+ list(train_df[train_df["churn_probability"] == 1][feature_3]),
x=[feature_1] * len(train_df[train_df["churn_probability"] == 1])
+ [feature_2] * len(train_df[train_df["churn_probability"] == 1])
+ [feature_3] * len(train_df[train_df["churn_probability"] == 1]),
name="Churned",
marker_color="green",
)
)
fig.update_layout(
# group together boxes of the different
# traces for each value of x
boxmode="group",
title="Metric {}".format(metric),
)
fig.show()
features_grouped = [
["arpu_6", "arpu_7", "arpu_8"],
["onnet_mou_6", "onnet_mou_7", "onnet_mou_8"],
["offnet_mou_6", "offnet_mou_7", "offnet_mou_8"],
["roam_ic_mou_6", "roam_ic_mou_7", "roam_ic_mou_8"],
["roam_og_mou_6", "roam_og_mou_7", "roam_og_mou_8"],
["loc_og_t2t_mou_6", "loc_og_t2t_mou_7", "loc_og_t2t_mou_8"],
["loc_og_t2m_mou_6", "loc_og_t2m_mou_7", "loc_og_t2m_mou_8"],
["loc_og_t2f_mou_6", "loc_og_t2f_mou_7", "loc_og_t2f_mou_8"],
["loc_og_t2c_mou_6", "loc_og_t2c_mou_7", "loc_og_t2c_mou_8"],
["loc_og_mou_6", "loc_og_mou_7", "loc_og_mou_8"],
["std_og_t2t_mou_6", "std_og_t2t_mou_7", "std_og_t2t_mou_8"],
["std_og_t2m_mou_6", "std_og_t2m_mou_7", "std_og_t2m_mou_8"],
["std_og_t2f_mou_6", "std_og_t2f_mou_7", "std_og_t2f_mou_8"],
["std_og_mou_6", "std_og_mou_7", "std_og_mou_8"],
["isd_og_mou_6", "isd_og_mou_7", "isd_og_mou_8"],
["spl_og_mou_6", "spl_og_mou_7", "spl_og_mou_8"],
["og_others_6", "og_others_7", "og_others_8"],
["total_og_mou_6", "total_og_mou_7", "total_og_mou_8"],
["loc_ic_t2t_mou_6", "loc_ic_t2t_mou_7", "loc_ic_t2t_mou_8"],
["loc_ic_t2m_mou_6", "loc_ic_t2m_mou_7", "loc_ic_t2m_mou_8"],
["loc_ic_t2f_mou_6", "loc_ic_t2f_mou_7", "loc_ic_t2f_mou_8"],
["loc_ic_mou_6", "loc_ic_mou_7", "loc_ic_mou_8"],
["std_ic_t2t_mou_6", "std_ic_t2t_mou_7", "std_ic_t2t_mou_8"],
["std_ic_t2m_mou_6", "std_ic_t2m_mou_7", "std_ic_t2m_mou_8"],
["std_ic_t2f_mou_6", "std_ic_t2f_mou_7", "std_ic_t2f_mou_8"],
["std_ic_mou_6", "std_ic_mou_7", "std_ic_mou_8"],
["total_ic_mou_6", "total_ic_mou_7", "total_ic_mou_8"],
["spl_ic_mou_6", "spl_ic_mou_7", "spl_ic_mou_8"],
["isd_ic_mou_6", "isd_ic_mou_7", "isd_ic_mou_8"],
["ic_others_6", "ic_others_7", "ic_others_8"],
["total_rech_num_6", "total_rech_num_7", "total_rech_num_8"],
["total_rech_amt_6", "total_rech_amt_7", "total_rech_amt_8"],
["max_rech_amt_6", "max_rech_amt_7", "max_rech_amt_8"],
["date_of_last_rech_6", "date_of_last_rech_7", "date_of_last_rech_8"],
["last_day_rch_amt_6", "last_day_rch_amt_7", "last_day_rch_amt_8"],
["vol_2g_mb_6", "vol_2g_mb_7", "vol_2g_mb_8"],
["vol_3g_mb_6", "vol_3g_mb_7", "vol_3g_mb_8"],
["monthly_2g_6", "monthly_2g_7", "monthly_2g_8"],
["sachet_2g_6", "sachet_2g_7", "sachet_2g_8"],
["monthly_3g_6", "monthly_3g_7", "monthly_3g_8"],
["sachet_3g_6", "sachet_3g_7", "sachet_3g_8"],
["aug_vbc_3g", "jul_vbc_3g", "jun_vbc_3g"],
]
for i in range(3):
feature = features_grouped[i]
plot_comparision(
feature[0], feature[1], feature[2], train_df, feature[0].split("_")[0]
)
# **The metric average revenue per user is higher for people who do not churn**
# **The onnet metric is comparable for churned and non-churned customers in June and July and increases in August**
# **The offnet metric is also comparable in June and July; there are a couple of outliers in all these graphs**
# ### The code above can be used to print box plots for the remaining feature groups and analyse them
# ### The notebook crashes when all plots are rendered at once, hence limiting the number of feature groups analyzed here; a sketch of writing each figure to HTML instead of rendering it inline follows below
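# A minimal sketch (an assumption, not part of the original notebook): instead of calling
# fig.show() for every group, build the same grouped box figure and write it to an HTML file,
# so every feature group can be inspected without crashing the notebook. The helper name
# export_group_plot and the output directory are hypothetical.
import os

def export_group_plot(feature_group, frame, out_dir="box_plots"):
    # build the grouped "churned vs not churned" box figure for one feature group and save it
    os.makedirs(out_dir, exist_ok=True)
    fig = go.Figure()
    for churn_value, name, color in [(0, "Not Churned", "blue"), (1, "Churned", "green")]:
        subset = frame[frame["churn_probability"] == churn_value]
        fig.add_trace(
            go.Box(
                y=pd.concat([subset[f] for f in feature_group]),
                x=sum([[f] * len(subset) for f in feature_group], []),
                name=name,
                marker_color=color,
            )
        )
    fig.update_layout(boxmode="group", title="Metric {}".format(feature_group[0].split("_")[0]))
    fig.write_html(os.path.join(out_dir, "{}.html".format(feature_group[0])))

# usage: export every feature group without rendering anything inline
# for group in features_grouped:
#     export_group_plot(group, train_df)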
# ### Correlation Heatmap
corr = train_df.corr()
# Create a heatmap using seaborn library
plt.figure(figsize=(300, 300))
sns.heatmap(corr, annot=True, cmap="coolwarm")
# Add title and show the plot
plt.title("Correlation Heatmap")
plt.show()
corr
# As we can see from the dataframe
# * The correlation between arpu_6 and arpu_7 is strong, and so is the correlation between arpu_6 and arpu_8
# * The correlation between arpu_6 and onnet_6/7/8 is low, but the onnet columns are highly correlated with each other
# * The correlation between aon and arpu 6/7/8 is very low
# # TRAIN TEST SPLIT
train_df.drop("id", axis=1, inplace=True)
y_train = train_df["churn_probability"]
X_train = train_df.drop("churn_probability", axis=1)
# # REMOVING OUTLIERS
# From EDA, we saw that there are columns which have outliers in them.
# Hence writing code to remove outliers
def remove_outliers(data, threshold=1.5):
"""
Remove outliers from a numpy array using the Interquartile Range (IQR) method.
Parameters:
data (np.ndarray): Input data array.
threshold (float): IQR threshold value. Default is 1.5.
Returns:
np.ndarray: Output data array without outliers.
"""
q1, q3 = np.percentile(data, [25, 75])
iqr = q3 - q1
lower_bound = q1 - (iqr * threshold)
upper_bound = q3 + (iqr * threshold)
mask = (data >= lower_bound) & (data <= upper_bound)
return mask
feature_columns = train_df.columns.drop("churn_probability")
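# The helper above only returns a boolean mask and is never applied in this excerpt. A minimal
# sketch (an assumption about the intended usage): combine the per-column masks over the feature
# columns and keep only the rows that fall inside the IQR bounds for every numeric feature.
# The combined_mask / train_df_no_outliers names are hypothetical.
combined_mask = np.ones(len(train_df), dtype=bool)
for col in feature_columns:
    if pd.api.types.is_numeric_dtype(train_df[col]):
        combined_mask &= remove_outliers(train_df[col].values)
train_df_no_outliers = train_df[combined_mask]
X_train = train_df_no_outliers.drop("churn_probability", axis=1)
y_train = train_df_no_outliers["churn_probability"]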
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# variables
# paths for files
app_path_str = "../input/credit-card-approval-prediction/application_record.csv"
credit_path_str = "../input/credit-card-approval-prediction/credit_record.csv"
# For random forest, a number of trees must be selected.
# The higher number, the more thorough the calculation, but it takes longer to run.
number_of_trees = 200
# Target column for random forest prediction
target_column_name = "high_risk"
# Usually, decision trees can be large. Setting this variable to 3 or 4 makes the result tree easier to see and interpret.
tree_depth = 3
# Load data
# create dataframe from data
df_app = pd.read_csv(app_path_str)
df_app.head()
# Load data
# create dataframe from data
df_credit = pd.read_csv(credit_path_str)
df_credit.shape
# Replace C and X with 0, expanding the 0 group to 0-29 days past due, so that we have all numeric categories for delinquency status.
df_credit["STATUS"] = df_credit["STATUS"].replace(["X"], 0)
df_credit["STATUS"] = df_credit["STATUS"].replace(["C"], 0)
# check rows,cols
df_app.shape
# Convert status to numeric and group-max by status for each unique id.
# This will be a proxy for whether an applicant will be approved, since there is no yes/no flag for approved in the data set.
df_credit["STATUS"] = df_credit["STATUS"].apply(pd.to_numeric)
# Select highest status, i.e. the highest level of delinquency for each customer id
df_credit = df_credit.groupby("ID")["STATUS"].max().reset_index()
# export data to csv file
df_credit.to_csv("df_credit.csv", index=False)
df_credit.groupby("ID")["STATUS"].count().reset_index()
# Join grouped status table to df_app by ID
df_consol = pd.merge(df_app, df_credit, left_on="ID", right_on="ID")
df_consol.shape
# convert status to binary: if STATUS < 1, flag as 0 (not high risk), otherwise 1 (high risk)
df_consol["high_risk"] = np.where(df_consol["STATUS"] < 1, 0, 1)
# convert days old to years
df_consol["age_years"] = round(df_consol["DAYS_BIRTH"] / -365, 0).astype(int)
df_consol["years_employed"] = round(df_consol["DAYS_EMPLOYED"] / -365, 0).astype(int)
df_consol.head()
# Encode categorical columns
df_formatted = pd.get_dummies(
df_consol,
columns=[
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
"OCCUPATION_TYPE",
],
prefix=[
"gender",
"own_car",
"own_property",
"income_type",
"education",
"family_status",
"housing_type",
"occupation_type",
],
)
# check length-rows and width-columns of data
df_formatted.shape
# drop columns not needed
df_formatted.drop(["ID"], axis=1, inplace=True)
df_formatted.drop(["STATUS"], axis=1, inplace=True)
df_formatted.drop(["DAYS_BIRTH"], axis=1, inplace=True)
df_formatted.drop(["DAYS_EMPLOYED"], axis=1, inplace=True)
df_formatted.drop(["own_car_N"], axis=1, inplace=True)
df_formatted.drop(["own_property_N"], axis=1, inplace=True)
df_formatted.to_csv("df_formatted.csv", index=False)
# Use numpy to convert to arrays.
# NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices,
# along with a large collection of high-level mathematical functions to operate on these arrays.
import numpy as np
# Assign target variable to separate array
target = np.array(df_formatted[target_column_name])
# Remove target column from features
features = df_formatted.drop(target_column_name, axis=1)
# Saving feature names for later use
feature_list = list(features.columns)
# convert features dataframe to array
features = np.array(features)
# Using Skicit-learn to split data into training and testing sets.
# Scikit-learn (formerly scikits.learn and also known as sklearn) is a free software machine learning library for the Python programming language.
# It features various classification, regression and clustering algorithms including support vector machines, random forests,
# gradient boosting, k-means and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries NumPy and SciPy.
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets. test_size is n% of the rows. The other % will train the model.
train_features, test_features, train_target, test_target = train_test_split(
features, target, test_size=0.25, random_state=42
)
# Check to see that training features and labels have the same rows, and testing features and labels have the same rows
print("Training Features Shape:", train_features.shape)
print("Training target Shape:", train_target.shape)
print("Testing Features Shape:", test_features.shape)
print("Testing target Shape:", test_target.shape)
# Import the model we are using
from sklearn.ensemble import RandomForestRegressor
# Instantiate model. n_estimators is the number of decision trees you want to use
rf = RandomForestRegressor(n_estimators=number_of_trees, random_state=42)
# Train the model on training data
rf.fit(train_features, train_target)
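# The excerpt trains the regressor but never scores it on the held-out set. A minimal sketch
# (an assumption, not part of the original notebook): predict on the test features, report the
# mean absolute error, and also report accuracy after thresholding the continuous predictions
# at 0.5, since the underlying target is binary.
from sklearn.metrics import mean_absolute_error, accuracy_score

test_predictions = rf.predict(test_features)
print("Test MAE:", round(mean_absolute_error(test_target, test_predictions), 4))
print(
    "Test accuracy (threshold 0.5):",
    round(accuracy_score(test_target, (test_predictions >= 0.5).astype(int)), 4),
)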
# Import tools needed for visualization
from sklearn.tree import export_graphviz
from IPython.display import Image
# pydot may need to be installed.
try:
    import pydot
except ImportError:
    raise ImportError("pydot is required for the tree visualization; install it with `pip install pydot`")
# Limit depth of tree to n levels
rf_small = RandomForestRegressor(n_estimators=10, max_depth=tree_depth)
rf_small.fit(train_features, train_target)
# Extract the small tree
tree_small = rf_small.estimators_[5]
# Save the tree as a png image
export_graphviz(
tree_small,
out_file="small_tree.dot",
feature_names=feature_list,
rounded=True,
precision=1,
)
(graph,) = pydot.graph_from_dot_file("small_tree.dot")
graph.write_png("small_tree.png")
# show png file
Image(graph.create_png())
# Get numerical feature importances
importances = list(rf.feature_importances_)
# List of tuples with variable and importance
feature_importances = [
(feature, round(importance, 2))
for feature, importance in zip(feature_list, importances)
]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)
# Print out the feature and importances
[print("Variable: {:20} Importance: {}".format(*pair)) for pair in feature_importances]
dfcorr = df_formatted[["AMT_INCOME_TOTAL", "age_years", "years_employed", "high_risk"]]
# import packages
import seaborn as sn
import matplotlib.pyplot as plt
# set width and height
f = plt.figure()
f.set_figwidth(15)
f.set_figheight(12)
# create matrix
sn.heatmap(
dfcorr.corr(),
annot=True,
vmin=-1,
vmax=1,
center=0,
cmap="Blues",
linewidths=1,
linecolor="black",
)
# Make x and y descriptions larger so they are easier to read
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
|
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option("max_columns", None)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
import warnings
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data_2019 = pd.read_csv("/kaggle/input/flight-delay-prediction/Jan_2019_ontime.csv")
data_2020 = pd.read_csv("/kaggle/input/flight-delay-prediction/Jan_2020_ontime.csv")
print(data_2019.info(), data_2020.info())
# Join both datasets 2019 and 2020
data = pd.concat([data_2019, data_2020])
data.head()
data.shape
# rename the categories in categorical columns
data["DEP_DEL15"] = np.where(data["DEP_DEL15"] == 0.0, "NO", "YES")
data["CANCELLED"] = np.where(data["CANCELLED"] == 0.0, "NO", "YES")
data["DIVERTED"] = np.where(data["DIVERTED"] == 0.0, "NO", "YES")
data["ARR_DEL15"] = np.where(data["ARR_DEL15"] == 0.0, "NO", "YES")
# Since there are many categories in the ORIGIN and DEST columns, I combined them into a single column and extracted the 50 most used routes.
# Combine ORIGIN and DEST into a single column
data["ORIGIN-DEST"] = data["ORIGIN"] + "-" + data["DEST"]
# get the count of each combination into a dataframe
org_dest = data["ORIGIN-DEST"].value_counts().to_frame()
# check the number of observations in the 50 most frequent routes to see whether the sample size is enough for the analysis
org_dest[:50]["ORIGIN-DEST"].sum()
# extract the data from original dataframe
org_dest_list = org_dest[:50].index.tolist()
data = data[data["ORIGIN-DEST"].isin(org_dest_list)]
# Distance variable is categorized into three groups to simplify the analysis.
print(
"max distance: ",
data["DISTANCE"].max(),
"\n",
"min distance: ",
data["DISTANCE"].min(),
)
data["DIST_GROUP"] = "SHORT"
data.loc[
(data["DISTANCE"] > 928.0) & (data["DISTANCE"] <= 1757.0), "DIST_GROUP"
] = "MEDIUM"
data.loc[(data["DISTANCE"] > 1757.0), "DIST_GROUP"] = "LONG"
# extract the necessary columns
data = data[
[
"DAY_OF_MONTH",
"DAY_OF_WEEK",
"OP_UNIQUE_CARRIER",
"ORIGIN-DEST",
"DEP_DEL15",
"CANCELLED",
"DIVERTED",
"DIST_GROUP",
"ARR_DEL15",
]
]
data = data.reset_index().drop("index", axis=1)
data.shape
# check for missing values
data.isnull().sum()
data.head()
# # Exploratory Analysis
var = ["DEP_DEL15", "CANCELLED", "DIVERTED"]
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 10))
for k, ax in zip(range(3), axes.flatten()):
sns.countplot(data=data, x=f"{var[k]}", hue="ARR_DEL15", ax=ax)
ax.set_title(f"Arrival delay vs {var[k]}")
for container in ax.containers:
ax.bar_label(container)
DEP_DEL_YES = data.loc[data["DEP_DEL15"] == "YES"]
DEP_DEL_NO = data.loc[data["DEP_DEL15"] == "NO"]
fig, ax = plt.subplots(figsize=(20, 5))
sns.countplot(
data=data,
x="ORIGIN-DEST",
hue="ARR_DEL15",
)
plt.title("Arrival delay with ORIGIN-DESTINATION")
plt.xticks(rotation=45, ha="right")
var = ["YES", "NO"]
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(20, 10))
for k, ax in zip(range(2), axes.flatten()):
sns.countplot(
data=data.loc[data["DEP_DEL15"] == var[k]],
x="ORIGIN-DEST",
hue="ARR_DEL15",
ax=ax,
palette=["#98F5FF", "#BF3EFF"],
)
ax.set_title(f"Arrival delay vs ORIGIN-DEST with DEP_DEL_{var[k]}")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha="right")
fig, ax = plt.subplots(figsize=(20, 5))
sns.countplot(
data=data,
x="OP_UNIQUE_CARRIER",
hue="ARR_DEL15",
)
plt.title("Arrival delay with OP_UNIQUE_CARRIER")
var = ["YES", "NO"]
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(20, 10))
for k, ax in zip(range(2), axes.flatten()):
sns.countplot(
data=data.loc[data["DEP_DEL15"] == var[k]],
x="OP_UNIQUE_CARRIER",
hue="ARR_DEL15",
ax=ax,
palette=["#98F5FF", "#BF3EFF"],
)
ax.set_title(f"Arrival delay vs CARRIER CODE with DEP_DEL_{var[k]}")
fig, ax = plt.subplots(figsize=(20, 10))
sns.countplot(
data=data,
x="DIST_GROUP",
hue="ARR_DEL15",
)
plt.title("Arrival delay with DISTANCE GROUP")
for container in ax.containers:
ax.bar_label(container)
var = ["YES", "NO"]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 10))
for k, ax in zip(range(2), axes.flatten()):
sns.countplot(
data=data.loc[data["DEP_DEL15"] == var[k]],
x="DIST_GROUP",
hue="ARR_DEL15",
ax=ax,
palette=["#98F5FF", "#BF3EFF"],
)
ax.set_title(f"Arrival delay vs Distance with DEP_DEL_{var[k]}")
sns.histplot(data=data, x="DAY_OF_WEEK", hue="ARR_DEL15", multiple="dodge", shrink=6)
sns.histplot(data=data, x="DAY_OF_MONTH", hue="ARR_DEL15", multiple="dodge", shrink=0.8)
sns.set(rc={"figure.figsize": (5, 5)})
sns.set_style(rc={"axes.facecolor": "#FFFFFF"})
# **Summary of Exploratory Analysis**
# 1. Being delayed on departure has an effect on arrival delay.
# 2. All the flight routes have a high probability of not getting delayed, but among all the routes most of the delays are on route ORD-LGA.
# 3. Carrier doesn't seem to have an impact on arrival delay.
# 4. Most of the flights have traveled short distances. In every distance group most of the flights have arrived on time.
# 5. Day of the week and day of the month seem to have no impact on the arrival delay.
# 6. In each category, **having a departure delay** gives the **highest probability of having an arrival delay.**
# # Data Pre-processing
data.head()
data["ARR_DEL15"] = np.where(data["ARR_DEL15"] == "NO", 0, 1)
y = data["ARR_DEL15"]
x = data.drop("ARR_DEL15", axis=1)
x = pd.get_dummies(x, drop_first=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
# # Logistic Model
accuracy_table = pd.DataFrame(
columns=[
"model",
"feature selection",
"precision",
"recall",
"f1 score",
"accuracy",
]
)
model_lr = LogisticRegression(random_state=0)
model_lr.fit(x_train, y_train)
y_pred_lr = model_lr.predict(x_test)
cr_lr = classification_report(y_test, y_pred_lr)  # y_true first, then y_pred
print(cr_lr)
row1 = pd.Series(
["Logistic", "no", 0.96, 0.95, 0.95, 0.93], index=accuracy_table.columns
)
# # KNN
knn = KNeighborsClassifier()
param_grid = {"n_neighbors": range(1, 20)}
grid = GridSearchCV(knn, param_grid, cv=5)
grid.fit(x_train, y_train)
print("Best n_neighbors:", grid.best_params_["n_neighbors"])
knn = KNeighborsClassifier(n_neighbors=13)
model_knn = knn.fit(x_train, y_train)
y_pred_knn = model_knn.predict(x_test)
cr_knn = classification_report(y_test, y_pred_knn)
print(cr_knn)
row2 = pd.Series(["KNN", "no", 0.98, 0.88, 0.93, 0.88], index=accuracy_table.columns)
# # Random Forest
rf = RandomForestClassifier()
param_grid = {
"n_estimators": [100, 200, 300],
"max_depth": [10, 20, 30],
"min_samples_split": [2, 4, 6],
"min_samples_leaf": [1, 2, 3],
"bootstrap": [True, False],
}
grid_search = GridSearchCV(
estimator=rf, param_grid=param_grid, cv=5, n_jobs=-1, verbose=False
)
grid_search.fit(x_train, y_train)
print("Best parameters:", grid_search.best_params_)
rf = RandomForestClassifier(
n_estimators=200,
bootstrap=True,
max_depth=10,
min_samples_leaf=1,
min_samples_split=6,
)
model_rf = rf.fit(x_train, y_train)
y_pred_rf = model_rf.predict(x_test)
cr_rf = classification_report(y_test, y_pred_rf)
print(cr_rf)
row3 = pd.Series(
["Random Forest", "no", 0.96, 0.95, 0.95, 0.93], index=accuracy_table.columns
)
# # XgBoost
xb = xgb.XGBClassifier()
param_grid = {
"learning_rate": [0.01, 0.1, 0.5],
"max_depth": [3, 5, 7],
"n_estimators": [100, 500, 1000],
"subsample": [0.5, 1],
"colsample_bytree": [0.5, 1],
}
grid_search = GridSearchCV(estimator=xb, param_grid=param_grid, cv=5, verbose=False)
grid_search.fit(x_train, y_train)
print("Best parameters: ", grid_search.best_params_)
xg = xgb.XGBClassifier(
learning_rate=0.01,
max_depth=7,
n_estimators=1000,
subsample=0.5,
colsample_bytree=1,
)
model_xg = xg.fit(x_train, y_train)
y_pred_xg = model_xg.predict(x_test)
cr_xg = classification_report(y_test, y_pred_xg)
print(cr_xg)
row4 = pd.Series(
["XgBoost", "no", 0.96, 0.94, 0.95, 0.93], index=accuracy_table.columns
)
# # SVM
clf = svm.SVC(kernel="linear")
model_svm = clf.fit(x_train, y_train)
y_pred_svm = model_svm.predict(x_test)
cr_svm = classification_report(y_test, y_pred_svm)
print(cr_svm)
row5 = pd.Series(["SVM", "no", 0.96, 0.95, 0.95, 0.93], index=accuracy_table.columns)
# # Naive Bayes Model
nb = GaussianNB()
model_nb = nb.fit(x_train, y_train)
y_pred_nb = model_nb.predict(x_test)
cr_nb = classification_report(y_test, y_pred_nb)
print(cr_nb)
# From the initial runs, the best models are the Logistic model, Random Forest, XgBoost and SVM. To improve the accuracy further we can try feature selection.
row6 = pd.Series(
["Naive Bayes", "no", 0.95, 0.90, 0.93, 0.88], index=accuracy_table.columns
)
# # Lasso model
model_ls = LogisticRegression(penalty="l1", solver="liblinear")
model_ls.fit(x_train, y_train)
y_pred_ls = model_ls.predict(x_test)
cr_ls = classification_report(y_test, y_pred_ls)
print(cr_ls)
row7 = pd.Series(["Lasso", "yes", 0.96, 0.95, 0.95, 0.93], index=accuracy_table.columns)
# # Random Forest with Feature Selection
fig, axes = plt.subplots(figsize=(30, 5))
feature_importances = rf.feature_importances_
indices = feature_importances.argsort()[::-1]
feature_names = x.columns[indices]
plt.bar(range(x_train.shape[1]), feature_importances[indices])
plt.xticks(range(x_train.shape[1]), feature_names, rotation=90)
plt.xlabel("Features")
plt.ylabel("Importance")
plt.show()
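# The ten columns below look hand-picked from the importance plot above. A minimal sketch
# (an assumption, not from the original notebook) of deriving a top-k list programmatically
# instead, using the already-sorted indices:
top_k = 10
top_features = x.columns[indices][:top_k].tolist()
print(top_features)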
y_rf = data["ARR_DEL15"]
x_rf = x[
[
"DEP_DEL15_YES",
"CANCELLED_YES",
"DAY_OF_MONTH",
"DAY_OF_WEEK",
"DIVERTED_YES",
"ORIGIN-DEST_ORD-LGA",
"OP_UNIQUE_CARRIER_HA",
"OP_UNIQUE_CARRIER_AS",
"ORIGIN-DEST_LGA-ORD",
"OP_UNIQUE_CARRIER_OO",
]
]
x_train_rf, x_test_rf, y_train_rf, y_test_rf = train_test_split(
x_rf, y_rf, test_size=0.3, random_state=0
)
rf = RandomForestClassifier()
param_grid = {
"n_estimators": [100, 200, 300],
"max_depth": [10, 20, 30],
"min_samples_split": [2, 4, 6],
"min_samples_leaf": [1, 2, 3],
"bootstrap": [True, False],
}
grid_search = GridSearchCV(
estimator=rf, param_grid=param_grid, cv=5, n_jobs=-1, verbose=False
)
grid_search.fit(x_train_rf, y_train_rf)
print("Best parameters:", grid_search.best_params_)
rf_fs = RandomForestClassifier(
n_estimators=200,
bootstrap=True,
max_depth=10,
min_samples_leaf=3,
min_samples_split=2,
)
model_rf_fs = rf_fs.fit(x_train_rf, y_train_rf)
y_pred_rf_fs = model_rf_fs.predict(x_test_rf)
cr_rf_fs = classification_report(y_test_rf, y_pred_rf_fs)
print(cr_rf_fs)
row8 = pd.Series(
["Random Forest", "yes", 0.96, 0.94, 0.95, 0.92], index=accuracy_table.columns
)
# # XgBoost with Feature Selection
fig, axes = plt.subplots(figsize=(30, 5))
feature_importances = xg.feature_importances_
indices = feature_importances.argsort()[::-1]
feature_names = x.columns[indices]
plt.bar(range(x_train.shape[1]), feature_importances[indices])
plt.xticks(range(x_train.shape[1]), feature_names, rotation=90)
plt.xlabel("Features")
plt.ylabel("Importance")
plt.show()
y_xg = data["ARR_DEL15"]
x_xg = x[
[
"DEP_DEL15_YES",
"CANCELLED_YES",
"DIVERTED_YES",
"OP_UNIQUE_CARRIER_HA",
"ORIGIN-DEST_ORD-LGA",
"OP_UNIQUE_CARRIER_AS",
"ORIGIN-DEST_MSP-ORD",
"ORIGIN-DEST_ORD-ATL",
"ORIGIN-DEST_TPA-ATL",
"ORIGIN-DEST_ORD-DCA",
]
]
x_train_xg, x_test_xg, y_train_xg, y_test_xg = train_test_split(
x_xg, y_xg, test_size=0.3, random_state=0
)
xb = xgb.XGBClassifier()
param_grid = {
"learning_rate": [0.01, 0.1, 0.5],
"max_depth": [3, 5, 7],
"n_estimators": [100, 500, 1000],
"subsample": [0.5, 1],
"colsample_bytree": [0.5, 1],
}
grid_search = GridSearchCV(estimator=xb, param_grid=param_grid, cv=5, verbose=False)
grid_search.fit(x_train_xg, y_train_xg)
print("Best parameters: ", grid_search.best_params_)
xg = xgb.XGBClassifier(
learning_rate=0.01,
max_depth=3,
n_estimators=500,
subsample=0.5,
colsample_bytree=0.5,
)
model_xg_fs = xg.fit(x_train_xg, y_train_xg)
y_pred_xg_fs = model_xg_fs.predict(x_test_xg)
cr_xg_fs = classification_report(y_test_xg, y_pred_xg_fs)
print(cr_xg_fs)
row9 = pd.Series(
["XgBoost", "yes", 0.96, 0.95, 0.95, 0.93], index=accuracy_table.columns
)
accuracy_table = pd.concat(
    [accuracy_table, pd.DataFrame([row1, row2, row3, row4, row5, row6, row7, row8, row9])],
    ignore_index=True,
)
accuracy_table
|
# ## 1. Introduction
# Name: Alec Daalman
# Username: AlecDaalman
# Leaderboard rank:
# ## 2. Data
# ### 2.1 Dataset
# In this section, we load and explore the dataset.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../input"))
train = pd.read_csv(
"../input/LANL-Earthquake-Prediction/train.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
# ### 2.2 Data Exploration
# Explore the features and target variables of the dataset. Think about making some scatter plots, box plots, histograms or printing the data, but feel free to choose any method that suits you.
# What do you think is the right performance
# metric to use for this dataset? Clearly explain which performance metric you
# choose and why.
# Algorithmic bias can be a real problem in Machine Learning. So based on this,
# should we use the Race and the Sex features in our machine learning algorithm? Explain what you believe.
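# A minimal sketch (an assumption, not part of the original notebook) relating to the
# performance-metric question above: since the target (time to failure) is continuous, mean
# absolute error is a natural choice, and it is the metric the LANL competition scores on.
# y_true and y_pred below are hypothetical placeholder arrays.
from sklearn.metrics import mean_absolute_error
import numpy as np

y_true = np.array([1.2, 0.8, 3.5])  # hypothetical time-to-failure values
y_pred = np.array([1.0, 1.1, 3.0])  # hypothetical model predictions
print("MAE:", mean_absolute_error(y_true, y_pred))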
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 1, figsize=(20, 12))
ax[0].scatter(train.index[:1000000], train.acoustic_data[:1000000], c="darkred")
ax[0].set_title("Acoustic data of the first 1 Mio rows")
ax[0].set_xlabel("Index")
ax[0].set_ylabel("Acoustic signal")
ax[1].scatter(train.index[:1000000], train.time_to_failure[:1000000], c="darkred")
ax[1].set_title("Quaketime of the first 1 Mio rows")
ax[1].set_xlabel("Index")
ax[1].set_ylabel("Quaketime in ms")
# split_size = 150_000
# box_acoustic = [[] for i in range(int(np.floor(train.shape[0] / split_size)))]
# for i in range(int(np.floor(train.shape[0] / split_size))):
# box_acoustic[i] = train.acoustic_data[i*split_size:(i+1)*split_size]
# ax[2].boxplot(np.asarray(box_acoustic)[:int(np.floor(len(box_acoustic)/1000)),:]);
# ### 2.3 Data Preparation
# This dataset hasn’t been cleaned yet, meaning that some attributes (features) are in numerical format and some are in categorical format. Moreover, there are missing values as well. However, all of Scikit-learn’s implementations of these algorithms expect numerical features. Check for every feature whether it is categorical and use a method to transform it to numerical values. For the numerical data, handle the missing values and normalize the data.
# Note that you are only allowed to use training data for preprocessing but you then need to perform similar changes on test data too.
# You can use [pipelining](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) to help with the preprocessing.
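# A minimal sketch (an assumption, not part of the original notebook) of the kind of
# preprocessing pipeline described above: impute and scale numeric columns and one-hot encode
# categorical ones with a ColumnTransformer. The numeric_cols / categorical_cols lists are
# hypothetical placeholders; this particular dataset only has numeric acoustic features, such
# as the summary statistics computed in the next subsection.
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder

numeric_cols = ["average", "std", "maximum", "minimum"]  # hypothetical numeric feature names
categorical_cols = []  # no categorical features in this dataset

preprocessor = ColumnTransformer(
    transformers=[
        (
            "num",
            Pipeline([("impute", SimpleImputer(strategy="median")), ("scale", StandardScaler())]),
            numeric_cols,
        ),
        ("cat", OneHotEncoder(handle_unknown="ignore"), categorical_cols),
    ]
)
# the preprocessor is fit on training data only and then reused unchanged on test data:
# x_train_prepared = preprocessor.fit_transform(x_train)
# x_test_prepared = preprocessor.transform(x_test)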
# ##### 2.3.1 Feature extraction
from sklearn.model_selection import train_test_split
rows = 150_000
segments = int(np.floor(train.shape[0] / rows))
x = pd.DataFrame(
index=range(segments), dtype=np.float64, columns=["ave", "std", "max", "min"]
)
y = pd.DataFrame(index=range(segments), dtype=np.float64, columns=["time_to_failure"])
# Time features
for segment in range(segments):
samples = train.acoustic_data[segment * rows : (segment + 1) * rows]
samples_fft = np.fft.fft(samples)
# plt.hist(samples_fft, bins=int(30e5), alpha=0.5, stacked=True)
x.loc[segment, "average"] = np.mean(samples)
x.loc[segment, "std"] = np.std(samples)
x.loc[segment, "maximum"] = np.max(samples)
x.loc[segment, "minimum"] = np.min(samples)
    y.loc[segment, "time_to_failure"] = train.time_to_failure[(segment + 1) * rows - 1]
del samples
del samples_fft
# del train
import matplotlib.pyplot as plt
import numpy as np
# Generating some random data
data_list = [np.random.randn(1000) for i in range(5)]
# Creating a histogram without storing the data in a variable
plt.hist(data_list, bins=30, alpha=0.5, stacked=True)
plt.show()
plt.hist(data_list, bins=30, alpha=0.5, stacked=False)
plt.show()
|
import warnings
warnings.filterwarnings("ignore")
# loading packages
import numpy as np
import pandas as pd
from datetime import datetime as dt  # pandas.datetime was removed in recent pandas versions
from pandas import Series, DataFrame
# data visualization
import matplotlib.pyplot as plt
import seaborn as sns # advanced vizs
from sklearn.model_selection import train_test_split
# machine learning
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
import xgboost as xgb
# reviewing the results
from sklearn.metrics import r2_score as r2, mean_squared_error as mse
import math
# importing train data files
store = pd.read_csv("../input/rossmann-store-sales/store.csv")
train = pd.read_csv("../input/rossmann-store-sales/train.csv")
test = pd.read_csv("../input/rossmann-store-sales/test.csv")
state = pd.read_csv("../input/rossmann-store-extra/store_states.csv")
state_name = pd.read_csv("../input/rossmann-store-extra/state_names.csv")
weathers = pd.read_csv("../input/rossmann-store-extra/weather.csv")
store = store.merge(state, on=["Store"], how="inner")
store
store.CompetitionDistance.fillna(store.CompetitionDistance.median(), inplace=True)
store.CompetitionOpenSinceMonth.fillna(
store.CompetitionOpenSinceMonth.median(), inplace=True
)
store.CompetitionOpenSinceYear.fillna(
store.CompetitionOpenSinceYear.median(), inplace=True
)
store.Promo2SinceWeek.fillna(0, inplace=True)
store.Promo2SinceYear.fillna(0, inplace=True)
store.PromoInterval.fillna(0, inplace=True)
df = store.merge(train, on=["Store"], how="inner")
df.head()
df["Date"] = pd.to_datetime(df["Date"])
df["Year"] = df["Date"].dt.year
df["Month"] = df["Date"].dt.month
df["Day"] = df["Date"].dt.day
df["Week"] = df["Date"].dt.week % 4
df["WeekOfYear"] = df["Date"].dt.week
df["StateHoliday"] = df["StateHoliday"].map({0: 0, "0": 0, "a": 1, "b": 1, "c": 1})
df = df[(df["Open"] != 0) & (df["Sales"] != 0)]
df
df["Assortment"] = [1 if i == "a" else 2 if i == "b" else 3 for i in df["Assortment"]]
df["CompetitionOpen"] = 0
df["CompetitionOpen"] = df["CompetitionOpen"].where(
df["CompetitionOpenSinceYear"] == 0,
other=12 * (df["Year"] - df["CompetitionOpenSinceYear"])
+ (df["Month"] - df["CompetitionOpenSinceMonth"]),
)
df["PromoOpen"] = 0
df["PromoOpen"] = df["PromoOpen"].where(
df["Promo2SinceYear"] == 0,
other=12 * (df["Year"] - df["Promo2SinceYear"])
+ (df["WeekOfYear"] - df["Promo2SinceWeek"]) / 4,
)
df["PromoOpen"] = df["PromoOpen"].where(df["PromoOpen"] > 0, 0)
df.info()
df.drop(
columns=[
"Store",
"CompetitionOpenSinceMonth",
"CompetitionOpenSinceYear",
"Promo2SinceWeek",
"Promo2SinceYear",
"WeekOfYear",
"Date",
],
inplace=True,
)
df2 = pd.get_dummies(
df, columns=["StoreType", "PromoInterval", "State"], drop_first=True
)
df2
df2["ln_Sales"] = df2["Sales"].map(lambda x: np.log(x) if x != 0 else 0)
df2["ln_Customers"] = df2["Customers"].map(lambda x: np.log(x) if x != 0 else 0)
df2["ln_CompetitionDistance"] = df2["CompetitionDistance"].map(
lambda x: np.log(x) if x != 0 else 0
)
from sklearn.preprocessing import RobustScaler
roscaler = RobustScaler()
data = df2[["PromoOpen", "CompetitionOpen"]]
data_scaled = roscaler.fit_transform(data)
data_final = pd.DataFrame(
data_scaled, columns=["scaled_PromoOpen", "scaled_CompetitionOpen"]
)
data_final
# reset df2's filtered index so it aligns with data_final, and keep the column names
df3 = pd.concat([df2.reset_index(drop=True), data_final], axis=1)
df3
df3.isna().sum()
df3.drop(
columns=[
"PromoOpen",
"CompetitionOpen",
"CompetitionDistance",
"Sales",
"Customers",
],
inplace=True,
)
df3.isna().sum()
from sklearn.preprocessing import StandardScaler
std = StandardScaler()
data = df3[["ln_CompetitionDistance", "ln_Customers", "ln_Sales"]]
std_data = std.fit_transform(data)
std_data = pd.DataFrame(std_data, columns="scaled_" + data.columns)
std_data.head()
df4 = pd.concat([df3, std_data], axis=1)
df4.drop(
columns=[
"ln_Customers",
"ln_CompetitionDistance",
"ln_Sales",
"scaled_ln_Customers",
],
inplace=True,
)
df4.tail()
df4.isna().sum()
x = df4.drop(["scaled_ln_Sales"], axis=1)
y = df4["scaled_ln_Sales"]
x.isna().sum()
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
# Multiple Linear Regression
# define the model (instantiate the estimator object)
m_lr = LinearRegression()
# train the model
m_lr.fit(x_train, y_train)
# predict on the test set
y_pred = m_lr.predict(x_test)
# explanatory power
print("R^2: ", r2(y_test, y_pred))
# RMSE as a measure of predictive power, for easier interpretation (a sketch of converting back to sales units follows below)
print("RMSE: ", math.sqrt(mse(y_test, y_pred)))
|
import pandas as pd
import polars as pl
from itertools import product
data_path = "/kaggle/input/m5-sales-ts/project_2_data/sales_data.parquet"
# # Feature list
# 1. Lags for 1, 2, 3 days
# 2. Seasonal lags for 1, 2, 3 days (weekday, week of the year, month)
# 3. Rolling mean and std with 7- and 30-day windows
# 4. Seasonal mean and std with 7- and 30-day windows (weekday, week of the year, month)
# # Pandas
def add_date_features(df, date_features):
for feature in date_features:
if feature == "weekday":
df[feature] = df.index.weekday
if feature == "week":
            df[feature] = df.index.isocalendar().week.astype("int64")
if feature == "month":
df[feature] = df.index.month
return df
def add_lag_features(df, target, horizon, lags, level):
for lag in lags:
feature_name = f"lag_{lag}_{horizon}"
df[feature_name] = (
df.groupby(level)[target].shift(horizon + lag).rename(feature_name)
)
return df
def add_seasonal_lag_features(df, target, horizon, lags, level, seasons):
for season in seasons:
for lag in lags:
feature_name = f"seasonal_lag_{lag}_{horizon}_by_{season}"
df[feature_name] = (
df.groupby([level, season])[target]
.shift(horizon + lag)
.rename(feature_name)
)
return df
def add_rolling_features(df, target, horizon, windows, agg_funcs, level):
grouped_target_df = df.groupby(level, group_keys=False)[target]
for agg_func in agg_funcs:
for window in windows:
feature_name = f"rolling_{agg_func}_{window}_{horizon}"
rolling_feature_df = (
grouped_target_df.rolling(window, closed="right", min_periods=1)
.agg({target: agg_func})
.reset_index()
.assign(date=lambda x: x.date + pd.Timedelta(days=horizon))
.rename(columns={"sales": feature_name})
)
df = (
df.reset_index()
.merge(rolling_feature_df, on=["date", "id"], how="left")
.set_index("date")
)
return df
def add_seasonal_rolling_features(
df, target, horizon, windows, agg_funcs, level, seasons
):
for season in seasons:
grouped_target_df = df.groupby([level, season], group_keys=False)[target]
for agg_func in agg_funcs:
for window in windows:
feature_name = (
f"seasonal_rolling_{agg_func}_{window}_{horizon}_by_{season}"
)
rolling_feature_df = (
grouped_target_df.rolling(window, closed="right", min_periods=1)
.agg({target: agg_func})
.reset_index()
.assign(date=lambda x: x.date + pd.Timedelta(days=horizon))
.rename(columns={"sales": feature_name})
.drop(columns=season)
)
df = (
df.reset_index()
.merge(rolling_feature_df, on=["date", "id"], how="left")
.set_index("date")
)
return df
data = (
pd.read_parquet(data_path)
.assign(cum_sales=lambda x: x.groupby("id")["sales"].cumsum())
.query("cum_sales > 0")
.drop(columns="cum_sales")
.reset_index("id")
)
add_date_features(data, ["weekday", "week", "month"])
add_lag_features(data, horizon=28, lags=[1, 2, 3], target="sales", level="id")
add_seasonal_lag_features(
data,
horizon=28,
lags=[1, 2, 3],
target="sales",
level="id",
seasons=["weekday", "week", "month"],
)
data = add_rolling_features(
data,
target="sales",
horizon=28,
windows=[7, 30],
agg_funcs=["mean", "std"],
level="id",
)
pd_data = add_seasonal_rolling_features(
data,
target="sales",
horizon=28,
windows=[7, 30],
agg_funcs=["mean", "std"],
level="id",
seasons=["weekday", "week", "month"],
)
pd_data.head()
# # Polars
pl.Config.set_fmt_str_lengths(100)
def lag_by_id(
col: str,
n: int,
horizon: int,
season: str = None,
):
if season:
grouper = ["id", season]
feature_name = f"seasonal_lag_{n}_{horizon}_by_{season}"
else:
grouper = "id"
feature_name = f"lag_{n}_{horizon}"
return pl.col(col).shift(n + horizon).over(grouper).alias(feature_name)
def rolling_mean_by_id(
col: str,
window: int,
horizon: int,
season: str = None,
):
if season:
grouper = ["id", season]
feature_name = f"seasonal_rolling_mean_{window}_{horizon}_by_{season}"
else:
grouper = "id"
feature_name = f"rolling_mean_{window}_{horizon}"
func = (
pl.col(col)
.rolling_mean(window, min_periods=1, closed="right")
.over(grouper)
.alias(feature_name)
)
shift_func = pl.col(feature_name).shift(window + horizon).over(grouper)
return func, shift_func
def rolling_std_by_id(
col: str,
window: int,
horizon: int,
season: str = None,
):
if season:
grouper = ["id", season]
feature_name = f"seasonal_rolling_std_{window}_{horizon}_by_{season}"
else:
grouper = "id"
feature_name = f"rolling_std_{window}_{horizon}"
func = (
pl.col(col)
.rolling_std(window, min_periods=1, closed="right")
.over(grouper)
.alias(feature_name)
)
shift_func = pl.col(feature_name).shift(window + horizon).over(grouper)
return func, shift_func
lags = [1, 2, 3]
windows = [7, 30]
seasons = ["weekday", "week", "month"]
rolling_features_calculations = (
[rolling_mean_by_id("sales", w, 28)[0] for w in windows]
+ [rolling_mean_by_id("sales", w, 28, s)[0] for w, s in product(windows, seasons)]
+ [rolling_std_by_id("sales", w, 28)[0] for w in windows]
+ [rolling_std_by_id("sales", w, 28, s)[0] for w, s in product(windows, seasons)]
)
rolling_features_shifts = (
[rolling_mean_by_id("sales", w, 28)[1] for w in windows]
+ [rolling_mean_by_id("sales", w, 28, s)[1] for w, s in product(windows, seasons)]
+ [rolling_std_by_id("sales", w, 28)[1] for w in windows]
+ [rolling_std_by_id("sales", w, 28, s)[1] for w, s in product(windows, seasons)]
)
data = (
pl.read_parquet(data_path)
# remove dates before first sale
.with_columns(pl.col("sales").cumsum().over("id").alias("cum_sales"))
.filter(pl.col("cum_sales") > 0)
.drop("cum_sales")
# add date features
.with_columns(
pl.col("date").dt.weekday().alias("weekday"),
pl.col("date").dt.week().alias("week"),
pl.col("date").dt.month().alias("month"),
)
# add lag features
.with_columns(
[lag_by_id("sales", l, 28) for l in lags]
+ [lag_by_id("sales", l, 28, s) for l, s in product(lags, seasons)]
)
# add rolling features
.with_columns(rolling_features_calculations)
.with_columns(rolling_features_shifts)
).lazy()
pl_data = data.collect()
pl_data.head()
# # Compare execution time
# A pretty drastic difference tbh
# pandas_time() and polars_time() are not defined in this excerpt; presumably they wrap and time
# the pandas and polars feature pipelines above (see the timing sketch below).
# pandas_time()
# polars_time()
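# A minimal sketch (an assumption, not part of the original notebook) of a generic timing helper
# that could back pandas_time / polars_time: wrap each feature pipeline in a function and report
# its wall-clock time. The build_pandas_features / build_polars_features names are hypothetical.
import time

def timed(label, func, *args, **kwargs):
    # run func once and print how long it took
    start = time.perf_counter()
    result = func(*args, **kwargs)
    print(f"{label}: {time.perf_counter() - start:.2f} s")
    return result

# usage, assuming the two pipelines above are wrapped in functions:
# pd_data = timed("pandas feature pipeline", build_pandas_features, data_path)
# pl_data = timed("polars feature pipeline", build_polars_features, data_path)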
|