import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/mobile-price-classification/train.csv")
data
data.info()
data.isnull().sum()
for col in data.columns:
print(col)
print(data[col].unique())
print("************")
col_names = [
"blue",
"dual_sim",
"fc",
"four_g",
"n_cores",
"three_g",
"touch_screen",
"wifi",
"price_range",
]
for col in col_names:
data[col].value_counts().plot(kind="bar", title=col)
plt.xticks(rotation=90)
plt.show()
data.head()
## Now, try to find the correlation between features and price_range
sns.heatmap(
data.corr()[["price_range"]].sort_values(by="price_range", ascending=False),
vmin=-1,
vmax=1,
annot=True,
cmap="BrBG",
)
## So we can say that ram is highly correlated with the target.
## Now, draw a heatmap for four_g, dual_sim, wifi, blue, talk_time, fc, sc_h, three_g, pc, sc_w, int_memory
data1 = data[
[
"four_g",
"dual_sim",
"wifi",
"blue",
"talk_time",
"fc",
"sc_h",
"three_g",
"pc",
"sc_w",
"int_memory",
]
]
data1.head()
corr_matrix = data1.corr()
plt.figure(figsize=(12, 6))
sns.heatmap(corr_matrix, cmap="coolwarm", annot=True, vmin=-1, vmax=1)
plt.show()
# ## Splitting the data into training and testing sets
y = data["price_range"]
data.drop(["price_range"], axis=1, inplace=True)
X = data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.8, random_state=0
)
X_train.shape
X_test.shape
y_train.shape
y_test.shape
# ## Train the model using LinearRegression
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
from sklearn.metrics import r2_score
print(r2_score(y_test, y_pred))
# ### The linear regression model achieves an R² score of about 0.921
# ## Training with KNN Classifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=10)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
# ### KNN Model Accuracy is 95.2%
# ## Training the model with LogisticRegression
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/online-shop-customer-sales-data/Online Shop Customer Sales Data.csv"
)
print("Shape:", df.shape)
print("Columns:", df.columns)
df.head()
# Number of Unique values
for columns in df.columns:
print(columns, ":", df[columns].nunique())
df.head()
df.describe()
df.info()
df.isnull().any()
category = ["Gender", "Browser", "Newsletter", "Voucher"]
# Columns
for columns in category:
print(columns, ":", df[columns].unique())
print(columns, "value counts :\n", df[columns].value_counts(), "\n")
# Number of Unique values
num_list = [
"Age",
"Revenue_Total",
"N_Purchases",
"Purchase_VALUE",
"Pay_Method",
"Time_Spent",
]
for columns in num_list:
print("Number of unique values in", columns, ":", df[columns].nunique())
# Maximum and minimum
print("Maximum", columns, ":", df[columns].max())
print("Minimum", columns, ":", df[columns].min())
# Average
print("Average", columns, ":", df[columns].mean())
# Most common
print("Most common", columns, ":", df[columns].mode()[0], "\n")
new_list = [
"Age",
"Gender",
"Revenue_Total",
"N_Purchases",
"Purchase_VALUE",
"Pay_Method",
"Time_Spent",
]
df1 = df[new_list].sort_values(by=["Age"])
df1.head()
# Max purchase by Age
max_purchase = df1[["Age", "Gender", "N_Purchases", "Purchase_VALUE", "Time_Spent"]][
df1["N_Purchases"] == df1["N_Purchases"].max()
]
min_purchase = df1[["Age", "Gender", "N_Purchases", "Purchase_VALUE", "Time_Spent"]][
df1["N_Purchases"] == df1["N_Purchases"].min()
]
mid_purchase = df1[["Age", "Gender", "N_Purchases", "Purchase_VALUE", "Time_Spent"]][
df1["N_Purchases"] == df1["N_Purchases"].median()
]
# Top 10 max purchase by age
max_purchase.head(10)
# Top 10 min purchase by age
min_purchase.head(10)
# Most time spent by age group
most_time_spend = df1[["Age", "Gender", "N_Purchases", "Purchase_VALUE", "Time_Spent"]][
df1["Time_Spent"] == df1["Time_Spent"].max()
]
min_time_spend = df1[["Age", "Gender", "N_Purchases", "Purchase_VALUE", "Time_Spent"]][
df1["Time_Spent"] == df1["Time_Spent"].min()
]
most_time_spend.head(10)
min_time_spend.head(10)
|
# # Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import warnings
import xgboost as xgb
from hyperopt import Trials, STATUS_OK, tpe, hp, fmin
warnings.filterwarnings("ignore")
# # Read dataframe & analyzing it
df = pd.read_csv(
"/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv"
)
df.head()
df.columns
df.shape
df.describe().T
# # Finding categorical columns
obj_dtypes = [i for i in df.select_dtypes(include="object").columns]  # np.object was removed from NumPy; use the string dtype name
obj_dtypes
# # Categorical variables encoding
# Label Encoding for object to numeric conversion
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for feat in obj_dtypes:
df[feat] = le.fit_transform(df[feat].astype(str))
print(df.info())
df.drop(["EmployeeNumber"], axis=1)
df.sample(10)
# # Checking the distribution of target variable
# To show imbalance data
fig = plt.figure(figsize=(13, 6))
plt.subplot(121)
df["Attrition"].value_counts().plot.pie(
autopct="%1.0f%%",
colors=["red", "yellow"],
startangle=60,
wedgeprops={"linewidth": 2, "edgecolor": "k"},
shadow=True,
)
plt.title("Distribution of Turnover")
plt.show()
# **From the above graphical analysis , we can see that the data is highly imbalanced**
# # Applying SMOTE for imbalance data
X = df.drop(["Attrition"], axis=1)
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, df["Attrition"], test_size=0.1, random_state=0
)
# train,test = train_test_split(df , test_size = 0.10 ,random_state = 17 , stratify = df['Turnover'])
print("Number transactions X_train dataset: ", X_train.shape)
print("Number transactions y_train dataset: ", y_train.shape)
print("Number transactions X_test dataset: ", X_test.shape)
print("Number transactions y_test dataset: ", y_test.shape)
print("Before OverSampling, counts of label '1': {}".format(sum(y_train == 1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train == 0)))
sm = SMOTE(sampling_strategy="minority", random_state=2)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())
print("After OverSampling, the shape of train_X: {}".format(X_train_res.shape))
print("After OverSampling, the shape of train_y: {} \n".format(y_train_res.shape))
print("After OverSampling, counts of label '1': {}".format(sum(y_train_res == 1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train_res == 0)))
# # Applying Logistic Regression algorithm
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
confusion_matrix,
precision_recall_curve,
auc,
roc_auc_score,
roc_curve,
recall_score,
classification_report,
)
parameters = {
"penalty": ["l1", "l2"],
"C": np.logspace(-3, 3, 7),
"solver": ["newton-cg", "lbfgs", "liblinear"],
}
lr = LogisticRegression()
clf = GridSearchCV(lr, parameters, cv=5, verbose=5, n_jobs=3)
clf.fit(X_train, y_train.ravel())
print("Tuned Hyperparameters :", clf.best_params_)
print("Accuracy :", clf.best_score_)
logreg = LogisticRegression(
C=100,
penalty="l2",
solver="liblinear",
)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print("Accuracy:", logreg.score(X_test, y_test))
import sklearn.metrics as metrics
classification_report_tuned = metrics.classification_report(y_test, y_pred)
print(classification_report_tuned)
from sklearn.metrics import accuracy_score
acc_scr_log_tuned = accuracy_score(y_test, y_pred)
print("Overall accuracy of logistic regression model:", acc_scr_log_tuned)
# # Applying XG Boost algorithm
param_grid = {
"max_depth": [3, 4, 5, 7],
"learning_rate": [0.1, 0.01, 0.05],
"gamma": [0, 0.25, 1],
"reg_lambda": [0, 1, 10],
"scale_pos_weight": [1, 3, 5],
"subsample": [0.8],
"colsample_bytree": [0.5],
}
xgb_cl = xgb.XGBClassifier(objective="binary:logistic")
# Init Grid Search
grid_cv = GridSearchCV(xgb_cl, param_grid, n_jobs=-1, cv=3, scoring="roc_auc")
# Fit
_ = grid_cv.fit(X_train, y_train)
print(grid_cv.best_score_)
print(grid_cv.best_params_)
# Insert the new fixed values to the grid
param_grid["scale_pos_weight"] = [3]
param_grid["subsample"] = [0.8]
param_grid["colsample_bytree"] = [0.5]
# Give new value ranges to other params
param_grid["gamma"] = [3, 5, 7]
param_grid["max_depth"] = [9, 15, 20]
param_grid["reg_lambda"] = [10, 30, 50]
param_grid["learning_rate"] = [0.3, 0.5, 0.7, 1]
grid_cv_2 = GridSearchCV(xgb_cl, param_grid, cv=3, scoring="roc_auc", n_jobs=-1)
_ = grid_cv_2.fit(X_train, y_train)
print(grid_cv_2.best_score_)
print(grid_cv_2.best_params_)
final_cl = xgb.XGBClassifier(**grid_cv.best_params_)
_ = final_cl.fit(X_train, y_train)
pred_gs = final_cl.predict(X_test)
acc_scr_gs = accuracy_score(y_test, pred_gs)
print("Overall accuracy of XG Boost_gs model:", acc_scr_gs)
|
# [Rapids](https://rapids.ai) is an open-source GPU-accelerated Data Science and Machine Learning library, developed and maintained by [Nvidia](https://www.nvidia.com). It is designed to be compatible with many existing CPU tools, such as Pandas, scikit-learn, numpy, etc. It enables **massive** acceleration of many data-science and machine learning tasks, oftentimes by a factor of 100X, or even more.
# Rapids is still undergoing development, and as of right now it's not available in the Kaggle Docker environment. If you are interested in installing and running Rapids locally on your own machine, then you should [refer to the following instructions](https://rapids.ai/start.html).
# The first successful install of a Rapids library on Kaggle was done by [Chris Deotte](https://www.kaggle.com/cdeotte) in the following [Digit Recognizer kernel](https://www.kaggle.com/cdeotte/rapids-gpu-knn-mnist-0-97). An improved install version that uses a Kaggle Dataset for the install can be found [here](https://www.kaggle.com/cdeotte/rapids-data-augmentation-mnist-0-985). In this kernel we'll follow that approach.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# INSTALL RAPIDS OFFLINE (FROM KAGGLE DATASET). TAKES 1 MINUTE :-)
import sys
sys.path = (
["/opt/conda/envs/rapids/lib"]
+ ["/opt/conda/envs/rapids/lib/python3.6"]
+ ["/opt/conda/envs/rapids/lib/python3.6/site-packages"]
+ sys.path
)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score
import cudf, cuml
import cupy as cp
from cuml.linear_model import LogisticRegression
train = pd.read_csv("../input/digit-recognizer/train.csv")
submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
train.head()
submission.head()
y = train["label"].values
y
train_tsne = np.load("../input/mnist-2d-t-sne-with-rapids/train_2D.npy")
test_tsne = np.load("../input/mnist-2d-t-sne-with-rapids/test_2D.npy")
train_x, val_x, train_y, val_y = train_test_split(train_tsne, y, test_size=0.10)
clf = LogisticRegression(C=0.1)
clf.fit(train_x, train_y.astype("float32"))
preds = clf.predict(val_x)
np.mean(cp.array(val_y) == preds.values.astype("int64"))
train_umap = np.load("../input/mnist-2d-umap-with-rapids/train_2D.npy")
test_umap = np.load("../input/mnist-2d-umap-with-rapids/test_2D.npy")
train_x, val_x, train_y, val_y = train_test_split(train_umap, y, test_size=0.10)
clf = LogisticRegression(C=12)
clf.fit(train_x, train_y.astype("float64"))
preds = clf.predict(val_x)
np.mean(cp.array(val_y) == preds.values.astype("int64"))
train_y.astype("float32")
train_both = np.hstack([train_umap, train_tsne])
test_both = np.hstack([test_umap, test_tsne])
train_x, val_x, train_y, val_y = train_test_split(train_both, y, test_size=0.10)
clf = LogisticRegression(C=1)
clf.fit(train_x, train_y.astype("float64"))
preds = clf.predict(val_x)
np.mean(cp.array(val_y) == preds.values.astype("int64"))
test_preds = clf.predict(test_both)
submission["Label"] = test_preds
submission.to_csv("submission.csv", index=False)
|
# # cross validation
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
import numpy as np
iris = load_iris()
x, y = iris.data, iris.target
kf = KFold(n_splits=5, shuffle=True, random_state=42)
# shuffle=True means the dataset is shuffled randomly before being split into folds.
# random_state is used to obtain the same result across different runs.
model = LinearRegression()
scores = []
for train_index, test_index in kf.split(x):
x_train, x_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
score = mean_squared_error(y_test, y_pred)
scores.append(score)
mean_score = np.mean(scores)
print("k fold cross validation", scores)
print("ortalama hata skoru", mean_score)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_twitter = pd.read_csv(
"/kaggle/input/twitter-with-steming/twitter-entity-sentiment-analysis-with-stemming (1).csv"
)
train_twitter
train_twitter["Tweet"] = train_twitter["Tweet"].astype(str)
train_twitter["Tweet"] = train_twitter["Tweet"].str.split(" ")
train_twitter
import gensim
tokenized_tweet = train_twitter["Tweet"]
model_w2v = gensim.models.Word2Vec(
tokenized_tweet, window=5, min_count=2, sg=1, hs=0, negative=10, workers=32, seed=34
)
model_w2v.train(tokenized_tweet, total_examples=len(train_twitter["Tweet"]), epochs=20)
len(model_w2v.wv["good"])
def word_vector(tokens, size):
vec = np.zeros(size).reshape((1, size))
count = 0
for word in tokens:
try:
vec += model_w2v.wv[word].reshape((1, size))
count += 1
except KeyError:
continue
if count != 0:
vec /= count
return vec
word2vec_arrays = np.zeros((len(tokenized_tweet), 100))
for i in range(len(tokenized_tweet)):
word2vec_arrays[i, :] = word_vector(tokenized_tweet[i], 100)
word2vec_df = pd.DataFrame(word2vec_arrays)
word2vec_df.shape
model_w2v.wv["good"]
from sklearn.model_selection import train_test_split
xtrain_bow, svalid_bow, ytrain, yvalid = train_test_split(
train_twitter["Tweet"], train_twitter["Sentiment"], random_state=42, test_size=0.3
)
index_train_t = pd.DataFrame(ytrain.index)
index_train_t.columns = ["Indice"]
index_train_t
index_train_t.to_csv("twitter-index-train.csv", index=False)
index_test_t = pd.DataFrame(yvalid.index)
index_test_t.columns = ["Indice"]
index_test_t
index_test_t.to_csv("twitter-index-test.csv", index=False)
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score
parameters = {"kernel": ("linear", "rbf"), "C": [1, 10]}
xtrain_w2v = word2vec_df.iloc[index_train_t.Indice, :]
xvalid_w2v = word2vec_df.iloc[index_test_t.Indice, :]
ytrain = train_twitter.iloc[index_train_t.Indice, :]["Sentiment"]
yvalid = train_twitter.iloc[index_test_t.Indice, :]["Sentiment"]
svc = svm.SVC()
clf = GridSearchCV(svc, parameters)
clf.fit(xtrain_w2v, ytrain)
print(clf.best_estimator_)
predictions = clf.predict(xvalid_w2v)
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.preprocessing import label_binarize
from scipy import interp
from sklearn.metrics import roc_curve, auc
import concurrent.futures
def get_metrics(true_labels, predicted_labels):
parameters = []
parameters.append(
np.round(metrics.accuracy_score(true_labels, predicted_labels), 4)
)
parameters.append(
np.round(
metrics.precision_score(true_labels, predicted_labels, average="weighted"),
4,
)
)
parameters.append(
np.round(
metrics.recall_score(
true_labels, predicted_labels, average="weighted"
),
4,
)
)
parameters.append(
np.round(metrics.f1_score(true_labels, predicted_labels, average="weighted"), 4)
)
print("Accuracy:", parameters[0])
print("Precision:", parameters[1])
print("Recall:", parameters[2])
print("F1 Score:", parameters[3])
return parameters
def display_classification_report(true_labels, predicted_labels, classes=[1, 0]):
report = metrics.classification_report(
true_labels,
predicted_labels,
)
print(report)
return report
def display_confusion_matrix(true_labels, predicted_labels, classes=[1, 0]):
total_classes = len(classes)
level_labels = [total_classes * [0], list(range(total_classes))]
cm = metrics.confusion_matrix(true_labels, predicted_labels)
cm_frame = pd.DataFrame(
cm,
index=pd.MultiIndex(levels=[["Actual:"], classes], codes=level_labels),
columns=pd.MultiIndex(levels=[["Predicted:"], classes], codes=level_labels),
)
print(cm_frame)
def display_model_performance_metrics(true_labels, predicted_labels, classes=[1, 0]):
print("Model Performance metrics:")
print("-" * 30)
parameters = get_metrics(true_labels, predicted_labels)
print("\nModel Classification report:")
print("-" * 30)
report = display_classification_report(true_labels, predicted_labels, classes)
print("\nPrediction Confusion Matrix:")
print("-" * 30)
display_confusion_matrix(true_labels, predicted_labels, classes)
return parameters, report
display_model_performance_metrics(yvalid, predictions, [0, 4])
|
# ---
# ---
# # Preparation / instructions
# - Your job is to complete this lab (TP) framework.
# - You may (hmmm, must?) add code blocks as well as explanation blocks.
# - Explanation blocks use the Markdown format: [markdown](https://www.markdownguide.org/cheat-sheet/).
# - The deliverable is a printed version of this ```frame```.
# - Remember to ```commit``` regularly.
# - You can create a local copy of your work via ```file / Download Notebook```.
# ---
# ---
# # L3 ISIMA lab: decision trees
# ---
# The main points covered in this lab are:
# - Building the sets to work with (training, test, validation)
# - Visualizing the data
# - Choosing the split criteria when building a decision tree
# + the case where the split can be made on a single attribute
# + the case where the split is linear but must involve several attributes
# - Building the tree
# - Judging the quality of the learning
# - Discovering sklearn
# ## Load the libraries
# - numpy
# - pandas
# - seaborn
# - matplotlib.pyplot
# In what follows, add the imports in this cell.
# Load the requested libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
print("Setup complete.")
# ## Load the 'Iris Species' database into the environment
# - Start by adding the dataset to the kernel: *add data* in the right-hand 'frame'
# - Use the pandas command that loads the CSV data file
# - Don't forget to check what the result looks like by displaying the first rows
#
# Reminder: the data are in "../input/", whose contents can be listed with:
# ```
# import os
# print(os.listdir("../input/"))
# ```
#
# Load the database
import os
print(os.listdir("../input/iris"))
iris_filepath = "../input/iris/Iris.csv"
iris = pd.read_csv(iris_filepath, index_col="Id")
# This database is a little too well cleaned... For teaching purposes, we are going to degrade it.
# Run the following line: ```iris[np.random.rand(150,5)<0.05]=np.nan```
iris[np.random.rand(150, 5) < 0.05] = np.nan
# ## Examine the contents of this database
# - List the first 5 rows (prefer ```display``` over ```print```)
# - Display the number of rows as well as the number of columns
# - Display a simple statistical summary of this database
# Produce the first displays
display(iris.head())
display(iris.shape)
display(iris.describe())
# ## Check that there are no missing data
# - For each attribute, count the number of missing values.
# - Remove the rows that have at least one missing value (this is the simplest way to get rid of the problem)
# - How many examples were lost this way?
# - Think about other ways of handling missing data (tip: come back to this point at the end of the lab)
# Let's look at the missing data...
data_number = iris.shape[0]
print("Number of missing values:")
print("For attribute SepalLengthCm:")
display(iris[pd.isnull(iris.SepalLengthCm)].shape[0])
print("For attribute SepalWidthCm:")
display(iris[pd.isnull(iris.SepalWidthCm)].shape[0])
print("For attribute PetalLengthCm:")
display(iris[pd.isnull(iris.PetalLengthCm)].shape[0])
print("For attribute PetalWidthCm:")
display(iris[pd.isnull(iris.PetalWidthCm)].shape[0])
print("For attribute Species:")
display(iris[pd.isnull(iris.Species)].shape[0])
#####
iris = iris.dropna()
display(iris.shape)
print("We lost:")
print(data_number - iris.shape[0], "examples")
# How we could have done it instead...
# # Representing the distribution of the attributes
# ## For each attribute, plot its distribution (one curve per variety)
# - Histogram
# - Box plot
# - Violin plot
# - Kernel density estimate
# NB: to get a proper layout, it can be useful to use ```fig,ax = plt.subplots(parameters)``` to define the layout of the plots, then ```plt.sca(ax[i])``` to choose which subplot to draw in. A possible sketch follows below.
# First plots
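# A possible sketch for one of the requested views (a kernel density estimate per attribute,
# one curve per variety), using the fig/ax + plt.sca pattern suggested above; the column
# names are those of the Iris CSV loaded earlier.
attributs = ["SepalLengthCm", "SepalWidthCm", "PetalLengthCm", "PetalWidthCm"]
fig, ax = plt.subplots(1, len(attributs), figsize=(20, 4))
for i, att in enumerate(attributs):
    plt.sca(ax[i])  # select the i-th subplot
    for espece, groupe in iris.groupby("Species"):
        sns.kdeplot(groupe[att], label=espece)
    plt.title(att)
plt.legend()
plt.show()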
# ## The working sets
# - the training set: iris_Train, 70% of the data
# - the validation set: iris_Test, 20% of the data
# - the test set: iris_Validation, 10% of the data
# ### Recall the purpose of each of these sets
# Answer: ...
# ## Create these sets
# - Load ```train_test_split``` from the ```sklearn.model_selection``` module
# - Split iris into observations (the observable attributes) and class (the variety)
# - Read the manual of ```train_test_split```
# - Look in particular at the stratify option, and use it
# - Ask yourself what random_state is for
# - With two applications of this function, create the 6 sets train_X, train_Y, test_X, test_Y, validation_X, validation_Y (tip: check the sizes of the resulting sets); a possible sketch is given below
# import, then use it to build the three sets
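# A possible sketch (the 70/20/10 split is obtained with two calls to train_test_split,
# stratified on the variety; the set names follow the statement above).
from sklearn.model_selection import train_test_split

observations = iris.drop("Species", axis=1)
classes = iris["Species"]
# First call: 70% for training, 30% held out.
train_X, rest_X, train_Y, rest_Y = train_test_split(
    observations, classes, train_size=0.7, stratify=classes, random_state=0
)
# Second call: 2/3 of the remainder for test (20% overall), 1/3 for validation (10%).
test_X, validation_X, test_Y, validation_Y = train_test_split(
    rest_X, rest_Y, train_size=2 / 3, stratify=rest_Y, random_state=0
)
print(train_X.shape, test_X.shape, validation_X.shape)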
# ---
# ---
# # Building a decision tree by hand
# ---
# ---
# # First split: (work on **iris_Train**)
# ## Plot every possible pair of attributes
# - Scatter plots (points vs. points)
# - Density or histogram or box plots
# *** (use *pair*grid from the seaborn module)***
# Back to plots, but this time to carry out the learning
# ## Comment on the plots obtained
# - Is there a variety that is easy to separate?
# - Which attributes make it possible to separate it from the other two?
# - Which plots allowed you to choose this attribute?
# Answers:
# ## Choose the root of the decision tree:
# - attribute on which to perform the split
# - threshold value to use
# Answer
# ## Write a function niveau0
# - Taking a description as input
# - Returning an estimate of the iris variety (for now there are only two 'varieties': the one we separated and 'the rest', which we denote here by ```???```)
# NB: one use of your function could be, for example: ```niveau0(iris_Train_X)``` should return ```iris_Train_Y``` if the learning is perfect (which is usually not a good sign...)
# NB2: you must return a DataFrame with a single attribute, which you will name
# NB3: use pandas' apply
#
# Function niveau0
# # Next level of the tree
# We should now build, for each output value of *niveau0*, a function that refines the classification. Here the work is simplified, because one of the two classes obtained by *niveau0* is perfectly homogeneous, so we only refine the part corresponding to the *???* answer of the *niveau0* output. In the general case, the same procedure would have to be followed on the other subtree.
# ## Filter, in the test set, the elements whose answer from *niveau0* is '???'
# - Call this set train_2
# - split it into train_X_2, train_Y_2
# ** If necessary, reset the index: df.reset_index(drop=True, inplace = True) **
# Don't forget to perform the index resets
# ## Redo the pair plot in order to determine the best split
# Redo the plots, restricting to the data of the '???' subtree
# ## Tough one
# It seems much harder here to determine the best way to classify:
# - no vertical cut seems clearly better than the others
# - no diagonal cut seems to solve the problem
# - maybe there is a cut in a higher dimension that would be satisfactory, but from dimension 3 onwards things become hard to see...
# 1) Give a situation (not present here) in which there would be no satisfactory vertical cut but there would be a suitable oblique cut; if possible, generalize to three variables.
# 2) Show with a simple example in dimension 2 (on a compact domain) that an infinite number of vertical cuts can approximate an oblique cut.
# Solution:
# ## Searching for the best cut
# Among all the attributes, we are going to look for the one that seems to give the best separation between the two remaining iris varieties. To do this, we will consider a cut along any attribute and for any threshold value, then sweep over the possible vertical cuts and keep the least bad one.
# ** There are (fortunately) more efficient methods than the one presented here, cf. Support Vector Machines **
# Write a functional *separe* taking as inputs:
# - an attribute 'att'
# - a threshold
# - a label 'A'
# - a label 'B'
# that returns a function which takes a situation as input and returns 'A' if this situation has its attribute *att < seuil* and 'B' otherwise (a possible sketch is given below)
# ** Normally you should be able to use *niveau0* as a starting point**
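# A possible sketch of the *separe* functional described above: it returns a classification
# function that labels a row 'A' when its attribute is below the threshold and 'B' otherwise.
# The single-column name "prediction" is an illustrative choice.
def separe(att, seuil, A, B):
    def classifieur(situations):
        return situations[att].apply(lambda v: A if v < seuil else B).to_frame("prediction")
    return classifieur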
# ## Choosing the 'best' vertical cut
# The measures you must know are:
# - the confusion matrix
# - The accuracy $\frac{TP + TN}{TP+TN+FP+FN}$
# - The true positive rate / recall (sensitivity) $\frac{TP}{TP+FN}$
# - The true negative rate (specificity) $\frac{TN}{TN+FP}$
# - The precision $\frac{TP}{TP + FP}$
# - The F_1 measure $2 \times \frac{recall \times precision}{recall + precision}$
# - The ROC curve
# - The ROC score
# If you feel the need for help on these measures: [Evaluating classifiers](https://www.youtube.com/watch?v=FAr2GmWNbT0)
# We choose to define the best cut as the one with the best accuracy.
# Write a sweep function returning the tuple *(attribute, threshold, A, B)* (or the corresponding classification function) that maximizes the accuracy. We call the resulting classification function niveau1_d (a brute-force sketch is given below).
# NB1: a clever algorithm is not required, just a simple sweep... which may take a significant amount of time :)
# NB2: remember to bring in sklearn's metrics...
# NB3: be careful to choose the right set to work on (training? test? validation?)
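# A possible brute-force sketch of the sweep: it tries every attribute, every observed value
# as a threshold, and both label orderings, and keeps the cut with the best accuracy.
# The names train_X_2 / train_Y_2 and the two remaining varieties are assumptions taken
# from the statement above.
from sklearn.metrics import accuracy_score

def balayage(X, Y, labels):
    best = (None, None, None, None, 0.0)
    for att in X.columns:
        for seuil in sorted(X[att].unique()):
            for A, B in [(labels[0], labels[1]), (labels[1], labels[0])]:
                pred = separe(att, seuil, A, B)(X)["prediction"]
                score = accuracy_score(Y, pred)
                if score > best[4]:
                    best = (att, seuil, A, B, score)
    return best
# Example use (assuming the filtered sets from the previous step):
# attribut, seuil, A, B, score = balayage(train_X_2, train_Y_2, ["Iris-versicolor", "Iris-virginica"])
# niveau1_d = separe(attribut, seuil, A, B)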
# ## Building the classifier chaining the first two classifiers
# Create a classification function that chains the two functions **niveau0** and **niveau1_d**; name the resulting function **arbre**
# ---
# # Judging the quality of the work!!
# ---
# Judging the quality of the result is very important; it makes it possible to
# - Choose the most suitable model among several
# - Identify avenues for improving a model
# - Evaluate the model's capabilities once it is put into production
# ## On which set should the quality be judged?
# Explain the value of measuring the quality on each of the sets
# - Training set:
# - Validation set:
# - Test set:
# ## Explain the value of each of the previous measures, and propose a relevant example for each of them justifying its existence
# - the confusion matrix:
# - The accuracy:
# - The true positive rate / recall (sensitivity):
# - The true negative rate (specificity):
# - The precision:
# - The F_1 measure:
# ## Using sklearn.metrics, evaluate the different measures and comment
# ## 'Diagonal' cuts
# We chose to make cuts on a single attribute (a left side and a right side). It is also possible to cut the space into two half-spaces. In principle, all cuts are possible. Here we are interested in the cuts observable in the pair plot.
# Look at the plot again and determine whether there is a more effective cut than the one found previously.
# - You are not asked to implement it (but you may :))
# - Describe a situation with 3 attributes where there is no separating plane on any 2 parameters even though a perfect separating plane exists. (the classes are not linearly separable in any of the 'pair' plots, yet they are linearly separable)
# ### Answers:
# ---
# # Decision trees built with sklearn
# ---
# - import the ```tree``` module from ```sklearn```
# - Study the documentation of ```DecisionTreeClassifier```, in particular the **Tips on practical use** section
# - Build a classifier using the Gini index
# ## Evaluation
# - Compute the scores used in this lab
# - Compare the scores to those obtained by a 'dummy' classifier (sklearn.dummy) (what is this useful for?); a possible sketch follows below
# Note: there is still a bug in dummy; if you get an error of the type 'no argmax on list', a workaround is to reformat the classifier inputs with 'check_X_y'
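# A possible sketch (reusing the train/test set names from the split sketch above):
# a Gini-based tree and a 'dummy' baseline for comparison.
from sklearn import tree
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score

clf_gini = tree.DecisionTreeClassifier(criterion="gini", random_state=0)
clf_gini.fit(train_X, train_Y)
baseline = DummyClassifier(strategy="most_frequent")
baseline.fit(train_X, train_Y)
print("tree accuracy :", accuracy_score(test_Y, clf_gini.predict(test_X)))
print("dummy accuracy:", accuracy_score(test_Y, baseline.predict(test_X)))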
# # Building the 'best' decision tree
# - Vary the construction parameters of the decision tree (put the 'Tips' into practice)
# - For each set of parameters, evaluate the result on the test set
# - Choose the 'best' tree on the test set
# Don't forget the ** Occam's razor ** principle when making your choice!!!!!
# play with the parameters, and each time judge the quality, until you obtain your 'best' tree
# ## By the way, what is the validation set for??
# - Use the validation set to give the final evaluation of your tree
# - Why can't this evaluation be obtained from the test set?
#
# Do the final evaluation
# Answer on the usefulness of the validation set:
# ## It is possible to visualize a decision tree
# - Import the graphviz module
# - use the ```tree.export_graphviz``` function, then ```graphviz.Source```, to produce a nice graphical representation (a possible sketch follows below)
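# A possible sketch, assuming the fitted tree clf_gini and the observation/class frames from
# the sketches above (any fitted DecisionTreeClassifier works here).
import graphviz
from sklearn import tree

dot_data = tree.export_graphviz(
    clf_gini,
    out_file=None,
    feature_names=list(observations.columns),
    class_names=sorted(classes.unique()),
    filled=True,
    rounded=True,
)
graphviz.Source(dot_data)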
# ---
# # Random Forest
# ---
# - Recall the principle of random forests of decision trees
# - Replace the decision tree classifier with a random forest classifier
# # Optimizing the parameters
# - Use sklearn's 'gridSearch' to look for the best parameters of the forest (a possible sketch follows below)
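# A possible sketch (the parameter values are illustrative assumptions; the set names come
# from the split sketch above).
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

rf_grid = {
    "n_estimators": [50, 100, 200],
    "max_depth": [None, 3, 5],
    "min_samples_leaf": [1, 2, 5],
}
rf_search = GridSearchCV(RandomForestClassifier(random_state=0), rf_grid, cv=5)
rf_search.fit(train_X, train_Y)
print(rf_search.best_params_)
print(rf_search.best_score_)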
# # Choose the best parameters using the test set; explain what cross-validation is
# Answer:
# # Thank you for making it to the end of this lab; I hope this work has helped you deepen your understanding of the machine learning course. See you soon for the next one :)
import pandas as pd
Iris = pd.read_csv("../input/iris/Iris.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Importing Libraries**
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn import tree
from sklearn.model_selection import cross_val_score
crop = pd.read_csv("/kaggle/input/crop-recommendation-dataset/Crop_recommendation.csv")
crop.head(5)
# # Data Analysis
crop.isnull().sum()
crop.info()
crop.describe()
crop.columns
crop.shape
crop["label"].unique()
crop["label"].nunique()
crop["label"].value_counts()
fig, ax = plt.subplots(1, 1, figsize=(15, 9))
sns.heatmap(crop.corr(), annot=True, cmap="viridis")
plt.title("Correlation between different features", fontsize=15, c="black")
plt.show()
crop_summary = pd.pivot_table(crop, index=["label"], aggfunc="mean")
crop_summary.head()
x = crop_summary.index
y1 = crop_summary["N"]
y2 = crop_summary["P"]
y3 = crop_summary["K"]
color1 = "mediumvioletred"
color2 = "springgreen"
color3 = "dodgerblue"
fig, ax = plt.subplots(figsize=(10, 6))
ax.bar(x, y1, color=color1, label="Nitrogen")
ax.bar(x, y2, color=color2, bottom=y1, label="Phosphorous")
ax.bar(x, y3, color=color3, bottom=y1 + y2, label="Potash")
ax.set_title("N-P-K values comparision between crops")
ax.set_xlabel("Crop")
ax.set_ylabel("Nutrient Value")
plt.xticks(rotation=-45, ha="left", va="top")
ax.legend()
plt.subplots_adjust(bottom=0.2)
plt.show()
x = crop_summary.index
y1 = crop_summary["temperature"]
y2 = crop_summary["humidity"]
y3 = crop_summary["rainfall"]
fig, ax = plt.subplots(figsize=(10, 6))
ax.bar(x, y1, color=color1, label="Temperature")
ax.bar(x, y2, color=color2, bottom=y1, label="Humidity")
ax.bar(x, y3, color=color3, bottom=y1 + y2, label="Rainfall")
ax.set_title("Temperature-Humidity-Rainfall values comparision between crops")
ax.set_xlabel("Crop")
ax.set_ylabel("Environmental Values")
plt.xticks(rotation=-45, ha="left", va="top")
ax.legend()
plt.subplots_adjust(bottom=0.2)
plt.show()
# # Clustering
plt.scatter(crop["humidity"], crop["rainfall"])
clst = crop.loc[:, ["humidity", "rainfall"]].values
print(clst.shape)
clst_data = pd.DataFrame(clst)
clst_data.head()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
clst_scaled = scaler.fit_transform(clst_data)
clst_scaled = pd.DataFrame(clst_scaled, columns=["humidity", "rainfall"])
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
km = KMeans(n_clusters=i, init="k-means++")
km.fit(clst_data)
wcss.append(km.inertia_)
sns.set()
plt.plot(range(1, 11), wcss)
plt.title("The Elbow Point Graph")
plt.xlabel("Number of Clusters")
plt.ylabel("WCSS")
plt.show()
km = KMeans(n_clusters=2, init="k-means++", max_iter=300, n_init=10, random_state=0)
y_means = km.fit_predict(clst_scaled)
a = crop["label"]
y_means = pd.DataFrame(y_means)
z = pd.concat([clst_scaled["humidity"], clst_scaled["rainfall"], y_means, a], axis=1)
z = z.rename(columns={0: "cluster"})
print("Crops in First Cluster:", z[z["cluster"] == 0]["label"].unique())
print("Crops in Second Cluster:", z[z["cluster"] == 1]["label"].unique())
centers = km.cluster_centers_
print(centers)
c1 = z[z["cluster"] == 0]
c2 = z[z["cluster"] == 1]
plt.scatter(c1["humidity"], c1["rainfall"], color="green")
plt.scatter(c2["humidity"], c2["rainfall"], color="red")
plt.scatter(
centers[:, 0], centers[:, 1], marker="x", s=200, linewidths=3, color="black"
)
# # Splitting the dataset
features = crop[["N", "P", "K", "temperature", "humidity", "ph", "rainfall"]]
target = crop["label"]
acc = []
model = []
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
features, target, test_size=0.2, random_state=2
)
# # KNN
#
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(x_train, y_train)
predicted_values = knn.predict(x_test)
x = metrics.accuracy_score(y_test, predicted_values)
print("KNN Accuracy is: ", x)
score = cross_val_score(knn, features, target, cv=5)
print("Cross validation score: ", score)
knn_train_accuracy = knn.score(x_train, y_train)
print("knn_train_accuracy = ", knn.score(x_train, y_train))
knn_test_accuracy = knn.score(x_test, y_test)
print("knn_test_accuracy = ", knn.score(x_test, y_test))
# # Hyperparameter Tuning
#
a = []
for i in range(1, 47, 2):
a.append(i)
len(a)
from sklearn.model_selection import GridSearchCV
grid_params = {
"n_neighbors": a,
"weights": ["uniform", "distance"],
"metric": ["minkowski", "euclidean", "manhattan"],
}
gs = GridSearchCV(KNeighborsClassifier(), grid_params, verbose=1, cv=7, n_jobs=-1)
g_res = gs.fit(x_train, y_train)
gsresult = pd.DataFrame(g_res.cv_results_)
gsresult.head()
gsresult = pd.DataFrame(g_res.cv_results_)
gsresult.head()
gsresult[
["param_metric", "param_weights", "param_n_neighbors", "mean_test_score"]
].head()
g_res.best_score_
g_res.best_params_
best_knn = KNeighborsClassifier(n_neighbors=5, weights="distance", metric="manhattan")
best_knn.fit(x_train, y_train)
predicted_values = best_knn.predict(x_test)
knn_train_accuracy = best_knn.score(x_train, y_train)
print("knn_train_accuracy = ", best_knn.score(x_train, y_train))
knn_test_accuracy = best_knn.score(x_test, y_test)
print("knn_test_accuracy = ", best_knn.score(x_test, y_test))
score = cross_val_score(best_knn, features, target, cv=5)
print("Cross validation score: ", score)
print(score.mean())
acc.append(score.mean())
model.append("K Nearest Neighbours")
print(classification_report(y_test, predicted_values))
from sklearn.metrics import confusion_matrix
cm_knn = confusion_matrix(y_test, predicted_values)
f, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(cm_knn, annot=True, linewidth=0.5, fmt=".0f", cmap="viridis", ax=ax)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Predicted vs actual")
plt.show()
# # Naive Bayes
from sklearn.naive_bayes import GaussianNB
NaiveBayes = GaussianNB()
NaiveBayes.fit(x_train, y_train)
predicted_values = NaiveBayes.predict(x_test)
x = metrics.accuracy_score(y_test, predicted_values)
params_NB = {"var_smoothing": np.logspace(0, -9, num=100)}
gs = GridSearchCV(GaussianNB(), params_NB, verbose=1, cv=7, n_jobs=-1)
g_res = gs.fit(x_train, y_train)
gsresult = pd.DataFrame(g_res.cv_results_)
gsresult.head()
g_res.best_score_
g_res.best_params_
NaiveBayes = GaussianNB(var_smoothing=1.873817422860383e-05)
score = cross_val_score(
GaussianNB(var_smoothing=1.873817422860383e-05), features, target, cv=5
)
print("Cross validation score: ", score)
NaiveBayes.fit(x_train, y_train)
nb_train_accuracy = NaiveBayes.score(x_train, y_train)
print("Training accuracy = ", NaiveBayes.score(x_train, y_train))
nb_test_accuracy = NaiveBayes.score(x_test, y_test)
print("Testing accuracy = ", NaiveBayes.score(x_test, y_test))
score = cross_val_score(NaiveBayes, features, target, cv=5)
print("Cross validation score: ", score)
print(score.mean())
acc.append(score.mean())
model.append("Naive Bayes")
y_pred = NaiveBayes.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm_nb = confusion_matrix(y_true, y_pred)
f, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(cm_nb, annot=True, linewidth=0.5, fmt=".0f", cmap="viridis", ax=ax)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Predicted vs actual")
plt.show()
print(classification_report(y_true, y_pred))
# # Decision Tree
from sklearn.tree import DecisionTreeClassifier
DT = DecisionTreeClassifier(criterion="entropy", random_state=2, max_depth=5)
DT.fit(x_train, y_train)
predicted_values = DT.predict(x_test)
x = metrics.accuracy_score(y_test, predicted_values)
print("Decision Tree's Accuracy is: ", x)
print(classification_report(y_test, predicted_values))
param_grid = {
"max_features": ["auto", "sqrt", "log2"],
"ccp_alpha": [0.1, 0.01, 0.001],
"max_depth": [5, 6, 7, 8, 9],
"criterion": ["gini", "entropy"],
}
gs = GridSearchCV(
estimator=DecisionTreeClassifier(), param_grid=param_grid, cv=5, verbose=True
)
gs.fit(x_train, y_train)
g_res = gs.fit(x_train, y_train)
gsresult = pd.DataFrame(g_res.cv_results_)
gsresult.head()
g_res.best_score_
g_res.best_params_
DT = DecisionTreeClassifier(
ccp_alpha=0.001, criterion="entropy", max_depth=9, max_features="log2"
)
score = cross_val_score(DT, features, target, cv=5)
print("Cross validation score: ", score)
print(score.mean())
acc.append(score.mean())
model.append("Decision Tree")
DT.fit(x_train, y_train)
dt_train_accuracy = DT.score(x_train, y_train)
print("Training accuracy = ", DT.score(x_train, y_train))
dt_test_accuracy = DT.score(x_test, y_test)
print("Testing accuracy = ", DT.score(x_test, y_test))
y_pred = DT.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm_dt = confusion_matrix(y_true, y_pred)
f, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(cm_dt, annot=True, linewidth=0.5, fmt=".0f", cmap="viridis", ax=ax)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Predicted vs actual")
plt.show()
# # Random Forest
from sklearn.ensemble import RandomForestClassifier
RF = RandomForestClassifier(n_estimators=20)
RF.fit(x_train, y_train)
predicted_values = RF.predict(x_test)
x = metrics.accuracy_score(y_test, predicted_values)
print("Random Forest Accuracy is: ", x)
print(classification_report(y_test, predicted_values))
score = cross_val_score(RF, features, target, cv=5)
print("Cross validation score: ", score)
print(score.mean())
acc.append(score.mean())
model.append("Random Forest")
rf_train_accuracy = RF.score(x_train, y_train)
print("Training accuracy = ", RF.score(x_train, y_train))
rf_test_accuracy = RF.score(x_test, y_test)
print("Testing accuracy = ", RF.score(x_test, y_test))
y_pred = RF.predict(x_test)
y_true = y_test
cm_rf = confusion_matrix(y_true, y_pred)
f, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(cm_rf, annot=True, linewidth=0.5, fmt=".0f", cmap="viridis", ax=ax)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Predicted vs actual")
plt.show()
# # SVC
from sklearn.svm import SVC
SVM = SVC()
SVM.fit(x_train, y_train)
predicted_values = SVM.predict(x_test)
x = metrics.accuracy_score(y_test, predicted_values)
print("SVM's Accuracy is: ", x)
print(classification_report(y_test, predicted_values))
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
clf = make_pipeline(StandardScaler(), SVC())
clf.fit(x_train, y_train)
param_grid = {
"C": [0.1, 1, 10],
"gamma": [1, 0.1, 0.01, "auto", "scale"],
"kernel": ["linear", "poly", "rbf"],
}
gs = GridSearchCV(SVC(), param_grid, refit=True, verbose=3)
gs.fit(x_train, y_train)
gs.best_score_
gs.best_params_
SVM = SVC(C=10, gamma=1, kernel="linear")
score = cross_val_score(SVM, features, target, cv=5)
print("CV score", score)
print(score.mean())
acc.append(score.mean())
model.append("SVC")
SVM.fit(x_train, y_train)
svc_train_accuracy = SVM.score(x_train, y_train)
print("Training accuracy = ", SVM.score(x_train, y_train))
rf_test_accuracy = SVM.score(x_test, y_test)
print("Testing accuracy = ", SVM.score(x_test, y_test))
y_pred = SVM.predict(x_test)
y_true = y_test
cm_rf = confusion_matrix(y_true, y_pred)
f, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(cm_rf, annot=True, linewidth=0.5, fmt=".0f", cmap="viridis", ax=ax)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Predicted vs actual")
plt.show()
# # PCA
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(features)
pca = PCA(n_components=3)
X_pca = pca.fit_transform(X_scaled)
print("Explained Variance Ratio:", pca.explained_variance_ratio_)
print("Transformed Data:")
print(X_pca)
X_pca.shape
x_trainPca, x_testPca, y_train, y_test = train_test_split(
X_pca, target, test_size=0.2, random_state=2
)
NaiveBayes.fit(x_trainPca, y_train)
y_pred = NaiveBayes.predict(x_testPca)
y_true = y_test
NaiveBayes.score(x_testPca, y_test)
# Accuracy dropped by about 29% when training on the PCA-reduced features.
# # Results
modeldict = {"Model Name": model, "Accuracy": acc}
modelDF = pd.DataFrame(modeldict)
modelDF
|
# # Udacity weatherData project
# ## Import OS and libraries
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Data preparation
# ### Reading in the CSV files and explore data frame
dfMUC = pd.read_csv("../input/resultsMUC.csv")
dfWORLD = pd.read_csv("../input/resultsWORLD.csv")
dfMUC.describe()
# One can observe that the counts for ```year``` and ```avg_temp``` are inconsistent. A quick look at the ```head()``` of the data frame reveals that ```avg_temp``` contains several rows with ```NaN``` values. These can cause problems when calculating the *moving average* and therefore need to be removed first.
dfMUC_clean = dfMUC.dropna()
# As explained [here](https://datatofish.com/dropna/), the ```df.dropna()``` function is applied so that the value counts for both columns match again.
# ### Read in ```resultsWORLD.csv``` and explore data frame
dfWORLD.describe()
# The ```dfWORLD``` data frame doesn't have any inconsistencies and therefore can be kept.
# ### Cleaning and merging
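# A possible sketch of this step (assuming both frames share 'year' and 'avg_temp' columns,
# as described above; the suffixes are illustrative).
df_merged = dfMUC_clean.merge(dfWORLD, on="year", suffixes=("_muc", "_world"))
df_merged.head()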
# ## Calculating the moving-average
# Next, we're going to calculate the moving-average using panda's ```df.rolling()```function.
dfWORLD["MA"] = dfWORLD.rolling(window=10)["avg_temp"].mean()
dfWORLD["MA"] = dfWORLD["MA"].round(2)
dfWORLD.head(10)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt
import seaborn as sns
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/country-gdp/countries.csv")
df.head()
print("Shape:", df.shape)
print("Columns:", df.columns)
df.info()
df.describe()
df.isnull().sum()
df.duplicated().any()
print("Total Rows in dataset -", df.shape[0], "Rows \n")
for columns in df.columns:
print("Unique values in", columns, ":", df[columns].nunique(), "\n")
# Continent
df["Continent"].value_counts()
df["Continent"].value_counts().plot(kind="pie")
# ## Population
# most Populated Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population"]][
dfn["Population"] == dfn["Population"].max()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# Least Populated Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population"]][
dfn["Population"] == dfn["Population"].min()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# ## IMF GDP
gdp = df[["IMF_GDP", "UN_GDP", "GDP_per_capita"]]
for col in gdp:
print(col, "Max:", df[col].max(), "\n")
IMF_df = df.sort_values(by=["IMF_GDP"], ascending=False)
# Top 5 countries with the highest IMF_GDP
IMF_df.head()
df[df["IMF_GDP"] == 26695150000000.0]
df[df["UN_GDP"] == 18624475000000.0]
df[df["GDP_per_capita"] == 178196.57]
# Max IMF GDP Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population", "IMF_GDP"]][
dfn["IMF_GDP"] == dfn["IMF_GDP"].max()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# Min IMF_GDP Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population", "IMF_GDP"]][
dfn["IMF_GDP"] == dfn["IMF_GDP"].min()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# ## UN GDP
# Max UN GDP Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population", "UN_GDP"]][
dfn["UN_GDP"] == dfn["UN_GDP"].max()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# Min UN GDP Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population", "UN_GDP"]][
dfn["UN_GDP"] == dfn["UN_GDP"].min()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# ## GDP per capita
# most GDP per capita Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population", "GDP_per_capita"]][
dfn["GDP_per_capita"] == dfn["GDP_per_capita"].max()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
# Min GDP per capita Country in each Continent
for value in df["Continent"].unique():
dfn = df[df["Continent"] == value]
max_value = dfn[["Country", "Population", "GDP_per_capita"]][
dfn["GDP_per_capita"] == dfn["GDP_per_capita"].max()
]
print(value, "\n\n", max_value.to_string(index=False), "\n")
|
# # Fraud prediction using neural network
# In this analysis, we use neural network model to do fraud prediction of credit card.
# First, we show the first 5 rows of the dataset. Features V1, V2, … V28 are the principal components obtained with PCA. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable, and it takes value 1 in case of fraud and 0 otherwise.
import numpy as np
import pandas as pd
df = pd.read_csv("../input/creditcardfraud/creditcard.csv")
df.head()
df.describe()
# Check whether there are any NaN values in the dataframe:
df.isnull().any()
# Here we do a simple statistic for the number of fraud samples (class 1) and the number of non-fraud samples (class 0) to check if this dataset is balanced or not.
df["Class"].value_counts()
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import pareto
plt.style.use("ggplot")
#### https://www.gdv.de/de/zahlen-und-fakten/versicherungsbereiche/ueberblick-24074
beitraege = pd.read_csv(
"../input/versicherungends/Beitraege.csv",
header=[0, 1],
sep=";",
nrows=13,
decimal=",",
)
leistungen = pd.read_csv(
"../input/versicherungends/Leistungen.csv",
header=[0, 1],
sep=";",
nrows=13,
decimal=",",
)
beitraege.columns = ["VERSICHERUNGSSPARTE", "2017", "2018", "VERAENDERUNG"]
leistungen.columns = ["VERSICHERUNGSSPARTE", "2017", "2018", "VERAENDERUNG"]
for df in [beitraege, leistungen]:
for jahr in ["2017", "2018"]:
df[jahr] = df[jahr].str.replace(".", "").astype(int)
df.VERAENDERUNG = (
df.VERAENDERUNG.str.replace(",", ".").str.replace("%", "").astype(float) / 100
)
df.set_index("VERSICHERUNGSSPARTE", inplace=True)
beitraege
leistungen
leistungen["2018"] / beitraege["2018"]
np.random.exponential()
np.random.pareto(2)
pd.Series((pareto.rvs(2, size=1_000_000) - 1) * 10).hist(
bins=np.linspace(0, 200, 201), figsize=(20, 9)
)
pareto.cdf(x=[0.25, 0.5], b=1)
pareto.ppf(0.2, b=1)
pareto.mean(2)
np.mean(pareto.rvs(2, size=1_000_000))
|
# # Logistic Regression
# - Logistic Regression is a classification algorithm that comes under the supervised machine learning techniques.
# - Logistic Regression is used to predict the category of a dependent variable based on the values of the independent variables.
# - The output of a logistic regression model is always between 0 and 1.
# ## Diagram :
# *(figure: the S-shaped sigmoid curve of logistic regression)*
# - In the figure, the S-curve is called the sigmoid function.
# - The sigmoid function is a mathematical function used to map the predicted values to probabilities.
# - The output of logistic regression must stay between 0 and 1 and cannot go beyond these limits, which is why the curve has the 'S' shape.
# - In Logistic Regression, we use the concept of a threshold value, which decides between class 0 and class 1: values above the threshold are mapped to 1 and values below it to 0, as sketched below.
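# A minimal sketch of the sigmoid mapping and the threshold rule described above (the 0.5
# cut-off is the usual illustrative choice):
import numpy as np

def sigmoid(z):
    # maps any real-valued score z to a probability in (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

scores = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
probabilities = sigmoid(scores)
predicted_classes = (probabilities >= 0.5).astype(int)  # above the threshold -> class 1
print(probabilities)
print(predicted_classes)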
# # Assumptions for Logistic Regression
# - The dependent variable must be categorical in nature.
# - The independent variables should not exhibit multi-collinearity (a quick check is sketched below).
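# A possible sketch of a multi-collinearity check using the variance inflation factor from
# statsmodels (the helper name vif_table and the feature frame X are illustrative; VIF values
# well above ~10 are commonly read as problematic collinearity).
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor

def vif_table(X):
    values = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
    return pd.Series(values, index=X.columns, name="VIF")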
# # Implementation of Logistic Regression Algorithm
# # Import the necessary libraries
import numpy as np
import pandas as pd
# # Import/Load the dataset
dataset = pd.read_csv(r"/kaggle/input/diabetes-dataset/diabetes.csv")
dataset
# # Encoding the categorical data
from sklearn.preprocessing import LabelEncoder
l1 = LabelEncoder()
dataset["outcome"] = l1.fit_transform(dataset["outcome"])
# # Independent Variable (X) & Dependent Variable (Y)
X = dataset.iloc[:, :-1]
Y = dataset.iloc[:, -1]
# # Split the dataset for train and test
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.25, random_state=0
)
# # Model training
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, Y_train)
# ### **Pickle** in Python is primarily used for serializing and deserializing a Python object structure. In other words, it is the process of converting a Python object into a byte stream to store it in a file/database, maintain program state across sessions, or transport data over the network.
# #### **Install on Windows with Anaconda:**
# #### conda install -c conda-forge pickle5
# data.to_csv("")
# Note: the .sav extension used below is just an arbitrary name for the pickled model file.
# # Model Export
# save the model to disk
import pickle

filename = "finalized_model.sav"
pickle.dump(classifier, open(filename, "wb"))
# # Model Import
# load the model from disk
loaded_model = pickle.load(open(r"/kaggle/working/finalized_model.sav", "rb"))
result = loaded_model.score(X_test, Y_test)
print(result)
y_pred1 = loaded_model.predict(X_test)
print(y_pred1)
# import os
# os.chdir(r'/kaggle/working')
# !tar -czf Landscapes.tar.gz images_out/Landscapes
# from IPython.display import FileLink
# FileLink(r'Landscapes.tar.gz')
# # Model prediction
y_pred = classifier.predict(X_test)
print(y_pred)
# # Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)
print(cm)
# # Making the Accuracy Score
from sklearn.metrics import accuracy_score
ac = accuracy_score(Y_test, y_pred) * 100
ac
|
# Importing necessary Library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly as py
import calendar
import re
import nltk
from nltk.corpus import stopwords
from textblob import TextBlob
from textblob import Word
import string
from gensim import corpora
from gensim.models.ldamodel import LdaModel
from gensim.parsing.preprocessing import preprocess_string
from gensim.models.coherencemodel import CoherenceModel
# Importing Data
import pandas as pd
tweets_df = pd.read_csv(
"../input/elon-musks-tweets/data_elonmusk.csv", encoding="latin1"
)
# Overview of Dataset
tweets_df.head()
# We need only "Tweet" column to do our topic modelling analysis.However, I will keep "Time" column as well to show the tweet counts by months
tweets_df = tweets_df.drop(["row ID", "Retweet from", "User"], axis=1)
tweets_df.head()
# Converting "Time" column to datetime column
tweets_df["Time"] = pd.to_datetime(tweets_df["Time"])
tweets_df["Time"] = pd.to_datetime(tweets_df["Time"], format="%y-%m-%s %H:%M:%S")
# Showing date column as Year-Month combination
tweets_df["Time"] = pd.to_datetime(tweets_df["Time"]).dt.to_period("M")
# In the next few lines, I convert the period to year and month names in case we need them for any visualization purpose
tweets_df["Time"] = pd.DataFrame(tweets_df["Time"].astype(str))
tweets_df["Month"] = tweets_df["Time"].apply(lambda x: x.split("-")[1]).astype(int)
tweets_df["Year"] = tweets_df["Time"].apply(lambda x: x.split("-")[0])
tweets_df["Month"] = tweets_df["Month"].apply(lambda x: calendar.month_name[x])
tweets_df["Year_month"] = tweets_df["Year"].astype(str) + tweets_df["Month"].astype(str)
tweets_df = tweets_df.drop(["Month", "Year", "Time"], axis=1)
tweets_df.head()
# Let's start with cleaning our Tweet Column.
# We will try to remove "@" user handles and hyperlinks here; RT tokens and punctuation are handled further below
HANDLE = r"@\w+"
LINK = r"https://t\.co/\w+"
def basic_clean(text):
text = re.sub(HANDLE, "", text)
text = re.sub(LINK, "", text)
return text
tweets_df["clean_tweet"] = tweets_df["Tweet"].apply(lambda x: basic_clean(x))
tweets_df.head()
# splitting the "clean_tweet" columns into tokens as well as basic text preprocessing e.g. stopword removal / lemmatization/spelling correction
stops = stopwords.words("english")
tweets_df["clean_tweet"] = tweets_df["clean_tweet"].apply(
lambda x: " ".join(word.lower() for word in x.split() if word not in stops)
)
tweets_df["clean_tweet"] = tweets_df["clean_tweet"].apply(
lambda x: " ".join(Word(word).lemmatize() for word in x.split())
)
retweet = ["RT", "rt", "http"]
punc = [string.punctuation] + retweet
tweets_df["clean_tweet"] = tweets_df["clean_tweet"].apply(
lambda x: " ".join(word for word in x.split() if word not in punc)
)
# Let's check our tweet column after basic cleaning
tweets_df.head()
# Let's implement the LDA model from Gensim
tweets = tweets_df["clean_tweet"].apply(preprocess_string).tolist()
tweets
dictionary = corpora.Dictionary(tweets)
corpus = [dictionary.doc2bow(text) for text in tweets]
NUM_TOPICS = 5
lda = LdaModel(corpus, num_topics=NUM_TOPICS, id2word=dictionary, passes=15)
lda.print_topics(num_words=6)
# In order to decide on the correct number of topics, we will need a way to assess how well the model's topics were chosen. Gensim provides a CoherenceModel instance that you can use
def calculate_coherence_score(tweets, dictionary, lda, coherence="c_v"):
    coherence_model = CoherenceModel(
        model=lda, texts=tweets, dictionary=dictionary, coherence=coherence
    )
    return coherence_model.get_coherence()

def get_coherence_values(start, stop):
    for num_topics in range(start, stop):
        print(f"\nCalculating coherence for {num_topics} topics")
        lda = LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=2)
        coherence = calculate_coherence_score(tweets, dictionary, lda, coherence="c_v")
        yield coherence
min_topics, max_topics = 10, 30
coherence_score = list(get_coherence_values(min_topics, max_topics))
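# A quick sketch (using matplotlib, imported above as plt) to inspect the scores
# computed above and pick the topic count with the highest coherence:
plt.plot(range(min_topics, max_topics), coherence_score)
plt.xlabel("Number of topics")
plt.ylabel("Coherence score (c_v)")
plt.show()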
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
# ## **KERAS**: ##
# It's an open-source neural-network Python library, capable of running on top of TensorFlow, Microsoft Cognitive Toolkit, R, Theano or PlaidML, for experimentation with deep neural networks (source: Wikipedia).
from keras.models import Sequential
from keras.layers import (
Dense,
) # This is a linear operation where every input is connected to every output by a weight.
from keras.layers import Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.preprocessing import image
gen = image.ImageDataGenerator()
# same as: from keras.preprocessing.image import ImageDataGenerator
# **Data exploration**
# test-data set excludes the label column
train.head(2)
test.head(2)
train["label"].unique()
train["label"].value_counts()
# check the size of both data sources
train.shape, test.shape # 2-dimensions
# converts labels to integers and pixels into floats
X_train = (train.iloc[:, 1:].values).astype("float32") # all pixel values
y_train = train.iloc[:, 0].values.astype("int32") # Labels, column 0, target
X_test = test.values.astype("float32")
# Reshape by adding dimension for color channel
X_train_4D = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test_4D = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train_4D.shape, X_test_4D.shape
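# The convolutional layers imported above (Conv2D, MaxPooling2D, Dense, Dropout, Flatten)
# are never assembled in this section, which switches to a logistic-regression baseline.
# As a hedged sketch, here is one way they could be combined into a small CNN for the
# 28x28x1 arrays just created; the layer sizes are illustrative assumptions, not tuned values.
def build_simple_cnn():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation="softmax"))  # one output per digit class
    model.compile(
        optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
    )
    return model

# Example usage (commented out to keep this cell fast):
# cnn = build_simple_cnn()
# cnn.fit(X_train_4D / 255.0, y_train, epochs=3, batch_size=128, validation_split=0.1)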
# **Visualize**
X_trainA = X_train_4D.reshape(
X_train_4D.shape[0], 28, 28
) ##This is important for images to show properly
# put labels into y_train variable
# visualize number of digits classes
import seaborn as sns
plt.figure(figsize=(15, 7))
g = sns.countplot(y_train, palette="icefire")
plt.title("Number of classes")
# shows the digit images at positions 10 to 13
for i in range(10, 14):
    plt.subplot(2, 2, i - 9)
    plt.imshow(X_trainA[i], cmap=plt.get_cmap("gray"))
    plt.title(y_train[i])
# shows the digit images at positions 0 to 4
for i in range(0, 5):
plt.subplot(330 + (i + 1))
plt.imshow(X_trainA[i], cmap=plt.get_cmap("gray"))
plt.title(y_train[i])
# shows the pixel values of the image
plt.figure()
plt.imshow(X_trainA[0])
plt.colorbar()
plt.grid(False)
plt.show()
# ### **Logistic Regression** ###
# The label data as individual dataframe
Labls = train[["label"]]
Labls.shape
Labls.head(2)
Labls["label"].unique()
# label as array
# The label data as individual set
arrayLbl = train["label"]
arrayLbl.shape
# split the data (drop the label column so it is not leaked into the features)
from sklearn.model_selection import train_test_split

train_img, test_img, train_lbl, test_lbl = train_test_split(
    train.drop("label", axis=1), arrayLbl, test_size=28000, random_state=0
)
from sklearn.linear_model import LogisticRegression
Lmodel = LogisticRegression(
solver="lbfgs"
) # =Limited-memory Broyden–Fletcher–Goldfarb–Shanno
# solver = seeks parameter weights that minimize a cost function
# lbfgs solver= approximates the second derivative matrix updates with gradient evaluations
# and stores only the last few updates to save memory
# Source: https://towardsdatascience.com/dont-sweat-the-solver-stuff-aea7cddc3451
# fit the model (note: the model is fit on the larger 28000-row split, so the
# accuracy reported below is measured on data the model has already seen)
Lmodel.fit(test_img, test_lbl)
# **Predictions and submission**
# Make predictions on entire test data
predictions = Lmodel.predict(test_img)
print(predictions)
predictions2 = Lmodel.predict(train_img)
print(predictions2)
Acc = Lmodel.score(test_img, test_lbl)
print(Acc)
df = pd.DataFrame(predictions, columns=["ImageId"])
df.head(2)
df.shape
S = pd.concat([df, Labls], axis=1)
S.head(2)
S.info()
a = S.iloc[0:28000] # from 0 to 27999
a.head(2)
a.tail(2) # verify the end rows of the table
a = a.astype(int)
a.head(3)
a = a.rename(columns={"label": "Label"})
a.info()
sorted_by_img = a.sort_values("ImageId")
sorted_by_img = sorted_by_img.astype(int)
sorted_by_img.head(3)
# Submit dataframe/table a containing:
# ImageId,Label
# 1,0
# 2,0
a.to_csv("Subms.csv", index=False)
|
# # Introduction
# In this notebook, we test a method that aims to provide the best possible tradeoff between interpretability and prediction. We first make predictions with a very transparent and interpretable model; for this part, we use Microsoft's ["explainable boosting machine"](https://github.com/interpretml/interpret). We then use the xgboost library to reduce our model's prediction error. One way to put it is that our model goes as far as it is humanly interpretable; from there, we use a model with higher predictive performance to reduce the remaining prediction error.
# In short:
# $$(1)\;\;\; y_i = f(X_i) $$
# Where $y_i$ is the target variable, $X_i$ the features vector and $f$ is an unknown data generating process.
# $$(2)\;\;\; y_i = \hat{y}_i + \lambda_i $$
# Where $\hat{y}_i$ is estimated with a glassbox model, $\lambda_i$ the prediction's residual.
# $$(3)\;\;\; y_i = \hat{y}_i + \hat{\lambda}_i + \sigma_i $$
# Where $\hat{\lambda}_i$ is estimated with a blackbox model, and $\sigma_i$ is the new residual. We hypothesize that $\sum_{i=1}^{N}\lambda_i^2 > \sum_{i=1}^{N}\sigma_i^2$.
# We believe this is a better method than stacking interpretable and blackbox models, or using ex-post sensitivity tests (like SHAP), since the additive structure sets clear boundaries between the interpretable and non-interpretable parts of each prediction.
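# Before diving into the real pipelines, here is a compact, self-contained sketch of the
# two-stage idea in equations (2) and (3), using generic scikit-learn estimators as
# stand-ins for the glassbox and blackbox models (synthetic data, illustrative only):
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X_demo = rng.rand(200, 3)
y_demo = 3 * X_demo[:, 0] + np.sin(6 * X_demo[:, 1]) + 0.1 * rng.randn(200)

glassbox = DummyRegressor(strategy="mean")      # stand-in for the interpretable model
glassbox.fit(X_demo, y_demo)
residual = y_demo - glassbox.predict(X_demo)    # lambda_i in equation (2)

blackbox = DecisionTreeRegressor(max_depth=3)   # stand-in for the blackbox model
blackbox.fit(X_demo, residual)                  # estimates lambda_hat_i

boosted_pred = glassbox.predict(X_demo) + blackbox.predict(X_demo)  # equation (3)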
# We use the dataset on house sales in Ames, Iowa, prepared by Dean De Cock (2011). It is widely used, and some high quality publicly available notebooks have already done a good job of exploring it. We can thus save some time by building upon them to test our method.
# # Environment & loading data
import os
os.chdir("/kaggle/input/house-prices-advanced-regression-techniques/")
import warnings
warnings.simplefilter(action="ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import uniform, randint, norm
import xgboost as xgb
from sklearn.preprocessing import OneHotEncoder, scale, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer, TransformedTargetRegressor
from sklearn.impute import SimpleImputer
from sklearn.model_selection import (
cross_val_score,
GridSearchCV,
KFold,
RandomizedSearchCV,
train_test_split,
)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
# Microsoft's package for the "explainable boosting machine" is the `interpret` library (pip install interpret); it is imported further below.
# Set random seed
np.random.seed(123)
# loading data
data = pd.read_csv("train.csv")
# # Preprocessing
# Some features of this dataset are notoriously problematic. In a real situation, we would investigate the nature of each variable, especially the reasons behind the missing data, and we would also investigate the outliers. Since we do not wish to spend too much time on this dataset, we rely on its author's remarks and previous work in the data science community.
# ## (Almost) perfect correlation between features
# We drop GarageArea, since it is almost perfectly correlated with GarageCars. The same goes for 1stFlrSF (we keep TotalBsmtSF) and GrLivArea (we keep TotRmsAbvGrd).
# ## Intended missing values
# Most NAs are tagged so voluntarily. For example, the data dictionary indicates that the PoolQC variable is missing if the property has no pool. We will thus replace them with the string "N", which will not be interpreted as missing.
# ## Other missing values
# Looking at the remaining missing values, we find that LotFrontage, that is the "Linear feet of street connected to property" has more than 15% of NAs. For now, we do not have an explanation for this. We will thus simply remove this feature. We do the same for the variable GarageYrBlt.
# The three remaining features have less than one percent of NAs. We will deal with them in the preprocessing pipeline: the numeric NAs will be replaced with the median of the respective variable, and the NAs in the variable Electrical will take its most frequent value.
# ## Outliers
# There are five points that the author of the dataset identified as outliers. Three of them are partial sales, which are simply another kind of transaction. We thus follow the author's recommendation and remove all transactions with more than 4000 square feet of living area (above ground). There are simply not enough such cases in the dataset to properly train a model.
# dropping (almost) perfectly correlated variables; GrLivArea is kept for now because
# the outlier filter below relies on it (it is not used as a model feature anyway)
data = data.drop(["GarageArea", "1stFlrSF"], axis=1)
# replacing intended NAs
NA_to_no = [
"Alley",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]
for i in NA_to_no:
data[i] = data[i].fillna("N")
# Droping the two features with many missing values
data = data.drop(["LotFrontage", "GarageYrBlt"], axis=1)
# Dropping the outliers
data = data[data.GrLivArea < 4000]
# Splitting the features from the target, and the train and test sets
X = data
X = X.drop("SalePrice", axis=1)
y = data.loc[:, "SalePrice"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=69
)
# identifying the categorical and numeric variables
numeric = [
"LotArea",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"2ndFlrSF",
"LowQualFinSF",
"BsmtFullBath",
"BsmtHalfBath",
"FullBath",
"HalfBath",
"TotRmsAbvGrd",
"Fireplaces",
"GarageCars",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
"YrSold",
]
# the categorical feature list is not given explicitly in this excerpt; as a reasonable
# reconstruction, treat every object (string) column of X as categorical
categorical = X.columns[X.dtypes == "object"].tolist()
# # Linear model
# I use the log transformation for prediction
def log(x):
return np.log(x)
def exp(x):
return np.exp(x)
# Setting up the preprocessor.
preprocessor = make_column_transformer(
(
make_pipeline(
SimpleImputer(strategy="most_frequent"),
OneHotEncoder(handle_unknown="ignore"),
),
categorical,
),
(make_pipeline(SimpleImputer(strategy="median"), StandardScaler()), numeric),
)
# Instantiating the model
pipeline_linear = make_pipeline(
preprocessor,
TransformedTargetRegressor(LinearRegression(), func=log, inverse_func=exp),
)
# Fitting the model and retrieving the prediction
pipeline_linear.fit(X_train, y_train)
line_pred = pipeline_linear.predict(X_test)
# # xgboost
pipeline_xgb = make_pipeline(
preprocessor,
TransformedTargetRegressor(
xgb.XGBRegressor(objective="reg:squarederror", nthread=-1),
func=log,
inverse_func=exp,
),
)
# Hyperparameters distributions
params = {
"transformedtargetregressor__regressor__colsample_bytree": uniform(0.7, 0.3),
"transformedtargetregressor__regressor__gamma": uniform(0, 0.5),
"transformedtargetregressor__regressor__learning_rate": uniform(0.03, 0.3),
"transformedtargetregressor__regressor__max_depth": randint(2, 6),
"transformedtargetregressor__regressor__n_estimators": randint(500, 1000),
"transformedtargetregressor__regressor__subsample": uniform(0.6, 0.4),
}
# Instantiating the xgboost model, with random-hyperparameter tuning
xgb_model = RandomizedSearchCV(
pipeline_xgb,
param_distributions=params,
random_state=123,
n_iter=50,
cv=5,
n_jobs=-1,
)
# Fitting the model and retrieving the predictions
xgb_model.fit(X_train, y_train)
xgb_pred = xgb_model.predict(X_test)
# # ebm
from interpret.glassbox import ExplainableBoostingRegressor
from interpret import show
from interpret.data import Marginal
# Definition of the EBM preprocessor; I do not one hot encode, since EBM deals with categoricals
preprocessor_ebm = make_column_transformer(
(SimpleImputer(strategy="most_frequent"), categorical),
(SimpleImputer(strategy="median"), numeric),
)
# Instantiating the model
ebm = make_pipeline(
preprocessor_ebm,
TransformedTargetRegressor(
ExplainableBoostingRegressor(random_state=123), func=log, inverse_func=exp
),
)
# Fitting the model and retrieving the predictions
ebm.fit(X_train, y_train)
ebm_pred = ebm.predict(X_test)
# # ebm + xgboost
params = {
"xgbregressor__colsample_bytree": uniform(0.7, 0.3),
"xgbregressor__gamma": uniform(0, 0.5),
"xgbregressor__learning_rate": uniform(0.03, 0.3),
"xgbregressor__max_depth": randint(2, 6),
"xgbregressor__n_estimators": randint(500, 1000),
"xgbregressor__subsample": uniform(0.6, 0.4),
}
pipeline_xgb2 = make_pipeline(
preprocessor, xgb.XGBRegressor(objective="reg:squarederror", nthread=-1)
)
xgb_model_2 = RandomizedSearchCV(
pipeline_xgb2, param_distributions=params, random_state=123, n_iter=50, cv=5
)
# getting residual predictions from the train data
ebm_pred_train = ebm.predict(X_train)
ebm_residual_train = y_train - ebm_pred_train
# training the xgb from the train data residual
xgb_model_2.fit(X_train, ebm_residual_train)
residual_predicted = xgb_model_2.predict(X_test)
# then we get our boosted ebm prediction
ebm_xgb_pred = ebm_pred + residual_predicted
# # Comparing performances
# It has been remarked in the past that ebm gives prediction performance similar to xgboost. Our method reaches performance that is in between the two.
# Getting performance
predict = [line_pred, xgb_pred, ebm_pred, ebm_xgb_pred]
mae = []
mse = []
rmse = []
for i in predict:
mae.append(mean_absolute_error(y_test, i))
mse.append(mean_squared_error(y_test, i))
rmse.append(sqrt(mean_squared_error(y_test, i)))
scores = pd.DataFrame(
[mae, mse, rmse],
columns=["line", "xgb", "ebm", "ebm + xgb"],
index=["mae", "mse", "rmse"],
)
scores["ebm + xgb over ebm"] = (
round((scores["ebm"] / scores["ebm + xgb"] - 1) * 100, 2).astype(str) + " %"
)
scores["xgb over ebm + xgb"] = (
round((1 - scores["xgb"] / scores["ebm + xgb"]) * 100, 2).astype(str) + " %"
)
scores
|
import pandas as pd
import numpy as np
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
from matplotlib import pyplot as plt
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.ensemble import RandomForestClassifier as RFClassi
from sklearn.model_selection import (
GridSearchCV,
cross_val_score,
StratifiedShuffleSplit,
)
from sklearn.linear_model import SGDClassifier as SGDC
from sklearn.feature_extraction.text import TfidfVectorizer as TVec
from sklearn.feature_extraction.text import CountVectorizer as CVec
from sklearn.preprocessing import MinMaxScaler as mmScaler
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import classification_report as cr
from sklearn.metrics import accuracy_score
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
path = r"../input/ireland-historical-news/irishtimes-date-text.csv"
df = pd.read_csv(path)
category_counts = df.headline_category.value_counts()
print("No of classes are: ", len(category_counts))
print(category_counts)
selected_category_counts = category_counts[category_counts > 3000].index.tolist()
df_small = df.loc[df["headline_category"].isin(selected_category_counts)]
f, ax = plt.subplots(figsize=(30, 30))
category_counts = category_counts.sort_values(ascending=False)
plt.barh(category_counts.index, category_counts)
plt.show()
# print(category_counts, category_counts.index)
# Now, we see there are 156 classes, many of which have counts even lower than 20 and extremely specific titles. Not only will we rarely encounter such titles as a group, they'll also make our classification very difficult.
# Another thing to note is that the news tag is the most common (obviously) with 574774 samples. This might cause an imbalance in the classification later.
# For making our problem easier, let's only use the classes with a count > 3000, which gives us 49 classes.
stratSplit = StratifiedShuffleSplit(n_splits=3, test_size=0.25)
tr_idx, te_idx = next(
stratSplit.split(np.zeros(len(df_small)), df_small["headline_category"])
)
# Evaluation of any model should provide an accurate estimation of it's performance on data similar to the one used for training. While randomly splitting it in a 75%-25% ratio is very common, it might give a test set without all the classes or worse, a training set without all the classes. Moreover, the distribution of all classes might not be proportionate to the original datatset and lead to some biasing. This calls for a stratified split, which mimics the percentage of samples for each class in each split.
# A better judgement of the model's accuracy can also be found out by using k folds, where k-1 folds (or subsets) of the dataset are used for training and 1 fold for testing. The process is repeated k times and an analysis of the score for each iteration, such as mean or variance, gives us an understanding of how our model will perform on unseen data and whether it is biased or not.
# sklearn's StratifiedShuffleSplit provides train/test indices to split the data. With our imbalanced data, it is better to use this so as to let the model train on each class just as well. The number of folds or splits (k) can be set to create k different models and estimate the behavior of the model under different scenarios. Here, I've used only 3 for the sake of simplicity, but it's advisable to use more.
class LemmaTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, doc):
return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
def getSplit(tr_idx, te_idx):
vec = CVec(ngram_range=(1, 3), stop_words="english", tokenizer=LemmaTokenizer())
lsa = TruncatedSVD(20, algorithm="arpack")
mmS = mmScaler(feature_range=(0, 1))
countVec = vec.fit_transform(df_small.iloc[tr_idx]["headline_text"])
countVec = countVec.astype(float)
# print(len(countVec))
dtm_lsa = lsa.fit_transform(countVec)
X_train = mmS.fit_transform(dtm_lsa)
countVec = vec.transform(df_small.iloc[te_idx]["headline_text"])
countVec = countVec.astype(float)
dtm_lsa = lsa.transform(countVec)
X_test = mmS.transform(dtm_lsa)
x_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
x_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
enc = LabelEncoder()
enc.fit(df_small.iloc[:]["headline_category"].astype(str))
y_train = enc.transform(df_small.iloc[tr_idx]["headline_category"].astype(str))
y_test = enc.transform(df_small.iloc[te_idx]["headline_category"].astype(str))
y_train_c = np_utils.to_categorical(y_train)
y_test_c = np_utils.to_categorical(y_test)
return (X_train, y_train, X_test, y_test)
# To extract information from the text, we use a countvectorizer that uses n_grams upto 3 words and removes all stop words. Another option for a vectorizer is the TfIdfVectorizer which uses the term frequency-inverse document frequency as a metric instead of count. A lemmatizing class is passed as an argument to the vectorizer to reduce complex words to their basic form.
# Now, the countvec will create a lot of features, as we have used ngrams for feature extraction. So it'll be helpful to do some dimensionality reduction using singular value decomposition. TruncatedSVD is a transformer that is very helpful for latent semantic analysis (LSA).
# We reduce the large number of extracted features to just 20. This is helpful for two reasons: reducing dimensionality not only reduces the complexity of the problem and the time taken to train the model by giving it fewer features, it also takes care of features that were correlated, saving the time needed for a correlation analysis.
# The final step is to fix the range of the features using the MinMaxScaler and divide the dataset into training and test sets. Another point to keep in mind is that while transforming the input, we use fit_transform on the training set and only transform on the testing set. If the entire dataset is used to fit the transformers, information about the test set may leak into the training set. The transformation of the testing set must rely only on statistics computed from the training set, as the test rows are supposed to be unseen.
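# The TfidfVectorizer mentioned above (imported earlier as TVec) is a drop-in replacement
# for CVec inside getSplit; a minimal sketch on a small sample of headlines:
tfidf_vec = TVec(ngram_range=(1, 3), stop_words="english", tokenizer=LemmaTokenizer())
tfidf_demo = tfidf_vec.fit_transform(df_small["headline_text"].head(100))
print(tfidf_demo.shape)  # (100, number of uni/bi/tri-gram features found in the sample)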
rfc = RFClassi(n_estimators=20)
mNB = MultinomialNB(alpha=0.5)
gNB = GaussianNB()
bNB = BernoulliNB(alpha=0.2)
sgdC = SGDC(n_jobs=-1, max_iter=1000, eta0=0.001)
gsCV_sgdClassifier = GridSearchCV(
sgdC,
{
"loss": ["hinge", "squared_hinge", "modified_huber", "perceptron"],
"class_weight": ["balanced", None],
"shuffle": [True, False],
"learning_rate": ["optimal", "adaptive"],
},
)
models = [rfc, mNB, gNB, bNB, gsCV_sgdClassifier]
# For choosing a model, there are a ton of options to choose from. While NaiveBayes is used very commonly for text classification, decision trees also offer great performance.
# Here, I've used multiple models to compare and judge on accuracies. RandomForestClassifier uses a number of decision trees to create an ensemble model that controls overfitting and class imbalances. With a huge number of samples for some classes and few for others, this is a problem the model could very well run into.
for model in models:
print("For model: ", model)
acc = 0.0
for tr_idx, te_idx in stratSplit.split(
np.zeros(len(df_small)), df_small["headline_category"]
):
(X_train, y_train, X_test, y_test) = getSplit(tr_idx, te_idx)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
acc += accuracy_score(y_test, y_pred)
print("Classification Report is:\n", cr(y_test, y_pred))
print(
"Accuracy is: ",
acc / 3.0,
"\n------------------------------------------------------------------------------------\n",
)
# At first glance, the BernoulliNB and MultinomialNB models seem to give great accuracies, but closer inspection reveals they have actually cheated by very conveniently classifying all of the samples (MultinomialNB) or most of the samples (BernoulliNB) as news, since it is the majority class with 42% of the samples. The report shows that the class imbalance has got to them and affected their precision and recall scores. If we had only seen the accuracy of the model, we might not have been able to make this observation, but a classwise score calculation helps us here. The GaussianNB fares better in this respect, as its precision and recall scores are better and it has actually classified samples into more than one class, but again 11.4% isn't a good score at all.
# The RandomForestClassifier has done considerably better by accurately classifying 48.7% of the samples and without classifying all the samples as one class.
# Choosing SGDClassifier effectively means we're choosing a linear model, and it is interesting to see how the performance will be affected when we consider this low variance model.
print(gsCV_sgdClassifier.best_params_, gsCV_sgdClassifier.best_score_)
|
# # Reference:
# https://www.kaggle.com/viveksrinivasan/eda-ensemble-model-top-10-percentile#Correlation-Analysis
# https://github.com/viveksrinivasanss/blogs/blob/master/bike_sharing_demand/eda_%26_ensemble_model.ipynb
# https://medium.com/analytics-vidhya/how-to-finish-top-10-percentile-in-bike-sharing-demand-competition-in-kaggle-part-2-29e854aaab7d
# https://www.kaggle.com/c/bike-sharing-demand/discussion/10431
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Data Fields
# * datetime - hourlydate + timestamp
# * season - 1:spring, 2:summer, 3:fall, 4:winter
# * holiday - whether the day is considered a holiday
# * workingday - whether a day is neither a weekend nor a holiday
# * weather-
# * 1:clear, few clouds, partly cloudy
# * 2: misty & cloudy, misty&broken clouds, misty&few clouds, misty
# * 3: Light snow, Light Rain&thunder storm & scattered clouds, light rain & scattered clouds
# * 4: Heavy Rain+ Ice pallets+thundersorm+mist, snow+fog
# * temp - temperature in celsius
# * atemp - "feels like" temperature in celsius
# * humidity - relative humidity
# * windspeed
# * casual - number of non-registered user rental initiated
# * registered - number of registered user rentals initiated
# * count - number of total rentals(Dependant variable)
import pylab
import calendar
import seaborn as sn
from scipy import stats
import missingno as msno
from datetime import datetime
import matplotlib.pyplot as plt
import warnings
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", category=DeprecationWarning)
# # Reading the Dataset
dailyData = pd.read_csv("../input/bike-sharing-demand/train.csv")
# # Data Summary
# Next we will see more about the dataset
# * Size of the data
# * Glimpse of the dataset
# * What of type of variables
# # Size of the dataset
dailyData.shape
# # Sample Rows
dailyData.head(10)
# # Data Type
dailyData.dtypes
# # Feature Engineering
# * The following data are categorical, but are present in the dataset as int type,
# so we need to convert these columns into categorical variables.
# 1. Weather
# 2. Season
# 3. Holiday
# 4. Working Day
# * Create the following columns from datetime column
# 1. date
# 2. hour
# 3. weekDay
# 4. month
# * Drop the datetime column
#
dailyData.datetime.apply(lambda x: x.split()[0])
# creating new columns from datetime column
# apply() : apply is a function in pandas library. It helps to apply a function(lambda/userdefined/Numpy) to the rows/columns in a dataFrame.
# The default value for axis in apply function is axis = 0 (column).
# lambda function: an anonymous, inline function; here it is applied to each value of the column
dailyData["date"] = dailyData.datetime.apply(lambda x: x.split()[0])
dailyData["hour"] = dailyData.datetime.apply(lambda x: x.split()[1].split(":")[0])
# strptime: create a datetime object from a string
# datetime.strptime(date_string, format) where datetime is an object that supplies different classes like strptime
# for manipulating and formatting date or time values
dailyData["weekday"] = dailyData.date.apply(
lambda dateString: calendar.day_name[
datetime.strptime(dateString, "%Y-%m-%d").weekday()
]
)
dailyData["month"] = dailyData.date.apply(
lambda dateString: calendar.month_name[
datetime.strptime(dateString, "%Y-%m-%d").month
]
)
dailyData["season"] = dailyData.season.map(
{1: "Spring", 2: "Summer", 3: "Fall", 4: "winter"}
)
dailyData["weather"] = dailyData.weather.map(
{
1: " Clear + Few clouds + Partly cloudy + Partly cloudy",
2: " Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist ",
3: " Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds",
4: " Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog ",
}
)
# creating category variables
# A categorical variable is one that usually takes a fixed number of possible values
categoryvariables = [
"hour",
"weekday",
"month",
"season",
"weather",
"holiday",
"workingday",
]
for var in categoryvariables:
dailyData[var] = dailyData[var].astype("category")
# Dropping Datetime column
dailyData = dailyData.drop(["datetime"], axis=1)
# # Visualizing the total number of each datatypes present in the dataset
# creating the data for the plot
typesCountSerie = dailyData.dtypes.value_counts()
# format columns as arrays of either strings or integers
# typeNames are easier to sort as array of `string` rather than an array of `dtype`
typeNamesColumn = list(map(lambda t: t.name, typesCountSerie.index.values))
typeCountColumn = typesCountSerie.values
# create an initial dataframe, with multiple occurences of the same "variableType"
intialDataTypeDf = pd.DataFrame(
{"variableType": typeNamesColumn, "count": typeCountColumn}
)
# Group initial data frame by "variableType",
# then reset_index to have a proper dataframe
groupedDataTypeDf = (
intialDataTypeDf.groupby(["variableType"]).sum()[["count"]].reset_index()
)
# dataTypeDf = pd.DataFrame(dailyData.dtypes.value_counts()).reset_index().rename(columns={"index":"variableType",0:"count"})
fig, ax = plt.subplots()
fig.set_size_inches(12, 5)
# plotting the barchart
sn.barplot(data=groupedDataTypeDf, x="variableType", y="count", ax=ax)
ax.set(xlabel="variableType", ylabel="Count", title="Count of the different Datatypes")
# # Missing Value
# Matrix :
# Using this matrix you can very quickly find the pattern of missingness in the dataset.
# Bar Chart :
# This bar chart gives you an idea about how many missing values are there in each column.
# No Missing values detected.
# * Checking the presence of missing values by visualising using "msno"
msno.matrix(dailyData, figsize=(12, 5))
msno.bar(dailyData, figsize=(12, 5))
# Now it is confirmed that there are no missing values.
# # Heatmap :
# The heatmap shows the correlation of missingness between every pair of columns: a correlation of 1 between two columns means that whenever one of them is present, the other one is present as well.
# A value near -1 means if one variable appears then the other variable is very likely to be missing.
# A value near 0 means there is no dependence between the occurrence of missing values of two variables.
# A value near 1 means if one variable appears then the other variable is very likely to be present.
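# The dataset here has no missing values, so a nullity-correlation heatmap would be empty;
# the call below (a sketch) is therefore guarded so it only draws when missing values exist.
if dailyData.isnull().values.any():
    msno.heatmap(dailyData, figsize=(12, 5))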
# # Detecting Outliers
# Analysis using Boxplots
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sn.boxplot(data=dailyData, y="count", orient="v", ax=axes[0][0])
sn.boxplot(data=dailyData, y="count", x="season", orient="v", ax=axes[0][1])
sn.boxplot(data=dailyData, y="count", x="hour", orient="v", ax=axes[1][0])
sn.boxplot(data=dailyData, y="count", x="workingday", orient="v", ax=axes[1][1])
axes[0][0].set(ylabel="Count", title="Box Plot On Count")
axes[0][1].set(xlabel="Season", ylabel="Count", title="Box Plot On Count across season")
axes[1][0].set(
xlabel="Hour of the day",
ylabel="Count",
title="Box Plot On Count across Hour of the day",
)
axes[1][1].set(
xlabel="Working Day", ylabel="Count", title="Box Plot On Count across Working day"
)
# # About Box Plot
# A box and whisker plot—also called a box plot—displays the five-number summary of a set of data. The five-number summary is the minimum, first quartile, median, third quartile, and maximum. In a box plot, we draw a box from the first quartile to the third quartile. A vertical line goes through the box at the median.
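# A quick numeric view of the same five-number summary that the box plots draw
# (min, Q1, median, Q3, max) for the "count" variable:
print(dailyData["count"].quantile([0, 0.25, 0.5, 0.75, 1.0]))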
# # Observation from the above box plot
# * The count has many 'outliers' as it exceeds the outer quartile limit.
# * Spring Season has got relatively lower count.
# * The box plots for "Hour of the day" indicate that the median values are higher at 7AM-8AM and 5PM-6PM.
# These times correspond to regular office and school hours.
# * Most of the 'outliers' are contributed by 'working days' rather than 'non-working days'.
# # Removing Outliers
# checking how many count values are with in 3*standard deviation
np.sum(
np.abs(dailyData["count"] - dailyData["count"].mean())
<= (3 * dailyData["count"].std())
)
dailyDataWithoutOutliers = dailyData[
np.abs(dailyData["count"] - dailyData["count"].mean())
<= (3 * dailyData["count"].std())
]
print("shape of the data with outliers", dailyData.shape)
print("shape of the data without outliers", dailyDataWithoutOutliers.shape)
# # Correlation Analysis
# * To determine the relationship a dependent variable is having with the numerical features.
# * Below are the data types of the numerical (non-categorical) features (temp, atemp, casual, registered, humidity, windspeed) and the dependent variable (count)
dailyData[
["temp", "atemp", "casual", "registered", "humidity", "windspeed", "count"]
].dtypes
# # Plotting the correlation between Count and ("temp","atemp","casual","registered","humidity","windspeed")
dailyDataCorr = dailyData[
["temp", "atemp", "casual", "registered", "humidity", "windspeed", "count"]
].corr()
mask = np.array(dailyDataCorr)
mask[np.tril_indices_from(mask)] = False
fig, ax = plt.subplots()
fig.set_size_inches(20, 10)
sn.heatmap(dailyDataCorr, mask=mask, vmax=0.8, square=True, annot=True)
# * temp and humidity show a positive and a negative correlation, respectively, with the count variable. Although the correlations are not prominent, count still has some dependency on them.
# * windspeed is not really going to be useful. The correlation value with count is 0.1.
# * atemp has a strong relationship with temp, so one of the two variables has to be dropped during model building since they exhibit multicollinearity in the data.
# * Casual and registered variables are not considered since they are leakage variables.
# * casual(non registered)+registered = count
# # Multicollinearity
# There are certain reasons why multicollinearity occurs:
# * It is caused by an inaccurate use of dummy variables.
# * It is caused by the inclusion of a variable which is computed from other variables in the data set.
# * Multicollinearity can also result from the repetition of the same kind of variable.
# * Generally occurs when the variables are highly correlated to each other.
# Multicollinearity can result in several problems. These problems are as follows:
# * The partial regression coefficient due to multicollinearity may not be estimated precisely. The standard errors are likely to be high.
# * Multicollinearity results in a change in the signs as well as in the magnitudes of the partial regression coefficients from one sample to another sample.
# * Multicollinearity makes it tedious to assess the relative importance of the independent variables in explaining the variation caused by the dependent variable.
# Partial regression coefficient
# * A value indicating the effect of each independent variable on the dependent variable with the influence of all the remaining variables held constant. Each coefficient is the slope between the dependent variable and each of the independent variables
#
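# A quick way to quantify the multicollinearity discussed above is the Variance Inflation
# Factor (VIF); this is an optional sketch assuming statsmodels is available. Values well
# above roughly 5-10 flag problematic collinearity, e.g. between temp and atemp.
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant

vifData = add_constant(dailyData[["temp", "atemp", "humidity", "windspeed"]].astype(float))
vif = pd.Series(
    [variance_inflation_factor(vifData.values, i) for i in range(vifData.shape[1])],
    index=vifData.columns,
)
print(vif.drop("const"))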
# casual(non registered)+registered = count
# https://www.kaggle.com/jjuanramos/bike-sharing-demand
plt.scatter(x=dailyData["casual"] + dailyData["registered"], y=dailyData["count"])
plt.show()
# # Regression Plot
# Regression plot in seaborn is one useful way to depict the relationship between two features. Here we consider "count" vs "temp", "humidity", "windspeed".
# * a partial regression plot attempts to show the effect of adding another variable to a model that already has one or more independent variables. Partial regression plots are also referred to as added variable plots, adjusted variable plots, and individual coefficient plots.
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3)
fig.set_size_inches(12, 5)
sn.regplot(x="temp", y="count", data=dailyData, ax=ax1)
sn.regplot(x="windspeed", y="count", data=dailyData, ax=ax2)
sn.regplot(x="humidity", y="count", data=dailyData, ax=ax3)
# # Data Distribution
fig, axes = plt.subplots(ncols=2, nrows=2)
fig.set_size_inches(12, 10)
sn.distplot(dailyData["count"], ax=axes[0][0])
stats.probplot(dailyData["count"], dist="norm", fit=True, plot=axes[0][1])
sn.distplot(np.log(dailyDataWithoutOutliers["count"]), ax=axes[1][0])
stats.probplot(
np.log1p(dailyDataWithoutOutliers["count"]), dist="norm", fit=True, plot=axes[1][1]
)
# As is visible from the figures, the "count" variable is skewed to the right. It is desirable to have a normal distribution, as most machine learning techniques prefer the dependent variable to be normally distributed. One possible solution is to take a log transformation of the "count" variable after removing the outlier data points. After the transformation the data looks a lot better, but still does not ideally follow a normal distribution.
# # Visualizing Count Vs (Month,Season,Hour,Weekday,Usertype)
fig, (ax1) = plt.subplots(nrows=1)
fig.set_size_inches(10, 5)
sortOrder = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
hueOrder = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
]
monthAggregated = pd.DataFrame(dailyData.groupby("month")["count"].mean()).reset_index()
monthSorted = monthAggregated.sort_values(by="count", ascending=False)
sn.barplot(data=monthSorted, x="month", y="count", ax=ax1, order=sortOrder)
ax1.set(xlabel="Month", ylabel="Avearage Count", title="Average Count By Month")
fig, ax2 = plt.subplots(nrows=1)
fig.set_size_inches(10, 5)
hourAggregated = pd.DataFrame(
dailyData.groupby(["hour", "season"], sort=True)["count"].mean()
).reset_index()
sn.pointplot(
x=hourAggregated["hour"],
y=hourAggregated["count"],
hue=hourAggregated["season"],
data=hourAggregated,
join=True,
ax=ax2,
)
ax2.set(
xlabel="Hour Of The Day",
ylabel="Users Count",
title="Average Users Count By Hour Of The Day Across Season",
label="big",
)
fig, ax3 = plt.subplots(nrows=1)
fig.set_size_inches(10, 5)
hourAggregated = pd.DataFrame(
dailyData.groupby(["hour", "weekday"], sort=True)["count"].mean()
).reset_index()
sn.pointplot(
x=hourAggregated["hour"],
y=hourAggregated["count"],
hue=hourAggregated["weekday"],
hue_order=hueOrder,
data=hourAggregated,
join=True,
ax=ax3,
)
ax3.set(
xlabel="Hour Of The Day",
ylabel="Users Count",
title="Average Users Count By Hour Of The Day Across Weekdays",
label="big",
)
fig, ax4 = plt.subplots(nrows=1)
fig.set_size_inches(10, 5)
hourTransformed = pd.melt(
dailyData[["hour", "casual", "registered"]],
id_vars=["hour"],
value_vars=["casual", "registered"],
)
hourAggregated = pd.DataFrame(
hourTransformed.groupby(["hour", "variable"], sort=True)["value"].mean()
).reset_index()
sn.pointplot(
x=hourAggregated["hour"],
y=hourAggregated["value"],
hue=hourAggregated["variable"],
hue_order=["casual", "registered"],
data=hourAggregated,
join=True,
ax=ax4,
)
ax4.set(
xlabel="Hour Of The Day",
ylabel="Users Count",
title="Average Users Count By Hour Of The Day Across User Type",
label="big",
)
# * It is quite obvious that people tend to rent bikes during the summer season, since it is really conducive to riding at that time of year. Therefore June, July and August have relatively higher demand for bicycles.
# * On weekdays more people tend to rent bicycles around 7AM-8AM and 5PM-6PM. As we mentioned earlier, this can be attributed to regular school and office commuters.
# * The above pattern is not observed on "Saturday" and "Sunday", when more people tend to rent bicycles between 10AM and 4PM.
# * The peak user count around 7AM-8AM and 5PM-6PM is purely contributed by registered user.
# # Data Modeling and predicting
# Filling Zeros in windspeed using Random Forest
dataTrain = pd.read_csv("../input/bike-sharing-demand/train.csv")
dataTest = pd.read_csv("../input/bike-sharing-demand/test.csv")
# combine test and train data (DataFrame.append is deprecated in newer pandas; pd.concat is equivalent here)
data = pd.concat([dataTrain, dataTest])
data.reset_index(inplace=True)
data.drop("index", inplace=True, axis=1)
# # Feature Engineering
data["date"] = data.datetime.apply(lambda x: x.split()[0])
data["hour"] = data.datetime.apply(lambda x: x.split()[1].split(":")[0]).astype("int")
data["year"] = data.datetime.apply(lambda x: x.split()[0].split("-")[0])
data["weekday"] = data.date.apply(
lambda dateString: datetime.strptime(dateString, "%Y-%m-%d").weekday()
)
data["month"] = data.date.apply(
lambda dateString: datetime.strptime(dateString, "%Y-%m-%d").month
)
dataWindspeedOriginal = data["windspeed"]
fig, ax = plt.subplots(nrows=1)
fig.set_size_inches(20, 5)
# sortOrder = ["January","February","March","April","May","June","July","August","September","October","November","December"]
# hueOrder = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
windspeed = pd.DataFrame(data.windspeed.value_counts()).reset_index()
plt.xticks(rotation=45)
sn.barplot(data=windspeed, x="index", y="windspeed", ax=ax)
ax.set(
    xlabel="Windspeed Values",
    ylabel="Count",
    title="Count of windspeed values before imputing",
)
# Windspeed has many zero entries which make it look suspicious.
# As specified in the kaggle discussion:
# * It can actually be 0 at these points.
# * It is too low to be measured, for example varying from 0 to 5.
# * All zeros or part of them are nothing but NAs.
# Considering windspeed 0 entries as missing values, we will fill them with Random Forest Classifier model.
from sklearn.ensemble import RandomForestClassifier
wCol = ["season", "weather", "humidity", "month", "temp", "year", "atemp"]
# dataWind0 is the entire dataset(contains cols season, weather, humidity, month, temp, year, atemp)
# with windspeed value = 0
dataWind0 = data[data["windspeed"] == 0]
# dataNotWind0 is the entire dataset(contains cols season, weather, humidity, month, temp, year, atemp)
# without windspeed value = 0
dataWindNot0 = data[data["windspeed"] != 0]
dataWindNot0["windspeed"] = dataWindNot0["windspeed"].astype("str")
# predicting value for windspeed = 0
rfModel_wind = RandomForestClassifier()
rfModel_wind.fit(dataWindNot0[wCol], dataWindNot0["windspeed"])
Wind0Values = rfModel_wind.predict(X=dataWind0[wCol])
dataWind0["windspeed"] = Wind0Values
data = pd.concat([dataWindNot0, dataWind0])
data["windspeed"] = data["windspeed"].astype("float")
data.reset_index(inplace=True)
data.drop("index", inplace=True, axis=1)
# # After Imputing Windspeed values
fig, ax = plt.subplots(nrows=1)
fig.set_size_inches(20, 5)
windspeed = pd.DataFrame(data.windspeed.value_counts()).reset_index()
plt.xticks(rotation=45)
sn.barplot(data=windspeed, x="index", y="windspeed", ax=ax)
ax.set(
xlabel="Windspeed Values",
ylabel="Count",
title="Count Of Windspeed Values After Imputing",
label="big",
)
# # Coercing to Categorical Type
categoricalFeatureNames = [
"season",
"holiday",
"workingday",
"weather",
"weekday",
"month",
"year",
"hour",
]
numericalFeatureNames = ["temp", "humidity", "windspeed", "atemp"]
dropFeatures = ["casual", "count", "datetime", "date", "registered"]
for var in categoricalFeatureNames:
data[var] = data[var].astype("category")
data.head()
# # Splitting Train and Test Data
dataTrain = data[pd.notnull(data["count"])].sort_values(
    by=["datetime"]
)  # datetime is not dropped here; month, weekday, etc. were created from it
dataTest = data[~pd.notnull(data["count"])].sort_values(by=["datetime"])
datetimecol = dataTest["datetime"]
yLabels = dataTrain["count"]
yLabelsRegistered = dataTrain["registered"]
yLabelsCasual = dataTrain["casual"]
# # Splitting Train and Validator
from sklearn.model_selection import train_test_split
X_train, X_validate, y_train, y_validate = train_test_split(
dataTrain, yLabels, test_size=0.3, random_state=42
)
dateTimeColValidate = X_validate["datetime"]
# # Dropping Unnecessary Features
dataTrain = dataTrain.drop(dropFeatures, axis=1)
dataTest = dataTest.drop(dropFeatures, axis=1)
X_train = X_train.drop(dropFeatures, axis=1)
X_validate = X_validate.drop(dropFeatures, axis=1)
# # RMSLE Scorer
# * One common way to evaluate the regression model is through calculating MSE or RMSE. In this particular competition, the metric to evaluate our model is* Root Mean Square Logarithmic Error* (RMSLE). RMSLE is particularly helpful when you want to penalize an under-predicted estimate greater than an over-predicted estimate.
# Most of the Kaggle competition where we predict sales and inventory demand especially use RMSLE as their metric to evaluate. For example competition such as grupo-bimbo-inventory-demand and sberbank-russian-housing-market use RMSLE as a metric.
# 
# Unfortunately, sklearn metrics do not have a direct implementation to calculate RMSLE, so let us construct a custom function to perform the RMSLE calculation.
def rmsle(y, y_, convertExp=True):
    if convertExp:
        y = np.exp(y)
        y_ = np.exp(y_)
    log1 = np.nan_to_num(np.log(np.array(y) + 1))
    log2 = np.nan_to_num(np.log(np.array(y_) + 1))
    calc = (log1 - log2) ** 2
    return np.sqrt(np.mean(calc))
import warnings
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", category=DeprecationWarning)
# The data is prepared by filling in the missing values and we have constructed the RMSLE scorer, so we are now good to go for our model building experiment.
# # Linear Regression
# * As a first step, let us start with a simple statistical technique like linear regression. It is always better to start from a simple model than to try complex machine learning algorithms first, because at times the target will have a smooth, nearly linear dependence on the covariates. Then linear regression will model the dependence better than a random forest algorithm, which basically approximates a linear curve with an ugly, irregular step function. A StackExchange discussion gives loads of information about it.
# https://stats.stackexchange.com/questions/174806/linear-regression-performing-better-than-random-forest-in-caret
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import GridSearchCV
# Initialize the Linear Regression model
lModel = LinearRegression()
# Train the model
lModel.fit(X=X_train, y=np.log1p(y_train))
# Make predictions
preds = lModel.predict(X=X_validate)
print(
"RMSLE Value For Linear Regression In Validation: ",
rmsle(np.exp(np.log1p(y_validate)), np.exp(preds), False),
)
# * Before submitting our test results we will visualize the distributions of the train and test predictions. Kaggle has a limit on the number of submissions per day (in our case 5 submissions/day), so visualizing the distributions gives a good clue about how close our test predictions are to the training target. From the figure it is visible that the distributions of the train and test sets vary considerably.
predsTest = lModel.predict(X=dataTest)
fig, (ax1, ax2) = plt.subplots(ncols=2)
fig.set_size_inches(20, 5)
sn.distplot(yLabels, ax=ax1, bins=100)
sn.distplot(np.exp(predsTest), ax=ax2, bins=100)
ax1.set(title="Training Set Distribution")
ax2.set(title="Test Set Distribution")
# print ("RMSLE Value For Linear Regression In Validation: ",rmsle(np.exp(np.log1p(y_validate)),np.exp(predsTest),False))
# The RMSLE value on the test set is around 1.05, which is definitely not on par with the best score (0.33) on the Kaggle leaderboard. We can improve this score substantially in a number of ways.
# * Feature Engineering
# * Regularization (L1 & L2)
# * Ensemble Models
# 1. We have already created a few features such as weekday, month and hour from the datetime attribute, and there are many more ways one can come up with feature engineering steps. As a part of this blog, I am not taking that further and will leave it to the imagination of the reader.
# # Regularization
# Regularization is extremely useful in any of these cases. Multicollinearity and overfitting may pose some issues for us.
# * overfitting
# * A large number of variables
# * Low ratio of number of observations to number of variables
# * Multicollinearity
# * Overfitting refers to a model that performs well on the training set by learning the detail and noise in the training data but does not generalize well to new data. In our example, the RMSLE value on the training data is around 0.98 and there is no big difference from the test set results. So far we do not have any overfitting problem, but at times it can be a nightmare while fitting models.
# >> Having a large number of variables may again result in overfitting. This is because the model becomes more complex and sometimes lowers its predicting and generalization power. ***L1 regularization(Lasso Regression)***comes in handy in these situations by reducing the coefficients to zero thereby producing simpler models.
# >*** L2 Regularization***(Ridge Regression) is extremely helpful for the third case where we have the ratio of more number of attributes
# to less number of observation.But in this case, we are fine with that with 12 attributes and 10886 records. Ridge regression is also when there is high multicollinearity between predictor variables. We have highly correlated variables like temp-atemp and month-season.
# > So we are not getting affected much with the above problems. But to improve our score, we will build simple regularization models.
# # Regularization Model - Ridge(L2)
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
ridge_m_ = Ridge()
ridge_params_ = {
"max_iter": [3000],
"alpha": [0.01, 0.05, 0.1, 1, 2, 3, 4, 10, 30, 100, 200, 300, 400, 800, 900, 1000],
}
rmsle_scorer = metrics.make_scorer(rmsle, greater_is_better=False)
grid_ridge_m = GridSearchCV(ridge_m_, ridge_params_, scoring=rmsle_scorer, cv=5)
grid_ridge_m.fit(X=X_train, y=np.log1p(y_train))
preds = grid_ridge_m.predict(X=X_validate)
print(grid_ridge_m.best_params_)
print(
"RMSLE Value For Ridge Regression: ",
rmsle(np.exp(np.log1p(y_validate)), np.exp(preds), False),
)
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
df = pd.DataFrame(grid_ridge_m.cv_results_)
df
df["alpha"] = df["params"].apply(lambda x: x["alpha"])
df["rmsle"] = df["mean_test_score"].apply(lambda x: -x)
sn.pointplot(data=df, x="alpha", y="rmsle", ax=ax)
# # L1 Regularization(Lasso)
from sklearn.linear_model import Lasso
lasso_m_ = Lasso()
alpha = [0.001, 0.005, 0.01, 0.3, 0.1, 0.3, 0.5, 0.7, 1]
lasso_params_ = {"max_iter": [3000], "alpha": alpha}
# rmsle_scorer = metrics.make_scorer(rmsle, greater_is_better=False)
grid_lasso_m = GridSearchCV(lasso_m_, lasso_params_, scoring=rmsle_scorer, cv=5)
grid_lasso_m.fit(X=X_train, y=np.log1p(y_train))
preds = grid_lasso_m.predict(X=X_validate)
print(grid_lasso_m.best_params_)
print("RMSLE Value: ", rmsle(np.exp(np.log1p(y_validate)), np.exp(preds), False))
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
df = pd.DataFrame(grid_lasso_m.cv_results_)
df["alpha"] = df["params"].apply(lambda x: x["alpha"])
df["rmsle"] = df["mean_test_score"].apply(lambda x: -x)
sn.pointplot(data=df, x="alpha", y="rmsle", ax=ax)
# The optimum value of the regularization parameter (alpha = 0.005) is obtained through a grid search. The chart visualizes RMSLE values for different alpha parameters. The RMSLE value on the test set is around 1.04 and has not improved from our previous result, so regularization has not given any boost to our score. But let us not lose hope, because when nothing else goes right an ensemble model usually produces something out of the box for us.
# # Ensemble Models
# > Ensemble models are nothing but an art of combining a diverse set of individual weak learners(models) together to improve the stability and predictive capacity of the model. Ensemble Models improves the performance of the model by
# * Averaging out biases.
# * Reducing the variance.
# * Avoiding overfitting.
# >> If you are still wondering what ensemble model is all about then this series of articles can get you started with it. So that’s enough introduction about ensemble model and here is a snippet on how we fit naive Random Forest model on our dataset with default parameters.
from sklearn.ensemble import RandomForestRegressor
rfModel = RandomForestRegressor(n_estimators=100)
rfModel.fit(X=X_train, y=np.log1p(y_train))
preds = rfModel.predict(X=X_validate)
print("RMSLE Value: ", rmsle(np.exp(np.log1p(y_validate)), np.exp(preds), False))
features = pd.DataFrame()
features["features"] = X_train.columns
features["coefficient"] = rfModel.feature_importances_
features.sort_values(by=["coefficient"], ascending=False, inplace=True)
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
sn.barplot(data=features, x="features", y="coefficient", ax=ax)
predsTest = rfModel.predict(X=dataTest)
fig, (ax1, ax2) = plt.subplots(ncols=2)
fig.set_size_inches(20, 5)
sn.distplot(yLabels, ax=ax1, bins=100)
sn.distplot(np.exp(predsTest), ax=ax2, bins=100)
ax1.set(title="Training Set Distbution")
ax2.set(title="Test Set Distribution")
|
# Hand_Written_Digit_Classification
# Objectives :
# Handwritten digit recognition is the ability of computers to recognize human handwritten digits. It is a hard task for the machine because handwritten digits are not perfect and can be written in many different styles.
# Import Library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Import data
from sklearn.datasets import load_digits
df = load_digits()
print(df)
_, axes = plt.subplots(nrows=1, ncols=6, figsize=(8, 4))
for ax, image, label in zip(axes, df.images, df.target):
ax.set_axis_off()
ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest")
ax.set_title("Training : %i" % label)
# Data Processing
df.images.shape
df.images[0]
df.images[0].shape
len(df.images)
n_sample = len(df.images)
data = df.images.reshape(n_sample, -1)  # flatten each 8x8 image into a 64-value vector
data[0]
data.shape
# Scaling Image data
data.min()
data.max()
data = data / 16
data.max()
data[0]
# Train Test Split Data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(data, df.target, test_size=0.3)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
# Random Forest Model
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(x_train, y_train)
# Predict test data
y_pred = rf.predict(x_test)
y_pred
from sklearn.metrics import confusion_matrix, classification_report
confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv("/kaggle/input/creditcardfraud/creditcard.csv")
df.head()
df.shape
df.info()
plt.figure(figsize=(10, 12))
sns.heatmap(df.corr())
z = df["Class"].value_counts(sort=True).sort_index()
z.plot(kind="bar")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# reshape to a column vector so each transaction amount is treated as one sample
z = scaler.fit_transform(df["Amount"].values.reshape(-1, 1))
df["normAmount"] = z.ravel()
Y = df["Class"]
df = df.drop(["Amount"], axis=1)
X_TEST = df.drop(["Class"], axis=1)
Y_TEST = df["Class"]
df.head()
df["Class"].value_counts()
Y = df["Class"]
fraud_indices = np.array(Y[Y == 1].index)
normal_indices = np.array(Y[Y == 0].index)
number_fraud = Y[Y == 1].count()
# sample without replacement so each normal transaction is used at most once
random_normal_indices = np.random.choice(normal_indices, number_fraud, replace=False)
print((random_normal_indices).reshape(1, -1))
under_sample_indices = np.concatenate([random_normal_indices, fraud_indices])
import random
random.shuffle(under_sample_indices)
print(under_sample_indices)
df.head()
X_under_sample = df.iloc[under_sample_indices]
X_under_sample = X_under_sample.drop(["Class"], axis=1)
Y_under_sample = df["Class"].iloc[under_sample_indices]
print(Y_under_sample[:15])
# print(X_under_sample.shape,Y_under_sample.shape)
Y_under_sample.value_counts()
# NOW WE HAVE DONE UNDERSAMPLING
# The instances of both classes are now the same
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X_under_sample, Y_under_sample, test_size=0.33
)
print(len(x_test) / (len(x_test) + len(x_train)))
from sklearn.model_selection import cross_val_score
def kfold():
c_param_range = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
values = []
for var in c_param_range:
        # liblinear supports the l1 penalty
        model = LogisticRegression(C=var, penalty="l1", solver="liblinear")
scores = cross_val_score(
model, X_under_sample, Y_under_sample, cv=5, scoring="recall"
)
print("C=", var)
print("scores", scores)
values.append(scores.mean())
print("Mean is ", scores.mean())
return c_param_range[values.index(max(values))]
best_c = kfold()
print("best_c", best_c)
model = LogisticRegression(C=best_c, penalty="l1", solver="liblinear")
model.fit(x_train, y_train)
y_pred_undersample = model.predict(x_test)
from sklearn.metrics import confusion_matrix
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred_undersample)
np.set_printoptions(precision=2)
print(
"Recall metric in the testing dataset: ",
cnf_matrix[1, 1] / (cnf_matrix[1, 0] + cnf_matrix[1, 1]),
)
model = LogisticRegression(C=best_c, penalty="l1", solver="liblinear")
model.fit(x_train, y_train)
y_pred_undersample = model.predict(X_TEST)
from sklearn.metrics import confusion_matrix
# Compute confusion matrix
cnf_matrix = confusion_matrix(Y_TEST, y_pred_undersample)
np.set_printoptions(precision=2)
print(
    "Recall metric on the full (original) dataset: ",
    cnf_matrix[1, 1] / (cnf_matrix[1, 0] + cnf_matrix[1, 1]),
)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
confusion_matrix,
precision_recall_curve,
auc,
roc_auc_score,
roc_curve,
recall_score,
classification_report,
)
lr = LogisticRegression(C=best_c, penalty="l1", solver="liblinear")
y_pred_undersample_score = lr.fit(x_train, y_train).decision_function(x_test)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_undersample_score)
roc_auc = auc(fpr, tpr)
# Plot ROC
plt.title("Receiver Operating Characteristic")
plt.plot(fpr, tpr, "b", label="AUC = %0.2f" % roc_auc)
plt.legend(loc="lower right")
plt.plot([0, 1], [0, 1], "r--")
plt.xlim([-0.1, 1.0])
plt.ylim([-0.1, 1.01])
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.show()
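# precision_recall_curve is imported above but never used; a minimal precision-recall
# sketch on the undersampled validation split, reusing the decision scores from above:
precision, recall, pr_thresholds = precision_recall_curve(
    y_test, y_pred_undersample_score
)
plt.figure()
plt.plot(recall, precision, "b")
plt.title("Precision-Recall Curve (undersampled validation split)")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()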
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta, date
import pytz
import seaborn as sns
import geopy.distance
# importing machine learning libraries
from sklearn.model_selection import KFold, cross_val_score, train_test_split
# from sklearn.metrics import confusion_matrix
import sklearn.metrics as metrics
from sklearn.cluster import KMeans
# importing regressors
from sklearn.ensemble import RandomForestRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.linear_model import LinearRegression
from sklearn import svm
from sklearn.neighbors import KNeighborsRegressor
# ignoring warnings
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import GridSearchCV
import pandas as pd
item_categories = pd.read_csv(
"../input/competitive-data-science-predict-future-sales/item_categories.csv"
)
items = pd.read_csv("../input/competitive-data-science-predict-future-sales/items.csv")
sales = pd.read_csv(
"..//input/competitive-data-science-predict-future-sales/sales_train.csv"
)
# sample_submission = pd.read_csv("../input/competitive-data-science-predict-future-sales/sample_submission.csv")
shops = pd.read_csv("../input/competitive-data-science-predict-future-sales/shops.csv")
print("----------Shape of Data----------")
print(sales.shape)
print("----------first 5 rows----------")
print(sales.head(5))
print("-----------data frame overview-----------")
print(sales.info())
print("----------Missing value-----------")
print(sales.isnull().sum())
print(sorted(sales.shop_id.unique()))
# We should reduce the dataframe Memory which is `134.4 MB` to make it faster and more efficient. This can be done through **downcasting** the types of the columns:
def downcasting(df):
"""
make 2 lists onc contains index of float64 columns and the other int64 or int32,
then change the dtype of these column into less memory consumer data type
"""
float_cols = [col for col in df if df[col].dtype == "float64"]
int_cols = [col for col in df if df[col].dtype in ["int64", "int32"]]
df[float_cols] = df[float_cols].astype(np.float32)
df[int_cols] = df[int_cols].astype(np.int16)
return df
sales = downcasting(sales)
sales.info()
# The data frame memory usage went from `134.4 MB` to `61.6 MB`, which is roughly a `54%` reduction
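# A quick way to verify the per-column memory usage after downcasting is pandas'
# built-in memory_usage (deep=True also accounts for object columns):
print(sales.memory_usage(deep=True) / 1024**2)  # memory per column in MB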
# ### 1) Date:
#
# changing the type of the column into date time, this will ease future feature engineering
sales.date = pd.to_datetime(sales.date, format="%d.%m.%Y")  # dates are dd.mm.yyyy
sales.info()
print("First date --> ", sales.date.min())
print("Last date --> ", sales.date.max())
# ### 2) date_block_num
#
# What is the number of items sold in each month?
sales_grouped_by_month = sales.groupby("date_block_num")["item_cnt_day"].sum()
plt.figure(figsize=(15, 8))
plt.title("Number of items sold by each month")
plt.xlabel("Date_block_num")
plt.ylabel("Items_sum")
sns.lineplot(data=sales_grouped_by_month)
sales.item_price.max()
# What is the average number of items sold in each month?
sales_grouped_by_month = sales.groupby("date_block_num")["item_cnt_day"].mean()
plt.figure(figsize=(15, 8))
plt.title("Average number of items sold by each month")
plt.xlabel("Date_block_num")
plt.ylabel("Items_mean")
sns.lineplot(data=sales_grouped_by_month)
# What is the maximum number of items sold in each month?
sales_grouped_by_month = sales.groupby("date_block_num")["item_cnt_day"].max()
plt.figure(figsize=(15, 8))
plt.title("Max number of items sold by each month")
plt.xlabel("Date_block_num")
plt.ylabel("Items_mean")
sns.lineplot(data=sales_grouped_by_month)
# ### 3) shop_id
print("Nuumber of shops is : ", sales.shop_id.nunique())
# What is the average number of items sold at each shop?
items_ordered_grouped_by_shop = sales.groupby("shop_id")["item_cnt_day"].mean()
plt.figure(figsize=(20, 8))
plt.title("Avg number of items sold by each shop")
plt.xlabel("shop_id")
plt.ylabel("Items_mean")
sns.barplot(x=sales.shop_id.unique(), y=items_ordered_grouped_by_shop)
# There is a significant difference between the shops in terms of the number of items ordered.
# ### 4) item_id
sales.item_id.nunique()
# What are the 10 most ordered items?
best_10_items = (
sales.groupby("item_id")["item_cnt_day"]
.sum()
.sort_values(ascending=False)
.head(10)
.index
)
best_10_items_ids = list(best_10_items)
print(best_10_items)
best_10_items = (
sales.groupby("item_id")["item_cnt_day"].sum().sort_values(ascending=False).head(10)
)
best_10_items_values = []
for i in range(0, len(best_10_items)):
best_10_items_values.append(best_10_items[best_10_items_ids[i]])
# best_10_items_names = shops.shop_id.map(lambda Id : [ID ])
# best_10_items_names = [shops.shop_name_translated for Id in shops.shop_id if Id in best_10_items]
best_10_items_values
plt.figure(figsize=(20, 8))
plt.title("Best 10 ordered items")
plt.xlabel("Item_id")
plt.ylabel("Number of ordered items")
sns.barplot(x=best_10_items_ids, y=best_10_items_values)
# The item with id `20949` is clearly an outlier and needs more investigation.
# ### 5) item_price:
sales.item_price.describe().round(1)
# #### The maximum price is huge compared to the mean and median, so it is considered an outlier and I will remove it.
plt.figure(figsize=(20, 8))
sns.boxplot(sales.item_price)
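# The removal step described above is not shown in this section; a minimal sketch,
# kept in a separate variable so the outlier row can still be inspected below:
sales_no_price_outlier = sales.drop(sales.item_price.idxmax())
print(sales_no_price_outlier.item_price.max())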
# ### 6) item_cnt_day (target variable):
sales.item_cnt_day.describe().round()
plt.figure(figsize=(20, 8))
sns.boxplot(sales.item_cnt_day)
item_categories.nunique()
sales.loc[1163158]
items.loc[6066]
# print(sales.item_price.idxmax())
|
import pandas as pd
import numpy as np
import requests
import re
from bs4 import BeautifulSoup as bs
# Getting the request from the URL
url = "https://www.soccerbase.com/"
response = requests.get(url)
response
# Creating a BeautifulSoup object
soup = bs(response.text, "html.parser")  # specify a parser explicitly
type(soup)
# Finding the relative path for desired page on the website
result_link = soup.find("a", title="Results")
result_link["href"]
# Creating an absolute path for the results page
link = url + result_link["href"]
link
# Getting the request from the above link
results = requests.get(link)
results
# Converting the HTML into the Text
r_soup = bs(results.text, "html.parser")
# Retrieving the Header of Results page
r_soup.find("div", class_="pageHeader pageHeaderLatestResults").find("h1").text
r_soup.find("div", class_="headlineGroop").text
# creating a dataframe from the results data
matches = []
for i in r_soup.find_all("tbody")[1:]:
for j in i:
matches.append(
[
re.sub("\xa0-\xa0", "-", re.sub("ft", " ft", i.text))
for i in j.find_all("td")
if i.text != "" and i.text != "N"
]
)
# print(matches[1:])
df = pd.DataFrame(matches[1:], columns=["Day-Date-FT", "Team1", "Score", "Team2"])
df
# Here we are checking for the rows which are redundant
matches
# Removing the None values from the dataframe
df = df.dropna()
df
# Checking the shape of the dataframe
df.shape
# Resetting the index of the dataframe, which was disturbed by dropping the None values
df.reset_index(drop=True, inplace=True)  # drop the old index column
# Finally printing the clean Dataframe
df
# Saving the file in CSV format
df.to_csv("results.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import xlrd
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
exportdf = pd.read_csv("/kaggle/input/india-trade-data/2018-2010_export.csv")
importdf = pd.read_csv("/kaggle/input/india-trade-data/2018-2010_import.csv")
importdf.drop_duplicates(inplace=True)
exportdf.drop_duplicates(inplace=True)
print("Export Unqiue: " + str(len(exportdf["Commodity"].unique())))
print("Import Unqiue: " + str(len(importdf["Commodity"].unique())))
print("Export Value Sum: " + str(exportdf["value"].sum()))
print("Import Value Sum: " + str(importdf["value"].sum()))
print(
    "Total deficit over 10 years: "
    + str(exportdf["value"].sum() - importdf["value"].sum())
)
growthImport = importdf.groupby("year").agg({"value": sum})
sns.barplot(y=growthImport.value, x=growthImport.index)
growthExport = exportdf.groupby("year").agg({"value": sum})
sns.barplot(y=growthExport.value, x=growthExport.index)
commodity = (
importdf[["value", "Commodity"]]
.groupby("Commodity")
.agg({"value": "sum"})
.sort_values(by="value", ascending=False)[:10]
)
sns.barplot(y=commodity.index, x=commodity.value)
most_expensive = importdf[importdf.value > 1000]
most_expensive1 = most_expensive.groupby(["country"]).agg({"value": "sum"})
most_expensive1.sort_values(by="value", ascending=False)
most_expensive1
plt.figure(figsize=(15, 5))
most_expensiveHSCode = (
most_expensive.groupby(["HSCode", "country"])
.agg({"value": "sum"})
.sort_values(by="value", ascending=False)[:15]
)
# draw the plot once and rotate the x tick labels
ax = sns.barplot(x=most_expensiveHSCode.index, y=most_expensiveHSCode.value)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
|
# # A try to the dogs vs cats dataset using a simple CNN
# This has been one of my first ML projects.
# I got inspired by [sentdesk](https://pythonprogramming.net/convolutional-neural-network-deep-learning-python-tensorflow-keras/), [Adrian Rosebrock](https://www.pyimagesearch.com/2018/12/24/how-to-use-keras-fit-and-fit_generator-a-hands-on-tutorial/) and [Uysim Ty](https://www.kaggle.com/uysimty/keras-cnn-dog-or-cat-classification). Thanks!
# Any feedback would be great :)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk(
"/kaggle/input/dogs-vs-cats-redux-kernels-edition/"
):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Import necessary packages
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
import random
from mlxtend.plotting import plot_confusion_matrix
# # Let's have a look of where the data is
for dirname, _, filenames in os.walk("/kaggle/input/"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # So we need to extract the images in the zip files
import zipfile  # needed for reading the dataset archives
with zipfile.ZipFile("../input/dogs-vs-cats-redux-kernels-edition/train.zip") as z:
z.extractall("..")
with zipfile.ZipFile("../input/dogs-vs-cats-redux-kernels-edition/test.zip") as z:
z.extractall("..")
print(os.listdir(".."))
# # Now, we have two folders containing each train and test images
# # The next step is to load the training data
# ## Both features (images) and labels (dog or cat) are loaded into a python list
DATADIR = "../train"
training_data = []
RESIZE = 100
X = []
y = []
def create_training_data():
for img in os.listdir(DATADIR):
try:
img_array = cv2.imread(os.path.join(DATADIR, img), cv2.IMREAD_GRAYSCALE)
img2 = cv2.resize(img_array, (RESIZE, RESIZE))
img2 = (img2 - img2.mean()) / img2.std()
if img[:3] == "dog":
class_num = 0
else:
class_num = 1
X.append(img2)
y.append(class_num)
except Exception as e:
pass
create_training_data()
# ## The python list containing the loaded data is converted into two numpy arrays, one for features and one for labels
X = np.array(X).reshape(-1, RESIZE, RESIZE, 1)
y = np.asarray(y)
# ## Now, we divide the training data into two sets, one for training and one for validation
(X_train, X_val, y_train, y_val) = train_test_split(
X, y, test_size=0.3, random_state=42
)
# # Now, it is time to build and train a simple CNN model
# ## First, we create generators for augmentation of training data and for normalization of validation data
aug_train = ImageDataGenerator(
rotation_range=20,
zoom_range=0.15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.15,
horizontal_flip=True,
fill_mode="nearest",
)
generator_val = ImageDataGenerator()
# ## The ImageDataGenerator.fit method computes dataset-wide statistics for feature normalization (it only has an effect when featurewise options such as featurewise_center are enabled)
aug_train.fit(X_train)
generator_val.fit(X_val)
# ## Now we build a CNN. Let's try a simple one consisting of 5 conv layers, one dense layer and one output layer
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(2, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"]
)
model.summary()
# ## Ok, let's now train the model
earlystop = EarlyStopping(patience=10)
history = model.fit(
aug_train.flow(X_train, y_train, batch_size=32),
validation_data=generator_val.flow(X_val, y_val, batch_size=32),
epochs=100,
callbacks=[earlystop],
)
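# confusion_matrix, accuracy_score and plot_confusion_matrix are imported above but never
# used; a minimal evaluation sketch on the validation split (assumes the trained `model`,
# `X_val` and `y_val` from this notebook):
val_probs = model.predict(X_val, batch_size=32)
val_preds = np.argmax(val_probs, axis=1)
print("Validation accuracy:", accuracy_score(y_val, val_preds))
cm = confusion_matrix(y_val, val_preds)
plot_confusion_matrix(conf_mat=cm, class_names=["dog", "cat"])
plt.show()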
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import folium # plotting library
from folium import plugins
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Introduction
# ### First, we will take a quick look at our data in order to understand what we are working with! Then we will clean/filter it!
# ### Afterwards, we will apply k-Means clustering to identify similar airports based on the number of occurrences of each type of weather event at that particular airport! We will also use Principal Component Analysis to visualise the high-dimensional data, so that we can see how our clusters are related in the original space.
# ### Finally, the clustered airports will be illustrated using the Seaborn library and Folium maps
# # Data Overview
df = pd.read_csv("../input/us-weather-events/US_WeatherEvents_2016-2019.csv")
df.head()
df["Type"].value_counts()
df["Severity"].value_counts()
# ## Data prep for k-Means clustering
# ### Let's filter our data, discarding the events whose severity is 'UNK' or 'Other'
df = df[(df["Severity"] != "UNK") & (df["Severity"] != "Other")]
df.head()
df_types = df[["AirportCode", "Type"]]
df_types.head()
# ### Here, we are going to group the occurrences of each event type for each airport!
types = pd.get_dummies(df_types["Type"])
types["AirportCode"] = df_types["AirportCode"]
types = types.groupby("AirportCode").sum().reset_index()
types.head()
# # k-Means Clustering
codes = types[["AirportCode"]]
types.drop("AirportCode", axis=1, inplace=True)
# ### In order to identify the optimal number of clusters, we use the Elbow Method! The optimal number of clusters is reached where the slope of the curve becomes almost horizontal!
distortions = []
K = range(1, 20)
for k in K:
kmean = KMeans(n_clusters=k, random_state=0, n_init=50, max_iter=500)
kmean.fit(types)
distortions.append(kmean.inertia_)
plt.figure(figsize=(10, 5))
plt.plot(K, distortions, "bx-")
plt.xlabel("k")
plt.ylabel("Distortion")
plt.title("The Elbow Method")
plt.show()
# ### The elbow method seems to suggest 4 or 5 clusters!
# run k-means clustering
kmeans = KMeans(n_clusters=4, random_state=0).fit(types)
codes["cluster"] = kmeans.labels_
codes.head()
# ### I usually apply dimensionality reduction techniques in order to visualise how our clusters are related in the original high-dimensional space! Moreover, we are able to see whether the features of our data are linearly related to each other.
pca = PCA().fit(types)
pca_types = pca.transform(types)
print("Variance explained by each component (%): ")
for i in range(len(pca.explained_variance_ratio_)):
print("\n", i + 1, "º:", pca.explained_variance_ratio_[i] * 100)
print("Total sum (%): ", sum(pca.explained_variance_ratio_) * 100)
print(
    "Explained variance of the first two components (%): ",
    sum(pca.explained_variance_ratio_[0:2]) * 100,
)
# ### Since the number of samples is larger than the number of features, we can compute all 5 principal components (PCs), so 100% of the original information is explained by these PCs.
# ### We can see that using the first two components we are able to preserve about 63.65% of the original information, therefore reducing the dimensionality of our data.
# ### Let's use these PC to visualise our clusters!
c0 = []
c1 = []
c2 = []
c3 = []
for i in range(len(pca_types)):
if kmeans.labels_[i] == 0:
c0.append(pca_types[i])
if kmeans.labels_[i] == 1:
c1.append(pca_types[i])
if kmeans.labels_[i] == 2:
c2.append(pca_types[i])
if kmeans.labels_[i] == 3:
c3.append(pca_types[i])
c0 = np.array(c0)
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
plt.figure(figsize=(7, 7))
plt.scatter(c0[:, 0], c0[:, 1], c="red", label="Cluster 0")
plt.scatter(c1[:, 0], c1[:, 1], c="blue", label="Cluster 1")
plt.scatter(c2[:, 0], c2[:, 1], c="green", label="Cluster 2")
plt.scatter(c3[:, 0], c3[:, 1], c="black", label="Cluster 3")
plt.legend()
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.title("Low dimensional visualization (PCA) - Airports")
# ### We see that 4 clusters seem to be a reasonable choice for identifying similar samples within our data!
# ### Let's take a look in the particularity of each cluster using seaborn library!
types["cluster"] = kmeans.labels_
types.head()
types.groupby("cluster").mean()
sns.catplot(x="cluster", y="Cold", data=types, kind="bar")
sns.catplot(x="cluster", y="Fog", data=types, kind="bar")
sns.catplot(x="cluster", y="Rain", data=types, kind="bar")
sns.catplot(x="cluster", y="Snow", data=types, kind="bar")
sns.catplot(x="cluster", y="Storm", data=types, kind="bar")
# ### Looking into these plots we can see that cluster 0 is the most affected by snow and cold! And cluster 3 is the most affected by rains!
# # Folium Maps Visualisation by Number of Occurrences and Clustering
# ### Firstly, we need to create a map of the USA
# ### We are going to plot two maps: the first one will display airports by the number of weather events that occurred at that airport! The size of each marker will vary according to these numbers. The second map will show us the clusters that we obtained through k-Means!
#
latitude = 38.500000
longitude = -95.665
map_USA = folium.Map(location=[latitude, longitude], zoom_start=4)
map_USA
airports = df[["AirportCode", "LocationLat", "LocationLng", "City", "State"]]
airports.head()
number_of_occurences = pd.DataFrame(airports["AirportCode"].value_counts())
number_of_occurences.reset_index(inplace=True)
number_of_occurences.columns = ["AirportCode", "Count"]
number_of_occurences.head()
number_of_occurences = number_of_occurences.merge(airports.drop_duplicates())
number_of_occurences = number_of_occurences.merge(codes)
number_of_occurences.head()
occurences = folium.map.FeatureGroup()
n_mean = number_of_occurences["Count"].mean()
for lat, lng, number, city, state in zip(
number_of_occurences["LocationLat"],
number_of_occurences["LocationLng"],
number_of_occurences["Count"],
number_of_occurences["City"],
number_of_occurences["State"],
):
occurences.add_child(
folium.vector_layers.CircleMarker(
[lat, lng],
radius=number
/ n_mean
* 5, # define how big you want the circle markers to be
color="yellow",
fill=True,
fill_color="blue",
fill_opacity=0.6,
tooltip=str(number) + "," + str(city) + "," + str(state),
)
)
map_USA.add_child(occurences)
# ### We can see that the airports that registered the greatest number of occurrences are in the north of the West Coast!
# ### But, in general, the airports located far away from the coast suffered less from weather events! However, the state of Colorado seems to be an exception to that :)
# ### Finally, let's see our clusters!
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=4)
# set color scheme for the clusters
x = np.arange(4)
ys = [i + x + (i * x) ** 2 for i in range(4)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lng, cluster, city, state in zip(
number_of_occurences["LocationLat"],
number_of_occurences["LocationLng"],
number_of_occurences["cluster"],
number_of_occurences["City"],
number_of_occurences["State"],
):
# label = folium.Popup(str(city)+ ','+str(state) + '- Cluster ' + str(cluster), parse_html=True)
folium.vector_layers.CircleMarker(
[lat, lng],
radius=5,
# popup=label,
tooltip=str(city) + "," + str(state) + "- Cluster " + str(cluster),
color=rainbow[cluster - 1],
fill=True,
fill_color=rainbow[cluster - 1],
fill_opacity=0.9,
).add_to(map_clusters)
map_clusters
|
# # Indian Bird Species Classification
# This notebook demonstrates how to train a deep learning model to classify images of 25 different bird species found in India.
# Tags: deep learning, computer vision, image classification, PyTorch
#
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
import torch.nn.functional as F
import warnings
warnings.filterwarnings("ignore")
# # Load and preprocess the dataset
# This code loads and preprocesses a dataset of Indian bird species images using PyTorch.
# To begin, the number of classes in the dataset is defined as `num_classes = 25`. This is a necessary step when training a classification model, as the model needs to know how many different classes to predict.
# Next, the directory containing the image dataset is specified using the `data_dir` variable. The dataset contains images of Indian bird species, which are organized into separate folders for each class.
# A series of data transformations are then defined using the `transforms.Compose()` function from PyTorch. This function takes a list of transform objects and applies them sequentially to each image. In this case, the following transformations are applied:
# - `transforms.Resize((224, 224))`: This transformation resizes each image to a fixed size of 224x224 pixels. This is a common size for image classification models.
# - `transforms.RandomHorizontalFlip()`: This transformation randomly flips each image horizontally with a probability of 0.5. This helps to increase the diversity of the training data and improve the model's robustness.
# - `transforms.ToTensor()`: This transformation converts each image to a PyTorch tensor. Tensors are the primary data structure used by PyTorch for training deep learning models.
# - `transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])`: This transformation normalizes each image with the mean and standard deviation values from the ImageNet dataset. Normalization helps to make the data more comparable across different images and can improve the convergence of the training process.
# Finally, the `datasets.ImageFolder()` function from PyTorch is used to create a PyTorch dataset from the image files. This function reads the images from the directory specified by `data_dir` and applies the transformations defined in `transform` to each image. The resulting dataset can be used for training a deep learning model to classify the images based on their bird species.
# Define the number of classes
num_classes = 25
# Load the dataset
data_dir = "/kaggle/input/25-indian-bird-species-with-226k-images"
transform = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
dataset = datasets.ImageFolder(data_dir, transform=transform)
# # Define cross-validation splits
# This code uses the `KFold` function from the scikit-learn library to define cross-validation splits for the dataset. Cross-validation is a common technique used in machine learning to evaluate the performance of a model on a limited dataset.
# To create the cross-validation splits, the `KFold` function is used. This function takes several parameters, including `n_splits`, which specifies the number of folds to create. In this case, `n_splits=5`, which means that the dataset will be split into 5 equal-sized parts.
# The `shuffle=True` parameter indicates that the data should be shuffled before splitting. This can help to ensure that each fold contains a diverse set of data and that the model is not biased towards any particular subset of the data.
# The `random_state` parameter sets the random seed for reproducibility. By setting a random seed, the splits will be generated in a consistent manner each time the code is run, which can help with debugging and comparison of different models.
#
# Define the cross-validation splits
kf = KFold(n_splits=5, shuffle=True, random_state=123)
# # Train and evaluate the model using cross-validation
# This code trains and evaluates a deep learning model using cross-validation. Cross-validation is a technique used to evaluate the performance of a machine learning model on a limited dataset. It involves splitting the data into several folds, training the model on each fold, and evaluating its performance on the remaining data.
# To perform cross-validation, the code uses the `KFold` function from the scikit-learn library to generate indices for the training and validation sets for each fold. The `enumerate(kf.split(dataset))` function is used to iterate over each fold of the cross-validation process.
# In each fold, the code defines a ResNet-18 model with a modified fully connected layer that has `num_classes` output units. This model is then trained for 10 epochs using the `CrossEntropyLoss` loss function and the `Adam` optimizer.
# After each epoch, the training loss is printed to the console. This provides a measure of how well the model is learning from the training data.
# Once training is complete, the code evaluates the model on the validation set and prints the accuracy to the console. This provides a measure of how well the model generalizes to new, unseen data. The process is repeated for each fold of the cross-validation process.
# By using cross-validation, the code is able to obtain a more robust estimate of the model's performance than would be possible with a single train-test split. This can be helpful for evaluating the effectiveness of different models or hyperparameter settings.
#
from torchvision.models import resnet18
# Train and evaluate the model using cross-validation
for fold, (train_idx, test_idx) in enumerate(kf.split(dataset)):
print(f"Fold {fold+1}")
# Define the model and move it to the GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, num_classes)
model = model.to(device)
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
test_sampler = torch.utils.data.SubsetRandomSampler(test_idx)
train_loader = DataLoader(
dataset, batch_size=64, sampler=train_sampler, num_workers=4, pin_memory=True
)
test_loader = DataLoader(
dataset, batch_size=64, sampler=test_sampler, num_workers=4, pin_memory=True
)
for epoch in range(10):
running_loss = 0.0
for i, (inputs, labels) in enumerate(train_loader, 0):
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f"Epoch {epoch+1} loss: {running_loss/len(train_loader)}")
correct = 0
total = 0
with torch.no_grad():
for inputs, labels in test_loader:
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy: {correct/total}")
# # Save the trained model
# This code saves the trained ResNet-18 model to a file specified by the `model_path` variable using the `state_dict()` function from PyTorch. This function returns a dictionary containing the parameters and persistent buffers of the model, which can be used to save and load the model's state.
# The saved model can be loaded later using the `load_state_dict()` function from PyTorch. This allows you to reuse the trained model for inference or further training without having to retrain the model from scratch.
#
# Save the trained model
model_path = "/kaggle/working/trained_resnet18.pth"
torch.save(model.state_dict(), model_path)
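# The loading step mentioned above is not shown; a minimal sketch for reusing the saved
# weights (assumes the same `num_classes`, `model_path` and `device` defined earlier):
loaded_model = resnet18(pretrained=False)
loaded_model.fc = nn.Linear(loaded_model.fc.in_features, num_classes)
loaded_model.load_state_dict(torch.load(model_path, map_location=device))
loaded_model.eval()  # switch to inference mode before making predictions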
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.preprocessing import LabelEncoder
import pandas as pd
sample_submission = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
print(train["GrLivArea"].head())
plt.scatter(x=train["GrLivArea"], y=train["SalePrice"])
plt.show()
train = train.drop(
train[(train["GrLivArea"] > 4000) & (train["SalePrice"] < 300000)].index
)
plt.scatter(x=train["GrLivArea"], y=train["SalePrice"])
plt.show()
sns.distplot(train["SalePrice"], fit=norm)
train_test = pd.concat((train, test))
train_test.drop(["SalePrice"], axis=1, inplace=True)
print(train_test.info())
na = 100 * train_test.isnull().sum() / (len(train_test))
na = na.drop(na[na == 0].index).sort_values(ascending=False)
na = pd.DataFrame(na, columns=["percentage"])
_, _ = plt.subplots(figsize=(10, 7))
plt.xticks(rotation="90")
sns.barplot(x=na.index, y=na["percentage"])
cor_map = train.corr()
plt.subplots(figsize=(12, 8))
sns.heatmap(cor_map)
na_col = [
"PoolQC",
"MiscFeature",
"Alley",
"Fence",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"MasVnrType",
"MSSubClass",
]
for col in na_col:
train[col] = train[col].fillna("na")
test[col] = test[col].fillna("na")
zero_col = [
"GarageYrBlt",
"GarageArea",
"GarageCars",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
"MasVnrArea",
]
for col in zero_col:
train[col] = train[col].fillna(0)
test[col] = test[col].fillna(0)
train["LotFrontage"] = train.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median())
)
test["LotFrontage"] = test.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median())
)
train = train.drop(["Utilities"], axis=1)
test = test.drop(["Utilities"], axis=1)
mode_col = ["MSZoning", "Electrical", "KitchenQual", "Exterior1st", "Exterior2nd"]
for col in mode_col:
train[col] = train[col].fillna(train[col].mode()[0])
test[col] = test[col].fillna(train[col].mode()[0])
train_test_clean = pd.concat((train, test))
train_test_clean_na = 100 * train_test_clean.isnull().sum() / len(train_test_clean)
print(pd.DataFrame({"missing": train_test_clean_na}))
str_col = ["MSSubClass", "OverallCond", "YrSold", "MoSold"]
for col in str_col:
train[col] = train[col].astype(str)
test[col] = test[col].astype(str)
enc_col = [
"FireplaceQu",
"BsmtQual",
"BsmtCond",
"GarageQual",
"GarageCond",
"ExterQual",
"ExterCond",
"HeatingQC",
"PoolQC",
"KitchenQual",
"BsmtFinType1",
"BsmtFinType2",
"Functional",
"Fence",
"BsmtExposure",
"GarageFinish",
"LandSlope",
"LotShape",
"PavedDrive",
"Street",
"Alley",
"CentralAir",
"MSSubClass",
"OverallCond",
"YrSold",
"MoSold",
]
lbl = LabelEncoder()
for col in enc_col:
    # fit on the combined train/test values so both sets share the same encoding
    lbl.fit(list(train[col].values) + list(test[col].values))
    train[col] = lbl.transform(list(train[col].values))
    test[col] = lbl.transform(list(test[col].values))
train["TotalSF"] = train["TotalBsmtSF"] + train["1stFlrSF"] + train["2ndFlrSF"]
test["TotalSF"] = test["TotalBsmtSF"] + test["1stFlrSF"] + test["2ndFlrSF"]
numerical_col = train.dtypes[train.dtypes != "object"].index # filter numerical columns
pd.DataFrame(train[numerical_col].skew().sort_values(ascending=False).head())
numerical_col_test = test.dtypes[test.dtypes != "object"].index  # numerical test columns
pd.DataFrame(test[numerical_col_test].skew().sort_values(ascending=False).head())
|
import mxnet as mx
from mxnet import nd, autograd, gluon
from mxnet.gluon import nn
from mxnet.gluon.data import vision
from matplotlib import pyplot as plt
from tqdm.notebook import tqdm
from distutils.dir_util import copy_tree
import os
plt.style.use("seaborn")
ctx = mx.gpu()
fromDirectory = "../input/natural-images/data/natural_images"
toDirectory = "../../training/"
copy_tree(fromDirectory, toDirectory)
os.chdir("../../")
os.listdir("./")
train_root = "./training/"
val_root = "./val/"
test_root = "./test/"
os.mkdir(val_root)
os.mkdir(test_root)
categories = os.listdir(train_root)
categories.sort()
print("Categories:", categories)
print("Total Categories:", len(categories))
for category in categories:
os.mkdir(os.path.join(val_root, category))
os.mkdir(os.path.join(test_root, category))
print(f"{len(os.listdir(train_root + category))} images for '{category}' category")
for category in categories:
print(f"Creating validation and testing dataset for '{category}' category")
for _ in range(10):
images = os.listdir(train_root + category)
idx = int(nd.random.randint(0, len(images)).asscalar())
image = images[idx]
os.rename(
os.path.join(train_root, category, image),
os.path.join(val_root, category, image),
)
for _ in range(150):
images = os.listdir(train_root + category)
idx = int(nd.random.randint(0, len(images)).asscalar())
image = images[idx]
os.rename(
os.path.join(train_root, category, image),
os.path.join(test_root, category, image),
)
train_counts = []
for category in categories:
train_counts.append(len(os.listdir(train_root + category)))
plt.figure(figsize=(8, 8))
plt.bar(categories, train_counts)
plt.title("Training images in each category")
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.show()
val_counts = []
for category in categories:
val_counts.append(len(os.listdir(val_root + category)))
plt.figure(figsize=(8, 8))
plt.bar(categories, val_counts)
plt.title("Validation images in each category")
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.show()
test_counts = []
for category in categories:
test_counts.append(len(os.listdir(test_root + category)))
plt.figure(figsize=(8, 8))
plt.bar(categories, test_counts)
plt.title("Testing images in each category")
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.show()
train_transform = vision.transforms.Compose(
[
vision.transforms.RandomSaturation(saturation=0.1),
vision.transforms.RandomLighting(alpha=0.2),
vision.transforms.RandomHue(hue=0.1),
vision.transforms.RandomFlipLeftRight(),
vision.transforms.RandomContrast(contrast=0.2),
vision.transforms.RandomColorJitter(
brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1
),
vision.transforms.Resize(128),
vision.transforms.ToTensor(),
]
)
transform = vision.transforms.Compose(
[vision.transforms.Resize(128), vision.transforms.ToTensor()]
)
batch_size = 64
train_data = gluon.data.DataLoader(
vision.ImageFolderDataset(root=train_root, flag=1).transform_first(train_transform),
batch_size=batch_size,
shuffle=True,
)
val_data = gluon.data.DataLoader(
vision.ImageFolderDataset(root=val_root, flag=1).transform_first(transform),
batch_size=batch_size,
shuffle=False,
)
test_data = gluon.data.DataLoader(
vision.ImageFolderDataset(root=test_root, flag=1).transform_first(transform),
batch_size=batch_size,
shuffle=False,
)
print(f"{len(train_data)} batches in training data")
print(f"{len(val_data)} batches in validation data")
print(f"{len(test_data)} batches in testing data")
for features, labels in train_data:
break
print(f"features.shape: {features.shape}")
print(f"labels.shape: {labels.shape}")
print(f"features.max(): {features.max().asscalar()}")
print(f"features.min(): {features.min().asscalar()}")
plt.figure(figsize=(10, 12))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.imshow(features[i].transpose((1, 2, 0)).asnumpy())
plt.title(categories[int(labels[i].asscalar())].title())
plt.axis("off")
plt.show()
model = gluon.model_zoo.vision.mobilenet_v2_1_0(pretrained=True, ctx=ctx)
with model.name_scope():
model.output.add(nn.Dropout(0.5))
model.output.add(nn.Dense(len(categories)))
model.output.initialize(mx.init.Xavier(), ctx=ctx)
print(model)
model.summary(features.as_in_context(ctx))
mx.viz.plot_network(model(mx.sym.var(name="data")), node_attrs={"fixedsize": "false"})
model.hybridize()
objective = gluon.loss.SoftmaxCrossEntropyLoss()
optimizer = mx.optimizer.Adam(learning_rate=0.0005)
trainer = gluon.Trainer(model.collect_params(), optimizer)
metric = mx.metric.Accuracy()
epochs = 10
batches = len(train_data)
train_losses = []
train_accs = []
val_losses = []
val_accs = []
best_val = 0.0
for epoch in range(epochs):
metric.reset()
cum_loss = 0.0
for features, labels in tqdm(
train_data, desc=f"Epoch: {epoch + 1} Completed", ncols=800
):
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
with autograd.record():
outputs = model(features)
loss = objective(outputs, labels)
loss.backward()
trainer.step(batch_size)
cum_loss += loss.mean()
metric.update(labels, outputs)
train_loss = cum_loss.asscalar() / batches
train_acc = metric.get()[1]
train_losses.append(train_loss)
train_accs.append(train_acc)
metric.reset()
cum_loss = 0.0
    for features, labels in val_data:
        features = features.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        outputs = model(features)
        metric.update(labels, outputs)
        cum_loss += objective(outputs, labels).mean()
    val_loss = cum_loss.asscalar() / len(val_data)
val_acc = metric.get()[1]
val_losses.append(val_loss)
val_accs.append(val_acc)
print(f"Training Loss:\t {train_loss:.5f} | Training Accuracy: {train_acc:.5f}")
print(f"Validation Loss: {val_loss:.5f} | Validation Accuracy: {val_acc:.5f}")
if val_acc > best_val:
print("Saving model for best validation accuracy")
model.save_parameters("model.params")
best_val = val_acc
plt.figure(figsize=(10, 5))
plt.plot(train_accs, label="Training Accuracy")
plt.plot(val_accs, label="Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Training and Validation Accuracy")
plt.legend()
plt.show()
plt.figure(figsize=(10, 5))
plt.plot(train_losses, label="Training Loss")
plt.plot(val_losses, label="Validation Loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
model.load_parameters("model.params")
metric.reset()
for features, labels in test_data:
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
outputs = model(features)
metric.update(labels, outputs)
print(f"Testing Accuracy: {metric.get()[1]}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
np.random.seed(1212)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import keras
from keras.models import Model
from keras.layers import *
from keras import optimizers
df_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
df_train.head() # 784 features, 1 label
df_test.head()  # 784 features, no label
# Splitting into training and validation dataset
df_features = df_train.iloc[:, 1:785]
df_label = df_train.iloc[:, 0]
X_test = df_test.iloc[:, 0:784]
print(df_features.shape)
from sklearn.model_selection import train_test_split
X_train, X_cv, y_train, y_cv = train_test_split(
df_features, df_label, test_size=0.2, random_state=1212
)
# Data cleaning, normalization and selection
print((X_train.values.min(), X_train.values.max()))  # pixel value range
# Feature Normalization
X_train = X_train.astype("float32")
X_cv = X_cv.astype("float32")
X_test = X_test.astype("float32")
X_train = X_train / 255
X_cv = X_cv / 255
X_test = X_test / 255
# Convert labels to One Hot Encoded
num_digits = 10
y_train = keras.utils.to_categorical(y_train, num_digits)
y_cv = keras.utils.to_categorical(y_cv, num_digits)
# Model Fitting
# Input Parameters
n_input = 784 # number of features
n_hidden_1 = 300
n_hidden_2 = 100
n_hidden_3 = 100
n_hidden_4 = 200
num_digits = 10
Inp = Input(shape=(784,))
x = Dense(n_hidden_1, activation="relu", name="Hidden_Layer_1")(Inp)
x = Dense(n_hidden_2, activation="relu", name="Hidden_Layer_2")(x)
x = Dense(n_hidden_3, activation="relu", name="Hidden_Layer_3")(x)
x = Dense(n_hidden_4, activation="relu", name="Hidden_Layer_4")(x)
output = Dense(num_digits, activation="softmax", name="Output_Layer")(x)
# Our model would have '6' layers - an input layer, 4 hidden layers and 1 output layer
model = Model(Inp, output)
model.summary() # We have 297,910 parameters to estimate
# Insert Hyperparameters
learning_rate = 0.1
training_epochs = 20
batch_size = 100
sgd = optimizers.SGD(lr=learning_rate)
# We rely on the plain vanilla Stochastic Gradient Descent as our optimizing methodology
# pass the SGD instance so the chosen learning rate is actually used
model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
history1 = model.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=training_epochs,
verbose=2,
validation_data=(X_cv, y_cv),
)
Inp = Input(shape=(784,))
x = Dense(n_hidden_1, activation="relu", name="Hidden_Layer_1")(Inp)
x = Dense(n_hidden_2, activation="relu", name="Hidden_Layer_2")(x)
x = Dense(n_hidden_3, activation="relu", name="Hidden_Layer_3")(x)
x = Dense(n_hidden_4, activation="relu", name="Hidden_Layer_4")(x)
output = Dense(num_digits, activation="softmax", name="Output_Layer")(x)
# We rely on ADAM as our optimizing methodology
adam = keras.optimizers.Adam(lr=learning_rate)
model2 = Model(Inp, output)
model2.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"])
history2 = model2.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=training_epochs,
verbose=2,
validation_data=(X_cv, y_cv),
)
Inp = Input(shape=(784,))
x = Dense(n_hidden_1, activation="relu", name="Hidden_Layer_1")(Inp)
x = Dense(n_hidden_2, activation="relu", name="Hidden_Layer_2")(x)
x = Dense(n_hidden_3, activation="relu", name="Hidden_Layer_3")(x)
x = Dense(n_hidden_4, activation="relu", name="Hidden_Layer_4")(x)
output = Dense(num_digits, activation="softmax", name="Output_Layer")(x)
learning_rate = 0.01
adam = keras.optimizers.Adam(lr=learning_rate)
model2a = Model(Inp, output)
model2a.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"])
history2a = model2a.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=training_epochs,
verbose=2,
validation_data=(X_cv, y_cv),
)
Inp = Input(shape=(784,))
x = Dense(n_hidden_1, activation="relu", name="Hidden_Layer_1")(Inp)
x = Dense(n_hidden_2, activation="relu", name="Hidden_Layer_2")(x)
x = Dense(n_hidden_3, activation="relu", name="Hidden_Layer_3")(x)
x = Dense(n_hidden_4, activation="relu", name="Hidden_Layer_4")(x)
output = Dense(num_digits, activation="softmax", name="Output_Layer")(x)
learning_rate = 0.5
adam = keras.optimizers.Adam(lr=learning_rate)
model2b = Model(Inp, output)
model2b.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"])
history2b = model2b.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=training_epochs,
validation_data=(X_cv, y_cv),
)
# Input Parameters
n_input = 784 # number of features
n_hidden_1 = 300
n_hidden_2 = 100
n_hidden_3 = 100
n_hidden_4 = 100
n_hidden_5 = 200
num_digits = 10
Inp = Input(shape=(784,))
x = Dense(n_hidden_1, activation="relu", name="Hidden_Layer_1")(Inp)
x = Dense(n_hidden_2, activation="relu", name="Hidden_Layer_2")(x)
x = Dense(n_hidden_3, activation="relu", name="Hidden_Layer_3")(x)
x = Dense(n_hidden_4, activation="relu", name="Hidden_Layer_4")(x)
x = Dense(n_hidden_5, activation="relu", name="Hidden_Layer_5")(x)
output = Dense(num_digits, activation="softmax", name="Output_Layer")(x)
# Our model would have '7' layers - an input layer, 5 hidden layers and 1 output layer
model3 = Model(Inp, output)
model3.summary() # We have 308,010 parameters to estimate
# We rely on 'Adam' as our optimizing methodology
adam = keras.optimizers.Adam(lr=0.01)
model3.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"])
history3 = model3.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=training_epochs,
validation_data=(X_cv, y_cv),
)
# Input Parameters
n_input = 784 # number of features
n_hidden_1 = 300
n_hidden_2 = 100
n_hidden_3 = 100
n_hidden_4 = 200
num_digits = 10
Inp = Input(shape=(784,))
x = Dense(n_hidden_1, activation="relu", name="Hidden_Layer_1")(Inp)
x = Dropout(0.3)(x)
x = Dense(n_hidden_2, activation="relu", name="Hidden_Layer_2")(x)
x = Dropout(0.3)(x)
x = Dense(n_hidden_3, activation="relu", name="Hidden_Layer_3")(x)
x = Dropout(0.3)(x)
x = Dense(n_hidden_4, activation="relu", name="Hidden_Layer_4")(x)
output = Dense(num_digits, activation="softmax", name="Output_Layer")(x)
# Our model would have '6' layers - an input layer, 4 hidden layers and 1 output layer
model4 = Model(Inp, output)
model4.summary() # We have 297,910 parameters to estimate
model4.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model4.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=training_epochs,
validation_data=(X_cv, y_cv),
)
test_pred = pd.DataFrame(model4.predict(X_test, batch_size=200))
test_pred = pd.DataFrame(test_pred.idxmax(axis=1))
test_pred.index.name = "ImageId"
test_pred = test_pred.rename(columns={0: "Label"}).reset_index()
test_pred["ImageId"] = test_pred["ImageId"] + 1
test_pred.head()
test_pred.to_csv("mnist_test.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
PATH = "/kaggle/input/bengaliai-cv19/"
# Any results you write to the current directory are saved as output.
import fastai
from fastai.vision import *
from fastai.callbacks import SaveModelCallback
# from csvlogger import *
# from radam import *
# from mish_activation import *
import warnings
warnings.filterwarnings("ignore")
fastai.__version__
import cv2
import zipfile
from tqdm import tqdm_notebook as tqdm
import random
import torchvision
SEED = 42
LABELS = "train.csv"
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # keep runs reproducible
seed_everything(SEED)
HEIGHT = 137
WIDTH = 236
SIZE = (128, 128)
BATCH = 128
TRAIN = [
PATH + "train_image_data_0.parquet",
PATH + "train_image_data_1.parquet",
PATH + "train_image_data_2.parquet",
PATH + "train_image_data_3.parquet",
]
df_label = pd.read_csv(PATH + LABELS)
nunique = list(df_label.nunique())[1:-1]
print(nunique)
df_label["components"] = (
"r_"
+ df_label["grapheme_root"].astype(str)
+ ","
+ "v_"
+ df_label["vowel_diacritic"].astype(str)
+ ","
+ "c_"
+ df_label["consonant_diacritic"].astype(str)
)
df_label.head()
df_split = df_label[["image_id", "grapheme_root"]].astype(str).copy()
df_split["total"] = df_split.groupby("grapheme_root")["grapheme_root"].transform(
"count"
)
df_gpd = df_split.groupby("grapheme_root").apply(
lambda x: x.sample(frac=0.2, random_state=47)
)
df_gpd.describe()
df_merged_root = pd.merge(
left=df_split, right=df_gpd, on="image_id", how="left", suffixes=("", "_y")
)
df_merged_root["is_valid_root"] = df_merged_root.grapheme_root_y.isnull() != True
df_merged_root.drop(["grapheme_root_y", "total_y"], axis=1, inplace=True)
df_merged_root.rename({"total": "total_root"}, axis=1, inplace=True)
df_merged_root.head()
df_split = df_label[["image_id", "vowel_diacritic"]].astype(str).copy()
df_split["total"] = df_split.groupby("vowel_diacritic")["vowel_diacritic"].transform(
"count"
)
df_gpd = df_split.groupby("vowel_diacritic").apply(
lambda x: x.sample(frac=0.2, random_state=47)
)
df_gpd.describe()
df_merged_vowel = pd.merge(
left=df_split, right=df_gpd, on="image_id", how="left", suffixes=("", "_y")
)
df_merged_vowel["is_valid_vowel"] = df_merged_vowel.vowel_diacritic_y.isnull() != True
df_merged_vowel.drop(["vowel_diacritic_y", "total_y"], axis=1, inplace=True)
df_merged_vowel.rename({"total": "total_vowel"}, axis=1, inplace=True)
df_merged_vowel.tail()
df_split = df_label[["image_id", "consonant_diacritic"]].astype(str).copy()
df_split["total"] = df_split.groupby("consonant_diacritic")[
"consonant_diacritic"
].transform("count")
df_gpd = df_split.groupby("consonant_diacritic").apply(
lambda x: x.sample(frac=0.2, random_state=47)
)
# print(df_gpd.describe())
df_merged_conso = pd.merge(
left=df_split, right=df_gpd, on="image_id", how="left", suffixes=("", "_y")
)
df_merged_conso["is_valid_conso"] = (
df_merged_conso.consonant_diacritic_y.isnull() != True
)
df_merged_conso.drop(["consonant_diacritic_y", "total_y"], axis=1, inplace=True)
df_merged_conso.rename({"total": "total_conso"}, axis=1, inplace=True)
df_merged_conso.head()
dfs = [
df.set_index(["image_id"])
for df in [
df_label[["image_id", "components"]],
df_merged_root,
df_merged_vowel,
df_merged_conso,
]
]
df_ = pd.concat(dfs, axis=1).reset_index()
cols = ["is_valid_root", "is_valid_vowel", "is_valid_conso"]
df_["is_valid"] = np.where(df_[cols].eq(True).all(1), True, False)
df_
stats128, stats137, fold, nfolds = ([0.08547], [0.22490]), ([0.06922], [0.20514]), 0, 4
FOLDER = "../input/bengali-grapheme"
src = (
ImageList.from_df(df_, path=".", folder=FOLDER, suffix=".png", cols="image_id")
.split_from_df(col="is_valid")
.label_from_df(cols=["components"], label_delim=",")
)
data = (
src.transform(
get_transforms(do_flip=False, max_warp=0.1), size=SIZE, padding_mode="zeros"
)
.databunch(bs=BATCH)
.normalize(imagenet_stats)
)
data.show_batch()
# Model
arch = models.resnet34
acc_02 = partial(accuracy_thresh, thresh=0.2)
f_score = partial(fbeta, thresh=0.2)
learn = cnn_learner(data, arch, metrics=[acc_02, f_score])
learn.lr_find()
learn.recorder.plot()
lr = 0.03
learn.fit_one_cycle(6, slice(lr))
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, slice(1e-5, lr / 5))
learn.export(Path("/kaggle/working") / "try3-rn34-im128.pkl")
|
# Playground Series - Season 3, Episode 12
# Binary Classification with a Kidney Stone Prediction Dataset
#
#
# # Table of Contents
#
# 1. [Introduction](#intro)
# 1. [Imports and Setups](#import)
# 1. [Data Loading](#loading)
# 1. [Exploratory data analysis (EDA)](#eda)
# 1. [Feature engineering (FE)](#fe)
# 1. [Feature Importance](#importance)
# 1. [Modeling](#model)
# 1. [Prediction](#prediction)
# ___
# # Introduction [↑](#top)
# ## Dataset Description [↑](#top)
# The dataset for this competition (both train and test) was generated from a deep learning model trained on the Kidney Stone Prediction based on Urine Analysis dataset. Feature distributions are close to, but not exactly the same, as the original. Feel free to use the original dataset as part of this competition, both to explore differences as well as to see whether incorporating the original in training improves model performance.
# ### Files [↑](#top)
# * `train.csv` - target is the likelihood of a kidney stone being present
# * `test.csv` - the test dataset; your objective is to predict the probability of target
# * `sample_submission.csv` - a sample submission file in the correct format
# ## Features [↑](#top)
# The six physical characteristics of the urine are:
# * specific `gravity`, the density of the urine relative to water
# * `ph`, the negative logarithm of the hydrogen ion concentration
# * osmolarity (`osmo`), a unit used in biology and medicine but not in
# physical chemistry. Osmolarity is proportional to the concentration of
# molecules in solution
# * conductivity (`cond`), in milliMho (mMho); one Mho is one reciprocal Ohm.
# Conductivity is proportional to the concentration of charged
# ions in solution
# * `urea` concentration in millimoles per litre
# * calcium concentration (`calc`) in millimoles per litre
# [Source](https://www.kaggle.com/datasets/vuppalaadithyasairam/kidney-stone-prediction-based-on-urine-analysis)
# ## Competition goal [↑](#top)
# Submissions are evaluated on area under the ROC curve between the predicted probability and the observed target.
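# As a quick illustration of this metric (a small sketch added here, not part of the
# original write-up), scikit-learn's `roc_auc_score` compares predicted probabilities
# against the observed binary target; the values below are purely illustrative.
from sklearn.metrics import roc_auc_score

_y_true = [0, 0, 1, 1]  # example observed targets
_y_prob = [0.1, 0.4, 0.35, 0.8]  # example predicted probabilities
print(roc_auc_score(_y_true, _y_prob))  # 0.75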
# ___
# # Imports and Setups [↑](#top)
import warnings
warnings.filterwarnings("ignore")
import os
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm.notebook import trange, tqdm
from IPython.display import display, Markdown
from sklearn.feature_selection import mutual_info_classif
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import colorsys
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True)
plt.rc(
"axes",
labelweight="bold",
labelsize="large",
titleweight="bold",
titlesize=14,
titlepad=10,
)
blues_palette = palette = sns.color_palette("Blues_r", n_colors=10)
reds_palette = palette = sns.color_palette("Reds_r", n_colors=10)
greys_palette = sns.color_palette("Greys", n_colors=10)
blue = blues_palette[1]
red = reds_palette[1]
two_colors = [blue, red]
sns.set()
sns.set_theme(style="whitegrid", palette=blues_palette)
sns.color_palette()
class Cfg:
INPUT_ROOT = Path("/kaggle/input/playground-series-s3e12")
OUTPUT_ROOT = Path("/kaggle/working/")
TRAN_FILE = INPUT_ROOT / "train.csv"
TEST_FILE = INPUT_ROOT / "test.csv"
SAMPLE_SUBMISSION_FILE = INPUT_ROOT / "sample_submission.csv"
SUBMISSION_FILE = OUTPUT_ROOT / "submission.csv"
RANDOM_STATE = 2023
NUM_MOST_IMPORTANCE_FEATURES = 20
SAMPLE_SIZE = 1.0
N_TRIALS = 5
TEST_SIZE = 0.2
TARGET = "target"
INDEX = "id"
# ### Helper Functions
def get_feature_names(data, target=Cfg.TARGET):
"""Gets the feature names excluding the target name."""
return data.columns.difference([target])
def get_categorical_feature_names(data, target=Cfg.TARGET):
    return list(
        data.select_dtypes(["category", "object", "bool"]).columns.difference([target])
    )
def get_continuous_feature_names(data, target=Cfg.TARGET):
    # np.float / np.int were removed from recent NumPy; use the builtin dtypes instead
    return list(data.select_dtypes(float).columns.difference([target]))
def get_discrete_feature_names(data, target=Cfg.TARGET):
    return list(data.select_dtypes(int).columns.difference([target]))
def get_numerical_feature_names(data, target=Cfg.TARGET):
    return get_continuous_feature_names(data, target) + get_discrete_feature_names(
        data, target
    )
def factorize(X):
for colname in X.select_dtypes(["category", "object"]):
X[colname], _ = X[colname].factorize()
return X
def make_mi_scores(X, y):
"""Utility functions from FE Tutorial"""
X = factorize(X.copy())
# All discrete features should now have integer dtypes
discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
mi_scores = mutual_info_classif(
X, y, discrete_features=discrete_features, random_state=Cfg.RANDOM_STATE
)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return pd.DataFrame({"feature": X.columns, "mi_score": mi_scores}).set_index(
"feature"
)
def plot_mi_scores(scores, ax=None):
    if ax is None:
        fig, ax = plt.subplots(1, 1)
sns.barplot(
data=scores,
x="mi_score",
y=scores.index,
palette=blues_palette,
orient="h",
alpha=0.8,
ax=ax,
)
ax.set_title("Mutual Information Scores")
ax.set_xlabel("Score")
ax.set_ylabel("Features")
return ax
def plot_hist(data, feature, palette=blues_palette, ax=None, kde=True):
if ax is None:
ax = plt.gca()
sns.histplot(
data=data,
x=feature,
bins=20,
legend=True,
palette=palette,
alpha=0.8,
kde=kde,
ax=ax,
)
mean = np.mean(data[feature])
ax.vlines(
mean, 0, 1, transform=ax.get_xaxis_transform(), color="k", linewidth=2, ls=":"
)
return ax
def plot_count(data, feature, palette=blues_palette, hue=None, ax=None):
if ax is None:
ax = plt.gca()
sns.countplot(data=data, x=feature, hue=hue, palette=palette, alpha=0.8, ax=ax)
return ax
def plot_hist(data, feature, palette=blues_palette, hue=None, ax=None, kde=False):
if ax is None:
ax = plt.gca()
sns.histplot(
data=data,
x=feature,
hue=hue,
bins=30,
legend=True,
palette=palette,
alpha=0.8,
kde=kde,
ax=ax,
)
mean = np.mean(data[feature])
ax.vlines(mean, 0, 1, transform=ax.get_xaxis_transform(), color=red, ls=":")
return ax
def plot_boxplot(data, x=None, y=None, palette=blues_palette, hue=None, ax=None):
if ax is None:
ax = plt.gca()
sns.boxplot(
data=data, x=x, y=y, hue=hue, boxprops=dict(alpha=0.8), palette=palette, ax=ax
)
return ax
def plot_kde(
data, feature, hue=None, ax=None, palette=blues_palette, legend=True, show_mean=True
):
if ax is None:
ax = plt.gca()
sns.kdeplot(
data=data,
x=feature,
hue=hue,
fill=True,
legend=legend,
palette=palette,
alpha=0.8,
ax=ax,
)
if show_mean:
mean = np.mean(data[feature])
ax.vlines(mean, 0, 1, transform=ax.get_xaxis_transform(), color=red, ls=":")
return ax
def plot_scatter(data, x, y, palette=blues_palette, hue=None, ax=None):
if ax is None:
ax = plt.gca()
sns.scatterplot(data=data, x=x, y=y, hue=hue, alpha=0.8, palette=palette, ax=ax)
ax.set_title(f'Scatter "{x}" vs "{y}"')
return ax
# ___
# # Data Loading [↑](#top)
def read_train_data(file=Cfg.TRAN_FILE, index_col=Cfg.INDEX):
"""Reads the train data"""
return pd.read_csv(file).set_index(Cfg.INDEX)
def read_test_data(file=Cfg.TEST_FILE, index_col=Cfg.INDEX):
"""Reads the test data"""
return pd.read_csv(file).set_index(Cfg.INDEX)
train_data = read_train_data()
test_data = read_test_data()
display(train_data.head())
display(test_data.head())
print(f"Train data size: {train_data.shape[0]} rows; {train_data.shape[1]} columns")
print(f"Test data size : {test_data.shape[0]} rows; {test_data.shape[1]} columns")
# ___
# # Exploratory Data Analysis (EDA) [↑](#top)
# ## Basic Statistics [↑](#top)
display(train_data.describe().T)
display(test_data.describe().T)
# ## Unique Values [↑](#top)
features = get_feature_names(train_data)
pd.DataFrame(
{
"feature": features,
"dytpe": train_data[features].dtypes,
"unique": train_data[features].nunique(),
}
)
# ## Missing Values [↑](#top)
features = get_feature_names(train_data)
pd.DataFrame(
{
"feature": features,
"train": train_data[features].isna().sum(),
"test": test_data[features].isna().sum(),
}
).set_index("feature").sort_values(by="train", ascending=False)
# ## Duplicates [↑](#top)
train_data.duplicated().sum(), test_data.duplicated().sum()
print(f"Duplicated values in train data: {train_data.duplicated().sum()}")
print(f"Duplicated values in test data: {test_data.duplicated().sum()}")
# ## Outliers Detection [↑](#top)
from scipy import stats
z_threshold = 3
features = get_numerical_feature_names(train_data)
z_score = pd.DataFrame(
    np.abs(stats.zscore(train_data[features], axis=0)),
    columns=features,
    index=train_data.index,
)
pd.DataFrame(
    {
        "feature": features,
        "num_outlier": [
            train_data[z_score[f] > z_threshold].shape[0] for f in features
        ],
    }
).set_index("feature")
features = get_numerical_feature_names(train_data)
fig, axis = plt.subplots(nrows=2, ncols=4, figsize=(15, 3))
for feature, ax in zip(features, axis.flatten()):
plot_boxplot(train_data, x=feature, palette=blues_palette, ax=ax)
ax.set_title("")
plt.tight_layout()
plt.show()
# ## Target Distribution [↑](#top)
fig, ax = plt.subplots(1, 1, figsize=(3, 3))
plot_count(train_data, Cfg.TARGET, palette=two_colors, ax=ax)
ax.set_title(f"Target Distribution")
plt.show()
# ## Continuous Data Distribution [↑](#top)
features = get_numerical_feature_names(train_data)
fig, axis = plt.subplots(2, 3, figsize=(12, 6))
for feature, ax in zip(features, axis.flatten()):
plot_hist(train_data, feature, hue=Cfg.TARGET, palette=two_colors, ax=ax)
ax.set_title(f'Distribution "{feature}"')
plt.show()
features = get_numerical_feature_names(train_data)
fig, axis = plt.subplots(1, 6, figsize=(16, 4))
for feature, ax in zip(features, axis.flatten()):
plot_boxplot(train_data, x=Cfg.TARGET, y=feature, palette=two_colors, ax=ax)
ax.set_title(f'Boxplot "{feature}"')
plt.tight_layout()
plt.show()
# ## Correlations [↑](#top)
num_features = get_numerical_feature_names(train_data)
num_corr = train_data[num_features].corr()
cmap = sns.diverging_palette(245, 15, as_cmap=False)
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
mask = np.triu(np.ones_like(num_corr, dtype=bool))
sns.heatmap(
num_corr,
mask=mask,
vmax=1,
vmin=-1,
cmap=cmap,
center=0,
square=True,
linewidths=0.1,
ax=ax,
alpha=1,
annot=True,
fmt=".1g",
cbar_kws={"shrink": 0.5},
)
plt.tight_layout()
plt.show()
def plot_pairplot(data, features, hue=Cfg.TARGET, height=3):
grid = sns.pairplot(
data=train_data[features + [Cfg.TARGET]],
palette=two_colors,
height=height,
hue=hue,
corner=True,
)
grid.fig.set_size_inches(10, 8)
for ax in filter(None, grid.axes.flatten()):
ax.set_xlabel(ax.get_xlabel(), rotation=90)
ax.set_ylabel(ax.get_ylabel(), rotation=0)
ax.yaxis.get_label().set_horizontalalignment("right")
    grid.fig.tight_layout()
plt.show()
num_features = get_numerical_feature_names(train_data)
plot_pairplot(train_data, num_features, height=1)
# # Feature engineering (FE) [↑](#top)
from sklearn.metrics import auc
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
from xgboost.sklearn import XGBClassifier
from catboost import CatBoostClassifier
xgb_params = {
"seed": Cfg.RANDOM_STATE,
"objective": "binary:logistic",
"eval_metric": "auc",
"tree_method": "hist",
"max_depth": 5,
"eta": 0.01,
"n_estimators": 100,
}
cat_params = {
"learning_rate": 0.04,
"depth": 3,
"n_estimators": 10,
"eval_metric": "AUC",
"random_seed": Cfg.RANDOM_STATE,
"verbose": 0,
}
def score_dataset(X, y, prev_score=0, model=CatBoostClassifier(**cat_params)):
X = X.copy()
for colname in X.select_dtypes(["category", "object"]):
X[colname], _ = X[colname].factorize()
score = cross_val_score(model, X, y, cv=5, scoring="roc_auc").mean()
diff = score - prev_score
direction = "↑" if diff > 0 else "↓"
return score, direction, diff
X = train_data.copy()
y = X.pop(Cfg.TARGET)
mi_scores = make_mi_scores(X, y)
fig, ax = plt.subplots(figsize=(6, 3))
plot_mi_scores(mi_scores, ax=ax)
plt.show()
baseline_score, _, _ = score_dataset(X, y)
print(f"Baseline Score: {baseline_score}") # 0.7699112155633895
# ___
# ## Specific gravity and disease [↑](#top)
# See: [Urine specific gravity](https://en.wikipedia.org/wiki/Urine_specific_gravity)
# Normal value: 1.001 - 1.035 g/l
def create_disease_feature(X):
    labels = ["hyposthenurie", "eusthenurie", "hypersthenurie"]
    X["disease"] = pd.cut(
        X["gravity"], bins=[0, 1.010, 1.030, 2], labels=labels, ordered=True
    )
    return X
X = create_disease_feature(X)
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
sns.countplot(data=X, x="disease", hue=y, palette=two_colors, alpha=0.8, ax=ax)
ax.set_title(f"Specific gravity and disease")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.show()
# sns.relplot(data=X, x="urea", y="calc", hue=y, palette=two_colors, col='disease');
# ## What is a normal urine pH?
# A neutral pH is 7.0. The average urine sample tests at about 6.0, but typical urine pH may range from 4.5–8.0.
# The higher the number, the more basic your urine is. The lower the number, the more acidic your urine is.
# https://www.onmeda.de/krankheiten/blasensteine-id200175/
def create_ph_level_feature(X):
    labels = ["acidic", "normal", "basic"]
    X["ph_level"] = pd.cut(
        X["ph"], bins=[0, 5.5, 7.0, 12], labels=labels, ordered=True
    )
    return X
X = create_ph_level_feature(X)
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
sns.countplot(data=X, x="ph_level", hue=y, palette=two_colors, alpha=0.8, ax=ax)
ax.set_title(f"Normal pH level")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.show()
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
# sns.relplot(data=X, x="cond", y="calc", hue=y, palette=two_colors, col='ph_level');
def create_features(X):
X = create_disease_feature(X)
X = create_ph_level_feature(X)
return X
X = train_data.copy()
y = train_data[Cfg.TARGET]
X = create_features(X)
mi_scores = make_mi_scores(X, y)
fig, ax = plt.subplots(figsize=(6, 4))
plot_mi_scores(mi_scores, ax=ax)
plt.show()
# ___
# # Modeling [↑](#top)
import optuna
optuna.logging.set_verbosity(optuna.logging.CRITICAL)
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, RocCurveDisplay
from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost.sklearn import XGBClassifier
from lightgbm.sklearn import LGBMClassifier
from sklearn.ensemble import StackingClassifier, VotingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_validate
def plot_model_result(y_pred, y_true, y_pred_proba):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))
ConfusionMatrixDisplay.from_predictions(
y_true, y_pred, ax=ax1, cmap="Blues", normalize="true", colorbar=False
)
ax1.set_title("Confusion Matrix")
sns.histplot(
data=y_pred_proba, palette=two_colors, legend=True, bins=30, kde=True, ax=ax2
)
ax2.set_xlabel("Prediction Probapility")
ax2.set_ylabel("Probabitity")
# ROC curve
RocCurveDisplay.from_predictions(y_true, y_pred, ax=ax3)
ax3.set_title("ROC")
plt.tight_layout()
plt.show()
print(classification_report(y_true, y_pred))
X = train_data.copy()
y = X.pop(Cfg.TARGET)
X = factorize(create_features(X))
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=Cfg.TEST_SIZE, random_state=Cfg.RANDOM_STATE
)
pd.DataFrame(
{"Rows": [X_train.shape[0], X_val.shape[0]], "Dataset": ["Train", "Validation"]}
).set_index("Dataset")
def xgb_objective(trial):
eta = trial.suggest_float("eta", 0, 1)
max_depth = trial.suggest_int("max_depth", 5, 30)
n_estimators = trial.suggest_int("n_estimators", 100, 300)
reg_alpha = trial.suggest_float(
"reg_alpha", 1e-8, 10, log=True
) # l1 regularization
reg_lambda = trial.suggest_float(
"reg_lambda", 1e-8, 10, log=True
) # l2 regularization
model = XGBClassifier(
eta=eta,
n_estimators=n_estimators,
max_depth=max_depth,
seed=Cfg.RANDOM_STATE,
eval_metric="auc",
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
)
y_pred = model.fit(X_train, y_train).predict(X_val)
score = roc_auc_score(y_val, y_pred)
return score
study = optuna.create_study(direction="maximize")  # we want to maximize ROC AUC
study.optimize(xgb_objective, Cfg.N_TRIALS)
xgb_params = study.best_params.copy()
xgb_params.update({"eval_metric": "auc", "seed": Cfg.RANDOM_STATE})
xgb_params
def cat_objective(trial):
    learning_rate = trial.suggest_float("learning_rate", 0.0, 0.1)
    depth = trial.suggest_int("depth", 3, 5)
    n_estimators = trial.suggest_int("n_estimators", 10, 350)
model = CatBoostClassifier(
learning_rate=learning_rate,
depth=depth,
n_estimators=n_estimators,
verbose=0,
random_seed=Cfg.RANDOM_STATE,
eval_metric="AUC",
)
y_pred = model.fit(X_train, y_train).predict(X_val)
score = roc_auc_score(y_val, y_pred)
return score
study = optuna.create_study(direction="maximize")  # maximize ROC AUC
study.optimize(cat_objective, n_trials=Cfg.N_TRIALS)
cat_params = study.best_params.copy()
cat_params.update({"eval_metric": "AUC", "random_seed": Cfg.RANDOM_STATE, "verbose": 0})
cat_params
def lgbm_objective(trial):
learning_rate = trial.suggest_float("learning_rate", 5e-4, 0.75, log=True)
n_estimators = trial.suggest_int("n_estimators", 100, 1500, log=True)
    max_depth = trial.suggest_int("max_depth", 5, 30)
    num_leaves = trial.suggest_int("num_leaves", 2, 128, log=True)
    colsample_bytree = trial.suggest_float("colsample_bytree", 0.17, 1)
    subsample = trial.suggest_float("subsample", 0, 1.0)
reg_alpha = trial.suggest_float(
"reg_alpha", 1e-8, 10, log=True
) # l1 regularization
reg_lambda = trial.suggest_float(
"reg_lambda", 1e-8, 10, log=True
) # l2 regularization
model = LGBMClassifier(
learning_rate=learning_rate,
n_estimators=n_estimators,
max_depth=max_depth,
num_leaves=num_leaves,
colsample_bytree=colsample_bytree,
subsample=subsample,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
metric="AUC",
seed=Cfg.RANDOM_STATE,
)
y_pred = model.fit(X_train, y_train).predict(X_val)
score = roc_auc_score(y_val, y_pred)
return score
study = optuna.create_study(direction="maximize")  # maximize ROC AUC
study.optimize(lgbm_objective, n_trials=Cfg.N_TRIALS)
lgbm_params = study.best_params.copy()
lgbm_params.update({"metric": "AUC", "seed": Cfg.RANDOM_STATE})
lgbm_params
# xgb_params = {
# 'seed': Cfg.RANDOM_STATE,
# 'objective': 'binary:logistic',
# 'eval_metric': 'auc',
# 'tree_method' : 'hist',
# 'max_depth' : 5,
# 'eta' : .01,
# 'n_estimators' : 100
# }
# lgbm_params = {
# 'seed': Cfg.RANDOM_STATE,
# 'objective': 'binary:logistic',
# 'n_estimators' : 403
# }
# cat_params = {
# 'iterations': 100,
# 'learning_rate': 0.1,
# 'early_stopping_rounds': 10,
# 'max_depth': 5,
# 'eval_metric' : 'AUC',
# 'random_seed': Cfg.RANDOM_STATE
# }
# estimators = [
# ('xgb', XGBClassifier(**xgb_params)),
# ('lgbm', LGBMClassifier(verbose=-1, force_row_wise=True, **lgbm_params)),
# ('cat', CatBoostClassifier(**cat_params)),
# ]
# model = VotingClassifier(
# estimators = estimators,
# verbose=False,
# voting='soft',
# #weights=[0.2, 0.2, 0.6]
# ).fit(X_train, y_train)
#
model = CatBoostClassifier(**cat_params).fit(X_train, y_train)
y_pred = model.predict(X_val)
y_pred_proba = model.predict_proba(X_val)
plot_model_result(y_pred, y_val, y_pred_proba)
# ___
# # Submission [↑](#top)
X = factorize(create_features(test_data.copy()))
y_pred_submission = model.predict_proba(X)[:, 1]
submission_data = pd.DataFrame(
{
Cfg.INDEX: test_data.index,
Cfg.TARGET: y_pred_submission,
}
).set_index(Cfg.INDEX)
submission_data
# save submission file
submission_data.to_csv(Cfg.SUBMISSION_FILE)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from PIL import Image
import os
import time
import torchvision.models as models
import json
from IPython.display import FileLink, HTML
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# # 1. Importing and Managing Data
transformToTensor = transforms.Compose(
    [transforms.Resize(64), transforms.CenterCrop((64, 64)), transforms.ToTensor()]
)
f = open("/kaggle/input/eurosat-dataset/EuroSAT/label_map.json", "r")
label_to_index = json.load(f)
index_to_label = {label_to_index[k]: k for k in label_to_index}
type_to_folder = {
"valid": "/kaggle/input/eurosat-dataset/EuroSAT/validation.csv",
"test": "/kaggle/input/eurosat-dataset/EuroSAT/test.csv",
"train": "/kaggle/input/eurosat-dataset/EuroSAT/train.csv",
}
class EurosatDataManager:
def __init__(self, _type):
self.pd = pd.read_csv(type_to_folder[_type])
self.folder_base = "/kaggle/input/eurosat-dataset/EuroSAT/"
def get_batch(self, size):
ix = np.random.randint(0, len(self.pd), (size,))
X, Y = list(self.pd.iloc[ix].Filename), list(self.pd.iloc[ix].Label)
return torch.stack(
[self.path_to_tensor(self.folder_base + x) for x in X]
), torch.tensor(Y, dtype=torch.long)
def visualize(self, plt):
fig, axs = plt.subplots(4, 4, figsize=(20, 20))
X, labels = self.get_batch(16)
labels = labels.tolist()
for i in range(4):
for j in range(4):
axs[i, j].imshow(X[i * 4 + j])
axs[i, j].set_title(index_to_label[labels[i * 4 + j]])
plt.show()
def path_to_tensor(self, path):
img = Image.open(path)
return transformToTensor(img).permute(1, 2, 0)
dm_train = EurosatDataManager("train")
dm_train.visualize(plt)
dm_train.get_batch(23)[0].shape
# N,X,Y,C
dm_valid = EurosatDataManager("valid")
dm_test = EurosatDataManager("test")
dataset_managers = {"valid": dm_valid, "train": dm_train, "test": dm_test}
print(f"Train set examples: {len(dm_train.pd)}")
print(f"Validation set examples: {len(dm_valid.pd)}")
print(f"Test set examples: {len(dm_test.pd)}")
# ## Distribution of data
#
dm_train.pd["Label"].hist()
# # Evaluation
def getYPred(model, validX, validY):
examples = validX.to("cuda")
results = model(examples.permute(0, 3, 1, 2).type(torch.cuda.FloatTensor))
results = nn.functional.softmax(results, dim=1)
results = torch.argmax(results, dim=1)
return results
def evaluate_model(model, dataset, batch_size=20):
    # To evaluate on every image without running out of GPU memory,
    # predictions are made in small batches controlled by batch_size.
    dm = dataset_managers[dataset]
    model.eval()
    Ytrue = []
    Ypred = []
    for i in range((len(dm.pd) + batch_size - 1) // batch_size):
        ix = i * batch_size
labels = list(dm.pd["Label"].iloc[ix : ix + batch_size])
paths = list(dm.pd["Filename"].iloc[ix : ix + batch_size])
X_path = paths
arr = []
for x in X_path:
            x = dm.path_to_tensor(dm.folder_base + x)
arr.append(x)
Xt = torch.stack(arr)
Ytrue += labels
Ypred += getYPred(model, Xt, labels).tolist()
print(classification_report(Ytrue, Ypred))
del Xt
del Ytrue
torch.cuda.empty_cache()
# ### Functions for Saving Models
def get_download_link(model, filename):
os.chdir(r"/kaggle/working")
torch.save(model.state_dict(), filename)
return FileLink(filename)
# ## Train Function
def train(
model, steps, batch_size, dm, lr=0.05, dm_valid=None, validation_batch_size=10
):
model.train()
loss_i = []
loss_val_i = []
optim = torch.optim.Adam(model.parameters(), lr=lr)
for i in range(steps):
start_time = time.time()
xtr, ytr = dm.get_batch(batch_size)
ytr.type(torch.cuda.LongTensor)
ytr = ytr.to(torch.device("cuda:0"))
# xtr = n,h,w, c
# conv2d (input layer ) needs this shape: n, c, h, w
xtr = xtr.permute(0, 3, 1, 2).type(torch.cuda.FloatTensor)
xtr = xtr.to(torch.device("cuda:0"))
out = model(xtr)
loss = nn.CrossEntropyLoss()(out, ytr)
optim.zero_grad()
loss.backward()
optim.step()
acc = 0.0
# COMPUTE VALIDATION LOSS
if dm_valid != None:
with torch.no_grad():
x, y = dm_valid.get_batch(validation_batch_size)
y.type(torch.cuda.LongTensor)
y = y.to(torch.device("cuda:0"))
x = x.permute(0, 3, 1, 2).type(torch.cuda.FloatTensor)
x = x.to(torch.device("cuda:0"))
out = model(x)
loss_val = nn.CrossEntropyLoss()(out, y)
loss_val_i.append(loss_val.item())
results = nn.functional.softmax(out, dim=1)
results = torch.argmax(results, dim=1)
acc = torch.mean(torch.eq(y, results).float())
del x
del y
del loss_val
del out
torch.cuda.empty_cache()
loss_i.append(loss.item())
if i % 50 == 0:
print(f" Step: {i+1}/{steps} Loss: {loss.item()}", end="")
if dm_valid != None:
print(f" ValLoss (sampled) = {loss_val_i[-1]}", end="")
print(" t= {:.2f}s val_acc:{:.5f}".format(time.time() - start_time, acc))
del xtr
del ytr
del loss
torch.cuda.empty_cache()
return loss_i, loss_val_i
# # Train a classifier from scratch
class ResNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResNetBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.2)
self.conv2 = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(out_channels)
self.skip_connection = nn.Sequential()
        # The skip connection gives gradients a direct path back; even if the conv
        # layers learn poorly, the identity path still carries useful signal, so it
        # also acts as a form of regularization.
if stride != 1 or in_channels != out_channels:
self.skip_connection = nn.Sequential(
nn.Conv2d(
in_channels, out_channels, kernel_size=1, stride=stride, bias=False
),
nn.BatchNorm2d(out_channels),
)
def forward(self, x):
residual = self.skip_connection(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.dropout(x)
x = self.conv2(x)
x = self.bn2(x)
x += residual
x = self.relu(x)
return x
class ResNet(nn.Module):
def __init__(self, num_classes=100):
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.layer1 = nn.Sequential(ResNetBlock(64, 64), ResNetBlock(64, 64))
self.layer2 = nn.Sequential(
ResNetBlock(64, 128, stride=2), ResNetBlock(128, 128)
)
self.layer3 = nn.Sequential(
ResNetBlock(128, 256, stride=2), ResNetBlock(256, 256)
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(256, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
# x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
myresnet = ResNet(10)
# print(n_params(myresnet))
if torch.cuda.device_count() > 1:
myresnet = nn.DataParallel(myresnet)
myresnet.to(DEVICE)
loss_i, loss_val_i = train(
myresnet, 1500, 100, dm_train, lr=0.001, dm_valid=dm_valid, validation_batch_size=20
)
loss_i2, loss_val_i2 = train(
myresnet,
1500,
100,
dm_train,
lr=0.0005,
dm_valid=dm_valid,
validation_batch_size=20,
)
loss_i3, loss_val_i3 = train(
myresnet, 500, 100, dm_train, lr=0.0001, dm_valid=dm_valid, validation_batch_size=20
)
plt.figure(figsize=(7, 5))
plt.plot(loss_i + loss_i2 + loss_i3, label="Train Loss")
plt.plot(loss_val_i + loss_val_i2 + loss_val_i3, label="Validation loss")
plt.legend()
evaluate_model(myresnet, "valid")
evaluate_model(myresnet, "test")
get_download_link(myresnet, "eurosat_1.pt")
# # Transfer Learning on a pretrained GoogLeNet
pretrained_net = models.googlenet(pretrained=True)
sum([param.numel() for param in pretrained_net.parameters()])
pretrained_net = nn.DataParallel(pretrained_net)
pretrained_net.to(DEVICE)
# ### Freeze all layers but the last
for param in pretrained_net.module.parameters():
param.requires_grad = False
pretrained_net.module.fc = nn.Linear(in_features=1024, out_features=10, bias=True)
# the parameters of the freshly created fc layer already have requires_grad=True
pretrained_net.to("cuda")
loss_Ti, loss_val_Ti = train(
pretrained_net,
50,
100,
dm_train,
lr=0.005,
dm_valid=dm_valid,
validation_batch_size=20,
)
# ### Unfreeze all the parameters
for param in pretrained_net.module.parameters():
    param.requires_grad = True
loss_Ti2, loss_val_Ti2 = train(
pretrained_net,
1000,
100,
dm_train,
lr=0.005,
dm_valid=dm_valid,
validation_batch_size=20,
)
loss_Ti3, loss_val_Ti3 = train(
pretrained_net,
1000,
100,
dm_train,
lr=0.0001,
dm_valid=dm_valid,
validation_batch_size=20,
)
loss_Ti4, loss_val_Ti4 = train(
pretrained_net,
1000,
100,
dm_train,
lr=0.0001,
dm_valid=dm_valid,
validation_batch_size=20,
)
loss_Ti5, loss_val_Ti5 = train(
pretrained_net,
1000,
42,
dm_train,
lr=0.0001,
dm_valid=dm_valid,
validation_batch_size=20,
)
loss_Ti6, loss_val_Ti6 = train(
pretrained_net,
1000,
200,
dm_train,
lr=0.0005,
dm_valid=dm_valid,
validation_batch_size=20,
)
plt.figure(figsize=(7, 5))
plt.plot(
loss_Ti + loss_Ti2 + loss_Ti3 + loss_Ti4 + loss_Ti5 + loss_Ti6, label="Train Loss"
)
plt.plot(
loss_val_Ti
+ loss_val_Ti2
+ loss_val_Ti3
+ loss_val_Ti4
+ loss_val_Ti5
+ loss_val_Ti6,
label="Validation loss",
)
plt.legend()
evaluate_model(pretrained_net, "valid")
get_download_link(pretrained_net, "pretrained_googLeNet.pt")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### 🇰🇷 **Introduction**
# ***
# K-dramas are drama television series produced and aired in South Korea, starring Korean actors. These shows are made in the Korean language and tend to follow a certain type of structure. They are mostly limited series, often completed in one season, that air twice a week during nighttime slots.
# Unlike the extended and overarching plots of other drama television, K-dramas are notably compact, telling their complete story within a limited but drama-filled number of episodes.
# While K-pop may have been the first breakthrough success in the US, Korean dramas are rapidly becoming mainstream in the country as well. Between 2019 and 2021, K-dramas saw a 200% spike in viewership numbers.
# According to a 2022 survey conducted across 26 countries around the world, about 36 percent of respondents stated that Korean dramas (K-dramas) were very popular in their countries that year. In total, around 66 percent of respondents felt that K-dramas were generally popular even outside of dedicated fan circles.
# In this notebook, we analyze the top 100 Korean dramas of 2023 and attempt to build a recommender system to suggest the right K-drama just for you!
# #### 🇰🇷 **Install dataset and libraries**
# ***
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
import warnings
warnings.filterwarnings("ignore")
import calendar
import scipy.stats as stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from scipy.stats import levene
import itertools
rc = {
"axes.facecolor": "#FFF9ED",
"figure.facecolor": "#FFF9ED",
"axes.edgecolor": "#000000",
"grid.color": "#EBEBE7",
"font.family": "serif",
"axes.labelcolor": "#000000",
"xtick.color": "#000000",
"ytick.color": "#000000",
"grid.alpha": 0.4,
}
sns.set(rc=rc)
from colorama import Style, Fore
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
mgt = Style.BRIGHT + Fore.MAGENTA
gld = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL
df = pd.read_csv("/kaggle/input/top-100-k-drama-2023/top100_kdrama.csv")
df.head()
# #### 🇰🇷 **Basic EDA**
# ***
# 🔎 Basic information of data
# - There are 15 features in the data, of which 5 are numeric (ignoring ID).
# - There are 100 observations (expected, since the data contains the top 100 dramas).
# - Only the end date column contains 12 missing values. Upon further inspection, these dramas have already finished airing.
# - All dramas have a score between 8 and 10. It is expected that the top 100 dramas should have high scores.
# - The duration of each episode is relatively short, the longest being 1 h 40 min.
# - The number of episodes differs widely across dramas, from 30 to 100.
# - It is unclear what drama rank refers to. Why would a drama in the top 100 listings be ranked 368?
#
#
desc = pd.DataFrame(df.describe(include="all").transpose())
def summary_stats(df):
print(f"The shape of the data is: {df.shape}")
summary = pd.DataFrame(df.dtypes, columns=["data type"])
summary["Number of missing values"] = df.isnull().sum().values
summary["% of missing values"] = df.isnull().sum().values / len(df) * 100
summary["min value"] = desc["min"].values
summary["mean value"] = desc["mean"].values
summary["max value"] = desc["max"].values
return summary
summary_stats(df)
df[df["Rank"] == 368]
df[df["End_date"].isna()]
#
# Most of these dramas have already finished airing. However, we will not spend the effort to fill in these missing dates, as we will not be using the end date column.
#
# Convert watchers to integer
df["Watchers"] = df["Watchers"].apply(lambda x: int(x.replace(",", "")))
# #### 🇰🇷 **Visualization (Aired month)**
# ***
# Extract the month that the shows are aired
df["Month aired"] = pd.to_datetime(df["Start_date"]).dt.month
df.head()
sns.countplot(x="Month aired", data=df)
#
# The winter season (Nov - Mar) has the largest number of shows aired.
#
sns.barplot(x=df["Month aired"], y="Watchers", data=df, ci=None)
#
# - Highest viewership months: Feb and Dec
# - Lowest viewership months: March, May and October
# - 🤔 But why the low viewership? Is it because many people are busy in those months, or because there aren't many good shows airing then?
#
#
df[df["Month aired"].isin([3, 5, 10])]
#
# - The low viewership might not be because of a lack of good shows. There are many good shows, such as Hospital Playlist and The Glory, in those months.
# - The fall in viewership in March might be due to the start of a new university semester. Students spend more time adjusting to their studies and watch fewer dramas instead.
#
sns.barplot(x="Month aired", y="Score", data=df, ci=None)
#
# We see that there is no association between the month the show is aired and its score.
# #### 🇰🇷 **ANOVA (Aired month and score)**
# ***
# - ANOVA can be used to determine if there is any association between a numeric and a categorical variable.
# - $H_0$: There is no association between the two variables.
# - $H_1$ : There is association between the two variables.
# Assumptions for the ANOVA model (a minimal check of the equal-variance assumption is sketched below):
# - Independence of observations
# - Normal distribution
# - Equal variances
#
#
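# A minimal sketch, added for illustration, of checking the equal-variance assumption
# across the aired months with Levene's test; the normality check follows in the next cell.
score_by_month = [g["Score"].values for _, g in df.groupby("Month aired")]
levene_stat, levene_p = stats.levene(*score_by_month)
print(f"Levene statistic = {levene_stat:.3f}, p-value = {levene_p:.3f}")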
stats.shapiro(df["Score"])
#
# - By the Shapiro-Wilk test, the distribution of the score is not normal. Hence, ANOVA cannot be used.
# - Nonetheless, the graph provides sufficient evidence that there is no association between the month the show is aired and its score.
#
# #### 🇰🇷 **Kruskal Wallis test (Aired month and score)**
# ***
# - A non-parametric statistical test used to compare two or more groups (often framed as comparing their medians).
# - An alternative to the t-test or ANOVA when the assumptions of normality or equal variances are not met; it is an extension of the Wilcoxon rank-sum test.
stats.kruskal(
    *[df[df["Month aired"] == m]["Score"] for m in range(1, 13)]
)
#
# By the Kruskal-Wallis test, we do not reject the null hypothesis that there is no association between month aired and score.
# #### 🇰🇷 **Visualization (Day Aired)**
# ***
day_df = (
    df["Day_aired"]
    .str.split(", ", expand=True)
    .stack()
    .reset_index(level=1, drop=True)
    .rename("day")
)
day_counts = day_df.value_counts()
day_counts
#
# As expected, Fri - Sun have the most dramas aired, since it is the weekend and people have more time to watch.
#
df["Day_aired"].value_counts()
sns.barplot(y="Day_aired", x="Duration", data=df, ci=None)
#
# Dramas aired on Thursday have the longest mean duration.
#
sns.barplot(y="Day_aired", x="Watchers", data=df, ci=None)
#
# Dramas aired on both Saturday and Sunday have the highest average viewership.
#
sns.barplot(y="Day_aired", x="Score", data=df, ci=None)
#
# There is no association between days aired and score.
# #### 🇰🇷 **Visualization (Main role actors)**
# ***
actor_df = (
df["Main Role"]
.str.split(", ", expand=True)
.stack()
.reset_index(level=1, drop=True)
.rename("actor")
)
actor_counts = actor_df.value_counts()
top_actors = actor_counts.head(20)
top_actors
#
# - The above shows the top 20 actors that appeared most often in main roles in the top 100 dramas.
# - The highest number of times an actor appeared in a main role is 5.
# #### 🇰🇷 **ANOVA (Actors and viewership)**
# ***
# In this section, we investigate if actors appearing in the drama influence viewership.
# We expect that fans of certain actors (e.g. IU) will watch the dramas in which they are cast, leading to an increase in viewership.
stats.shapiro(np.log(df["Watchers"]))
#
# By the Shapiro-Wilk test, we do not reject the null hypothesis that log(watchers) is normally distributed.
#
stats.levene(df["Main Role"], df["Watchers"])
# - There are too many groups to check for variance equality. We take only
model = ols("Watchers ~ Main Role", data=df).fit()
anova_table = anova_lm(model)
print(anova_table)
|
#
#
# Dataviz - Data Science Specialization Program - FACENS
# # Exercise 2
# * **Due date:** xx/01/2020
# * **Professor:** Matheus Mota
# * **Student:** João Vitor Bracale de Jesus
# * **Student ID (RA):** 191221
# ***Required libraries***
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# ***Import/visualization of the dataset***
bf = pd.read_csv("../input/dataviz-facens-20182-ex3/BlackFriday.csv", delimiter=",")
bf.head(10)
# ## Question 1
# Build one or more violin plots that allow comparing the amount spent with the buyers' age group.
plt.figure(figsize=(15, 4))
plt.title("Total purchases by age group")
sns.violinplot(y=bf["Purchase"], x=bf["Age"].sort_values(ascending=True), scale="count")
# ## Question 2
# Plot the N most purchased products, where N > 8.
plt.figure(figsize=(8, 5))
bf["Product_ID"].value_counts().head(8).plot(
    kind="bar", title="The 8 most purchased products"
)
# ## Question 3
# Plot the distribution of the amounts spent in each age group of the buyers associated with the 5 most frequent occupations.
# Grouping the information
gp_u = bf.groupby(["User_ID", "Age", "Occupation"]).sum().reset_index()
# 5 most frequent occupations
freq5 = gp_u[gp_u["Occupation"].isin(gp_u["Occupation"].value_counts().head(5).index)]
# 5 most frequent occupations, sorted by age
freq5_idade = freq5.sort_values(by="Age")
# Figure size
plt.figure(figsize=(20, 12))
# Build the plot
sns.boxplot(
x=freq5_idade["Occupation"],
y=freq5_idade["Purchase"],
hue=freq5_idade["Age"],
linewidth=5,
)
# ## Question 4
# Visually represent the relationship between occupation and marital status for purchases worth more than nine thousand.
sns.catplot(
x="Marital_Status",
y="Purchase",
hue="Marital_Status",
kind="point",
col="Occupation",
aspect=0.4,
col_wrap=7,
data=bf[bf["Purchase"] > 9000],
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Bay Wheels Trip History Data in January 2020
# ### About the data
# [Bay Wheels](https://www.lyft.com/bikes/bay-wheels) is a bike sharing service in the San Francisco Bay Area available on the Lyft app. The dataset includes anonymized trip history data of Bay Wheels. The data is provided by Lyft for public use.
# ([Data source](https://www.lyft.com/bikes/bay-wheels/system-data))
# ### Why the data
# Bikes are a fun and affordable way to explore a city or commute in short distance. Some of the research questions that I am hoping to explore in the dataset include:
# * Who are the customers?
# * What are the trips like in general?
# * Is there any interesting pattern among the trips?
# I am especially looking at certain columns to come up with findings and visualizations about:
# * How many frequent members versus casual customers are there?
# * How long and how far do customers bike?
# * What are the most popular start and end stations?
import pandas as pd
baywheels = pd.read_csv("../input/202001-baywheels-tripdata.csv")
baywheels.head()
# ### Q1: How many frequent members versus casual customers are there?
# Among 295,854 trip data, there are 58% membership subscribers and 42% casual customers.
baywheels["user_type"].value_counts().sort_index().plot.barh()
baywheels["user_type"].value_counts(normalize=True)
# ### Q2: How long do customers bike?
# On average, customers bike around 780 seconds (13 minutes) for each trip.
# The longest trip lasted 811,077 seconds (about 9 days); it was likely a customer who forgot to return the bike.
# The shortest trip lasted 60 seconds (1 minute); it was likely a customer who rented the bike and then decided not to use it.
baywheels["duration_sec"].describe()
# ### Q3: How far do customers bike?
# On average, customers bike around 2.55 kilometers (straight-line distance between start and end stations). The longest trip spans 12,795.17 kilometers, which is definitely an interesting case to dig deeper into. The shortest trips didn't leave their original stations at all, which reflects what we saw in the duration column: some customers rented a bike and then decided not to use it. It might be interesting to clean out these records to see what the rest of the dataset would look like.
import mpu
for i in range(len(baywheels)):
baywheels.at[i, "distance"] = mpu.haversine_distance(
(
baywheels.at[i, "start_station_latitude"],
baywheels.at[i, "start_station_longitude"],
),
(
baywheels.at[i, "end_station_latitude"],
baywheels.at[i, "end_station_longitude"],
),
)
baywheels["distance"].describe()
# ### Q4: What are the most popular start and end stations?
# Top 5 stations where most trips start are:
# 1. "Market St at 10th St"
# 2. "San Francisco Caltrain (Townsend St at 4th St)"
# 3. "Berry St at 4th St"
# 4. "Howard St at Beale St"
# 5. "San Francisco Ferry Building (Harry Bridges Plaza)
# Top 5 stations where most trips end are:
# 1. "San Francisco Caltrain (Townsend St at 4th St)"
# 2. "Montgomery St BART Station (Market St at 2nd St)"
# 3. "Berry St at 4th St"
# 4. "San Francisco Ferry Building (Harry Bridges Plaza)"
# 5. "Market St at 10th St"
# Interestingly, 4 out of 5 are the same stations.
n = 5
popularstartstations = baywheels["start_station_name"].value_counts()[:n].index.tolist()
popularendstations = baywheels["end_station_name"].value_counts()[:n].index.tolist()
print("Popular start stations: %s" % popularstartstations)
print("Popular end stations: %s" % popularendstations)
|
# This is an in-progress notebook.
# Notebook submitted in response to task#3:
# https://www.kaggle.com/bombatkarvivek/paani-foundations-satyamev-jayate-water-cup/tasks?taskId=348
# Aim is to find method that will identify the correct pair of District-Taluka-Village among different datasets.
# # Pain of Entity Matching
# - 'Entity Matching' is a common task in most data engineering pipelines that join multiple datasets.
# - The complexity of this problem escalates when the datasets come from different sources.
# - While working on the WaterCup dataset, we realised that place names are quite often spelled differently across datasets.
# - That led us to create a manual mapping of names, something like this:
# `_df_ListOfTalukas = _df_ListOfTalukas.replace('Ahmednagar','Ahmadnagar') \
# .replace('Buldhana','Buldana') \
# .replace('Sangli','Sangali') \
# .replace('Nashik','Nasik')`
# Of course, this is not the way to go with bigger datasets and more granular mappings!
# - In this notebook we will try to address this issue using various traditional and some non-traditional but innovative methods! A small standard-library sketch of the idea follows below.
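# A minimal sketch of the fuzzy-matching idea using only the standard library's difflib
# (added for illustration; the notebook itself uses fuzzywuzzy further below).
# The example names come from the manual mapping shown above.
import difflib
canonical_districts = ["Ahmadnagar", "Buldana", "Sangali", "Nasik"]
for name in ["Ahmednagar", "Sangli", "Nashik"]:
    match = difflib.get_close_matches(name, canonical_districts, n=1, cutoff=0.6)
    print(name, "->", match[0] if match else "no match")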
import geopandas as gpd
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df_mh_all_villages = gpd.read_file("../input/mh-villages-v2w2/MH_Villages v2W2.shp")[
["DTNAME", "GPNAME", "VILLNAME"]
]
# ['DTNAME','GPNAME','VILLNAME']
print(df_mh_all_villages.shape)
df_mh_all_villages.T
df_mh_all_villages["DTNAME"].unique()
print(len(df_mh_all_villages["DTNAME"].unique()))
df_mh_all_villages[df_mh_all_villages["DTNAME"] == "Sangali"]
df_mh_all_villages[df_mh_all_villages["DTNAME"] == "Mumbai"]
df_mh_all_villages[df_mh_all_villages["VILLNAME"].isnull()].shape
# We need to get rid of rows with missing village name
# Are the village names unique given a district?
df_mh_all_villages.groupby("DTNAME")["VILLNAME"].agg(["count", "nunique"])
# ### There are a lot of duplicate village names within a district - thus we need Taluka information for matching; district and village name alone are not enough
df_ListOfTalukas = pd.read_csv(
"../input/paani-foundations-satyamev-jayate-water-cup/ListOfTalukas.csv"
)
print(df_ListOfTalukas.shape)
df_ListOfTalukas.T
df_ListOfTalukas["District"].unique()
print("Number of unique districts", len(df_ListOfTalukas["District"].unique()))
df_ListOfTalukas[df_ListOfTalukas["District"] == "Sangli"]
# **There are different spellings for district names in the two files, and the number of unique districts also differs**
# * GPNAME - most probably refers to gram panchayat name, so cannot be matched with Taluka
# * We will need to create a list of districts with ground truth spelling - let's use MH_Villages v2W2.shp for that
df_StateLevelWinners = pd.read_csv(
"/kaggle/input/paani-foundations-satyamev-jayate-water-cup/StateLevelWinners.csv"
)
print(df_StateLevelWinners.shape)
df_StateLevelWinners.T
df_StateLevelWinners["District"].unique()
from fuzzywuzzy import fuzz
districts = df_mh_all_villages["DTNAME"].unique().tolist()
def get_best_district_match(mydist, districts=districts):
fuzz_ratio = [fuzz.ratio(mydist, dist) for dist in districts]
max_ratio = max(fuzz_ratio)
idx_max = [i for i, j in enumerate(fuzz_ratio) if j == max_ratio]
# ToDo: if more than one match throw an error
return districts[idx_max[0]]
get_best_district_match("Sangli")
df_StateLevelWinners["district_m"] = df_StateLevelWinners["District"].apply(
lambda x: get_best_district_match(x)
)
_idx = df_StateLevelWinners["District"] != df_StateLevelWinners["district_m"]
df_StateLevelWinners.loc[_idx, ["District", "district_m"]]
|
# **Practice with the Titanic data**
# One of the most infamous shipwrecks in history is the Titanic, which sank after colliding with an iceberg. Unfortunately, there were not enough lifeboats for everyone on board, resulting in the death of 1502 of the 2224 passengers and crew.
# Although some element of luck was involved in survival, it seems that some groups of people were more likely to survive than others. **The challenge here is to build a predictive model that discriminates the groups of people with the highest chances of survival.**
# **Main objectives**:
# - Get familiar with the Kaggle platform and its competitions.
# - Develop and put into practice EDA techniques.
# - Solve the problem and try to improve the score based on methods and ideas seen in notebooks shared by more experienced data scientists from the community.
# **Importing data and required libraries**
# data analysis and transformation
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# machine learning
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
# importing the datasets
import os
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
all_data = pd.concat([train_df, test_df], sort=True)
train_df.head(8)
# **Exploratory analysis**
train_df.describe()
train_df.describe(include=["O"])
# Número de "NaN"s em cada feature
print(all_data.shape)
all_data.isnull().sum().drop("Survived")
# **Observations**:
# - Five features are nominal, of which only "Sex" and "Embarked" have few unique values and can easily be converted into ordinal/discrete numeric variables.
# - Proportion of missing values found: "Cabin" 77% >>> "Age" 20% >> "Embarked" only 2 > "Fare" only 1.
# - The average survival chance in the dataset is 38%, close to the real rate of 32% (1 - 1502/2224). It can also be used as a baseline against which to compare the impact of certain features.
# **Speculations**:
# - PassengerId, Ticket and Name probably do not contribute to survival; dropping them right at the start is convenient because it reduces the data volume, speeds up the code and simplifies the analysis. It is worth noting, though, that the name also carries the passenger's title, and extracting it into a new feature may benefit the model's accuracy.
# - One possible reason for so many null values in the cabin column is that a cabin may have been a luxury of a few passengers, possibly an indicator of their influence. Turning it into a binary variable may benefit the model.
# - It is important to know which features correlate with survival and with each other early in the project, since this guides the decision on which of them to keep and transform; some data such as the fare (Fare) and class (Pclass) may end up saying the same thing and become redundant.
# - If we want to compute correlations right away, it is important to first convert potentially relevant features such as "Sex" and "Embarked" into ordinal/discrete variables.
# - Finally, if the analysis shows that incomplete features such as "Age" and "Embarked" impact survival, they should be filled in rather than discarded.
# Extracting the titles from "Name"
all_data["Title"] = all_data.Name.str.extract(" ([A-Za-z]+)\.", expand=False)
pd.crosstab(all_data["Title"], all_data["Sex"]).T
# Transformando a feature "Cabin" em binária
all_data["Cabin"].fillna(0, inplace=True)
all_data.loc[all_data["Cabin"] != 0, "Cabin"] = 1
# Dropping irrelevant data
Id_test = test_df["PassengerId"]  # needed for the project submission.
all_data = all_data.drop(["PassengerId", "Ticket", "Name"], axis=1)
# Creating a random variable to test some hypotheses
np.random.seed(2020)
all_data["random"] = np.random.randint(0, 2, len(all_data))
# Split
train_df = all_data[: len(train_df)]
test_df = all_data[len(train_df) :]
# **Checking correlations with survival**
def pivota_feature_com_sobrev(feature_analisada):
    # This function builds a small DataFrame with the survival rate
    # and the number of individuals for each element of a feature.
df_pivot = pd.concat(
[
train_df[feature_analisada].rename("# ind").value_counts(),
train_df[[feature_analisada, "Survived"]]
.groupby(feature_analisada, as_index=True)
.mean(),
],
axis=1,
sort=True,
)
df_pivot.index.name = feature_analisada
return round(df_pivot, 3)
display(
pivota_feature_com_sobrev("Sex"),
pivota_feature_com_sobrev("Embarked"),
pivota_feature_com_sobrev("Pclass"),
pivota_feature_com_sobrev("Cabin"),
)
# All of these features appear to impact the chances of survival; they should be filled in and converted to numeric variables.
# Converting the "Sex" feature from nominal to binary
all_data["Sex"].replace(["female", "male"], [0, 1], inplace=True)
# Filling 2 null values with the most common port of embarkation
all_data["Embarked"].fillna("S", inplace=True)
# Convertendo a feature "Embarked" de nominal para discreta
all_data["Embarked"].replace(["S", "Q", "C"], [0, 1, 2], inplace=True)
display(
pivota_feature_com_sobrev("Title").T,
pivota_feature_com_sobrev("Parch").T,
pivota_feature_com_sobrev("SibSp").T,
)
# Note that a large share of the individuals who travelled with company or held rare titles had more than a 50% chance of surviving. However, many of these categories do not contain enough individuals to be representative of the whole; turning these features into ordinal variables could lead to sampling problems for certain values, so it makes more sense to create the following binary variables:
# - "Family": 0 for individuals travelling alone and 1 for those accompanied.
# - "Title": 1 for individuals with rare titles and 0 for common titles.
#
# Transformando a feature "Title"
all_data["Title"] = all_data["Title"].replace(["Mrs", "Miss", "Mr"], 0)
all_data.loc[all_data["Title"] != 0, "Title"] = 1
# Criando a feature "Family"
all_data["Family"] = all_data["Parch"] + all_data["SibSp"]
all_data.loc[all_data["Family"] > 0, "Family"] = 1
# Dropping the original columns
all_data.drop(["SibSp", "Parch"], axis=1, inplace=True)
# Split
train_df = all_data[: len(train_df)]
test_df = all_data[len(train_df) :]
display(pivota_feature_com_sobrev("Title"), pivota_feature_com_sobrev("Family"))
# Checking the random feature
pivota_feature_com_sobrev("random")
# As expected, this feature does not help discriminate who has the best chances of survival, but we will keep it for future comparisons
# **Correlation matrix**
# Split
train_df = all_data[: len(train_df)]
test_df = all_data[len(train_df) :]
train_df.corr().style.background_gradient(cmap="Blues").set_precision(2)
# The correlation between age and survival is low (-0.07); however, when we analyse the KDE curves we find age ranges that are more likely to survive. This suggests that the feature is relevant to the model and should be filled in.
kde_age = sns.FacetGrid(train_df, col="Pclass", row="Sex", hue="Survived")
kde_age.add_legend().set(xlim=(0, 100))
kde_age = kde_age.map(sns.kdeplot, "Age", shade=True)
# One way to complete the age information is to fill it with the median age, but this value may vary across different groups of people. It is worth checking the "Family" and "Pclass" features, which are well correlated with "Age":
plt.figure(figsize=(10, 7))
plt.title("Idade em função da classe e se o passageiro viajou com a familia")
sns.violinplot(
x="Pclass", y="Age", hue="Family", data=all_data, split=True, inner="quartile"
)
plt.show()
# **Filling missing values**
# As expected, different groups have different age distributions, and since we have this information, it is better to fill the missing values conditionally (an equivalent groupby one-liner is sketched right after the code below):
matriz_de_medianas = np.zeros((2, 3))
for classe in range(1, 4):
for familia in range(0, 2):
matriz_de_medianas[familia, classe - 1] = all_data.loc[
(all_data["Pclass"] == classe) & (all_data["Family"] == familia)
]["Age"].median()
all_data.loc[
(np.isnan(all_data["Age"]))
& (all_data["Family"] == familia)
& (all_data["Pclass"] == classe),
"Age",
] = matriz_de_medianas[familia, classe - 1]
medianas = pd.DataFrame(matriz_de_medianas, columns=[1, 2, 3])
medianas.index.name = "Family"
medianas.columns.name = "Pclass"
medianas
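# Aside (a sketch on a throwaway copy, just to show an equivalent one-liner): the same
# conditional imputation can be written with groupby/transform. The "Age" column in
# all_data has already been filled by the loop above, so this copy ends up with no
# missing ages either.
age_check = all_data.copy()
age_check["Age"] = age_check.groupby(["Pclass", "Family"])["Age"].transform(
    lambda s: s.fillna(s.median())
)
print("Missing ages left in the sketch copy:", age_check["Age"].isnull().sum())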
# The same is done to fill the single null value of the "Fare" feature:
all_data.loc[np.isnan(all_data["Fare"])]
# Filling the null value with the median fare of this group of individuals
# (note the column selector in .loc, so only "Fare" is overwritten and not the whole row)
all_data.loc[np.isnan(all_data["Fare"]), "Fare"] = all_data.loc[
    (all_data["Pclass"] == 3) & (all_data["Sex"] == 1) & (all_data["Family"] == 0)
]["Fare"].median()
# Our data is finally organized, cleaned and transformed:
all_data.head()
# **Modeling and prediction**
# Split
train_df = all_data[: len(train_df)]
test_df = all_data[len(train_df) :]
train_data = train_df.drop("Survived", axis=1)
train_target = train_df["Survived"]
X_test = test_df.drop("Survived", axis=1)
train_data.shape, train_target.shape, X_test.shape
def treina_e_testa_modelo(modelo, dados_de_treino, rotulo, cross_validation_folders):
modelo.fit(dados_de_treino, rotulo)
score = cross_val_score(
modelo, dados_de_treino, rotulo, cv=cross_validation_folders
).mean()
model_name = str(modelo).split("(")[0]
print(model_name + " accuracy: " + str(round(score.mean() * 100, 2)) + "%")
return score
knn = KNeighborsClassifier(n_neighbors=3)
knn_score = treina_e_testa_modelo(knn, train_data, train_target, 5)
random_forest = RandomForestClassifier(n_estimators=100, random_state=2020)
random_forest_score = treina_e_testa_modelo(random_forest, train_data, train_target, 5)
logreg = LogisticRegression(solver="newton-cg", random_state=2020)
logreg_score = treina_e_testa_modelo(logreg, train_data, train_target, 5)
svc = SVC(gamma="scale", random_state=2020)
svc_score = treina_e_testa_modelo(svc, train_data, train_target, 5)
# **Understanding the results**
# Random forest and logistic regression were the most accurate models, with roughly 80% accuracy. The next step is to use a few methods to try to understand which features matter most to each one and how they work.
def importance_plot(x_name, y_name, dados, graph_title):
sns.barplot(
x=x_name, y=y_name, data=dados, orient="h", color="royalblue"
).set_title(graph_title, fontsize=20)
plt.show()
feature_importance_df = pd.DataFrame(
{
"Feature": train_data.columns,
"feature_importance": random_forest.feature_importances_,
}
).sort_values("feature_importance", ascending=False)
coeff_df = pd.DataFrame(
{"Feature": train_data.columns, "feature correlation": pd.Series(logreg.coef_[0])}
).sort_values("feature correlation", ascending=False)
importance_plot(
"feature_importance",
"Feature",
feature_importance_df,
"RForest feature importance_defaut",
)
importance_plot(
"feature correlation",
"Feature",
coeff_df,
"logistic regression feature correlation",
)
# - For the logistic regression algorithm, the 'random' feature was ranked as more correlated with survival than age and fare. We know this makes no sense, but it was expected, since this model does not handle continuous variables like these well.
# - For the random forest, 'random' is ranked as more important than family and title, which also does not make much sense. Given this, there are two possibilities: these three features do not contribute much to the model and should be dropped, or the ".feature_importances_" method is not very accurate. The latter point is confirmed by the literature, which describes it as biased, even though it is very direct and fast.
# - One option is to test recursive methods, which are much more accurate but computationally expensive, which is not a problem for a dataset as small as this one (a small permutation-importance sketch follows as an aside, before the drop-and-retrain loop).
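# Aside (a sketch, not part of the original analysis): scikit-learn's permutation_importance
# (available from sklearn >= 0.22) is a cheaper middle ground between .feature_importances_
# and the full drop-and-retrain loop below; it shuffles one feature at a time and measures
# the score drop. Here it is evaluated on the training set only, for simplicity.
from sklearn.inspection import permutation_importance

perm = permutation_importance(
    random_forest, train_data, train_target, n_repeats=10, random_state=2020
)
perm_df = pd.DataFrame(
    {"Feature": train_data.columns, "Importance": perm.importances_mean}
).sort_values("Importance", ascending=False)
display(perm_df)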
rforest_clone = clone(random_forest)
logreg_clone = clone(logreg)
rf_impact = [] # rf = random forest
lr_impact = [] # lr = logistic regression
for feature in train_data.columns:
rf_new_score = cross_val_score(
rforest_clone, train_data.drop(feature, axis=1), train_target, cv=5
).mean()
lr_new_score = cross_val_score(
logreg_clone, train_data.drop(feature, axis=1), train_target, cv=5
).mean()
rf_impact.append(random_forest_score - rf_new_score)
lr_impact.append(logreg_score - lr_new_score)
rforest_impact = pd.DataFrame(
data={"Feature": train_data.columns, "Impact": rf_impact}
).sort_values("Impact", ascending=False)
logreg_impact = pd.DataFrame(
data={"Feature": train_data.columns, "Impact": lr_impact}
).sort_values("Impact", ascending=False)
importance_plot("Impact", "Feature", rforest_impact, "Random Forest feature impact")
importance_plot(
"Impact", "Feature", logreg_impact, "Logistic Regression feature impact"
)
random_forest_score = treina_e_testa_modelo(
random_forest,
train_data.drop(["Title", "random", "Family"], axis=1),
train_target,
10,
)
logreg_forest_score = treina_e_testa_modelo(
logreg, train_data.drop(["Cabin", "random", "Fare"], axis=1), train_target, 10
)
predict = random_forest.predict(
X_test.drop(["Title", "random", "Family"], axis=1)
).astype(int)
submission = pd.DataFrame()
submission["PassengerId"] = Id_test
# get predictions
submission["Survived"] = predict
submission.head(15).T
submission.to_csv("random_forest_submission.csv", index=False)
|
# ## Intro
# This notebook is a combination of three great notebooks.
# * @xhlulu [Disaster NLP: Keras BERT using TFHub](https://www.kaggle.com/xhlulu/disaster-nlp-keras-bert-using-tfhub)
# * @Dieter [BERT-Embeddings + LSTM](https://www.kaggle.com/christofhenkel/bert-embeddings-lstm/notebook)
# * Wojtek Rosa [Keras BERT using TFHub (modified train data)](https://www.kaggle.com/wrrosa/keras-bert-using-tfhub-modified-train-data)
# Thanks for their great work. I combine the BERT model from @xhlulu, the LSTM model from @Dieter, and the modified data from @Wojtek Rosa.
# We will use the official tokenization script created by the Google team
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow.keras.backend as K
from tensorflow.keras.layers import (
Dense,
Input,
Bidirectional,
SpatialDropout1D,
Embedding,
add,
concatenate,
)
from tensorflow.keras.layers import (
GRU,
GlobalAveragePooling1D,
LSTM,
GlobalMaxPooling1D,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import (
EarlyStopping,
ModelCheckpoint,
LearningRateScheduler,
)
import tensorflow_hub as hub
import tokenization
train = pd.read_csv("../input/preprocesseddata/train.csv")
test = pd.read_csv("../input/preprocesseddata/test.csv")
submission = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True)
def bert_encoder(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[: max_len - 2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence)
tokens += [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segments_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segments_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
x = SpatialDropout1D(0.3)(sequence_output)
x = Bidirectional(GRU(LSTM_UNITS, return_sequences=True))(x)
x = Bidirectional(GRU(LSTM_UNITS, return_sequences=True))(x)
hidden = concatenate(
[
GlobalMaxPooling1D()(x),
GlobalAveragePooling1D()(x),
]
)
hidden = add([hidden, Dense(DENSE_HIDDEN_UNITS, activation="relu")(hidden)])
hidden = add([hidden, Dense(DENSE_HIDDEN_UNITS, activation="relu")(hidden)])
result = Dense(1, activation="sigmoid")(hidden)
model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=result)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
return model
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
train_input = bert_encoder(train.text.values, tokenizer, max_len=160)
test_input = bert_encoder(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values
import gc
NUM_MODELS = 1
BATCH_SIZE = 16
LSTM_UNITS = 64
EPOCHS = 5
DENSE_HIDDEN_UNITS = 256
checkpoint_predictions = []
checkpoint_val_pred = []
weights = []
for model_idx in range(NUM_MODELS):
model = build_model(bert_layer, max_len=160)
for global_epoch in range(EPOCHS):
model.fit(
train_input,
train_labels,
batch_size=BATCH_SIZE,
validation_split=0.25,
epochs=1,
verbose=1,
callbacks=[
LearningRateScheduler(lambda epoch: 2e-6 * (0.6**global_epoch))
],
)
checkpoint_predictions.append(
model.predict(test_input, batch_size=64).flatten()
)
weights.append(2**global_epoch)
del model
gc.collect()
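# Ensemble the per-epoch prediction snapshots: later epochs get exponentially larger
# weights (2**global_epoch), so the weighted average leans on the better-trained checkpoints.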
test_pred = np.average(checkpoint_predictions, weights=weights, axis=0)
submission["target"] = test_pred.round().astype(int)
submission.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import package here
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
import missingno as msno
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from pathlib import Path
input_path = Path("/kaggle/input/amex-default-prediction/")
# Loading dataset train_data.csv
train_df_sample = pd.read_csv(
"../input/amex-default-prediction/train_data.csv", nrows=100000
)
# Loading dataset train_labels.csv
train_label_df = pd.read_csv("../input/amex-default-prediction/train_labels.csv")
# Loading dataset test_data.csv
test_df = pd.read_csv(
"../input/amex-default-prediction/test_data.csv",
nrows=100000,
index_col="customer_ID",
)
# Merge of train_df_sample and train_label_df dataframe using key as customer_ID
train_df = pd.merge(train_df_sample, train_label_df, how="inner", on=["customer_ID"])
categorical_features = [
"B_30",
"B_38",
"D_114",
"D_116",
"D_117",
"D_120",
"D_126",
"D_63",
"D_64",
"D_68",
]
# The info method shows that the table contains object columns in addition to numeric ones, which need to be processed. Since there are many columns, the missing data is easier to inspect visually.
# Data Summary
summary = train_df.describe(include="all").T
summary["missing"] = train_df.isnull().sum()
summary["unique"] = train_df.nunique()
summary["type"] = train_df.dtypes
# Print
print(summary)
# Features are anonymized and normalized, and fall into the following general categories:
# D_* = Delinquency variables
# S_* = Spend variables
# P_* = Payment variables
# B_* = Balance variables
# R_* = Risk variables
# Show data distribution for different general categories:
# Group according to prefix
feature_groups = {}
for feature in train_df.columns.tolist():
prefix = feature[0]
if prefix not in feature_groups:
feature_groups[prefix] = []
feature_groups[prefix].append(feature)
# Print
for prefix, group in feature_groups.items():
print(f"Feature group {prefix}: {group}")
# Calculate the correlation between target and all the other columns
correlations = train_df.corrwith(train_df["target"])
# Print results
print("Correlations between target and other columns:")
print(correlations)
# Find columns with correlation greater than 0.5
high_correlations = correlations[abs(correlations) > 0.5]
# Print results
print("\n")
print("strongly correlated values with 'target' (with correlation greater than 0.5):")
print(high_correlations)
# Show distribution of 'target' using histplot
sns.histplot(train_df["target"], color="g", bins=100, alpha=0.4)
corr = train_df.drop("target", axis=1).corr()
plt.figure(figsize=(50, 50))
sns.heatmap(
    corr[(corr >= 0.5) | (corr <= -0.4)],  # keep only strong positive or negative correlations
cmap="viridis",
vmax=1.0,
vmin=-1.0,
linewidths=0.1,
annot=True,
annot_kws={"size": 8},
square=True,
)
plt.savefig("heatmap.png")
plt.show()
# Select numerical features
num_df = train_df.select_dtypes(exclude="object")
# Plot distributions of numerical features
ncols = 7
nrows = 28
fig, axes = plt.subplots(nrows, ncols, figsize=(4 * ncols, 4 * nrows), sharex=False)
for idx, ax in enumerate(axes.ravel()):
if idx < len(num_df.columns):
column = num_df.columns[idx]
ax.hist(num_df[column].dropna(), bins=50)
ax.set_title(column)
ax.set_ylabel("Frequency")
else:
ax.set_axis_off()
# Save the histograms as an image file
plt.savefig("histograms.png")
plt.show()
cal_df = train_df[categorical_features]
# Plot countplots for the selected features
fig, axes = plt.subplots(round(len(cal_df.columns) / 3), 3, figsize=(12, 12))
for i, ax in enumerate(fig.axes):
if i < len(cal_df.columns):
sns.countplot(x=cal_df.columns[i], alpha=0.7, data=cal_df, ax=ax)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
fig.tight_layout()
# Missing value statistics:
# The amount of missing values for each column
missing_values = train_df.isnull().sum()
# Display the first 5 rows
print(missing_values.head(5))
# Display an empty line for separation
print("...")
# Display the last 5 rows
print(missing_values.tail(5))
# Visualize missing values:
# Randomly get 1919 sample data
train_df_sample = train_df.sample(1919)  # randomly sample 1919 rows
msno.matrix(train_df_sample)
# Each vertical bar represents a feature. If there is no missing value, the bar will be as black as the rightmost one. Some areas are almost all white, which means that almost all the values of this feature are missing.
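# A small numeric complement to the msno matrix (a sketch): the share of missing values
# per column makes it easier to justify the fill-vs-drop choices made below.
missing_rate = train_df.isnull().mean().sort_values(ascending=False)
print(missing_rate.head(10))
print((missing_rate > 0.5).sum(), "columns are more than 50% missing")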
# Drop meaningless values:
# Drop customer_ID and S_2 from train_df dataframe since they are not required for model building
train_df.drop(axis=1, columns=["customer_ID", "S_2"], inplace=True)
# Drop S_2 in test_df dataframe which is not required for model building
test_df.drop(axis=1, columns=["S_2"], inplace=True)
# Handle categorical columns:
label_encoder = LabelEncoder()
for feature in categorical_features:
train_df[feature] = label_encoder.fit_transform(train_df[feature])
# Drop columns that lead to multicollinearity:
features_drop_for_multicollinearity = [
"B_11",
"S_7",
"B_13",
"B_23",
"D_74",
"D_75",
"D_77",
"B_33",
"B_37",
"D_110",
"D_111",
]
train_df.drop(columns=features_drop_for_multicollinearity, inplace=True)
# Handle missing values:
# rough_train_data = train_df.copy()
fill_mean_for_missing = [
"P_2",
"D_48",
"D_52",
"P_3",
"D_55",
"D_59",
"D_62",
"D_68",
"D_70",
"D_104",
"D_107",
"S_27",
"D_115",
"D_117",
"D_118",
"D_119",
"D_121",
"D_122",
"D_123",
"D_124",
"D_130",
]
fill_mode_for_missing = [
"S_3",
"D_44",
"D_46",
"B_13",
"D_61",
"D_69",
"D_78",
"D_79",
"D_83",
"D_84",
"D_89",
"D_91",
"D_102",
"R_27",
"D_113",
"D_114",
"D_116",
"D_120",
"D_125",
"D_128",
"D_129",
"D_131",
"D_133",
"D_139",
"D_140",
"D_141",
"D_143",
"D_144",
"D_145",
]
drop_for_missing = [
"D_42",
"D_43",
"D_49",
"D_50",
"D_53",
"D_56",
"S_9",
"B_17",
"D_66",
"D_73",
"D_76",
"D_77",
"R_9",
"D_82",
"B_29",
"D_87",
"D_88",
"D_105",
"D_106",
"R_26",
"D_108",
"D_110",
"D_111",
"B_39",
"B_42",
"D_132",
"D_134",
"D_135",
"D_136",
"D_137",
"D_138",
"D_142",
]
# pd.options.display.max_info_columns = 300
# rough_train_data.info()
for feature_name in fill_mean_for_missing:
    train_df[feature_name].fillna(train_df[feature_name].mean(), inplace=True)
for feature_name in fill_mode_for_missing:
    # skip columns that were already dropped for multicollinearity (e.g. B_13)
    if feature_name in train_df.columns:
        train_df[feature_name].fillna(train_df[feature_name].mode()[0], inplace=True)
for feature_name in drop_for_missing:
    # errors="ignore" because a few columns (D_77, D_110, D_111) were already dropped above
    train_df.drop(axis=1, columns=[feature_name], inplace=True, errors="ignore")
# rough_train_data.info()
for column in train_df.columns.tolist():
CpuKiller = train_df[column].mean()
# Fill in each column
train_df[column] = train_df[column].fillna(CpuKiller)
pd.options.display.max_info_columns = 300
# Visualize the degree of dispersion of the data
train_df.boxplot()
# Rank correlations
# correlations = train_df.corr()['target'].abs().sort_values(ascending=False)
# Select top 100 related features
# top_100_features = correlations.index[1:51]
# Create a new dataset
# train_df_top_100 = train_df[top_100_features].copy()
# Keep 'target' feature
# train_df_top_100.loc[:, 'target'] = train_df['target']
# train_df = train_df_top_100
X = train_df.drop(columns="target")
y = train_df["target"]
print("Shape of X", X.shape)
print("Shape of y", y.shape)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, stratify=y, random_state=42
)
from lightgbm import LGBMClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
# Create pipeline with robust scaler and LGBM classifier
pipeline = make_pipeline(
RobustScaler(), LGBMClassifier(random_state=45, n_estimators=5000)
)
# Fit pipeline on training data
pipeline.fit(X_train, y_train)
# Predict on test data
y_pred = pipeline.predict(X_test)
# Calculate the accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
# Evaluate performance
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# Parameter tuning:
from lightgbm import LGBMClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import GridSearchCV
# Define Parameters Distribution
# Define Parameter grid
param_grid = {"n_estimators": range(5000, 10000, 1000)}
model = LGBMClassifier()
# Cross validation and grid search
grid_search = GridSearchCV(model, param_grid, scoring="accuracy", cv=5, verbose=1)
grid_search.fit(X_train, y_train)
# Print best parameters
print("Best parameters found:", grid_search.best_params_)
# Other data normalization methods for testing:
from lightgbm import LGBMClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Create pipeline with standard scaler and LGBM classifier
pipeline = make_pipeline(
StandardScaler(), LGBMClassifier(random_state=45, n_estimators=6000)
)
# Fit pipeline on training data
pipeline.fit(X_train, y_train)
# Predict on test data
y_pred = pipeline.predict(X_test)
# Calculate the accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
# Evaluate performance
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from lightgbm import LGBMClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
# Create pipeline with MinMaxScaler and LGBM classifier
pipeline = make_pipeline(
MinMaxScaler(), LGBMClassifier(random_state=45, n_estimators=6000)
)
# Fit pipeline on training data
pipeline.fit(X_train, y_train)
# Predict on test data
y_pred = pipeline.predict(X_test)
# Calculate the accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
# Evaluate performance
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from lightgbm import LGBMClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
# Create pipeline with Normalizer and LGBM classifier
pipeline = make_pipeline(
Normalizer(), LGBMClassifier(random_state=45, n_estimators=6000)
)
# Fit pipeline on training data
pipeline.fit(X_train, y_train)
# Predict on test data
y_pred = pipeline.predict(X_test)
# Calculate the accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
# Evaluate performance
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
|
import numpy as np
import pandas as pd
import pickle as pkl
import datetime
import time
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
import sys
import re
from sklearn import metrics
from nltk.corpus import stopwords
import nltk
from nltk.wsd import lesk
from nltk.corpus import wordnet as wn
import os
import random
from math import log
import torch
import torch.nn as nn
seed = random.randint(1, 200)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
path = "/kaggle/input/google-quest-challenge"
train = pd.read_csv(f"{path}/train.csv")
test = pd.read_csv(f"{path}/test.csv")
submission = pd.read_csv(f"{path}/sample_submission.csv")
X_train = train["question_body"].fillna("fillna").values
X_test = test["question_body"].fillna("fillna").values
# y_train = train[submission.columns[1:]].values
y_train = train[submission.columns[1]].values
a = np.append(X_train, X_test)
print(a.shape)
with open("mr_0" + ".txt", "w") as f:
j = 0
for i in a:
f.write(i + "\n")
j += 1
x = []
c = np.array(x)
with open("mr_0" + ".txt", "r") as f:
lines = f.readlines()
i = -1
for line in lines:
i += 1
if i <= 5471:
line = str(i) + "\t" + "train" + "\t" + str(y_train[i])
c = np.append(c, line)
if 6078 >= i > 5471:
line = str(i) + "\t" + "test" + "\t" + str(y_train[i])
c = np.append(c, line)
with open("mr_1" + ".txt", "w") as f:
for line in c:
f.write(line + "\n")
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)
def load_corpus(dataset_str):
"""
Loads input corpus from gcn/data directory
ind.dataset_str.x => the feature vectors of the training docs as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test docs as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training docs/words
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training docs as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test docs as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.adj => adjacency matrix of word/doc nodes as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.train.index => the indices of training docs in original doc list.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
names = ["x", "y", "tx", "ty", "allx", "ally", "adj"]
objects = []
for i in range(len(names)):
with open("ind.{}.{}".format(dataset_str, names[i]), "rb") as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding="latin1"))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, adj = tuple(objects)
# print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)
features = sp.vstack((allx, tx)).tolil()
labels = np.vstack((ally, ty))
# print(len(labels))
train_idx_orig = parse_index_file("{}.train.index".format(dataset_str))
train_size = len(train_idx_orig)
val_size = train_size - x.shape[0]
test_size = tx.shape[0]
idx_train = range(len(y))
idx_val = range(len(y), len(y) + val_size)
idx_test = range(allx.shape[0], allx.shape[0] + test_size)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
return (
adj,
features,
y_train,
y_val,
y_test,
train_mask,
val_mask,
test_mask,
train_size,
test_size,
)
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
# return sparse_to_tuple(features)
return features.A
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
# return sparse_to_tuple(adj_normalized)
return adj_normalized.A
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders["labels"]: labels})
feed_dict.update({placeholders["labels_mask"]: labels_mask})
feed_dict.update({placeholders["features"]: features})
feed_dict.update(
{placeholders["support"][i]: support[i] for i in range(len(support))}
)
feed_dict.update({placeholders["num_features_nonzero"]: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which="LM")
scaled_laplacian = (2.0 / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k + 1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
def loadWord2Vec(filename):
"""Read Word Vectors"""
vocab = []
embd = []
word_vector_map = {}
file = open(filename, "r")
for line in file.readlines():
row = line.strip().split(" ")
if len(row) > 2:
vocab.append(row[0])
vector = row[1:]
length = len(vector)
for i in range(length):
vector[i] = float(vector[i])
embd.append(vector)
word_vector_map[row[0]] = vector
print("Loaded Word Vectors!")
file.close()
return vocab, embd, word_vector_map
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " 's", string)
string = re.sub(r"\'ve", " 've", string)
string = re.sub(r"n\'t", " n't", string)
string = re.sub(r"\'re", " 're", string)
string = re.sub(r"\'d", " 'd", string)
string = re.sub(r"\'ll", " 'll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def print_log(msg="", end="\n"):
now = datetime.datetime.now()
t = (
str(now.year)
+ "/"
+ str(now.month)
+ "/"
+ str(now.day)
+ " "
+ str(now.hour).zfill(2)
+ ":"
+ str(now.minute).zfill(2)
+ ":"
+ str(now.second).zfill(2)
)
if isinstance(msg, str):
lines = msg.split("\n")
else:
lines = [msg]
for line in lines:
if line == lines[-1]:
print("[" + t + "] " + str(line), end=end)
else:
print("[" + t + "] " + str(line))
dataset = "mr_0"
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
print(stop_words)
doc_content_list = []
with open("mr_0.txt", "rb") as f:
for line in f.readlines():
doc_content_list.append(line.strip().decode("latin1"))
word_freq = {}
for doc_content in doc_content_list:
temp = clean_str(doc_content)
words = temp.split()
for word in words:
if word in word_freq:
word_freq[word] += 1
else:
word_freq[word] = 1
clean_docs = []
for doc_content in doc_content_list:
temp = clean_str(doc_content)
words = temp.split()
doc_words = []
for word in words:
if dataset == "mr_0":
doc_words.append(word)
elif word not in stop_words and word_freq[word] >= 5:
doc_words.append(word)
doc_str = " ".join(doc_words).strip()
clean_docs.append(doc_str)
clean_corpus_str = "\n".join(clean_docs)
with open("mr_0" + ".clean.txt", "w") as f:
f.write(clean_corpus_str)
min_len = 10000
aver_len = 0
max_len = 0
with open("mr_0" + ".clean.txt", "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
temp = line.split()
aver_len = aver_len + len(temp)
if len(temp) < min_len:
min_len = len(temp)
if len(temp) > max_len:
max_len = len(temp)
aver_len = 1.0 * aver_len / len(lines)
print("Min_len : " + str(min_len))
print("Max_len : " + str(max_len))
print("Average_len : " + str(aver_len))
word_embeddings_dim = 300
word_vector_map = {}
doc_name_list = []
doc_train_list = []
doc_test_list = []
with open("mr_1" + ".txt", "r") as f:
lines = f.readlines()
for line in lines:
doc_name_list.append(line.strip())
temp = line.split("\t")
if temp[1].find("test") != -1:
doc_test_list.append(line.strip())
elif temp[1].find("train") != -1:
doc_train_list.append(line.strip())
doc_content_list = []
with open("mr_0" + ".clean.txt", "r") as f:
lines = f.readlines()
for line in lines:
doc_content_list.append(line.strip())
train_ids = []
for train_name in doc_train_list:
train_id = doc_name_list.index(train_name)
train_ids.append(train_id)
random.shuffle(train_ids)
train_ids_str = "\n".join(str(index) for index in train_ids)
with open("mr_0" + ".train.index", "w") as f:
f.write(train_ids_str)
test_ids = []
for test_name in doc_test_list:
test_id = doc_name_list.index(test_name)
test_ids.append(test_id)
random.shuffle(test_ids)
test_ids_str = "\n".join(str(index) for index in test_ids)
with open("mr_0" + ".test.index", "w") as f:
f.write(test_ids_str)
ids = train_ids + test_ids
shuffle_doc_name_list = []
shuffle_doc_words_list = []
for id in ids:
shuffle_doc_name_list.append(doc_name_list[int(id)])
shuffle_doc_words_list.append(doc_content_list[int(id)])
shuffle_doc_name_str = "\n".join(shuffle_doc_name_list)
shuffle_doc_words_str = "\n".join(shuffle_doc_words_list)
with open("mr_1" + "_shuffle.txt", "w") as f:
f.write(shuffle_doc_name_str)
with open("mr_0" + "_shuffle.txt", "w") as f:
f.write(shuffle_doc_words_str)
word_freq = {}
word_set = set()
for doc_words in shuffle_doc_words_list:
words = doc_words.split()
for word in words:
word_set.add(word)
if word in word_freq:
word_freq[word] += 1
else:
word_freq[word] = 1
vocab = list(word_set)
vocab_size = len(vocab)
word_doc_list = {}
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
appeared = set()
for word in words:
if word in appeared:
continue
if word in word_doc_list:
doc_list = word_doc_list[word]
doc_list.append(i)
word_doc_list[word] = doc_list
else:
word_doc_list[word] = [i]
appeared.add(word)
word_doc_freq = {}
for word, doc_list in word_doc_list.items():
word_doc_freq[word] = len(doc_list)
word_id_map = {}
for i in range(vocab_size):
word_id_map[vocab[i]] = i
vocab_str = "\n".join(vocab)
with open("mr_0" + "_vocab.txt", "w") as f:
f.write(vocab_str)
label_set = set()
for doc_meta in shuffle_doc_name_list:
temp = doc_meta.split("\t")
label_set.add(temp[2])
label_list = list(label_set)
label_list_str = "\n".join(label_list)
with open("mr_0" + "_labels.txt", "w") as f:
f.write(label_list_str)
train_size = len(train_ids)
val_size = int(0.1 * train_size)
real_train_size = train_size - val_size
real_train_doc_names = shuffle_doc_name_list[:real_train_size]
real_train_doc_names_str = "\n".join(real_train_doc_names)
with open("mr_1" + ".real_train.name", "w") as f:
f.write(real_train_doc_names_str)
row_x = []
col_x = []
data_x = []
for i in range(real_train_size):
doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_len = len(words)
for word in words:
if word in word_vector_map:
word_vector = word_vector_map[word]
doc_vec = doc_vec + np.array(word_vector)
for j in range(word_embeddings_dim):
row_x.append(i)
col_x.append(j)
data_x.append(doc_vec[j] / doc_len)
x = sp.csr_matrix(
(data_x, (row_x, col_x)), shape=(real_train_size, word_embeddings_dim)
)
y = []
for i in range(real_train_size):
doc_meta = shuffle_doc_name_list[i]
temp = doc_meta.split("\t")
label = temp[2]
one_hot = [0 for l in range(len(label_list))]
label_index = label_list.index(label)
one_hot[label_index] = 1
y.append(one_hot)
y = np.array(y)
test_size = len(test_ids)
row_tx = []
col_tx = []
data_tx = []
for i in range(test_size):
doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
doc_words = shuffle_doc_words_list[i + train_size]
words = doc_words.split()
doc_len = len(words)
for word in words:
if word in word_vector_map:
word_vector = word_vector_map[word]
doc_vec = doc_vec + np.array(word_vector)
for j in range(word_embeddings_dim):
row_tx.append(i)
col_tx.append(j)
data_tx.append(doc_vec[j] / doc_len)
tx = sp.csr_matrix((data_tx, (row_tx, col_tx)), shape=(test_size, word_embeddings_dim))
ty = []
for i in range(test_size):
doc_meta = shuffle_doc_name_list[i + train_size]
temp = doc_meta.split("\t")
label = temp[2]
one_hot = [0 for l in range(len(label_list))]
label_index = label_list.index(label)
one_hot[label_index] = 1
ty.append(one_hot)
ty = np.array(ty)
word_vectors = np.random.uniform(-0.01, 0.01, (vocab_size, word_embeddings_dim))
for i in range(len(vocab)):
word = vocab[i]
if word in word_vector_map:
vector = word_vector_map[word]
word_vectors[i] = vector
row_allx = []
col_allx = []
data_allx = []
for i in range(train_size):
doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_len = len(words)
for word in words:
if word in word_vector_map:
word_vector = word_vector_map[word]
doc_vec = doc_vec + np.array(word_vector)
for j in range(word_embeddings_dim):
row_allx.append(int(i))
col_allx.append(j)
data_allx.append(doc_vec[j] / doc_len)
for i in range(vocab_size):
for j in range(word_embeddings_dim):
row_allx.append(int(i + train_size))
col_allx.append(j)
data_allx.append(word_vectors.item((i, j)))
row_allx = np.array(row_allx)
col_allx = np.array(col_allx)
data_allx = np.array(data_allx)
allx = sp.csr_matrix(
(data_allx, (row_allx, col_allx)),
shape=(train_size + vocab_size, word_embeddings_dim),
)
ally = []
for i in range(train_size):
doc_meta = shuffle_doc_name_list[i]
temp = doc_meta.split("\t")
label = temp[2]
one_hot = [0 for l in range(len(label_list))]
label_index = label_list.index(label)
one_hot[label_index] = 1
ally.append(one_hot)
for i in range(vocab_size):
one_hot = [0 for l in range(len(label_list))]
ally.append(one_hot)
ally = np.array(ally)
print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)
"""
Doc word heterogeneous graph
"""
# Word co-occurrence with context windows
window_size = 20
windows = []
for doc_words in shuffle_doc_words_list:
words = doc_words.split()
length = len(words)
if length <= window_size:
windows.append(words)
else:
for j in range(length - window_size + 1):
window = words[j : j + window_size]
windows.append(window)
word_window_freq = {}
for window in windows:
appeared = set()
for i in range(len(window)):
if window[i] in appeared:
continue
if window[i] in word_window_freq:
word_window_freq[window[i]] += 1
else:
word_window_freq[window[i]] = 1
appeared.add(window[i])
word_pair_count = {}
for window in windows:
for i in range(1, len(window)):
for j in range(0, i):
word_i = window[i]
word_i_id = word_id_map[word_i]
word_j = window[j]
word_j_id = word_id_map[word_j]
if word_i_id == word_j_id:
continue
word_pair_str = str(word_i_id) + "," + str(word_j_id)
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
word_pair_str = str(word_j_id) + "," + str(word_i_id)
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
row = []
col = []
weight = []
num_window = len(windows)
for key in word_pair_count:
temp = key.split(",")
i = int(temp[0])
j = int(temp[1])
count = word_pair_count[key]
word_freq_i = word_window_freq[vocab[i]]
word_freq_j = word_window_freq[vocab[j]]
pmi = log(
(1.0 * count / num_window)
/ (1.0 * word_freq_i * word_freq_j / (num_window * num_window))
)
if pmi <= 0:
continue
row.append(train_size + i)
col.append(train_size + j)
weight.append(pmi)
doc_word_freq = {}
for doc_id in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[doc_id]
words = doc_words.split()
for word in words:
word_id = word_id_map[word]
doc_word_str = str(doc_id) + "," + str(word_id)
if doc_word_str in doc_word_freq:
doc_word_freq[doc_word_str] += 1
else:
doc_word_freq[doc_word_str] = 1
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_word_set = set()
for word in words:
if word in doc_word_set:
continue
j = word_id_map[word]
key = str(i) + "," + str(j)
freq = doc_word_freq[key]
if i < train_size:
row.append(i)
else:
row.append(i + vocab_size)
col.append(train_size + j)
idf = log(1.0 * len(shuffle_doc_words_list) / word_doc_freq[vocab[j]])
weight.append(freq * idf)
doc_word_set.add(word)
node_size = train_size + vocab_size + test_size
adj = sp.csr_matrix((weight, (row, col)), shape=(node_size, node_size))
with open("ind.{}.x".format(dataset), "wb") as f:
pkl.dump(x, f)
with open("ind.{}.y".format(dataset), "wb") as f:
pkl.dump(y, f)
with open("ind.{}.tx".format(dataset), "wb") as f:
pkl.dump(tx, f)
with open("ind.{}.ty".format(dataset), "wb") as f:
pkl.dump(ty, f)
with open("ind.{}.allx".format(dataset), "wb") as f:
pkl.dump(allx, f)
with open("ind.{}.ally".format(dataset), "wb") as f:
pkl.dump(ally, f)
with open("ind.{}.adj".format(dataset), "wb") as f:
pkl.dump(adj, f)
class GraphConvolution(nn.Module):
def __init__(
self,
input_dim,
output_dim,
support,
act_func=None,
featureless=False,
dropout_rate=0.0,
bias=False,
):
super(GraphConvolution, self).__init__()
self.support = support
self.featureless = featureless
for i in range(len(self.support)):
setattr(
self, "W{}".format(i), nn.Parameter(torch.randn(input_dim, output_dim))
)
if bias:
self.b = nn.Parameter(torch.zeros(1, output_dim))
self.act_func = act_func
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
x = self.dropout(x)
for i in range(len(self.support)):
if self.featureless:
pre_sup = getattr(self, "W{}".format(i))
else:
pre_sup = x.mm(getattr(self, "W{}".format(i)))
if i == 0:
out = self.support[i].mm(pre_sup)
else:
out += self.support[i].mm(pre_sup)
if self.act_func is not None:
out = self.act_func(out)
return out
class GCN(nn.Module):
def __init__(self, input_dim, support, dropout_rate=0.0, num_classes=2):
super(GCN, self).__init__()
self.layer1 = GraphConvolution(
input_dim,
200,
support,
act_func=nn.ReLU(),
featureless=True,
dropout_rate=dropout_rate,
)
self.layer2 = GraphConvolution(
200, num_classes, support, dropout_rate=dropout_rate
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
return out
class CONFIG(object):
"""docstring for CONFIG"""
def __init__(self):
super(CONFIG, self).__init__()
self.dataset = "mr_0"
self.model = "gcn" # 'gcn', 'gcn_cheby', 'dense'
self.learning_rate = 0.02 # Initial learning rate.
self.epochs = 200 # Number of epochs to train.
self.hidden1 = 200 # Number of units in hidden layer 1.
self.dropout = 0.5 # Dropout rate (1 - keep probability).
self.weight_decay = 0.0 # Weight for L2 loss on embedding matrix.
self.early_stopping = 10 # Tolerance for early stopping (# of epochs).
self.max_degree = 3 # Maximum Chebyshev polynomial degree.
cfg = CONFIG()
dataset = "mr_0"
cfg.dataset = dataset
# Load data
(
adj,
features,
y_train,
y_val,
y_test,
train_mask,
val_mask,
test_mask,
train_size,
test_size,
) = load_corpus(cfg.dataset)
features = sp.identity(features.shape[0])
# Some preprocessing
features = preprocess_features(features)
if cfg.model == "gcn":
support = [preprocess_adj(adj)]
num_supports = 1
model_func = GCN
elif cfg.model == "gcn_cheby":
support = chebyshev_polynomials(adj, cfg.max_degree)
num_supports = 1 + cfg.max_degree
model_func = GCN
elif cfg.model == "dense":
support = [preprocess_adj(adj)]
num_supports = 1
    model_func = MLP  # note: MLP is not defined in this notebook; only the "gcn" branch above is exercised
else:
raise ValueError("Invalid argument for model: " + str(cfg.model))
# Define placeholders
t_features = torch.from_numpy(features)
t_y_train = torch.from_numpy(y_train)
t_y_val = torch.from_numpy(y_val)
t_y_test = torch.from_numpy(y_test)
t_train_mask = torch.from_numpy(train_mask.astype(np.float32))
tm_train_mask = torch.transpose(torch.unsqueeze(t_train_mask, 0), 1, 0).repeat(
1, y_train.shape[1]
)
t_support = []
for i in range(len(support)):
t_support.append(torch.Tensor(support[i]))
model = model_func(
input_dim=features.shape[0], support=t_support, num_classes=y_train.shape[1]
)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=cfg.learning_rate)
# Define model evaluation function
def evaluate(features, labels, mask):
t_test = time.time()
model.eval()
with torch.no_grad():
logits = model(features)
t_mask = torch.from_numpy(np.array(mask * 1.0, dtype=np.float32))
tm_mask = torch.transpose(torch.unsqueeze(t_mask, 0), 1, 0).repeat(
1, labels.shape[1]
)
loss = criterion(logits * tm_mask, torch.max(labels, 1)[1])
pred = torch.max(logits, 1)[1]
acc = (
(pred == torch.max(labels, 1)[1]).float() * t_mask
).sum().item() / t_mask.sum().item()
return loss, acc, pred, labels.numpy(), (time.time() - t_test), logits
val_losses = []
# Train model
for epoch in range(cfg.epochs):
t = time.time()
# Forward pass
logits = model(t_features)
loss = criterion(logits * tm_train_mask, torch.max(t_y_train, 1)[1])
acc = (
(torch.max(logits, 1)[1] == torch.max(t_y_train, 1)[1]).float() * t_train_mask
).sum().item() / t_train_mask.sum().item()
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Validation
val_loss, val_acc, pred, labels, duration, logits = evaluate(
t_features, t_y_val, val_mask
)
val_losses.append(val_loss)
print_log(
"Epoch: {:.0f}, train_loss= {:.5f}, train_acc= {:.5f}, val_loss= {:.5f}, val_acc= {:.5f}, time= {:.5f}".format(
epoch + 1, loss, acc, val_loss, val_acc, time.time() - t
)
)
if epoch > cfg.early_stopping and val_losses[-1] > np.mean(
val_losses[-(cfg.early_stopping + 1) : -1]
):
print_log("Early stopping...")
break
print_log("Optimization Finished!")
# Testing
test_loss, test_acc, pred, labels, test_duration, logits = evaluate(
t_features, t_y_test, test_mask
)
print_log(
"Test set results: \n\t loss= {:.5f}, accuracy= {:.5f}, time= {:.5f}".format(
test_loss, test_acc, test_duration
)
)
test_pred = []
test_labels = []
for i in range(len(test_mask)):
if test_mask[i]:
test_pred.append(pred[i])
test_labels.append(labels[i])
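# Follow-up sketch (not in the original notebook): summarize the masked test predictions
# collected above with scikit-learn's classification report ("metrics" was imported at the top).
print_log(
    metrics.classification_report(
        [int(np.argmax(lab)) for lab in test_labels], [int(p) for p in test_pred]
    )
)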
|
import gc
from functools import partial
from pathlib import Path
from fastai.text import *
from fastai.callbacks import *
import numpy as np
import pandas as pd
home = Path(".")
# Changes
# V10 15 epochs, fwd + bwd
# V9 500kk qrnn
# V7 awd-lstm
# V6 100kk SP
#
bs = 512 + 128
max_vocab = 15_000
data_en_wiki = load_data(home, "data_en_wiki_15000", bs=bs)
data_en_wiki.show_batch()
1 / 0  # deliberate error to stop execution here (presumably so "Run All" does not launch the expensive training below)
learn.purge()
gc.collect()
config = awd_lstm_lm_config.copy()
wd = 0.1
# config['qrnn'] = True
# config['n_hid'] = 1550 #default 1152
# config['n_layers'] = 4 #default 3
# wd=0.01
learn = language_model_learner(
data_en_wiki,
AWD_LSTM,
config=config,
drop_mult=0.0,
true_wd=True,
wd=wd,
pretrained=False,
metrics=[accuracy, Perplexity()],
).to_fp16()
# learn.lr_find()
# learn.recorder.plot(skip_end=10)
lr = 8e-04
lr *= bs / 48 # Scale learning rate by batch size
learn.unfreeze()
learn.fit_one_cycle(
10,
lr,
moms=(0.8, 0.7),
callbacks=[
# SaveModelCallback(learn, monitor="perplexity", mode="min", name="best_model"),
ShowGraph(learn)
],
)
learn.fit_one_cycle(5, lr / 10, moms=(0.8, 0.7))
learn.to_fp32().save(f"learn_en_wiki_{max_vocab}", with_opt=False)
learn.data.vocab.save(
home / "models/learn_en_wiki_15_vocab.pkl",
)
data_en_wiki = load_data(home, "data_en_wiki_15000_bwd", bs=bs, backwards=True)
learn = language_model_learner(
data_en_wiki,
AWD_LSTM,
config=config,
drop_mult=0.0,
true_wd=True,
wd=wd,
pretrained=False,
metrics=[accuracy, Perplexity()],
).to_fp16()
learn.unfreeze()
learn.fit_one_cycle(
10,
lr,
moms=(0.8, 0.7),
callbacks=[
# SaveModelCallback(learn, monitor="perplexity", mode="min", name="best_model"),
ShowGraph(learn)
],
)
learn.fit_one_cycle(5, lr / 10, moms=(0.8, 0.7))
learn.to_fp32().save(f"learn_en_wiki_{max_vocab}_bwd", with_opt=False)
learn.data.vocab.save(
home / "models/learn_en_wiki_15_vocab_bwd.pkl",
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_absolute_error, accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# retrieve data
train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
# data processing
train.info()
# test.info()
# train.describe()
test.describe()
# sample data
train.head(10)
# encode categorical data
train = train.join(pd.get_dummies(train[["Sex"]]))
train = train.drop(["Sex", "Ticket", "Cabin", "Embarked"], axis=1)
test = test.join(pd.get_dummies(test[["Sex"]]))
test = test.drop(["Sex", "Ticket", "Cabin", "Embarked"], axis=1)
train.head(5)
test.head(5)
# handle missing values
train.Age = train.Age.fillna(train.Age.mean())
test.Age = test.Age.fillna(test.Age.mean())
test.Fare = test.Fare.fillna(test.Fare.mean())
# data processing
print(train.columns)
feature_columns = list(train.columns)
feature_columns.remove("Name")
feature_columns.remove("Survived")
feature_columns.remove("PassengerId")
X = train[feature_columns]
y = train.Survived
# split data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
model = RandomForestClassifier(max_leaf_nodes=200, random_state=1)
model.fit(train_X, train_y)
predictions = model.predict(val_X)
accuracy = accuracy_score(predictions, val_y) * 100
print("Accuracy:", accuracy)
xgb_model = XGBClassifier()
clf = GridSearchCV(xgb_model, {"max_depth": [2, 4, 6]}, verbose=1)
clf.fit(train_X, train_y)
predictions = clf.predict(val_X)
accuracy = accuracy_score(predictions, val_y) * 100
print("Accuracy:", accuracy)
test.head()
test.describe()
# retrain model on all training data
model.fit(train[feature_columns], y)
clf.fit(train[feature_columns], y)
# submit results
predictions = clf.predict(test[feature_columns])
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": predictions})
# Visualize the first 5 rows
submission.head()
filename = "Titanic Predictions 1.csv"
submission.to_csv(filename, index=False)
print("Saved file: " + filename)
|
# In this notebook I will present some of the results of my first attempts at implementing a deep learning model to solve a computer vision problem. I'm still far from being an expert in the field and my intention here is not to compete with other kernels; instead I'm just trying to break the ice with Deep Learning and the (awesome) Kaggle community while learning as much Data Science as possible.
# Having little to no previous experience in this field, I followed as an example the current top kernel for this dataset, made by the user NAIN (please check out his awesome work if you haven't already: https://www.kaggle.com/aakashnain/beating-everything-with-depthwise-convolution; thank you NAIN for all the things you taught me, hopefully one day my notebooks will be as helpful and useful to other people). You can therefore see some of his code here, especially for mechanical things like loading the images from disk and plotting the confusion matrix, though many other things are different and I reached different results that I think are worth showing.
# I made this notebook using Google Colab and Google Drive, where I did a lot of trial-and-error attempts; I'm publishing the final results here.
# load data directories
dir = "/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/"
test = "/kaggle/input/chest-xray-pneumonia/chest_xray/test"
train = "/kaggle/input/chest-xray-pneumonia/chest_xray/train"
val = "/kaggle/input/chest-xray-pneumonia/chest_xray/val"
norm_test = "/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL"
pneum_test = "/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA"
norm_train = "/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL"
pneum_train = "/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA"
norm_val = "/kaggle/input/chest-xray-pneumonia/chest_xray/val/NORMAL"
pneum_val = "/kaggle/input/chest-xray-pneumonia/chest_xray/val/PNEUMONIA"
import pandas as pd
from pathlib import Path
# Load train dataset
train_data_norm = pd.DataFrame(Path(norm_train).glob("*.jpeg"))
train_data_pneum = pd.DataFrame(Path(pneum_train).glob("*.jpeg"))
train_data_norm[1] = 0
train_data_pneum[1] = 1
train_data = pd.concat([train_data_norm, train_data_pneum])
# Load test dataset
test_data_norm = pd.DataFrame(Path(norm_test).glob("*.jpeg"))
test_data_pneum = pd.DataFrame(Path(pneum_test).glob("*.jpeg"))
test_data_norm[1] = 0
test_data_pneum[1] = 1
test_data = pd.concat([test_data_norm, test_data_pneum])
# Load validation dataset
val_data_norm = pd.DataFrame(Path(norm_val).glob("*.jpeg"))
val_data_pneum = pd.DataFrame(Path(pneum_val).glob("*.jpeg"))
val_data_norm[1] = 0
val_data_pneum[1] = 1
val_data = pd.concat([val_data_norm, val_data_pneum])
# Let's explore the data
count_tr = len(train_data)
count_tr_n = len(train_data[train_data[1] == 0])
count_tr_p = len(train_data[train_data[1] == 1])
count_ts = len(test_data)
count_ts_n = len(test_data[test_data[1] == 0])
count_ts_p = len(test_data[test_data[1] == 1])
count_val = len(val_data)
count_val_n = len(val_data[val_data[1] == 0])
count_val_p = len(val_data[val_data[1] == 1])
print("Train data")
print(f"Normal cases {count_tr_n} ({round(count_tr_n/count_tr,2)*100}%)")
print(f"Pneunomia cases {count_tr_p} ({round(count_tr_p/count_tr,2)*100}%)")
print(f"Total cases: {count_tr} ")
print("")
print("Test data")
print(f"Normal cases {count_ts_n} ({round(count_ts_n/count_ts,2)*100}%)")
print(f"Pneunomia cases {count_ts_p} ({round(count_ts_p/count_ts,2)*100}%)")
print(f"Total cases: {count_ts} ")
print("")
print("Validation data")
print(f"Normal cases {count_val_n} ({round(count_val_n/count_val,2)*100}%)")
print(f"Pneunomia cases {count_val_p} ({round(count_val_p/count_val,2)*100}%)")
print(f"Total cases: {count_val} ")
# Classes are not balanced: the classifier could naively learn to classify every image as a Pneumonia case and still get about 62% accuracy at test time, which is a warning sign (a quick class-weight sketch follows below).
# Images come from one folder per class, which means they're ordered by group, so it's good practice to shuffle the data.
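# A quick sketch of one common mitigation (not used later in this notebook): compute
# per-class weights that could be passed to Keras via model.fit(..., class_weight=...).
# The column name (1) and the 0/1 label encoding are the ones defined above.
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

class_weights = compute_class_weight(
    class_weight="balanced", classes=np.array([0, 1]), y=train_data[1].values
)
class_weight_dict = {0: class_weights[0], 1: class_weights[1]}
print("Suggested class weights (0 = normal, 1 = pneumonia):", class_weight_dict)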
# Let's shuffle the data
from sklearn.utils import shuffle
train_data = shuffle(train_data)
test_data = shuffle(test_data)
val_data = shuffle(val_data)
# Let's load and resize the data:
# This part was pretty much copy-pasted from NAIN's notebook (sorry about that), but I do have a couple of comments to make:
# 1) I added the blackwhite_counter to see if there actually were any black and white (single-channel) images, and to my knowledge there were none.
# Maybe this check is just a good practice that we're supposed to apply to any dataset of images.
# 2) It wasn't clear to me why we were converting from BGR to RGB [cv2.cvtColor(img, cv2.COLOR_BGR2RGB)]. In case you're also wondering, it's because the cv2.imread() function loads images in BGR order by default and we need them in RGB.
# I saved the shapes of the images just out of personal curiosity; I wanted to see what the real shapes were, and I've had no real use for them in this notebook.
# (you can skip them)
# Let's load the images.
import cv2
import numpy as np
# loading train data
train_img = []
train_label = []
train_shapes = []
blacwhite_counter = 0
for i, imgfile in enumerate(train_data[0]):
img = cv2.imread(str(imgfile))
train_shapes.append(np.shape(img))
img = cv2.resize(img, (224, 224))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
blacwhite_counter = blacwhite_counter + 1
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
train_img.append(img)
train_label.append(train_data.iloc[i, 1])
# loading test data
test_img = []
test_label = []
test_shapes = []
blacwhite_counter_t = 0
for i, imgfile in enumerate(test_data[0]):
img = cv2.imread(str(imgfile))
test_shapes.append(np.shape(img))
img = cv2.resize(img, (224, 224))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
blacwhite_counter_t = blacwhite_counter_t + 1
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
test_img.append(img)
test_label.append(test_data.iloc[i, 1])
# loading val data
val_img = []
val_label = []
val_shapes = []
blacwhite_counter_v = 0
for i, imgfile in enumerate(val_data[0]):
img = cv2.imread(str(imgfile))
val_shapes.append(np.shape(img))
img = cv2.resize(img, (224, 224))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
blacwhite_counter_v = blacwhite_counter_v + 1
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
val_img.append(img)
val_label.append(val_data.iloc[i, 1])
print(blacwhite_counter, blacwhite_counter_t, blacwhite_counter_v)
from keras.utils import to_categorical
# I rename the datasets with easier names and turn the labels into categorical data
trainX = np.array(train_img)
trainY = to_categorical(np.array(train_label))
valX = np.array(val_img)
valY = to_categorical(np.array(val_label))
testX = np.array(test_img)
testY = to_categorical(np.array(test_label))
# You can delete the old variables to clean some ram
del train_img, train_label, val_img, val_label, test_img, test_label
# Now we're finally ready to build some cool Neural Networks.
# I'll start with the image augmentation generator, and I'll plot some examples of the images generated so we know what input we are actually feeding to the model, since I noticed that blindly setting augmentation parameters can give very bad results and unexpected surprises.
# When defining the generator, you can see the list of the most common augmentations we can apply to our images.
# Of these, I personally noticed that shifting and rotating actually gave very bad results. Maybe if you're fine-tuning networks already trained with this kind of augmentation they might help, but in the many experiments I did with networks built from scratch I found no improvement whatsoever; they simply slowed down and disrupted the training process.
# I think this might even be logical for our data, since X-ray images usually have a fixed orientation and position, so there's not much sense in rotating them.
# Of the remaining augmentation options:
# -brightness_range -> I had to remove it because it seemed to not function well and it broke the images. (see example below)
# -horizontal_flip -> didn't seem to cause much trouble but didn't show any improvements either.
# So I ended up using only zoom_range, which surprisingly seemed to be more than enough.
# Just for curiosity here's what the brightness augmentation did to my data.
# I tried all kinds of brightness ranges, couldn't seem to get it to generate any usable image.
# I'd have to use offline augmentation and basically create a new dataset with the manually augmented images added to the original ones, maybe I could try it in the future.
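# A possible explanation (and a workaround I didn't pursue here): as far as I
# understand, Keras applies the brightness shift on a 0-255 scale, so images that
# were already divided by 255 come out with values far above 1 and look blown out
# when plotted. Feeding uint8 images and letting the generator do the rescaling
# would keep both steps in the same range -- a sketch only:
# bright_aug = ImageDataGenerator(brightness_range=[0.7, 1.3], rescale=1.0 / 255)
# bright_flow = bright_aug.flow((trainX * 255).astype("uint8"), trainY, batch_size=9)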
from keras.models import Sequential, Model
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential
from keras.layers import (
Conv2D,
MaxPooling2D,
Dense,
Dropout,
Input,
Flatten,
SeparableConv2D,
)
from keras.layers import GlobalMaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.optimizers import Adam, SGD, RMSprop
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from keras.layers import GaussianNoise
from keras.layers import Activation
import keras.metrics
from sklearn.metrics import precision_score, recall_score
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
# construct the training image generator for data augmentation
aug = ImageDataGenerator( # zoom_range=[0.9,1.1],
brightness_range=[0.9, 1.1]
# horizontal_flip=True ,
# rotation_range=90,
# height_shift_range=0.15,
# width_shift_range=[-0.125,0.125]
)
aug.fit(trainX)
for X_batch, y_batch in aug.flow(trainX, trainY, batch_size=9):
# create a grid of 3x3 images
for i in range(0, 9):
plt.subplot(330 + 1 + i)
plt.imshow(X_batch[i])
# show the plot
plt.show()
break
# Sadly I can't seem to make brightness augmentation work in this case.
# Let's first avoid any form of augmentation:
# construct the training image generator for data augmentation
aug = ImageDataGenerator( # zoom_range=[0.9,1.1],
# brightness_range=[0.,1.]
# horizontal_flip=True ,
# rotation_range=90,
# height_shift_range=0.15,
# width_shift_range=[-0.125,0.125]
)
aug.fit(trainX)
for X_batch, y_batch in aug.flow(trainX, trainY, batch_size=9):
# create a grid of 3x3 images
for i in range(0, 9):
plt.subplot(330 + 1 + i)
plt.imshow(X_batch[i])
# show the plot
plt.show()
break
# When I started this analysis my goal was to learn the basics of deep learning for computer vision. As I said earlier, I was following the dataset's top kernel (NAIN's) as a reference, so at first I was simply looking for a model that could come close to its results. I only knew it had to be a model built from scratch, because I want to get the basics straight first, and from what I've seen fine-tuning has already been done by a lot of other users, so it would simply mean copy-pasting more code from other kernels.
# So here are the results I was trying to reach, these are NAIN's results:
# 
# Recall of the model is 0.98
# Precision of the model is 0.79
# I immediately started building complex models with hundreds of neurons, stacking all sorts of layers (gaussian noise, dropout, separable convs) and adding all kinds of data augmentation. They took literally hours to train, consumed all the RAM available on Colab and crashed the system, and, what's worse, the results were very bad: the model would simply classify most (and often even all) of the observations as "Pneumonia" and couldn't learn to recognize a healthy individual with any precision.
# So instead I tried to build a very simple and basic model that would be fast to train, so I could tweak the architecture and hyperparameters more easily and see what worked and what didn't.
EPOCHS = 50
BS = 64
def build_model():
input_img = Input(shape=(224, 224, 3), name="ImageInput")
x = Conv2D(16, (3, 3), activation="relu", padding="same", name="Conv1_1")(input_img)
x = Conv2D(16, (3, 3), activation="relu", padding="same", name="Conv1_2")(x)
x = MaxPooling2D((2, 2), name="pool1")(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same", name="Conv2_1")(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same", name="Conv2_2")(x)
x = MaxPooling2D((2, 2), name="pool2")(x)
x = Flatten(name="flatten")(x)
x = Dense(128, activation="relu", name="fc1")(x)
x = Dense(64, activation="relu", name="fc2")(x)
x = Dense(2, activation="softmax", name="fc3")(x)
    model = Model(inputs=input_img, outputs=x)
return model
model = build_model()
model.summary()
# I wasn't sure which metric was best to monitor, the validation loss or the validation accuracy, so I monitored them both.
# My models have no early stopping, so at the end of the training process for every model I'll check 3 different sets of weights on the test set:
# - the weights at the end of the last epoch
# - the weights with the lowest validation loss
# - the weights with the highest validation accuracy
# Of these three, I will further inspect only the predictions of the best one.
# I think this would technically be considered not legitimate in a Kaggle competition, since I'm effectively using the test data as validation data, but I could apply the same logic legitimately if the validation set were a bit larger and/or by using k-fold cross-validation.
opt = Adam(lr=0.0001, decay=1e-5)
chkpt = ModelCheckpoint(
filepath="best_aug_model_todate2loss.h5",
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
chkpt2 = ModelCheckpoint(
filepath="best_aug_model_todate2acc.h5",
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
callbacks_list = [chkpt, chkpt2]
model.compile(loss="binary_crossentropy", metrics=["accuracy"], optimizer=opt)
H = model.fit_generator(
aug.flow(trainX, trainY, batch_size=BS),
callbacks=callbacks_list,
validation_data=(valX, valY),
steps_per_epoch=len(trainX) // BS,
epochs=EPOCHS,
)
print(H.history.keys())
# "Accuracy"
plt.plot(H.history["accuracy"])
plt.plot(H.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# "Loss"
plt.plot(H.history["loss"])
plt.plot(H.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
model.save_weights("last_epoch.h5")
test_loss_last, test_score_last = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_last)
print("Accuracy on test set: ", test_score_last)
model.load_weights("best_aug_model_todate2loss.h5")
test_loss_bestloss, test_score_bestloss = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_bestloss)
print("Accuracy on test set: ", test_score_bestloss)
model.load_weights("best_aug_model_todate2acc.h5")
test_loss_bestacc, test_score_bestacc = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_bestacc)
print("Accuracy on test set: ", test_score_bestacc)
max_model = np.argmax([test_score_last, test_score_bestloss, test_score_bestacc])
max_model
if max_model == 0:
model.load_weights("last_epoch.h5")
elif max_model == 1:
model.load_weights("best_aug_model_todate2loss.h5")
elif max_model == 2:
model.load_weights("best_aug_model_todate2acc.h5")
# Get predictions
preds = model.predict(testX, batch_size=16)
preds = np.argmax(preds, axis=-1)
# Original labels
orig_test_labels = np.argmax(testY, axis=-1)
print(orig_test_labels.shape)
print(preds.shape)
# Get the confusion matrix
cm = confusion_matrix(orig_test_labels, preds)
plt.figure()
plot_confusion_matrix(cm, figsize=(12, 8), hide_ticks=True, cmap=plt.cm.Blues)
plt.xticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.yticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.show()
print(classification_report(orig_test_labels, preds))
# As you can see from the confusion matrix and the precision/recall report, the model catches the vast majority of the patients with pneumonia (recall), but around a quarter of those diagnosed with pneumonia are actually healthy people (precision).
# This happens because the model can't precisely distinguish healthy from non-healthy individuals, so whenever in doubt it simply classifies them as "Pneumonia".
# When the model encounters a healthy individual, it basically seems to pick a diagnosis at random, as if tossing a coin.
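# For reference, the same two figures computed directly from these test predictions
# (assuming, as in the plot labels above, that label 1 = "Pneumonia"):
print("Recall of the model is", round(recall_score(orig_test_labels, preds), 2))
print("Precision of the model is", round(precision_score(orig_test_labels, preds), 2))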
# My model was very basic and there was no data augmentation, so nobody was expecting state-of-the-art results. I showed you this model for a reason though: next I apply some zoom augmentation to the data, let's see what happens.
# construct the training image generator for data augmentation
aug = ImageDataGenerator(
zoom_range=[0.9, 1.1],
# brightness_range=[0.,1.]
# horizontal_flip=True ,
# rotation_range=90,
# height_shift_range=0.15,
# width_shift_range=[-0.125,0.125]
)
aug.fit(trainX)
for X_batch, y_batch in aug.flow(trainX, trainY, batch_size=9):
# create a grid of 3x3 images
for i in range(0, 9):
plt.subplot(330 + 1 + i)
plt.imshow(X_batch[i])
# show the plot
plt.show()
break
EPOCHS = 50
BS = 64
def build_model():
input_img = Input(shape=(224, 224, 3), name="ImageInput")
x = Conv2D(16, (3, 3), activation="relu", padding="same", name="Conv1_1")(input_img)
x = Conv2D(16, (3, 3), activation="relu", padding="same", name="Conv1_2")(x)
x = MaxPooling2D((2, 2), name="pool1")(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same", name="Conv2_1")(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same", name="Conv2_2")(x)
x = MaxPooling2D((2, 2), name="pool2")(x)
x = Flatten(name="flatten")(x)
x = Dense(128, activation="relu", name="fc1")(x)
x = Dense(64, activation="relu", name="fc2")(x)
x = Dense(2, activation="softmax", name="fc3")(x)
    model = Model(inputs=input_img, outputs=x)
return model
model = build_model()
model.summary()
opt = Adam(lr=0.0001, decay=1e-5)
chkpt = ModelCheckpoint(
filepath="best_aug_model_todate2loss.h5",
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
chkpt2 = ModelCheckpoint(
filepath="best_aug_model_todate2acc.h5",
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
callbacks_list = [chkpt, chkpt2]
model.compile(loss="binary_crossentropy", metrics=["accuracy"], optimizer=opt)
H = model.fit_generator(
aug.flow(trainX, trainY, batch_size=BS),
callbacks=callbacks_list,
validation_data=(valX, valY),
steps_per_epoch=len(trainX) // BS,
epochs=EPOCHS,
)
print(H.history.keys())
# "Accuracy"
plt.plot(H.history["accuracy"])
plt.plot(H.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# "Loss"
plt.plot(H.history["loss"])
plt.plot(H.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
model.save_weights("last_epoch.h5")
test_loss_last, test_score_last = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_last)
print("Accuracy on test set: ", test_score_last)
model.load_weights("best_aug_model_todate2loss.h5")
test_loss_bestloss, test_score_bestloss = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_bestloss)
print("Accuracy on test set: ", test_score_bestloss)
model.load_weights("best_aug_model_todate2acc.h5")
test_loss_bestacc, test_score_bestacc = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_bestacc)
print("Accuracy on test set: ", test_score_bestacc)
max_model = np.argmax([test_score_last, test_score_bestloss, test_score_bestacc])
max_model
if max_model == 0:
model.load_weights("last_epoch.h5")
elif max_model == 1:
model.load_weights("best_aug_model_todate2loss.h5")
elif max_model == 2:
model.load_weights("best_aug_model_todate2acc.h5")
# Get predictions
preds = model.predict(testX, batch_size=16)
preds = np.argmax(preds, axis=-1)
# Original labels
orig_test_labels = np.argmax(testY, axis=-1)
print(orig_test_labels.shape)
print(preds.shape)
# Get the confusion matrix
cm = confusion_matrix(orig_test_labels, preds)
plt.figure()
plot_confusion_matrix(cm, figsize=(12, 8), hide_ticks=True, cmap=plt.cm.Blues)
plt.xticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.yticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.show()
print(classification_report(orig_test_labels, preds))
# As you can see, just randomly zooming the images in/out by at most 10% has drastically improved the results.
# The model now separates the two groups much more clearly, although there are still a lot of individuals wrongly diagnosed with pneumonia.
# I ran a lot of experiments and, in my experience, the following is (approximately) the optimal zoom range; adding too much zoom seemed to be disruptive.
# construct the training image generator for data augmentation
aug = ImageDataGenerator(
zoom_range=[0.75, 1.25],
# brightness_range=[0.,1.]
# horizontal_flip=True ,
# rotation_range=90,
# height_shift_range=0.15,
# width_shift_range=[-0.125,0.125]
)
aug.fit(trainX)
for X_batch, y_batch in aug.flow(trainX, trainY, batch_size=9):
# create a grid of 3x3 images
for i in range(0, 9):
plt.subplot(330 + 1 + i)
plt.imshow(X_batch[i])
# show the plot
plt.show()
break
# I extend the training to 100 epochs because, in my experience, it generally takes a bit more than 50 epochs before the model starts overfitting.
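# An alternative I didn't use here (EarlyStopping is already imported above):
# stop automatically once val_loss stops improving instead of fixing 100 epochs.
# Just a sketch; the patience value is a guess, not something I tuned.
# early_stop = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
# callbacks_list = [chkpt, chkpt2, early_stop]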
EPOCHS = 100
BS = 64
def build_model():
input_img = Input(shape=(224, 224, 3), name="ImageInput")
x = Conv2D(16, (3, 3), activation="relu", padding="same", name="Conv1_1")(input_img)
x = Conv2D(16, (3, 3), activation="relu", padding="same", name="Conv1_2")(x)
x = MaxPooling2D((2, 2), name="pool1")(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same", name="Conv2_1")(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same", name="Conv2_2")(x)
x = MaxPooling2D((2, 2), name="pool2")(x)
x = Flatten(name="flatten")(x)
x = Dense(128, activation="relu", name="fc1")(x)
x = Dense(64, activation="relu", name="fc2")(x)
x = Dense(2, activation="softmax", name="fc3")(x)
    model = Model(inputs=input_img, outputs=x)
return model
model = build_model()
model.summary()
opt = Adam(lr=0.0001, decay=1e-5)
chkpt = ModelCheckpoint(
filepath="best_aug_model_todate2loss.h5",
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
chkpt2 = ModelCheckpoint(
filepath="best_aug_model_todate2acc.h5",
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
callbacks_list = [chkpt, chkpt2]
model.compile(loss="binary_crossentropy", metrics=["accuracy"], optimizer=opt)
H = model.fit_generator(
aug.flow(trainX, trainY, batch_size=BS),
callbacks=callbacks_list,
validation_data=(valX, valY),
steps_per_epoch=len(trainX) // BS,
epochs=EPOCHS,
)
print(H.history.keys())
# "Accuracy"
plt.plot(H.history["accuracy"])
plt.plot(H.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# "Loss"
plt.plot(H.history["loss"])
plt.plot(H.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
model.save_weights("last_epoch.h5")
test_loss_last, test_score_last = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_last)
print("Accuracy on test set: ", test_score_last)
model.load_weights("best_aug_model_todate2loss.h5")
test_loss_bestloss, test_score_bestloss = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_bestloss)
print("Accuracy on test set: ", test_score_bestloss)
model.load_weights("best_aug_model_todate2acc.h5")
test_loss_bestacc, test_score_bestacc = model.evaluate(testX, testY, batch_size=32)
print("Loss on test set: ", test_loss_bestacc)
print("Accuracy on test set: ", test_score_bestacc)
max_model = np.argmax([test_score_last, test_score_bestloss, test_score_bestacc])
max_model
if max_model == 0:
model.load_weights("last_epoch.h5")
elif max_model == 1:
model.load_weights("best_aug_model_todate2loss.h5")
elif max_model == 2:
model.load_weights("best_aug_model_todate2acc.h5")
# Get predictions
preds = model.predict(testX, batch_size=16)
preds = np.argmax(preds, axis=-1)
# Original labels
orig_test_labels = np.argmax(testY, axis=-1)
print(orig_test_labels.shape)
print(preds.shape)
# Get the confusion matrix
cm = confusion_matrix(orig_test_labels, preds)
plt.figure()
plot_confusion_matrix(cm, figsize=(12, 8), hide_ticks=True, cmap=plt.cm.Blues)
plt.xticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.yticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.show()
print(classification_report(orig_test_labels, preds))
|
# ## Generating the training Data
# ### Loading the pictures
# Function to manage the input of Data
import glob
def data_path_from_name(name, all_names=False):
L = glob.glob(f"../**/{name}", recursive=True)
if len(L) > 1:
print(f"All path for {name} :")
print(L)
if all_names:
return L
print(f"Data path return {L[0]}")
return L[0]
from PIL import Image
picture_ims = [
Image.open(path) for path in data_path_from_name("Tile*.png", all_names=True)
]
tile_ims = [
Image.open(path) for path in data_path_from_name("Basic*.png", all_names=True)
]
from IPython.display import display
# Display the example of picture we want to shatter in tiles
def c_display(im, message="Size :"):
print(
"--------",
)
display(im)
print("________", message, im.size, end="\n\n")
for im in picture_ims:
c_display(im, message="Size of picture :")
# Display the types of tiles used for shattering the picture
for im in tile_ims:
c_display(im, message="Size of tile :")
# ### Shattering the pictures into 10x10 areas
def shatter(im=picture_ims[0]):
    l = []
    for i in range(20):
        for j in range(20):
            box = (i * 10, j * 10, (i + 1) * 10, (j + 1) * 10)
            l.append(im.crop(box))  # crop the 10x10 area from the picture passed in
    return l
i = 0
for im in shatter():
c_display(im, message="Size of area:")
i += 1
if i >= 3:
break
box = (190, 190, 200, 200)
picture_ims[0].crop(box)
# ### Set of tile & their rotations
rotations = [0, 90, 180, 270]
def set_tiles(imgs=tile_ims):
l = []
for tile in imgs:
for rotation in rotations:
l.append(tile.rotate(angle=rotation))
return l
for im in set_tiles():
display(im)
# ### Generators of Data :
import random
def data_gen(
im_data,
max_it=10,
inf_gen=False,
func_encode_input=lambda x: x,
func_encode_output=lambda x: x,
):
n = 0
while True:
if n >= max_it:
return
im = random.choice(im_data)
yield (func_encode_input(im), func_encode_output(im))
n += 1
if inf_gen:
max_it += 1
x, y = None, None
for x, y in data_gen(shatter(picture_ims[0]), max_it=3):
c_display(x)
c_display(y)
print("\n\n\n\n")
import numpy as np
import torch
from_numpy_to_tensor_float = lambda z: torch.from_numpy(z).to(torch.float).T
import copy
def from_im_to_array(im, t_bipolar=False):
data = copy.deepcopy(np.asarray(im.convert("L"), dtype="float"))
data = data.reshape((100, 1))
    # Encode the data as binary (0/1) or bipolar (-1/1) input
    data[data == 255] = 1  # pixels with value 255 mapped to 1
    if t_bipolar:
        data[data != 1] = -1  # every other pixel mapped to -1
    else:
        data[data != 1] = 0  # every other pixel mapped to 0
return data # Column vector
from functools import partial
# generator that iterates randomly over the areas of the picture
picture_area_gen = partial(
data_gen,
im_data=shatter(picture_ims[0]),
func_encode_input=lambda x: from_numpy_to_tensor_float(
from_im_to_array(x, t_bipolar=True)
),
func_encode_output=lambda y: from_numpy_to_tensor_float(from_im_to_array(y)),
)
# generator that iterates randomly over the tile types and their rotations
tile_gen = partial(
data_gen,
im_data=set_tiles(tile_ims),
func_encode_input=lambda x: from_numpy_to_tensor_float(from_im_to_array(x)),
func_encode_output=lambda y: from_numpy_to_tensor_float(from_im_to_array(y)),
)
for x, y in picture_area_gen(max_it=2):
print("x :", x, "\ny :", y, end="\n\n")
# ## Pytorch Neural Network
# Let's plug our problem into a pytorch NN
import torch
import torch.nn as nn
import torch.nn.functional as F
# ### NN architecture
hidden_1 = 30
hidden_2 = 20
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(x.size(1), hidden_1)
self.fc2 = nn.Linear(hidden_1, hidden_2)
self.fc3 = nn.Linear(hidden_2, y.size(1))
def forward(self, x):
x = F.sigmoid(self.fc1(x))
x = F.sigmoid(self.fc2(x))
x = F.sigmoid(self.fc3(x))
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net, end="\n\n")
for (
param
) in (
net.parameters()
):  # net.parameters() yields the learnable parameters
print(param.size())
# ### Learning method
class NN_trainer:
total = 0
def _weighted_generator(self, l_gen, l_weight=None, max_it=1000):
"""providing a list of generators it create a balanced or weigted generator of the data labeled with integer"""
l_gen = [gen(inf_gen=True) for gen in l_gen]
n = 0
if not len(l_gen) == 1:
l_weight = [1 for _ in range(len(l_gen))] if l_weight is None else l_weight
norm = lambda l: [e / sum(l) for e in l]
pb = norm(l_weight)
next_value = (
lambda i=0, s=0: (next(l_gen[i]), i)
if np.random.rand() < pb[i] / (1 - s)
else next_value(i + 1, s + pb[i])
)
while True:
if n >= max_it:
return
try:
yield next_value()
except:
                    # Extremely unlikely fallback (theoretically zero probability)
                    print(
                        "Check that max iterations per generator is lower than max_it"
                    )
yield next(l_gen[-1]), (len(l_gen) - 1)
n += 1
else:
while True:
if n >= max_it:
return
yield (next(l_gen[0]), 0)
n += 1
def learn(
self,
net,
criterion,
optimizer,
n_epoch=10,
epoch_batch=100,
data_generators=[],
weight_data_gens=None,
):
total_bp = self.total
for e in range(n_epoch):
running_loss = 0
            # Build a random iterator over the data
data_epoch = self._weighted_generator(
l_gen=data_generators, l_weight=weight_data_gens, max_it=epoch_batch
)
for (x, y), indice_gen in data_epoch:
# Calculate output
output = net(x)
# Compute the loss
loss = criterion(output, y)
running_loss += loss.item()
# zero the parameter's gradient data
optimizer.zero_grad()
# Backpropagate the error
loss.backward()
optimizer.step()
total_bp += 1
print(
f"epoch : {e} loss {round(running_loss/epoch_batch,3)} total : {total_bp}"
)
self.total = total_bp
net = Net()
trainer = NN_trainer()
# Let's first begin by initializing the weights for an autoassociative network (matching the picture with itself). Let's try to find the best architecture for it.
import torch.optim as optim
trainer.learn(
net=net,
criterion=nn.MSELoss(reduction="sum"),
optimizer=optim.SGD(net.parameters(), lr=0.01, momentum=0.9),
n_epoch=4,
epoch_batch=5000,
data_generators=[picture_area_gen],
weight_data_gens=None,
)
# ### Reconstruct the Image :
def rounding(output_crop_data, tile_ims=tile_ims):
num_of_matching_pixels = lambda c, t: c[
c == np.asarray(t.convert("L"), dtype="float")
].shape[0]
tiles_and_rotations = set_tiles(tile_ims)
tiles_and_rotations = sorted(
tiles_and_rotations, key=lambda t: num_of_matching_pixels(output_crop_data, t)
)
return np.asarray(tiles_and_rotations[-1].convert("L"), dtype="float")
def reconstruct(net=net, picture_im=picture_ims[0], with_rounding=False):
encode_input = lambda im: from_numpy_to_tensor_float(
from_im_to_array(im, t_bipolar=True)
)
decode_output = lambda t: ((t.detach().numpy() > 0.5) * 255).reshape(10, 10)
    # Identity "net" (uncomment to bypass the network and just encode/decode)
    # net = lambda t: t
im_data = np.zeros((200, 200), dtype="uint8")
for i in range(20):
for j in range(20):
crop_im = picture_im.crop((i * 10, j * 10, (i + 1) * 10, (j + 1) * 10))
data = decode_output(net(encode_input(crop_im)))
if with_rounding:
data = rounding(data)
im_data[j * 10 : (j + 1) * 10, i * 10 : (i + 1) * 10] = data
num_of_matching_pixels = im_data[
im_data == np.asarray(picture_im.convert("L"), dtype="float")
].shape[0]
print("total matching pixel :", num_of_matching_pixels)
return im_data
Image.fromarray(reconstruct())
# Let's see what we get by rounding each area to the closest of the allowed tiles.
Image.fromarray(reconstruct(with_rounding=True))
# ### Let's now define a specific loss to match our tile problem
min_set_loss = lambda y, set_desired_output, criterion: sorted(
[criterion(t, y) for t in set_desired_output], key=lambda loss: loss.item()
)[0]
set_desired_output = [
from_numpy_to_tensor_float(from_im_to_array(s)) for s in set_tiles(tile_ims)
]
trainer.learn(
net=net,
criterion=lambda y, d: min_set_loss(
y, set_desired_output=set_desired_output, criterion=nn.MSELoss(reduction="sum")
),
optimizer=optim.SGD(net.parameters(), lr=0.01, momentum=0.9),
n_epoch=4,
epoch_batch=5000,
data_generators=[picture_area_gen],
weight_data_gens=None,
)
Image.fromarray(reconstruct())
|
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
train = "/kaggle/input/benetech-making-graphs-accessible/train/images"
# sub_class = os.listdir(src_path)
labels = "/kaggle/input/benetech-making-graphs-accessible/train/annotations"
test = "/kaggle/input/benetech-making-graphs-accessible/test/images"
image_data = []
for image_name in os.listdir(train):
    image = Image.open(os.path.join(train, image_name))
    image = image.convert("L")  # convert to greyscale
    image = image.resize((28, 28))
    image_data.append(np.asarray(image))  # store as a numpy array
image_data
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
from keras import Sequential
from keras.layers import Dense
cols = [
"Age",
"BusinessTravel",
"Department",
"DistanceFromHome",
"Lyon_EducationTemp",
"EducationField",
"Gender",
"JobRole",
"MaritalStatus",
"MonthlyIncome",
"NumCompaniesWorked",
"PercentSalaryHike",
"StandardHours",
"StockOptionLevel",
"TotalWorkingYears",
"TrainingTimesLastYear",
"YearsAtCompany",
"YearsSinceLastPromotion",
"YearsWithCurrManager",
"Attrition",
]
dtypes = {
"Age": np.int32,
"BusinessTravel": np.unicode_,
"Department": np.unicode_,
"DistanceFromHome": np.float32,
"Lyon_EducationTemp": np.float64,
"EducationField": np.unicode_,
"Gender": np.unicode_,
"JobRole": np.unicode,
"MaritalStatus": np.unicode,
"MonthlyIncome": np.float64,
"NumCompaniesWorked": np.float64,
"PercentSalaryHike": np.float64,
"StandardHours": np.float64,
"StockOptionLevel": np.float64,
"TotalWorkingYears": np.float64,
"TrainingTimesLastYear": np.float64,
"YearsAtCompany": np.float64,
"YearsSinceLastPromotion": np.float64,
"YearsWithCurrManager": np.float64,
"Attrition": np.int32,
}
raw_data = pd.read_csv(
"../input/train-data/training_data-2.csv",
names=cols,
decimal=".",
sep=",",
skiprows=[0],
index_col=False,
dtype=dtypes,
)
def one_hot_encode(raw_data, categorial_columns):
for categorial_column in categorial_columns:
raw_data = pd.concat(
[
raw_data,
pd.get_dummies(raw_data[categorial_column], prefix=categorial_column),
],
axis=1,
)
raw_data.drop([categorial_column], axis=1, inplace=True)
return raw_data
categorial_columns = [
"BusinessTravel",
"Department",
"EducationField",
"Gender",
"JobRole",
"MaritalStatus",
]
raw_data = one_hot_encode(raw_data, categorial_columns)
attrition = raw_data["Attrition"]
raw_data.drop(["Attrition"], axis=1, inplace=True)
raw_data = pd.concat([raw_data, attrition], axis=1)
raw_data.describe(include="all")
total_experience = raw_data["TotalWorkingYears"] + raw_data["TrainingTimesLastYear"]
raw_data = pd.concat([raw_data, total_experience.rename("TotalExperience")], axis=1)
X = raw_data.iloc[:, 0:39]
y = raw_data.iloc[:, 39]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.7)
sc = StandardScaler()
X = sc.fit_transform(X)  # (not used further: train and test are scaled separately below)
from sklearn.model_selection import train_test_split
# X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(
n_estimators=200, criterion="entropy", random_state=0
)
classifier.fit(X_train, y_train)
Y_Pred = classifier.predict(X_test)
from sklearn.metrics import roc_auc_score
y_true = y_test
roc_auc_score(y_true, Y_Pred)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, Y_Pred)
from matplotlib.colors import ListedColormap
# Note: this decision-boundary template assumes a classifier trained on exactly two
# features; with the 39-feature classifier above, predict() on the 2-D mesh below
# would fail unless the model is refit on two selected columns first.
X_Set, Y_Set = X_train, y_train
X1, X2 = np.meshgrid(
np.arange(start=X_Set[:, 0].min() - 1, stop=X_Set[:, 0].max() + 1, step=0.01),
np.arange(start=X_Set[:, 1].min() - 1, stop=X_Set[:, 1].max() + 1, step=0.01),
)
plt.contourf(
X1,
X2,
classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha=0.75,
cmap=ListedColormap(("red", "green")),
)
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(Y_Set)):
plt.scatter(
X_Set[Y_Set == j, 0],
X_Set[Y_Set == j, 1],
c=ListedColormap(("red", "green"))(i),
label=j,
)
plt.title("Random Forest Classifier (Training set)")
# plt.xlabel('Age')
# plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
classifier = Sequential()
# First Hidden Layer
classifier.add(
Dense(4, activation="relu", kernel_initializer="random_normal", input_dim=39)
)
# Second Hidden Layer
classifier.add(Dense(4, activation="relu", kernel_initializer="random_normal"))
# Output Layer
classifier.add(Dense(1, activation="sigmoid", kernel_initializer="random_normal"))
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
classifier.fit(X_train, y_train, batch_size=15, epochs=100)
eval_model = classifier.evaluate(X_train, y_train)
eval_model
cols = [
"Age",
"BusinessTravel",
"Department",
"DistanceFromHome",
"Lyon_EducationTemp",
"EducationField",
"Gender",
"JobRole",
"MaritalStatus",
"MonthlyIncome",
"NumCompaniesWorked",
"PercentSalaryHike",
"StandardHours",
"StockOptionLevel",
"TotalWorkingYears",
"TrainingTimesLastYear",
"YearsAtCompany",
"YearsSinceLastPromotion",
"YearsWithCurrManager",
"Id",
]
dtypes = {
"Age": np.int32,
"BusinessTravel": np.unicode_,
"Department": np.unicode_,
"DistanceFromHome": np.float32,
"Lyon_EducationTemp": np.float64,
"EducationField": np.unicode_,
"Gender": np.unicode_,
"JobRole": np.unicode,
"MaritalStatus": np.unicode,
"MonthlyIncome": np.float64,
"NumCompaniesWorked": np.float64,
"PercentSalaryHike": np.float64,
"StandardHours": np.float64,
"StockOptionLevel": np.float64,
"TotalWorkingYears": np.float64,
"TrainingTimesLastYear": np.float64,
"YearsAtCompany": np.float64,
"YearsSinceLastPromotion": np.float64,
"YearsWithCurrManager": np.float64,
"Id": np.int32,
}
test_data = pd.read_csv(
"../input/test-data/test_data.csv",
names=cols,
decimal=".",
sep=",",
skiprows=[0],
index_col=False,
dtype=dtypes,
)
test_data = one_hot_encode(test_data, categorial_columns)
id_test_data = test_data["Id"]
test_data.drop(["Id"], axis=1, inplace=True)
test_data.head()
y_pred = classifier.predict(test_data)
probs = []
for pred in y_pred:
# print(pred[0])
probs.append(pred[0])
plt.style.use("ggplot")
s = pd.Series(probs)
s.plot.kde()
level_out = np.quantile(probs, 0.84)
print(level_out)
result = []
for prob in probs:
    # use the 84th-percentile cut-off computed above instead of a hard-coded value
    if prob > level_out:
        result.append(1)
    else:
        result.append(0)
result_csv = pd.concat([pd.DataFrame(id_test_data), pd.DataFrame(probs)], axis=1)
result_csv = pd.concat([result_csv, pd.DataFrame(result)], axis=1)
result_csv.to_csv("out.csv", encoding="utf-8", index=False)
result_csv.describe(include="all")
|
# # We are using the Store Data dataset (source not available). This dataset contains a total of 7501 transaction records, where every record consists of the list of items sold in a single transaction.
# **Import libraries and data**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv(
"/kaggle/input/association-rule-learningapriori/Market_Basket_Optimisation.csv",
header=None,
)
data
data.info()
# **Converting dataframe into list of lists**
transactions = [[str(data.values[i, j]) for j in range(0, 20)] for i in range(0, 7501)]
from apyori import apriori
rules = apriori(
transactions, min_support=0.0045, min_confidence=0.2, min_lift=3, min_length=2
)
result = list(rules)
result
for item in result:
    # item[0] is the frozenset of items involved in the rule (base item + added item)
    pair = item[0]
    items = [x for x in pair]
    print("Rule: " + items[0] + " -> " + items[1])
    # item[1] is the support of the itemset
    print("Support: " + str(item[1]))
    # item[2] is the list of ordered statistics; for the first one,
    # index 2 is the confidence and index 3 is the lift
    print("Confidence: " + str(item[2][0][2]))
    print("Lift: " + str(item[2][0][3]))
    print("=====================================")
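# The same information collected into a DataFrame so the rules can be sorted
# (a small sketch; the column names are my own choice, not from apyori):
rules_df = pd.DataFrame(
    [
        {
            "items": ", ".join(item[0]),
            "support": item[1],
            "confidence": item[2][0][2],
            "lift": item[2][0][3],
        }
        for item in result
    ]
)
rules_df.sort_values(by="lift", ascending=False).head(10)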
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
bottle = pd.read_csv("/kaggle/input/calcofi/bottle.csv")
bottle.columns
degree = bottle.T_degC
degree
salt = bottle.Salnty
data = pd.concat([degree, salt], axis=1, ignore_index=True)
data.rename(columns={0: "sicaklik", 1: "tuzluluk"}, inplace=True)
data
data.info()
data.describe().T
data.isna().sum()
data.dropna(inplace=True)
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
data.plot(kind="scatter", x="tuzluluk", y="sicaklik", alpha=0.5, color="blue")
plt.xlabel("tuzluluk")
plt.ylabel("sicaklik")
plt.show()
x = data[["tuzluluk"]]
y = data["sicaklik"]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.33, random_state=0
)
model = LinearRegression().fit(x_train, y_train)  # define and fit the model
y_pred = model.predict(x_test)
y_pred
from sklearn.metrics import mean_squared_error, r2_score
r2_score(y_test, y_pred)
model.predict([[33.4400]])
f, ax = plt.subplots(figsize=(18, 18))  # set the figure size
sns.heatmap(bottle.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax)
# annot=True writes the values inside the cells
# linewidths sets the thickness of the cell borders
# fmt sets the number format of the annotations
plt.show()
aray = np.arange(len(y_test))
plt.plot(aray, y_pred, color="red")
plt.plot(aray, y_test, color="blue", alpha=0.5)
plt.show()
# Plot outputs
plt.plot(x_test, y_test, color="black")
plt.plot(y_test, y_pred, color="blue", linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# from sklearn.preprocessing import PolynomialFeatures
# polynomial_regression = PolynomialFeatures(degree = 100) # degree ust ifade eder.
# x_polynomial = polynomial_regression.fit_transform(x)
# linear_regression2 = LinearRegression()
# linear_regression2.fit(x_polynomial,y)
# y_head2 = linear_regression2.predict(x_polynomial)
# plt.plot(x_polynomial, y_head2, color="red", label="poly")
# r2_score(y_head2, y)  # part of the commented-out polynomial experiment above
|
import os
import pandas as pd
import numpy as np
from sklearn.linear_model import Ridge, Lasso, LinearRegression
from sklearn.model_selection import KFold, RandomizedSearchCV
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.ensemble import GradientBoostingRegressor
from mlxtend.regressor import StackingCVRegressor
from sklearn.svm import SVR
from sklearn.preprocessing import RobustScaler
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax, zscore
from multiprocessing import cpu_count
from lightgbm import LGBMRegressor
import matplotlib.pyplot as plt
data_dir = "/kaggle/input/house-prices-advanced-regression-techniques/"
p_train = pd.read_csv(os.path.join(data_dir, "train.csv"), index_col=0)
p_test = pd.read_csv(os.path.join(data_dir, "test.csv"), index_col=0)
p_sample = pd.read_csv(os.path.join(data_dir, "sample_submission.csv"))
from scipy import stats
# # Remove outliers (z-score >= 5 for GrLivArea, roughly 4500 sq ft and more)
p_train = p_train.loc[(np.abs(stats.zscore(p_train.GrLivArea)) < 5)]
# Train + Test features
X_train = p_train.drop("SalePrice", axis=1)
X_test = p_test
X = pd.concat([X_train, X_test])
# Get labels (logarithmic due to distribution)
y_train = np.log(p_train.loc[:, "SalePrice"])
# Impute missing values
# Categorical (big number of nans (79+))
# NaNs here indicate lack of something (no pool, no basement, etc)
cols = [
"PoolQC",
"MiscFeature",
"Alley",
"Fence",
"FireplaceQu",
"GarageCond",
"GarageQual",
"GarageFinish",
"GarageType",
"BsmtCond",
"BsmtExposure",
"BsmtQual",
"BsmtFinType2",
"BsmtFinType1",
]
X[cols] = X[cols].fillna("None")
# Impute using the Neighborhood mode (small number of NaNs)
cols = [
"MasVnrType",
"MSZoning",
"Utilities",
"Exterior1st",
"Exterior2nd",
"SaleType",
"Electrical",
"KitchenQual",
"Functional",
]
X[cols] = X.groupby("Neighborhood")[cols].transform(lambda x: x.fillna(x.mode()[0]))
# Numerical
cols = [
"GarageYrBlt",
"MasVnrArea",
"BsmtHalfBath",
"BsmtFullBath",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"GarageCars",
]
X[cols] = X[cols].fillna(0)
# Impute using Neighborhoods median
cols = ["GarageArea", "LotFrontage"]
X[cols] = X.groupby("Neighborhood")[cols].transform(lambda x: x.fillna(x.median()))
# FE
X["TotalSF"] = X["GrLivArea"] + X["TotalBsmtSF"] # Total square footage
X["TotalPorchSF"] = (
X["OpenPorchSF"] + X["EnclosedPorch"] + X["3SsnPorch"] + X["ScreenPorch"]
) # Total porch square footage
X["TotalBath"] = (
X["FullBath"] + X["BsmtFullBath"] + 0.5 * (X["BsmtHalfBath"] + X["HalfBath"])
) # Total baths
# Categorise categorial variables
# YrSold is also categorical to provide flexibility (esp. due to 2008 financial crisis)
cols = ["MSSubClass", "YrSold"]
X[cols] = X[cols].astype("category")
# Represent months as x,y coordinates on a circle to capture the seasonality better
# http://blog.davidkaleko.com/feature-engineering-cyclical-features.html
if "MoSold" in X:
X["SinMoSold"] = np.sin(2 * np.pi * X["MoSold"] / 12)
X["CosMoSold"] = np.cos(2 * np.pi * X["MoSold"] / 12)
X = X.drop("MoSold", axis=1)
# Transform highly skewed features using boxcox1p and boxcox_normmax, and scale features using RobustScaler.
skew = X.skew(numeric_only=True).abs()
cols = skew[skew > 1].index
print(skew[skew > 1].sort_values(ascending=False))
for col in cols:
X[col] = boxcox1p(X[col], boxcox_normmax(X[col] + 1))
cols = X.select_dtypes(np.number).columns
X[cols] = RobustScaler().fit_transform(X[cols])
# Convert all categorical variables into dummy variables.
X = pd.get_dummies(X)
temp = X[
X.select_dtypes("object").columns
].nunique() # TODO High cardinalities to binary
temp[temp > 5]
for k, v in temp[temp > 5].items():
print(k, ":", X[k].unique())
# Recover train/test features
X_train = X.loc[p_train.index]
X_test = X.loc[p_test.index]
# # To remove outliers, we fit a linear model to the training data and remove examples with a studentized residual greater than 3.
# residuals = y_train - LinearRegression().fit(X_train, y_train).predict(X_train)
# outliers = residuals[np.abs(zscore(residuals)) > 3].index
# print(f'Removed {len(outliers)} outliers')
# X_train = X_train.drop(outliers)
# y_train = y_train.drop(outliers)
# Set up CV strategy (5-folds, RMSE)
kf = KFold(n_splits=5, random_state=0, shuffle=True)
rmse = lambda y, y_pred: np.sqrt(mean_squared_error(y, y_pred))
scorer = make_scorer(rmse, greater_is_better=False)
# Define hyperparam optimisation using random search
def random_search(model, grid, n_iter=100):
n_jobs = max(cpu_count() - 2, 1)
    search = RandomizedSearchCV(
        model,
        grid,
        n_iter=n_iter,
        scoring=scorer,
        n_jobs=n_jobs,
        cv=kf,
        random_state=0,
        verbose=True,
    )
return search.fit(X_train, y_train)
# Optimise various models (Ridge, Lasso, SVR, LGBM, GBM)
print("Ridge")
ridge_search = random_search(Ridge(), {"alpha": np.logspace(-1, 2, 500)})
print("Lasso")
lasso_search = random_search(Lasso(), {"alpha": np.logspace(-5, -1, 500)})
print("Support Vector Machines")
svr_search = random_search(
SVR(),
{
"C": np.arange(1, 100),
"gamma": np.linspace(0.00001, 0.001, 50),
"epsilon": np.linspace(0.01, 0.1, 50),
},
)
print("LGBM")
lgbm_search = random_search(
LGBMRegressor(n_estimators=2000, max_depth=3),
{
"colsample_bytree": np.linspace(0.2, 0.7, 6),
"learning_rate": np.logspace(-3, -1, 100),
},
)
print("GBM")
gbm_search = random_search(
GradientBoostingRegressor(n_estimators=2000, max_depth=3),
{
"max_features": np.linspace(0.2, 0.7, 6),
"learning_rate": np.logspace(-3, -1, 100),
},
)
# Optimise stacked ensemble of the best models
models = [
search.best_estimator_
for search in [ridge_search, lasso_search, svr_search, lgbm_search, gbm_search]
]
stack_search = random_search(
StackingCVRegressor(models, Ridge(), cv=kf),
{"meta_regressor__alpha": np.logspace(-3, -2, 500)},
n_iter=20,
)
models.append(stack_search.best_estimator_)
preds = [model.predict(X_test) for model in models]
# Average all models (10% weight each) + ensemble (50% weight)
preds = np.average(preds, axis=0, weights=[0.1] * 5 + [0.5] * 1)
# Create submission
submission = pd.DataFrame({"Id": p_sample["Id"], "SalePrice": np.exp(preds)})
submission.to_csv("submission.csv", index=False)
|
# Data Analysis for Business Project
# Predict the House Price
# Fabrizio Rocco and Diego Croci
# 
# The aim of this project is to build a Machine Learning model in order to predict the appropriate price of a house given a set of features.
# We decided to divide our analysis into 5 parts:
# - First look at the problem and general understanding of the variables;
# - Study the main variable ("SalePrice");
# - Study how the main variable is related to the other feature;
# - Data Preprocessing: make some cleaning on our training data set in order to better visualize and estimate;
# - Build a model in order to predict SalePrice
#
# ***
# #### Importing libraries and playing a bit with our dataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from sklearn.model_selection import (
cross_val_score,
train_test_split,
KFold,
cross_val_predict,
)
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.linear_model import (
LinearRegression,
RidgeCV,
Lasso,
ElasticNetCV,
BayesianRidge,
LassoLarsIC,
)
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
import math
data_train = pd.read_csv("train.csv")
data_train
data_test = pd.read_csv("test.csv")
data_test
y = data_train.SalePrice
train_without_response = data_train[data_train.columns.difference(["SalePrice"])]
result = pd.concat([train_without_response, data_test], ignore_index=True)
result
result.head()
result.tail()
result.info()
result.shape  # number of rows and columns
result.columns
result.describe()
# # Our initial considerations
# Looking at our columns, we found some variables which could have a high correlation with our main variable SalePrice:
# - __Year Built__
# - __TotalBsmtSF__
# - __GrLivArea__
# - __PoolArea__
# These are variables related to the condition of the building, its age and some "extra luxury" features such as __PoolArea__.
# In principle they are all characteristics which can raise the price of a house.
# Another idea we considered was to focus mainly on the "inner" part of the house, such as __KitchenQual__ or __CentralAir__, but these may be too generic features that almost every house has.
# Now, with these prior hypotheses, let's dive into the "__SalePrice__" analysis.
# # SalePrice Analysis
y.describe()
sns.distplot(data_train["SalePrice"])
print("Skewness coeff. is: %f" % data_train["SalePrice"].skew())
print("Kurtosis coeff. is: %f" % data_train["SalePrice"].kurt())
# These measures of symmetry are useful to understand the shape of the distribution of our main variable.
# Our distribution is highly skewed and presents a longer tail on the right.
# The high value of kurtosis indicates a higher probability of outlier values.
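# Just as an illustration of how much a log transform would reduce this skewness
# (we keep SalePrice on its original scale for the rest of the analysis):
log_price = np.log1p(data_train["SalePrice"])
print("Skewness after log1p: %f" % log_price.skew())
print("Kurtosis after log1p: %f" % log_price.kurt())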
# # The other variables
data_year_trend = pd.concat([data_train["SalePrice"], data_train["YearBuilt"]], axis=1)
data_year_trend.plot.scatter(x="YearBuilt", y="SalePrice", ylim=(0, 800000))
data_bsmt_trend = pd.concat(
[data_train["SalePrice"], data_train["TotalBsmtSF"]], axis=1
)
data_bsmt_trend.plot.scatter(x="TotalBsmtSF", y="SalePrice", ylim=(0, 800000))
data_GrLivArea_trend = pd.concat(
[data_train["SalePrice"], data_train["GrLivArea"]], axis=1
)
data_GrLivArea_trend.plot.scatter(x="GrLivArea", y="SalePrice", ylim=(0, 800000))
data_PoolArea_trend = pd.concat(
[data_train["SalePrice"], data_train["PoolArea"]], axis=1
)
data_PoolArea_trend.plot.scatter(x="PoolArea", y="SalePrice", ylim=(0, 800000))
data = pd.concat([data_train["SalePrice"], data_train["OverallQual"]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x="OverallQual", y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)
# These analyses show that our initial guesses were quite correct.
# __Year Built__ seems to have a slight relation with our main variable: as we thought, people tend to buy newer houses.
# __TotalBsmtSF__ and __GrLivArea__, instead, seem to have a stronger relation with __SalePrice__.
# # Heatmap Correlation Matrix
corr_matrix = result.corr()
f, ax1 = plt.subplots(figsize=(12, 9))
ax1 = sns.heatmap(corr_matrix, vmax=0.9)
# Using this kind of plot we can detect whether there's collinearity between 2 or more variables.
# In particular, there are some white blocks which have to be analyzed:
# 1. __GarageYrBlt__ and __YearBuilt__
# 2. __TotRmsAbvGrd__ and __GrLivArea__
# 3. __TotalBsmtSF__ and __X1stFlrSF__
# 4. __GarageArea__ and __GarageCars__
#
# Knowing the meaning of these pairs of variables, it is easy to see the collinearity in pairs "1", "3" and "4".
# For pair "2" the relation is slightly more subtle, because the living area and the total number of rooms are not always related.
# For example, two houses with the same living area can be inhabited by different numbers of people, so the actual number and layout of the rooms can differ.
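# To put some numbers on these pairs (a quick check on the combined frame; the
# guard simply skips a pair if a column name differs in this dataset):
pairs = [
    ("GarageYrBlt", "YearBuilt"),
    ("TotRmsAbvGrd", "GrLivArea"),
    ("TotalBsmtSF", "X1stFlrSF"),
    ("GarageArea", "GarageCars"),
]
for a, b in pairs:
    if a in result.columns and b in result.columns:
        print(a, "vs", b, ": correlation =", round(result[a].corr(result[b]), 2))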
# Let's restrict our matrix a bit more.
corrmat = data_train.corr()
top_corr_features = corrmat.index[abs(corrmat["SalePrice"]) > 0.5]
plt.figure(figsize=(9, 9))
g = sns.heatmap(data_train[top_corr_features].corr(), annot=True, cmap="RdYlGn")
var = data_train[data_train.columns[1:]].corr()["SalePrice"][:]
var
sns.set()
cols = [
"SalePrice",
"OverallQual",
"GrLivArea",
"GarageCars",
"TotalBsmtSF",
"FullBath",
"YearBuilt",
]
sns.pairplot(data_train[cols], height=2.5)
plt.show()
# # Number of null values
# Now our goal is to deal with the null values and to understand, for each one, what we can do: maybe we can replace them, or maybe we can simply drop them.
total_null = (
result.isnull().sum().sort_values(ascending=False)
) # First sum and order all null values for each variable
percentage = (result.isnull().sum() / result.isnull().count()).sort_values(
ascending=False
) # Get the percentage
missing_data = pd.concat([total_null, percentage], axis=1, keys=["Total", "Percentage"])
missing_data.head(20)
# We have to make some considerations.
# Let's divide our null values into 2 groups:
# - __PoolQC__, __MiscFeature__, __Alley__, __Fence__, __FireplaceQu__ and __LotFrontage__.
# These are all variables with many null values. As a general rule, variables with more than 15% missing values are discouraged.
# They are not vital information for someone who wants to buy a house (such as __FireplaceQu__), and, for example, many houses simply don't have an __Alley__ access. We can drop them.
# The second group:
# - the __GarageX__ properties
# If we look carefully, all of these variables have the same number of null values! This could be a strange coincidence, but more likely they all refer to the same Garage information, where "NA" means "there is no garage". The same happens for the __BsmtX__ and __MasVnrX__ variables, which means we will have to deal with them later on.
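# A quick check of that claim: the Garage-related columns should be missing on
# (roughly) the same rows, so their missing-value counts should line up.
garage_cols = [c for c in result.columns if c.startswith("Garage")]
print(result[garage_cols].isnull().sum())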
result = result.drop(
    (missing_data[missing_data["Percentage"] > 0.15]).index, axis=1
)  # Drop all variables with more than 15% missing values
# data_train = data_train.drop(data_train.loc[data_train['Electrical'].isnull()].index) #Delete the single null value in Electrical
result.isnull().sum()
# # Split categorical and numerical variables
del result["KitchenAbvGr"]
del result["YrSold"]
del result["MoSold"]
del result["MiscVal"]
del result["ScreenPorch"]
del result["X3SsnPorch"]
del result["BsmtHalfBath"]
del result["LowQualFinSF"]
del result["OverallCond"]
del result["EnclosedPorch"]
del result["MSSubClass"]
del result["X1stFlrSF"]
del result["YearBuilt"]
del result["YearRemodAdd"]
del result["BsmtFinSF2"] # 0 variance
del result["BsmtFinSF1"] # Because BsmtFinSF1 + BsmtUnfSF + BsmtFinSF2 = TotalBsmtSF
del result["BsmtUnfSF"] # Because BsmtFinSF1 + BsmtUnfSF + BsmtFinSF2 = TotalBsmtSF
del result["PoolArea"] # 0 variance
del result[
"GarageYrBlt"
] # Dropped for the same reason of YearBuilt, it might mislead our predictions
del result["GarageCond"] # 0 Variance
del result["GarageArea"] # High Correlation
del result["TotRmsAbvGrd"] # High Correlation
result
result["ExterCond"].value_counts()
del result["Street"]
del result["LandContour"]
del result["Utilities"]
del result["LandSlope"]
del result["Condition2"]
del result["RoofMatl"]
del result["BsmtFinType2"] # 0 variance
del result["Electrical"] # 0 Variance
del result["Condition1"]  # Too many levels (version 2)
del result["BldgType"]  # (version 2)
del result["HouseStyle"]  # (version 2)
del result["Exterior1st"]  # (version 2)
del result["Exterior2nd"]  # (version 2)
del result["Foundation"]  # (version 2)
del result["CentralAir"] # 0 variance
del result["Functional"] # 0 variance
del result["SaleType"] # 0 variance
del result["SaleCondition"] # 0 variance
del result["RoofStyle"] # 0 variance
result
result.shape
# Here we encode ExterQual in a rank
result.loc[result["ExterQual"] == "Ex", "ExterQual"] = 5
result.loc[result["ExterQual"] == "Gd", "ExterQual"] = 4
result.loc[result["ExterQual"] == "TA", "ExterQual"] = 3
result.loc[result["ExterQual"] == "Fa", "ExterQual"] = 2
result.loc[result["ExterQual"] == "Po", "ExterQual"] = 1
result["ExterQual"]
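# The same ranking could be written more compactly with a mapping dict
# (just a sketch; the explicit version is kept below for the remaining columns):
# quality_map = {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1}
# result["ExterQual"] = result["ExterQual"].map(quality_map)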
# Here we encode ExterCond in Rank
result.loc[result["ExterCond"] == "Ex", "ExterCond"] = 5
result.loc[result["ExterCond"] == "Gd", "ExterCond"] = 4
result.loc[result["ExterCond"] == "TA", "ExterCond"] = 3
result.loc[result["ExterCond"] == "Fa", "ExterCond"] = 2
result.loc[result["ExterCond"] == "Po", "ExterCond"] = 1
result["ExterCond"]
# Here we encode HeatingQC in Rank
result.loc[result["HeatingQC"] == "Ex", "HeatingQC"] = 5
result.loc[result["HeatingQC"] == "Gd", "HeatingQC"] = 4
result.loc[result["HeatingQC"] == "TA", "HeatingQC"] = 3
result.loc[result["HeatingQC"] == "Fa", "HeatingQC"] = 2
result.loc[result["HeatingQC"] == "Po", "HeatingQC"] = 1
result["HeatingQC"]
# Here we encode BsmtFinType1 in Rank
result.loc[result["BsmtFinType1"] == "GLQ", "BsmtFinType1"] = 6
result.loc[result["BsmtFinType1"] == "ALQ", "BsmtFinType1"] = 5
result.loc[result["BsmtFinType1"] == "BLQ", "BsmtFinType1"] = 4
result.loc[result["BsmtFinType1"] == "Rec", "BsmtFinType1"] = 3
result.loc[result["BsmtFinType1"] == "LwQ", "BsmtFinType1"] = 2
result.loc[result["BsmtFinType1"] == "Unf", "BsmtFinType1"] = 1
result["BsmtFinType1"].fillna(0, inplace=True)
result["BsmtFinType1"]
# Here we encode BsmtCond in Rank
result.loc[result["BsmtCond"] == "Ex", "BsmtCond"] = 5
result.loc[result["BsmtCond"] == "Gd", "BsmtCond"] = 4
result.loc[result["BsmtCond"] == "TA", "BsmtCond"] = 3
result.loc[result["BsmtCond"] == "Fa", "BsmtCond"] = 2
result.loc[result["BsmtCond"] == "Po", "BsmtCond"] = 1
result["BsmtCond"].fillna(0, inplace=True)
result["BsmtCond"]
# Here we encode BsmtQual in Rank
result.loc[result["BsmtQual"] == "Ex", "BsmtQual"] = 5
result.loc[result["BsmtQual"] == "Gd", "BsmtQual"] = 4
result.loc[result["BsmtQual"] == "TA", "BsmtQual"] = 3
result.loc[result["BsmtQual"] == "Fa", "BsmtQual"] = 2
result.loc[result["BsmtQual"] == "Po", "BsmtQual"] = 1
result["BsmtQual"].fillna(0, inplace=True)
result["BsmtQual"]
# Here we encode KitchenQual in Rank
result.loc[result["KitchenQual"] == "Ex", "KitchenQual"] = 4
result.loc[result["KitchenQual"] == "Gd", "KitchenQual"] = 3
result.loc[result["KitchenQual"] == "TA", "KitchenQual"] = 2
result.loc[result["KitchenQual"] == "Fa", "KitchenQual"] = 1
result["KitchenQual"]
# Here we encode BsmtExposure in Rank
result.loc[result["BsmtExposure"] == "Gd", "BsmtExposure"] = 4
result.loc[result["BsmtExposure"] == "Av", "BsmtExposure"] = 3
result.loc[result["BsmtExposure"] == "Mn", "BsmtExposure"] = 2
result.loc[result["BsmtExposure"] == "No", "BsmtExposure"] = 1
result["BsmtExposure"].fillna(0, inplace=True)
result["BsmtExposure"]
# Here we encode GarageQual in Rank
result.loc[result["GarageQual"] == "Ex", "GarageQual"] = 5
result.loc[result["GarageQual"] == "Gd", "GarageQual"] = 4
result.loc[result["GarageQual"] == "TA", "GarageQual"] = 3
result.loc[result["GarageQual"] == "Fa", "GarageQual"] = 2
result.loc[result["GarageQual"] == "Po", "GarageQual"] = 1
result["GarageQual"].fillna(0, inplace=True)
result["GarageQual"]
del result["GarageQual"]  # because all its values are 3
# Here we encode GarageFinish in Rank
result.loc[result["GarageFinish"] == "Fin", "GarageFinish"] = 4
result.loc[result["GarageFinish"] == "RFn", "GarageFinish"] = 3
result.loc[result["GarageFinish"] == "Unf", "GarageFinish"] = 2
result["GarageFinish"].fillna(0, inplace=True)
result["GarageFinish"]
# HERE WE FILL THE LAST NAs IN THOSE VARIABLES WHICH WE CAN NOT RANK
result["MasVnrType"].fillna("None", inplace=True)
result["MasVnrArea"].fillna(0, inplace=True)
result["GarageType"].fillna("No Garage", inplace=True)
# Correlation matrix with new encoded variables
corr_matrix = result.corr()
f, ax1 = plt.subplots(figsize=(25, 25))  # create the figure and axes
ax1 = sns.heatmap(corr_matrix, vmax=0.9)
# Seaborn heatmap with the colour scale capped at 0.9
corrmat = data_train.corr()
top_corr_features = corrmat.index[abs(corrmat["SalePrice"]) > 0.3]
plt.figure(figsize=(9, 9))
g = sns.heatmap(data_train[top_corr_features].corr(), annot=True, cmap="RdYlGn")
pd.set_option("display.max_columns", 70)
# Near 0 variance
del result["ExterCond"]
del result["BsmtCond"] # Near 0 variance
# # Outliers
# Here we extract the numerical variables, this will come in handy later on
n_features = result.select_dtypes(exclude=["object"]).columns
def mod_outlier(df):
    # Clip every numeric column to the 1.5*IQR fences (winsorize the outliers)
    df1 = df.copy()
    num = df._get_numeric_data()
    q1 = num.quantile(0.25)
    q3 = num.quantile(0.75)
    iqr = q3 - q1
    lower_bound = q1 - (1.5 * iqr)
    upper_bound = q3 + (1.5 * iqr)
    for col in num.columns:
        df1[col] = num[col].clip(lower=lower_bound[col], upper=upper_bound[col])
    return df1
result = mod_outlier(result)
for i in result[n_features]:
sns.boxplot(x=result[i])
plt.show()
result
# Here we split train and test back and we attach "SalePrice" to the train
data_train_new, data_test_new = result[:1100], result[1100:]
data_train_new["SalePrice"] = y
data_train_new
data_test_new
# # Create Dummy Variables
data_train_dummies = pd.get_dummies(data_train_new)
data_train_dummies
# # Model
# ## K-Fold Cross Validation
n_folds = 5
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(X.values)
rmse = np.sqrt(
-cross_val_score(model, X.values, Y, scoring="neg_mean_squared_error", cv=kf)
)
return rmse
X = data_train_dummies[data_train_dummies.columns.difference(["SalePrice"])]
Y = data_train_dummies["SalePrice"]
X
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.30, random_state=40
)
# ## Linear Regression
lr = LinearRegression()
lr.fit(X_train, Y_train)
print(lr.intercept_)
print(lr.coef_)
predicted = lr.predict(X_test)
plt.figure(figsize=(15, 8))
plt.scatter(Y_test, predicted)
plt.xlabel("Y Test")
plt.ylabel("Predicted Y")
plt.show()
score = rmsle_cv(lr)
print("\nLinear Regression score: {:.4f}\n".format(score.mean()))
from sklearn import metrics
print("MAE:", metrics.mean_absolute_error(Y_test, predicted))
print("MSE:", metrics.mean_squared_error(Y_test, predicted))
print("RMSE:", np.sqrt(metrics.mean_squared_error(Y_test, predicted)))
# ## Lasso
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))
score = rmsle_cv(lasso)
print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
##lasso = linear_model.Lasso()
### y_pred = cross_val_predict(lasso, X, y, cv=5)
# ## G Boosting
GBoost = GradientBoostingRegressor(
n_estimators=5000,
learning_rate=0.05,
max_depth=4,
max_features="sqrt",
min_samples_leaf=15,
min_samples_split=10,
loss="huber",
random_state=5,
)
# RMSE estimated through the partition of the train set
GBoost.fit(X_train, Y_train)
rmse = math.sqrt(mean_squared_error(Y_test, GBoost.predict(X_test)))
print("RMSE: %.4f" % rmse)
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# # Random Forest
regressor = RandomForestRegressor(n_estimators=300, random_state=0)
regressor.fit(X, Y)
# Score model
score = rmsle_cv(regressor)
print("Random Forest score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
|
# # Movie Recommender System
# https://www.kaggle.com/datasets/rounakbanik/the-movies-dataset
import os
import pandas as pd
import numpy as np
from surprise.model_selection import train_test_split, cross_validate
from surprise import Dataset, Reader, KNNBasic, SVD
# # Loading Dataset
# reader = Reader(line_format="userId movieId rating timestamp", sep=" ")
# movie_df = Dataset.load_from_file("/kaggle/input/the-movies-dataset/ratings_small.csv", reader=reader)
movie_df = pd.read_csv("/kaggle/input/the-movies-dataset/ratings_small.csv")
movie_df
reader = Reader(rating_scale=(0.5, 5.0))
data = Dataset.load_from_df(movie_df[["userId", "movieId", "rating"]], reader)
trainset, testset = train_test_split(data, test_size=0.25)
# **Computing the average MAE and RMSE of Probabilistic Matrix Factorization (PMF), User-based Collaborative Filtering, and Item-based Collaborative Filtering under 5-fold cross-validation**
user_based = KNNBasic(sim_options={"user_based": True})
item_based = KNNBasic(sim_options={"user_based": False})
pmf = SVD(biased=False)  # SVD without baselines is equivalent to probabilistic matrix factorization
metrics = ["rmse", "mae"]
user_based_results = cross_validate(user_based, data, measures=metrics, cv=5)
item_based_results = cross_validate(item_based, data, measures=metrics, cv=5)
pmf_results = cross_validate(pmf, data, measures=metrics, cv=5)
print("User-based CF RMSE:", user_based_results["test_rmse"].mean())
print("User-based CF MAE:", user_based_results["test_mae"].mean())
print("Item-based CF RMSE:", item_based_results["test_rmse"].mean())
print("Item-based CF MAE:", item_based_results["test_mae"].mean())
print("PMF RMSE:", pmf_results["test_rmse"].mean())
print("PMF MAE:", pmf_results["test_mae"].mean())
# # Cosine, Mean Squared Difference, and Pearson Similarities
user_based_cosine = KNNBasic(sim_options={"name": "cosine", "user_based": True})
user_based_msd = KNNBasic(sim_options={"name": "msd", "user_based": True})
user_based_pearson = KNNBasic(sim_options={"name": "pearson", "user_based": True})
item_based = KNNBasic(sim_options={"user_based": False})
pmf = SVD(biased=False)
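# A minimal sketch of the assumed next step: compare the three user-based similarity measures
# with the same 5-fold cross-validation used above.
for sim_name, algo in [
    ("cosine", user_based_cosine),
    ("msd", user_based_msd),
    ("pearson", user_based_pearson),
]:
    res = cross_validate(algo, data, measures=metrics, cv=5)
    print(sim_name, "RMSE:", res["test_rmse"].mean(), "MAE:", res["test_mae"].mean())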
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import tensorflow as tf
from tensorflow.keras.datasets import reuters
from tensorflow.keras import (
models,
layers,
optimizers,
utils,
) # utils for one hot encode
import matplotlib.pyplot as plt
# ## Multi-Class Classification Problem:
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(
num_words=10000
)
print(len(train_data))
print(len(test_data))
train_data[0]
def vectorize_sequences(sequences, dimension=10000):
    # multi-hot encode each sequence of word indices into a vector of length `dimension`
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.0
    return results
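# A minimal sketch of the usual continuation (assumed, not part of the original script):
# multi-hot encode the inputs, one-hot encode the 46 topic labels, and fit a small dense network.
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = utils.to_categorical(train_labels)
y_test = utils.to_categorical(test_labels)
model = models.Sequential(
    [
        layers.Input(shape=(10000,)),
        layers.Dense(64, activation="relu"),
        layers.Dense(64, activation="relu"),
        layers.Dense(46, activation="softmax"),
    ]
)
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(x_train, y_train, epochs=9, batch_size=512, validation_split=0.2)
print(model.evaluate(x_test, y_test))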
|
# # Creditcard Fraud Detection
# ### Context
# It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
# ### Content
# The dataset contains transactions made by credit cards in September 2013 by European cardholders.
# This dataset presents transactions that occurred over two days, with 492 frauds out of 284,807 transactions. The dataset is highly unbalanced: the positive class (frauds) accounts for 0.172% of all transactions.
# It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, the original features and more background information about the data cannot be provided. Features V1, V2, … V28 are the principal components obtained with PCA; the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. Feature 'Amount' is the transaction amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
# ### Data
# https://www.kaggle.com/datasets/mlg-ulb/creditcardfraud
# ### Summary
# Of the 31 columns in the dataset, 29 were pre-scaled, so the remaining two columns were first scaled to make their distributions look like the other features. Stratified splitting was used when dividing the data into train and test sets, because with such imbalanced data all fraud-positive rows could otherwise end up on one side. Since the data was imbalanced, the oversampling method SMOTE (Synthetic Minority Oversampling Technique) was used. Base models were created with the selected algorithms, and hyperparameter tuning and ensemble techniques were applied to a selection of the base models. Since precision, recall and F1 are important metrics in fraud detection, the priority was to keep them high.
# Overview
# 1) Understand the shape of the data
# 2) Data Exploration
# 3) Data Preprocessing for Model
# 4) Basic Model Building
# 5) Model Tuning
# 6) Ensemble Model Building
# ### 1) Understand the shape of the data
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import imblearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
df = pd.read_csv("creditcard.csv")
df.Class.value_counts()
print(
"No Frauds",
round(df["Class"].value_counts()[0] / len(df) * 100, 2),
"% of the dataset",
)
print(
"Frauds",
round(df["Class"].value_counts()[1] / len(df) * 100, 2),
"% of the dataset",
)
pd.set_option("display.max_columns", 500)
df.head()
df.info()
df.describe()
# ### 2) Data Exploration
cmap = sns.color_palette("YlOrBr", as_cmap=True)
sns.heatmap(df.corr(), cmap=cmap)
plt.title("df")
fig, axs = plt.subplots(ncols=4, nrows=8, figsize=(15, 20))
for i, col in enumerate(df.columns):
row_idx = i // 4
col_idx = i % 4
axs[row_idx, col_idx].plot(df[col], color="#FF8400")
axs[row_idx, col_idx].set_title(col)
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(16, 4))
amount_val = df.Amount
time_val = df.Time
sns.histplot(df.Amount, ax=ax[0], color="#AA96DA", kde=True, binwidth=200)
ax[0].set_title("Distribution of Transaction Amount", fontsize=14)
ax[0].set_xlim([min(df.Amount) - 500, max(df.Amount)])
sns.histplot(df.Time, ax=ax[1], color="#FCBAD3", kde=True)
ax[1].set_title("Distribution of Transaction Time", fontsize=14)
ax[1].set_xlim([min(df.Time), max(df.Time)])
# ### 3) Data Preprocessing for Model
from sklearn.preprocessing import StandardScaler, RobustScaler
# RobustScaler is less prone to outliers.
std_scaler = StandardScaler()
rob_scaler = RobustScaler()
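# A quick illustration of the claim above (a sketch, not part of the original analysis):
# RobustScaler centres on the median and divides by the IQR, so the bulk of Amount keeps a unit
# interquartile range, whereas StandardScaler's unit variance is dominated by the extreme
# transactions and squeezes the bulk of the data into a much narrower band.
amount = df.Amount.values.reshape(-1, 1)
std_scaled = std_scaler.fit_transform(amount).ravel()
rob_scaled = rob_scaler.fit_transform(amount).ravel()
print("IQR after StandardScaler:", np.percentile(std_scaled, 75) - np.percentile(std_scaled, 25))
print("IQR after RobustScaler:", np.percentile(rob_scaled, 75) - np.percentile(rob_scaled, 25))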
df["scaled_amount"] = rob_scaler.fit_transform(df.Amount.values.reshape(-1, 1))
df["scaled_time"] = rob_scaler.fit_transform(df.Time.values.reshape(-1, 1))
df.drop(["Time", "Amount"], axis=1, inplace=True)
df.head()
print(
"The percentage of no frauds : ",
round(df.Class.value_counts()[0] / len(df) * 100, 2),
"%",
)
print(
"The percentage of frauds : ",
round(df.Class.value_counts()[1] / len(df) * 100, 2),
"%",
)
X = df.drop("Class", axis=1)
y = df["Class"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2, sampling_strategy=0.5)
X_over, y_over = sm.fit_resample(X_train, y_train)
y_over.shape
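# A short check (a sketch): with sampling_strategy=0.5, SMOTE synthesises minority rows until
# frauds are half as numerous as non-frauds in the training fold.
print("class counts before SMOTE:", np.bincount(y_train))
print("class counts after SMOTE: ", np.bincount(y_over))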
# ### 4) Basic Model Building
# 1. Logistic Regression
# 2. XGBoost
# 3. KNeighborsClassifier
# 4. Random Forest
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import VotingClassifier
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
# #### 4.1) Logistic Regression
logreg = LogisticRegression(max_iter=1000)
rf_clf = RandomForestClassifier()
xgb_clf = xgb.XGBClassifier()
knn = KNeighborsClassifier()
logreg.fit(X_over, y_over)
y_pred = logreg.predict(X_test)
logc = metrics.classification_report(y_test, y_pred)
print(logc)
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
precision, recall, thresholds = precision_recall_curve(y_test, y_pred_prob)
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision Recall Curve")
# #### 4.2) XGBoost Classifier
xgb_clf.fit(X_over, y_over)
y_pred = xgb_clf.predict(X_test)
xgbc = metrics.classification_report(y_test, y_pred)
print(xgbc)
y_pred_prob = xgb_clf.predict_proba(X_test)[:, 1]
precision, recall, thresholds = precision_recall_curve(y_test, y_pred_prob)
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision Recall Curve")
# #### 4.3) K-Nearest Neighbours Classification
knn.fit(X_over, y_over)
y_pred = knn.predict(X_test)
knnc = metrics.classification_report(y_test, y_pred)
print(knnc)
y_pred_prob = knn.predict_proba(X_test)[:, 1]
precision, recall, thresholds = precision_recall_curve(y_test, y_pred_prob)
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision Recall Curve")
# #### 4.4) Random Forest
rf_clf.fit(X_over, y_over)
y_pred = rf_clf.predict(X_test)
rfc = metrics.classification_report(y_test, y_pred)
print(rfc)
y_pred_prob = rf_clf.predict_proba(X_test)[:, 1]
precision, recall, thresholds = precision_recall_curve(y_test, y_pred_prob)
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision Recall Curve")
# Feature importances
importances = rf_clf.feature_importances_
importances_df = pd.DataFrame({"Feature": X_train.columns, "Importance": importances})
importances_df = importances_df.sort_values(by="Importance", ascending=False)
# Print feature importances with column names as index
for i, row in importances_df.iterrows():
print("%s: %.3f" % (row["Feature"], row["Importance"]))
# ### 5) Model Tuning
from sklearn.model_selection import GridSearchCV
# Logistic Regression
logreg_param_grid = {
"C": [0.01, 0.1, 1, 10],
"max_iter": [100, 250, 500],
"multi_class": ["ovr", "multinomial"],
}
grid_lg = GridSearchCV(logreg, logreg_param_grid, cv=5, verbose=1, n_jobs=-1)
best_fit_lg = grid_lg.fit(X_over, y_over)
y_pred = best_fit_lg.best_estimator_.predict(X_test)
grid_lg = metrics.classification_report(y_test, y_pred)
print(grid_lg)
# Random Forest Classifier
rf_param_grid = {
"criterion": ["gini", "entropy"],
"max_depth": list(range(2, 4, 1)),
"min_samples_leaf": list(range(5, 7, 1)),
}
grid_rf = GridSearchCV(rf_clf, rf_param_grid, cv=5, verbose=1, n_jobs=-1)
best_fit_rf = grid_rf.fit(X_over, y_over)
y_pred = best_fit_rf.best_estimator_.predict(X_test)
grid_rf = metrics.classification_report(y_test, y_pred)
print(grid_rf)
# XGBoost Classifier
xgb_param_grid = {
"n_estimators": [100, 500],
"max_depth": [5, 10],
"learning_rate": [0.1, 1.0],
"subsample": [0.5, 0.75, 1.0],
}
grid_xgb = GridSearchCV(xgb_clf, xgb_param_grid, cv=2, verbose=1, n_jobs=-1)
best_fit_xgb = grid_xgb.fit(X_over, y_over)
y_pred = best_fit_xgb.best_estimator_.predict(X_test)
grid_xgb = metrics.classification_report(y_test, y_pred)
print(grid_xgb)
# KNN classification
knn_param_grid = {
"n_neighbors": [3, 5],
"weights": ["uniform", "distance"],
"p": [1, 2],
}
grid_knn = GridSearchCV(knn, knn_param_grid, cv=5, verbose=1, n_jobs=-1)
best_fit_knn = grid_knn.fit(X_over, y_over)
y_pred = best_fit_knn.best_estimator_.predict(X_test)
grid_knn = metrics.classification_report(y_test, y_pred)
print(grid_knn)
# ### 6) Ensemble Model Building
vot_soft = VotingClassifier(
estimators=[
("Random Forest Classifier", best_fit_rf.best_estimator_),
("XGBoost Classifier", best_fit_xgb.best_estimator_),
],
voting="soft",
)
vot_soft.fit(X_over, y_over)
y_pred = vot_soft.predict(X_test)
score = metrics.classification_report(y_test, y_pred)
print(score)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow_decision_forests as tfdf # model
import matplotlib.pyplot as plt # data visualization
import tensorflow_probability as tfp
import tensorflow as tf
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Lets first load the data
training = pd.read_csv("/kaggle/input/titanic/train.csv")
testing = pd.read_csv("/kaggle/input/titanic/test.csv")
# Lets check the headers
training.head()
testing.head()
# Lets check the details
training.info()
testing.info()
# Clean the data a little: fill NA values with 0
training = training.fillna(0)
testing = testing.fillna(0)
# Lets check what groups make up our sample
# Lets break them into groups of
# alive women, alive men, dead women and dead men
# derive each group rows
alive_women = training.loc[(training.Sex == "female") & (training.Survived == 1)]
dead_women = training.loc[(training.Sex == "female") & (training.Survived == 0)]
alive_men = training.loc[(training.Sex == "male") & (training.Survived == 1)]
dead_men = training.loc[(training.Sex == "male") & (training.Survived == 0)]
# calculate what percentage of the sample they make up
percent_of_alive_women_in_sample = len(alive_women) / len(training)
percent_of_dead_women_in_sample = len(dead_women) / len(training)
percent_of_alive_men_in_sample = len(alive_men) / len(training)
percent_of_dead_men_in_sample = len(dead_men) / len(training)
# display data
print(
"""
in what part did the sample consist of alive women: {0}
in what part did the sample consist of alive men: {1}
in what part did the sample consist of dead women: {2}
in what part did the sample consist of dead men: {3}\n""".format(
percent_of_alive_women_in_sample,
percent_of_alive_men_in_sample,
percent_of_dead_women_in_sample,
percent_of_dead_men_in_sample,
)
)
# Lets count the probabilities of dying depending on gender
# break the set depending on gender
all_women = training.loc[training.Sex == "female"]
all_men = training.loc[training.Sex == "male"]
probability_of_living_female = len(dead_women) / len(all_women)
probability_of_living_male = len(dead_men) / len(all_men)
print(
"""
probability of dying as a woman: {0}
probability of dying as a man: {1}\n
(reducing to gender only)\n""".format(
probability_of_living_female, probability_of_living_male
)
)
# Lets clean the data a little - remove lines with NA values in Age
# (note: Age was already filled with 0 above, so no rows are actually dropped here)
training = training[training["Age"].notna()]
testing = testing[testing["Age"].notna()]
# Lets create a model
# We will choose random forest
from sklearn.ensemble import RandomForestClassifier
# pick interesting columns for both training and testing
train_dependent_var = training["Survived"]
features = ["Sex", "Age", "Pclass"]
train_independent_vars = training[features]
test_independent_vars = testing[features]
# automatically convert categorical variables to numerical variables
train_independent_vars_nums = pd.get_dummies(train_independent_vars)
test_independent_vars_nums = pd.get_dummies(test_independent_vars)
# create, fit and predict results
predictor = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
predictor.fit(train_independent_vars_nums, train_dependent_var)
predictions = predictor.predict(test_independent_vars_nums)
# create results table
solution = testing.copy()
solution.insert(0, "Survived?", predictions)
# print
solution
# evaluate the model
# check whether the survival rate vs gender results would be similar
# Lets check what groups make up our sample
# Lets break them into groups of
# alive women, alive men, dead women and dead men
training_alive_women = training.loc[(training.Sex == "female") & (training.Survived == 1)]
training_alive_men = training.loc[(training.Sex == "male") & (training.Survived == 1)]
training_dead_women = training.loc[(training.Sex == "female") & (training.Survived == 0)]
training_dead_men = training.loc[(training.Sex == "male") & (training.Survived == 0)]
training_percent_of_alive_women_in_sample = len(training_alive_women) / len(training)
training_percent_of_alive_men_in_sample = len(training_alive_men) / len(training)
training_percent_of_dead_women_in_sample = len(training_dead_women) / len(training)
training_percent_of_dead_men_in_sample = len(training_dead_men) / len(training)
print(
"""
in what part did the sample consist of alive women: {0}
in what part did the sample consist of alive men: {1}
in what part did the sample consist of dead women: {2}
in what part did the sample consist of dead men: {3}\n""".format(
training_percent_of_alive_women_in_sample,
training_percent_of_alive_men_in_sample,
training_percent_of_dead_women_in_sample,
training_percent_of_dead_men_in_sample,
)
)
# Lets evaluate the model once again, but now on the probability of dying
# Lets count the probabilities of dying depending on gender
training_all_women = training.loc[training.Sex == "female"]
training_all_men = training.loc[training.Sex == "male"]
training_probability_of_living_female = len(training_dead_women) / len(
training_all_women
)
training_probability_of_living_male = len(training_dead_men) / len(training_all_men)
print(
"""
probability of dying as a woman: {0}
probability of dying as a man: {1}\n
(reducing to gender only)\n""".format(
training_probability_of_living_female, training_probability_of_living_male
)
)
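# A minimal follow-up sketch (not in the original): the true test labels are unknown here, so
# compare the *predicted* survival rate by gender with the training-set rates printed above.
print(solution.groupby("Sex")["Survived?"].mean())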
import seaborn as sns
# calculate correlation (note: only on the first 10 rows, since training is truncated here)
training = training.head(10)
corr_matrix = training.corr()
sns.heatmap(corr_matrix) # use cmap='coolwarm' to provide color map
# Bend x label ticks 45 degree to avoid overlappings with each-other
plt.xticks(rotation=45)
plt.title("Correlations")
plt.show()
submission = pd.DataFrame({"PassengerId": testing.PassengerId, "Survived": predictions})
submission.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
submission.head()
|
# This is my first Kernel on Kaggle; I might have mislinked the dataset and uploaded it as my own. I have credited the original dataset contributor in the Reference section.
# # Summary
# We started with a dataset containing 800 Pokemon game stats and a categorical variable "Type", aiming to visualize the distribution of Pokemon as well as explaining how those of the same "Type" are grouped together. Initial dataframe was treated with a column change - "Type 2" had nearly half the values missing, thus it was replaced with binary 0/1, for data analysis.
# Initial data exploration shows that the distribution of types is bimodal, which coincides with the distribution of the overall strength of the 800 Pokemon, measured by the sum of all stats (attack, defense, sp. atk, sp. def and speed). Each stat by itself shows a right-skewed normal distribution, leaving some outliers that are possibly game specials - "legendary" Pokemon. Having a secondary type present shows a lift in overall strength of 10.8%.
# To further examine whether, and if so, which attributes might explain the type, we reduced dimensions using PCA and plotted the 18 types on a 2D graph. Although the two principal components are hard to interpret, we were able to cover 74.5% of the variance. We further attempted to use PCA to find a fit between features and target with a logistic regression model. The result was of low accuracy - only 20%.
# This study shows that game design is sophisticated - when assigning attributes to certain characters, designers must think about whether each attribute fits the distribution, as well as the overall strength within each group. Especially around special events, game companies often reward players with special items and create special levels. These creations are the outliers in game data and must be treated with caution too.
# # Introduction
# In game design we often encounter character/monster stats that are inconsistent across the game - players might experience an extremely difficult starting level, for example. Each character, move and monster has at least 6 attributes (defense, attack, agility etc.), so the question is: how do we design characters whose attributes and level (being strong or weak in the game) are aligned, while all of them together form a distribution (perhaps a normal distribution) that best fits the game play?
# In this study I will examine a dataset of 800 Pokemon (out of the 809 that exist at the time of writing) to understand how game developers from Nintendo and Game Freak design these famous creatures and balance their attributes. For simplicity, I'm using a dataset of Pokemon at their beginning level.
# # Data Overview
# This data set includes 721 Pokemon (with some Pokemon having 2 versions, for a total of 800 rows), including their number, name, first and second type, and basic stats: HP, Attack, Defense, Special Attack, Special Defense, and Speed. The dataset was obtained from Kaggle, by Alberto Barradas, through the public domains of pokemon.com, pokemondb and bulbapedia. Link to the Kaggle kernel below:
# https://www.kaggle.com/abcsds/pokemon
# import important libraries
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
# read dataset
df = pd.read_csv("../input/Pokemon.csv", index_col=0)
# check attributes
df.head()
# check types of pokemons
df.groupby("Type 1").size()
df.groupby("Type 2").size()
plt.figure(figsize=(12, 5))
chart0 = sns.countplot(x="Type 1", data=df)
plt.figure(figsize=(12, 5))
sns.countplot(x="Type 2", data=df)
# Pokemons have a primary type stored in 'Type 1' column and a possible secondary type stored in 'Type 2'. Total 18 types.
# We also look at the column 'Total', which represents the total strength of such pokemon - the higher the better.
# Distribution of Total
sns.set(color_codes=True)
sns.distplot(df["Total"])
# Total seems to be bimodal.
# We notice there are NaN in our data. Let's take a look
# checking the percentage of missing values in each variable
df.isnull().sum() / len(df) * 100
# Almost half of the Pokemons do not have a Type 2. We will change this column to a binary column.
# We also need to check the variance of the attributes to see if it makes sense to keep all of them.
df.var() / len(df) * 100
# Since the column 'Total' is our output, such variance is within our expectation. Major attributes include HP, Attack, Defense, Sp. Atk, Sp. Def, and Speed, all with a variance between 80 and 133. We will keep them all. This is expected, given it's Nintendo...
# # Data Prep
# Here we convert 'Type 2' to a boolean dummy column: '0' if this Pokemon does not have a Type 2 attribute and '1' if it does.
# change NaN to 0
df["Type 2"] = df["Type 2"].fillna(0)
# create a new list to change non-NaN values to 1
Type_2 = []
for i in df["Type 2"]:
if i == 0:
Type_2.append(0)
else:
Type_2.append(1)
# replace old column 'Type 2' with new binary column
df["Type 2"] = Type_2
df.head()
# Much better. Now let's take a look at the distribution of each attribute.
# Histogram of attribute 'Attack' and 'Defense'
sns.distplot(df["Attack"])
sns.distplot(df["Defense"])
# A right-skewed normal distribution graph for both attributes.
# Similarly let's look at the distribution of other attributes:
sns.distplot(df["HP"])
sns.distplot(df["Speed"])
sns.distplot(df["Sp. Atk"])
sns.distplot(df["Sp. Def"])
# It appears that all attributes follow a right-skewed pattern. We can further explore the statistical relationships between these attributes in next steps.
# # Preliminary Analysis
# Overviews of categorical plots, statistical estimation and 2D correlations
# "wide-form" plots of the dataframe
sns.catplot(data=df, kind="box")
# This proves that all attributes are right skewed, so is the outcome 'Total'. The outliers here are likely the 'legendary' kinds - unfortunately not included in this dataset.
# Type 1 is a categorical column that I have left untreated so far. I'm guessing the type of a Pokemon has an effect on its total strength too. Let's take a look at 'Total' and 'Type 1'.
df.groupby("Type 1", sort=True).mean()
# The table gives the mean of each type, but how much variance does each type show?
plt.figure(figsize=(10, 5))
chart1 = sns.catplot(x="Type 1", y="Total", kind="bar", data=df)
chart1.set_xticklabels(rotation=60)
# Looks like the developers really favour Dragon-type Pokemons!
# Now let's breakdown and see what makes up the 'Total'
# A brief overlook of the correlations between each attribute
df.corr()
# Since 'Total' is our outcome and all other attributes have high correlations with it, this supports our heuristic guess. No other pair of attributes has a correlation higher than 0.5 except 'Sp. Def' and 'Sp. Atk'. We could potentially drop one of them, but we will keep both for now.
# Let's take a look at the 2D plots of 'Sp.Def' and 'Sp.Atk':
sns.relplot(x="Sp. Atk", y="Sp. Def", data=df)
# Overall we can see that the higher Sp. Atk a Pokemon has, the higher Sp. Def it has.
# It might make more sense to see if different type would give any more clues.
sns.relplot(x="Sp. Atk", y="Sp. Def", hue="Type 1", data=df)
# We have 18 types... That's kinda crazy to visualize over one scatterplot. Let's take a look at whether having a secondary type would make a difference.
sns.relplot(x="Sp. Atk", y="Sp. Def", hue="Type 2", data=df)
# Out of curiosity... Is the strength of pokemon higher when there is a secondary type present?
df.groupby("Type 2", sort=True).mean().sort_values("Total", ascending=False).Total
(456.6 - 412) / 412 * 100
# The presence of 'Type 2' has an overall 10.8% lift in overall strength. Perhaps with the help of other attributes we can explain the help of secondary type better.
# Which pokemon types are more likely to get a secondary type?
chart2 = sns.catplot(x="Type 1", kind="count", hue="Type 2", data=df)
chart2.set_xticklabels(rotation=60)
# Bug, Rock and Steel types are way more likely to get a secondary type!
# It is hard to cluster Pokemons based on just any of the two variables. Due to our limited dimensionality plotting, we will consider methods to lower dimensionality by grouping variables together.
# # Data Analysis
# Can we use a model to explain the relationship between total strength and all other attributes?
# Can we explain everything with our best friend - linear regression?
import statsmodels.api as sm
# First let's separate the predictors and the target - in this case -- Total.
df1 = df.drop(columns=["Total"])
df1
# I will use PCA to project the 6 attributes onto two dimensions, to see whether they can explain the pattern of Pokemon types.
# Lower dimensionality approach using PCA
# import standard scaler package
from sklearn.preprocessing import StandardScaler
features = ["Total", "Attack", "Defense", "Sp. Atk", "Sp. Def", "Speed"]
# separating out the features
x = df.loc[:, features].values
y = df.loc[:, ["Type 1"]].values
# standardizing the features
x = StandardScaler().fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(
data=principalComponents, columns=["principal component 1", "principal component 2"]
)
principalDf
target = df.iloc[:, 1]
target.index = range(800)
target
finalDf = pd.concat([principalDf, target], axis=1)
finalDf
# Perfect final PCA table! The two principal components here don't necessarily make any sense except for mapping out the classes and hopefully separating out the classes.
sns.relplot(
x="principal component 1", y="principal component 2", hue="Type 1", data=finalDf
)
# The plot did not seem to separate out the types too well. Let's see how much of the variance these two components explain:
pca.explained_variance_ratio_
# Together these two principal components contain 74.5% of the information, better than I thought!
# Let's split data into test and training to test a logistic regression model using PCA.
# Split dataset
from sklearn.model_selection import train_test_split
dat = df.loc[:, features].values
dat_target = target
x_train, x_test, y_train, y_test = train_test_split(
dat, dat_target, test_size=0.2, random_state=0
)
# Fit the scaler on the training set only
scaler = StandardScaler()
scaler.fit(x_train)
# Standardize using scaler
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Make an instance of PCA that keeps the minimum number of principal components needed to retain 95% of the variance.
pca = PCA(0.95)
# Fit PCA on the training set
pca.fit(x_train)
# Now transform the training and the test sets... aka mapping
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
# Apply logistic regression to the transformed data
from sklearn.linear_model import LogisticRegression
logisticRegr = LogisticRegression(solver="lbfgs") # faster!
# Train the model on the data
logisticRegr.fit(x_train, y_train)
# Predict for one observation
logisticRegr.predict(x_test[0].reshape(1, -1))
# Unfortunately a wrong prediction...! Let's see how accurate this model is on test data.
logisticRegr.score(x_test, y_test)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Built-in Data Types
# In programming, data type is an important concept.
# Variables can store data of different types, and different types can do different things.
# Python has the following data types built in by default, in these categories:
# Text Type: str
# Numeric Types: int, float, complex
# Sequence Types: list, tuple, range
# Mapping Type: dict
# Set Types: set, frozenset
# Boolean Type: bool
# Binary Types: bytes, bytearray, memoryview
# None Type: NoneType
# # Getting the Data Type
# You can get the data type of any object by using the type() function.
b = 55
print(type(b))
# # Setting the Data Type
# Example -> Data Type:
# x = "Hello World"                            -> str
# x = 20                                       -> int
# x = 20.5                                     -> float
# x = 1j                                       -> complex
# x = ["apple", "banana", "cherry"]            -> list
# x = ("apple", "banana", "cherry")            -> tuple
# x = range(6)                                 -> range
# x = {"name": "John", "age": 36}              -> dict
# x = {"apple", "banana", "cherry"}            -> set
# x = frozenset({"apple", "banana", "cherry"}) -> frozenset
# x = True                                     -> bool
# x = b"Hello"                                 -> bytes
# x = bytearray(5)                             -> bytearray
# x = memoryview(bytes(5))                     -> memoryview
# x = None                                     -> NoneType
# # Setting a Specific Data Type
# If you want to specify the data type, you can use the following constructor functions:
# Example -> Data Type:
# x = str("Hello World")                       -> str
# x = int(20)                                  -> int
# x = float(20.5)                              -> float
# x = complex(1j)                              -> complex
# x = list(("apple", "banana", "cherry"))      -> list
# x = tuple(("apple", "banana", "cherry"))     -> tuple
# x = range(6)                                 -> range
# x = dict(name="John", age=36)                -> dict
# x = set(("apple", "banana", "cherry"))       -> set
# x = frozenset(("apple", "banana", "cherry")) -> frozenset
# x = bool(5)                                  -> bool
# x = bytes(5)                                 -> bytes
# x = bytearray(5)                             -> bytearray
# x = memoryview(bytes(5))                     -> memoryview
# # Python Numbers
# There are three numeric types in Python:
# int, float, complex
# Variables of numeric types are created when you assign a value to them:
yas = 21 # int
y = 6.77 # float
z = 9j # complex
# To verify the type of any object in Python, use the type() function.
print(type(yas))
print(type(y))
print(type(z))
# # Int - Integer
# Int, or integer, is a whole number, positive or negative, without decimals, of unlimited length.
# integers
a = 555555
b = 99999
c = 8888
print(type(a))
print(type(b))
print(type(c))
# # Float
# A float is a positive or negative number containing one or more decimals.
x = 5.25
y = 5.8
z = -88.88
print(type(x))
print(type(y))
print(type(z))
# Floats can also be scientific numbers with an "e" to indicate the power of 10.
# floats
s1 = 856e1
s2 = 58e4
s3 = -85.7e822  # note: this overflows to -inf, but its type is still float
print(type(s1))
print(type(s2))
print(type(s3))
# # Complex
# Complex numbers are written with a "j" as the imaginary part:
z = 9 + 8j
e = 8j
l = -8j
print(type(z))
print(type(e))
# # Type Conversion
# You can convert from one type to another with the int(), float(), and complex() methods:
a = 999 # int
s = 2.8 # float
d = 1j # complex
# convert from int to float:
x = float(a)
# convert from float to int:
y = int(s)
# convert from int to complex:
z = complex(d)
print(x)
print(y)
print(z)
print(type(x))
print(type(y))
print(type(z))
# Note: You cannot convert complex numbers into another number type.
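# A tiny demonstration of the note above (a sketch): converting a complex number raises a TypeError.
try:
    int(3 + 5j)
except TypeError as err:
    print("TypeError:", err)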
# # Random Numbers
# Python does not have a random() function to make a random number, but it has a built-in module called random that can be used to make random numbers:
import random
print(random.randrange(90, 100))
# # Casting a Variable Type
# There may be times when you want to specify a type for a variable. This can be done with casting. Python is an object-oriented language, and as such it uses classes to define data types, including its primitive types.
# # Specify a Variable Type
# Casting in Python is done using constructor functions:
# int() - constructs an integer from an integer literal, a float literal (by removing all decimals), or a string literal (provided the string represents a whole number)
# float() - constructs a float from an integer literal, a float literal, or a string literal (provided the string represents a float or an integer)
# str() - constructs a string from a wide variety of data types, including strings, integer literals, and float literals
# integers
x = int(8) # x will be 8
y = int(5.10)  # y will be 5
z = int("5") # z will be 5
print(x)
print(y)
print(z)
# floats
x = float(7) # x will be 7.0
y = float(2.77) # y will be 2.77
z = float("55") # z will be 55.0
w = float("48.2") # w will be 48.2
print(x)
print(y)
print(z)
print(w)
# strings
x = str("iki2") # x will be 'iki2'
y = str(31) # y will be '31'
z = str(88.7)  # z will be '88.7'
print(x)
print(y)
print(z)
# # Strings
# Strings in Python are surrounded by either single quotation marks or double quotation marks.
# 'merhaba' is the same as "merhaba".
# You can display a string literal with the print() function:
print("merhaba kanalıma hoşgeldiniz")
print("merhaba kanalıma hoşgeldiniz")
# # Assigning a String to a Variable
# Assigning a string to a variable is done with the variable name followed by an equal sign and the string:
w = "kaybedenler klubu"
print(w)
# # Multiline Strings
# You can assign a multiline string to a variable by using three double quotes:
m = """orda her kiminleysen
belki sevgilinleysen
soyle kumralım, için sızlamaz mı
bilmem hatırlarmısın
gözlerim ne renki
söyle kumralım benim adım neydi."""
print(m)
# Or three single quotes:
m = """orda her kiminleysen
belki sevgilinleysen
soyle kumralım, için sızlamaz mı
bilmem hatırlarmısın
gözlerim ne renki
söyle kumralım benim adım neydi."""
print(m)
# # Strings Are Arrays
# Like many other popular programming languages, strings in Python are arrays of bytes representing unicode characters.
# However, Python does not have a character data type; a single character is simply a string with a length of 1.
# Square brackets can be used to access elements of the string.
# Get the character at position 1 (remember that the first character has position 0):
a = "ekomoni fanta!"
print(a[1])
# # Looping Through a String
# Since strings are arrays, we can loop through the characters in a string with a for loop.
# Loop through the letters in the word "aşk":
for x in "aşk":
print(x)
# # String Length
# To get the length of a string, use the len() function.
# The len() function returns the length of a string:
a = "anne bana bi masal anlatsana"
print(len(a))
# # Check String
# To check if a certain phrase or character is present in a string, we can use the in keyword.
# Check if "rahatlicam" is present in the following text:
txt = "ödevlerim bitince rahatlicam"
print("rahatlicam" in txt)
# Use it in an if statement:
txt = "ödevlerim bitince rahatlicam"
if "rahatlicam" in txt:
print("evet,dışarı çıkacaksın")
# # Check if NOT
# To check if a certain phrase or character is NOT present in a string, we can use the not in keyword.
# Check that "pahalı" is NOT present in the following text:
txt = "her şey ucuz"
print("pahalı" not in txt)
# Use it in an if statement:
txt = "her şey ucuz"
if "pahalı" not in txt:
print("hayır her şey çok pahalıı")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Ensure reproducibility
# Fix the random seeds
import os
import random as rn
os.environ["PYTHONHASHSEED"] = "0"
np.random.seed(7)
rn.seed(7)
from sklearn.model_selection import (
cross_validate,
KFold,
cross_val_score,
train_test_split,
cross_val_predict,
)
from sklearn.metrics import make_scorer, roc_auc_score
import xgboost as xgb
train_df = pd.read_csv(
"/kaggle/input/1056lab-diabetes-diagnosis/train.csv", index_col=0
)
test_df = pd.read_csv("/kaggle/input/1056lab-diabetes-diagnosis/test.csv", index_col=0)
print(train_df.shape)
print(test_df.shape)
# Check the class balance of the target variable
print(list(train_df["Diabetes"]).count(1))
print(list(train_df["Diabetes"]).count(0))
train_df["Gender"] = train_df["Gender"].map({"male": 0, "female": 1})
test_df["Gender"] = test_df["Gender"].map({"male": 0, "female": 1})
train_df["Chol/HDL ratio labels"] = 0
test_df["Chol/HDL ratio labels"] = 0
for i in range(train_df.shape[0]):
if train_df["Chol/HDL ratio"].iloc[i] < 5.0:
train_df["Chol/HDL ratio labels"].iloc[i] = 0
else:
train_df["Chol/HDL ratio labels"].iloc[i] = 1
for i in range(test_df.shape[0]):
if test_df["Chol/HDL ratio"].iloc[i] < 5.0:
test_df["Chol/HDL ratio labels"].iloc[i] = 0
else:
test_df["Chol/HDL ratio labels"].iloc[i] = 1
train_df
X_train = train_df.drop("Diabetes", axis=1).values
y_train = train_df["Diabetes"].values
print(X_train.shape)
print(y_train.shape)
# X_learn, X_valid, y_learn, y_valid = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
# print('training data:', X_learn.shape, y_learn.shape)
# print('validation data:', X_valid.shape, y_valid.shape)
# from sklearn.model_selection import GridSearchCV
# import xgboost as xgb
# reg = xgb.XGBClassifier()
# params = {'random_state':[0, 1], 'n_estimators':[100, 300, 500, 1000], 'max_depth':[1, 2, 3, 4, 5, 6],
# 'learning_rate':[0.5, 0.1, 0.05, 0.01]}
# gscv = GridSearchCV(reg, params, cv=3)
# gscv.fit(X_res, y_res)
# CrossValidation
from imblearn.over_sampling import SMOTE
from lightgbm import LGBMClassifier
from sklearn.model_selection import GridSearchCV
from keras.utils.np_utils import to_categorical
kf = KFold(n_splits=5, shuffle=False)
score_l = []
score_v = []
for i in kf.split(X_train, y_train):
X_learn_s, y_learn_s = X_train[i[0]], y_train[i[0]]
X_valid_s, y_valid_s = X_train[i[1]], y_train[i[1]]
    # Handle the class imbalance: oversample the minority class with SMOTE
    ros = SMOTE(random_state=0)
    X_res, y_res = ros.fit_resample(X_learn_s, y_learn_s)  # fit_sample was renamed to fit_resample in newer imbalanced-learn
Y_learn_s = to_categorical(y_learn_s)
Y_valid_s = to_categorical(y_valid_s)
    # Preprocessing to apply after oversampling
# a = np.insert(a, 2, 1, axis=1)
# for n in range(X_res.shape[0]):
# if X_res[n][2] < 5.0:
# np.append(X_res[n], 0)
# if X_res[n][2] >= 5.0:
# np.append(X_res[n], 1)
clf = LGBMClassifier()
clf.fit(X_res, y_res)
pre_l = clf.predict_proba(X_learn_s)
pre_v = clf.predict_proba(X_valid_s)
score_l.append(roc_auc_score(Y_learn_s[:, 1], pre_l[:, 1]))
score_v.append(roc_auc_score(Y_valid_s[:, 1], pre_v[:, 1]))
print("CrossValidation(learn_data) : ", sum(score_l) / len(score_l))
print("CrossValidation(val_data) : ", sum(score_v) / len(score_v))
# a = [[0,0], [1,1], [2,2]]
# a = np.array(a)
# a
# a = np.insert(a, 2, 1, axis=1)
# a
X_res
score_v
# def auc_score(y_test,y_pred):
# return roc_auc_score(y_test,y_pred)
# kf = KFold(n_splits=4, shuffle=True, random_state=0)
# score_func = {'auc':make_scorer(auc_score)}
# scores = cross_validate(gscv, X_train, y_train, cv = kf, scoring=score_func)
# print('auc:', scores['test_auc'].mean())
# from sklearn.model_selection import cross_val_score
# scores = cross_val_score(gscv, X_valid, y_valid, cv=kf)
# scores.mean()
reg = LGBMClassifier()
ros = SMOTE(random_state=0)
X, y = ros.fit_resample(X_train, y_train)
reg.fit(X, y)
test = test_df.values
p = reg.predict_proba(test)
sample = pd.read_csv("/kaggle/input/1056lab-diabetes-diagnosis/sampleSubmission.csv")
sample["Diabetes"] = p[:, 1]
sample.to_csv("pre.csv", index=False)
|
import numpy as np
def schaffer(p):
x1, x2 = p
part1 = np.square(x1) - np.square(x2)
part2 = np.square(x1) + np.square(x2)
return 0.5 + (np.square(np.sin(part1)) - 0.5) / np.square(1 + 0.001 * part2)
from sko.GA import GA
ga = GA(
func=schaffer,
n_dim=2,
size_pop=50,
max_iter=800,
prob_mut=0.001,
lb=[-1, -1],
ub=[1, 1],
precision=1e-7,
)
best_x, best_y = ga.run()
print("best_x:", best_x, "\n", "best_y:", best_y)
# %% Plot the result
import pandas as pd
import matplotlib.pyplot as plt
Y_history = pd.DataFrame(ga.all_history_Y)
fig, ax = plt.subplots(2, 1)
ax[0].plot(Y_history.index, Y_history.values, ".", color="red")
Y_history.min(axis=1).cummin().plot(kind="line")
plt.show()
import numpy as np
import torch
import time
def schaffer(p):
"""
This function has plenty of local minimum, with strong shocks
global minimum at (0,0) with value 0
https://en.wikipedia.org/wiki/Test_functions_for_optimization
"""
x1, x2 = p
part1 = np.square(x1) - np.square(x2)
part2 = np.square(x1) + np.square(x2)
return 0.5 + (np.square(np.sin(part1)) - 0.5) / np.square(1 + 0.001 * part2)
import torch
from sko.GA import GA
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ga = GA(
func=schaffer,
n_dim=2,
size_pop=50,
max_iter=800,
lb=[-1, -1],
ub=[1, 1],
precision=1e-7,
)
ga.to(device=device)
start_time = time.time()
best_x, best_y = ga.run()
print(time.time() - start_time)
print("best_x:", best_x, "\n", "best_y:", best_y)
ga = GA(
func=schaffer,
n_dim=2,
size_pop=50,
max_iter=800,
lb=[-1, -1],
ub=[1, 1],
precision=1e-7,
)
start_time = time.time()
best_x, best_y = ga.run()
print(time.time() - start_time)
print("best_x:", best_x, "\n", "best_y:", best_y)
def obj_func(p):
x1, x2, x3 = p
return x1**2 + x2**2 + x3**2
constraint_eq = [lambda x: 1 - x[1] - x[2]]
constraint_ueq = [lambda x: 1 - x[0] * x[1], lambda x: x[0] * x[1] - 5]
from sko.DE import DE
de = DE(
func=obj_func,
n_dim=3,
size_pop=50,
max_iter=800,
lb=[0, 0, 0],
ub=[5, 5, 5],
constraint_eq=constraint_eq,
constraint_ueq=constraint_ueq,
)  # equality and inequality constraints
best_x, best_y = de.run()
print("best_x:", best_x, "\n", "best_y:", best_y)
import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt
num_points = 50
points_coordinate = np.random.rand(num_points, 2) # generate coordinate of points
distance_matrix = spatial.distance.cdist(
points_coordinate, points_coordinate, metric="euclidean"
)
def cal_total_distance(routine):
"""The objective function. input routine, return total distance.
cal_total_distance(np.arange(num_points))
"""
(num_points,) = routine.shape
return sum(
[
distance_matrix[routine[i % num_points], routine[(i + 1) % num_points]]
for i in range(num_points)
]
)
from sko.GA import GA_TSP
ga_tsp = GA_TSP(
func=cal_total_distance, n_dim=num_points, size_pop=50, max_iter=500, prob_mut=1
)
best_points, best_distance = ga_tsp.run()
fig, ax = plt.subplots(1, 2)
best_points_ = np.concatenate([best_points, [best_points[0]]])
best_points_coordinate = points_coordinate[best_points_, :]
ax[0].plot(best_points_coordinate[:, 0], best_points_coordinate[:, 1], "o-r")
ax[1].plot(ga_tsp.generation_best_Y)
plt.show()
def demo_func(x):
x1, x2, x3 = x
return x1**2 + (x2 - 0.05) ** 2 + x3**2
# %% Do PSO
from sko.PSO import PSO
constraint_ueq = (lambda x: (x[0] - 1) ** 2 + (x[1] - 0) ** 2 - 0.5**2,)
pso = PSO(
func=demo_func,
n_dim=3,
pop=40,
max_iter=150,
lb=[0, -1, 0.5],
ub=[1, 1, 1],
w=0.8,
c1=0.5,
c2=0.5,
constraint_ueq=constraint_ueq,
)
pso.run()
print("best_x is ", pso.gbest_x, "best_y is", pso.gbest_y)
# %% Plot the result
import matplotlib.pyplot as plt
plt.plot(pso.gbest_y_hist)
plt.show()
demo_func = lambda x: x[0] ** 2 + (x[1] - 0.05) ** 2 + x[2] ** 2
# %% Do SA
from sko.SA import SA
sa = SA(func=demo_func, x0=[1, 1, 1], T_max=1, T_min=1e-9, L=300, max_stay_counter=150)
best_x, best_y = sa.run()
print("best_x:", best_x, "best_y", best_y)
# %% Plot the result
import matplotlib.pyplot as plt
import pandas as pd
plt.plot(pd.DataFrame(sa.best_y_history).cummin(axis=0))
plt.show()
# %%
from sko.SA import SAFast
sa_fast = SAFast(
func=demo_func,
x0=[1, 1, 1],
T_max=1,
T_min=1e-9,
q=0.99,
L=300,
max_stay_counter=150,
)
sa_fast.run()
print(
"Fast Simulated Annealing: best_x is ", sa_fast.best_x, "best_y is ", sa_fast.best_y
)
# %%
from sko.SA import SAFast
sa_fast = SAFast(
func=demo_func,
x0=[1, 1, 1],
T_max=1,
T_min=1e-9,
q=0.99,
L=300,
max_stay_counter=150,
lb=[-1, 1, -1],
ub=[2, 3, 4],
)
sa_fast.run()
print(
"Fast Simulated Annealing with bounds: best_x is ",
sa_fast.best_x,
"best_y is ",
sa_fast.best_y,
)
# %%
from sko.SA import SABoltzmann
sa_boltzmann = SABoltzmann(
func=demo_func,
x0=[1, 1, 1],
T_max=1,
T_min=1e-9,
q=0.99,
L=300,
max_stay_counter=150,
)
sa_boltzmann.run()
print(
"Boltzmann Simulated Annealing: best_x is ",
sa_boltzmann.best_x,
"best_y is ",
sa_boltzmann.best_y,
)
# %%
from sko.SA import SABoltzmann
sa_boltzmann = SABoltzmann(
func=demo_func,
x0=[1, 1, 1],
T_max=1,
T_min=1e-9,
q=0.99,
L=300,
max_stay_counter=150,
lb=-1,
ub=[2, 3, 4],
)
sa_boltzmann.run()
print(
"Boltzmann Simulated Annealing with bounds: best_x is ",
sa_boltzmann.best_x,
"best_y is ",
sa_boltzmann.best_y,
)
# %%
from sko.SA import SACauchy
sa_cauchy = SACauchy(
func=demo_func,
x0=[1, 1, 1],
T_max=1,
T_min=1e-9,
q=0.99,
L=300,
max_stay_counter=150,
)
sa_cauchy.run()
print(
"Cauchy Simulated Annealing: best_x is ",
sa_cauchy.best_x,
"best_y is ",
sa_cauchy.best_y,
)
# %%
from sko.SA import SACauchy
sa_cauchy = SACauchy(
func=demo_func,
x0=[1, 1, 1],
T_max=1,
T_min=1e-9,
q=0.99,
L=300,
max_stay_counter=150,
lb=[-1, 1, -1],
ub=[2, 3, 4],
)
sa_cauchy.run()
print(
"Cauchy Simulated Annealing with bounds: best_x is ",
sa_cauchy.best_x,
"best_y is ",
sa_cauchy.best_y,
)
import numpy as np
from sko.demo_func import function_for_TSP
num_points, points_coordinate, distance_matrix, cal_total_distance = function_for_TSP(
num_points=10
)
# %%
from sko.IA import IA_TSP
ia_tsp = IA_TSP(
func=cal_total_distance,
n_dim=num_points,
size_pop=500,
max_iter=800,
prob_mut=0.2,
T=0.7,
alpha=0.95,
)
best_points, best_distance = ia_tsp.run()
print("best routine:", best_points, "best_distance:", best_distance)
# %%
# step3: plot
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
best_points_ = np.concatenate([best_points, [best_points[0]]])
best_points_coordinate = points_coordinate[best_points_, :]
ax.plot(best_points_coordinate[:, 0], best_points_coordinate[:, 1], "o-r")
plt.show()
|
# ## Reading data
# First we remove duplicates in this dataset.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df_original = pd.read_csv("../input/chemicals-in-cosmetics/chemicals-in-cosmetics.csv")
df = df_original.drop_duplicates()
print("The original database shape:", df_original.shape)
print("Database without duplicates:", df.shape)
df.head()
# ## Investigating chemical counts
df["ChemicalName"].value_counts().size
# Overall there are 123 different chemicals that are reported.
# Based on the description, each entry in column 'ChemicalCount' is the total number of current chemicals reported for a product. This number does not include chemicals that have been removed from a product. Also, this number is a calculated field based on current reporting.
df["ChemicalCount"].describe()
# On average, products contain at least one chemical. Notice there are products with 0 chemicals, and there are products with up to 9 reported chemicals.
# Let's first investigate products where 'ChemicalCount'=0.
df.loc[df.ChemicalCount == 0].head()
# The number of chemicals being equal to zero suggests that the chemicals were removed from the product (reported in 'ChemicalDateRemoved'). This can be verified by checking if there are NaN values in this column.
# when the result is False, there are no NaN values
df.loc[df.ChemicalCount == 0]["ChemicalDateRemoved"].isnull().max()
# In the sequel, we will focus on products that have reported at least one chemical.
df_n0 = df.loc[df.ChemicalCount > 0]
# The maximum number of chemicals that is reported in a product is 9. We can find these products:
df_n0.loc[df_n0.ChemicalCount == 9]
# It turns out it is only one product, where each chemical is separately reported.
# The following code is used to generate the bar chart showing the number of products per number of chemicals. In counting the number of products, different color, scent and/or flavor of the product are neglected (e.g. though 'Professional Eyeshadow Base' can be beige or bright, it is counted only once with 'CDPHId'=26).
data = df_n0.groupby(["ChemicalCount"]).nunique()["CDPHId"]
fig = plt.figure(figsize=(9, 7))
ax = plt.subplot(111)
ax.bar(data.index, data.values, log=True, align="center", alpha=0.5, edgecolor="k")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
ax.set_xticks(np.arange(1, 10))
for x, y in zip(data.index, data.values):
plt.annotate(y, (x, y), textcoords="offset points", xytext=(0, 4), ha="center")
ax.set_title("Number of reported products containing chemicals", fontsize=15)
ax.title.set_position([0.5, 1.05])
ax.set_xlabel("Number of chemicals", fontsize=12)
ax.set_ylabel("Number of products (log scale)", fontsize=12)
plt.show()
# ## Chemicals in baby products
# Baby products represent one of the primary categories in this dataset.
baby_prod = df_n0.loc[df_n0["PrimaryCategory"] == "Baby Products"]
baby_prod.head()
# The next code is used to find all chemicals present in baby products (listed and in a graph). We look for chemicals using their name, since their identification number is not unique (e.g. 'Lead' has 6484 and 6602 as 'ChemicalId')
baby_prod_chem = baby_prod["ChemicalName"].value_counts()
print(baby_prod_chem)
# The long name 'Retinol/retinyl esters, when in daily dosages ...' will be replaced with 'Retinol'. The long name is stored in 'long_text', and a remark is given below the graph.
long_text = baby_prod_chem.index[2]
print("Old chemical name: ", long_text)
print()
baby_prod_chem.rename({baby_prod_chem.index[2]: "Retinol *"}, inplace=True)
print("New chemical name: ", baby_prod_chem.index[2])
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(111)
ax.barh(baby_prod_chem.index, baby_prod_chem.values, color="red", alpha=0.6)
ax.xaxis.grid(linestyle="--", linewidth=0.5)
for x, y in zip(baby_prod_chem.values, baby_prod_chem.index):
ax.annotate(x, (x, y), textcoords="offset points", xytext=(4, 0), va="center")
ax.set_title("Chemicals in baby products", fontsize=15)
ax.title.set_position([0.5, 1.02])
ax.set_xlabel("Number of baby products", fontsize=12)
ax.set_xticks(np.arange(0, 22, 5))
plt.text(-0.15, -0.2, "* " + long_text, size=12, transform=ax.transAxes)
plt.show()
# List of all baby product names, containing at least one chemical, sorted by subcategory.
reported_baby_prod = baby_prod[
["ProductName", "CompanyName", "SubCategory"]
].sort_values("SubCategory")
reported_baby_prod.columns = ["Baby product", "Company", "Type of product"]
reported_baby_prod.style.hide_index()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/credit-risk-dataset/credit_risk_dataset.csv", encoding="latin"
)
df.head(10)
# # Applying multiple condition
df[(df["person_income"] > 83000) & (df["person_emp_length"] > 4)][
"person_income"
].head()
# # Applying multiple condition with loc
df.loc[
(df["person_income"] > 83000) & (df["person_emp_length"] > 4), ["person_income"]
].head()
# # Applying multiple condition with query function
df.query("person_income > 83000 and person_emp_length > 4")["person_income"].head()
df.head()
# # Search with regex
# ## (?:PERSONAL|EDUCATION|MEDICAL) is the regex applied (a non-capturing group, so str.contains does not warn about match groups)
filt_cond = df.loan_intent.str.contains(r"(?:PERSONAL|EDUCATION|MEDICAL)")
df[filt_cond].head()
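# A small extension (a sketch): str.contains also supports case-insensitive matching and an
# explicit fill value for missing entries, which is handy with messy categorical text.
filt_cond_ci = df.loan_intent.str.contains(r"personal|education|medical", case=False, na=False)
df[filt_cond_ci].head()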
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
import matplotlib.pyplot as plt
# **EDA - Data Analysis (Training data)**
# **All helping functions for the program**
# show function will help print data for quick testing
def show(*args):
for arg in args:
print(arg, sep="\n")
# display option for float values
pd.set_option("display.float_format", lambda x: "%.3f" % x)
# inline printing of matplotlib
# **Importing data**
train_data = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
show(train_data, train_data.shape)
# **We have 1460 rows and 81 columns in the data**
# storing all the columns of train_data
all_columns = train_data.columns
all_X = train_data.columns[:-1]
all_y = train_data.columns[-1:]
show(all_X, all_y, all_X.shape, all_y.shape)
# **Data Analysis for all independent variables and dependent variables**
# **Data Analysis - Independent Variables**
# getting all the null data values in the dataset - X (independent variables)
show(train_data[all_X].isnull().sum().sort_values(ascending=False))
missing_X = train_data[all_X].isnull().sum().sort_values(ascending=False)
missing_X = missing_X[missing_X > 1]
show(missing_X, missing_X.shape)
# We have 19 columns with missing data
missing_X.plot.bar()
# percentage of missing data calculation
missing_X_perc = (
train_data[all_X].isnull().sum() / train_data[all_X].isnull().count()
) * 100
missing_X_perc.sort_values(ascending=False)
missing_X_perc[missing_X_perc > 0].sort_values(ascending=False)
# **Data Analysis of SalePrice**
# verifying that there are no missing data in the SalePrice
train_data[all_y].isnull().sum()
# **Cleaning the data**
# all the columns with the highest number of missing values
missing_X.index
# drop the missing columns from the dataset
# specifying axis=1 to drop the columns
train_data = train_data.drop(missing_X.index, axis=1)
# data is cleansed now
show(train_data.columns, missing_X)
# removing the 1 null value in the Electrical column
train_data = train_data.drop(train_data[train_data["Electrical"].isnull()].index)
# confirming that no null values exist in the train_data
train_data.isnull().sum().max()
# Analysing the SalePrice
train_data["SalePrice"]
train_data["LandContour"].dtype
# **Splitting the data into Quantitative and Qualitative variables**
quan_columns = [col for col in train_data if train_data.dtypes[col] != "object"]
qual_columns = [col for col in train_data if train_data.dtypes[col] == "object"]
show(quan_columns, qual_columns)
show(len(quan_columns) + len(qual_columns))
show(len(quan_columns), len(qual_columns))
# removing Id, SalePrice from the Quantitative Columns
quan_columns.remove("Id")
quan_columns.remove("SalePrice")
show(len(quan_columns), len(qual_columns))
# **Graphs - Normal Distribution Analysis for Quantitative Columns vs SalePrice**
# Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
f = pd.melt(train_data, value_vars=quan_columns)
g = sns.FacetGrid(f, col="variable", col_wrap=3, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
# g.savefig('quantitative.png')
quan_columns
# **Analysing the Qualitative Variables for pvalue**
qual_columns
for i in train_data[qual_columns]:
print(i, "\t", train_data[i].unique())
# **Box plot for analysing the Qualitative Data**
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
f = pd.melt(train_data, id_vars=["SalePrice"], value_vars=qual_columns)
g = sns.FacetGrid(f, col="variable", col_wrap=3, sharex=False, sharey=False, height=5)
g = g.map(boxplot, "value", "SalePrice")
# save as fig
# g.savefig('Qualitative_Data_Box_Plots.png')
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
import pyspark.sql.functions as F
from pyspark.sql.types import DoubleType, StringType, StructType, StructField
from pyspark.ml.feature import StringIndexer, VectorAssembler, QuantileDiscretizer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark import SparkContext
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# build spark session
spark = SparkSession.builder.appName(
"Spark on santander-customer-satisfaction"
).getOrCreate()
# Loading data to spark session
train_spark = spark.read.csv(
"../input/santander-customer-satisfaction/train.csv",
header="true",
inferSchema="true",
)
test_spark = spark.read.csv(
"../input/santander-customer-satisfaction/test.csv",
header="true",
inferSchema="true",
)
# Loading data into pandas to simplify data visualization
train = pd.read_csv("../input/santander-customer-satisfaction/train.csv")
test = pd.read_csv("../input/santander-customer-satisfaction/test.csv")
# # 1.Overview and understand data
train_spark.printSchema()
# # Summary of the data
# # Convert the Spark DataFrame to pandas because the display in the Kaggle kernel is hard to read. There may be a better way to display it.
train_spark.describe().limit(5).toPandas()
train_spark.describe().toPandas()
train_spark.limit(5).toPandas()
# > Here I check the number of rows for each ID.
# * Conclusion: there is one row per ID
#
from pyspark.sql.functions import col
df_ID_count = train_spark.groupBy("ID").count().orderBy("count", ascending=False)
# df_ID_count = train_spark.groupBy("ID").count().filter("`count` >= 10").sort(col("count").desc())
df_ID_count.show(20)
# > The function below goes through the columns one by one and describes each of them. It is good practice, but it takes a long time to run, so it should only be used when each parameter needs to be reviewed individually.
########## warning: takes a long time, do not run it unless you need to review each column one by one #####################
def describe_Column(df, column, target="TARGET", numRows=20):
df.groupby(column).agg(
F.count(column).alias("count"),
F.mean(target).alias("mean"),
F.stddev(target).alias("stddev"),
F.min(target).alias("min"),
F.max(target).alias("max"),
).orderBy("count", ascending=False).show(numRows)
# for column, typ in train_spark_reduce.dtypes:
# print(column)
# describe_Column(train_spark_reduce, column)
# please uncomment the lines above to use this
# check the type
type(df_ID_count)
df_Target_count = train_spark.groupBy("TARGET").count()
df_Target_count.show()
type(df_Target_count)
# Understand the sample ratio. Balance between unsatisfied customers (1) and satisfied customers (0).
import pyspark.sql.functions as f
from pyspark.sql.window import Window
df_Target_count = df_Target_count.withColumn(
"ratio", f.col("count") / f.sum("count").over(Window.partitionBy())
)
df_Target_count.orderBy("ratio", ascending=False).show()
import seaborn as sns
from matplotlib import pyplot as plt
print("Histogram plot ")
sns.countplot("TARGET", data=train_spark.toPandas())
plt.title("Target size", fontsize=14)
plt.show()
print("Dataset is imbalanced")
# # 2.Processing and cleaning data
# # Data cleaning (remove irrelevant columns)
# Assuming ID is not correlated with customer satisfaction, so I drop it
train_spark_NoID = train_spark.drop("ID")
train_spark_NoID.limit(5).toPandas()
# # Data cleaning (drop duplicate rows)
# * it may not be ideal to drop these, since different customers could have identical profiles
print("Before dropping duplicates, row count: ", train_spark_NoID.count())
# drop duplicate
train_spark_NoID_NoDupRow = train_spark_NoID.dropDuplicates()
print("After dorp duplicate count: ", train_spark_NoID_NoDupRow.count())
train_spark_NoID_NoDupRow.distinct().count()
# # Data cleaning (drop duplicate columns)
# remove duplicated columns
remove = []
cols = train.columns
for i in range(len(cols) - 1):
v = train[cols[i]].values
for j in range(i + 1, len(cols)):
if np.array_equal(v, train[cols[j]].values):
remove.append(cols[j])
print("Before dorp duplicate column count: ", len(train_spark_NoID_NoDupRow.columns))
train_spark_NoID_NoDup = train_spark_NoID_NoDupRow.drop(*remove)
print("After dorp duplicate column count: ", len(train_spark_NoID_NoDup.columns))
# # Data cleaning (remove columns with distinct count = 1)
# * the code below uses a pure Spark DataFrame; with a pandas DataFrame one could instead compute each column's standard deviation
# * the script is really slow and needs improvement
# * the script below only finds one single-distinct-value column, so I removed it from the run
# print("Before dorp column count: ",len(train_spark_NoID_NoDup.columns))
# #from pyspark.sql.functions import * is need for the countDistinct
# from pyspark.sql.functions import *
# #apply countDistinct on each column
# col_counts = train_spark_NoID_NoDup.agg(*(countDistinct(col(c)).alias(c) for c in train_spark_NoID_NoDup.columns)).collect()[0].asDict()
# #select the cols with Distinct count=1 in an array
# cols_to_drop = [col for col in train_spark_NoID_NoDup.columns if col_counts[col] == 1 ]
# #drop the selected column
# train_spark_drop1Distinct = train_spark_NoID_NoDup.drop(*cols_to_drop)
# print('Number of cols dropped: ',len(cols_to_drop))
# print("After dorp column count after removing distince count =1 : ",len(train_spark_drop1Distinct.columns))
train_spark_drop1Distinct = train_spark_NoID_NoDup
# # Data cleaning (replace strange values in columns)
# * the value -999999 looks like a strange placeholder value and may need to be replaced
# count the number of strange values in each column and sort by count
count_series = train[train < -100000].count()
df_count = count_series.to_frame().T
df_count.max().sort_values(ascending=False).head()
# only var3 column has the strange value -999999
# train_spark_drop1Distinct.filter(train_spark_drop1Distinct.var3 == -999999).toPandas()
train_spark_drop = train_spark_drop1Distinct.withColumn(
"var3",
F.when(
train_spark_drop1Distinct["var3"] < -100000, train["var3"].median()
).otherwise(train_spark_drop1Distinct["var3"]),
)
# train_spark_drop.filter(train_spark_drop.var3 == -999999).toPandas()
train_spark_drop.describe().toPandas()
# # Check Null
# * there are no columns with null values
from pyspark.sql.functions import *
train_spark_drop.select(
[count(when(isnan(c), c)).alias(c) for c in train_spark_drop.columns]
).toPandas()
# # 3.Normalize Imbalanced data
# * Most machine learning algorithms work best when the number of samples in each class is about equal. This is because most algorithms are designed to maximize accuracy and reduce error.
import seaborn as sns
from matplotlib import pyplot as plt
print("Histogram plot after process data set")
sns.countplot("TARGET", data=train_spark_drop.toPandas())
plt.title("Target size", fontsize=14)
plt.show()
import pyspark.sql.functions as f
from pyspark.sql.window import Window
df_Target_count_2 = train_spark_drop.groupBy("TARGET").count()
df_Target_count_2.show()
df_Target_count_2 = df_Target_count_2.withColumn(
"ratio", f.col("count") / f.sum("count").over(Window.partitionBy())
)
df_Target_count_2.orderBy("ratio", ascending=False).show()
# # Resampling techniques — Undersample majority class
# Stratified Sampling can be used as well
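# A minimal sketch of the stratified-sampling alternative mentioned above (illustrative,
# not used by the rest of the pipeline): DataFrame.sampleBy draws each TARGET class with
# its own fraction, which rebalances the classes much like the manual undersampling below.
minority_fraction = (
    train_spark_drop.filter("TARGET = 1").count()
    / train_spark_drop.filter("TARGET = 0").count()
)
train_spark_stratified = train_spark_drop.sampleBy(
    "TARGET", fractions={0: minority_fraction, 1: 1.0}, seed=11
)
train_spark_stratified.groupBy("TARGET").count().show()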
from time import *
start_time = time()
train_spark_1 = train_spark_drop.filter("TARGET =1")
train_spark_0_OG = train_spark_drop.filter("TARGET =0")
ratio = train_spark_1.count() / train_spark_0_OG.count()
print("Before Undersample 1 and 0: ", ratio)
train_spark_0, train_spark_dump = train_spark_0_OG.randomSplit([ratio, 1 - ratio])
# concate two dataframe together
train_spark_Undersample = train_spark_0.union(train_spark_1)
ratio_Undersample = (
train_spark_Undersample.filter("TARGET =1").count()
/ train_spark_Undersample.filter("TARGET =0").count()
)
print("After Undersample 1 and 0: ", ratio_Undersample)
end_time = time()
elapsed_time = end_time - start_time
print("Time for this session: %.3f seconds" % elapsed_time)
import seaborn as sns
from matplotlib import pyplot as plt
print("After Normalize Target distribution ")
sns.countplot("TARGET", data=train_spark_Undersample.toPandas())
plt.title("Target size", fontsize=14)
plt.show()
train_spark_Undersample.groupBy("TARGET").count().show()
# # Feature Assembly
feature_undersample = VectorAssembler(
inputCols=train_spark_Undersample.columns[:-1], outputCol="features"
)
feature_vector_undersample = feature_undersample.transform(train_spark_Undersample)
# # Data split
(
trainingData_undersample,
testData_undersample,
) = feature_vector_undersample.randomSplit([0.8, 0.2], seed=11)
# # 4.Modelling
# # 4.1 Logistic Regression
## Logistic Regression
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(labelCol="TARGET", featuresCol="features", maxIter=5)
lrModel = lr.fit(trainingData_undersample)
import matplotlib.pyplot as plt
import numpy as np
beta = np.sort(lrModel.coefficients)
plt.plot(beta, label="LogisticRegression")
plt.ylabel("Beta Coefficients")
plt.legend(loc="lower right")
plt.show()
trainSet = lrModel.summary
roc = trainSet.roc.toPandas()
plt.plot(roc["FPR"], roc["TPR"], "-r", label="Logistic Regression ROC Curve")
plt.legend(loc="lower right")
plt.ylabel("False Positive Rate")
plt.xlabel("True Positive Rate")
plt.title("ROC Curve")
plt.show()
print("TrainSet areaUnderROC: " + str(trainSet.areaUnderROC))
# # 4.2 RandomForest
# Random Forests are an ensemble of decision trees that use majority voting across the individual trees. Combining decision trees reduces the risk of overfitting.
## Random Forest Classifier
from pyspark.ml.classification import RandomForestClassifier
# Creating RandomForest model.
rf = RandomForestClassifier(labelCol="TARGET", featuresCol="features", numTrees=2)
## train the model
rfModel = rf.fit(trainingData_undersample)
## make predictions
predictions = rfModel.transform(testData_undersample)
rfPredictions = predictions.select("TARGET", "prediction", "probability")
rfPredictions.show(10)
# Models other than Logistic Regression (such as Decision Trees or Random Forest) lack a model summary with an ROC curve, so I use the class CurveMetrics(BinaryClassificationMetrics) defined below:
from pyspark.mllib.evaluation import BinaryClassificationMetrics
# Python: https://spark.apache.org/docs/latest/api/python/_modules/pyspark/mllib/common.html
class CurveMetrics(BinaryClassificationMetrics):
def __init__(self, *args):
super(CurveMetrics, self).__init__(*args)
def _to_list(self, rdd):
points = []
# Note this collect could be inefficient for large datasets
# considering there may be one probability per datapoint (at most)
# The Scala version takes a numBins parameter,
# but it doesn't seem possible to pass this from Python to Java
for row in rdd.collect():
# Results are returned as type scala.Tuple2,
# which doesn't appear to have a py4j mapping
points += [(float(row._1()), float(row._2()))]
return points
def get_curve(self, method):
rdd = getattr(self._java_model, method)().toJavaRDD()
return self._to_list(rdd)
# the probability of getting the output either as 0 or 1
# Returns as a list (false positive rate, true positive rate)
preds = predictions.select("TARGET", "probability").rdd.map(
lambda row: (float(row["probability"][1]), float(row["TARGET"]))
)
points = CurveMetrics(preds).get_curve("roc")
plt.figure()
x_val = [x[0] for x in points]
y_val = [x[1] for x in points]
plt.title("ROC")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.plot(x_val, y_val, "-r", label="Random Forest Regression ROC Curve")
plt.legend(loc="lower right")
## evaluate the Random Forest Classifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
evaluator = BinaryClassificationEvaluator(labelCol="TARGET")
evaluator.evaluate(predictions)
print("Random Forest Test areaUnderROC: {}".format(evaluator.evaluate(predictions)))
# # 4.3 Gradient-Boosted Tree Classifier
# Gradient-Boosted Tree Classifiers are also a group of decision trees and they iteratively train decision trees in order to minimize a loss function.
from pyspark.ml import Pipeline
## Gradient-Boosted Tree Classifier
from pyspark.ml.classification import GBTClassifier
stages = []
gbt = GBTClassifier(labelCol="TARGET", featuresCol="features", maxIter=5)
pipeline = Pipeline(stages=stages + [gbt])
gbtModel = pipeline.fit(trainingData_undersample)
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
predictions = gbtModel.transform(testData_undersample)
# Show predictions
predictions.select("TARGET", "prediction", "probability").show(10)
# the probability of getting the output either as 0 or 1
# Returns as a list (false positive rate, true positive rate)
preds_GBT = predictions.select("TARGET", "probability").rdd.map(
lambda row: (float(row["probability"][1]), float(row["TARGET"]))
)
points_GBT = CurveMetrics(preds_GBT).get_curve("roc")
plt.figure()
x_val = [x[0] for x in points_GBT]
y_val = [x[1] for x in points_GBT]
plt.title("ROC")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.plot(x_val, y_val, "-r", label="Gradient-Boosted Regression ROC Curve")
plt.legend(loc="lower right")
evaluator = BinaryClassificationEvaluator(labelCol="TARGET")
print(
"GBT Test Area Under ROC:"
+ str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderROC"}))
)
# # 5. ParamGridBuilder and CrossValidator
# find the best model or parameters for a given dataset to improve the performance
# # *I did run this code, but note that it is time consuming and demands significant PC power.*
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
paramGrid = (
ParamGridBuilder()
.addGrid(lr.aggregationDepth, [2, 5, 10])
.addGrid(lr.elasticNetParam, [0.0, 0.5, 1.0])
.addGrid(lr.fitIntercept, [False, True])
.addGrid(lr.maxIter, [10, 100, 1000])
.addGrid(lr.regParam, [0.01, 0.5, 2.0])
.build()
)
# Model tuning, it find the best model or parameters for a given dataset to improve the performance.
cv = CrossValidator(
estimator=lr, estimatorParamMaps=paramGrid, evaluator=evaluator, numFolds=2
)
# Run cross validations
cvModel = cv.fit(trainingData_undersample)
predict_train = cvModel.transform(trainingData_undersample)
predict_test = cvModel.transform(testData_undersample)
print(
"Cross-validation areaUnderROC for train set is {}".format(
evaluator.evaluate(predict_train)
)
)
print(
"Cross-validation areaUnderROC for test set is {}".format(
evaluator.evaluate(predict_test)
)
)
# # Resampling Techniques — Oversample minority class
# * Oversampling can be defined as adding more copies of the minority class. Oversampling can be a good choice when you don’t have a ton of data to work with.
# * with a pandas DataFrame I would use SMOTE to oversample (I used SMOTE in a separate pandas analysis); a sketch follows below
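# A minimal sketch of the SMOTE oversampling mentioned above, applied to the pandas copy
# of the data; it assumes the imbalanced-learn package is available and is separate from
# the Spark pipeline used in the rest of this notebook.
from imblearn.over_sampling import SMOTE

X_pd = train.drop(columns=["ID", "TARGET"])
y_pd = train["TARGET"]
X_resampled, y_resampled = SMOTE(random_state=11).fit_resample(X_pd, y_pd)
print(pd.Series(y_resampled).value_counts())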
# # Important Note
# Always split into test and train sets BEFORE trying oversampling techniques! Oversampling before splitting the data can allow the exact same observations to be present in both the test and train sets. This can allow our model to simply memorize specific data points and cause overfitting and poor generalization to the test data.
(trainingData_spark, testData_spark) = train_spark_drop.randomSplit([0.8, 0.2], seed=11)
from time import *
start_time = time()
train_spark_1_over_OG = trainingData_spark.filter("TARGET =1")
train_spark_0_over = trainingData_spark.filter("TARGET =0")
ratio = train_spark_1_over_OG.count() / train_spark_0_over.count()
print("Before oversample ratio 1 and 0: ", ratio)
sampleRatio = train_spark_0_over.count() / trainingData_spark.count()
print("sampleRatio:", sampleRatio)
# duplicate the minority rows
# explode_range
explode_range = range(int(train_spark_0_over.count() / train_spark_1_over_OG.count()))
train_spark_1_over = train_spark_1_over_OG.withColumn(
"dummy", explode(array([lit(x) for x in explode_range]))
).drop("dummy")
# print(train_spark_1_over.count())
# concate two dataframe together
train_spark_oversample = train_spark_1_over.union(train_spark_0_over)
ratio_oversample = (train_spark_oversample.filter("TARGET =1")).count() / (
train_spark_oversample.filter("TARGET =0")
).count()
print("After oversample ratio 1 and 0: ", ratio_oversample)
end_time = time()
elapsed_time = end_time - start_time
print("Time for this session: %.3f seconds" % elapsed_time)
import seaborn as sns
from matplotlib import pyplot as plt
print("After Normalize Target distribution ")
sns.countplot("TARGET", data=train_spark_oversample.toPandas())
plt.title("Target size", fontsize=14)
plt.show()
train_spark_oversample.groupBy("TARGET").count().show()
# # Feature assembly
# train_spark_oversample, testData_spark
feature_oversample_train = VectorAssembler(
inputCols=train_spark_oversample.columns[:-1], outputCol="features"
)
trainingData_oversample = feature_oversample_train.transform(train_spark_oversample)
feature_oversample_test = VectorAssembler(
inputCols=testData_spark.columns[:-1], outputCol="features"
)
testData_oversample = feature_oversample_test.transform(testData_spark)
# trainingData_oversample, testData_oversample
# # 6. Modelling
# # 6.1 Logistic Regression
## Logistic Regression
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(labelCol="TARGET", featuresCol="features", maxIter=5)
lrModel = lr.fit(trainingData_oversample)
import matplotlib.pyplot as plt
import numpy as np
beta = np.sort(lrModel.coefficients)
plt.plot(beta, label="LogisticRegression")
plt.ylabel("Beta Coefficients")
plt.legend(loc="lower right")
plt.show()
trainSet = lrModel.summary
roc = trainSet.roc.toPandas()
plt.plot(roc["FPR"], roc["TPR"], "-r", label="Logistic Regression ROC Curve")
plt.legend(loc="lower right")
plt.ylabel("False Positive Rate")
plt.xlabel("True Positive Rate")
plt.title("ROC Curve")
plt.show()
print("TrainSet areaUnderROC: " + str(trainSet.areaUnderROC))
# # 6.2 RandomForest
# trainingData_oversample, testData_oversample
## Random Forest Classifier
from pyspark.ml.classification import RandomForestClassifier
# Creating RandomForest model.
rf = RandomForestClassifier(labelCol="TARGET", featuresCol="features", numTrees=2)
## train the model
rfModel = rf.fit(trainingData_oversample)
## make predictions
predictions = rfModel.transform(testData_oversample)
rfPredictions = predictions.select("TARGET", "prediction", "probability")
rfPredictions.show(10)
# the probability of getting the output either as 0 or 1
# Returns as a list (false positive rate, true positive rate)
preds = predictions.select("TARGET", "probability").rdd.map(
lambda row: (float(row["probability"][1]), float(row["TARGET"]))
)
points = CurveMetrics(preds).get_curve("roc")
plt.figure()
x_val = [x[0] for x in points]
y_val = [x[1] for x in points]
plt.title("ROC")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.plot(x_val, y_val, "-r", label="Random Forest Regression ROC Curve")
plt.legend(loc="lower right")
## evaluate the Rnadom Forest Classifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
evaluator = BinaryClassificationEvaluator(labelCol="TARGET")
evaluator.evaluate(predictions)
print("Random Forest Test areaUnderROC: {}".format(evaluator.evaluate(predictions)))
# # 6.3 Gradient-Boosted Tree Classifier
# trainingData_oversample, testData_oversample
from pyspark.ml import Pipeline
## Gradient-Boosted Tree Classifier
from pyspark.ml.classification import GBTClassifier
stages = []
gbt = GBTClassifier(labelCol="TARGET", featuresCol="features", maxIter=5)
pipeline = Pipeline(stages=stages + [gbt])
gbtModel = pipeline.fit(trainingData_oversample)
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
predictions = gbtModel.transform(testData_oversample)
# Show predictions
predictions.select("TARGET", "prediction", "probability").show(10)
# the probability of getting the output either as 0 or 1
# Returns as a list (false positive rate, true positive rate)
preds_GBT = predictions.select("TARGET", "probability").rdd.map(
lambda row: (float(row["probability"][1]), float(row["TARGET"]))
)
points_GBT = CurveMetrics(preds_GBT).get_curve("roc")
plt.figure()
x_val = [x[0] for x in points_GBT]
y_val = [x[1] for x in points_GBT]
plt.title("ROC")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.plot(x_val, y_val, "-r", label="Gradient-Boosted Regression ROC Curve")
plt.legend(loc="lower right")
evaluator = BinaryClassificationEvaluator(labelCol="TARGET")
print(
"GBT Test Area Under ROC:"
+ str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderROC"}))
)
# warning-----------------------------------------------------------------------------------------------warning
# * the code below is extra material that I could not fully utilize; it could still be useful
# # Feature selection matrix (extra; needs a bit more understanding and usage)
# * I find it interesting, but had no time to understand and optimize its usage
# * try it out with the undersampled data, since the data volume is not high
# inspire from https://stackoverflow.com/questions/51831874/how-to-get-correlation-matrix-values-pyspark/51834729
from pyspark.ml.stat import Correlation
from pyspark.ml.feature import VectorAssembler
# drop target column
corr_df = train_spark_oversample.drop("TARGET")
# copying columns names
column_names = corr_df.columns
# get correlation matrix
# matrix = Correlation.corr(feature_vector)
vector_col = "corr_features"
assembler = VectorAssembler(
inputCols=train_spark_oversample.columns[:-1], outputCol=vector_col
)
feature_vector = assembler.transform(train_spark_oversample).select(vector_col)
matrix = Correlation.corr(feature_vector, vector_col)
# Setting column names of the dataframe
convert_matrix = (matrix.collect()[0][0]).toArray().tolist()
df_matrix = pd.DataFrame(convert_matrix, index=column_names, columns=column_names)
df_matrix
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib as mpl
import matplotlib.pyplot as plt
df_matrix_04 = df_matrix[(df_matrix[:] > 0.5) | (df_matrix[:] < -0.5)]
df_matrix_04.head(20)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(df_matrix_04, cmap="YlGnBu")
# collection of columns which need to be removed
cols_to_drop_1 = []
# Looping through
for col in range(len(df_matrix.columns)):
for row in range(col):
if (df_matrix.iloc[row, col] > 0.5 or df_matrix.iloc[row, col] < -0.5) and (
df_matrix.columns[row] not in cols_to_drop_1
):
cols_to_drop_1.append(df_matrix.columns[col])
train_spark_filter = train_spark_oversample.drop(*cols_to_drop_1)
print("Number of cols dropped: ", len(cols_to_drop_1))
print("Number of cols train_spark_filter: ", len(train_spark_filter.columns))
# there are duplicate column names, which is why the list is large; this needs some more time to look into
train_spark_filter.describe().toPandas()
test_spark_filter = testData_spark.drop(*cols_to_drop_1)
test_spark_filter.describe().toPandas()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../input"))
import fiona
from datetime import datetime
from ipywidgets import SelectionSlider, interact
import geopandas as gpd
import shapely
# enable KML support which is disabled by default
fiona.drvsupport.supported_drivers["kml"] = "rw"
fiona.drvsupport.supported_drivers["KML"] = "rw"
DEFAULT_FIGSIZE = (20, 8)
DEFAULT_CRS = "EPSG3857"
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"), crs=DEFAULT_CRS)
def show_on_earth(*dfs):
ax = world.plot(figsize=DEFAULT_FIGSIZE, edgecolor="gray", color="white")
colors = ["blue", "green", "red", "orange", "tomato", "lime"]
for color, df in zip(colors, dfs):
df.plot(ax=ax, color=color)
urls = [
"al052019_5day_001.zip",
"al052019_5day_002.zip",
"al052019_5day_003.zip",
"al052019_5day_004.zip",
"al052019_5day_004A.zip",
"al052019_5day_005.zip",
"al052019_5day_005A.zip",
"al052019_5day_006.zip",
"al052019_5day_006A.zip",
"al052019_5day_007.zip",
"al052019_5day_007A.zip",
"al052019_5day_008.zip",
"al052019_5day_008A.zip",
"al052019_5day_009.zip",
"al052019_5day_009A.zip",
"al052019_5day_010.zip",
"al052019_5day_010A.zip",
"al052019_5day_011.zip",
"al052019_5day_011A.zip",
"al052019_5day_012.zip",
"al052019_5day_012A.zip",
"al052019_5day_013.zip",
"al052019_5day_013A.zip",
"al052019_5day_014.zip",
"al052019_5day_014A.zip",
"al052019_5day_015.zip",
"al052019_5day_015A.zip",
"al052019_5day_016.zip",
"al052019_5day_016A.zip",
"al052019_5day_017.zip",
"al052019_5day_017A.zip",
"al052019_5day_018.zip",
"al052019_5day_018A.zip",
"al052019_5day_019.zip",
"al052019_5day_020.zip",
"al052019_5day_021.zip",
"al052019_5day_022.zip",
"al052019_5day_023.zip",
"al052019_5day_024.zip",
"al052019_5day_024A.zip",
"al052019_5day_025.zip",
"al052019_5day_025A.zip",
"al052019_5day_026.zip",
"al052019_5day_026A.zip",
"al052019_5day_027.zip",
"al052019_5day_027A.zip",
"al052019_5day_028.zip",
"al052019_5day_028A.zip",
"al052019_5day_029.zip",
"al052019_5day_029A.zip",
"al052019_5day_030.zip",
"al052019_5day_030A.zip",
"al052019_5day_031.zip",
"al052019_5day_031A.zip",
"al052019_5day_032.zip",
"al052019_5day_032A.zip",
"al052019_5day_033.zip",
"al052019_5day_033A.zip",
"al052019_5day_034.zip",
"al052019_5day_034A.zip",
"al052019_5day_035.zip",
"al052019_5day_035A.zip",
"al052019_5day_036.zip",
"al052019_5day_036A.zip",
"al052019_5day_037.zip",
"al052019_5day_037A.zip",
"al052019_5day_038.zip",
"al052019_5day_038A.zip",
"al052019_5day_039.zip",
"al052019_5day_039A.zip",
"al052019_5day_040.zip",
"al052019_5day_040A.zip",
"al052019_5day_041.zip",
"al052019_5day_041A.zip",
"al052019_5day_042.zip",
"al052019_5day_042A.zip",
"al052019_5day_043.zip",
"al052019_5day_043A.zip",
"al052019_5day_044.zip",
"al052019_5day_044A.zip",
"al052019_5day_045.zip",
"al052019_5day_045A.zip",
"al052019_5day_046.zip",
"al052019_5day_046A.zip",
"al052019_5day_047.zip",
"al052019_5day_047A.zip",
"al052019_5day_048.zip",
"al052019_5day_048A.zip",
"al052019_5day_049.zip",
"al052019_5day_049A.zip",
"al052019_5day_050.zip",
"al052019_5day_050A.zip",
"al052019_5day_051.zip",
"al052019_5day_051A.zip",
"al052019_5day_052.zip",
"al052019_5day_052A.zip",
"al052019_5day_053.zip",
"al052019_5day_053A.zip",
"al052019_5day_054.zip",
"al052019_5day_054A.zip",
"al052019_5day_055.zip",
"al052019_5day_055A.zip",
"al052019_5day_056.zip",
"al052019_5day_056A.zip",
"al052019_5day_057.zip",
"al052019_5day_058.zip",
"al052019_5day_059.zip",
"al052019_5day_059A.zip",
"al052019_5day_060.zip",
"al052019_5day_060A.zip",
"al052019_5day_061.zip",
"al052019_5day_061A.zip",
"al052019_5day_062.zip",
"al052019_5day_062A.zip",
"al052019_5day_063.zip",
"al052019_5day_063A.zip",
"al052019_5day_064.zip",
]
base = "https://www.nhc.noaa.gov/gis/forecast/archive"
hurrs = [gpd.read_file(f"{base}/{url}") for url in urls]
hurricane_history = pd.concat(hurrs)
hurricane_history.geometry = hurricane_history.geometry.apply(shapely.geometry.Polygon)
hurricane_history = hurricane_history[hurricane_history["STORMTYPE"] == "HU"]
hurricane_history.head()
hurricane_history["date_time"] = hurricane_history["ADVDATE"].apply(
lambda d: datetime.strptime(
d[:1] + " " + d[1:3] + " " + " " + d[-11:], "%H %M %b %d %Y"
)
)
hurricane_history = hurricane_history.set_index("date_time")
hurricane_history.sort_index()
hurricane_history = hurricane_history[hurricane_history["STORMTYPE"] == "HU"]
hurricane_history = hurricane_history[~hurricane_history.index.duplicated(keep="first")]
canada = gpd.read_file(
"https://raw.githubusercontent.com/codeforamerica/click_that_hood/master/public/data/canada.geojson",
crs=DEFAULT_CRS,
)
states = gpd.read_file(
"https://eric.clst.org/assets/wiki/uploads/Stuff/gz_2010_us_040_00_20m.json",
crs=DEFAULT_CRS,
)
north_pacific = pd.concat([canada, states])
north_pacific.head()
show_on_earth(north_pacific)
@interact(event_date=SelectionSlider(options=hurricane_history.index))
def show_timelapse(event_date):
h = hurricane_history[hurricane_history.index == event_date]
print(f"Calculating effected areas... {event_date}")
effected_areas = north_pacific[north_pacific.intersects(h.unary_union)]
print(f"{len(effected_areas)} areas effected by the hurricane")
show_on_earth(h, north_pacific, effected_areas)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import librosa
import librosa.display
import IPython.display
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# Any results you write to the current directory are saved as output.
os.listdir("../input/train-test")
train = pd.read_csv("../input/freesound-audio-tagging/train.csv")
train
audio_path = "../input/freesound-audio-tagging/audio_train/"
from scipy.io import wavfile
fname, label, verified = train.values[0]
rate, data = wavfile.read(audio_path + fname)
print(label)
print("Sampling Rate:\t{}".format(rate))
print("Total Frames:\t{}".format(data.shape[0]))
print(data)
y, sr = librosa.load(audio_path + fname)
IPython.display.Audio(data=y, rate=sr)
a = np.load("../input/train-test/train_test.npy", allow_pickle=True)
from tensorflow.keras.preprocessing.sequence import pad_sequences
pad_audio_data = pad_sequences(a, maxlen=sr * 10, value=0, dtype="float32")
pad_audio_data.shape
labelEncoder = {}
for i, label in enumerate(train["label"].unique()):
labelEncoder[label] = i
labelEncoder
from tqdm import tqdm
Encoding_label = np.zeros(9473, dtype=object)
for i in tqdm(range(0, 9473)):
fname, label, verified = train.values[i]
Encoding_label[i] = labelEncoder[label]
from tensorflow.keras.utils import to_categorical
Encoding_label = to_categorical(Encoding_label, 41)
plt.plot(data[:1024])
D = librosa.amplitude_to_db(np.abs(librosa.stft(y[:1024])), ref=np.max)
plt.plot(D.flatten())
plt.show()
S = librosa.feature.melspectrogram(y=y, sr=sr)
plt.figure(figsize=(12, 4))
librosa.display.specshow(
librosa.power_to_db(S, ref=np.max), sr=sr, x_axis="time", y_axis="mel"
)
plt.colorbar(format="%+2.0f dB")
plt.tight_layout()
plt.show()
mfcc = librosa.feature.mfcc(y=y, sr=sr)
plt.figure(figsize=(12, 4))
librosa.display.specshow(mfcc, x_axis="time")
plt.colorbar(format="%+2.0f dB")
plt.tight_layout()
plt.show()
min_level_db = -100
def _normalize(S):
return np.clip(
(librosa.power_to_db(S, ref=np.max) - min_level_db) / -min_level_db, 0, 1
)
norm_S = _normalize(S)
plt.figure(figsize=(12, 4))
librosa.display.specshow(norm_S, sr=sr, x_axis="time", y_axis="mel")
plt.title("norm mel power spectrogram")
plt.colorbar(format="%+0.1f dB")
plt.tight_layout()
plt.show()
from keras.models import Sequential
from keras.layers import Conv1D, Dense, Dropout, MaxPool1D, Flatten
input_length = sr * 10
n_classes = train["label"].unique().shape[0]
input_audio_data = np.expand_dims(pad_audio_data, axis=2)
def create_cnn():
model = Sequential()
model.add(
Conv1D(4, 16, activation="relu", padding="same", input_shape=(input_length, 1))
)
model.add(MaxPool1D(pool_size=5))
model.add(Dropout(0.1))
model.add(Conv1D(9, 16, activation="relu", padding="same"))
model.add(MaxPool1D(pool_size=5))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(units=100, activation="relu"))
model.add(Dense(units=n_classes, activation="softmax"))
model.compile(
loss="categorical_crossentropy", metrics=["accuracy"], optimizer="adam"
)
return model
model = create_cnn()
model.summary()
history = model.fit(input_audio_data, Encoding_label, epochs=5, validation_split=1 / 6)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Import all required library
import pandas as pd
import numpy as np
import os
# to save model
import pickle
# Import visualization modules
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
data = pd.read_csv("/kaggle/input/glass/glass.csv")
data.describe()
data.head()
# create new column for "Type" to "g_type" form 0 or 1.
data["g_type"] = data.Type.map({1: 0, 2: 0, 3: 0, 5: 1, 6: 1, 7: 1})
data.head()
# create "Glass correlation Marxix"
features = ["RI", "Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe", "g_type"]
mask = np.zeros_like(data[features].corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(16, 12))
plt.title("Glass Correlation Matrix", fontsize=25)
sns.heatmap(
data[features].corr(),
linewidths=0.25,
vmax=0.7,
square=True,
cmap="BuGn",
# "BuGn_r" to reverse
linecolor="b",
annot=True,
annot_kws={"size": 8},
mask=mask,
cbar_kws={"shrink": 0.9},
)
y = data.g_type
X = data.loc[:, ["Na", "Al", "Ba"]]
data.Type.value_counts().sort_index()
data.isnull().sum()
# apply model Logistic regression
model_logr = LogisticRegression()
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=10
)
output_model = model_logr.fit(X_train, y_train)
output_model
pkl_filename = "pickle_model.pkl"
with open(pkl_filename, "wb") as file:
    pickle.dump(model_logr, file)
# Load from file
with open(pkl_filename, "rb") as file:
pickle_model = pickle.load(file)
# Calculate the accuracy score and predict target values
score = pickle_model.score(X_test, y_test)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(X_test)
model_logr.fit(X_train, y_train)
y_predict = model_logr.predict(X_test)
y_predict
print(classification_report(y_test, y_predict))
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_predict)
plt.scatter(X, y, color="black")
|
# ## Let's look at the data first
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/wy4vnuumx7y294p/train.csv")
test = pd.read_csv("/kaggle/input/wy4vnuumx7y294p/test.csv")
subm = pd.read_csv("/kaggle/input/wy4vnuumx7y294p/sample_submission.csv")
# Any results you write to the current directory are saved as output.
# subm.to_csv('/kaggle/working/submission.csv', index=False)
train.head()
subm.head()
print(train.values[0])
print(train.isnull().values.any())
print(test.isnull().values.any())
# ### Distribution of train set (sentence 1 vs sentence 2)
s1_lens = train.sentence1.str.split().str.len()
s2_lens = train.sentence2.str.split().str.len()
print(len(train))
print(s1_lens.mean(), s1_lens.std(), s1_lens.min(), s1_lens.max())
print(s2_lens.mean(), s2_lens.std(), s2_lens.min(), s2_lens.max())
s1_lens.hist()
s2_lens.hist()
# ### Distribution of test set (sentence 1 vs sentence 2)
s1_lens = test.sentence1.str.split().str.len()
s2_lens = test.sentence2.str.split().str.len()
print(len(test))
print(s1_lens.mean(), s1_lens.std(), s1_lens.min(), s1_lens.max())
print(s2_lens.mean(), s2_lens.std(), s2_lens.min(), s2_lens.max())
s1_lens.hist()
s2_lens.hist()
# ## Create a corpus and fit TF-IDF
corpus = pd.concat(
[train.sentence1, train.sentence2, test.sentence1, test.sentence2],
ignore_index=True,
)
# corpus = [
# '17244 28497 16263',
# '5464 4053 14577 8272 15775 3437 20163 8711',
# '24645 8554 25911',
# '14080 15907 25964 3099 26989 26797 3397 9553',
# '14313 2348 4875 23364',
# ]
vectorizer = TfidfVectorizer(
lowercase=False,
strip_accents=None,
tokenizer=lambda x: x.split(),
preprocessor=lambda x: x,
ngram_range=(1, 3),
min_df=3,
max_df=0.9,
use_idf=1,
smooth_idf=1,
sublinear_tf=1,
)
vectorizer.fit(corpus)
vectorizer.get_feature_names()
len(vectorizer.get_feature_names())
# ## Train with LR w/ and w/o PCA
# Since TF-IDF creates very sparse features, let's try out with and without PCA first, before trying anything more complex like neural networks. Since the input is pair-wise there are several ways to deal with this:
# - Sum/Subtract/Concatenate the inputs or their representations (we try this for LR; see the sketch after this list)
# - Use Siamese Networks and Contrastive Loss
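# A minimal sketch of the two simple combination strategies on the sparse TF-IDF matrices
# (element-wise sum vs. concatenation); the cells below use the sum.
from scipy.sparse import hstack

s1_tfidf = vectorizer.transform(train.sentence1)
s2_tfidf = vectorizer.transform(train.sentence2)
train_x_sum = s1_tfidf + s2_tfidf              # element-wise sum keeps the feature dimension
train_x_concat = hstack([s1_tfidf, s2_tfidf])  # concatenation doubles the feature dimension
print(train_x_sum.shape, train_x_concat.shape)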
# ### First, without PCA
train_x = vectorizer.transform(train.sentence1) + vectorizer.transform(train.sentence2)
train_y = train.label.values
test_x = vectorizer.transform(test.sentence1) + vectorizer.transform(test.sentence2)
model = LogisticRegressionCV(dual=True, solver="liblinear", max_iter=100)
# model.scores_
# model = LogisticRegression(C=1, dual=True, solver='liblinear')
model.fit(train_x, train_y)
train_preds = model.predict(train_x)
sum(train_y == train_preds) / len(train_y)
test_preds = model.predict(test_x)
submid = pd.DataFrame({"id": subm["id"]})
submission = pd.concat([submid, pd.DataFrame(test_preds, columns=["label"])], axis=1)
submission.to_csv("/kaggle/working/submission_cv_trigram_sub.csv", index=False)
# ### Second, with PCA -> actually TruncatedSVD because the input is a sparse matrix
# Define a pipeline to search for the best combination of PCA truncation and classifier regularization.
pca = TruncatedSVD()
logistic = LogisticRegression(dual=True, solver="liblinear", max_iter=100)
pipe = Pipeline(steps=[("pca", pca), ("logistic", logistic)])
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
"pca__n_components": [50, 100, 200, 300, 500, 1000],
"logistic__C": [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
}
search = GridSearchCV(pipe, param_grid, n_jobs=-1)
search.fit(train_x, train_y)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
|
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
import csv
import re
from tabulate import tabulate
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
# NLP
from textblob import TextBlob
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
test_df = pd.read_csv("../input/nlp-getting-started/test.csv")
train_df = pd.read_csv("../input/nlp-getting-started/train.csv")
combine = [train_df, test_df]
print(train_df.columns.values)
# preview the data
train_df.head()
# preview the data
train_df.tail()
train_df.info()
print("_" * 40)
test_df.info()
train_df.describe()
train_df.describe(include=["O"])
train_df["keyword"].unique()
train_df["location"].unique()
train_df[["keyword", "target"]].groupby(["keyword"], as_index=False).mean()
uniqueKeys = []
keywordTarget = (
train_df[["keyword", "target"]].groupby(["keyword"], as_index=False).mean()
)
for i in range(0, 221):
uniqueKeys.append(keywordTarget["keyword"][i])
print(uniqueKeys)
print("-" * 40)
print(len(uniqueKeys))
test_df["keyword"].unique()
train_df[["location", "target"]].groupby(["location"], as_index=False).mean()
for dataset in combine:
dataset["keyword"] = dataset["keyword"].fillna(0)
train_df.head()
|
# Import libraries
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers, models, regularizers
from tensorflow.keras.utils import pad_sequences
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from gensim.models import FastText
import re
import matplotlib.pyplot as plt
# Read data
df = pd.read_csv("/kaggle/input/human-stress-prediction/Stress.csv")
df.head()
df_x = df["text"]
df_y = df["label"]
y = df_y
df_x.shape
# Transform text into vectors
x = df_x.tolist()
x = [re.sub("^[A-Za-z0-9]", "", i).split(" ") for i in x]
len(x[0])
fasttest = FastText(x, vector_size=4)
x_embeds = [np.concatenate([fasttest.wv[i] for i in sent]) for sent in x]
print(len(x_embeds[0]))
x_embeds_padded = pad_sequences(x_embeds, padding="post", dtype="float64")
x_embeds_padded[0].shape
# Split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(
x_embeds_padded, y, test_size=0.25, random_state=333
)
x_train.shape
# Create 1D CNN Model
n_samples, n_features = x_train.shape
n_features
model = models.Sequential()
model.add(
layers.Conv1D(
4, 32, kernel_regularizer=regularizers.l2(0.01), input_shape=(n_features, 1)
)
)
model.add(layers.MaxPooling1D(pool_size=4))
model.add(layers.Flatten())
model.add(layers.Dense(256, kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.Dense(32, kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.Dense(1))
model.summary()
model.compile(
optimizer="adam",
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=["accuracy"],
)
history = model.fit(
x_train, y_train, epochs=10, validation_data=(x_test, y_test), verbose=2
)
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.ylim([0, 2])
plt.legend(loc="lower right")
# Using Keras TextVectorization
x_train, x_test, y_train, y_test = train_test_split(
df_x, df_y, train_size=0.75, random_state=16
)
max_features = 3000
max_len = 400
vectorize_layer = layers.TextVectorization(
max_tokens=max_features, output_sequence_length=max_len
)
vectorize_layer.adapt(df_x)
embedding_size = 8
n_filters = 16
kernel = (4, embedding_size)
pool_size = (4, 1)
model = models.Sequential()
model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
model.add(vectorize_layer)
model.add(layers.Embedding(max_features, embedding_size))
model.add(layers.Reshape((max_len, embedding_size, 1)))
model.add(layers.Conv2D(n_filters, kernel, kernel_regularizer=regularizers.l2(0.001)))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Flatten())
model.add(layers.Dense(128))
model.add(layers.Dense(1, kernel_regularizer=regularizers.l2(0.001)))
model.summary()
weights = model.get_weights()
model.compile(
optimizer="adam",
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=["accuracy"],
)
history = model.fit(
x_train,
y_train,
epochs=5,
validation_data=(x_test, y_test),
verbose=2,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=1)],
)
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.ylim([0, 1])
plt.legend()
model.evaluate(x_test, y_test)
|
# # CIFAR MM
# ## Initialisations
# Pandas: data manipulation library
# NumPy: scientific computing library
# Matplotlib: visualization and plotting library
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
from sklearn.metrics import *
from sklearn.linear_model import *
from sklearn.model_selection import *
from sklearn.tree import *
from sklearn.ensemble import *
from sklearn.impute import KNNImputer
from xgboost import *
# TensorFlow: deep learning library
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
# ## Reading the images
# Importing the CIFAR10 data
from keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train.shape
import random
X_train_reduced = np.array(random.sample(list(X_train), 10000))
y_train.shape
labels = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
num_classes = 10
# ## Displaying the first 50 images
plt.figure(figsize=(10, 20))
for i in range(0, 50):
plt.subplot(10, 5, i + 1)
plt.axis("off")
plt.imshow(X_test[i])
plt.title(labels[int(y_test[i])])
# ## Transforming the dataset for machine learning
# We "flatten" the images to obtain a format similar to MNIST:
X_train1 = X_train.reshape(len(X_train), -1)
X_test1 = X_test.reshape(len(X_test), -1)
X_train_reduced = X_train.reshape(len(X_train), -1)
X_train1.shape
model = XGBClassifier(tree_method="gpu_hist", gpu_id=0)
model.fit(X_train_reduced, y_train)
y_hat = model.predict(X_test1)
accuracy_score(y_test, y_hat)
nb_classes = 10
model = Sequential()
model.add(Dense(200, activation="relu"))
model.add(Dense(100, activation="relu"))
model.add(Dense(50, activation="relu"))
model.add(Dense(nb_classes, activation="softmax"))
# scale both the training and the validation images to [0, 1]
X_train_reduced = X_train_reduced / 255
X_test1 = X_test1 / 255
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
histo = model.fit(
X_train_reduced, y_train, validation_data=(X_test1, y_test), epochs=30, verbose=1
)
def plot_scores(histo):
accuracies = histo.history["accuracy"]
plt.plot(range(len(accuracies)), accuracies, "b")
val_accuracies = histo.history["val_accuracy"]
plt.plot(range(len(val_accuracies)), val_accuracies, "r")
plot_scores(histo)
|
# # Feedback from judges
# * The submission does a great job explaining hazard modelling.
# * The visualizations that show the model training are easy to follow.
# * The submission could have been easier to read if the model fitting output/code was hidden.
# * The equation that represented the sum of player contributions could have more explanation. Locations, velocities and accelerations are mentioned, but it could be more explicit (accelerations/velocities in which direction? are they scaled in any way?)
# * The submission could benefit from having a stability test for the metric (i.e. how do players’ scores in weeks 1-4 compare to their scores in weeks 5-8).
# * The results of the submission agree with intuition.
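# A minimal sketch of the stability check suggested in the feedback above, assuming a
# hypothetical `player_scores` table with columns nflId, week and score (not produced in
# this notebook): compare each player's average score in weeks 1-4 with weeks 5-8.
import pandas as pd

player_scores = pd.DataFrame(
    {   # made-up stand-in for per-player weekly scores
        "nflId": [1, 1, 1, 2, 2, 2, 3, 3, 3],
        "week": [1, 3, 6, 2, 4, 7, 1, 4, 8],
        "score": [0.20, 0.30, 0.25, -0.10, 0.00, -0.05, 0.50, 0.45, 0.55],
    }
)
first_half = player_scores[player_scores.week <= 4].groupby("nflId").score.mean()
second_half = player_scores[player_scores.week >= 5].groupby("nflId").score.mean()
stability = pd.concat([first_half, second_half], axis=1, keys=["w1_4", "w5_8"]).dropna()
print(stability.corr(method="spearman"))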
# The offensive line protects the quarterback, providing time to find receivers. On the other hand, the defensive line tackles the offensive line to collapse the pocket. The interaction between these players determines the space of the pocket and also the time in the pocket until the pass result comes out (PktTime).
# The time in the pocket is strongly affected by defensive and offensive formations. These formations determine what kind of interactions between players occur, affecting the space around the quarterback. We can observe the time in the pocket, the pass result and who sacks the quarterback; however, how other players influence this result is unclear.
# To evaluate the contributions of all players, we adopt the Cox proportional hazard model to express the distribution of the time in the pocket. This model decomposes the hazard, the rate of the quarterback being sacked, into the sum of each player's contributions. To consider the interaction between players, we use a gated graph neural network that utilizes the Delaunay triangulation.
# This notebook consists of the following topics.
# * How to use hazard model for time in the pocket
# * Definition of Cox proportional hazard model with gated graph neural networks
# * Application 1: Model training
# * Application 2: Hazard rate changes during play
# * Application 3: Ranking of teams
# The content of this notebook inherits my previous notebook in NFL Big Data Bowl 2022. See also https://www.kaggle.com/code/sryo188558/hazard-modeling-for-kickoff-punt-returns.
# # How to use hazard model for time in pocket
# Hazard modeling aims to model the duration until some events happen, given covariate information. For example, to know the efficacy of the medicine, doctors investigate how the medicine decreases the death rate. In hazard modeling, we estimate the hazard function instead of the probability density function. The hazard function is the instantaneous rate of the event occurrence.
# We first split the time axis into frames and denote the time until the pocket is collapsed (PktTime) as a number of frames. This PktTime takes a discrete value, so we here estimate the discrete hazard function:
# $h(r) = \mathrm{Pr} (r \leq R < r + 1 \mid R \geq r)$
#
# This function is the probability that the defensive line pressure collapses the pocket at frame $r$, given that the pocket has survived up to frame $r$.
# After a quarterback receives a ball, the defensive line players try to sack this quarterback as soon as possible. In contrast, the offensive line players prevent the defensive line players from tackling the quarterback. In terms of the hazard function, defensive players try to increase $h(r)$, and offensive players decrease $h(r)$.
# We aim to estimate how each player affects this hazard. It is not sufficient to evaluate players' contributions by simple stats like PktTime and sacks. For example, when a quarterback is sacked, how other defensive players contribute to this result is unclear. Using hazard values enables us to compare players' contributions between different positions.
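# A minimal sketch (with made-up numbers, not part of the model below) of the empirical
# discrete hazard h(r) = Pr(R = r | R >= r), estimated from observed PktTime values and
# censoring flags (True when the pass was thrown before the pocket collapsed).
import numpy as np

durations = np.array([18, 25, 25, 30, 40, 40, 41, 55])  # hypothetical PktTime in frames
censored = np.array([False, True, False, False, True, False, False, True])

def empirical_hazard(durations, censored):
    hazard = np.zeros(durations.max() + 1)
    for r in range(durations.max() + 1):
        at_risk = np.sum(durations >= r)                 # plays whose pocket is still alive at frame r
        events = np.sum((durations == r) & (~censored))  # pockets collapsing exactly at frame r
        hazard[r] = events / at_risk if at_risk > 0 else 0.0
    return hazard

hazard_values = empirical_hazard(durations, censored)
print({r: round(v, 3) for r, v in enumerate(hazard_values) if v > 0})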
# # Cox proportional hazard model with gated graph neural networks
# We adopt the **Cox proportional model** to express the hazard function. Cox proportional model divides the hazard function into two parts as
# $$
# \begin{aligned}
# h(r) = h_0(r) \cdot \exp(\phi(x)), \, \text{$x$ : covariates.}
# \end{aligned}
# $$
# The former indicates how the hazard function depends on time, and the latter indicates how it depends on the covariate information. The Cox model leaves the former (the baseline hazard) unspecified and estimates the covariate part through the partial likelihood, which makes the model semiparametric.
# We define $\phi(x)$ as the summation of players' contributions.
# $$
# \begin{aligned}
# \phi(x) = \phi_\mathrm{R}(x_\mathrm{R} \mid x) + \sum_{i=1}^{10} \phi_\mathrm{O}(x^{(i)}_\mathrm{O} \mid x) + \sum_{j=1}^{11} \phi_\mathrm{D}(x^{(j)}_\mathrm{D} \mid x).
# \end{aligned}
# $$
# Here, $x_\mathrm{R}, \{x^{(i)}_\mathrm{O}\}_{i=1, \cdots, 10} \,\, , \{x^{(j)}_\mathrm{D}\}_{j=1, \cdots, 11} \, \, $ are player's information for a quarterback, offensive line players and defensive line players. Specifically, we use players' locations, velocities and accelerations as $x$. $\phi_\mathrm{R}(\cdot), \phi_\mathrm{O}(\cdot), \phi_\mathrm{D}(\cdot)$ are estimand functions that transform players' information into hazards.
# Each player's contribution to the hazard is also affected by nearby players. We express these interactions as graph expressions.
# First, we calculate the Delaunay diagram from players' locations and construct a graph indicating the nearby players' pairs.
# Under this graph, we adopt **Gated Graph Neural Network** for expressing $\phi_\mathrm{R}(\cdot), \phi_\mathrm{O}(\cdot), \phi_\mathrm{D}(\cdot)$.
# 
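# A minimal sketch (with made-up numbers) of how the per-play score phi(x) enters the Cox
# partial likelihood that the model is trained against; ties are handled in the Breslow way.
# The actual training code later in this notebook may differ in details.
import numpy as np

def neg_cox_partial_loglik(phi, durations, observed):
    """phi: risk scores phi(x); durations: PktTime in frames; observed: 1 if the pocket collapsed, 0 if censored."""
    loss = 0.0
    for i in np.where(observed == 1)[0]:
        at_risk = durations >= durations[i]  # plays whose pocket lasted at least as long
        loss -= phi[i] - np.log(np.sum(np.exp(phi[at_risk])))
    return loss

phi = np.array([0.3, -0.1, 0.8, 0.0])   # hypothetical phi(x) values for four plays
durations = np.array([20, 35, 15, 40])  # hypothetical PktTime in frames
observed = np.array([1, 0, 1, 1])       # 1 = collapse observed, 0 = censored (pass thrown)
print(neg_cox_partial_loglik(phi, durations, observed))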
# # Application 1: Model training
# First, we extract players' locations, velocities and accelerations at each frame and define the PktTime of this frame to be the duration until the pocket is collapsed. We treat a play as censored when the pass result is "Complete" or "Incomplete", since the true PktTime could be longer than the observed value.
# We omit the details of our model here, but this notebook contains its full implementation. For further information on gated graph neural networks, see the following paper.
# * Li et al., Gated Graph Sequence Neural Networks, ICLR, 2016.
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
import tensorflow as tf
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.spatial import Delaunay
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import seaborn as sns
import imageio
from glob import glob
from IPython import display
import warnings
warnings.filterwarnings("ignore")
sns.set()
sns.set_style("white")
def find_adjacent(loc):
n = loc.shape[0]
delau = Delaunay(loc)
adj = np.zeros((n, n))
ind = np.arange(n)
for simplice in delau.simplices:
adj[ind[simplice, np.newaxis], ind[simplice]] = 1.0
adj *= 1 - np.eye(n)
return adj
def extract_feature(game, play, play_detail, pff):
frameId = play_detail.frameId.values
event = play_detail.event.values
frame_start = frameId[event == "ball_snap"][0]
frame_end = frameId[
(event == "pass_forward") + (event == "qb_strip_sack") + (event == "qb_sack")
][0]
frame_length = frame_end - frame_start + 1
play_detail = play_detail[(frameId >= frame_start) * (frameId <= frame_end)]
homeTeam, awayTeam = game.homeTeamAbbr.values[0], game.visitorTeamAbbr.values[0]
possessionTeam = play.possessionTeam.values[0]
defensiveTeam = play.defensiveTeam.values[0]
pff_role = pd.DataFrame(
play_detail.nflId.map(dict(zip(pff.nflId.values, pff.pff_role.values)))
)
pff_role.columns = ["pff_role"]
play_detail = pd.concat([play_detail, pff_role], axis=1)
vel = pd.concat(
[
np.cos((-play_detail.dir.values + 90) / 360 * 2 * np.pi) * play_detail["s"],
np.sin((-play_detail.dir.values + 90) / 360 * 2 * np.pi) * play_detail["s"],
],
axis=1,
)
vel.columns = ["vel_x", "vel_y"]
acc = pd.concat(
[
np.cos((-play_detail.o.values + 90) / 360 * 2 * np.pi) * play_detail["a"],
np.sin((-play_detail.o.values + 90) / 360 * 2 * np.pi) * play_detail["a"],
],
axis=1,
)
acc.columns = ["acc_x", "acc_y"]
play_detail = pd.concat([play_detail, vel, acc], axis=1)
team = play_detail.team.values
play_detail_offense, play_detail_defense = (
play_detail[team == possessionTeam],
play_detail[team == defensiveTeam],
)
position_offense = play_detail_offense.position
position_defense = play_detail_defense.position
play_detail_football = play_detail[team == "football"]
play_detail_QB = play_detail_offense[position_offense == "QB"]
play_detail_OL = play_detail_offense[position_offense == "OL"]
play_detail_BR = play_detail_offense[position_offense == "BR"]
play_detail_DL = play_detail_defense[position_defense == "DL"]
play_detail_LB = play_detail_defense[position_defense == "LB"]
play_detail_DB = play_detail_defense[position_defense == "DB"]
dist = np.linalg.norm(
play_detail_QB[["x", "y"]].values - play_detail_football[["x", "y"]].values,
axis=-1,
)
catch_frame = np.min(play_detail_QB.frameId.values[dist < 1]) - frame_start
yardline = play.absoluteYardlineNumber.values[0]
direction = 2.0 * ("left" == play_detail.playDirection.values[0]) - 1
play_detail = pd.concat(
[
play_detail_QB,
play_detail_OL,
play_detail_BR,
play_detail_DL,
play_detail_LB,
play_detail_DB,
],
axis=0,
)
loc = (play_detail[["x", "y"]].values - np.array([yardline, 53.3 / 2])) * direction
vel = play_detail[["vel_x", "vel_y"]].values * direction
acc = play_detail[["acc_x", "acc_y"]].values * direction
x = np.hstack([loc, vel, acc]).reshape((22, -1, 6)).transpose((1, 0, 2))
loc_football = (
play_detail_football[["x", "y"]].values - np.array([yardline, 53.3 / 2])
) * direction
y = frame_length
c = play.passResult.values[0] == "S"
frameId = play_detail.frameId.values
position = play_detail[frameId == frame_start].position.values
nflId = play_detail[frameId == frame_start].nflId.values
pff_role = play_detail[frameId == frame_start].pff_role.values
m = np.array(
(pff_role == "Pass") + (pff_role == "Pass Block") + (pff_role == "Pass Rush")
).astype(float)
adj = []
for r in range(frame_length):
a = np.zeros((22, 22))
a[np.ix_(m == 1, m == 1)] = find_adjacent(x[r, m == 1, :2])
adj.append(a)
adj = np.stack(adj, axis=0)
return (
x,
m,
adj,
y,
c,
catch_frame,
nflId,
position,
possessionTeam,
defensiveTeam,
loc_football,
)
dirname = "/kaggle/input/nfl-big-data-bowl-2023"
games = pd.read_csv(os.path.join(dirname, "games.csv"))
plays = pd.read_csv(os.path.join(dirname, "plays.csv"))
players = pd.read_csv(os.path.join(dirname, "players.csv"))
tracking = pd.concat(
[
pd.read_csv(os.path.join(dirname, "week" + str(i) + ".csv"))
for i in [1, 2, 3, 4, 5, 6, 7, 8]
],
axis=0,
)
pffs = pd.read_csv(os.path.join(dirname, "pffScoutingData.csv"))
nflId2position = dict(zip(players.nflId.values, players.officialPosition.values))
position2position = {}
position2position.update(dict(zip(["QB"], ["QB"])))
position2position.update(dict(zip(["RB", "FB", "TE", "WR"], 4 * ["BR"])))
position2position.update(dict(zip(["C", "G", "T"], 3 * ["OL"])))
position2position.update(dict(zip(["LB", "ILB", "OLB", "MLB"], 4 * ["LB"])))
position2position.update(dict(zip(["DB", "CB", "FS", "SS"], 4 * ["DB"])))
position2position.update(dict(zip(["DE", "DT", "NT"], 3 * ["DL"])))
position = pd.DataFrame(tracking.nflId.map(nflId2position).map(position2position))
position.columns = ["position"]
tracking = pd.concat([tracking, position], axis=1)
xs, ms, adjs, ys, cs = [], [], [], [], []
catch_frames, nflIds, positions, offense_teams, defense_teams = [], [], [], [], []
gameIds = plays.gameId.values
playIds = plays.playId.values
ind = 0
inds = []
frame_max = 0
print("Loading Data")
for gameId, playId in tqdm(zip(gameIds, playIds)):
game = games[games.gameId.values == gameId]
play = plays[(plays.gameId.values == gameId) * (plays.playId.values == playId)]
play_detail = tracking[
(tracking.gameId.values == gameId) * (tracking.playId.values == playId)
]
pff = pffs[(pffs.gameId.values == gameId) * (pffs.playId.values == playId)]
try:
(
x,
m,
adj,
y,
c,
catch_frame,
nflId,
position,
offense_team,
defense_team,
_,
) = extract_feature(game, play, play_detail, pff)
if (
x.shape[1] == 22
and y > 0
and np.all(x[0, :11, 0] > 0)
and np.all(x[0, 11:, 0] < 0)
):
xs.append(x)
ms.append(m)
adjs.append(adj)
ys.append(y)
cs.append(c)
catch_frames.append(catch_frame)
nflIds.append(nflId)
positions.append(position)
offense_teams.append(offense_team)
defense_teams.append(defense_team)
if frame_max < x.shape[0]:
frame_max = x.shape[0]
inds.append(ind)
ind += 1
except:
pass
extract_frames = np.arange(0, frame_max, 10)
n_batch = 10
fs = np.stack(
[np.hstack([np.ones(x.shape[0]), np.zeros(frame_max - x.shape[0])]) for x in xs]
)
xs = np.stack(
[np.pad(x, [[0, frame_max - x.shape[0]], [0, 0], [0, 0]], "constant") for x in xs]
)
adjs = np.stack(
[
np.pad(adj, [[0, frame_max - adj.shape[0]], [0, 0], [0, 0]], "constant")
for adj in adjs
]
)
ms, ys, cs = np.stack(ms), np.hstack(ys).astype(float), np.array(cs).astype(int)
catch_frames, nflIds, positions, offense_teams, defense_teams = (
np.hstack(catch_frames),
np.vstack(nflIds),
np.vstack(positions),
np.hstack(offense_teams),
np.hstack(defense_teams),
)
inds = np.stack(inds)
(
inds_train,
inds_eval,
xs_train,
xs_eval,
adjs_train,
adjs_eval,
fs_train,
fs_eval,
ms_train,
ms_eval,
ys_train,
ys_eval,
cs_train,
cs_eval,
catch_frames_train,
catch_frames_eval,
nflIds_train,
nflIds_eval,
positions_train,
positions_eval,
offense_teams_train,
offense_teams_eval,
defense_teams_train,
defense_teams_eval,
) = train_test_split(
inds,
xs,
adjs,
fs,
ms,
ys,
cs,
catch_frames,
nflIds,
positions,
offense_teams,
defense_teams,
test_size=0.2,
random_state=0,
)
(
inds_test,
inds_eval,
xs_test,
xs_eval,
adjs_test,
adjs_eval,
fs_test,
fs_eval,
ms_test,
ms_eval,
ys_test,
ys_eval,
cs_test,
cs_eval,
catch_frames_test,
catch_frames_eval,
nflIds_test,
nflIds_eval,
positions_test,
positions_eval,
offense_teams_test,
offense_teams_eval,
defense_teams_test,
defense_teams_eval,
) = train_test_split(
inds_eval,
xs_eval,
adjs_eval,
fs_eval,
ms_eval,
ys_eval,
cs_eval,
catch_frames_eval,
nflIds_eval,
positions_eval,
offense_teams_eval,
defense_teams_eval,
test_size=0.5,
random_state=0,
)
X_orig = tf.constant(
np.stack(
sum(
[
[x[extract_frame] for x, f in zip(xs, fs) if f[extract_frame] == 1]
for extract_frame in extract_frames
],
[],
)
),
dtype=tf.float32,
)
M_orig = tf.constant(
np.vstack([ms[fs[:, extract_frame] == 1] for extract_frame in extract_frames]),
dtype=tf.float32,
)
A_orig = tf.constant(
np.stack(
sum(
[
[
adj[extract_frame]
for adj, f in zip(adjs, fs)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
),
dtype=tf.float32,
)
X_eval = tf.constant(
np.stack(
sum(
[
[
x[extract_frame]
for x, f in zip(xs_eval, fs_eval)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
),
dtype=tf.float32,
)
M_eval = tf.constant(
np.vstack(
[ms_eval[fs_eval[:, extract_frame] == 1] for extract_frame in extract_frames]
),
dtype=tf.float32,
)
A_eval = tf.constant(
np.stack(
sum(
[
[
adj[extract_frame]
for adj, f in zip(adjs_eval, fs_eval)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
),
dtype=tf.float32,
)
ys_eval = np.hstack(
[
ys_eval[fs_eval[:, extract_frame] == 1] - extract_frame
for extract_frame in extract_frames
]
)
cs_eval = np.hstack(
[cs_eval[fs_eval[:, extract_frame] == 1] for extract_frame in extract_frames]
)
X_test = tf.constant(
np.stack(
sum(
[
[
x[extract_frame]
for x, f in zip(xs_test, fs_test)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
),
dtype=tf.float32,
)
M_test = tf.constant(
np.vstack(
[ms_test[fs_test[:, extract_frame] == 1] for extract_frame in extract_frames]
),
dtype=tf.float32,
)
A_test = tf.constant(
np.stack(
sum(
[
[
adj[extract_frame]
for adj, f in zip(adjs_test, fs_test)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
),
dtype=tf.float32,
)
ys_test = np.hstack(
[
ys_test[fs_test[:, extract_frame] == 1] - extract_frame
for extract_frame in extract_frames
]
)
cs_test = np.hstack(
[cs_test[fs_test[:, extract_frame] == 1] for extract_frame in extract_frames]
)
xs_train = np.stack(
sum(
[
[
x[extract_frame]
for x, f in zip(xs_train, fs_train)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
)
ms_train = np.vstack(
[ms_train[fs_train[:, extract_frame] == 1] for extract_frame in extract_frames]
)
adjs_train = np.stack(
sum(
[
[
adj[extract_frame]
for adj, f in zip(adjs_train, fs_train)
if f[extract_frame] == 1
]
for extract_frame in extract_frames
],
[],
)
)
ys_train = np.hstack(
[
ys_train[fs_train[:, extract_frame] == 1] - extract_frame
for extract_frame in extract_frames
]
)
cs_train = np.hstack(
[cs_train[fs_train[:, extract_frame] == 1] for extract_frame in extract_frames]
)
n = xs_train.shape[0]
xs_train, ms_train, adjs_train, ys_train, cs_train = (
xs_train[: n // n_batch * n_batch],
ms_train[: n // n_batch * n_batch],
adjs_train[: n // n_batch * n_batch],
ys_train[: n // n_batch * n_batch],
cs_train[: n // n_batch * n_batch],
)
xs_train, ms_train, adjs_train, ys_train, cs_train = (
np.vstack([xs_train, xs_train * np.array([1, -1, 1, -1, 1, -1])]),
np.vstack([ms_train, ms_train]),
np.vstack([adjs_train, adjs_train]),
np.hstack([ys_train, ys_train]),
np.hstack([cs_train, cs_train]),
)
xs_train, ms_train, adjs_train, ys_train, cs_train = (
xs_train[np.argsort(ys_train)],
ms_train[np.argsort(ys_train)],
adjs_train[np.argsort(ys_train)],
ys_train[np.argsort(ys_train)],
cs_train[np.argsort(ys_train)],
)
X, M, A = (
tf.constant(xs_train, dtype=tf.float32),
tf.constant(ms_train, dtype=tf.float32),
tf.constant(adjs_train, dtype=tf.float32),
)
Xs, Ms, As = tf.split(X, n_batch), tf.split(M, n_batch), tf.split(A, n_batch)
n = xs_train.shape[0]
ys_unique, ys_index, ys_inverse, ys_count = np.unique(
ys_train, return_index=True, return_inverse=True, return_counts=True
)
ys_mask_index = np.arange(ys_unique[0], ys_unique[-1] + 1)
ys_mask = tf.constant(ys_mask_index[:, np.newaxis] == ys_unique, dtype=tf.float32)
cs_count = []
for j in ys_unique:
cs_count.append(cs_train[ys_train == j].sum())
cs_count = np.array(cs_count)
cs_mask = np.zeros((n, ys_unique.shape[0]))
for j, index, count in zip(range(ys_unique.shape[0]), ys_index, ys_count):
cs_mask[index : index + count, j] = 1.0
cs_mask = tf.constant(cs_mask, dtype=tf.float32)
mask = tf.constant(ys_index <= np.arange(n)[:, np.newaxis], dtype=tf.float32)
inf_array = tf.where(
tf.cast(mask, tf.bool), 0, -tf.ones_like(mask, dtype=tf.float32) * np.inf
)
df_true = 0.5 * (tf.sign(ys_mask_index - ys_eval[:, tf.newaxis]) + 1)
m = ys_unique.shape[0]
class DenseLayer(tf.keras.Model):
def __init__(
self,
n_units,
n_midlayers,
dropout_rate,
activation1=tf.nn.tanh,
activation2=tf.nn.tanh,
):
super(DenseLayer, self).__init__()
self.activation1 = tf.keras.activations.get(activation1)
self.activation2 = tf.keras.activations.get(activation2)
        self.n_midlayers = n_midlayers
        self.dense, self.dropout = [], []
        for _ in range(n_midlayers - 1):
self.dense.append(tf.keras.layers.Dense(n_units, activation=activation1))
self.dropout.append(tf.keras.layers.Dropout(dropout_rate))
self.dense.append(tf.keras.layers.Dense(n_units, activation=activation2))
def call(self, inputs, training=False, masks=None):
if masks is None:
generate_masks = True
else:
generate_masks = False
masks_generate = []
for i in range(self.n_midlayers - 1):
inputs = self.dense[i](inputs)
if generate_masks:
mask = self.dropout[i](tf.ones_like(inputs), training)
else:
mask = masks[i]
masks_generate.append(mask)
inputs = mask * inputs
outputs = self.dense[i + 1](inputs)
return outputs, masks_generate
class GatedConvLayer(tf.keras.Model):
def __init__(self, n_layerGRU, n_layers, dropout_rate):
super(GatedConvLayer, self).__init__()
self.n_layers = n_layers
self.update_R, self.update_O, self.update_D = (
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.sigmoid),
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.sigmoid),
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.sigmoid),
)
self.reset_R, self.reset_O, self.reset_D = (
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.sigmoid),
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.sigmoid),
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.sigmoid),
)
self.modify_R, self.modify_O, self.modify_D = (
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.tanh),
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.tanh),
tf.keras.layers.Dense(n_layerGRU, activation=tf.nn.tanh),
)
self.dropoutGRU_R, self.dropoutGRU_O, self.dropoutGRU_D = (
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dropout(dropout_rate),
)
(
self.dropoutGRU_neighbor_R,
self.dropoutGRU_neighbor_O,
self.dropoutGRU_neighbor_D,
) = (
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dropout(dropout_rate),
)
def call(self, inputs, training=False, masks=None):
if masks is None:
generate_masks = True
else:
generate_masks = False
layerGRU_R, layerGRU_O, layerGRU_D, A = inputs
if generate_masks:
maskGRU_update_R, maskGRU_update_O, maskGRU_update_D = (
self.dropoutGRU_R(tf.ones_like(layerGRU_R), training),
self.dropoutGRU_O(tf.ones_like(layerGRU_O), training),
self.dropoutGRU_D(tf.ones_like(layerGRU_D), training),
)
maskGRU_reset_R, maskGRU_reset_O, maskGRU_reset_D = (
self.dropoutGRU_R(tf.ones_like(layerGRU_R), training),
self.dropoutGRU_O(tf.ones_like(layerGRU_O), training),
self.dropoutGRU_D(tf.ones_like(layerGRU_D), training),
)
maskGRU_modify_R, maskGRU_modify_O, maskGRU_modify_D = (
self.dropoutGRU_R(tf.ones_like(layerGRU_R), training),
self.dropoutGRU_O(tf.ones_like(layerGRU_O), training),
self.dropoutGRU_D(tf.ones_like(layerGRU_D), training),
)
(
maskGRU_update_neighbor_R,
maskGRU_update_neighbor_O,
maskGRU_update_neighbor_D,
) = (
self.dropoutGRU_neighbor_R(
tf.ones(tf.shape(layerGRU_R) * (1, 1, 2)), training
),
self.dropoutGRU_neighbor_O(
tf.ones(tf.shape(layerGRU_O) * (1, 1, 3)), training
),
self.dropoutGRU_neighbor_D(
tf.ones(tf.shape(layerGRU_D) * (1, 1, 3)), training
),
)
(
maskGRU_reset_neighbor_R,
maskGRU_reset_neighbor_O,
maskGRU_reset_neighbor_D,
) = (
self.dropoutGRU_neighbor_R(
tf.ones(tf.shape(layerGRU_R) * (1, 1, 2)), training
),
self.dropoutGRU_neighbor_O(
tf.ones(tf.shape(layerGRU_O) * (1, 1, 3)), training
),
self.dropoutGRU_neighbor_D(
tf.ones(tf.shape(layerGRU_D) * (1, 1, 3)), training
),
)
(
maskGRU_modify_neighbor_R,
maskGRU_modify_neighbor_O,
maskGRU_modify_neighbor_D,
) = (
self.dropoutGRU_neighbor_R(
tf.ones(tf.shape(layerGRU_R) * (1, 1, 2)), training
),
self.dropoutGRU_neighbor_O(
tf.ones(tf.shape(layerGRU_O) * (1, 1, 3)), training
),
self.dropoutGRU_neighbor_D(
tf.ones(tf.shape(layerGRU_D) * (1, 1, 3)), training
),
)
else:
(
maskGRU_update_R,
maskGRU_update_O,
maskGRU_update_D,
maskGRU_reset_R,
maskGRU_reset_O,
maskGRU_reset_D,
maskGRU_modify_R,
maskGRU_modify_O,
maskGRU_modify_D,
maskGRU_update_neighbor_R,
maskGRU_update_neighbor_O,
maskGRU_update_neighbor_D,
maskGRU_reset_neighbor_R,
maskGRU_reset_neighbor_O,
maskGRU_reset_neighbor_D,
maskGRU_modify_neighbor_R,
maskGRU_modify_neighbor_O,
maskGRU_modify_neighbor_D,
) = masks
masks_generate = [
maskGRU_update_R,
maskGRU_update_O,
maskGRU_update_D,
maskGRU_reset_R,
maskGRU_reset_O,
maskGRU_reset_D,
maskGRU_modify_R,
maskGRU_modify_O,
maskGRU_modify_D,
maskGRU_update_neighbor_R,
maskGRU_update_neighbor_O,
maskGRU_update_neighbor_D,
maskGRU_reset_neighbor_R,
maskGRU_reset_neighbor_O,
maskGRU_reset_neighbor_D,
maskGRU_modify_neighbor_R,
maskGRU_modify_neighbor_O,
maskGRU_modify_neighbor_D,
]
for l in range(self.n_layers):
layerGRU_neighbor_R = tf.concat(
[
tf.matmul(A[:, :1, 1:11], layerGRU_O),
tf.matmul(A[:, :1, 11:], layerGRU_D),
],
axis=-1,
)
layerGRU_neighbor_O = tf.concat(
[
tf.matmul(A[:, 1:11, :1], layerGRU_R),
tf.matmul(A[:, 1:11, 1:11], layerGRU_O),
tf.matmul(A[:, 1:11, 11:], layerGRU_D),
],
axis=-1,
)
layerGRU_neighbor_D = tf.concat(
[
tf.matmul(A[:, 11:, :1], layerGRU_R),
tf.matmul(A[:, 11:, 1:11], layerGRU_O),
tf.matmul(A[:, 11:, 11:], layerGRU_D),
],
axis=-1,
)
z_R = self.update_R(
tf.concat(
[
layerGRU_R * maskGRU_update_R,
layerGRU_neighbor_R * maskGRU_update_neighbor_R,
],
2,
)
)
r_R = self.reset_R(
tf.concat(
[
layerGRU_R * maskGRU_reset_R,
layerGRU_neighbor_R * maskGRU_reset_neighbor_R,
],
2,
)
)
layerGRU_modified_R = self.modify_R(
tf.concat(
[
layerGRU_R * r_R * maskGRU_modify_R,
layerGRU_neighbor_R * maskGRU_modify_neighbor_R,
],
2,
)
)
z_O = self.update_O(
tf.concat(
[
layerGRU_O * maskGRU_update_O,
layerGRU_neighbor_O * maskGRU_update_neighbor_O,
],
2,
)
)
r_O = self.reset_O(
tf.concat(
[
layerGRU_O * maskGRU_reset_O,
layerGRU_neighbor_O * maskGRU_reset_neighbor_O,
],
2,
)
)
layerGRU_modified_O = self.modify_O(
tf.concat(
[
layerGRU_O * r_O * maskGRU_modify_O,
layerGRU_neighbor_O * maskGRU_modify_neighbor_O,
],
2,
)
)
z_D = self.update_D(
tf.concat(
[
layerGRU_D * maskGRU_update_D,
layerGRU_neighbor_D * maskGRU_update_neighbor_D,
],
2,
)
)
r_D = self.reset_D(
tf.concat(
[
layerGRU_D * maskGRU_reset_D,
layerGRU_neighbor_D * maskGRU_reset_neighbor_D,
],
2,
)
)
layerGRU_modified_D = self.modify_D(
tf.concat(
[
layerGRU_D * r_D * maskGRU_modify_D,
layerGRU_neighbor_D * maskGRU_modify_neighbor_D,
],
2,
)
)
layerGRU_R = (1.0 - z_R) * layerGRU_R + z_R * layerGRU_modified_R
layerGRU_O = (1.0 - z_O) * layerGRU_O + z_O * layerGRU_modified_O
layerGRU_D = (1.0 - z_D) * layerGRU_D + z_D * layerGRU_modified_D
return layerGRU_R, layerGRU_O, layerGRU_D, masks_generate
class GGNN(tf.keras.Model):
def __init__(self, n_layerGRU, n_layers, n_blocks):
super(GGNN, self).__init__()
self.n_layerGRU, self.n_layers, self.n_blocks = n_layerGRU, n_layers, n_blocks
self.denseGRU_R = DenseLayer(n_layerGRU, n_midlayers, dropout_rate)
self.denseGRU_O = DenseLayer(n_layerGRU, n_midlayers, dropout_rate)
self.denseGRU_D = DenseLayer(n_layerGRU, n_midlayers, dropout_rate)
self.dense_R, self.dense_O, self.dense_D = (
tf.keras.layers.Dense(1),
tf.keras.layers.Dense(1),
tf.keras.layers.Dense(1),
)
self.mask_R, self.mask_O, self.mask_D = (
tf.keras.layers.Dense(1, activation=tf.nn.sigmoid),
tf.keras.layers.Dense(1, activation=tf.nn.sigmoid),
tf.keras.layers.Dense(1, activation=tf.nn.sigmoid),
)
self.gcls = [
GatedConvLayer(n_layerGRU, n_layers, dropout_rate) for _ in range(n_blocks)
]
def call(self, X, A, M, training=False, masks=None):
if masks is None:
generate_masks = True
masks_dense, masks_gcl = [], []
else:
generate_masks = False
masks_dense, masks_gcl = masks
X_R = tf.slice(X, [0, 0, 0], [-1, 1, -1])
X_O = tf.concat(
[tf.tile(X_R, (1, 10, 1)), tf.slice(X, [0, 1, 0], [-1, 10, -1])], axis=2
)
X_D = tf.concat(
[tf.tile(X_R, (1, 11, 1)), tf.slice(X, [0, 11, 0], [-1, 11, -1])], axis=2
)
M_R, M_O, M_D = (
tf.slice(M, [0, 0], [-1, 1]),
tf.slice(M, [0, 1], [-1, 10]),
tf.slice(M, [0, 11], [-1, 11]),
)
if generate_masks:
layerGRU_R, masks_dense_R = self.denseGRU_R(X_R, training)
layerGRU_O, masks_dense_O = self.denseGRU_O(X_O, training)
layerGRU_D, masks_dense_D = self.denseGRU_D(X_D, training)
masks_dense = [masks_dense_R, masks_dense_O, masks_dense_D]
else:
masks_dense_R, masks_dense_O, masks_dense_D = masks_dense
layerGRU_R, _ = self.denseGRU_R(X_R, training, masks_dense_R)
layerGRU_O, _ = self.denseGRU_O(X_O, training, masks_dense_O)
layerGRU_D, _ = self.denseGRU_D(X_D, training, masks_dense_D)
for i in range(self.n_blocks):
if generate_masks:
layerGRU_R, layerGRU_O, layerGRU_D, mask_gcl = self.gcls[i](
[layerGRU_R, layerGRU_O, layerGRU_D, A], training
)
masks_gcl.append(mask_gcl)
else:
layerGRU_R, layerGRU_O, layerGRU_D, _ = self.gcls[i](
[layerGRU_R, layerGRU_O, layerGRU_D, A],
training,
masks=masks_gcl[i],
)
masks_generate = [masks_dense, masks_gcl]
maskGRU_R, maskGRU_O, maskGRU_D = (
self.mask_R(layerGRU_R),
self.mask_O(layerGRU_O),
self.mask_D(layerGRU_D),
)
layerGRU_R, layerGRU_O, layerGRU_D = (
self.dense_R(layerGRU_R),
self.dense_O(layerGRU_O),
self.dense_D(layerGRU_D),
)
out_players = tf.concat(
[
M_R[:, :, tf.newaxis] * layerGRU_R * maskGRU_R,
M_O[:, :, tf.newaxis] * layerGRU_O * maskGRU_O,
M_D[:, :, tf.newaxis] * layerGRU_D * maskGRU_D,
],
axis=1,
)
out = tf.reduce_sum(out_players, 1)
return out, out_players, masks_generate
@tf.function(experimental_compile=True)
def call_batch(X_batch, A_batch, M_batch):
out_batch, _, masks_batch = model.call(X_batch, A_batch, M_batch, True)
return out_batch, masks_batch
@tf.function(experimental_compile=True)
def compute_gradients_batch(X_batch, A_batch, M_batch, masks_batch, outs_grad_batch):
with tf.GradientTape() as tape:
out_batch, _, _ = model.call(X_batch, A_batch, M_batch, True, masks_batch)
cost_batch = tf.reduce_sum(outs_grad_batch * out_batch)
gradients_batch = tape.gradient(cost_batch, model.trainable_variables)
return gradients_batch, cost_batch
@tf.function
def compute_gradients(model, Xs, As, Ms):
outs = []
masks = []
for i in range(n_batch):
out_batch, masks_batch = call_batch(Xs[i], As[i], Ms[i])
outs.append(out_batch)
masks.append(masks_batch)
out = tf.concat(outs, axis=0)
with tf.GradientTape() as tape:
tape.watch(out)
out_max = tf.reduce_max(out + inf_array, 0)
exp_sum = tf.reduce_sum(tf.exp(out - out_max) * mask, 0)
den = (out_max + tf.math.log(exp_sum)) * cs_count
cost = -tf.reduce_sum(
tf.reduce_sum(out * cs_mask, 1) * cs_train
) + tf.reduce_sum(den)
out_grad = tape.gradient(cost, out)
outs_grad = tf.split(out_grad, n_batch)
cost = 0.0
gradients = []
for i in range(n_batch):
gradients_batch, cost_batch = compute_gradients_batch(
Xs[i], As[i], Ms[i], masks[i], outs_grad[i]
)
gradients.append(gradients_batch)
cost += cost_batch
gradients = [tf.reduce_sum(grads, 0) for grads in list(zip(*gradients))]
return gradients, cost
def apply_gradients(optimizer, gradients, variables):
optimizer.apply_gradients(zip(gradients, variables))
def compute_baseline_hazard(model, X, A, M):
out, _, _ = model.call(X, A, M, False)
out_max = tf.reduce_max(out + inf_array, 0)
exp_sum = tf.reduce_sum(tf.exp(out - out_max) * mask, 0)
den = (out_max + tf.math.log(exp_sum)) * cs_count
baseline_hazard = np.sum(
ys_mask * tf.exp(-out_max) * exp_sum.numpy() ** -1 * cs_count, axis=1
)
return baseline_hazard
def compute_hazard_ratio(model, X, A, M):
out, _, _ = model.call(X, A, M, False)
hazard_ratio = tf.exp(out).numpy()
return hazard_ratio
def compute_concordance_index(hazard_ratio_eval, ys_eval, cs_eval):
n_eval = ys_eval.shape[0]
permissible = 0
concordance = 0
for i in range(n_eval):
for j in range(i + 1, n_eval):
if (
ys_eval[i] != ys_eval[j]
and not ((ys_eval[i] < ys_eval[j]) * (cs_eval[i] == 0))
and not ((ys_eval[i] > ys_eval[j]) * (cs_eval[j] == 0))
):
permissible += 1
if (
(hazard_ratio_eval[i] > hazard_ratio_eval[j])
* (ys_eval[i] < ys_eval[j])
) or (
(hazard_ratio_eval[i] < hazard_ratio_eval[j])
* (ys_eval[i] > ys_eval[j])
):
concordance += 1
c_index = concordance / permissible
return c_index
def draw_field(returnerLine, direction):
fig, ax = plt.subplots(figsize=(8, 8))
if direction == 1.0:
ax.hlines(-53.3 / 2, -returnerLine, 120 - returnerLine, color="black")
ax.hlines(53.3 / 2, -returnerLine, 120 - returnerLine, color="black")
ax.vlines(
np.arange(0 - returnerLine, 130 - returnerLine, 10),
-53.3 / 2,
53.3 / 2,
color="black",
)
ax.vlines(
np.arange(15 - returnerLine, 115 - returnerLine, 10),
-53.3 / 2,
53.3 / 2,
color="black",
lw=1.0,
)
ax.add_patch(
plt.Rectangle((-returnerLine, -53.3 / 2), 10, 53.3, fc="black", alpha=0.1)
)
ax.add_patch(
plt.Rectangle(
(110 - returnerLine, -53.3 / 2), 10, 53.3, fc="black", alpha=0.1
)
)
else:
ax.hlines(-53.3 / 2, returnerLine - 120, returnerLine, color="black")
ax.hlines(53.3 / 2, returnerLine - 120, returnerLine, color="black")
ax.vlines(
np.arange(returnerLine - 120, returnerLine + 10, 10),
-53.3 / 2,
53.3 / 2,
color="black",
)
ax.vlines(
np.arange(returnerLine - 105, returnerLine - 5, 10),
-53.3 / 2,
53.3 / 2,
color="black",
lw=1.0,
)
ax.add_patch(
plt.Rectangle(
(returnerLine - 120, -53.3 / 2), 10, 53.3, fc="black", alpha=0.1
)
)
ax.add_patch(
plt.Rectangle(
(returnerLine - 10, -53.3 / 2), 10, 53.3, fc="black", alpha=0.1
)
)
ax.vlines(0.0, -53.3 / 2, 53.3 / 2, color="violet")
return fig, ax
def draw_play(game, play, play_detail, pff, extract_frame=None):
(
x,
m,
adj,
y,
c,
catch_frame,
nflId,
position,
offense_team,
defense_team,
loc_football,
) = extract_feature(game, play, play_detail, pff)
if extract_frame is None:
extract_frame = catch_frame
xinit, minit, adjinit = (
tf.constant(x[extract_frame][tf.newaxis], dtype=tf.float32),
tf.constant(m[tf.newaxis], dtype=tf.float32),
tf.constant(adj[extract_frame][tf.newaxis], dtype=tf.float32),
)
n_offense, n_defense = 11, 11
n = n_offense + n_defense
yardline = play.absoluteYardlineNumber.values[0]
direction = 2 * float("left" == play_detail.playDirection.values[0]) - 1
xy = (play_detail[["x", "y"]].values - np.array([yardline, 53.3 / 2])) * direction
passResult = play.passResult.values[0]
_, out_players, _ = model.call(xinit, adjinit, minit, False)
score = (np.squeeze(out_players.numpy()) - out_players_mean) * m
loc, vel = xinit[0, :, :2].numpy(), xinit[0, :, 2:4].numpy()
G = nx.Graph()
G.add_nodes_from(np.arange(n_offense), bipartite=0)
G.add_nodes_from(np.arange(n_offense, n_offense + n_defense), bipartite=1)
node_color = ["r"]
node_color.extend(["b" for i in range(n_offense - 1)])
node_color.extend(["g" for i in range(n_defense)])
row, col = np.where(adjinit[0] != 0)
G.add_edges_from(zip(row, col))
fig, ax = draw_field(yardline, direction)
nx.draw_networkx_nodes(
G, loc, node_color=node_color, node_size=1000, alpha=1.0, ax=ax
)
nx.draw_networkx_edges(G, loc, alpha=0.5, style="dashed", edge_color="k", ax=ax)
m_ind = np.arange(22)[m == 1]
nx.draw_networkx_labels(
G.subgraph(m_ind),
loc,
{i: np.around(np.exp(score[i]), 2) for i in m_ind},
font_weight="bold",
font_color="white",
ax=ax,
)
for i in range(n):
ax.arrow(
loc[i, 0],
loc[i, 1],
vel[i, 0] / 2.0 + 0.01,
vel[i, 1] / 2.0 + 0.01,
width=0.01,
head_width=0.3,
head_length=0.3,
length_includes_head=True,
color="k",
alpha=0.4,
)
football = patches.Ellipse(
xy=(loc_football[extract_frame, 0], loc_football[extract_frame, 1]),
width=1,
height=0.5,
color="saddlebrown",
ec="black",
alpha=1.0,
fill=True,
angle=0,
zorder=-100,
)
ax.add_patch(football)
xmin, ymin = loc[m == 1].min(0)
xmax, ymax = loc[m == 1].max(0)
xmin, ymin = -10, -10
xmax, ymax = 10, 10
ax.set_xlim(xmin - 2, xmax + 2)
ax.set_ylim(ymin - 2, ymax + 2)
props = dict(boxstyle="round", facecolor="wheat", alpha=1)
textstr = "\n".join(
(
"PASS RESULT: " + passResult,
"OFFENSE: %s" % (np.around(np.exp(np.sum(score[:11])), 2),),
"DEFENSE: %s" % (np.around(np.exp(np.sum(score[11:])), 2),),
"TOTAL: %s" % (np.around(np.exp(np.sum(score)), 2),),
)
)
ax.text(6.5, -8, textstr, verticalalignment="top", bbox=props)
return fig
def create_movie(gameId, playId):
game = games[games.gameId.values == gameId]
play = plays[(plays.gameId.values == gameId) * (plays.playId.values == playId)]
play_detail = tracking[
(tracking.gameId.values == gameId) * (tracking.playId.values == playId)
]
pff = pffs[(pffs.gameId.values == gameId) * (pffs.playId.values == playId)]
(
x,
m,
adj,
y,
c,
catch_frame,
nflId,
position,
offense_team,
defense_team,
loc_football,
) = extract_feature(game, play, play_detail, pff)
dirname = "images"
os.makedirs(dirname, exist_ok=True)
filenames = []
for extract_frame in np.arange(x.shape[0]):
fig = draw_play(game, play, play_detail, pff, extract_frame=extract_frame)
filename = os.path.join(dirname, str(extract_frame) + ".png")
fig.savefig(filename, bbox_inches="tight")
plt.close(fig)
filenames.append(filename)
gifname = str(gameId) + "_" + str(playId) + ".gif"
with imageio.get_writer(gifname, mode="I", fps=10) as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
_ = [os.remove(filename) for filename in glob("*.png")]
return gifname
print("Training ...")
learning_rate = 0.001
dropout_rate = 0.5
n_layerGRU = 32
n_midlayers = 2
n_layers = 5
n_blocks = 2
n_ties = ys_unique.shape[0]
model = GGNN(n_layerGRU, n_layers, n_blocks)
optimizer = tf.keras.optimizers.Adam(learning_rate)
checkpoint_dir = "./training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
training_epochs = 1000
c_index_best = 0.0
for epoch in range(training_epochs):
gradients, cost_epoch = compute_gradients(model, Xs, As, Ms)
apply_gradients(optimizer, gradients, model.variables)
if (epoch + 1) % 50 == 0:
hazard_ratio_eval = compute_hazard_ratio(model, X_eval, A_eval, M_eval)[:, :1]
c_index = compute_concordance_index(hazard_ratio_eval, ys_eval, cs_eval)
print(
"epoch %s : cost %s: c-index %s"
% (epoch + 1, np.around(cost_epoch.numpy(), 2), np.around(c_index, 4))
)
if c_index_best < c_index:
c_index_best = c_index
checkpoint.save(file_prefix=checkpoint_prefix)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
hazard_ratio_test = compute_hazard_ratio(model, X_test, A_test, M_test)
c_index_test = compute_concordance_index(hazard_ratio_test, ys_test, cs_test)
print("c-index of test %s" % np.around(c_index_test, 4))
_, out_players, _ = model.call(X_orig, A_orig, M_orig, False)
out_players_mean = tf.concat(
[
tf.reduce_sum(tf.reduce_sum(out_players, -1)[:, :1] * M_orig[:, :1])
/ tf.reduce_sum(M_orig[:, :1])
* tf.ones(1),
tf.reduce_sum(tf.reduce_sum(out_players, -1)[:, 1:11] * M_orig[:, 1:11])
/ tf.reduce_sum(M_orig[:, 1:11])
* tf.ones(10),
tf.reduce_sum(tf.reduce_sum(out_players, -1)[:, 11:] * M_orig[:, 11:])
/ tf.reduce_sum(M_orig[:, 11:])
* tf.ones(11),
],
axis=-1,
)
baseline_hazard = compute_baseline_hazard(model, X, A, M)
# # Application 2: Hazard rate changes during play
# The following animations show the estimation results. The value displayed for each player is the hazard ratio relative to the mean hazard of that player's position group. For example, a value of 1.05 for an offensive player means that this player multiplies the pocket-collapse hazard by a factor of 1.05.
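# Concretely, as implemented in `draw_play` above, the displayed value for player $i$ is $\exp(s_i - \bar{s})$, where $s_i$ is the player's score from the network and $\bar{s}$ is the mean score of that player's position group; values above 1 raise the collapse hazard relative to an average player at that position, and values below 1 lower it.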
# ### Pass Complete
gameId, playId = 2021091200, 3110
gifname = create_movie(gameId, playId)
display.HTML('<img src="{}">'.format(gifname))
# ### Pass Incomplete
gameId, playId = 2021090900, 97
gifname = create_movie(gameId, playId)
display.HTML('<img src="{}">'.format(gifname))
# ### Quarterback sack
gameId, playId = 2021091200, 4346
gifname = create_movie(gameId, playId)
display.HTML('<img src="{}">'.format(gifname))
# # Application 3: Ranking of teams
# The total hazard indicates how well a formation performs, so it can be used as an indicator of a team's offensive or defensive quality. Here we calculate the mean hazard for each team.
_, out_players, _ = model.call(X_orig, A_orig, M_orig, False)
out_players_mean = tf.concat(
[
tf.reduce_sum(tf.reduce_sum(out_players, -1)[:, :1] * M_orig[:, :1])
/ tf.reduce_sum(M_orig[:, :1])
* tf.ones(1),
tf.reduce_sum(tf.reduce_sum(out_players, -1)[:, 1:11] * M_orig[:, 1:11])
/ tf.reduce_sum(M_orig[:, 1:11])
* tf.ones(10),
tf.reduce_sum(tf.reduce_sum(out_players, -1)[:, 11:] * M_orig[:, 11:])
/ tf.reduce_sum(M_orig[:, 11:])
* tf.ones(11),
],
axis=-1,
)
out_players = (out_players - out_players_mean[:, tf.newaxis]) * M_orig[:, :, tf.newaxis]
offense_teams_orig = np.hstack(
[offense_teams[fs[:, extract_frame] == 1] for extract_frame in extract_frames]
)
defense_teams_orig = np.hstack(
[defense_teams[fs[:, extract_frame] == 1] for extract_frame in extract_frames]
)
offense_teams_unique = np.unique(offense_teams_orig)
result = {}
for offense_team in offense_teams_unique:
result[offense_team] = np.mean(
np.sum(out_players[:, :11, 0], -1)[offense_teams_orig == offense_team]
)
offense_teams_score = pd.DataFrame.from_dict(result, orient="index")
offense_teams_score.columns = ["score"]
offense_teams_score = offense_teams_score.sort_values("score")
defense_teams_unique = np.unique(defense_teams_orig)
result = {}
for defense_team in defense_teams_unique:
result[defense_team] = np.mean(
np.sum(out_players[:, 11:, 0], -1)[defense_teams_orig == defense_team]
)
defense_teams_score = pd.DataFrame.from_dict(result, orient="index")
defense_teams_score.columns = ["score"]
defense_teams_score = defense_teams_score.sort_values("score", ascending=False)
# This figure shows the mean hazard of each team's offensive plays. The team furthest to the left has the smallest hazard, meaning that it protected its quarterback best from the opposing pass rush.
fig, ax = plt.subplots(figsize=(18, 4))
ax.bar(
offense_teams_score.index,
tf.exp(offense_teams_score["score"].values) - 1,
color="b",
)
ax.axhline(y=0, linewidth=1, color="k", linestyle="dashed")
_ = ax.set_yticks(ax.get_yticks(), np.around(ax.get_yticks() + 1, 3))
_ = ax.set_ylabel("Hazard ratio")
# This figure shows the mean hazard of each team's defensive players. The team furthest to the left has the highest hazard, meaning that it succeeded most often in collapsing the opposing team's pocket.
fig, ax = plt.subplots(figsize=(18, 4))
ax.bar(
defense_teams_score.index,
tf.exp(defense_teams_score["score"].values) - 1,
color="g",
)
ax.axhline(y=0, linewidth=1, color="k", linestyle="dashed")
_ = ax.set_yticks(ax.get_yticks(), np.around(ax.get_yticks() + 1, 3))
_ = ax.set_ylabel("Hazard ratio")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head(10)
train_data.tail(10)
train_data.query("Cabin == Cabin").shape
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
len(
train_data.loc[(train_data.Sex == "female") & (train_data.Survived == 1)][
"Survived"
]
)
train_data.loc[:, ["Name", "Age", "Pclass"]]
train_data[["Name", "Age", "Pclass"]]
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived=", rate_men)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
actual = model.predict(X)
predictions = model.predict(X_test)
# output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
# output.to_csv('my_submission.csv', index=False)
# print ( "X data accuracy: " , round(accuracy_score(y, actual) * 100, 2), "%" )
# print("Your submission was successfully saved!")
predictions
##############################################
###### Added features: Age, Fare, Cabin ########
# X data accuracy: 84.4 %
# 77.5%
##############################################
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"] # class 변수
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
X["Age"] = train_data["Age"]
X["Fare"] = train_data["Fare"]
X["Cabin"] = np.where(train_data["Cabin"].isna() == True, 0, 1)
# np.where(condition, value_if_true, value_if_false)
X.groupby("Cabin").count()
X_test["Age"] = test_data["Age"]
X_test["Fare"] = test_data["Fare"]
X_test["Cabin"] = np.where(test_data["Cabin"].isna() == True, 0, 1)
# X.isna().sum()
X["Age"] = X["Age"].fillna(X["Age"].mean())
X_test["Age"] = X_test["Age"].fillna(X_test["Age"].mean())
X_test["Fare"] = X_test["Fare"].fillna(X_test["Fare"].mean())
model = RandomForestClassifier(n_estimators=100, max_depth=6, random_state=7)
model.fit(X, y)
actual = model.predict(X)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("X data accuracy: ", round(accuracy_score(y, actual) * 100, 2), "%")
print("Your submission was successfully saved!")
X
# [age if age > 10 else 10 for age in X['Age']]
# new_age = []
# for age in X['Age']:
# if age > 10:
# new_age.append(age)
# else:
# new_age.append(10)
# Logistic regression
# Gradient Boosting
# SVM
# Deep learning
############################
### Logistic regression
### X data accuracy: 80.36 %
## test data : 75%
############################
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
model = LogisticRegression(C=1000.0, random_state=7)
model.fit(X, y)
actual = model.predict(X)
predictions = model.predict(X_test)
# output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
# output.to_csv('my_submission.csv', index=False)
# print ( "X data accuracy: " , round(accuracy_score(y, actual) * 100, 2), "%" )
# print("Your submission was successfully saved!")
##########################
### XGBOOST
### X data accuracy: 93.71%
## test data : 73.2%
##########################
from xgboost import plot_importance
from xgboost import XGBClassifier
model = XGBClassifier(n_estimators=500, learning_rate=0.1, max_depth=5)
model.fit(X, y)
actual = model.predict(X)
predictions = model.predict(X_test)
# output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
# output.to_csv('my_submission.csv',index=False)
# print ( "X data accuracy: " , round(accuracy_score(y, actual) * 100, 2), "%" )
# print("Your submission was successfully saved!")
############################
### SVM
### X data accuracy: 78.68 %
## test data : 76.5%
############################
from sklearn import svm
from sklearn.metrics import accuracy_score
# model = svm.SVC(kernel = 'linear')
# model.fit(X, y)
# actual = model.predict(X)
# predictions = model.predict(X_test)
# output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
# output.to_csv('my_submission.csv', index=False)
# print ( "X data accuracy: " , round(accuracy_score(y, actual) * 100, 2), "%" )
# print("Your submission was successfully saved!")
######################################
### Deep Learning
### X data accuracy: 78.68 %
## test data :
######################################
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers.core import Dense
np.random.seed(7)
# model = Sequential()
# model.add(Dense(255, input_shape=(8,), activation='relu'))
# model.add(Dense((1), activation='sigmoid'))
# model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
# model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
# SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
# hist = model.fit(X, y,epochs=100)
import matplotlib.pyplot as plt
# predictions = model.predict(X_test)
# output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions.ravel()})
# output.to_csv('my_submission.csv', index=False)
# print ( "X data accuracy: " , round(accuracy_score(y, actual) * 100, 2), "%" )
# print("Your submission was successfully saved!")
# output
# Convert the predicted probabilities into 1/0 labels
# Ensemble the models (a sketch follows below)
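# A minimal sketch of the two to-do items above, using illustrative arrays rather than the
# (mostly commented-out) model outputs in this notebook: threshold sigmoid probabilities at 0.5
# to obtain 1/0 labels, and average several models' predicted probabilities as a simple
# soft-voting ensemble. All variable names below are hypothetical.
example_proba_a = np.array([0.2, 0.7, 0.55])  # hypothetical probabilities from one model
example_proba_b = np.array([0.4, 0.8, 0.45])  # hypothetical probabilities from another model
hard_labels = (example_proba_a >= 0.5).astype(int)  # probabilities -> 1/0 labels
ensemble_proba = (example_proba_a + example_proba_b) / 2  # soft-voting ensemble
ensemble_labels = (ensemble_proba >= 0.5).astype(int)
print(hard_labels, ensemble_labels)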
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import glob
from lxml import etree
from matplotlib.patches import Rectangle  # for drawing rectangular bounding boxes
import os
gpu = tf.config.experimental.list_physical_devices(device_type="GPU")
tf.config.experimental.set_memory_growth(gpu[0], True)
print(os.listdir("../input/the-oxfordiiit-pet-dataset/images/images")[:3])
image = tf.io.read_file(
"../input/the-oxfordiiit-pet-dataset/images/images/Abyssinian_1.jpg"
)
image = tf.image.decode_jpeg(image)
plt.imshow(image)
xml = open(
"../input/the-oxfordiiit-pet-dataset/annotations/annotations/xmls/Abyssinian_1.xml"
).read()
sel = etree.HTML(xml)
int(sel.xpath("//name/text()")[0] == "cat") # cat=1.dog=0
label = int(sel.xpath("//name/text()")[0] == "cat")
width = int(sel.xpath("//width/text()")[0])
height = int(sel.xpath("//height/text()")[0])
xmin = int(sel.xpath("//xmin/text()")[0])
ymin = int(sel.xpath("//ymin/text()")[0])
xmax = int(sel.xpath("//xmax/text()")[0])
ymax = int(sel.xpath("//ymax/text()")[0])
plt.imshow(image)
rect = Rectangle((xmin, ymin), (xmax - xmin), (ymax - ymin), fill=False, color="red")
ax = plt.gca()
ax.axes.add_patch(rect)
b1 = xmin / width
b2 = xmax / width
b3 = ymin / height
b4 = ymax / height
b1, b2, b3, b4
image.shape
image = tf.image.resize(image, (256, 256))
image = image / 255
xmin = b1 * 256
xmax = b2 * 256
ymin = b3 * 256
ymax = b4 * 256
plt.imshow(image)
rect = Rectangle((xmin, ymin), (xmax - xmin), (ymax - ymin), fill=False, color="red")
ax = plt.gca()
ax.axes.add_patch(rect)
print(image)
image.shape
# Data preprocessing
images = glob.glob("../input/the-oxfordiiit-pet-dataset/images/images/*.jpg")
xmls = glob.glob(
"../input/the-oxfordiiit-pet-dataset/annotations/annotations/xmls/*.xml"
)
images[:3]
xmls[:3]
names = [x.split("/")[-1].split(".")[0] for x in xmls]
names[:3]
train_images = [
image for image in images if (image.split("/")[-1].split(".")[0]) in names
]
test_images = [
image for image in images if (image.split("/")[-1].split(".")[0]) not in names
]
len(train_images), len(test_images)
train_images.sort(key=lambda x: x.split("/")[-1].split(".")[0])
xmls.sort(key=lambda x: x.split("/")[-1].split(".")[0])
t = 0
for i in range(3686):
if (
train_images[i].split("/")[-1].split(".")[0]
== xmls[i].split("/")[-1].split(".")[0]
):
t += 1
print(t)
def load_label(path):
xml = open(path).read()
sel = etree.HTML(xml)
pet_label = int(sel.xpath("//name/text()")[0] == "cat")
width = int(sel.xpath("//width/text()")[0])
height = int(sel.xpath("//height/text()")[0])
xmin = int(sel.xpath("//xmin/text()")[0])
ymin = int(sel.xpath("//ymin/text()")[0])
xmax = int(sel.xpath("//xmax/text()")[0])
ymax = int(sel.xpath("//ymax/text()")[0])
return [pet_label, xmin / width, ymin / height, xmax / width, ymax / height]
labels = [load_label(path) for path in xmls]
pet_label, out1, out2, out3, out4 = list(zip(*labels))
pet_label = np.array(pet_label)
out1 = np.array(out1)
out2 = np.array(out2)
out3 = np.array(out3)
out4 = np.array(out4)
labels_ds = tf.data.Dataset.from_tensor_slices((pet_label, out1, out2, out3, out4))
labels_ds
# Image loading and preprocessing
def load_image(path):
image = tf.io.read_file(path)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [224, 224])
image = image / 127.5 - 1
return image
image_ds = tf.data.Dataset.from_tensor_slices(train_images)
image_test_ds = tf.data.Dataset.from_tensor_slices(test_images)
image_ds = image_ds.map(load_image)
image_test_ds = image_test_ds.map(load_image)
image_ds
image_test_ds  # test set data
dataset = tf.data.Dataset.zip((image_ds, labels_ds))
dataset
BATCH_SIZE = 32
dataset = dataset.shuffle(len(train_images)).batch(BATCH_SIZE).repeat()
test_dataset = image_test_ds.batch(BATCH_SIZE)
dataset
test_dataset
test_count = int(0.2 * len(xmls))  # validation split size
train_count = len(xmls) - test_count
test_count, train_count
test_ds = dataset.skip(test_count)
test_ds
train_ds = dataset.take(train_count)
train_ds
# Quick check: visualize one training sample
for img, label in train_ds.take(1):
plt.imshow(tf.keras.preprocessing.image.array_to_img(img[0]))
pet_label, out1, out2, out3, out4 = label
plt.title("cat" if pet_label[0] == 1 else "dog")
xmin = out1[0].numpy() * 224
ymin = out2[0].numpy() * 224
xmax = out3[0].numpy() * 224
ymax = out4[0].numpy() * 224
rect = Rectangle(
(xmin, ymin), (xmax - xmin), (ymax - ymin), fill=False, color="red"
)
ax = plt.gca()
ax.axes.add_patch(rect)
# # Building the image localization model
xception = tf.keras.applications.Xception(
weights="imagenet", include_top=False, input_shape=(224, 224, 3)
)
xception.summary()
inputs = tf.keras.layers.Input(shape=(224, 224, 3))
x = xception(inputs)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x.get_shape()
x = tf.keras.layers.Dense(2048, activation="relu")(x)
x = tf.keras.layers.Dense(256, activation="relu")(x)
# Output head for the pet class (cat/dog)
out_pet = tf.keras.layers.Dense(1, activation="sigmoid", name="out_pet")(x)
out_pet.get_shape()
# Output four box coordinates; no activation is needed for regression
out1 = tf.keras.layers.Dense(1, name="out1")(x)
out2 = tf.keras.layers.Dense(1, name="out2")(x)
out3 = tf.keras.layers.Dense(1, name="out3")(x)
out4 = tf.keras.layers.Dense(1, name="out4")(x)
prediction = [out_pet, out1, out2, out3, out4]
model = tf.keras.models.Model(inputs=inputs, outputs=prediction)
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True)
# Compile the model
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=0.0001),
loss={
"out_pet": "binary_crossentropy",
"out1": "mse",
"out2": "mse",
"out3": "mse",
"out4": "mse",
},
metrics=["acc", ["mse"], ["mse"], ["mse"], ["mse"]], # mae平均绝对误差
)
EPOCH = 15
history = model.fit(
dataset,
steps_per_epoch=train_count // BATCH_SIZE,
epochs=EPOCH,
validation_data=test_ds,
validation_steps=test_count // BATCH_SIZE,
)
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(EPOCH)
plt.figure()
plt.plot(epochs, loss, "r", label="Training loss")
plt.plot(epochs, val_loss, "bo", label="Validation loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss Value")
plt.legend()
plt.show()
model.save("detect_v2.h5")
# Load the previously trained model
new_model = tf.keras.models.load_model("../input/ximing-object-detect/detect_v2.h5")
# Create one figure to display six test images
plt.figure(figsize=(8, 48))
for img in test_dataset.take(1):
pet, out1, out2, out3, out4 = new_model.predict(img)
    # Draw six predicted boxes
for i in range(6):
plt.subplot(6, 1, i + 1)
plt.imshow(tf.keras.preprocessing.image.array_to_img(img[i]))
plt.title("cat" if pet[0][0] >= 0.5 else "dog")
xmin, ymin, xmax, ymax = (
out1[i] * 224,
out2[0] * 224,
out3[0] * 224,
out4[0] * 224,
)
rect = Rectangle(
(xmin, ymin), (xmax - xmin), (ymax - ymin), fill=False, color="red"
)
ax = plt.gca()
ax.axes.add_patch(rect)
|
li = ["abc", 34, 4.34, 23]
print(li)
st = "Hello World"
print(st)
st = "Hello World"
print(st)
st = """This is a multi-line string that uses triple quotes. """
print(st)
tu = (23, "abc", 4.56, (2, 3), "def")
print(tu[1])
print(tu[-1])
# # Iterating Through a List
# Using a for loop we can iterate through each item in a list.
#
for fruit in ["apple", "banana", "mango"]:
print("I like", fruit)
# # Numpy Arrays
# ### Creating 2D Numpy Array
import numpy as np
np_2d = np.array([[1.73, 1.68, 1.71, 1.89, 1.79], [65.4, 59.2, 63.6, 88.4, 68.7]])
np_2d.shape
# ### Calculating BMI
import numpy as np
np_height = np.array([1.73, 1.68, 1.71, 1.89, 1.79])
np_weight = np.array([65.4, 59.2, 63.6, 88.4, 68.7])
bmi = np_weight / np_height**2
print(" BMI : ", bmi)
# # Basic Statistical Analysis
#
import numpy as np
np_city = np.array(
[
[1.64, 71.78],
[1.37, 63.35],
[1.6, 55.09],
[2.04, 74.85],
[2.04, 68.72],
[2.01, 73.57],
]
)
print(np_city)
print(type(np_city))
print("Mean Height : ", np.mean(np_city[:, 0]))
print("Median Height : ", np.median(np_city[:, 0]))
np.corrcoef(np_city[:, 0], np_city[:, 1])
np.std(np_city[:, 0])
fam = [1.73, 1.68, 1.71, 1.89]
tallest = max(fam)
print("Tallest : ", tallest)
# ## Data Generation and Statistics
height = np.round(np.random.normal(1.75, 0.20, 5000), 2)
weight = np.round(np.random.normal(60.32, 15, 5000), 2)
np_city = np.column_stack((height, weight))
print(np_city)
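# A quick sketch of the "Statistics" half of this section: summary statistics and the
# correlation between the two generated columns (height and weight).
print("Mean height :", np.mean(np_city[:, 0]))
print("Std of weight:", np.std(np_city[:, 1]))
print("Correlation :\n", np.corrcoef(np_city[:, 0], np_city[:, 1]))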
|
import pandas as pd
import numpy as np
def number_to_filename(number):
filename = f"{number:06d}.png"
path = "/kaggle/input/spr-x-ray-gender/kaggle/kaggle/train/"
filename = path + filename
return filename
train_gender_df = pd.read_csv("/kaggle/input/spr-x-ray-gender/train_gender.csv")
train_gender_df["filepath"] = train_gender_df["imageId"].apply(number_to_filename)
train_gender_df.head(10)
train_gender_df["gender"].hist()
from matplotlib import pyplot as plt
import cv2
img = cv2.imread(train_gender_df["filepath"][0], 0)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.title(str(train_gender_df["gender"][0]))
img = cv2.imread(train_gender_df["filepath"][1], 0)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.title(str(train_gender_df["gender"][1]))
import requests
import math
import matplotlib.pyplot as plt
import shutil
from getpass import getpass
from PIL import Image, UnidentifiedImageError
from requests.exceptions import HTTPError
from io import BytesIO
from pathlib import Path
import torch
import pytorch_lightning as pl
from huggingface_hub import HfApi, HfFolder, Repository, notebook_login
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
from transformers import ViTFeatureExtractor, ViTForImageClassification
import cv2
from sklearn.model_selection import train_test_split
train_sampled_gender_df = (
train_gender_df.groupby("gender")
.apply(lambda x: x.sample(frac=0.9, replace=False))
.reset_index(drop=True)
)
filenames = train_sampled_gender_df["filepath"].tolist()
target = train_sampled_gender_df["gender"].tolist()
train_x, val_x, train_y, val_y = train_test_split(
filenames, target, stratify=target, test_size=0.1, random_state=0
)
test_df = pd.read_csv("/kaggle/input/spr-x-ray-gender/sample_submission_gender.csv")
def testnumber_to_filename(number):
filename = f"{number:06d}.png"
path = "/kaggle/input/spr-x-ray-gender/kaggle/kaggle/test/"
filename = path + filename
return filename
test_df["filepath"] = test_df["imageId"].apply(testnumber_to_filename)
test_x = test_df["filepath"].tolist()
test_y = test_df["gender"].tolist()
class ImageClassificationCollator:
def __init__(self, feature_extractor):
self.feature_extractor = feature_extractor
def __call__(self, batch):
encodings = self.feature_extractor([x[0] for x in batch], return_tensors="pt")
encodings["labels"] = torch.tensor([x[1] for x in batch], dtype=torch.long)
return encodings
label2id = {}
id2label = {}
label2id["0"] = str(0)
id2label[str(0)] = "0"
label2id["1"] = str(1)
id2label[str(1)] = "1"
feature_extractor = ViTFeatureExtractor.from_pretrained(
"google/vit-base-patch16-224-in21k"
)
model = ViTForImageClassification.from_pretrained(
"google/vit-base-patch16-224-in21k",
num_labels=len(label2id),
label2id=label2id,
id2label=id2label,
)
collator = ImageClassificationCollator(feature_extractor)
IMG_SIZE = 224
transforms_train = transforms.Compose(
[
transforms.Resize((IMG_SIZE, IMG_SIZE)),
transforms.RandomHorizontalFlip(p=0.1),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
transforms_val = transforms.Compose(
[
transforms.Resize((IMG_SIZE, IMG_SIZE)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
class SPRXrayGenderDataset(torch.utils.data.Dataset):
def __init__(self, image_files, labels, transforms):
self.image_files = image_files
self.labels = labels
self.transforms = transforms
def __len__(self):
return len(self.image_files)
def __getitem__(self, index):
image_filepath = self.image_files[index]
image = cv2.imread(image_filepath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return self.transforms(image), self.labels[index]
train_ds = SPRXrayGenderDataset(train_x, train_y, transforms_train)
train_loader = DataLoader(
train_ds, batch_size=32, collate_fn=collator, num_workers=2, shuffle=True
)
val_ds = SPRXrayGenderDataset(val_x, val_y, transforms_val)
val_loader = DataLoader(val_ds, batch_size=32, collate_fn=collator, num_workers=2)
test_ds = SPRXrayGenderDataset(test_x, test_y, transforms_val)
test_loader = DataLoader(test_ds, batch_size=32, collate_fn=collator, num_workers=2)
class Classifier(pl.LightningModule):
def __init__(self, model, lr: float = 2e-5, **kwargs):
super().__init__()
self.save_hyperparameters("lr", *list(kwargs))
self.model = model
self.forward = self.model.forward
self.val_acc = Accuracy(task="binary")
def training_step(self, batch, batch_idx):
outputs = self(**batch)
self.log(f"train_loss", outputs.loss)
return outputs.loss
def validation_step(self, batch, batch_idx):
outputs = self(**batch)
self.log(f"val_loss", outputs.loss)
acc = self.val_acc(outputs.logits.argmax(1), batch["labels"])
self.log(f"val_acc", acc, prog_bar=True)
return outputs.loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
pl.seed_everything(42)
classifier = Classifier(model, lr=1e-4)
trainer = pl.Trainer(gpus=1, precision=16, max_epochs=20)
trainer.fit(classifier, train_loader, val_loader)
y_pred = []
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
with torch.no_grad():
for test_data in test_loader:
test_data.to(device)
pred = model(**test_data)
outputs = pred.logits.softmax(1).argmax(1)
for i in outputs:
y_pred.append(i.item())
# print(y_pred)
test_df["gender"] = y_pred
test_df[["imageId", "gender"]].to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras.utils.np_utils import to_categorical
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPool2D
# **Take a dataset of low-resolution images, 28x28 pixels (784). For now the size is hard-coded, but it should be passed as an input. Set the final number of target classes (10).**
righe, colonne = 28, 28
n_classi = 10
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
y_train = train["label"]
X_train = train.drop(labels=["label"], axis=1)
# ------Normalization-----#
X_train = X_train / 255.0
test = test / 255.0
# -----Reshaping----#
X_train = X_train.values.reshape(-1, righe, colonne, 1)
test = test.values.reshape(-1, righe, colonne, 1)
# -------Label encoding-------#
y_train = to_categorical(y_train, num_classes=n_classi)
# -------Split---------------#
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.1, random_state=2
)
# **Building and compiling the model**
model = Sequential()
model.add(
Conv2D(
filters=32,
kernel_size=(5, 5),
padding="Same",
activation="relu",
input_shape=(righe, colonne, 1),
)
)
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="Same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(n_classi, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# **Fitting the model to the training set**
from keras.callbacks import ReduceLROnPlateau
batch = 100
epochs = 3
learning_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=3, verbose=1, factor=0.5, min_lr=0.00001
)
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False,
)
datagen.fit(X_train)
resoconto = model.fit_generator(
datagen.flow(X_train, y_train, batch_size=batch),
epochs=epochs,
validation_data=(X_val, y_val),
steps_per_epoch=100,
callbacks=[learning_rate],
)
model.history.history["val_acc"] = model.history.history["val_accuracy"]
ris = model.predict(test)
ris = np.argmax(ris, axis=1)
ris = pd.Series(ris, name="Label")
sub = pd.concat([pd.Series(range(1, 28001), name="ImageId"), ris], axis=1)
sub.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import warnings
warnings.simplefilter("ignore")
df = sns.load_dataset("titanic")
df.head(1)
df = df[df["age"].notnull()]
# # Single Sample t-test
# Used to check whether a sample mean is different from the population mean
# H0 -> The mean age is 35
# H1 -> The mean is less than 35
# # Assumptions of the single sample t-test are:
# 1.Random Sampling
# 2. Normality
# 3. Unknown population std (here we assume the population std is not known)
# 4. Independence
sample_age = df["age"].sample(25).values
sample_age
# check for normality using Shapiro Wilk test
sns.distplot(sample_age)
# if the p-value is greater than 0.05 we fail to reject normality, i.e. the data can be treated as normally distributed
from scipy.stats import shapiro
shapiro_tips = shapiro(sample_age)
print(shapiro_tips)
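# A small hedged illustration of the decision rule above (assuming the usual alpha = 0.05;
# the second element of the shapiro result is its p-value):
if shapiro_tips[1] > 0.05:
    print("Shapiro-Wilk: fail to reject normality, the sample looks approximately normal")
else:
    print("Shapiro-Wilk: reject normality, the sample does not look normal")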
pop_mean = 35
import scipy.stats as stats
# Here we are performing a one-tailed test, so we divide the two-sided p_value by 2
t_statistic, p_value = stats.ttest_1samp(sample_age, pop_mean)
print("t-statistic:", t_statistic)
print("p-value:", p_value / 2)
alpha = 0.05
if (p_value / 2) < alpha:  # one-tailed test, so compare the halved p-value against alpha
print("Reject the null hypothesis.")
else:
print("Fail to reject the null hypothesis.")
# # Independent 2 sample t-test
# This test is used to compare the means of two independent groups to determine whether there is a significant difference between them
# H0 = There is no difference between male and female age
# H1 = There is a difference between male and female age
# The assumptions for this test are:
# 1. Equal variance (homoscedasticity) across groups (we can use an F-test or Levene's test to check this)
# 2. Independence of observations
# 3. Normality
# 4. Random Sampling
male_population = df[df["sex"] == "male"]["age"]
female_population = df[df["sex"] == "female"]["age"]
male_sample = male_population.sample(25).values
female_sample = female_population.sample(25).values
male_sample_mean = male_sample.mean()
female_sample_mean = female_sample.mean()
# Similar Variance
from scipy.stats import levene
levene_test = levene(male_sample, female_sample)
print(levene_test)
t_stast, p_value = stats.ttest_ind(male_sample, female_sample)
print("t-stast", t_stast)
print("p value", p_value / 2)
alpha = 0.05
if p_value < alpha:
print("Reject the null hypothesis.")
else:
print("Fail to reject the null hypothesis.")
|
# # Setup and helper functions
import os
import re
import csv
import cv2
import random
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pydicom as dicom
import nibabel as nib
rsna_root = "/kaggle/input/rsna-2022-cervical-spine-fracture-detection"
# Helper functions
def load_img_from_dcm(path):
img = dicom.dcmread(path)
img.PhotometricInterpretation = "YBR_FULL"
data = img.pixel_array
data = data - np.min(data)
    if np.max(data) != 0:
data = data / np.max(data)
data = (data * 255).astype(np.uint8)
return cv2.cvtColor(data, cv2.COLOR_GRAY2RGB)
def CT_path_to_3D_arr(
folder_path, l=None
): # folder_path is for folder of dcm files constituting one CT scan
    if l is None:
l = os.listdir(folder_path) # list of 2D slices of the CT scan
l.sort()
l = sorted(l, key=len)
CT_arr = [] # the full 3D CT
for dcm in l:
dcm_path = os.path.join(folder_path, dcm)
dcm_arr = load_img_from_dcm(dcm_path)
CT_arr.append(dcm_arr)
CT_arr = np.asarray(CT_arr)
return CT_arr
# # Bounding boxes around vertebrae
# can be obtained from the segmentations data. First I will visualize the segmentation slices side-by-side with their corresponding CT slices.
patient_id = "1.2.826.0.1.3680043.780"
segm_path = os.path.join(rsna_root, "segmentations", patient_id + ".nii")
segm_arr = nib.load(segm_path).get_fdata()
segm_arr = np.transpose(segm_arr, (2, 0, 1))
CT_path = os.path.join(rsna_root, "train_images", patient_id)
CT_arr = CT_path_to_3D_arr(CT_path)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
slice_num = 80 # from 0 to len(array)-1
segm_slice = segm_arr[len(segm_arr) - 1 - slice_num]
segm_slice = np.rot90(segm_slice)
CT_slice = CT_arr[slice_num]
ax[0].imshow(CT_slice, cmap=plt.get_cmap("bone"))
ax[1].imshow(segm_slice, cmap=plt.get_cmap("bone"))
# We can draw a bounding box (bbox) around the vertebrae by using the segmentation slice like this:
rows = np.any(segm_slice, axis=1)
cols = np.any(segm_slice, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
width = cmax - cmin
height = rmax - rmin
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.imshow(segm_slice)
rect = Rectangle(
(cmin, rmin), width, height, linewidth=1, edgecolor="r", facecolor="none"
)
ax.add_patch(rect)
# Drawing the bbox on the original CT scan:
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.imshow(CT_slice, cmap=plt.get_cmap("bone"))
rect = Rectangle(
(cmin, rmin), width, height, linewidth=1, edgecolor="r", facecolor="none"
)
ax.add_patch(rect)
print(cmin, rmin, width, height)
# # Save labels (bboxes) in YOLOv5 format to use for training
# I used [Ultralytics YOLOv5](https://docs.ultralytics.com/quick-start/), which I found super easy to use. The specifics are well-documented on their website, but here is the gist:
# - one txt file per image
# - one row per object/bbox
# - each row in 'class_number, xcentre, ycentre, width, height' format (here, we only have one class '0: vertebra')
# - bbox coordinates are normalized, i.e. divided by the image width/height (see the illustrative example after this list)
# - (0,0) is top-left
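# Illustrative example with hypothetical numbers (not taken from the dataset): a bbox spanning
# columns 100-300 and rows 150-350 of a 512x512 slice becomes the row
# "class xcentre ycentre width height", with everything divided by the image size:
example_row = [0, (100 + 300) / 2 / 512, (150 + 350) / 2 / 512, (300 - 100) / 512, (350 - 150) / 512]
print(example_row)  # [0, 0.390625, 0.48828125, 0.390625, 0.390625]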
def save_yolo_coord(pt_num, slice_num, slice_, destination_folder):
"""
Saves the yolov5 coord txt file for a single slice segmentation mask
pt_num: number that follows 1.2.826.0.1.3680043.
slice_num: which slice
    slice_: 2D array of the slice
destination_folder: where to save the txt files
"""
rows = np.any(slice_, axis=1)
cols = np.any(slice_, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
xcentre = int((cmin + cmax) / 2)
ycentre = int((rmin + rmax) / 2)
width = cmax - cmin
height = rmax - rmin
img_width = slice_.shape[1]
img_height = slice_.shape[0]
    # yolo coordinates: class, xcentre, ycentre, width, height (normalized by the image width/height)
yolo_coord = [
0,
xcentre / img_width,
ycentre / img_height,
width / img_width,
height / img_height,
]
filename = os.path.join(
destination_folder, str(pt_num) + "_" + str(slice_num) + ".txt"
)
with open(filename, "w") as file:
writer = csv.writer(file, delimiter=" ")
writer.writerow(yolo_coord)
def save_bboxes_from_nii(pt_num, nii_folder, dest_folder_name):
"""
Saves yolov5 coord txt files for one patient's segmentation masks (ie, one nii file)
"""
destination_folder = os.path.join(os.getcwd(), dest_folder_name)
if not os.path.exists(destination_folder):
print(f"Creating destination folder in current directory: {dest_folder_name}")
os.mkdir(destination_folder)
arr = nib.load(
os.path.join(nii_folder, "1.2.826.0.1.3680043." + str(pt_num) + ".nii")
).get_fdata()
arr = np.transpose(arr, (2, 0, 1))
arr = np.flip(arr, axis=0)
for slice_num, slice_ in enumerate(arr):
if not slice_.any():
continue
slice_ = np.rot90(slice_)
save_yolo_coord(pt_num, slice_num, slice_, destination_folder)
# List of patient numbers in segmentations folder
niis = os.listdir(
"/kaggle/input/rsna-2022-cervical-spine-fracture-detection/segmentations"
)
pts = [
re.search("(?<=1.2.826.0.1.3680043.)([0-9]*)(?=.nii)", filename).group(0)
for filename in niis
]
pts = [int(s) for s in pts]
print("List of pts with segmentations: ", pts)
dest_folder_name = "yolo_coords"
nii_folder = "/kaggle/input/rsna-2022-cervical-spine-fracture-detection/segmentations"
for pt_num in pts:
save_bboxes_from_nii(pt_num, nii_folder, dest_folder_name)
# Test to see if saved coords are correct
l = os.listdir("/kaggle/working/yolo_coords")
l = sorted(l)
l.sort(key=len)
f = np.random.choice(l)
print(f)
pt_num = re.search("^([0-9]*)(?=_)", f).group(0)
slice_num = re.search("(?<=_)([0-9]*)(?=.txt)", f).group(0)
CT_path = os.path.join(rsna_root, "train_images", "1.2.826.0.1.3680043." + pt_num)
CT_arr = CT_path_to_3D_arr(CT_path)
CT_slice = CT_arr[int(slice_num)]
img_width = CT_slice.shape[1]
img_height = CT_slice.shape[0]
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.imshow(CT_slice, cmap=plt.get_cmap("bone"))
p = "/kaggle/working/yolo_coords/" + f
with open(p, "r") as txt_file:
reader = csv.reader(txt_file)
row = next(reader)
row = [float(num) for num in row[0].split()]
bbox_xcentre = img_width * row[1]
bbox_ycentre = img_height * row[2]
bbox_width = img_width * row[3]
bbox_height = img_height * row[4]
rect = Rectangle(
(bbox_xcentre - int(bbox_width / 2), bbox_ycentre - int(bbox_height / 2)),
bbox_width,
bbox_height,
linewidth=1,
edgecolor="r",
facecolor="none",
)
ax.add_patch(rect)
# # Save Images for YOLOv5 training
# I found it easier to save the CT slices as jpegs for training Ultralytics YOLO.
# yolo coord txt files for all patients
txt_files = os.listdir("/kaggle/working/yolo_coords")
# patients with segmentation data
niis = os.listdir(
"/kaggle/input/rsna-2022-cervical-spine-fracture-detection/segmentations"
)
pts = [
re.search("(?<=1.2.826.0.1.3680043.)([0-9]*)(?=.nii)", filename).group(0)
for filename in niis
]
pts = [int(s) for s in pts]
# Save slices corresponding to each yolo_coord txt file
train_images = "/kaggle/input/rsna-2022-cervical-spine-fracture-detection/train_images"
yolo_slices = os.path.join(os.getcwd(), "yolo_slices")
if not os.path.exists(yolo_slices):
os.mkdir(yolo_slices)
for pt in pts:
slice_nums = []
for txt_file in txt_files:
if re.search("^([0-9]*)(?=_)", txt_file).group(0) == str(
pt
): # txt files for pt
slice_num = int(re.search("(?<=_)([0-9]*)(?=.txt)", txt_file).group(0))
slice_nums.append(slice_num)
pt_CT = os.path.join(train_images, f"1.2.826.0.1.3680043.{str(pt)}")
pt_slices_all = os.listdir(pt_CT) # list of dcms
pt_slices_all = sorted(pt_slices_all)
pt_slices_all.sort(key=len)
pt_slices = [pt_slices_all[slice_num] for slice_num in sorted(slice_nums)]
if min(pt_slices_all, key=len) == "2.dcm": # scans that are missing 1.dcm
for slice_ in pt_slices:
img_path = os.path.join(
train_images, "1.2.826.0.1.3680043." + str(pt), slice_
)
img = load_img_from_dcm(img_path)
imgs_savepath = os.path.join(
yolo_slices, f"{str(pt)}_{int(slice_[:-4])-2}.jpg"
)
cv2.imwrite(imgs_savepath, img)
else: # normal scans that start from 1.dcm
for slice_ in pt_slices:
img_path = os.path.join(
train_images, "1.2.826.0.1.3680043." + str(pt), slice_
)
img = load_img_from_dcm(img_path)
imgs_savepath = os.path.join(
yolo_slices, f"{str(pt)}_{int(slice_[:-4])-1}.jpg"
)
cv2.imwrite(imgs_savepath, img)
# # Organize images and labels into directories
# Split into train and valid set
all_images = os.listdir("/kaggle/working/yolo_slices")
all_images = [f[:-4] for f in all_images]
random.shuffle(all_images)
SPLIT_POINT = int(len(all_images) * 0.9)
train_set = all_images[:SPLIT_POINT]
valid_set = all_images[SPLIT_POINT:]
# Put each image/label into right directory
folders = ["train/images", "train/labels", "valid/images", "valid/labels"]
for folder in folders:
if not os.path.exists(folder):
os.makedirs(folder)
for idx in train_set:
jpg_file = f"/kaggle/working/yolo_slices/{idx}.jpg"
txt_file = f"/kaggle/working/yolo_coords/{idx}.txt"
shutil.move(jpg_file, "/kaggle/working/train/images")
shutil.move(txt_file, "/kaggle/working/train/labels")
for idx in valid_set:
jpg_file = f"/kaggle/working/yolo_slices/{idx}.jpg"
txt_file = f"/kaggle/working/yolo_coords/{idx}.txt"
shutil.move(jpg_file, "/kaggle/working/valid/images")
shutil.move(txt_file, "/kaggle/working/valid/labels")
|
# !pip install scipy
# !pip install plotly
# !pip install statsmodels
# !pip install scikit-learn
# !pip install matplotlib
import pandas as pd
from datetime import datetime
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import matplotlib.pyplot as plt
import plotly.figure_factory as ff
import numpy as np
from sklearn import linear_model
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from sklearn.preprocessing import PolynomialFeatures
import math
import os
for dirpath, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirpath, filename))
DATEPARSER = lambda x: datetime.strptime(x, "%d/%m/%Y")
def get_cryptcncy(ticker="BTC", start_date=None):
# problem: how to compare a datetimeindex with a datetime
if ticker == "BTC":
df_crptcncy = pd.read_csv(
"/kaggle/input/cryptocurrency/{}.csv".format(ticker),
parse_dates=["Date"],
date_parser=DATEPARSER,
)
else:
df_crptcncy = pd.read_csv(
"/kaggle/input/cryptocurrency/{}.csv".format(ticker), parse_dates=["Date"]
).iloc[::-1]
if start_date is not None:
start_date = datetime.strptime(start_date, "%Y-%m-%d")
df_crptcncy = df_crptcncy.loc[df_crptcncy["Date"] >= start_date]
df_crptcncy.set_index("Date", inplace=True)
df_crptcncy.index.freq = "-1D"
return df_crptcncy
def get_data(ticker="BTC", diff=None, shift=None, start_date=None):
df_cryptcncy = get_cryptcncy("BTC", start_date=start_date)
# external determinants
df_gold = pd.read_csv(
"/kaggle/input/external/gold.csv", parse_dates=["Date"], index_col="Date"
)
df_tre = pd.read_csv(
"/kaggle/input/external/treasury_bill.csv",
parse_dates=["Date"],
index_col="Date",
)
df_oil = pd.read_csv(
"/kaggle/input/external/crude_oil.csv", parse_dates=["Date"], index_col="Date"
).iloc[::-1]
df_inf = pd.read_csv(
"/kaggle/input/external/inflation_rate.csv",
parse_dates=["Date"],
index_col="Date",
)
df_ue = pd.read_csv(
"/kaggle/input/external/USD_EUR.csv", parse_dates=["Date"], index_col="Date"
)
df_uj = pd.read_csv(
"/kaggle/input/external/USD_JPY.csv", parse_dates=["Date"], index_col="Date"
)
df_uc = pd.read_csv(
"/kaggle/input/external/USD_CNY.csv", parse_dates=["Date"], index_col="Date"
)
# construct a df contains crypto and its factors
if diff is None:
df_ex = df_cryptcncy["Close"]
columns = [ticker]
else:
df_ex = df_cryptcncy["Close"].diff(diff)
columns = [ticker + "_diff {}".format(diff)]
df_ex = pd.concat(
[
df_ex,
df_gold["USD (PM)"],
df_oil["value"],
df_tre["1 MO"],
df_ue["close"],
df_uj["close"],
df_uc["close"],
],
axis=1,
).reindex(df_ex.index)
columns.extend(["Gold", "Crude Oil", "T-bill", "USD/EUR", "USD/JPY", "USD/CNY"])
df_ex.columns = columns
df_ex = df_ex.iloc[::-1]
df_ex.dropna(axis=0, how="any", inplace=True)
# df_ex.fillna(0, inplace=True)
# internal determinants
df_vl = pd.read_csv(
"/kaggle/input/internal/volume.csv", parse_dates=["Date"], index_col="Date"
)
df_bs = pd.read_csv(
"/kaggle/input/internal/block_speed.csv", parse_dates=["Date"], index_col="Date"
)
df_dff = pd.read_csv(
"/kaggle/input/internal/difficulty.csv", parse_dates=["Date"], index_col="Date"
)
df_fee = pd.read_csv(
"/kaggle/input/internal/fees.csv", parse_dates=["Date"], index_col="Date"
)
df_hr = pd.read_csv(
"/kaggle/input/internal/hash_rate.csv", parse_dates=["Date"], index_col="Date"
)
df_supply = pd.read_csv(
"/kaggle/input/internal/supply.csv", parse_dates=["Date"], index_col="Date"
)
# construct a df contains crypto and its factors
if diff is None:
df_in = df_cryptcncy["Close"]
columns = [ticker]
else:
df_in = df_cryptcncy["Close"].diff(diff)
columns = [ticker + "_diff {}".format(diff)]
df_in = pd.concat(
[
df_in,
df_vl["volume"],
df_bs["Block Speed"],
df_dff["Difficulty"],
df_fee["Average"],
df_fee["Fees Per Block"],
df_hr["Hash Rate"],
df_supply["Total Supply"],
],
axis=1,
).reindex(df_ex.index)
columns.extend(
[
"Volume",
"Block Speed",
"Difficulty",
"Average",
"Fees Per Block",
"Hash Rate",
"Total Supply",
]
)
df_in.columns = columns
df_in.dropna(axis=0, how="any", inplace=True)
# cryptocurrency makret
columns = ["BTC", "ADA", "BCH", "DASH", "EOS", "ETH", "LTC", "IOTA", "XMR", "XRP"]
columns.remove(ticker)
crypto_list = []
crypto_list.append(df_cryptcncy["Close"])
for t in columns:
crypto_list.append(get_cryptcncy(t)["Close"])
df_mkt = pd.concat(crypto_list, axis=1).reindex(df_cryptcncy["Close"].index)
columns.insert(0, ticker)
df_mkt.columns = columns
df_mkt.dropna(axis=0, how="any", inplace=True)
df_mkt["CMI10"] = (
0.25 * df_mkt["BTC"]
+ 0.25 * df_mkt["ETH"]
+ 0.1788 * df_mkt["XRP"]
+ 0.1118 * df_mkt["BCH"]
+ 0.0667 * df_mkt["EOS"]
+ 0.0457 * df_mkt["LTC"]
+ 0.0266 * df_mkt["XMR"]
+ 0.0254 * df_mkt["ADA"]
+ 0.0220 * df_mkt["IOTA"]
+ 0.0229 * df_mkt["DASH"]
)
df_event = pd.read_csv(
"/kaggle/input/events/events.csv",
parse_dates=["Date"],
date_parser=DATEPARSER,
sep=";",
)
return df_cryptcncy, df_ex, df_in, df_mkt, df_event
def corr_cof(df, transform=None):
if transform == "log":
df = np.log(df)
df_corr = df.corr(method="pearson")
z_text = np.around(df_corr.values, decimals=3)
fig = ff.create_annotated_heatmap(
z=df_corr.values,
x=list(df_corr.columns.values),
y=list(df_corr.columns.values),
annotation_text=z_text,
showscale=True,
)
fig.show()
columns = df.columns
rows_num = math.ceil(len(columns) / 2)
fig = make_subplots(rows=rows_num, cols=2, subplot_titles=columns)
for index, value in enumerate(columns):
if index == 0:
fig.add_trace(
go.Scatter(y=df[value], x=df.index, mode="markers", name=columns[0]),
row=1,
col=1,
)
else:
fig.add_trace(
go.Scatter(y=df[columns[0]], x=df[value], mode="markers", name=value),
row=int(index / 2 + 1),
col=int(index % 2 + 1),
)
fig.show()
def linear_analysis(df, determinants=[], transform=None, degree=None):
if transform == "polynomial" and degree is not None:
"You should set a degree for your polynomial regression"
X = df[determinants]
Y = df[df.columns[0]]
if transform == "log":
X = np.log(X)
Y = np.log(Y)
elif transform == "square":
X = np.square(X)
Y = np.square(Y)
elif transform == "polynomial":
polynomial_features = PolynomialFeatures(degree=degree)
X = polynomial_features.fit_transform(X)
regr = linear_model.LinearRegression()
regr.fit(X, Y)
print("Intercept: \n", regr.intercept_)
print("Coefficients: \n", regr.coef_)
X = sm.add_constant(X) # adding a constant
model = sm.OLS(Y, X).fit()
print(model.summary())
def differential_transform(timeseries, diff):
timeseries_diff = timeseries.diff(periods=diff)
timeseries_diff.fillna(0, inplace=True)
return timeseries_diff
def unit_root_test(timeseries, method="ADF", diff=None, name=None):
print("Name: {0}, Unit root test, Method:{1}, diff={2}".format(name, method, diff))
if diff is not None:
timeseries = differential_transform(timeseries, diff)
if method == "ADF":
timeseries_adf = adfuller(timeseries)
print("ADF Statistic: %f" % timeseries_adf[0])
print("p-value: %f" % timeseries_adf[1])
print("Critical Values:")
for key, value in timeseries_adf[4].items():
print("\t%s: %.3f" % (key, value))
def ACF_PFC(timeseries, lags):
fig = plt.figure()
ax1 = fig.add_subplot(211)
sm.graphics.tsa.plot_acf(timeseries, lags=lags, ax=ax1)
ax2 = fig.add_subplot(212)
sm.graphics.tsa.plot_pacf(timeseries, lags=lags, ax=ax2)
plt.show()
def decomposing(timeseries):
decomposition = seasonal_decompose(timeseries)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
plt.figure()
plt.subplot(411)
plt.plot(timeseries, label="Original")
plt.legend(loc="best")
plt.subplot(412)
plt.plot(trend, label="Trend")
plt.legend(loc="best")
plt.subplot(413)
plt.plot(seasonal, label="Seasonarity")
plt.legend(loc="best")
plt.subplot(414)
plt.plot(residual, label="Residual")
plt.legend(loc="best")
plt.show()
# problem: when to dropna and when to fillna
trend = trend.fillna(0)
seasonal = seasonal.fillna(0)
residual = residual.fillna(0)
# trend.dropna(inplace=True)
# seasonal.dropna(inplace=True)
# residual.dropna(inplace=True)
return timeseries, trend, seasonal, residual
def AIC_BIC(timeseries):
trend_evaluate = sm.tsa.arma_order_select_ic(
timeseries, ic=["aic", "bic"], trend="nc", max_ar=4, max_ma=4
)
print("trend AIC", trend_evaluate.aic_min_order)
print("trend BIC", trend_evaluate.bic_min_order)
def ARIMA_Model(df_close, order):
# check stationary
unit_root_test(df_close, diff=1)
# ACF and PACF
df_close_diff = differential_transform(df_close, diff=1)
ACF_PFC(df_close_diff, lags=20)
# decomposing
original, trend, seasonal, residual = decomposing(df_close)
unit_root_test(trend, diff=1, name="trend")
unit_root_test(residual, name="residual")
trend_diff = differential_transform(trend, diff=1)
ACF_PFC(trend_diff, lags=20)
ACF_PFC(residual, lags=20)
AIC_BIC(trend_diff)
AIC_BIC(residual)
    trend_model = ARIMA(trend, order=(1, 1, 1))
    residual_model = ARIMA(residual, order=(0, 0, 4))
    trend_fit = trend_model.fit(disp=0)
    residual_fit = residual_model.fit(disp=0)
    print(trend_fit.summary())
    print(residual_fit.summary())
return
def draw_candlestick(df_crycency, events=None):
fig = go.Figure()
df_crycency = df_crycency.reset_index()
fig.add_trace(
go.Candlestick(
x=df_crycency["Date"],
open=df_crycency["Open"],
high=df_crycency["High"],
low=df_crycency["Low"],
close=df_crycency["Close"],
# increasing=dict(line=dict(color='#17BECF')),
# decreasing=dict(line=dict(color='#7F7F7F'))
)
)
for event in events.values:
date = event[0]
content = event[1]
fig.add_annotation(
x=date,
y=df_crycency[df_crycency["Date"] == date]["Close"].values[0],
text=content,
arrowhead=3,
)
fig.update_layout(height=700, showlegend=False)
fig.show()
ticker = "BTC"
df_cryptcncy, df_ex, df_in, df_mkt, df_event = get_data("BTC", start_date="2017-05-01")
# external-factors
df_ex_deter = ["Gold", "Crude Oil", "USD/CNY"]
corr_cof(df_ex)
linear_analysis(df_ex, df_ex_deter, "log")
# internal-factors
# ["Block Speed", "Difficulty", "Average", "Fees Per Block", "Hash Rate", "Total Supply"]
df_in_deter = ["Block Speed", "Fees Per Block", "Volume"]
corr_cof(df_in)
linear_analysis(df_in, df_in_deter)
# crypto market factors ["BTC", "ADA", "BCH", "DASH", "EOS", "ETH", "LTC", "IOTA", "XMR", "XRP"]
corr_cof(df_mkt, "log")
df_mkt_deter = ["ETH", "XRP", "CMI10"]
linear_analysis(df_mkt, df_mkt_deter, "log")
# autocorrelation analysis
unit_root_test(df_cryptcncy["Close"], name="price")
unit_root_test(df_cryptcncy["Close"], name="price_diff", diff=1)
df_diff = differential_transform(df_cryptcncy["Close"], diff=1)
ACF_PFC(df_diff, lags=100)
# event
draw_candlestick(df_cryptcncy, events=df_event)
|
# ## Generating the training Data
# ### Loading the pictures
# Function to manage the input of Data
import glob
def data_path_from_name(name, all_names=False):
L = glob.glob(f"../**/{name}", recursive=True)
if len(L) > 1:
print(f"All path for {name} :")
print(L)
if all_names:
return L
print(f"Data path return {L[0]}")
return L[0]
from PIL import Image
picture_ims = [
Image.open(path) for path in data_path_from_name("Tile*.png", all_names=True)
]
tile_ims = [
Image.open(path) for path in data_path_from_name("Basic*.png", all_names=True)
]
from IPython.display import display
# Display the example of picture we want to shatter in tiles
def c_display(im, message="Size :"):
print(
"--------",
)
display(im)
print("________", message, im.size, end="\n\n")
for im in picture_ims:
c_display(im, message="Size of picture :")
# Display the types of tiles used for shattering the picture
for im in tile_ims:
c_display(im, message="Size of tile :")
# ### Shattering the pictures into 10x10 aeras
def shatter(im=picture_ims[0]):
l = []
for i in range(10):
for j in range(10):
l.append(im.crop((j * 10, i * 10, (j + 1) * 10, (i + 1) * 10)))
return l
i = 0
for im in shatter():
c_display(im, message="Size of area:")
i += 1
if i >= 3:
break
# ### Set of tile & their rotations
rotations = [0, 90, 180, 270]
def set_tiles(imgs=tile_ims):
l = []
for tile in imgs:
for rotation in rotations:
l.append(tile.rotate(angle=rotation))
return l
for im in set_tiles():
display(im)
# ### Generators of Data :
import random
def data_gen(
im_data,
max_it=10,
inf_gen=False,
func_encode_input=lambda x: x,
func_encode_output=lambda x: x,
):
n = 0
while True:
if n >= max_it:
return
im = random.choice(im_data)
yield (func_encode_input(im), func_encode_output(im))
n += 1
if inf_gen:
max_it += 1
x, y = None, None
for x, y in data_gen(shatter(picture_ims[0]), max_it=3):
c_display(x)
c_display(y)
print("\n\n\n\n")
import numpy as np
import torch
from_numpy_to_tensor_float = lambda z: torch.from_numpy(z).to(torch.float).T
import copy
def from_im_to_array(im):
data = copy.deepcopy(np.asarray(im.convert("L"), dtype="float"))
data = data.reshape((100, 1))
# Encoding data as bipolar input
    data[data == 255] = 1  # pixels with value 255 are encoded as +1
    data[data != 1] = -1  # all remaining pixels are encoded as -1
return data # Column vector
from functools import partial
# generator that iterate randomly over the areas of the picture
picture_area_gen = partial(
data_gen,
im_data=shatter(picture_ims[0]),
func_encode_input=lambda x: from_numpy_to_tensor_float(from_im_to_array(x)),
func_encode_output=lambda y: from_numpy_to_tensor_float(from_im_to_array(y)),
)
# generator that iterate randomly over the type of tile and their rotations
tile_gen = partial(
data_gen,
im_data=set_tiles(tile_ims),
func_encode_input=lambda x: from_numpy_to_tensor_float(from_im_to_array(x)),
func_encode_output=lambda y: from_numpy_to_tensor_float(from_im_to_array(y)),
)
for x, y in picture_area_gen(max_it=2):
print("x :", x, "\ny :", y, end="\n\n")
# ## Pytorch Neural Network
# Let's plug our problem into a pytorch NN
import torch
import torch.nn as nn
import torch.nn.functional as F
# ### NN architecture
hidden_1 = 20
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(x.size(1), hidden_1)
self.fc2 = nn.Linear(hidden_1, y.size(1))
def forward(self, x):
        x = torch.sigmoid(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net, end="\n\n")
for (
param
) in (
net.parameters()
): # net.parameters() Function that is yielding the learnable parameters
print(param.size())
# ### Learning method
class NN_trainer:
total = 0
def _weighted_generator(self, l_gen, l_weight=None, max_it=1000):
"""providing a list of generators it create a balanced or weigted generator of the data labeled with integer"""
l_gen = [gen(inf_gen=True) for gen in l_gen]
n = 0
if not len(l_gen) == 1:
l_weight = [1 for _ in range(len(l_gen))] if l_weight is None else l_weight
norm = lambda l: [e / sum(l) for e in l]
pb = norm(l_weight)
next_value = (
lambda i=0, s=0: (next(l_gen[i]), i)
if np.random.rand() < pb[i] / (1 - s)
else next_value(i + 1, s + pb[i])
)
while True:
if n >= max_it:
return
try:
yield next_value()
except:
# Very low pb (Theorically null probability)
print(
"Check if the max itteration for one gen is lower than max_it"
)
yield next(l_gen[-1]), (len(l_gen) - 1)
n += 1
else:
while True:
if n >= max_it:
return
yield (next(l_gen[0]), 0)
n += 1
def learn(
self,
net,
criterion,
optimizer,
n_epoch=10,
epoch_batch=100,
data_generators=[],
weight_data_gens=None,
):
total_bp = self.total
for e in range(n_epoch):
running_loss = 0
            # Build a random iterator over the data
data_epoch = self._weighted_generator(
l_gen=data_generators, l_weight=weight_data_gens, max_it=epoch_batch
)
for (x, y), indice_gen in data_epoch:
# Calculate output
output = net(x)
# zero the parameter's gradient data
optimizer.zero_grad()
# Compute the loss
loss = criterion(output, y)
running_loss += loss.item()
# Backpropagate the error
loss.backward()
optimizer.step()
total_bp += 1
print(
f"epoch : {e} loss {round(running_loss/epoch_batch,3)} total : {total_bp}"
)
self.total = total_bp
net = Net()
trainer = NN_trainer()
import torch.optim as optim
trainer.learn(
net=net,
criterion=nn.MSELoss(reduction="sum"),
optimizer=optim.SGD(net.parameters(), lr=0.001, momentum=0.9),
n_epoch=10,
epoch_batch=1000,
data_generators=[picture_area_gen, tile_gen],
weight_data_gens=None,
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
from IPython.core.debugger import set_trace
# import base packages into the namespace for this program
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import subprocess
import sklearn
assert sklearn.__version__ >= "0.20"
import seaborn as sns
import pandas as pd
# SKlearn
from sklearn.datasets import fetch_openml
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, classification_report
from sklearn.model_selection import train_test_split
# seed value for random number generators to obtain reproducible results
RANDOM_SEED = 85
#
# ### Kaggle version with different train and test dataset for submission
# ### The Kaggle train dataset has only 42K rows, so the flawed experiment is eliminated (42K cannot be split into 60K and 10K)
# ###
# Get MNIST Data Set ( https://github.com/ageron/handson-ml/issues/301#issuecomment-448853256 )
# The issue of obtaining MNIST data is solved by following "https://github.com/ageron/handson-ml/issues/143".
# from sklearn.datasets import fetch_openml
# mnist = fetch_openml('mnist_784', version=1, cache=True)
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
os.getcwd()
print(os.getcwd())
# Validate Current Path and create Path to data
from pathlib import Path
INPUT = Path("../input/digit-recognizer")
os.listdir(INPUT)
X_test = pd.read_csv(INPUT / "test.csv")
train = pd.read_csv(INPUT / "train.csv")
X_train = train.drop(["label"], axis="columns", inplace=False)
y_train = train["label"]
print(X_train.shape)
print(y_train.shape)
# rerun the experiment, using 70/30 split and two part PCA, train and test separately
# First Random Forest
# RANDOM FOREST on the original (non-reduced) data
RF_clf = RandomForestClassifier(
bootstrap=True, n_estimators=10, max_features="sqrt", random_state=RANDOM_SEED
)
start_RF = time.perf_counter()  # time.clock() was removed in Python 3.8
RF_clf.fit(X_train, y_train)
RF_CrossVal = cross_val_score(RF_clf, X_train, y_train, cv=10, scoring="f1_macro")
print(RF_CrossVal)
y_pred = cross_val_predict(RF_clf, X_train, y_train, cv=10)
print(classification_report(y_train, y_pred))
RF_clf_score = RF_clf.score(X_train, y_train)
print("Accuracy Score for Random Forest: {:.3f}".format(RF_clf_score))
f1score_RF_clf = f1_score(y_train, y_pred, average="macro")
print("F1 Score for Random Forest: {:.3f}".format(f1score_RF_clf))
stop_RF = time.perf_counter()
time_RF = stop_RF - start_RF
print("Start time for Random Forest: {:.3f}".format(start_RF))
print("End_time for Random Forest: {:.3f}".format(stop_RF))
print("Runtime for Random Forest: {:.3f}".format(time_RF))
column_names = ["ImageId", "Label"]
results = pd.DataFrame(columns=column_names)
results["Label"] = pd.Series(RF_clf.predict(X_test))
IDdata = pd.DataFrame(X_test)
results["ImageId"] = X_test.index + 1 # sub = results[['ImageId','Label']]
sub = results[["ImageId", "Label"]]
sub.to_csv("submissionRF_noPCA.csv", index=False)
# Now Random Forest with reduced data
start_RF_reduced = time.perf_counter()
rnd_pca = PCA(n_components=0.95)
X_train_reduced = rnd_pca.fit_transform(X_train)
X_test_reduced = rnd_pca.transform(X_test)
# RANDOM FOREST on Dimension Reduced Data (PCA:95% variability)
RF_clf_reduced = RandomForestClassifier(
bootstrap=True, n_estimators=10, max_features="sqrt", random_state=RANDOM_SEED
)
RF_clf_reduced.fit(X_train_reduced, y_train)
RFReducedCrossVal = cross_val_score(
RF_clf_reduced, X_train_reduced, y_train, cv=10, scoring="f1_macro"
)
print(RFReducedCrossVal)
y_pred_reduced = cross_val_predict(RF_clf_reduced, X_train_reduced, y_train, cv=10)
print(classification_report(y_train, y_pred_reduced))
RF_clf_reduced_score = RF_clf_reduced.score(X_train_reduced, y_train)
print("Accuracy Score for Random Forest Reduced: {:.3f}".format(RF_clf_reduced_score))
f1score_RF_clf_reduced = f1_score(y_train, y_pred_reduced, average="macro")
print("F1 Score for Random Forest Reduced: {:.3f}".format(f1score_RF_clf_reduced))
stop_RF_reduced = time.perf_counter()
time_RF_reduced = stop_RF_reduced - start_RF_reduced
print("Start time for Random Forest PCA Compressed: {:.3f}".format(start_RF_reduced))
print("End_time for Random Forest PCA Compressed: {:.3f}".format(stop_RF_reduced))
print("Runtime for Random Forest PCA Compressed: {:.3f}".format(time_RF_reduced))
column_names = ["ImageId", "Label"]
results = pd.DataFrame(columns=column_names)
results["Label"] = pd.Series(RF_clf_reduced.predict(X_test_reduced))
IDdata = pd.DataFrame(X_test_reduced)
results["ImageId"] = X_test.index + 1 # sub = results[['ImageId','Label']]
sub = results[["ImageId", "Label"]]
sub.to_csv("submissionRF_PCA.csv", index=False)
# Compare the results of the test
print("Compare the time:")
print("Random Forest Time no PCA: {:.3f}".format(time_RF))
print("Random Forest Time including PCA: {:.3f}".format(time_RF_reduced))
print(" ")
print("Compare the accuracy scores:")
print("Random Forest Accuracy Score no PCA: {:.3f}".format(RF_clf_score))
print("Random Forest Accuracy Score with PCA: {:.3f}".format(RF_clf_reduced_score))
print(" ")
print("Compare the F1 scores:")
print("Random Forest F1 Score no PCA: {:.3f}".format(f1score_RF_clf))
print("Random Forest F1 Score with PCA: {:.3f}".format(f1score_RF_clf_reduced))
# Graph the cross val scores
pred = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]  # indices of the 10 cross-validation folds
plt.figure()
plt.plot(pred, RF_CrossVal, "r", label="RF_CrossVal")
plt.plot(pred, RFReducedCrossVal, "b", label="RF_ReducedCrossVal")
plt.xlabel("Predicted Values")
plt.ylabel("Cross Validation Score")
plt.title("Cross Validation Comparison")
plt.legend(loc="center right")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train.head() # 12 fields
test.head() # 11 fields
train.shape
test.shape
train.info()
test.info()
train.isnull().sum()
test.isnull().sum()
import matplotlib.pyplot as plt
import seaborn as sns
sns.set() # setting seaborn default for plots
def bar_chart(feature):
survived = train[train["Survived"] == 1][feature].value_counts()
dead = train[train["Survived"] == 0][feature].value_counts()
df = pd.DataFrame([survived, dead])
df.index = ["Survived", "Dead"]
df.plot(kind="bar", stacked=True, figsize=(10, 5))
bar_chart("Sex")
bar_chart("Pclass")
bar_chart("SibSp")
bar_chart("Parch")
bar_chart("Embarked")
# Feature Engineering
# Titanic-Survival-Infographic
train.head(10)
train_test_data = [train, test] # combining train and test dataset
for dataset in train_test_data:
dataset["Title"] = dataset["Name"].str.extract(" ([A-Za-z]+)\.", expand=False)
train["Title"].value_counts()
test["Title"].value_counts()
"""
Title map
Mr : 0
Miss : 1
Mrs: 2
Others: 3"""
title_mapping = {
"Mr": 0,
"Miss": 1,
"Mrs": 2,
"Master": 3,
"Dr": 3,
"Rev": 3,
"Col": 3,
"Major": 3,
"Mlle": 3,
"Countess": 3,
"Ms": 3,
"Lady": 3,
"Jonkheer": 3,
"Don": 3,
"Dona": 3,
"Mme": 3,
"Capt": 3,
"Sir": 3,
}
for dataset in train_test_data:
dataset["Title"] = dataset["Title"].map(title_mapping)
train.head()
test.head()
bar_chart("Title")
# delete unnecessary feature from dataset
train.drop("Name", axis=1, inplace=True)
test.drop("Name", axis=1, inplace=True)
train.head()
test.head()
"""male: 0
female: 1"""
sex_mapping = {"male": 0, "female": 1}
for dataset in train_test_data:
dataset["Sex"] = dataset["Sex"].map(sex_mapping)
bar_chart("Sex")
"""some age is missing
Let's use Title's median age for missing Age
"""
train.head(100)
# fill missing age with median age for each title (Mr, Mrs, Miss, Others)
train["Age"].fillna(train.groupby("Title")["Age"].transform("median"), inplace=True)
test["Age"].fillna(test.groupby("Title")["Age"].transform("median"), inplace=True)
train.head(30)
train.groupby("Title")["Age"].transform("median")
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.show()
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.xlim(0, 20)
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.xlim(20, 30)
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.xlim(30, 40)
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.xlim(40, 60)
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.xlim(40, 60)
facet = sns.FacetGrid(train, hue="Survived", aspect=4)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
plt.xlim(60)
train.info()
test.info()
"""Binning
Binning/Converting Numerical Age to Categorical Variable
feature vector map:
child: 0
young: 1
adult: 2
mid-age: 3
senior: 4
"""
for dataset in train_test_data:
dataset.loc[dataset["Age"] <= 16, "Age"] = 0
dataset.loc[(dataset["Age"] > 16) & (dataset["Age"] <= 26), "Age"] = 1
dataset.loc[(dataset["Age"] > 26) & (dataset["Age"] <= 36), "Age"] = 2
dataset.loc[(dataset["Age"] > 36) & (dataset["Age"] <= 62), "Age"] = 3
dataset.loc[dataset["Age"] > 62, "Age"] = 4
train.head(10)
bar_chart("Age")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.metrics import mean_squared_error as mse
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# /kaggle/input/new-york-city-taxi-fare-prediction/sample_submission.csv
# /kaggle/input/new-york-city-taxi-fare-prediction/GCP-Coupons-Instructions.rtf
# /kaggle/input/new-york-city-taxi-fare-prediction/train.csv
# /kaggle/input/new-york-city-taxi-fare-prediction/test.csv
train_df = pd.read_csv("../input/date-timw/train.csv", nrows=10_000_000)
test_df = pd.read_csv("../input/nyctaxis2020/test.csv")
train_df.drop("key", axis=1, inplace=True)
train_df.head()
test_df.head()
test_df.describe()
train_df.dtypes
train_df.isna().sum()
pd.set_option("display.float_format", lambda x: "%.5f" % x)
train_df.describe()
(train_df["fare_amount"] < 0).sum()
((train_df["pickup_longitude"] < -180) | (train_df["pickup_longitude"] > 180)).sum()
((train_df["pickup_latitude"] < -90) | (train_df["pickup_latitude"] > 90)).sum()
((train_df["passenger_count"] == 0) | (train_df["passenger_count"] > 6)).sum()
(train_df["passenger_count"] > 6).sum()
print("Train Passenger count equals 0: ", (train_df["passenger_count"] == 0).sum())
print("Test Passenger count equals 0: ", (test_df["passenger_count"] == 0).sum())
plt.figure(figsize=(10, 6))
sns.histplot(train_df["fare_amount"])
plt.title("Distribution of Fare Amount")
def clean_df(df):
new_df = df[
((df["fare_amount"] > 0) & (df["fare_amount"] <= 200))
& ((df["pickup_longitude"] > -75) & (df["pickup_longitude"] < -73))
& ((df["pickup_latitude"] > 40) & (df["pickup_latitude"] < 42))
& ((df["dropoff_longitude"] > -75) & (df["dropoff_longitude"] < -73))
& ((df["dropoff_latitude"] > 40 & (df["dropoff_latitude"] < 42)))
& ((df["passenger_count"] > 0) & (df["passenger_count"] <= 6))
]
return new_df
plt.figure(figsize=(12, 8))
sns.scatterplot(x=train_df["pickup_longitude"], y=train_df["pickup_latitude"])
print("Before:", len(train_df))
train_df = clean_df(train_df)
print("After:", len(train_df))
plt.figure(figsize=(12, 8))
sns.scatterplot(x=train_df["pickup_longitude"], y=train_df["pickup_latitude"])
plt.figure(figsize=(10, 6))
sns.scatterplot(x=train_df["passenger_count"], y=train_df["fare_amount"])
plt.xlabel("Number of Passengers")
plt.ylabel("Fare Amount")
train_df.describe()
def manhattan_dist(lat_p, long_p, lat_d, long_d):
distance = np.abs(lat_d - lat_p) + np.abs(long_d - long_p)
return distance
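# Quick hedged illustration with hypothetical coordinates (the result is in degrees, not metres):
# |40.76 - 40.71| + |(-73.98) - (-74.00)| = 0.05 + 0.02 = 0.07
print(manhattan_dist(40.71, -74.00, 40.76, -73.98))  # ~0.07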
def add_datetime_info(df, transform_datetime=False):
if transform_datetime:
df["pickup_datetime"] = pd.to_datetime(
df["pickup_datetime"], format="%Y-%m-%d %H:%M:%S UTC"
)
df["hour"] = df["pickup_datetime"].dt.hour
df["day"] = df["pickup_datetime"].dt.day
df["month"] = df["pickup_datetime"].dt.month
df["year"] = df["pickup_datetime"].dt.year
# df['weekday'] = df['pickup_datetime'].dt.weekday # removing this since it's the least important feature
df.drop("pickup_datetime", axis=1, inplace=True)
def add_airport_info(df):
# nyc = (40.7141667, -74.0063889)
# jfk = (-73.7822222222, 40.6441666667)
# ewr = (-74.175, 40.69)
# lgr = (-73.87, 40.77)
    nyc = (40.7141667, -74.0063889)  # stored as (lat, lon) to match the other reference points below
jfk = (40.6441666667, -73.7822222222)
ewr = (40.69, -74.175)
lgr = (40.77, -73.87)
df["distance_to_center"] = manhattan_dist(
nyc[0], nyc[1], df["pickup_latitude"], df["pickup_longitude"]
)
df["pickup_distance_to_jfk"] = manhattan_dist(
jfk[0], jfk[1], df["pickup_latitude"], df["pickup_longitude"]
)
df["dropoff_distance_to_jfk"] = manhattan_dist(
jfk[0], jfk[1], df["dropoff_latitude"], df["dropoff_longitude"]
)
df["pickup_distance_to_ewr"] = manhattan_dist(
ewr[0], ewr[1], df["pickup_latitude"], df["pickup_longitude"]
)
df["dropoff_distance_to_ewr"] = manhattan_dist(
ewr[0], ewr[1], df["dropoff_latitude"], df["dropoff_longitude"]
)
df["pickup_distance_to_lgr"] = manhattan_dist(
lgr[0], lgr[1], df["pickup_latitude"], df["pickup_longitude"]
)
df["dropoff_distance_to_lgr"] = manhattan_dist(
lgr[0], lgr[1], df["dropoff_latitude"], df["dropoff_longitude"]
)
df["long_diff"] = df.dropoff_longitude - df.pickup_longitude
df["lat_diff"] = df.dropoff_latitude - df.pickup_latitude
def transform(df, transform_datetime):
add_datetime_info(df, transform_datetime)
add_airport_info(df)
df["manhattan_dist"] = manhattan_dist(
df["pickup_latitude"],
df["pickup_longitude"],
df["dropoff_latitude"],
df["dropoff_longitude"],
)
return df
# train_df['pickup_datetime'] = pd.to_datetime(train_df['pickup_datetime'], format="%Y-%m-%d %H:%M:%S UTC")
train_df = transform(train_df, transform_datetime=True)
train_df.head()
def visualize_date_fare(df):
# date_objects = ['hour', 'day', 'weekday', 'month', 'year']
date_objects = ["hour", "day", "month", "year"]
for idx, obj in enumerate(date_objects):
# print("IDX", idx)
# print("OBJ", obj)
plt.figure(figsize=(10, 6))
sns.barplot(
x=df[obj], y=df["fare_amount"], ci=None
) # setting ci=None dramatically decreases the time for plotting
print(plt.get_cmap())
plt.title("Average Fare Amount by " + obj)
plt.ylabel("Fare Amount")
plt.show()
visualize_date_fare(train_df)
def visualize_date_counts(df):
# date_objects = ['hour', 'day', 'weekday', 'month', 'year']
date_objects = ["hour", "day", "month", "year"]
# fig, axes = plt.subplots(1, 5)
for obj in date_objects:
plt.figure(figsize=(10, 6))
sns.countplot(x=df[obj])
plt.ylabel("Count")
plt.title("Taxi Rides Count by " + obj)
plt.show()
visualize_date_counts(train_df)
train_df.head()
X = train_df.drop(["fare_amount"], axis=1)
y = train_df["fare_amount"]
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=42, test_size=0.05)
X_train.head()
del X
del y
def XGBoost(X_train, X_test, y_train, y_test):
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
return xgb.train(
params={
"objective": "reg:linear",
"eval_metric": "rmse",
"max_depth": 7,
"colsample_bytree": 0.9,
"gamma": 1,
},
dtrain=dtrain,
num_boost_round=400,
early_stopping_rounds=30,
evals=[(dtest, "test")],
)
xgb_model = XGBoost(X_train, X_val, y_train, y_val)
y_train_pred = xgb_model.predict(
xgb.DMatrix(X_train), ntree_limit=xgb_model.best_iteration
)
y_val_pred = xgb_model.predict(xgb.DMatrix(X_val), ntree_limit=xgb_model.best_iteration)
print("Train set error: ", np.sqrt(mse(y_train, y_train_pred)))
print("Validation set error: ", np.sqrt(mse(y_val, y_val_pred)))
# Read and preprocess test set
test_df = pd.read_csv("../input/nyctaxis2020/test.csv")
# test_df['pickup_datetime'] = pd.to_datetime(test_df['pickup_datetime'], format="%Y-%m-%d %H:%M:%S UTC")
test_df = transform(test_df, transform_datetime=True)
# test_df['manhattan_dist'] = manhattan_dist(test_df['pickup_latitude'], test_df['pickup_longitude'],
# test_df['dropoff_latitude'] , test_df['dropoff_longitude'])
test_key = test_df["key"]
# x_pred = test_df.drop(columns=['key', 'passenger_count'])
x_pred = test_df.drop(columns=["key"])
# Predict from test set
prediction = xgb_model.predict(xgb.DMatrix(x_pred))
test_df.head()
train_df.head()
len(x_pred)
prediction = prediction.round(2)
submission = pd.DataFrame({"key": test_key, "fare_amount": prediction})
# submission.to_csv('/kaggle/working/XGBSubmission.csv',index=False)
submission.to_csv("submission.csv", index=False)
pd.read_csv("submission.csv").head()
print("Plotting Feature Importance")
fig, ax = plt.subplots(figsize=(12, 16))
xgb.plot_importance(xgb_model, height=0.7, ax=ax)
ax.grid(False)
plt.title("XGBoost - Feature Importance", fontsize=14)
plt.show()
|
# Importing this libraries (modules) will make it possible to use certain packages (more like a formula)
import numpy as np  # This is used to perform a wide variety of mathematical operations on arrays
import pandas as pd # used for data science/data analysis and machine learning tasks.
import matplotlib.pyplot as plt # Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python
from sklearn import preprocessing
from xgboost import XGBRegressor
import sklearn.metrics as metrics
import math
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
from scipy.stats import norm, skew
import seaborn as sns
import warnings
from scipy import stats
warnings.filterwarnings("ignore")
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from mlxtend.regressor import StackingCVRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from datetime import datetime
from scipy.stats import skew # for some statistics
# **Gathering Data From the Source**
# Raise the display limits so that more rows and columns are shown when the data is displayed
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 1000
train = pd.read_csv(
"../input/regression-technique-eda/House Price Regression Tecnique/train.csv"
)
test = pd.read_csv(
"../input/regression-technique-eda/House Price Regression Tecnique/test.csv"
)
# #**Carrying out EDA to proper understand and clean the data**#
# Checking the size of test file and train file
train.shape, test.shape
# From the above output we can see that the train file has 1,460 rows and 81 columns, while the test file has 1,459 rows and 80 columns
#
train.head()
# Displaying the first rows lets us observe the variety of data that exists in each column (variable)
train.info()
"""This gives some insight about the data e.g Class, numbers of rows and columns, number of non-null rows in each column(Variable),
the array datatype(Dtype) and the memory size"""
# Descrption of the the data
train.describe()
# Since the data is wide, So let's see it transposed
display(train.describe().transpose())
train["SalePrice"].describe()
# This shows that the 'SalePrice' variable contains no zero values, which makes it suitable for modelling
"""There are several ways to perform exploratory data analysis on data but for this particular data
I want to distinguish it into Qualitative and Quantitative data"""
quantitative = [f for f in train.columns if train.dtypes[f] != "object"]
quantitative.remove("SalePrice")
quantitative.remove("Id")
qualitative = [f for f in train.columns if train.dtypes[f] == "object"]
# Note that I removed SalePrice and Id; this is because 'Id' is not useful (neither qualitative nor quantitative) and 'SalePrice' is the target variable
# histogram
sns.distplot(train["SalePrice"])
# Exploring the target variable, the graph deviates from a normal distribution (the column is positively skewed) and shows peakedness
# checking the value for skewness and kurtosis
print("Skewness: %f" % train["SalePrice"].skew())
print("Kurtosis: %f" % train["SalePrice"].kurt())
# Since skewness > 1 then it is positive skewness
# and Kurtosis > 3 shows excess peakedness.
# **Using Correlation analysis to get the best features**
# Display a Heapmap to easily identify the best correlations
corrmat = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=0.8, square=True)
corr = train.corr()
highest_corr_features = corr.index[abs(corr["SalePrice"]) > 0.5]
plt.figure(figsize=(10, 10))
g = sns.heatmap(train[highest_corr_features].corr(), annot=True, cmap="RdYlGn")
# Note that there are certain variables with nearly the same correlation score, e.g. TotalBsmtSF and 1stFlrSF, GrLivArea and TotRmsAbvGrd, GarageArea and GarageCars.
# From each of these pairs we can keep the feature that is more strongly correlated with 'SalePrice' (e.g. GrLivArea rather than TotRmsAbvGrd), as the quick check below shows.
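# A quick hedged check of this (assuming the standard column names of the House Prices data):
paired_cols = ["TotalBsmtSF", "1stFlrSF", "GrLivArea", "TotRmsAbvGrd", "GarageArea", "GarageCars"]
print(train[paired_cols].corrwith(train["SalePrice"]).sort_values(ascending=False))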
# **Correlation Analysis**
# Trying to check which features best correlate with 'SalePrice'
# Let's check which features are the most corelated
train.corr()["SalePrice"].sort_values(ascending=False)[1:]
import scipy.stats as st
y = train["SalePrice"]
plt.figure(1)
plt.title("Johnson SU")
sns.distplot(y, kde=False, fit=st.johnsonsu)
plt.figure(2)
plt.title("Normal")
sns.distplot(y, kde=False, fit=st.norm)
plt.figure(3)
plt.title("Log Normal")
sns.distplot(y, kde=False, fit=st.lognorm)
# It is apparent that SalePrice doesn't follow a normal distribution, so it has to be transformed before performing regression. While a log transformation does a pretty good job, the best fit is the unbounded Johnson distribution.
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col="variable", col_wrap=4, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
# **Categorical Data**
for c in qualitative:
train[c] = train[c].astype("category")
    if train[c].isnull().any():
train[c] = train[c].cat.add_categories(["MISSING"])
train[c] = train[c].fillna("MISSING")
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
f = pd.melt(train, id_vars=["SalePrice"], value_vars=qualitative)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(boxplot, "value", "SalePrice")
"""def encode(frame, feature):
ordering = pd.DataFrame()
ordering['val'] = frame[feature].unique()
ordering.index = ordering.val
ordering['spmean'] = frame[[feature, 'SalePrice']].groupby(feature).mean()['SalePrice']
ordering = ordering.sort_values('spmean')
ordering['ordering'] = range(1, ordering.shape[0]+1)
ordering = ordering['ordering'].to_dict()
for cat, o in ordering.items():
frame.loc[frame[feature] == cat, feature+'_E'] = o
qual_encoded = []
for q in qualitative:
encode(train, q)
qual_encoded.append(q+'_E')
print(qual_encoded)"""
# I want to calculate the Spearman correlation between the features and the target variable (SalePrice), and to visualize the correlations in a barplot.
def spearman(frame, features):
spr = pd.DataFrame()
spr["feature"] = features
spr["spearman"] = [frame[f].corr(frame["SalePrice"], "spearman") for f in features]
spr = spr.sort_values("spearman")
plt.figure(figsize=(6, 0.25 * len(features)))
sns.barplot(data=spr, y="feature", x="spearman", orient="h")
features = quantitative + qual_encoded
spearman(train, features)
# In the above, Spearman correlation was used to evaluate relationships involving ordinal variables (against the variable SalePrice)
"""def pairplot(x, y, **kwargs):
ax = plt.gca()
ts = pd.DataFrame({'time': x, 'val': y})
ts = ts.groupby('time').mean()
ts.plot(ax=ax)
plt.xticks(rotation=90)
f = pd.melt(train, id_vars=['SalePrice'], value_vars=quantitative+qual_encoded)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(pairplot, "value", "SalePrice")"""
# **Data Processing**
test.head()
train.columns
train["SalePrice"].hist(bins=40)
# **SalePrice is not normally distributed and is skewed towards the right (positively skewed). Therefore, we use log1p to remove the skewness.
# We take log as logs are used to respond to skewness towards large values; i.e., cases in which one or a few points are much larger than the bulk of the data.**
# The GrLivArea < 4500 filter removes outliers that may negatively impact the model, and np.log1p is applied to make the distribution of the target variable more symmetric and improve model performance.
train = train[train.GrLivArea < 4500]
train.reset_index(drop=True, inplace=True)
train["SalePrice"] = np.log1p(train["SalePrice"])
y = train["SalePrice"].reset_index(drop=True)
# **As you can see below , the data is now more balanced , suitable for training and prediction purposes**
train["SalePrice"].hist(bins=40)
# **Data Cleaning**
# Removing missing data
# **Storing SalePrice column seperately , as it is the 'Y' label / target that our model will learn to predict. Not to be stored in 'X' or features**
train_features = train.drop(["SalePrice"], axis=1)
test_features = test
features = pd.concat([train_features, test_features]).reset_index(drop=True)
# **Now , we are merging train and test datasets , so that we can handle 'noise' and missing data in the dataset **
features.shape
# Since these column are actually a category , using a numerical number will lead the model to assume
# that it is numerical , so we convert to string .
features["MSSubClass"] = features["MSSubClass"].apply(str)
features["YrSold"] = features["YrSold"].astype(str)
features["MoSold"] = features["MoSold"].astype(str)
## Filling these columns With most suitable value for these columns
features["Functional"] = features["Functional"].fillna("Typ")
features["Electrical"] = features["Electrical"].fillna("SBrkr")
features["KitchenQual"] = features["KitchenQual"].fillna("TA")
features["PoolQC"] = features["PoolQC"].fillna("None")
## Filling these with MODE , i.e. , the most frequent value in these columns .
features["Exterior1st"] = features["Exterior1st"].fillna(
features["Exterior1st"].mode()[0]
)
features["Exterior2nd"] = features["Exterior2nd"].fillna(
features["Exterior2nd"].mode()[0]
)
features["SaleType"] = features["SaleType"].fillna(features["SaleType"].mode()[0])
### Missing data in GarageYrBlt most probably means a missing garage, so replace NaN with zero.
for col in ("GarageYrBlt", "GarageArea", "GarageCars"):
features[col] = features[col].fillna(0)
for col in ["GarageType", "GarageFinish", "GarageQual", "GarageCond"]:
features[col] = features[col].fillna("None")
### Same with basement
for col in ("BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"):
features[col] = features[col].fillna("None")
features["MSZoning"] = features.groupby("MSSubClass")["MSZoning"].transform(
lambda x: x.fillna(x.mode()[0])
)
# **Fill the remaining columns as None**
objects = []
for i in features.columns:
if features[i].dtype == object:
objects.append(i)
features.update(features[objects].fillna("None"))
print(objects)
# **For missing values in numerical cols , we fillNa with 0.**
# We are still filling up missing values
features["LotFrontage"] = features.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median())
)
numeric_dtypes = ["int16", "int32", "int64", "float16", "float32", "float64"]
numerics = []
for i in features.columns:
if features[i].dtype in numeric_dtypes:
numerics.append(i)
features.update(features[numerics].fillna(0))
numerics[1:10]
numeric_dtypes = ["int16", "int32", "int64", "float16", "float32", "float64"]
numerics2 = []
for i in features.columns:
if features[i].dtype in numeric_dtypes:
numerics2.append(i)
skew_features = (
features[numerics2].apply(lambda x: skew(x)).sort_values(ascending=False)
)
high_skew = skew_features[skew_features > 0.5]
skew_index = high_skew.index
for i in skew_index:
features[i] = boxcox1p(features[i], boxcox_normmax(features[i] + 1))
features.shape
# get_dummies converts categorical data to numerical, as models don't work with text data: each category gets its own column, mostly binary indicators.
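# Tiny illustration (toy data, separate from the pipeline) of how get_dummies expands a categorical column:
toy = pd.DataFrame({"color": ["red", "blue", "red"]})
print(pd.get_dummies(toy))  # expands into binary indicator columns color_blue / color_red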
final_features = pd.get_dummies(features).reset_index(drop=True)
final_features.shape
# **Understandably, the number of columns has increased.
# Here, train and test are split back apart, now that all the data processing is done.
# y is the target, and its length is used to make the split.**
X = final_features.iloc[: len(y), :]
X_sub = final_features.iloc[len(y) :, :]
X.shape, y.shape, X_sub.shape
# Removing outliers. Read other kernels to understand how they were found;
# a few were already flagged by the dataset provider, the others show up when the data is plotted.
outliers = [30, 88, 462, 631, 1322]
X = X.drop(X.index[outliers])
y = y.drop(y.index[outliers])
overfit = []
for i in X.columns:
counts = X[i].value_counts()
zeros = counts.iloc[0]
if zeros / len(X) * 100 > 99.94:
overfit.append(i)
overfit = list(overfit)
X = X.drop(overfit, axis=1)
X_sub = X_sub.drop(overfit, axis=1)
overfit
X.shape, y.shape, X_sub.shape
# **Final Step :
# Now , we are getting started with the process of modelling
# K-Folds cross-validator
# Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default).
# Each fold is then used once as a validation while the k - 1 remaining folds form the training set.**
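# Illustrative only: how KFold hands out train/validation indices, shown on a toy array of 6 samples.
from sklearn.model_selection import KFold

for tr_idx, va_idx in KFold(n_splits=3, shuffle=True, random_state=0).split(np.arange(6)):
    print("train:", tr_idx, "val:", va_idx)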
# defining error functions for handy use.
kfolds = KFold(n_splits=10, shuffle=True, random_state=42)
def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
def cv_rmse(model, X=X):
rmse = np.sqrt(
-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=kfolds)
)
return rmse
alphas_alt = [14.5, 14.6, 14.7, 14.8, 14.9, 15, 15.1, 15.2, 15.3, 15.4, 15.5]
alphas2 = [5e-05, 0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008]
e_alphas = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007]
e_l1ratio = [0.8, 0.85, 0.9, 0.95, 0.99, 1]
ridge = make_pipeline(RobustScaler(), RidgeCV(alphas=alphas_alt, cv=kfolds))
lasso = make_pipeline(
RobustScaler(), LassoCV(max_iter=1e7, alphas=alphas2, random_state=42, cv=kfolds)
)
elasticnet = make_pipeline(
RobustScaler(),
ElasticNetCV(max_iter=1e7, alphas=e_alphas, cv=kfolds, l1_ratio=e_l1ratio),
)
svr = make_pipeline(
RobustScaler(),
SVR(
C=20,
epsilon=0.008,
gamma=0.0003,
),
)
# **Defining the gradient-boosting models: GradientBoostingRegressor, LightGBM and XGBoost.**
gbr = GradientBoostingRegressor(
n_estimators=3000,
learning_rate=0.05,
max_depth=4,
max_features="sqrt",
min_samples_leaf=15,
min_samples_split=10,
loss="huber",
random_state=42,
)
lightgbm = LGBMRegressor(
objective="regression",
num_leaves=4,
learning_rate=0.01,
n_estimators=5000,
max_bin=200,
bagging_fraction=0.75,
bagging_freq=5,
bagging_seed=7,
feature_fraction=0.2,
feature_fraction_seed=7,
verbose=-1,
)
xgboost = XGBRegressor(
learning_rate=0.01,
n_estimators=3460,
max_depth=3,
min_child_weight=0,
gamma=0,
subsample=0.7,
colsample_bytree=0.7,
objective="reg:linear",
nthread=-1,
scale_pos_weight=1,
seed=27,
reg_alpha=0.00006,
)
# **Stacking trains a meta-model on the base models' out-of-fold predictions, so no base model is evaluated on data it was fit on, which helps reduce overfitting.**
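# Conceptual sketch of what the stacked model does internally (illustrative, not part of the final pipeline):
# each base regressor contributes out-of-fold predictions, and the meta-regressor learns from those.
from sklearn.model_selection import cross_val_predict

oof_ridge = cross_val_predict(ridge, X, y, cv=kfolds)  # out-of-fold predictions of one base model
print("example meta-feature column from ridge, shape:", oof_ridge.shape)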
stack_gen = StackingCVRegressor(
regressors=(ridge, lasso, elasticnet, gbr, xgboost, lightgbm),
meta_regressor=xgboost,
use_features_in_secondary=True,
)
# **Here, we compare the cross-validated RMSE of the various models we just created.**
# Using the various prediction models that we just created
score = cv_rmse(ridge, X)
print(
    "ridge: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
    datetime.now(),
)
score = cv_rmse(lasso, X)
print(
"LASSO: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
datetime.now(),
)
score = cv_rmse(elasticnet)
print(
"elastic net: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
datetime.now(),
)
score = cv_rmse(svr)
print(
"SVR: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
datetime.now(),
)
score = cv_rmse(lightgbm)
print(
"lightgbm: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
datetime.now(),
)
score = cv_rmse(gbr)
print(
"gbr: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
datetime.now(),
)
score = cv_rmse(xgboost)
print(
"xgboost: {:.4f} ({:.4f})\n".format(score.mean(), score.std()),
datetime.now(),
)
print("START Fit")
print("stack_gen")
stack_gen_model = stack_gen.fit(np.array(X), np.array(y))
print("elasticnet")
elastic_model_full_data = elasticnet.fit(X, y)
print("Lasso")
lasso_model_full_data = lasso.fit(X, y)
print("Ridge")
ridge_model_full_data = ridge.fit(X, y)
print("Svr")
svr_model_full_data = svr.fit(X, y)
print("GradientBoosting")
gbr_model_full_data = gbr.fit(X, y)
print("xgboost")
xgb_model_full_data = xgboost.fit(X, y)
print("lightgbm")
lgb_model_full_data = lightgbm.fit(X, y)
# **Blending Models / 'Ensembling':
# each model contributes a fixed fraction of the final prediction, and the weights below add up to 1.**
def blend_models_predict(X):
return (
(0.1 * elastic_model_full_data.predict(X))
+ (0.05 * lasso_model_full_data.predict(X))
+ (0.1 * ridge_model_full_data.predict(X))
+ (0.1 * svr_model_full_data.predict(X))
+ (0.1 * gbr_model_full_data.predict(X))
+ (0.15 * xgb_model_full_data.predict(X))
+ (0.1 * lgb_model_full_data.predict(X))
+ (0.3 * stack_gen_model.predict(np.array(X)))
)
print("RMSLE score on train data:")
print(rmsle(y, blend_models_predict(X)))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
train_df.head()
test_df.head()
# > **Data Cleaning: Train Dataset**
train_df.info()
# There are some missing entries in the 'Age', 'Cabin' and 'Embarked' columns.
# >1. We will remove the 'Cabin' column completely.
# >2. We will use the dropna method to drop the rows with missing 'Embarked' values, which will barely impact the DataFrame as only 2 entries are null,
# >3. For the 'Age' column we will create a function to impute the age based on the typical age within each 'Pclass'.
train_df = train_df.drop("Cabin", axis=1)
train_df.head()
plt.figure(figsize=(12, 7))
sns.boxplot(x="Pclass", y="Age", data=train_df, palette="rainbow")
# We can see from the plot above that the typical age of a passenger depends on the class they are travelling in. So we can infer the values below.
# * >Class 1 = 37
# * >Class 2 = 28
# * >Class 3 = 24
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 28
else:
return 24
else:
return Age
train_df["Age"] = train_df[["Age", "Pclass"]].apply(impute_age, axis=1)
train_df.dropna(inplace=True)
train_df.head()
# > We will drop the non-numerical columns and use pd.get_dummies to create dummy variables for the 'Sex' and 'Embarked' columns
sex = pd.get_dummies(train_df["Sex"], drop_first=True)
embarked = pd.get_dummies(train_df["Embarked"], drop_first=True)
train_df.drop(train_df[["Name", "Sex", "Ticket", "Embarked"]], axis=1, inplace=True)
train_df.head()
train_df = pd.concat([train_df, sex, embarked], axis=1)
train_df.info()
# # Test Dataset Cleaning
# > we will do the same cleaning as we did in train dataset
test_df.head()
test_df.info()
test_df["Age"] = test_df[["Age", "Pclass"]].apply(impute_age, axis=1)
test_df["Fare"].fillna(test_df["Fare"].mean(), inplace=True)
test_df.head()
sex_test = pd.get_dummies(test_df["Sex"], drop_first=True)
embarked_test = pd.get_dummies(test_df["Embarked"], drop_first=True)
test_df.drop(["Name", "Sex", "Embarked", "Ticket", "Cabin"], axis=1, inplace=True)
test_df = pd.concat([test_df, sex_test, embarked_test], axis=1)
test_df.head()
train_df.head()
# # Predictions : Finding the best model
# > we will create a train test split of the Train_df dataset in order to find the best Classification Model.
# > we will then select the best model based on our findings and then apply the same on Test Dataset
## Create train and test data from the cleaned train_df (a minimal split is sketched below)
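# A minimal split, assuming we predict 'Survived' from the remaining columns of train_df
# (test_size and random_state here are illustrative choices):
from sklearn.model_selection import train_test_split

X = train_df.drop(["Survived", "PassengerId"], axis=1)
y = train_df["Survived"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)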
# ## 1) Logistic Regression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
log_pred = log_reg.predict(X_test)
print(classification_report(y_test, log_pred))
|
# 1. xsquad: https://raw.githubusercontent.com/deepmind/xquad/master/xquad.vi.json
# 2. bert-vietnamese-question-answering: https://raw.githubusercontent.com/mailong25/bert-vietnamese-question-answering/master/dataset/train-v2.0.json
# 3. Zalo 2022: https://www.kaggle.com/datasets/ducnh279/nlp-data
# # Zalo
import pandas as pd
from tqdm.notebook import tqdm
train_df = pd.read_json("/kaggle/input/nlp-data/zac2022_train_merged_final.json")
def get_keys(data):
return [k for k, _ in data.items()]
def has_answer(df):
return df[df.data.apply(get_keys).apply(lambda x: "answer" in x)]
def get_context(data):
return data["text"]
def get_question(data):
return data["question"]
def get_answer_position(data):
return data["short_candidate_start"]
def get_answer(data):
return data["short_candidate"]
def question_context(df, i):
question = df.data.apply(get_question)[i]
    text = df.data.apply(get_context)[i]
print(f"- Question: {question}\n" f"- Context: {text}")
questions = has_answer(train_df).data.apply(get_question).reset_index(drop=True)
contexts = has_answer(train_df).data.apply(get_context).reset_index(drop=True)
answers = has_answer(train_df).data.apply(get_answer).reset_index(drop=True)
answer_starts = (
has_answer(train_df).data.apply(get_answer_position).reset_index(drop=True)
)
zalo = pd.DataFrame(
{
"question": questions,
"context": contexts,
"answer": answers,
"answer_start": answer_starts,
}
)
# # Xsquad
df = pd.read_json(
"https://raw.githubusercontent.com/deepmind/xquad/master/xquad.vi.json"
)
df.drop("version", axis=1, inplace=True)
def get_paragraph(df):
return df.data.apply(lambda x: x["paragraphs"])
para = get_paragraph(df)
dfs = []
for p in para:
p_df = pd.DataFrame(p)
question_df = p_df["qas"].apply(lambda x: x[0]["question"]).to_frame("question")
start_pos_and_text_df = (
p_df["qas"].apply(lambda x: x[0]["answers"][0]).apply(pd.Series)
)
final_p_df = pd.concat([question_df, p_df, start_pos_and_text_df], axis=1)
final_p_df.drop("qas", axis=1, inplace=True)
final_p_df.columns = ["question", "context", "answer_start", "answer"]
dfs.append(final_p_df)
xsquad = pd.concat(dfs, axis=0, ignore_index=True)
xsquad
# # Bert QA
df = pd.read_json(
"https://raw.githubusercontent.com/mailong25/bert-vietnamese-question-answering/master/dataset/train-v2.0.json"
)
para = get_paragraph(df)
dfs = []
for p in tqdm(para):
p_df = pd.DataFrame(p)
p_df = p_df[p_df.qas.apply(lambda x: x[0]["answers"] != [])].reset_index(drop=True)
if p_df.values.tolist() == []:
continue
question_df = p_df["qas"].apply(lambda x: x[0]["question"]).to_frame("question")
start_pos_and_text_df = (
p_df["qas"].apply(lambda x: x[0]["answers"][0]).apply(pd.Series)
)
final_p_df = pd.concat([question_df, p_df, start_pos_and_text_df], axis=1)
final_p_df.drop("qas", axis=1, inplace=True)
final_p_df.columns = ["question", "context", "answer_start", "answer"]
dfs.append(final_p_df)
bert_qa = pd.concat(dfs, axis=0, ignore_index=True)
# # Merge QA datasets
qa_dataset = pd.concat([zalo, xsquad, bert_qa], axis=0, ignore_index=True)
qa_dataset.to_json("qa_dataset.json")
|
x = 2
t = 5.7
k = 9j
print(type(x))
print(type(t))
print(type(k))
r = 8 # int
s = 5.7 # float
t = 2  # complex
# convert from int to float:
o = float(r)
# convert from float to int:
p = int(s)
# convert from int to complex:
q = complex(r)
print(o)
print(p)
print(q)
print(type(o))
print(type(p))
print(type(q))
# # Random Numbers
import random
print(random.randrange(55, 100))
str1 = "43"
str1 = int((str1))
print(type(str1))
str1
str1 = "4"
str2 = str((str1))
print(str1 + str2)
str1 = int(str1)
str2 = int(str2)
print(str1 + str2)
# # Python Strings
x = "Müzik seviyorum."
print(x[3])
araba1 = "Ford"
araba2 = "Marcedes"
araba3 = "BMW"
print([araba2])
z = "Müzik seviyorum."
len(z)
# # Check the String
myTxt = "Yarın akşam sinemaya gideceğim."
print("akşam" in myTxt)
myTxt = "Yarın akşam sinemaya gideceğim."
if "Yarın" in myTxt:
print("Evet metinde Yarın kelimesi var.")
myTxt = "Yarın akşam sinemaya gideceğim."
if "Sabah" not in myTxt:
print("Hayır, Sabah kelimesi mevcut DEĞİLDİR")
|
"chart full-width"
import requests
import lxml.html as lh
import pandas as pd
df = pd.read_html("https://www.imdb.com/chart/top/?ref_=nv_mv_250")[0]
df.head()
|
import pandas as pd
from matplotlib import pyplot as plt
# load Social_Network_Ads csv file
df = pd.read_csv("/kaggle/input/logistic-regression/Social_Network_Ads.csv")
df
# convert categorical gender to coded data
df["Gender"] = df["Gender"].astype("category").cat.codes
df
# divide the dataset into train and test data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
df[["Age"]], df.Purchased, test_size=0.2, random_state=21
)
x_train, x_test, y_train, y_test
from sklearn.linear_model import LogisticRegression
# call regression function
model = LogisticRegression()
# fit the model according to train data
model.fit(x_train, y_train)
# score the model by test data
model.score(x_test, y_test)
predictions = model.predict(x_test)
model.predict([[23]])
# evaluate the model
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, predictions))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
TSLA = pd.read_csv("../input/tesla-stock-data-from-2010-to-2020/TSLA.csv")
TSLA.head()
# # library & dataset
# import seaborn as sns
# plt.figure( figsize = (20,10))
# sns.boxplot( x=TSLA["Close"] )
# plt.show()
from scipy import stats
pearson_coef, p_value = stats.pearsonr(TSLA["Open"], TSLA["Close"])
# Pearson coefficient / correlation coefficient - how much are the two columns correlated?
print(pearson_coef)
# P-value - how sure are we about this correlation?
print(p_value)
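# Rough interpretation: a coefficient close to +1 or -1 indicates a strong linear relationship,
# and a very small p-value suggests the correlation is unlikely to be due to chance.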
# libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# plot
plt.plot("Open", "High", data=TSLA, linestyle="-", marker="*")
plt.show()
TSLA.corr()
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 10))
# use the function regplot to make a scatterplot
sns.regplot(x=TSLA["High"], y=TSLA["Low"], marker=".")
plt.show()
# Without regression fit:
# sns.regplot(x=top50["Energy"], y=top50["Loudness..dB.."], fit_reg=False)
# plt.show()
slope, intercept, r_value, p_value, std_err = stats.linregress(
TSLA["Open"], TSLA["Close"]
)
print("y = " + str(slope) + "x + " + str(intercept))
# Same as (Pearson) correlation coefficient
print(r_value)
TSLA.describe()
import seaborn as sns
import matplotlib.pyplot as plt
# Servery_Data = sns.load_dataset('Servery_Data')
# use the function regplot to make a scatterplot
sns.regplot(x=TSLA["Open"], y=TSLA["Close"], fit_reg=False)
plt.show()
# Without regression fit:
# sns.regplot(x=df["sepal_length"], y=df["sepal_width"], fit_reg=False)
# sns.plt.show()
# libraries
import matplotlib.pyplot as plt
import numpy as np
# create data
plt.figure(figsize=(20, 10))
x = TSLA["Open"]
y = TSLA["Close"]
z = 0.5
plt.ylabel("Close", fontsize=20)
plt.xlabel("Open", fontsize=20)
plt.title("Tesla stock data from 2010 - 2020", fontsize=20)
# use the scatter function
plt.scatter(x, y, s=z * 1000, alpha=0.5)
plt.show()
|
# codes from Rodrigo Lima @rodrigolima82
from IPython.display import Image
Image(
url="https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcT_KKoimUiX1Z_QrC0ev_sXbb1Mr7qWnUsFv8kUnMOxLKML6Be3",
width=400,
height=400,
)
# Image quoteparrot.com - I couldn't agree more with you Idris Elba. It's just James Bond , not black James Bond.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# codes from Rodrigo Lima @rodrigolima82
from IPython.display import Image
Image(url="", width=400, height=400)
nRowsRead = 1000 # specify 'None' if want to read whole file
# bond.csv may have more rows in reality, but we are only loading/previewing the first 1000 rows
df = pd.read_csv(
"/kaggle/input/can-james-bond-be-black/bond.csv", delimiter=",", nrows=nRowsRead
)
df.dataframeName = "bond.csv"
nRow, nCol = df.shape
print(f"There are {nRow} rows and {nCol} columns")
df.head()
df.dtypes
df["Remain"].plot.hist()
plt.show()
df["Leave"].plot.box()
plt.show()
sns.pairplot(df, x_vars=["Remain"], y_vars="Leave", markers="+", size=4)
plt.show()
dfcorr = df.corr()
dfcorr
sns.heatmap(dfcorr, annot=True, cmap="winter_r")
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(14, 6))
sns.boxplot(x="Remain", y="Leave", data=df, showfliers=False)
# Codes from Binu @biphili
import matplotlib.style
import matplotlib as mpl
mpl.style.use("classic")
sns.jointplot(df["Remain"], df["Leave"], data=df, kind="scatter")
fig = plt.gcf()
fig.set_size_inches(10, 7)
fig = sns.violinplot(x="Remain", y="Leave", data=df)
# Only one ridiculous violin and flying saucers!
sns.set(style="darkgrid")
fig = plt.gcf()
fig.set_size_inches(10, 7)
fig = sns.swarmplot(x="Remain", y="Leave", data=df)
# Not a swarm, just plot.
fig = sns.lmplot(x="Remain", y="Leave", data=df)
df.plot.area(y=["Remain", "Leave"], alpha=0.4, figsize=(12, 6))
sns.factorplot("Remain", "Leave", hue="Response", data=df)
plt.show()
pd.crosstab([df.Remain], df.Leave).style.background_gradient(cmap="summer_r")
# word cloud
from wordcloud import WordCloud, ImageColorGenerator
text = " ".join(str(each) for each in df.Response)
# Create and generate a word cloud image:
wordcloud = WordCloud(
max_words=200, colormap="Set3", background_color="black"
).generate(text)
plt.figure(figsize=(10, 6))
plt.figure(figsize=(15, 10))
# Display the generated image:
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.figure(1, figsize=(12, 12))
plt.show()
# word cloud
from wordcloud import WordCloud, ImageColorGenerator
text = " ".join(str(each) for each in df.Group)
# Create and generate a word cloud image:
wordcloud = WordCloud(
max_words=200, colormap="Set3", background_color="black"
).generate(text)
plt.figure(figsize=(10, 6))
plt.figure(figsize=(15, 10))
# Display the generated image:
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.figure(1, figsize=(12, 12))
plt.show()
# How about a black female Jane Bond? Instead of just another Bond Girl? Think about that Kaggle community.
# codes from Rodrigo Lima @rodrigolima82
from IPython.display import Image
|
import os
import urllib.request
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np # linear algebra
import matplotlib.pyplot as plt # plotting
import seaborn as sns  # statistical data visualisation
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn import metrics
train_df = pd.read_csv("/kaggle/input/shai-club/train.csv")
test_df = pd.read_csv("/kaggle/input/shai-club/test.csv")
train_df.head()
test_df.head()
# remove ID column
df = train_df.drop("Id", axis=1)
df.head()
df.info()
df.describe()
df.isnull().any()
df.duplicated()
df.duplicated().sum()
df.drop_duplicates(keep="first", inplace=True)
df.duplicated().sum()
df.dtypes
# one_hot_encoded_training_predictors = pd.get_dummies(df)
# one_hot_encoded_training_predictors.head()
# Get list of categorical variables
s = df.dtypes == "object"
object_cols = list(s[s].index)
print("Categorical variables:", object_cols)
# Apply label encoder to each column with categorical data
label_encoder = LabelEncoder()
for col in object_cols:
df[col] = label_encoder.fit_transform(df[col])
df.head()
df.shape
plt.boxplot(df.depth)
fig = plt.figure(figsize=(10, 7))
plt.show()
# Dropping the outliers.
df = df[(df["depth"] < 75) & (df["depth"] > 45)]
df = df[(df["table"] < 80) & (df["table"] > 40)]
df = df[(df["x"] < 30)]
df = df[(df["y"] < 30)]
df = df[(df["z"] < 30) & (df["z"] > 2)]
df.shape
plt.figure(figsize=(10, 10))
heatmap = sns.heatmap(df.corr(), annot=True)
heatmap.set_title("Correlation Heatmap", fontdict={"fontsize": 12}, pad=12)
# Assigning the features as X and the target as y
X = df.drop(["price"], axis=1)
y = df["price"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=7
)
# Building pipelines of a standard scaler and a model for various regressors.
pipeline_lr = Pipeline(
[("scalar1", StandardScaler()), ("lr_classifier", LinearRegression())]
)
pipeline_dt = Pipeline(
[("scalar2", StandardScaler()), ("dt_classifier", DecisionTreeRegressor())]
)
pipeline_rf = Pipeline(
[("scalar3", StandardScaler()), ("rf_classifier", RandomForestRegressor())]
)
pipeline_kn = Pipeline(
[("scalar4", StandardScaler()), ("rf_classifier", KNeighborsRegressor())]
)
pipeline_xgb = Pipeline(
[("scalar5", StandardScaler()), ("rf_classifier", XGBRegressor())]
)
# List of all the pipelines
pipelines = [pipeline_lr, pipeline_dt, pipeline_rf, pipeline_kn, pipeline_xgb]
# Dictionary of pipelines and model types for ease of reference
pipe_dict = {
0: "LinearRegression",
1: "DecisionTree",
2: "RandomForest",
3: "KNeighbors",
4: "XGBRegressor",
}
# Fit the pipelines
for pipe in pipelines:
pipe.fit(X_train, y_train)
cv_results_rms = []
for i, model in enumerate(pipelines):
cv_score = cross_val_score(
model, X_train, y_train, scoring="neg_root_mean_squared_error", cv=10
)
cv_results_rms.append(cv_score)
print("%s: %f " % (pipe_dict[i], cv_score.mean()))
# Model prediction on test data
pred = pipeline_xgb.predict(X_test)
# Model Evaluation
print("R^2:", metrics.r2_score(y_test, pred))
print(
"Adjusted R^2:",
1
- (1 - metrics.r2_score(y_test, pred))
* (len(y_test) - 1)
/ (len(y_test) - X_test.shape[1] - 1),
)
print("MAE:", metrics.mean_absolute_error(y_test, pred))
print("MSE:", metrics.mean_squared_error(y_test, pred))
print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, pred)))
# Get list of categorical variables
s2 = test_df.dtypes == "object"
object_cols_test = list(s2[s2].index)
print("Categorical variables:", object_cols_test)
# Apply label encoder to each column with categorical data
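# Caution: fitting a fresh LabelEncoder on the test set can map categories to different integers
# than on the training set; reusing the encoders fit on the training data would be safer.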
label_encoder = LabelEncoder()
for col in object_cols_test:
test_df[col] = label_encoder.fit_transform(test_df[col])
test_df.head()
# remove ID column
test = test_df.drop("Id", axis=1)
test.head()
predictions = pd.Series(pipeline_xgb.predict(test))
pred = pd.DataFrame({"Id": test_df["Id"], "price": predictions})
pred.to_csv("submission.csv", index=False)
pred_df = pd.read_csv("./submission.csv")
pred_df.head()
|
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# There is 1 csv file in the current version of the dataset:
#
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# The next hidden code cells define functions for plotting data. Click on the "Code" button in the published kernel to reveal the hidden code.
# Distribution graphs (histogram/bar graph) of column data
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[
[col for col in df if nunique[col] > 1 and nunique[col] < 50]
] # For displaying purposes, pick columns that have between 1 and 50 unique values
nRow, nCol = df.shape
columnNames = list(df)
    nGraphRow = (nCol + nGraphPerRow - 1) // nGraphPerRow
plt.figure(
num=None,
figsize=(6 * nGraphPerRow, 8 * nGraphRow),
dpi=80,
facecolor="w",
edgecolor="k",
)
for i in range(min(nCol, nGraphShown)):
plt.subplot(nGraphRow, nGraphPerRow, i + 1)
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
valueCounts.plot.bar()
else:
columnDf.hist()
plt.ylabel("counts")
plt.xticks(rotation=90)
plt.title(f"{columnNames[i]} (column {i})")
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
plt.show()
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis="columns")  # drop columns with NaN
df = df[
[col for col in df if df[col].nunique() > 1]
] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(
f"No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2"
)
return
corr = df.corr()
plt.figure(
num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor="w", edgecolor="k"
)
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f"Correlation Matrix for {filename}", fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include=[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis="columns")
df = df[
[col for col in df if df[col].nunique() > 1]
] # keep columns where there are more than 1 unique values
columnNames = list(df)
if (
len(columnNames) > 10
): # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(
df, alpha=0.75, figsize=[plotSize, plotSize], diagonal="kde"
)
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k=1)):
ax[i, j].annotate(
"Corr. coef = %.3f" % corrs[i, j],
(0.8, 0.2),
xycoords="axes fraction",
ha="center",
va="center",
size=textSize,
)
plt.suptitle("Scatter and Density Plot")
plt.show()
# Now you're ready to read in the data and use the plotting functions to visualize the data.
# ### Let's check 1st file: /kaggle/input/listings.csv
nRowsRead = 1000 # specify 'None' if want to read whole file
# listings.csv may have more rows in reality, but we are only loading/previewing the first 1000 rows
df1 = pd.read_csv("/kaggle/input/listings.csv", delimiter=",", nrows=nRowsRead)
df1.dataframeName = "listings.csv"
nRow, nCol = df1.shape
print(f"There are {nRow} rows and {nCol} columns")
# Let's take a quick look at what the data looks like:
df1.head(20)
# Distribution graphs (histogram/bar graph) of sampled columns:
plotPerColumnDistribution(df1, 10, 5)
# Correlation matrix:
plotCorrelationMatrix(df1, 8)
# Scatter and density plots:
plotScatterMatrix(df1, 20, 10)
# ## Conclusion
# This concludes your starter analysis! To go forward from here, click the blue "Edit Notebook" button at the top of the kernel. This will create a copy of the code and environment for you to edit. Delete, modify, and add code as you please. Happy Kaggling!
# starting on my manipulations of the AirBnB data set. I want to know which listings get the most action and in what area.
# First off, I want to browse the names of all the columns to see what I'm working with.
# This is a pretty cool data set to me, because it gives me a sense of what it would be like to possibly one day become an AirBnB host.
# I've stayed at many an AirBnB, so getting some data on Seattle's landscape is a cool deep dive into the scene
print(df1.columns.values)
# my abbreviated data set will have the listing will have:
#'name'
#'host_location'
#'smart_location'
#'number_of_reviews'
#'review_scores_rating'
#'review_scores_location'
#'reviews_per_month'
#'calculated_host_listings_count'
df2 = df1[
[
"host_name",
"host_since",
"name",
"number_of_reviews",
"reviews_per_month",
"calculated_host_listings_count",
]
]
df2.head(10)
# reviews per month look like they could be correlated with number of reviews. That's not that interesting.
# this is a good start.
# now I want to see what's the most listings from a single host. Let's sort by calc_host_listings_count.
# I also want to see the host name, because I want to see who's running that Seattle AirBnB game.
df2.sort_values("calculated_host_listings_count", ascending=False)
# WOW, corp condos and Apts are running the game apparently. I wonder why it only shows 4 of theirs.
# Does that mean the rest of their 342 listings aren't in Seattle? Huh
import matplotlib.pyplot as plt
# the average of the list is 7.622
df2["calculated_host_listings_count"].mean()
# as expected, the number of reviews is highly correlated with reviews per month.
df2.plot(kind="scatter", x="reviews_per_month", y="number_of_reviews", s=10, alpha=0.5)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
# ## Import data
# ### Training data
train = pd.read_csv("/kaggle/input/titanic/train.csv")
train.head()
train.columns = train.columns.str.lower()
train.info()
# ### Test data
test = pd.read_csv("/kaggle/input/titanic/test.csv")
# ### Create a copy of test data
test_data_copy = test.copy()
test.head()
test.columns = test.columns.str.lower()
test.info()
test_data_copy.info()
# ## Exploratory Data Analysis
# ### Missing values in training data
train.isnull().sum()
plt.figure(figsize=(14, 8))
plt.title("Missing values in Training", {"fontsize": 25}, pad=20)
sns.heatmap(train.isnull(), cmap="viridis")
plt.figure(figsize=(14, 8))
plt.title("Age distribution in training data", {"fontsize": 25}, pad=20)
sns.boxplot(x="pclass", y="age", data=train, palette="rainbow")
train.groupby("pclass")["age"].median()
train["age"] = train["age"].fillna(
train.groupby(["sex", "pclass"])["age"].transform("median")
)
plt.figure(figsize=(14, 8))
sns.heatmap(train.isnull(), cmap="viridis")
train.drop("cabin", axis=1, inplace=True)
train.dropna(inplace=True)
plt.figure(figsize=(14, 8))
sns.heatmap(train.isnull(), cmap="viridis")
# ### Missing values in test data
test.isnull().sum()
plt.figure(figsize=(14, 8))
plt.title("Missing values in test data", {"fontsize": 25}, pad=20)
sns.heatmap(test.isnull(), cmap="viridis")
plt.figure(figsize=(14, 8))
plt.title("Age distribution in test data", {"fontsize": 25}, pad=20)
sns.boxplot(x="pclass", y="age", data=test, palette="rainbow")
test.groupby("pclass")["age"].median()
test["age"] = test["age"].fillna(
test.groupby(["sex", "pclass"])["age"].transform("median")
)
test.groupby("pclass")["fare"].median()
test["fare"] = test["fare"].fillna(
test.groupby(["sex", "pclass"])["fare"].transform("median")
)
test.drop("cabin", axis=1, inplace=True)
test.dropna(inplace=True)
plt.figure(figsize=(14, 8))
plt.title("Missing values in test data", {"fontsize": 25}, pad=20)
sns.heatmap(test.isnull(), cmap="viridis")
# ## Handling Categorical Features
train.info()
train_data_sex = pd.get_dummies(train["sex"], drop_first=True)
test_data_sex = pd.get_dummies(test["sex"], drop_first=True)
train_data_embark = pd.get_dummies(train["embarked"], drop_first=True)
test_data_embark = pd.get_dummies(test["embarked"], drop_first=True)
train.drop(["sex", "embarked", "name", "ticket", "passengerid"], axis=1, inplace=True)
test.drop(["sex", "embarked", "name", "ticket", "passengerid"], axis=1, inplace=True)
train.info()
test.info()
train = pd.concat([train, train_data_sex, train_data_embark], axis=1)
test = pd.concat([test, test_data_sex, test_data_embark], axis=1)
# ## Logistic Regression model
# ### Create X and Y arrays for training data
X_train = train.drop("survived", axis=1)
y_train = train["survived"]
X_train.shape, y_train.shape
# ### Create X array for test data
X_test = test
X_test.shape
# ## Fit the model
from sklearn.linear_model import LogisticRegression
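# Note: in scikit-learn, C is the inverse of the regularization strength, so C=0.001 applies strong L2 regularization.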
logmodel = LogisticRegression(C=0.001)
logmodel.fit(X_train, y_train)
y_pred = logmodel.predict(X_test)
submission = pd.DataFrame(
{"PassengerId": test_data_copy["PassengerId"], "Survived": y_pred}
)
submission.to_csv("titanic.csv", index=False)
print("My First Kaggle Submission")
# ## Implementing XG Boost
import xgboost as xgb
xg_cls = xgb.XGBClassifier()
xg_cls.fit(X_train, y_train)
xg_preds = xg_cls.predict(X_test)
submission = pd.DataFrame(
{"PassengerId": test_data_copy["PassengerId"], "Survived": xg_preds}
)
submission.to_csv("xgmodel.csv", index=False)
|
import keras
from PIL import Image
from keras import backend as k
from keras.models import Sequential
from keras.layers import Activation
from keras.layers.core import Dense, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.metrics import categorical_crossentropy
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from keras.applications.vgg16 import decode_predictions
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.applications.vgg16 import VGG16
vgg_16Model = VGG16()
vgg_16Model.summary()
type(vgg_16Model)
model = Sequential()
i = 0
for layer in vgg_16Model.layers:
if i < len(vgg_16Model.layers) - 1:
model.add(layer)
i = i + 1
model.summary()
for layer in model.layers:
layer.trainable = False
model.add(Dense(4, activation="softmax"))
model.summary()
|
# **Different types of run time exceptions**
# **1. Index Error**
marks = [53, 76, 43, 86, 33]
marks[5]  # we don't have an index 5, so an IndexError will occur
# **2. Key Error**
d = {"India": 63740938, "USA": 92307140, "China": 6316978309}
d["Russia"] # we don't have russia in the dictionary thus key error
# **3. Module Not Found Error**
import panda  # there is no library called panda (it's pandas), so a ModuleNotFoundError is raised
# **4. Type Error**
a = 56
b = "6"
c = a + b  # we can't add an int to a string, so a TypeError is raised
# **5. Name Error**
print(g)  # we haven't defined g yet, so a NameError is raised
# **6. Zero Division Error**
a = 45 / 0  # the denominator is 0, so a ZeroDivisionError is raised
# **Exception Handling**
try:
a = int(input("Enter the first no"))
b = int(input("Enter the second no"))
c = (a + b) / 2
print(c)
except:  # this will get executed if there is an exception in the try block
print("this is some code which needs to get executed")
print("The next lines of code will get executed")
try:
a = int(input("Enter the first no"))
b = int(input("Enter the second no"))
c = a / b
print(c)
except (
Exception
) as e:  # e holds the exception object, so the reason can be printed in the output
print("the value entered is not correct")
print(e)
print("The next lines of code will get executed")
try:
a = int(input("Enter the first no"))
b = int(input("Enter the second no"))
c = a + b
print(c)
except (
Exception
) as e:  # e holds the exception object, so the reason can be printed in the output
print("the value entered is not correct")
print(e)
print("The next lines of code will get executed")
try:
a = int(input("Enter the first no"))
b = int(input("Enter the second no"))
c = a / b
print(ch) # name error
except ZeroDivisionError:
print("The Denominator can't be Zero")
except NameError:
print("We need to initialize the variable first")
print("The next lines of code will get executed")
try:
a = int(input("Enter the first no"))
b = int(input("Enter the second no"))
c = a / b
print(c)
except ZeroDivisionError:
print("The Denominator can't be Zero")
except NameError:
print("We need to initialize the variable first")
else:  # else runs only if no exception was raised
print("Your input is perfectly fine")
print("Now let's move on to next set of code")
try:
a = int(input("Enter the first no"))
b = int(input("Enter the second no"))
c = a / b
print(c)
except ZeroDivisionError:
print("The Denominator can't be Zero")
except NameError:
print("We need to initialize the variable first")
else:  # else runs only if no exception was raised
print("Your input is perfectly fine")
finally:  # finally runs no matter what, whether there was an error or not
print("This will always get executed")
print("Now let's move on to next set of code")
|
# # Hacker News Submission Score Predictor w/ Keras and TensorFlow
# by Max Woolf ([@minimaxir](https://minimaxir.com))
# A model of a Hacker News post predictor, using a large number of Keras tricks with a TensorFlow backend.
# This notebook requires a GPU instance. (for the very-fast `CuDNNLSTM` to handle text data)
import pandas as pd
import numpy as np
import keras
from google.cloud import bigquery
# BigQuery:
# ```sql
# #standardSQL
# SELECT
# id,
# title,
# REGEXP_REPLACE(NET.HOST(url), 'www.', '') AS domain,
# FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S", timestamp, "America/New_York") AS created_at,
# score,
# TIMESTAMP_DIFF(LEAD(timestamp, 30) OVER (ORDER BY timestamp), timestamp, SECOND) as time_on_new
# FROM
# `bigquery-public-data.hacker_news.full`
# WHERE
# DATETIME(timestamp, "America/New_York") BETWEEN '2017-01-01 00:00:00' AND '2018-12-01 00:00:00'
# AND type = "story"
# AND url != ''
# AND deleted IS NULL
# AND dead IS NULL
# ORDER BY
# created_at DESC
# ```
# Use the query above to get it from BigQuery. (via Kaggle tutorial: https://www.kaggle.com/mrisdal/mentions-of-kaggle-on-hacker-news) Outside of Kaggle, you can get the data using `pandas-gbq`.
# The returned data is then shuffled (via the pandas sample below); this allows us to use the last 20% as a test set without introducing temporal dependencies.
query = """
#standardSQL
SELECT
id,
title,
REGEXP_REPLACE(NET.HOST(url), 'www.', '') AS domain,
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S", timestamp, "America/New_York") AS created_at,
score,
TIMESTAMP_DIFF(LEAD(timestamp, 30) OVER (ORDER BY timestamp), timestamp, SECOND) as time_on_new
FROM
`bigquery-public-data.hacker_news.full`
WHERE
DATETIME(timestamp, "America/New_York") BETWEEN '2017-01-01 00:00:00' AND '2018-12-01 00:00:00'
AND type = "story"
AND url != ''
AND deleted IS NULL
AND dead IS NULL
ORDER BY
created_at DESC
"""
client = bigquery.Client()
query_job = client.query(query)
iterator = query_job.result(timeout=30)
rows = list(iterator)
df = pd.DataFrame(data=[list(x.values()) for x in rows], columns=list(rows[0].keys()))
df = df.sample(frac=1, random_state=123).dropna().reset_index(drop=True)
df.head(10)
# ## Feature Engineering
# * Text, w/ sequences of length 15 (HN titles can be from 3 - 80 characters; since words are 5-6 characters)
# * Post domain (if in Top 100 by count; 0 otherwise)
# * Day of Week of Submission
# * Hour of Submission
# Other features I tried but did not use (since using them prevents forecasting, and they did not help improve the model):
# * Trend (time from first submission, scaled to `[0-1]`)
# * Time on `/new` page (scaled to `[0-1]`)
# Score is unmodified. Normally you'd `log`-transform a skewed dependent variable for an OLS, but that's not necessary for deep learning.
# ### Text
# Use a RNN to encode the title. Since we'll be using an unmasked RNN, length of the submission can be implied from the number of padding characters.
from keras.preprocessing import sequence
from keras.preprocessing.text import text_to_word_sequence, Tokenizer
num_words = 25000
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(df["title"].values)
maxlen = 15
titles = tokenizer.texts_to_sequences(df["title"].values)
titles = sequence.pad_sequences(titles, maxlen=maxlen)
print(titles[0:5,])
# ### Top Domains
# Identify the top *n* domains by count (in this case *n* = 300), then transform it to an *n*-dimensional vector for each post.
# * the amount was adjusted due to the embeddings
num_domains = 300
domain_counts = df["domain"].value_counts()[0:num_domains]
print(domain_counts)
from sklearn.preprocessing import LabelBinarizer
top_domains = np.array(domain_counts.index, dtype=object)
domain_encoder = LabelBinarizer()
domain_encoder.fit(top_domains)
domains = domain_encoder.transform(df["domain"].values.astype(str))
domains[0]
# ### Day-of-Week and Hour
# Convert day-of-week to a 7D vector and hours to a 24D vector. Both pandas and keras have useful functions for this workflow.
from keras.utils import to_categorical
dayofweeks = to_categorical(pd.to_datetime(df["created_at"]).dt.dayofweek)
hours = to_categorical(pd.to_datetime(df["created_at"]).dt.hour)
print(dayofweeks[0:5])
print(hours[0:5])
# ## Sample Weights
# Weight `score=1` samples lower so model places a higher importance on atypical submissions.
weights = np.where(df["score"].values == 1, 0.5, 1.0)
print(weights[0:5])
# ## Trend and Time on New
# Unused in final model, but kept here for reference.
from sklearn.preprocessing import MinMaxScaler
trend_encoder = MinMaxScaler()
trends = trend_encoder.fit_transform(
pd.to_datetime(df["created_at"]).values.reshape(-1, 1)
)
trends[0:5]
newtime_encoder = MinMaxScaler()
newtimes = trend_encoder.fit_transform(df["time_on_new"].values.reshape(-1, 1))
newtimes[0:5]
# ## Build the Model Prototype
# Add R^2 as a performance metric: https://jmlb.github.io/ml/2017/03/20/CoeffDetermination_CustomMetric4Keras/
from keras import backend as K
def r_2(y_true, y_pred):
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return 1 - SS_res / (SS_tot + K.epsilon())
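# r_2 above is the coefficient of determination: R^2 = 1 - SS_res / SS_tot, with K.epsilon() guarding against division by zero.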
# Minimizing `mse` loss, as is typical for regression problems, will not work: the model will realize that predicting 1 for everything accomplishes this task best.
# Instead, create a hybrid loss of `mae`, `msle`, and `poisson` (see Keras's docs for more info: https://github.com/keras-team/keras/blob/master/keras/losses.py) The latter two losses can account for very high values much better; perfect for the hyper-skewed data.
def hybrid_loss(y_true, y_pred):
weight_mae = 0.1
weight_msle = 1.0
weight_poisson = 0.1
mae_loss = weight_mae * K.mean(K.abs(y_pred - y_true), axis=-1)
first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.0)
second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.0)
msle_loss = weight_msle * K.mean(K.square(first_log - second_log), axis=-1)
poisson_loss = weight_poisson * K.mean(
y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1
)
return mae_loss + msle_loss + poisson_loss
from keras.models import Input, Model
from keras.layers import (
Dense,
Embedding,
CuDNNGRU,
CuDNNLSTM,
LSTM,
concatenate,
Activation,
BatchNormalization,
)
from keras.layers.core import Masking, Dropout, Reshape, SpatialDropout1D
from keras.regularizers import l1, l2
input_titles = Input(shape=(maxlen,), name="input_titles")
input_domains = Input(shape=(num_domains,), name="input_domains")
input_dayofweeks = Input(shape=(7,), name="input_dayofweeks")
input_hours = Input(shape=(24,), name="input_hours")
# input_trend = Input(shape=(1,), name='input_trend')
# input_newtime = Input(shape=(1,), name='input_newtime')
embedding_titles = Embedding(
num_words + 1, 100, name="embedding_titles", mask_zero=False
)(input_titles)
spatial_dropout = SpatialDropout1D(0.15, name="spatial_dropout")(
embedding_titles
) # changed from 0.2 sp.dropout
rnn_titles = CuDNNLSTM(128, name="rnn_titles")(spatial_dropout)
# new
embedding_domain = Embedding(
    num_domains + 1, 40, name="embedding_domains", mask_zero=False
)(input_domains)
concat = concatenate(
[rnn_titles, input_domains, input_dayofweeks, input_hours], name="concat"
)
num_hidden_layers = 3
hidden = Dense(128, activation="relu", name="hidden_1", kernel_regularizer=l2(1e-2))(
concat
)
hidden = BatchNormalization(name="bn_1")(hidden)
hidden = Dropout(0.4, name="dropout_1")(hidden)
for i in range(num_hidden_layers - 1):
hidden = Dense(
256,
activation="relu",
name="hidden_{}".format(i + 2),
kernel_regularizer=l2(1e-2),
)(hidden)
hidden = BatchNormalization(name="bn_{}".format(i + 2))(hidden)
hidden = Dropout(0.4, name="dropout_{}".format(i + 2))(hidden)
output = Dense(1, activation="relu", name="output", kernel_regularizer=l2(1e-2))(hidden)
model = Model(
inputs=[input_titles, input_domains, input_dayofweeks, input_hours],
outputs=[output],
)
model.compile(loss=hybrid_loss, optimizer="adam", metrics=["mse", "mae", r_2])
model.summary()
# The model uses a linear learning rate decay to allow it to learn better once it starts converging.
# Note: in this Kaggle Notebook, the training times out after 33 epochs when committing, so I set it to 25 here. You should probably train for longer. (50+ epochs)
from keras.callbacks import LearningRateScheduler, Callback
base_lr = 1e-3
num_epochs = 20
split_prop = 0.2
def lr_linear_decay(epoch):
return base_lr * (1 - (epoch / num_epochs))
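# Worked example of the schedule with base_lr=1e-3 and num_epochs=20:
# epoch 0 -> 1.0e-3, epoch 10 -> 5.0e-4, epoch 19 -> 5.0e-5 (linear decay toward zero).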
model.fit(
[titles, domains, dayofweeks, hours],
[df["score"].values],
batch_size=1024,
epochs=num_epochs,
validation_split=split_prop,
callbacks=[LearningRateScheduler(lr_linear_decay)],
sample_weight=weights,
)
# ## Check Predictions Against Validation Set
# Predicting against data that was not trained in the model: the model does this poorly. :(
val_size = int(split_prop * df.shape[0])
predictions = model.predict(
[titles[-val_size:], domains[-val_size:], dayofweeks[-val_size:], hours[-val_size:]]
)[:, 0]
predictions
df_preds = pd.concat(
[
pd.Series(df["title"].values[-val_size:]),
pd.Series(df["score"].values[-val_size:]),
pd.Series(predictions),
],
axis=1,
)
df_preds.columns = ["title", "actual", "predicted"]
# df_preds.to_csv('hn_val.csv', index=False)
df_preds.head(50)
# ## Check Predictions Against Training Set
# The model should be able to predict these better.
train_size = int((1 - split_prop) * df.shape[0])
predictions = model.predict(
[
titles[:train_size],
domains[:train_size],
dayofweeks[:train_size],
hours[:train_size],
]
)[:, 0]
df_preds = pd.concat(
[
pd.Series(df["title"].values[:train_size]),
pd.Series(df["score"].values[:train_size]),
pd.Series(predictions),
],
axis=1,
)
df_preds.columns = ["title", "actual", "predicted"]
# df_preds.to_csv('hn_train.csv', index=False)
df_preds.head(50)
|
import os
import numpy as np
import pandas as pd
from skimage.io import imread
import matplotlib.pyplot as plt
import gc
gc.enable()
print(os.listdir("../input/airbus-ship-detection"))
masks = pd.read_csv(
os.path.join("../input/airbus-ship-detection", "train_ship_segmentations_v2.csv")
)
not_empty = pd.notna(masks.EncodedPixels)
print(
not_empty.sum(), "masks in", masks[not_empty].ImageId.nunique(), "images"
)  # number of masks across non-empty images
print(
(~not_empty).sum(), "empty images in", masks.ImageId.nunique(), "total images"
)  # number of empty images among all images
masks.head()
masks["ships"] = masks["EncodedPixels"].map(
lambda c_row: 1 if isinstance(c_row, str) else 0
)
masks.head()
unique_img_ids = masks.groupby("ImageId").agg({"ships": "sum"}).reset_index()
unique_img_ids.head()
unique_img_ids["has_ship"] = unique_img_ids["ships"].map(
lambda x: 1.0 if x > 0 else 0.0
)
unique_img_ids.head()
ship_dir = "../input/airbus-ship-detection"
train_image_dir = os.path.join(ship_dir, "train_v2")
test_image_dir = os.path.join(ship_dir, "test_v2")
unique_img_ids["has_ship_vec"] = unique_img_ids["has_ship"].map(lambda x: [x])
unique_img_ids["file_size_kb"] = unique_img_ids["ImageId"].map(
lambda c_img_id: os.stat(os.path.join(train_image_dir, c_img_id)).st_size / 1024
)
unique_img_ids.head()
unique_img_ids = unique_img_ids[
unique_img_ids["file_size_kb"] > 50
] # keep only +50kb files
plt.hist(
    x=unique_img_ids["file_size_kb"],  # data to plot
    bins=6,  # number of bins in the histogram
    color="steelblue",  # fill color of the bars
    edgecolor="black",  # edge color of the bars
)
plt.xticks([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
plt.ylabel("number")
plt.xlabel("file_size_kb")
# unique_img_ids['file_size_kb'].hist()  # plot the histogram
masks.drop(["ships"], axis=1, inplace=True)
unique_img_ids.sample(7)
plt.title("Number of images of each size")
SAMPLES_PER_GROUP = 1500
balanced_train_df = unique_img_ids.groupby("ships").apply(
lambda x: x.sample(SAMPLES_PER_GROUP) if len(x) > SAMPLES_PER_GROUP else x
)
# group images by ship count; any group larger than SAMPLES_PER_GROUP is down-sampled to that size
rect = plt.hist(
    x=balanced_train_df["ships"],  # data to plot
    bins=16,  # number of bins in the histogram
    color="steelblue",  # fill color of the bars
    edgecolor="black",  # edge color of the bars
)
plt.yticks(range(0, 1800, 300))
plt.xticks(range(0, 15))
plt.ylabel("Number of images")
plt.xlabel("Number of ships")
plt.title("Number of images containing different number of vessels")
# balanced_train_df['ships'].hist(bins=balanced_train_df['ships'].max()+1)
print(balanced_train_df.shape[0], "images", balanced_train_df.shape)  # about 10,000 images sampled
from PIL import Image
x = np.empty(shape=(10680, 256, 256, 3), dtype=np.uint8)
y = np.empty(shape=10680, dtype=np.uint8)
for index, image in enumerate(balanced_train_df["ImageId"]):
image_array = (
Image.open("../input/airbus-ship-detection/train_v2/" + image)
.resize((256, 256))
.convert("RGB")
)
x[index] = image_array
y[index] = balanced_train_df[balanced_train_df["ImageId"] == image][
"has_ship"
].iloc[0]
print(x.shape)
print(y.shape)
# One-hot encode the target for the classification problem
from sklearn.preprocessing import OneHotEncoder
y_targets = y.reshape(len(y), -1)
enc = OneHotEncoder()
enc.fit(y_targets)
y = enc.transform(y_targets).toarray()
print(y.shape)
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2)
x_train.shape, x_val.shape, y_train.shape, y_val.shape
import keras.applications
print(dir(keras.applications))
from keras.applications.vgg16 import VGG16 as PTModel
# from keras.applications.resnet50 import ResNet50 as PTModel
# from keras.applications.inception_v3 import InceptionV3 as PTModel
# from keras.applications.xception import Xception as PTModel
# from keras.applications.densenet import DenseNet169 as PTModel
# from keras.applications.densenet import DenseNet121 as PTModel
# from keras.applications.resnet50 import ResNet50 as PTModel
img_width, img_height = 256, 256
model = PTModel(weights=None, include_top=False, input_shape=(img_width, img_height, 3))
# weights=None means no pretrained weights are loaded (passing 'imagenet' would load ImageNet weights)
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.models import Sequential, Model
from keras import backend as K
for layer in model.layers:
layer.trainable = False
x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation="relu")(x)
predictions = Dense(2, activation="softmax")(x)
# creating the final model
model_final = Model(inputs=model.input, outputs=predictions)
def recall(y_true, y_pred):
    # Calculates the recall
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
# """精确率"""
tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) # true positives
pp = K.sum(K.round(K.clip(y_pred, 0, 1))) # predicted positives
precision = tp / (pp + K.epsilon())
return precision
def f1_score(y_true, y_pred):
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
f1_score = 2 * (p * r) / (p + r + K.epsilon())
return f1_score
import tensorflow as tf
def precision(y_true, y_pred):
TP = tf.reduce_sum(y_true * tf.round(y_pred))
TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
precision = TP / (TP + FP)
return precision
# recall metric
def recall(y_true, y_pred):
TP = tf.reduce_sum(y_true * tf.round(y_pred))
TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
recall = TP / (TP + FN)
return recall
# F1-score metric
def f1_score(y_true, y_pred):
TP = tf.reduce_sum(y_true * tf.round(y_pred))
TN = tf.reduce_sum((1 - y_true) * (1 - tf.round(y_pred)))
FP = tf.reduce_sum((1 - y_true) * tf.round(y_pred))
FN = tf.reduce_sum(y_true * (1 - tf.round(y_pred)))
precision = TP / (TP + FP)
recall = TP / (TP + FN)
F1score = 2 * precision * recall / (precision + recall)
return F1score
from keras import optimizers
epochs = 10
lrate = 0.001
decay = lrate / epochs
# adam = optimizers.Adam(lr=lrate,beta_1=0.9, beta_2=0.999, decay=decay)
sgd = optimizers.SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model_final.compile(
loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"]
)
# categorical_crossentropy
# binary_crossentropy
model_final.summary()
model_final.fit(
x_train, y_train, validation_data=(x_val, y_val), epochs=1, batch_size=50
)
# score = model_final.evaluate(test_X, test_Y, batch_size=50)
model_final.save("ResNet_transfer_ship.h5")
# # collect the validation set
# from sklearn.model_selection import train_test_split
# train_ids, valid_ids = train_test_split(balanced_train_df,
# test_size = 0.2,
# stratify = balanced_train_df['ships'])
# #stratify keeps the proportion of ships the same in the train and validation splits
# train_df = pd.merge(masks, train_ids)  # merge adds the EncodedPixels mask info to train_ids
# valid_df = pd.merge(masks, valid_ids)
# print(train_df.shape[0], 'training masks')
# print(valid_df.shape[0], 'validation masks')
# BATCH_SIZE=48
# IMG_SCALING=(3, 3)
# # decode the RLE masks into images
# def make_image_gen(in_df, batch_size = BATCH_SIZE):
# all_batches = list(in_df.groupby('ImageId'))
# out_rgb = []
# out_mask = []
# while True:
# np.random.shuffle(all_batches)  # shuffle the order
# for c_img_id, c_masks in all_batches:
# rgb_path = os.path.join(train_image_dir, c_img_id)
# c_img = imread(rgb_path)
# c_mask = np.expand_dims(masks_as_image(c_masks['EncodedPixels'].values), -1)
# if IMG_SCALING is not None:
# c_img = c_img[::IMG_SCALING[0], ::IMG_SCALING[1]]
# c_mask = c_mask[::IMG_SCALING[0], ::IMG_SCALING[1]]
# out_rgb += [c_img]
# out_mask += [c_mask]
# #im = Image.fromarray(out_rgb)
# #im.save('../code/input/trainmask_v2/'+c_img_id.split('.')[0] + '.png')
# #
# # if len(out_rgb)>=batch_size:
# # yield np.stack(out_rgb, 0)/255.0, np.stack(out_mask, 0)
# # out_rgb, out_mask=[], []
# def masks_as_image(in_mask_list):
# # Take the individual ship masks and create a single mask array for all ships
# all_masks = np.zeros((768, 768), dtype = np.uint8)
# for mask in in_mask_list:
# if isinstance(mask, str):
# all_masks |= rle_decode(mask)
# return all_masks
# def rle_decode(mask_rle, shape=(768, 768)):
# '''
# mask_rle: run-length as string formated (start length)
# shape: (height,width) of array to return
# Returns numpy array, 1 - mask, 0 - background
# '''
# s = mask_rle.split()
# starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
# starts -= 1
# ends = starts + lengths
# img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
# for lo, hi in zip(starts, ends):
# img[lo:hi] = 1
#     return img.reshape(shape).T  # Needed to align to RLE direction
# # Get the original images and their masks
# train_gen = make_image_gen(train_df)
# train_x, train_y = next(train_gen)  # return the next item from the iterator
# print('x', train_x.shape, train_x.min(), train_x.max())
# print('y', train_y.shape, train_y.min(), train_y.max())
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# # Data Preprocessing
data = pd.read_csv("../input/chronicKidney.csv")
print(data)
X = np.array(data.iloc[:, 1:25].values)
Y = np.array(data.iloc[:, 25].values)
print(Y)
val = [5, 6, 7, 8, 18, 19, 20, 21, 22, 23]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
for v in val:
X[:, v] = labelencoder_X.fit_transform(X[:, v])
labelencoder_y = LabelEncoder()
Y = labelencoder_y.fit_transform(Y)
print(X)
print(Y)
from sklearn.preprocessing import Imputer
miss = [1, 2, 3, 4, 9, 10, 11, 12, 13, 14, 15, 16, 17]
print(X[:2])
calcul = X[:, v].reshape(-1, 1)
imputer = Imputer(missing_values="NaN", strategy="mean", axis=0)
imputer = imputer.fit(X[:, 1:5])
z = np.array(X[:, 1:3])
X[:, 1:5] = imputer.transform(X[:, 1:5])
z1 = np.array(X[:, 1:5])
imputer = Imputer(missing_values="NaN", strategy="mean", axis=0)
imputer = imputer.fit(X[:, 9:18])
z = np.array(X[:, 9:18])
X[:, 9:18] = imputer.transform(X[:, 9:18])
z1 = np.array(X[:, 9:18])
print(X[:2])
from sklearn.preprocessing import Imputer
imp = Imputer(missing_values="NaN", strategy="mean", axis=0)
X[:, [0]] = imp.fit_transform(X[:, [0]])
print(X)
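# Note: Imputer was removed in newer scikit-learn releases; SimpleImputer from
# sklearn.impute is the replacement. A minimal, self-contained sketch on a toy
# array (an illustration, not a rerun of the imputation above):
from sklearn.impute import SimpleImputer
_toy = np.array([[1.0, np.nan], [3.0, 4.0], [np.nan, 6.0]])
_imp = SimpleImputer(missing_values=np.nan, strategy="mean")
print(_imp.fit_transform(_toy))  # NaNs replaced by the column means (2.0 and 5.0)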
X.shape
Y.shape
np.unique(Y)
# # Train-Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, Y, random_state=104, test_size=0.3
)
# # Regular SVC
from sklearn.svm import SVC
model_svc = SVC(C=0.1, gamma=10, max_iter=10000, class_weight="balanced")
model_svc.fit(X_train, y_train.ravel())
y_pred_regular_svc = model_svc.predict(X_test)
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
acc_regular_svc = accuracy_score(y_test, y_pred_regular_svc)
print(acc_regular_svc)
y_pred_regular_svc
prec_regular_svc = precision_score(y_test, y_pred_regular_svc)
print(prec_regular_svc)
recall_regular_svc = recall_score(y_test, y_pred_regular_svc)
print(recall_regular_svc)
classification_report(y_test, y_pred_regular_svc)
confusion_matrix(y_test, y_pred_regular_svc)
f1_regular_svc = f1_score(y_test, y_pred_regular_svc)
print(f1_regular_svc)
# # Regular Logistic Regression
from sklearn.linear_model import LogisticRegression
lgr = LogisticRegression()
lgr.fit(X_train, y_train.ravel())
y_pred_regular_lgr = lgr.predict(X_test)
acc_regular_lgr = accuracy_score(y_test, y_pred_regular_lgr)
print(acc_regular_lgr)
prec_regular_lgr = precision_score(y_test, y_pred_regular_lgr)
print(prec_regular_lgr)
recall_regular_lgr = recall_score(y_test, y_pred_regular_lgr)
print(recall_regular_lgr)
classification_report(y_test, y_pred_regular_lgr)
confusion_matrix(y_test, y_pred_regular_lgr)
f1_regular_lgr = f1_score(y_test, y_pred_regular_lgr)
print(f1_regular_lgr)
# # Regular SGDClassifier
from sklearn.linear_model import SGDClassifier
sgd_regular = SGDClassifier()
sgd_regular = sgd_regular.fit(X_train, y_train.ravel())
y_pred_regular_sgd = sgd_regular.predict(X_test)
acc_regular_sgd = accuracy_score(y_test, y_pred_regular_sgd)
print(acc_regular_sgd)
precision_regular_sgd = precision_score(y_test, y_pred_regular_sgd)
print(precision_regular_sgd)
recall_regular_sgd = recall_score(y_test, y_pred_regular_sgd)
print(recall_regular_sgd)
f1_regular_sgd = f1_score(y_test, y_pred_regular_sgd)
print(f1_regular_sgd)
classification_report(y_test, y_pred_regular_sgd)
confusion_matrix(y_test, y_pred_regular_sgd)
# # Regular Random Forest
from sklearn.ensemble import RandomForestClassifier
rf_regular = RandomForestClassifier(n_estimators=20, max_depth=10, random_state=104)
rf_regular.fit(X_train, y_train.ravel())
y_pred_regular_rf = rf_regular.predict(X_test)
acc_regular_rf = accuracy_score(y_test, y_pred_regular_rf)
print(acc_regular_rf)
# # Regular Perceptron
from sklearn.linear_model import Perceptron
prc_regular = Perceptron()
prc_regular.fit(X_train, y_train.ravel())
y_pred_regular_prc = prc_regular.predict(X_test)
acc_regular_prc = accuracy_score(y_test, y_pred_regular_prc)
print(acc_regular_prc)
# # Regular Multilayer Perceptron
from sklearn.neural_network import MLPClassifier
mlprc_regular = MLPClassifier()
mlprc_regular.fit(X_train, y_train.ravel())
y_pred_regular_mlprc = mlprc_regular.predict(X_test)
acc_regular_mlprc = accuracy_score(y_test, y_pred_regular_mlprc)
print(acc_regular_mlprc)
# # SMOTE Data Preprocessing
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
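# Optional sanity check (illustrative only): SMOTE should leave the training classes
# balanced. Newer imblearn releases name this method fit_resample instead of fit_sample.
from collections import Counter
print("before resampling:", Counter(y_train.ravel()))
print("after resampling: ", Counter(y_train_res))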
# # SMOTE-Resampled Random Forest
from sklearn.ensemble import RandomForestClassifier
rfs = RandomForestClassifier(n_estimators=20, max_depth=10, random_state=104)
rfs.fit(X_train_res, y_train_res.ravel())
y_pred_rfs = rfs.predict(X_test)
acc_rfs = accuracy_score(y_test, y_pred_rfs)
print(acc_rfs)
confusion_matrix(y_test, y_pred_rfs)
from sklearn.metrics import f1_score, recall_score
f1_rfs = f1_score(y_test, y_pred_rfs)
print(f1_rfs)
rfs_rec = recall_score(y_test, y_pred_rfs)
print(rfs_rec)
# # SMOTE-Resampled SVC
svms = SVC(C=0.1, gamma=100, kernel="linear")
svms.fit(X_train_res, y_train_res.ravel())
y_pred_svms = svms.predict(X_test)
acc_svms = accuracy_score(y_test, y_pred_svms)
print(acc_svms)
confusion_matrix(y_test, y_pred_svms)
X_train_res.shape
# # SMOTE-Resampled Logistic Regression
from sklearn.linear_model import LogisticRegression
lgs = LogisticRegression()
lgs.fit(X_train_res, y_train_res.ravel())
y_pred_lgs = lgs.predict(X_test)
acc_lgs = accuracy_score(y_test, y_pred_lgs)
print(acc_lgs)
confusion_matrix(y_test, y_pred_lgs)
classification_report(y_test, y_pred_lgs)
f1_lgs = f1_score(y_test, y_pred_lgs)
print(f1_lgs)
recall_lgs = recall_score(y_test, y_pred_lgs)
print(recall_lgs)
precision_lgs = precision_score(y_test, y_pred_lgs)
print(precision_lgs)
# # SMOTE-Resampled Perceptron
from sklearn.linear_model import Perceptron
ps = Perceptron()
ps.fit(X_train_res, y_train_res.ravel())
y_pred_ps = ps.predict(X_test)
acc_ps = accuracy_score(y_test, y_pred_ps)
print(acc_ps)
confusion_matrix(y_test, y_pred_ps)
classification_report(y_test, y_pred_ps)
# # SMOTE-Resampled SGDClassifier
from sklearn.linear_model import SGDClassifier
sgds = SGDClassifier()
sgds.fit(X_train_res, y_train_res.ravel())
y_pred_sgds = sgds.predict(X_test)
acc_sgds = accuracy_score(y_test, y_pred_sgds)
print(acc_sgds)
confusion_matrix(y_test, y_pred_sgds)
classification_report(y_test, y_pred_sgds)
# # SMOTE-Resampled Multilayer Perceptron
from sklearn.neural_network import MLPClassifier
mlps = MLPClassifier()
mlps.fit(X_train_res, y_train_res.ravel())
y_pred_mlps = mlps.predict(X_test)
acc_mlps = accuracy_score(y_test, y_pred_mlps)
print(acc_mlps)
confusion_matrix(y_test, y_pred_mlps)
classification_report(y_test, y_pred_mlps)
# # ADASYN Data Preprocessing
from imblearn.over_sampling import ADASYN
ada = ADASYN()
X_train_res, y_train_res = ada.fit_sample(X_train, y_train.ravel())
# # Adasyn Random Forest
rfa = RandomForestClassifier(n_estimators=20, max_depth=10, random_state=104)
rfa.fit(X_train_res, y_train_res.ravel())
y_pred_rfa = rfa.predict(X_test)
acc_rfa = accuracy_score(y_test, y_pred_rfa)
print(acc_rfa)
f1_rfa = f1_score(y_test, y_pred_rfa)
print(f1_rfa)
confusion_matrix(y_test, y_pred_rfa)
classification_report(y_test, y_pred_rfa)
precision_rfa = precision_score(y_test, y_pred_rfa)
print(precision_rfa)
recall_rfa = recall_score(y_test, y_pred_rfa)
print(recall_rfa)
# # Adasyn SVM
svca = SVC(C=0.1, gamma=100, kernel="linear")
svca.fit(X_train_res, y_train_res.ravel())
y_pred_svca = svca.predict(X_test)
acc_svca = accuracy_score(y_test, y_pred_svca)
print(acc_svca)
f1_svca = f1_score(y_test, y_pred_svca)
print(f1_svca)
confusion_matrix(y_test, y_pred_svca)
# # ADASYN Passive Aggressive Classifier
from sklearn.linear_model import PassiveAggressiveClassifier
pac_model = PassiveAggressiveClassifier()
pac_model.fit(X_train_res, y_train_res.ravel())
y_pred_pac = pac_model.predict(X_test)
acc_pac = accuracy_score(y_test, y_pred_pac)
print(acc_pac)
confusion_matrix(y_test, y_pred_pac)
# # Adasyn Logistic regression
lra = LogisticRegression()
lra.fit(X_train_res, y_train_res.ravel())
y_pred_lra = lra.predict(X_test)
acc_lra = accuracy_score(y_test, y_pred_lra)
print(acc_lra)
classification_report(y_test, y_pred_lra)
confusion_matrix(y_test, y_pred_lra)
# # Random Over Sampler
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler()
X_train_res, y_train_res = ros.fit_sample(X_train, y_train.ravel())
# # ROS SVM
svcros = SVC(C=0.1, gamma=100, kernel="linear")
svcros.fit(X_train_res, y_train_res.ravel())
y_pred_svcros = svcros.predict(X_test)
acc_svcros = accuracy_score(y_test, y_pred_svcros)
print(acc_svcros)
f1_svcros = f1_score(y_test, y_pred_svcros)
print(f1_svcros)
classification_report(y_test, y_pred_svcros)
# # ROS Logistic regression
lrros = LogisticRegression()
lrros.fit(X_train_res, y_train_res.ravel())
y_pred_lrros = lrros.predict(X_test)
acc_lrros = accuracy_score(y_test, y_pred_lrros)
print(acc_lrros)
classification_report(y_test, y_pred_lrros)
confusion_matrix(y_test, y_pred_lrros)
# # ROS Random Forest
rfros = RandomForestClassifier(n_estimators=20, max_depth=10, random_state=104)
rfros.fit(X_train_res, y_train_res.ravel())
y_pred_rfros = rfros.predict(X_test)
acc_rfros = accuracy_score(y_test, y_pred_rfros)
print(acc_rfros)
classification_report(y_test, y_pred_rfros)
confusion_matrix(y_test, y_pred_rfros)
f1_rfros = f1_score(y_test, y_pred_rfros)
print(f1_rfros)
|
# # Multivariate Time Series with RNN
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# IGNORE THE CONTENT OF THIS CELL
# import tensorflow as tf
# tf.compat.v1.disable_eager_execution()
# ## Data
# Let's read in the data set:
df = pd.read_csv(
"/kaggle/input/energydata_complete.csv",
index_col="date",
infer_datetime_format=True,
)
df.head()
df.info()
df["Windspeed"].plot(figsize=(12, 8))
df["Appliances"].plot(figsize=(12, 8))
# ## Train Test Split
len(df)
df.head(3)
df.tail(5)
# Let's imagine we want to predict just 24 hours into the future. We don't need 3 months of data for that, so let's save some training time and select only the last month's data.
df.loc["2016-05-01":]
df = df.loc["2016-05-01":]
# Let's also round the data to two decimal places; otherwise the extra precision may cause issues with our network (we will normalize the data anyway, so that level of precision isn't useful to us)
df = df.round(2)
len(df)
# How many rows per day? We know the data is sampled every 10 minutes
24 * 60 / 10
test_days = 2
test_ind = test_days * 144
test_ind
# Notice the minus sign in our indexing
train = df.iloc[:-test_ind]
test = df.iloc[-test_ind:]
train
test
# ## Scale Data
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# IGNORE WARNING, IT'S JUST CONVERTING TO FLOATS
# WE ONLY FIT TO TRAINING DATA, OTHERWISE WE ARE CHEATING BY ASSUMING INFO ABOUT THE TEST SET
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
# # Time Series Generator
# This class takes in a sequence of data-points gathered at
# equal intervals, along with time series parameters such as
# stride, length of history, etc., to produce batches for
# training/validation.
# #### Arguments
# data: Indexable generator (such as list or Numpy array)
# containing consecutive data points (timesteps).
# The data should be 2D, and axis 0 is expected
# to be the time dimension.
# targets: Targets corresponding to timesteps in `data`.
# It should have same length as `data`.
# length: Length of the output sequences (in number of timesteps).
# sampling_rate: Period between successive individual timesteps
# within sequences. For rate `r`, timesteps
# `data[i]`, `data[i-r]`, ... `data[i - length]`
# are used to create a sample sequence.
# stride: Period between successive output sequences.
# For stride `s`, consecutive output samples would
# be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
# start_index: Data points earlier than `start_index` will not be used
# in the output sequences. This is useful to reserve part of the
# data for test or validation.
# end_index: Data points later than `end_index` will not be used
# in the output sequences. This is useful to reserve part of the
# data for test or validation.
# shuffle: Whether to shuffle output samples,
# or instead draw them in chronological order.
# reverse: Boolean: if `true`, timesteps in each output sample will be
# in reverse chronological order.
# batch_size: Number of timeseries samples in each batch
# (except maybe the last one).
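# A tiny illustration of what the generator yields, on toy data rather than the
# energy dataset: with length=3, the window [0, 1, 2] predicts 3, [1, 2, 3]
# predicts 4, and so on.
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
_toy = np.arange(10).reshape(-1, 1)
_toy_gen = TimeseriesGenerator(_toy, _toy, length=3, batch_size=1)
_X0, _y0 = _toy_gen[0]
print(_X0.ravel(), "->", _y0.ravel())  # [0 1 2] -> [3]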
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
# scaled_train
# define generator
length = 144 # Length of the output sequences (in number of timesteps)
batch_size = 1 # Number of timeseries samples in each batch
generator = TimeseriesGenerator(
scaled_train, scaled_train, length=length, batch_size=batch_size
)
len(scaled_train)
len(generator)
# scaled_train
# What does the first batch look like?
X, y = generator[0]
print(f"Given the Array: \n{X.flatten()}")
print(f"Predict this y: \n {y}")
# Now you will be able to edit the length so that it makes sense for your time series!
# ### Create the Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
scaled_train.shape
# define model
model = Sequential()
# LSTM layer
model.add(LSTM(100, input_shape=(length, scaled_train.shape[1])))
# Final Prediction (one neuron per feature)
model.add(Dense(scaled_train.shape[1]))
model.compile(optimizer="adam", loss="mse")
model.summary()
# ## EarlyStopping
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor="val_loss", patience=1)
validation_generator = TimeseriesGenerator(
scaled_test, scaled_test, length=length, batch_size=batch_size
)
model.fit_generator(
generator, epochs=10, validation_data=validation_generator, callbacks=[early_stop]
)
model.history.history.keys()
losses = pd.DataFrame(model.history.history)
losses.plot()
# ## Evaluate on Test Data
first_eval_batch = scaled_train[-length:]
first_eval_batch
first_eval_batch = first_eval_batch.reshape((1, length, scaled_train.shape[1]))
model.predict(first_eval_batch)
scaled_test[0]
# Now let's put this logic in a for loop to predict into the future for the entire test range.
# ----
# **NOTE: PAY CLOSE ATTENTION HERE TO WHAT IS BEING OUTPUT AND IN WHAT DIMENSIONS. ADD YOUR OWN PRINT() STATEMENTS TO SEE WHAT IS TRULY GOING ON (a few example print statements follow the loop below)!!**
n_features = scaled_train.shape[1]
test_predictions = []
first_eval_batch = scaled_train[-length:]
current_batch = first_eval_batch.reshape((1, length, n_features))
for i in range(len(test)):
    # predict 1 timestep ahead ([0] grabs the prediction vector out of the 1-sample batch)
current_pred = model.predict(current_batch)[0]
# store prediction
test_predictions.append(current_pred)
# update batch to now include prediction and drop first value
current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1)
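# A few illustrative print statements, as the note above suggests, to make the
# shapes explicit (not required for the forecast itself):
print("current_batch:", current_batch.shape)  # (1, length, n_features)
print("one prediction:", current_pred.shape)  # (n_features,)
print("collected predictions:", len(test_predictions))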
test_predictions
scaled_test
# ## Inverse Transformations and Compare
true_predictions = scaler.inverse_transform(test_predictions)
true_predictions
test
true_predictions = pd.DataFrame(data=true_predictions, columns=test.columns)
true_predictions
# ### Lets save our model
from tensorflow.keras.models import load_model
model.save("multivariate.h5")
|
import os
import sys
import cv2
import numpy as np
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization
from keras.layers import concatenate
from keras.models import Model, load_model
from keras.callbacks import LearningRateScheduler
import matplotlib.pyplot as plt
from keras import backend as K
import tensorflow as tf
train_image_path = sorted(os.listdir("../input/street/train/train"))
train_label_path = sorted(os.listdir("../input/street/train_labels/train_labels"))
val_image_path = sorted(os.listdir("../input/street/val/val"))
val_label_path = sorted(os.listdir("../input/street/val_labels/val_labels"))
test_image_path = sorted(os.listdir("../input/street/test/test"))
test_label_path = sorted(os.listdir("../input/street/test_labels/test_labels"))
def load_images(inputdir, inputpath, imagesize):
imglist = []
for i in range(len(inputpath)):
img = cv2.imread(inputdir + inputpath[i], cv2.IMREAD_COLOR)
img = cv2.resize(img, (imagesize, imagesize), interpolation=cv2.INTER_AREA)
# img = img[::-1]
imglist.append(img)
return imglist
IMAGE_SIZE = 128
train_image = load_images("../input/street/train/train/", train_image_path, IMAGE_SIZE)
train_label = load_images(
"../input/street/train_labels/train_labels/", train_label_path, IMAGE_SIZE
)
val_image = load_images("../input/street/val/val/", val_image_path, IMAGE_SIZE)
val_label = load_images(
"../input/street/val_labels/val_labels/", val_label_path, IMAGE_SIZE
)
test_image = load_images("../input/street/test/test/", test_image_path, IMAGE_SIZE)
test_label = load_images(
"../input/street/test_labels/test_labels/", test_label_path, IMAGE_SIZE
)
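# Scale pixel values to [0, 1]; dividing the Python lists by their NumPy max below
# also converts them to NumPy arrays.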
train_image /= np.max(train_image)
train_label /= np.max(train_label)
val_image /= np.max(val_image)
val_label /= np.max(val_label)
test_image /= np.max(test_image)
test_label /= np.max(test_label)
num = 64
plt.figure(figsize=(14, 7))
ax = plt.subplot(1, 2, 1)
plt.imshow(np.squeeze(train_image[num]))
ax = plt.subplot(1, 2, 2)
plt.imshow(np.squeeze(train_label[num]))
def Unet():
input_img = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
enc1 = Conv2D(128, kernel_size=3, strides=1, activation="relu", padding="same")(
input_img
)
enc1 = BatchNormalization()(enc1)
enc1 = Conv2D(128, kernel_size=3, strides=1, activation="relu", padding="same")(
enc1
)
enc1 = BatchNormalization()(enc1)
down1 = MaxPooling2D(pool_size=2, strides=2)(enc1)
enc2 = Conv2D(256, kernel_size=3, strides=1, activation="relu", padding="same")(
down1
)
enc2 = BatchNormalization()(enc2)
enc2 = Conv2D(256, kernel_size=3, strides=1, activation="relu", padding="same")(
enc2
)
enc2 = BatchNormalization()(enc2)
down2 = MaxPooling2D(pool_size=2, strides=2)(enc2)
enc3 = Conv2D(512, kernel_size=3, strides=1, activation="relu", padding="same")(
down2
)
enc3 = BatchNormalization()(enc3)
enc3 = Conv2D(512, kernel_size=3, strides=1, activation="relu", padding="same")(
enc3
)
enc3 = BatchNormalization()(enc3)
down3 = MaxPooling2D(pool_size=2, strides=2)(enc3)
enc4 = Conv2D(1024, kernel_size=3, strides=1, activation="relu", padding="same")(
down3
)
enc4 = BatchNormalization()(enc4)
enc4 = Conv2D(1024, kernel_size=3, strides=1, activation="relu", padding="same")(
enc4
)
enc4 = BatchNormalization()(enc4)
down4 = MaxPooling2D(pool_size=2, strides=2)(enc4)
enc5 = Conv2D(2048, kernel_size=3, strides=1, activation="relu", padding="same")(
down4
)
enc5 = BatchNormalization()(enc5)
enc5 = Conv2D(2048, kernel_size=3, strides=1, activation="relu", padding="same")(
enc5
)
enc5 = BatchNormalization()(enc5)
up4 = UpSampling2D(size=2)(enc5)
dec4 = concatenate([up4, enc4], axis=-1)
dec4 = Conv2D(1024, kernel_size=3, strides=1, activation="relu", padding="same")(
dec4
)
dec4 = BatchNormalization()(dec4)
dec4 = Conv2D(1024, kernel_size=3, strides=1, activation="relu", padding="same")(
dec4
)
dec4 = BatchNormalization()(dec4)
up3 = UpSampling2D(size=2)(dec4)
dec3 = concatenate([up3, enc3], axis=-1)
dec3 = Conv2D(512, kernel_size=3, strides=1, activation="relu", padding="same")(
dec3
)
dec3 = BatchNormalization()(dec3)
dec3 = Conv2D(512, kernel_size=3, strides=1, activation="relu", padding="same")(
dec3
)
dec3 = BatchNormalization()(dec3)
up2 = UpSampling2D(size=2)(dec3)
dec2 = concatenate([up2, enc2], axis=-1)
dec2 = Conv2D(256, kernel_size=3, strides=1, activation="relu", padding="same")(
dec2
)
dec2 = BatchNormalization()(dec2)
dec2 = Conv2D(256, kernel_size=3, strides=1, activation="relu", padding="same")(
dec2
)
dec2 = BatchNormalization()(dec2)
up1 = UpSampling2D(size=2)(dec2)
dec1 = concatenate([up1, enc1], axis=-1)
dec1 = Conv2D(128, kernel_size=3, strides=1, activation="relu", padding="same")(
dec1
)
dec1 = BatchNormalization()(dec1)
dec1 = Conv2D(128, kernel_size=3, strides=1, activation="relu", padding="same")(
dec1
)
dec1 = BatchNormalization()(dec1)
dec1 = Conv2D(3, kernel_size=1, strides=1, activation="sigmoid", padding="same")(
dec1
)
    model = Model(inputs=input_img, outputs=dec1)
return model
model = Unet()
model.summary()
def castF(x):
return K.cast(x, K.floatx())
def castB(x):
return K.cast(x, bool)
def iou_loss_core(true, pred): # this can be used as a loss if you make it negative
intersection = true * pred
notTrue = 1 - true
union = true + (notTrue * pred)
return (K.sum(intersection, axis=-1) + K.epsilon()) / (
K.sum(union, axis=-1) + K.epsilon()
)
def competitionMetric2(true, pred): # any shape can go - can't be a loss function
    thresholds = [0.5 + (i * 0.05) for i in range(10)]
# flattened images (batch, pixels)
true = K.batch_flatten(true)
pred = K.batch_flatten(pred)
pred = castF(K.greater(pred, 0.5))
# total white pixels - (batch,)
trueSum = K.sum(true, axis=-1)
predSum = K.sum(pred, axis=-1)
# has mask or not per image - (batch,)
true1 = castF(K.greater(trueSum, 1))
pred1 = castF(K.greater(predSum, 1))
# to get images that have mask in both true and pred
truePositiveMask = castB(true1 * pred1)
# separating only the possible true positives to check iou
testTrue = tf.boolean_mask(true, truePositiveMask)
testPred = tf.boolean_mask(pred, truePositiveMask)
# getting iou and threshold comparisons
iou = iou_loss_core(testTrue, testPred)
    truePositives = [castF(K.greater(iou, thr)) for thr in thresholds]
    # mean over thresholds for true positives, then total sum
truePositives = K.mean(K.stack(truePositives, axis=-1), axis=-1)
truePositives = K.sum(truePositives)
# to get images that don't have mask in both true and pred
trueNegatives = (1 - true1) * (1 - pred1) # = 1 -true1 - pred1 + true1*pred1
trueNegatives = K.sum(trueNegatives)
return (truePositives + trueNegatives) / castF(K.shape(true)[0])
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
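# Illustrative check of the Dice coefficient on toy masks (not part of the original
# training code): one overlapping pixel between a 2-pixel mask and a 1-pixel mask.
_a = K.constant([[1.0, 1.0, 0.0, 0.0]])
_b = K.constant([[1.0, 0.0, 0.0, 0.0]])
print(K.eval(dice_coef(_a, _b)))  # (2*1 + 1) / (2 + 1 + 1) = 0.75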
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=[dice_coef])
initial_learningrate = 2e-3
def lr_decay(epoch):
if epoch < 5:
return initial_learningrate
else:
return initial_learningrate * 0.99**epoch
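# Quick look at the schedule (illustrative only): the rate stays at the initial value
# for the first 5 epochs, then decays geometrically as initial * 0.99**epoch.
for _e in range(8):
    print(_e, round(lr_decay(_e), 6))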
training = model.fit(
train_image,
train_label,
epochs=30,
batch_size=32,
shuffle=True,
validation_data=(val_image, val_label),
verbose=1,
callbacks=[LearningRateScheduler(lr_decay, verbose=1)],
)
results = model.predict(test_image, verbose=1)
n = 20
plt.figure(figsize=(140, 14))
for i in range(3):
    # Original image
ax = plt.subplot(2, n, i + 1)
plt.imshow(test_image[i])
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
    # Predicted segmentation result
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(results[i])
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
|