import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import heapq
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import random
import math
from sklearn.neural_network import MLPClassifier
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
test_csv = pd.read_csv("/kaggle/input/mnist-in-csv/mnist_test.csv")
train_csv = pd.read_csv("/kaggle/input/mnist-in-csv/mnist_train.csv")
X_test = test_csv.drop(columns=["label"]).to_numpy()
X_train = train_csv.drop(columns=["label"]).to_numpy()
y_test = test_csv["label"].copy().to_numpy()
y_train = train_csv["label"].copy().to_numpy()
class NeuralNetwork:
def __init__(self, shape, lr=1e-2, print_interval=20):
self.lr = lr
self.print_interval = print_interval
self.layers = [np.zeros((shape[0], 1))]
self.layers_pre = []
self.weights = []
self.biases = []
for i in range(1, len(shape)):
n = shape[i]
layer = np.zeros((n, 1))
weights = np.random.uniform(-0.5, 0.5, (n, shape[i - 1]))
self.layers.append(layer)
self.layers_pre.append(layer.copy())
self.biases.append(layer.copy())
self.weights.append(weights)
def forward_prop(self, input_data):
self.layers[0] = input_data
for i in range(len(self.layers) - 1):
self.layers_pre[i] = self.weights[i] @ self.layers[i] + self.biases[i]
self.layers[i + 1] = self.activate(self.layers_pre[i])
return self.layers[-1]
    def back_prop(self, label):
        error = self.layers[-1] - label
        for i in range(len(self.layers) - 2, -1, -1):
            # fold the activation derivative into the error before it is used
            # for the updates and propagated to the previous layer
            delta = error * self.activate_deriv(self.layers_pre[i])
            new_error = self.weights[i].T @ delta
            self.weights[i] -= self.lr * delta @ self.layers[i].T
            self.biases[i] -= self.lr * delta
            error = new_error
def activate(self, x):
return np.maximum(x, 0)
def activate_deriv(self, x):
return x > 0
def train(self, epochs, examples, training_labels):
training_labels = self.one_hot_encode(training_labels)
for i in range(epochs + 1):
correct = 0
for example, label in zip(examples, training_labels):
norm_example = self.normalize(example)
norm_example.shape += (1,)
label.shape += (1,)
prediction = self.forward_prop(norm_example)
self.back_prop(label)
if i % self.print_interval == 0:
correct += int(np.argmax(prediction) == np.argmax(label))
if i % self.print_interval == 0:
print(f"Epoch: {i} Accuracy: {100 * correct / len(examples) :0.2f}%")
def one_hot_encode(self, X):
one_hot_X = np.zeros((X.size, X.max() + 1))
one_hot_X[np.arange(X.size), X] = 1
return one_hot_X
def predict(self, features):
new_features = self.normalize(features)
new_features.shape += (1,)
return np.argmax(self.forward_prop(new_features))
def normalize(self, features):
return (features - np.min(features)) / (np.max(features) - np.min(features))
def score(self, X, Y):
correct = 0
Y = self.one_hot_encode(Y)
for example, label in zip(X, Y):
norm_example = self.normalize(example)
norm_example.shape += (1,)
label.shape += (1,)
prediction = self.forward_prop(norm_example)
correct += int(np.argmax(prediction) == np.argmax(label))
return correct / len(X)
nn = NeuralNetwork((784, 20, 10))
nn.train(100, X_train[:5000], y_train[:5000])
print(f"nn score: {100 * nn.score(X_test, y_test) :0.2f}")
prediction = nn.predict(X_test[157])
test1 = np.array(Image.open("/kaggle/input/seventest/Capture (2).png"))
test1_pred = nn.predict(test1.reshape(1, 784))
test2 = np.array(Image.open("/kaggle/input/test-2/newimg.jpeg"))
test2_pred = nn.predict(test2.reshape(1, 784))
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.imshow(X_test[157].reshape(28, 28))
ax1.set_title(f"prediction={prediction}")
ax2.imshow(test1.reshape(28, 28))
ax2.set_title(f"prediction={test1_pred}")
ax3.imshow(test2.reshape(28, 28))
ax3.set_title(f"prediction={test2_pred}")
# model = MLPClassifier(alpha=1e-6, hidden_layer_sizes=(75, 50, 25))
# model.fit(X_train, y_train)
# model.score(X_test, y_test)
|
import os
import numpy as np
import pandas as pd
import random
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
additional = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
ypo = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
train = pd.concat([train, additional], sort=False).reset_index(drop=True)
train = train.drop_duplicates().reset_index(drop=True)
train.shape
from dataprep.eda import create_report
report = create_report(train)
report.show()
report = create_report(test)
report.show()
target = "target"
to_remove = ["id"]
X = train.drop(to_remove, axis=1)
X = X.drop(target, axis=1)
X_pred = test.drop(to_remove, axis=1)
y = train[target]
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
# from sklearn.preprocessing import StandardScaler
# scaler=StandardScaler()
scaler.fit(X)
X1 = pd.DataFrame(scaler.transform(X), columns=X.columns)
X_pred1 = pd.DataFrame(scaler.transform(X_pred), columns=X_pred.columns)
X1.shape, y.shape, X_pred1.shape
from flaml import AutoML
automl = AutoML()
from sklearn.linear_model import LogisticRegression
# from sklearn.model_selection import RepeatedStratifiedKFold
# rskf = RepeatedStratifiedKFold(n_splits=10, n_repeats=5)
automl_settings = {
"time_budget": 2000,
"metric": "roc_auc",
"task": "classification",
"estimator_list": [
"xgboost",
"xgb_limitdepth",
"lgbm",
], # ,"extra_tree","catboost","rf","lrl1"
"ensemble": {
"final_estimator": LogisticRegression(),
"passthrough": True,
},
"eval_method": "cv",
# "split_type":rskf
"n_splits": 5,
}
automl.fit(X_train=X1, y_train=y, **automl_settings)
ypo[target] = automl.predict_proba(X_pred1)[:, 1]
ypo
ypo.to_csv("./play_12_flaml_05.csv", index=False)
from verstack import LGBMTuner
tuner = LGBMTuner(
metric="auc", trials=2500, refit=True, verbosity=1, visualization=True, seed=321
)
# device_type = 'gpu')
tuner.fit(X1, y)
ypo[target] = tuner.predict_proba(X_pred1)
ypo
ypo.to_csv("./play_12_tuner_02.csv", index=False)
# read all seven submission files that feed the simple average below
ypo1 = pd.read_csv("/kaggle/input/play-12-ensemple/play_11_flaml_00 (1).csv")
ypo2 = pd.read_csv("/kaggle/input/play-12-ensemple/play_12_flaml_01 (1).csv")
ypo3 = pd.read_csv("/kaggle/input/play-12-ensemple/play_12_flaml_02 (1).csv")
ypo4 = pd.read_csv("/kaggle/input/play-12-ensemple/play_12_tuner_00 (1).csv")
ypo5 = pd.read_csv("/kaggle/working/play_12_flaml_04.csv")
ypo6 = pd.read_csv("/kaggle/working/play_12_flaml_05.csv")
ypo7 = pd.read_csv("/kaggle/working/play_12_tuner_02.csv")
ypo[target] = (
ypo1[target]
+ ypo2[target]
+ ypo3[target]
+ ypo4[target]
+ ypo5[target]
+ ypo6[target]
+ ypo7[target]
) / 7
ypo
ypo.to_csv("./play_12_best_ensermple_02.csv", index=False)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras import Sequential, models, layers
from tensorflow.keras.preprocessing import image
from numpy import asarray
import cv2
import PIL
from PIL import Image
import os
import glob
import io
# Get the path of files
# /kaggle/input/brain-tumor-detection
img_dir = "../input/brain-tumor-detection/"
no_images = os.listdir(img_dir + "no/")
yes_images = os.listdir(img_dir + "yes/")
len(no_images)
len(yes_images)
# initialize dataset and label arrays
datset = []
lab = []
# loop over each image in each category
# for images labelled with no
for image_name in no_images:
image = cv2.imread(img_dir + "no/" + image_name)
image = Image.fromarray(image, "RGB")
image = image.resize((64, 64))
datset.append(np.array(image))
lab.append(0)
# for images labelled with yes
for image_name in yes_images:
image = cv2.imread(img_dir + "yes/" + image_name)
image = Image.fromarray(image, "RGB")
image = image.resize((64, 64))
datset.append(np.array(image))
lab.append(1)
# convert dataset and labels to numpy arrays
data = np.asarray(datset)
l = np.asarray(lab)
print(data.shape, l.shape)
#
plt.imshow(data[400])
print(lab[400])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
data, l, test_size=0.2, random_state=42
)
print(X_train.shape)
print(y_train.shape)
# Model Training
model = Sequential(
[
        # Augmentation --- random rotation, scaling -- /255
# Resize --- (64,64,3)
layers.Conv2D(32, (3, 3), activation="relu", input_shape=(64, 64, 3)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(128, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(256, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
# layers.dropout(0.1),
# dense_layer
layers.Flatten(),
layers.Dense(100, activation="relu"),
layers.Dense(64, activation="relu"),
layers.Dense(2, activation="softmax"),
]
)
model.summary()
tf.keras.utils.plot_model(
model, to_file="model.png", show_shapes=True, expand_nested=True
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
model.fit(X_train, y_train, epochs=20, batch_size=32)
# **Preparing images for prediction**
# pred_set=[]
# pred_images=os.listdir(img_dir +'pred/')
# for image_name in pred_images:
# image=cv2.imread(img_dir +'pred/'+ image_name)
# image=Image.fromarray(image,'RGB')
# image=image.resize((64,64))
# pred_set.append(np.array(image))
# pred=np.asarray(pred_set)
prediction = model.predict(X_test)
X_test.shape
plt.imshow(X_test[1])
prediction[1]
prediction = np.argmax(prediction, axis=1)
prediction
from sklearn.metrics import accuracy_score
accuracy_score(prediction, y_test)
model.save("/kaggle/working/tumour")
model.save("/kaggle/working/tumour.h5")
model1 = tf.keras.models.load_model("/kaggle/working/tumour.h5")
model1.summary()
img = cv2.imread("/kaggle/input/test111/MRI.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (64, 64))
img.shape
img1 = tf.expand_dims(img, 0)
img1.shape
model1.predict(img1)[0]
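# A minimal usage sketch (assumption, not from the original notebook): map the
# softmax output of the single-image prediction above to a human-readable label.
class_labels = ["no tumour", "tumour"]  # index 0 = "no" folder, index 1 = "yes" folder
probs = model1.predict(img1)[0]
print(class_labels[int(np.argmax(probs))], "with probability", float(np.max(probs)))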
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
cp = sns.color_palette()
data = pd.read_csv("/kaggle/input/fastfood-nutrition/fastfood.csv")
data.head()
def preprocess_inputs(df):
df = df.copy()
cols_drop = ["salad"]
df = df.drop(cols_drop, axis=1)
return df
X = preprocess_inputs(data)
X
X.groupby("restaurant").max()
cols = [
"calories",
"cal_fat",
"total_fat",
"sat_fat",
"trans_fat",
"cholesterol",
"sodium",
"total_carb",
"fiber",
"sugar",
"protein",
"vit_a",
"vit_c",
"calcium",
"calories",
]
fig, axs = plt.subplots(5, 3, figsize=(15, 14), sharex=True)
for i, col in enumerate(cols):
row = i // 3
column = i % 3
ax = axs[row, column]
X.groupby("restaurant")[col].mean().plot(kind="bar", title=col, ax=ax, color=cp[1:])
plt.show()
from collections import Counter
list1 = X.item.unique()
new_list = [word for string in list1 for word in string.split()]
# new_list
# counted_list = Counter(new_list)
# counted_list
count = Counter(new_list)
sorted_count = sorted(count.items(), key=lambda x: x[1], reverse=True)
for item in sorted_count:
print(item[0], item[1])
# list1.extend(str.split( ))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import matplotlib.pyplot as plt
# # Importing the data
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
sample = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
train = train.drop("id", axis=1)
test = test.drop("id", axis=1)
# # EDA
print("TRAIN DATA")
print("\n")
display(train.head())
print("\n")
print("********************************************************************")
print("TRAIN AND TEST DATA SHAPE")
print("\n")
display(train.shape)
display(test.shape)
print("\n")
print("********************************************************************")
print("TRAIN DATA INFO")
print("\n")
display(train.info())
print("\n")
print("********************************************************************")
print("NUMBER OF UNIQUE IN TRAIN DATA")
print("\n")
display(train.nunique())
print("\n")
print("********************************************************************")
print("TRAIN DATA DESCRIPTION")
print("\n")
display(train.describe())
# * Our target variable is a **binary** variable. Would bdata e using classification models.
# * Have **414 observations and 6 independent variables**. Test data has 276 observations. No categorical variables. So won't be creating any dummy variables
# * Since all our variables are continuous, need to check for **outliers, scaling and skewness**
# * Will surely need to scale our data since variables are in '0, '00 and '000
# ## To Do:
# 1. Check for mising and duplicate rows.
# 2. Correlation between variables
# 3. Outliers
# 4. Skewness
# 5. Scale the variables
# 6. Graph the data comparing train and test data
# ## Missing and Duplicate values
print("Missing - Train Data")
display(pd.isna(train).sum())
print("\n")
print("There are ", pd.isna(train).sum().sum(), "missing values in train data")
print("\n")
print("\n")
print("\n")
print("Missing - Test Data")
display(pd.isna(test).sum())
print("\n")
print("There are ", pd.isna(test).sum().sum(), "missing values in test data")
print("There are ", train.duplicated().sum(), "duplicate rows in train data")
print("There are ", test.duplicated().sum(), "duplicate rows in test data")
train = train.drop_duplicates()
test = test.drop_duplicates()
# ## Correlation
sns.heatmap(train.corr(), annot=True)
# * No strong correlation between the target variable and the independent variables. Calc(0.47) and gravity(0.28) have the highest correlation
# * Correlation between a few independent variables; osmo-urea, osmo-cond, osmo-gravity
# ## Skewness
train.skew()
# * Not very high for any variable. No need for any transformation
# ## Graphs
# Define the number of rows and columns you want
n_rows = 2
n_cols = 3
# Create the subplots
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(30, 30))
for i, column in enumerate(test):
sns.histplot(train[column], ax=axes[i // n_cols, i % n_cols])
sns.histplot(test[column], ax=axes[i // n_cols, i % n_cols])
# Define the number of rows and columns you want
n_rows = 2
n_cols = 3
# Create the subplots
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(30, 30))
for i, column in enumerate(test):
sns.kdeplot(train[column], ax=axes[i // n_cols, i % n_cols])
sns.kdeplot(test[column], ax=axes[i // n_cols, i % n_cols])
# * Both the histplot and kdensity plots show similar trends for both train and test data
# # Transformation and Scaling
X_train = train.drop("target", axis=1)
X_test = test
y_train = train["target"]
scalar = StandardScaler()
X_train[X_train.columns] = pd.DataFrame(scalar.fit_transform(X_train))
X_test[X_test.columns] = pd.DataFrame(scalar.fit_transform(X_test))
# # Modelling
from sklearn.model_selection import cross_val_score
from IPython.display import display
seed = 19
# MODELS
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
models = [
LogisticRegression(max_iter=2000, random_state=seed),
KNeighborsClassifier(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis(),
DecisionTreeClassifier(random_state=seed),
SGDClassifier(random_state=seed),
RidgeClassifier(random_state=seed),
AdaBoostClassifier(random_state=seed),
GradientBoostingClassifier(random_state=seed),
XGBClassifier(),
ExtraTreesClassifier(random_state=seed),
RandomForestClassifier(random_state=seed),
BaggingClassifier(random_state=seed),
LGBMClassifier(random_state=seed),
CatBoostClassifier(logging_level="Silent"),
]
models_name = [
LogisticRegression().__class__.__name__,
KNeighborsClassifier().__class__.__name__,
LinearDiscriminantAnalysis().__class__.__name__,
QuadraticDiscriminantAnalysis().__class__.__name__,
DecisionTreeClassifier().__class__.__name__,
SGDClassifier().__class__.__name__,
RidgeClassifier().__class__.__name__,
AdaBoostClassifier().__class__.__name__,
GradientBoostingClassifier().__class__.__name__,
XGBClassifier().__class__.__name__,
ExtraTreesClassifier().__class__.__name__,
RandomForestClassifier().__class__.__name__,
BaggingClassifier().__class__.__name__,
LGBMClassifier().__class__.__name__,
CatBoostClassifier().__class__.__name__,
]
score = []
std = []
for i in models:
try:
score.append(
np.mean(
cross_val_score(
estimator=i, X=X_train, y=y_train, cv=3, scoring="roc_auc"
)
)
)
std.append(
np.std(
cross_val_score(
estimator=i, X=X_train, y=y_train, cv=3, scoring="roc_auc"
)
)
)
print(i, ": DONE")
except:
score.append("Error Occured")
print(i, ": ERROR")
table = pd.DataFrame(models_name)
table.columns = ["Models"]
table["Result"] = score
table["Deviation"] = std
table.sort_values(by=["Result"], ascending=False)
X_train["calc"] = X_train.calc.clip(None, 8)
X_train["gravity"] = X_train.gravity.clip(None, 1.03)
X_train["calc_gravity_ratio"] = X_train["calc"] / X_train["gravity"]
X_train["calc_urea_ratio"] = X_train["calc"] / X_train["urea"]
X_train["gravity_cond_ratio"] = X_train["gravity"] / X_train["cond"]
X_test["calc"] = X_test.calc.clip(None, 8)
X_test["gravity"] = X_test.gravity.clip(None, 1.03)
X_test["calc_gravity_ratio"] = X_test["calc"] / X_test["gravity"]
X_test["calc_urea_ratio"] = X_test["calc"] / X_test["urea"]
X_test["gravity_cond_ratio"] = X_test["gravity"] / X_test["cond"]
# # Ensemble Model
from sklearn.ensemble import VotingClassifier
voting = VotingClassifier(
estimators=[
("cat", CatBoostClassifier(logging_level="Silent")),
("log", LogisticRegression()),
("ld", LinearDiscriminantAnalysis()),
],
voting="soft",
)
scores = cross_val_score(voting, X_train, y_train, scoring="roc_auc", cv=4)
print(scores.mean())
# * Previously got 0.7862.
# * Getting 0.791 now
# # Final Submission
submission = pd.DataFrame(sample["id"])
model = VotingClassifier(
estimators=[
("cat", CatBoostClassifier(logging_level="Silent")),
("log", LogisticRegression()),
("ld", LinearDiscriminantAnalysis()),
],
voting="soft",
)
model.fit(X_train, y_train)
pred = model.predict_proba(X_test)
submission["target"] = pred[:, 1]
submission.to_csv("submission.csv", index=None)
submission
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
train.head()
train.describe()
train.isnull().sum()
train.duplicated().sum()
train.info()
train.dtypes
train["target"].value_counts()
train.drop("id", axis=1, inplace=True)
train.head()
# data_cleaning_suggestions(train)  # helper from an external data-quality library; its import is missing here, so it is left commented out
train.corr()
import seaborn as sns
sns.heatmap(train.corr(), annot=True)
X = train.drop("target", axis=1)
Y = train["target"]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
import sklearn as sk
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score  # needed by eval_model below
def eval_model(name, model, mean_only=True):
    scores = cross_val_score(model, X, Y, scoring="roc_auc", cv=5)
    print("{:<{}} {}".format(name + ":", 30, scores.mean()))
    if not mean_only:
        print(scores)
model = LogisticRegression(random_state=73)
model.fit(X_train, Y_train)
model.score(X_train, Y_train)
models = [("LogisticRegression", model)]
for name, model in models:
    eval_model(name, model)
df_sub = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
test.drop(["id"], axis=1, inplace=True)
test_pred = model.predict(test)
df_sub["target"] = test_pred
df_sub
df_sub.to_csv("sample_submission.csv", index=False)
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Import the libraries we are going to use.
import pandas as pd # data processing
import numpy as np # linear algebra
import matplotlib.pyplot as plt
import seaborn as sns # data visualization
# Import dataframe and visualize first 5 rows
dataf = pd.read_csv("/kaggle/input/marketing-data/ifood_df.csv")
pd.set_option("display.max_columns", None)
dataf.head()
# get the tail of the dataframe
dataf.tail()
# Overview of the data
dataf.info()
# Data contains 2205 rows and 39 columns
# **DATA PREPARATION**
# Some initial ideas come to mind: "Teenhome" and "Kidhome" can be combined into one feature that designates the number of children in the house.
# What I want to predict is whether they accepted any campaign, not a specific one, so we can drop those columns and keep the overall "accepted campaign" flag.
# There are 5 columns designating marital status; we can combine them into one to simplify the data structure.
# First check for missing values in the data
dataf.isnull().sum()
# There seems to be no missing data
# Check for duplicates
dataf_dup = dataf[dataf.duplicated()]
print("you have {} duplicate rows".format(len(dataf_dup)))
# Remove duplicated values
dataf.drop_duplicates(keep=False, inplace=True)
# Let's create a column that tells us the number of children in the house
dataf["N_children"] = dataf[["Kidhome", "Teenhome"]].sum(axis=1)
dataf = dataf.drop(["Kidhome", "Teenhome"], axis=1)
# Let's sum the education levels into one column; the higher the number, the higher the level
dataf["education_2n Cycle"] = dataf["education_2n Cycle"].replace({1: 1, 0: 0})
dataf["education_Basic"] = dataf["education_Basic"].replace({1: 2, 0: 0})
dataf["education_Graduation"] = dataf["education_Graduation"].replace({1: 3, 0: 0})
dataf["education_Master"] = dataf["education_Master"].replace({1: 4, 0: 0})
dataf["education_PhD"] = dataf["education_PhD"].replace({1: 5, 0: 0})
dataf["Level of education"] = dataf[
[
"education_2n Cycle",
"education_Basic",
"education_Graduation",
"education_Master",
"education_PhD",
]
].sum(axis=1)
# Drop education columns
dataf = dataf.drop(
[
"education_2n Cycle",
"education_Basic",
"education_Graduation",
"education_Master",
"education_PhD",
],
axis=1,
)
# Marital status is given in 5 columns; let's create one column to help visualization and simplify the data structure
# First, replace each positive flag with a distinct number per status.
dataf["marital_Married"] = dataf["marital_Married"].replace({1: 5, 0: 0})
dataf["marital_Single"] = dataf["marital_Single"].replace({1: 4, 0: 0})
dataf["marital_Together"] = dataf["marital_Together"].replace({1: 3, 0: 0})
dataf["marital_Widow"] = dataf["marital_Widow"].replace({1: 2, 0: 0})
dataf["marital_Divorced"] = dataf["marital_Divorced"].replace({1: 1, 0: 0})
# Combine it into a column
dataf["marital_status_str"] = dataf[
[
"marital_Married",
"marital_Single",
"marital_Together",
"marital_Widow",
"marital_Divorced",
]
].sum(axis=1)
# Map the numbers to a marital status
dataf["marital_status_str"] = dataf["marital_status_str"].map(
{1: "Divorced", 2: "Widow", 3: "Together", 4: "Single", 5: "Married"}
)
# Let's create a new column: 1 if the client accepted any campaign, 0 if they did not
dataf["AcceptedCmp"] = dataf[
["AcceptedCmp1", "AcceptedCmp2", "AcceptedCmp3", "AcceptedCmp4", "AcceptedCmp5"]
].sum(axis=1)
dataf["AcceptedCmp"] = (dataf["AcceptedCmp"] > 0).astype(int)
# Let's drop the unnecessary columns
dataf = dataf.drop(
[
"AcceptedCmp1",
"AcceptedCmp2",
"AcceptedCmp3",
"AcceptedCmp4",
"AcceptedCmp5",
"AcceptedCmpOverall",
],
axis=1,
)
# Let's clean the data some more by dropping some unimportant columns
dataf = dataf.drop(["Z_CostContact", "Z_Revenue"], axis=1)
# Let's see the correlations with the target feature
correlations = dataf.corr()["AcceptedCmp"].sort_values(ascending=False)
correlations
# There seem to be no strong correlations with the target
# **DATA VISUALIZATION**
# **First, let's analyze the categorical features to see what we can discover**
# Let's see the number of people who accepted the campaign
sns.countplot(x=dataf["AcceptedCmp"])
A, Na = dataf["AcceptedCmp"].value_counts()
print("Number of accepted =", A)
print("Number of rejected = ", Na)
# We can see that the majority of people rejected the campaigns
sns.countplot(x="Level of education", data=dataf)
plt.show()
# In the dataset, most people graduated from high school (3), followed by people with PhDs and then master's degrees.
sns.stripplot(data=dataf, x="Level of education", y="MntTotal")
# It seems that people with higher levels of education tend to spend more
sns.relplot(
x="Income",
y="MntTotal",
data=dataf,
col="N_children",
hue="N_children",
kind="scatter",
palette="deep",
)
# We can observe that as the number of children increases, the amount spent and income decrease. This might be counterintuitive, as it is usually thought that people have children in proportion to their economic means.
sns.countplot(x="marital_status_str", data=dataf)
# Most of the people are married, followed by a number of people who are together (non-marital relationship) and single
# Let's see if there is any relationship between marital status, income and amount spent
sns.boxplot(data=dataf, x="Income", y="marital_status_str")
sns.boxplot(data=dataf, x="MntTotal", y="marital_status_str")
# Widows seem to have slightly higher income and spend significantly more; this is probably related to government grants
# Create dense vector for marital status columns
# dataf['marital_status_vec'] = dataf.apply(
# lambda row: np.array([row['marital_Divorced'], row['marital_Married'], row['marital_Single'],row['marital_Together'], row["marital_Widow"]]), axis=1)
# Remove the one-hot encoded columns
# dataf.drop(['marital_Divorced', 'marital_Married', 'marital_Single', "marital_Widow",'marital_Together'], axis=1, inplace=True)
# The apply method is used to apply a lambda function to each row of the dataframe. The lambda function creates a numpy array from the one-hot encoded columns and assigns it to a new column called marital_status_vec. Finally, the one-hot encoded columns are removed from the dataframe using the drop method.
# Lets do the same for education features
# data['education_vec'] = data.apply(
# lambda row: np.array([row['education_2n Cycle'], row['education_Basic'], row['education_Graduation'],row['education_Master'], row["education_PhD"]]), axis=1)
# Remove the one-hot encoded columns
# data.drop(['education_2n Cycle', 'education_Basic', 'education_Graduation', "education_Master",'education_PhD'], axis=1, inplace=True)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
df.head()
df.columns[-1]
df.drop(labels=df.columns[-1], axis=1, inplace=True)
df.shape  # 569 instances and 33 attributes
print("{} records and {} attributes".format(df.shape[0], df.shape[1]))
df.info()
df.dtypes.value_counts()
df["diagnosis"].value_counts()
df.describe().T
df.isnull().sum()
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html Q 2
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler
scale = MinMaxScaler()
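# A minimal sketch (assumption, not part of the original notebook): apply the
# MinMaxScaler created above to the numeric feature columns, assuming the usual
# "id" and "diagnosis" columns of this dataset are left untouched.
feature_cols = df.columns.drop(["id", "diagnosis"])
df_scaled = df.copy()
df_scaled[feature_cols] = scale.fit_transform(df[feature_cols])
df_scaled.describe().T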
|
# # Multiple Selecting & Filtering in Pandas
import pandas as pd
film = pd.read_csv("http://bit.ly/imdbratings")
film.head(3)
# ## Practical column selection
film["title"].head()
film[["title", "genre"]].head()
# ## loc method
film.loc[0,]
film.loc[[0, 2, 4],]
film.loc[0:2,]
film.loc[0:5, "title"]
film.loc[0:5, "title":"genre"]
film.loc[0:5, "title":"duration"]
film.loc[:, "title":"genre"].head()
film.loc[film.genre == "Crime",].head(3)
film.loc[film.genre == "Crime", ["title", "duration"]]
film.loc[film.genre == "Crime", "title":"duration"]
# ## iloc method
film.iloc[:, 0].head()
film.columns
film.iloc[:, [0, 3]].head()
film.iloc[:, 0:3].head()
film.iloc[0, 0:3]
film.iloc[0:5, 0:3]
film.iloc[0:5,].head(3)
film.iloc[0:5, :].head(3)
# ## Multiple filtering
film.loc[film.duration >= 200,].head(3)
film.loc[film.duration >= 200, "title"].head()
film[(film.duration >= 200) | (film.genre == "Crime") | (film.genre == "Action")].head(
3
)
film[film.genre.isin(["Crime", "Drama", "Action"])]
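# A small extra sketch (not in the original): combining conditions with & (AND)
# works the same way as | above; each condition needs its own parentheses.
film[(film.duration >= 200) & (film.genre == "Drama")].head(3)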
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Importing Dataset
df = pd.read_csv("/kaggle/input/simple-linear-regression-placement-data/placement.csv")
# # Inspecting the Dataset
df.head()
df.sample(5)
df.isnull().sum()
df.info()
df.describe()
# # Checking the distribution of cgpa and placement_exam_marks columns
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(16, 5))
plt.subplot(1, 2, 1)
sns.distplot(df["cgpa"])
plt.subplot(1, 2, 2)
sns.distplot(df["placement_exam_marks"])
plt.show()
print("Minimum value of cgpa is", df["cgpa"].min())
print("Maximum value of cgpa is", df["cgpa"].max())
print("Mean value of cgpa is", df["cgpa"].mean())
print("Standard Deviation value of cgpa is", df["cgpa"].std())
highest = df["cgpa"].mean() + 3 * df["cgpa"].std()
lowest = df["cgpa"].mean() - 3 * df["cgpa"].std()
print("Highest allowed", highest)
print("Lowest allowed", lowest)
df[(df["cgpa"] > highest) | (df["cgpa"] < lowest)]
# ## Trimming
df1 = df[(df["cgpa"] < highest) & (df["cgpa"] > lowest)]
df1
# # By Using Z-score
df["cgpa_z"] = (df["cgpa"] - df["cgpa"].mean()) / df["cgpa"].std()
df.head()
df[(df["cgpa_z"] > 3) | (df["cgpa_z"] < -3)]
# ## Trimming
df2 = df[(df["cgpa_z"] < 3) & (df["cgpa_z"] > -3)]
df2
# ## Capping the Dataset to preserve the shape
df4 = df.copy()
df4["cgpa"] = np.where(
df4["cgpa"] > highest, highest, np.where(df4["cgpa"] < lowest, lowest, df4["cgpa"])
)
df.describe()
df4.describe()
# # IQR Method
sns.boxplot(df["placement_exam_marks"])
percentile25 = df["placement_exam_marks"].quantile(0.25)
percentile75 = df["placement_exam_marks"].quantile(0.75)
percentile25, percentile75
IQR = percentile75 - percentile25
IQR
lower_limit = percentile25 - 1.5 * IQR
upper_limit = percentile75 + 1.5 * IQR
upper_limit, lower_limit
df[
(df["placement_exam_marks"] > upper_limit)
| (df["placement_exam_marks"] < lower_limit)
]
# ## Trimming
df3 = df[df["placement_exam_marks"] < upper_limit]
df3.shape
df3["placement_exam_marks"]
plt.figure(figsize=(16, 8))
plt.subplot(2, 2, 1)
sns.distplot(df["placement_exam_marks"])
plt.subplot(2, 2, 2)
sns.boxplot(df["placement_exam_marks"])
plt.subplot(2, 2, 3)
sns.distplot(df3["placement_exam_marks"])
plt.subplot(2, 2, 4)
sns.boxplot(df3["placement_exam_marks"])
plt.show()
# ## Capping
df5 = df.copy()
df5["placement_exam_marks"] = np.where(
df5["placement_exam_marks"] > upper_limit,
upper_limit,
np.where(
df5["placement_exam_marks"] < lower_limit,
lower_limit,
df5["placement_exam_marks"],
),
)
df5.describe()
plt.figure(figsize=(16, 8))
plt.subplot(2, 2, 1)
sns.distplot(df["placement_exam_marks"])
plt.subplot(2, 2, 2)
sns.boxplot(df["placement_exam_marks"])
plt.subplot(2, 2, 3)
sns.distplot(df5["placement_exam_marks"])
plt.subplot(2, 2, 4)
sns.boxplot(df5["placement_exam_marks"])
plt.show()
# # Winsorization Technique
df["cgpa"].describe()
percentile95 = df["cgpa"].quantile(0.95)
percentile05 = df["cgpa"].quantile(0.05)
IQR1 = percentile95 - percentile05
percentile95, percentile05, IQR1
lower_limit = percentile05 - 1.5 * IQR1
upper_limit = percentile95 + 1.5 * IQR1
upper_limit, lower_limit
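# A minimal sketch (assumption, not in the original): winsorize "cgpa" by capping
# values below the 5th percentile and above the 95th percentile at those percentiles.
df6 = df.copy()
df6["cgpa"] = df6["cgpa"].clip(lower=percentile05, upper=percentile95)
df6["cgpa"].describe()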
|
# # Import Data
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from sklearn import datasets, ensemble
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings("ignore")
# import numpy as np # linear algebra
# import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv(
"/kaggle/input/us-college-completion-rate-analysis/train.csv", index_col=0
)
print(data.head())
print(data.shape)
print(data.describe())
x = data.drop(["Completion_rate"], axis=1)
y = data["Completion_rate"]
x, y
# # Create a simple regression model
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
print(r_sq)
test = pd.read_csv(
"/kaggle/input/us-college-completion-rate-analysis/x_test.csv", index_col=0
)
print(test.shape)
x_pred = test
y_pred = model.predict(x_pred)
print(y_pred)
from sklearn import preprocessing # standardize the data
scaler = preprocessing.StandardScaler()
num_features = [0, 1, 2, 4, 5]
num_scaled = scaler.fit_transform(x.iloc[:, num_features])
for i in range(len(num_features)):
x.iloc[:, num_features[i]] = num_scaled[:, i]
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
print(r_sq)  # does not improve much
x_pred = test
scaler = preprocessing.StandardScaler()
num_features = [0, 1, 2, 4, 5]
num_scaled = scaler.fit_transform(x_pred.iloc[:, num_features])
for i in range(len(num_features)):
x_pred.iloc[:, num_features[i]] = num_scaled[:, i]
y_pred = model.predict(x_pred)
# save result
# submission = pd.DataFrame.from_dict({'Completion_rate': y_pred})
# submission.to_csv('/kaggle/working/submission.csv', index = True, index_label = "id")
# split train and test set
from sklearn import preprocessing
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
# cross validation
# from sklearn.model_selection import cross_val_score
# def display_scores(scores):
# print("Scores: ",scores)
# print("Mean: ",scores.mean())
# print("Std: ",scores.std())
# # tuning
# # Random Forest
# from sklearn.ensemble import RandomForestRegressor
# forest_reg=RandomForestRegressor()
# forest_reg.fit(x,y)
# forest_scores=cross_val_score(forest_reg,x,y, scoring="neg_mean_squared_error",cv=10)
# forest_rmse_scores=np.sqrt(-forest_scores)
# display_scores(forest_rmse_scores)
# from sklearn.model_selection import GridSearchCV
# param_grid=[
# {'n_estimators':[5,15,20,25,30,35,40],'max_features':[2,4,6,8,10]},
# {'bootstrap':[True,False],'n_estimators':[3,6,10,13],'max_features':[2,3,4]}
# ]
# forest_reg=RandomForestRegressor()
# grid_search=GridSearchCV(forest_reg,param_grid,cv=5,
# scoring='neg_mean_squared_error',return_train_score=True)
# grid_search.fit(x,y)
# #best hyperparameter
# print("Best params: ",grid_search.best_params_)
# #best predictor
# print("Best estimator:\n",grid_search.best_estimator_)
from sklearn.model_selection import cross_val_score
regr = ensemble.RandomForestRegressor(
bootstrap=False, max_features=3, n_estimators=120
) # 4 40
regr.fit(X_train, y_train)
forest_scores = cross_val_score(
regr, X_train, y_train, scoring="neg_mean_squared_error", cv=10
)
forest_rmse_scores = np.sqrt(-forest_scores)
print("Training Score:%f" % regr.score(X_train, y_train))
print("Testing Score:%f" % regr.score(X_test, y_test))
# # Create Submisssion
model = regr.fit(X_train, y_train)
y_pred = model.predict(x_pred)
print(y_pred)
submission = pd.DataFrame.from_dict({"Completion_rate": y_pred})
submission
submission.to_csv("/kaggle/working/submission.csv", index=True, index_label="id")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
import plotly.graph_objects as go
import squarify
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/customers-dataset/Customers.csv")
df
df.info()
# there are no dtype errors
# checking missing values
df.isnull().sum()
df = df.fillna("unknown_Profession")
df.duplicated().sum()
# there are no duplicated values
df.nunique()
df["Profession"].unique()
Profession_df = df.groupby("Profession")[
["Annual Income ($)", "Spending Score (1-100)"]
].sum()
Profession_df = Profession_df.sort_values("Spending Score (1-100)", ascending=0)
Profession_df
sns.barplot(
x=Profession_df["Spending Score (1-100)"],
y=Profession_df.index,
orient="h",
color="c",
data=Profession_df,
)
sns.barplot(
x=Profession_df["Annual Income ($)"],
y=Profession_df.index,
orient="h",
color="c",
data=Profession_df,
)
sns.regplot(
x=Profession_df["Annual Income ($)"], y=Profession_df["Spending Score (1-100)"]
)
Profession_df[["Annual Income ($)", "Spending Score (1-100)"]].corr()
Gender = df["Gender"].value_counts()
plt.pie(Gender, labels=Gender.index, autopct="%.0f%%")
df.head()
# work_ex_and_spending=df[['Work Experience','Spending Score (1-100)']]
work_ex_and_spending = df.groupby("Work Experience")["Spending Score (1-100)"].sum()
work_ex_and_spending
plt.figure(figsize=(8, 8))
squarify.plot(
work_ex_and_spending, value=work_ex_and_spending.index, alpha=0.8, pad=True
)
plt.axis("off")
plt.title("years of work experience vs spending")
plt.show()
# subplot
plt.subplot(2, 2, 1)
sns.barplot(
x=Profession_df["Spending Score (1-100)"],
y=Profession_df.index,
orient="h",
color="c",
data=Profession_df,
)
plt.subplot(2, 2, 2)
sns.barplot(
x=Profession_df["Annual Income ($)"],
y=Profession_df.index,
orient="h",
color="c",
data=Profession_df,
)
plt.subplot(2, 2, 3)
sns.regplot(
x=Profession_df["Annual Income ($)"], y=Profession_df["Spending Score (1-100)"]
)
plt.show()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import statsmodels.api as sm
import os
def get_data(path="/kaggle/input/2023-collapsed-banks-stock-prices-2017-2023"):
data_dict = {}
for i in os.listdir(path):
data_dict[i.split(".")[0]] = pd.read_csv(os.path.join(path, i))
return data_dict
data_dict = get_data()
def get_stats(df, name):
print(f"Here are some basic stats for {name}")
print(
f"Highest Price recorded: {np.round(df.High.max().item(), 2)} on {df.loc[df.High==df.High.max()].Date.item()}"
)
print(
f"Lowest Price recorded: {np.round(df.Low.min().item(), 2)} on {df.loc[df.Low==df.Low.min()].Date.item()}"
)
print(f"Average Return over time: {df.Close.pct_change().mean() *100}%")
df_ = df.copy()
df_["move"] = df["Open"] - df["Close"]
print(
f"Maximum movement in a day: {np.round(max(df_.move), 2)} points on {df_.loc[df_.move==max(df_.move)].Date.item()}"
)
print(f"Standard Deviation: {df.Close.pct_change().std() *100}%")
for name, df in data_dict.items():
get_stats(df, name)
print()
print()
fig = make_subplots(rows=4, cols=1)
for i, (name, df) in enumerate(data_dict.items()):
fig.append_trace(go.Scatter(x=df.Date, y=df.Close), row=i + 1, col=1)
fig.show()
|
# # BirdClef2023 Mel torch CNN
# https://www.kaggle.com/code/stpeteishii/birdclef2023-sound-j-0-to-mel/notebook
import os
import cv2
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset
from torchvision import datasets, transforms, models
from torchvision.utils import make_grid
from torchvision.datasets import ImageFolder
# # Set Train and Test
paths = []
labels = []
for dirname, _, filenames in os.walk("/kaggle/input/birdclef2023-sound-j-0-to-mel"):
for filename in filenames:
if filename[-4:] == ".png" and "results" not in filename:
path = os.path.join(dirname, filename)
paths += [path]
label = path.split("/")[-1].split("_")[0]
labels += [label]
df = pd.DataFrame(columns=["path", "label"])
df["path"] = paths
df["label"] = labels
display(df["label"].value_counts())
df = df[df["label"] != "crefra2"]
class_names = sorted(set(labels))
N = list(range(len(class_names)))
print(len(class_names))
normal_mapping = dict(zip(class_names, N))
reverse_mapping = dict(zip(N, class_names))
df["labeli"] = df["label"].map(normal_mapping)
train_transform = transforms.Compose([transforms.Resize(217), transforms.ToTensor()])
# CAUTION!!!
path0 = "/kaggle/input/birdclef2023-sound-j-0-to-mel/mel/brrwhe3_XC289781.png"
img = Image.open(path0)
print(type(img))
img1 = np.array(img)
print(type(img1), img1.shape) # (217, 217, 4)
img2 = cv2.imread(path0)
print(type(img2), img2.shape) # (217, 217, 3)
# display(train_transform(img))##OK
# display(train_transform(img1))##impossible
# display(train_transform(img2))##impossible
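# A small hedged sketch (not in the original): torchvision's Resize expects a PIL
# image (or a tensor), so a NumPy array must be converted back first, e.g. with
# Image.fromarray, before the same transform pipeline can be applied.
img1_pil = Image.fromarray(img1)  # (217, 217, 4) uint8 array -> RGBA PIL image
print(train_transform(img1_pil).shape)  # torch.Size([4, 217, 217])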
class CustomDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, index):
img_path = self.df.iloc[index]["path"]
img = Image.open(img_path)
if self.transform:
img = self.transform(img)
label = self.df.iloc[index]["labeli"]
return img, label
dataset = CustomDataset(df, transform=train_transform)
# one by one
for images, labels in dataset:
print(type(images))
    print(images.shape)  # torch.Size([4, 217, 217]) for a single sample
break
dataset_size = len(dataset)
indices = list(range(dataset_size))
split_ratio = 0.8
split_index = int(dataset_size * split_ratio)
train_indices = indices[:split_index]
test_indices = indices[split_index:]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(dataset, batch_size=32, sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=32, sampler=test_sampler)
# # Images and Labels
# batch by batch
for images, labels in train_loader:
print(type(images))
print(images.shape) # torch.Size([32, 4, 217, 217])
break
im = make_grid(images, nrow=8)
img = cv2.imread("/kaggle/input/birdclef2023-sound-j-0-to-mel/mel/brrwhe3_XC289781.png")
print(img.shape)
plt.figure(figsize=(15, 10))
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))
plt.show()
# # CNN Model
class MyCNN(nn.Module):
def __init__(self):
super(MyCNN, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 3, padding=1)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
self.relu2 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(64 * 108 * 108, 128) # 217 / 2 = 108
self.relu3 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(128, 15)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.maxpool(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.relu3(x)
x = self.fc2(x)
return x
model = MyCNN()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
model
# # Fit
import time
start_time = time.time()
train_losses = []
test_losses = []
train_correct = []
test_correct = []
epochs = 100
for i in range(epochs):
print(i)
trn_corr = 0
tst_corr = 0
for b, (X_train, y_train) in enumerate(train_loader):
b += 1
y_pred = model(X_train)
loss = criterion(y_pred, y_train)
predicted = torch.max(y_pred.data, 1)[1]
batch_corr = (predicted == y_train).sum()
trn_corr += batch_corr
optimizer.zero_grad()
loss.backward()
optimizer.step()
if b % 200 == 0:
print(
                f"epoch: {i} loss: {loss.item()} batch: {b} accuracy: {trn_corr.item() * 100 / (32 * b):7.3f}%"
)
loss = loss.detach().numpy()
train_losses.append(loss)
train_correct.append(trn_corr)
with torch.no_grad():
for b, (X_test, y_test) in enumerate(test_loader):
y_val = model(X_test)
loss = criterion(y_val, y_test)
predicted = torch.max(y_val.data, 1)[1]
batch_corr = (predicted == y_test).sum()
tst_corr += batch_corr
loss = loss.detach().numpy()
test_losses.append(loss)
test_correct.append(tst_corr)
print(f"\nDuration: {time.time() - start_time:.0f} seconds")
plt.plot(train_losses, label="train_losses")
plt.plot(test_losses, label="test_losses")
plt.legend()
plt.show()
# save model
torch.save(model.state_dict(), "model.pt")
# new_model = MyCNN()
# new_model.load_state_dict(torch.load('model.pt'))
# # Predict
device = torch.device("cpu") # "cuda:0"
model.eval()
y_true = []
y_pred = []
with torch.no_grad():
for test_data in test_loader:
test_images, test_labels = test_data[0].to(device), test_data[1].to(device)
pred = model(test_images).argmax(dim=1)
for i in range(len(pred)):
y_true.append(test_labels[i].item())
y_pred.append(pred[i].item())
print(y_pred[0:5])
from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred, target_names=class_names, digits=4))
|
# # Index
# Heading
# Data Import
# Date Format Update
# Data Visuals
# Exponential Smoothing
# Relationship tests
# Seasonal Decomposition
# Functions
# Stationarity check
# Model Selection
# Train/ Test split
# Model with Temp & Dew
# Model with only Temp
#
# [Back to top]
# ## Heading
from IPython.display import Image
from IPython.core.display import HTML
url = "https://5vtj648dfk323byvjb7k1e9w-wpengine.netdna-ssl.com/wp-content/uploads/2018/05/shutterstock_170867918-e1525266245642.jpg"
Image(url=url, width=600, height=600, unconfined=True)
# Image source from:- www.fleetcarma.com
# The objective of this notebook is to explore the features that are critical for forecasting power usage over a given period. In the process of exploration, we will uncover the best possible ways to get to the answer.
# Details of Data: https://www.kaggle.com/srinuti/residential-power-usage-3years-data-timeseries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# [Back to top]
# ## Data Import
# Import files
import pandas as pd
import numpy as np
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt
# Load specific forecasting tools
from statsmodels.tsa.arima_model import ARMA, ARMAResults, ARIMA, ARIMAResults
from statsmodels.graphics.tsaplots import (
plot_acf,
plot_pacf,
) # for determining (p,q) orders
from statsmodels.graphics.tsaplots import month_plot, quarter_plot
from pmdarima import auto_arima # for determining ARIMA orders
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.filters.hp_filter import hpfilter
from statsmodels.tsa.stattools import (
adfuller,
kpss,
coint,
bds,
q_stat,
grangercausalitytests,
levinson_durbin,
)
import sys
# Ignore harmless warnings
import warnings
warnings.filterwarnings("ignore")
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
explained_variance_score,
r2_score,
max_error,
median_absolute_error,
mean_squared_log_error,
)
df_usage = pd.read_csv(
"../input/residential-power-usage-3years-data-timeseries/power_usage_2016_to_2020.csv"
)
df_weather = pd.read_csv(
"../input/residential-power-usage-3years-data-timeseries/weather_2016_2020_daily.csv"
)
df_usage.head()
df_weather.head()
# [Back to top]
# ## Date Format Update
# Date column update for 'df_usage'
n = df_usage.shape[0]
p1 = pd.Series(range(n), pd.period_range("2016-06-01 00:00:00", freq="1H", periods=n))
df_usage["StartDate"] = p1.to_frame().index
# Date column update for 'df_weather'
m = df_weather.shape[0]
p2 = pd.Series(range(m), pd.period_range("2016-06-01", freq="1D", periods=m))
df_weather["Date"] = p2.to_frame().index
# convert the period date into timestamp
df_usage["StartDate"] = df_usage["StartDate"].apply(lambda x: x.to_timestamp())
df_usage["Date"] = pd.DatetimeIndex(df_usage["StartDate"]).date
# convert the period date into timestamp
df_weather["Date"] = df_weather["Date"].apply(lambda x: x.to_timestamp())
df_usage_daily = df_usage.groupby("Date").sum()
df_usage_daily["day_of_week"] = df_usage_daily["day_of_week"].apply(lambda x: x / 24)
notes_col = df_usage.groupby("Date").first()["notes"].values
df_usage_daily["notes"] = notes_col
df_usage_daily.head()
# filter the weather data to match with power usage dataframe.
k = df_usage_daily.shape[0]
df_weather = df_weather[0:k]
df_weather.set_index("Date", inplace=True)
df_weather.head()
df_weather.shape
comb_df = pd.merge(df_weather, df_usage_daily, left_index=True, right_index=True)
comb_df.columns
comb_df.drop(
columns=[
"Temp_avg",
"Temp_min",
"Dew_avg",
"Dew_min",
"Hum_avg",
"Hum_min",
"Wind_avg",
"Wind_min",
"Press_avg",
"Press_min",
"Precipit",
"day_of_week_x",
"day_of_week_y",
],
inplace=True,
)
comb_df.index.freq = "D"
comb_df.head()
comb_df["Value (kWh)"].plot(figsize=(16, 9))
comb_df["Value (kWh)"].loc["2017-01-01":"2019-12-31"].plot(figsize=(16, 9))
# [Back to top]
# ## Data Visuals
comb_df[["Temp_max", "Value (kWh)", "Dew_max"]].loc["2017-01-01":"2019-12-31"].plot(
figsize=(16, 9)
)
comb_df.head()
df_short = comb_df.loc["2017-01-01":"2019-12-31"]
df_short.resample(rule="M").mean().plot(figsize=(16, 9))
df_short = df_short[["Temp_max", "Dew_max", "Value (kWh)", "notes"]]
df_short.resample(rule="W").mean().plot(figsize=(16, 9))
# The graph shows significant fluctuations, so smoothing filters will be applied.
df_short["Value (kWh)"].loc["2017-01-01":"2018-01-01"].resample(rule="W").mean().plot(
figsize=(16, 9), legend=True
)
# From the above graph it is clear that only two features, Temp and Dew, meaningfully participate in predicting power.
# [Back to top]
# ## Exponential Smoothing
df_short["EWMA12"] = df_short["Value (kWh)"].ewm(span=30, adjust=True).mean()
df_short["EWMA12_Temp"] = df_short["Temp_max"].ewm(span=30, adjust=True).mean()
df_short["EWMA12_Dew"] = df_short["Dew_max"].ewm(span=30, adjust=True).mean()
df_short[["Value (kWh)", "EWMA12", "EWMA12_Temp", "EWMA12_Dew"]].plot(figsize=(16, 9))
# If you closely observe the power data and its exponential smoothing, there are still fluctuations; the trend line is not smooth.
## Let's see how the data compares at monthly and quarterly granularity
# plot all four graphs in one go to compare power usage against temperature
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 9), squeeze=False)
# fig = plt.figure(8,5)
dfm = df_short["Value (kWh)"].resample(rule="M").mean()
month_plot(dfm, ylabel="Power in kWh", ax=ax1)
dfq = df_short["Value (kWh)"].resample(rule="Q").mean()
quarter_plot(dfq, ylabel="Power kWh", ax=ax2)
dftm = df_short["Temp_max"].resample(rule="M").mean()
month_plot(dftm, ylabel="Temp in Fdeg", ax=ax3)
dftq = df_short["Temp_max"].resample(rule="Q").mean()
quarter_plot(dftq, ylabel="Temp in Fdeg", ax=ax4)
fig.tight_layout(pad=1.2)
# for ax in fig.get_axes():
# ax.label_outer()
# Clearly the data shows seasonality: during the summer months between May and October the power bill is higher. The thermostat is set at 66F for heating and 70F for cooling.
# 1. During the months of Jan, Feb, March, April, Nov and Dec the AC is not running most of the time and the heater is only occasionally on, hence the lower power bill during these months.
# 2. In Q2 and Q3 of each year the power bill is higher due to summer.
# function to evaluate the performance of the regressor.
def evaluation(model, y_pred, y_true):
    """
    Input:- model = string (name of the regressor)
            y_pred = model predictions
            y_true = actual labels
    Output:
            DataFrame with the evaluation metrics.
    """
# create data output frame for the evaluation.
data = [
explained_variance_score(y_true, y_pred),
max_error(y_true, y_pred),
mean_squared_error(y_true, y_pred),
mean_absolute_error(y_true, y_pred),
r2_score(y_true, y_pred, multioutput="uniform_average"),
median_absolute_error(y_true, y_pred),
]
row_index = [
"Exp_Var_Score",
"Max_Error",
"MSE",
"MAE",
"R2_Score",
"Median_Abs_Error",
]
df = pd.DataFrame(data, columns=[model], index=row_index)
return df
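# Hypothetical usage of the helper above (the arrays below are placeholders, not
# predictions produced anywhere in this notebook):
y_true_demo = [21.3, 18.7, 25.1, 30.4]
y_pred_demo = [20.8, 19.5, 24.2, 31.0]
evaluation("demo_regressor", y_pred_demo, y_true_demo)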
title = "Autocorrelation: Power usage"
lags = 50
plot_acf(df_short["EWMA12"], title=title, lags=lags)
plot_pacf(df_short["EWMA12"], title=title, lags=lags)
from statsmodels.tsa.seasonal import seasonal_decompose
result_pwr = seasonal_decompose(df_short["EWMA12"], model="additive")
result_pwr.plot()
result_pwr.trend.plot(figsize=(16, 9))
# result_pwr.seasonal.iloc[0:50].plot(figsize=(16,9))
|
# # EMNIST for ABCD
# import numpy as np # linear algebra
# import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# from cv2 import cv2
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
from keras.layers import (
Dense,
Activation,
Dropout,
Flatten,
MaxPooling2D,
Conv2D,
RandomFlip,
RandomRotation,
Rescaling,
GlobalAveragePooling2D,
RandomZoom,
RandomContrast,
)
from tensorflow.keras.optimizers import Adam, RMSprop
from keras import backend as K
from subprocess import check_output
from keras.preprocessing.image import ImageDataGenerator
from keras.losses import SparseCategoricalCrossentropy
from keras.applications import MobileNetV3Small, InceptionV3
import cv2
import numpy as np
# For how to read image datasets, see https://tensorflow.google.cn/tutorials/load_data/images?hl=zh-cn
from keras.utils import image_dataset_from_directory
K.clear_session()
n_classes = 4
batch_size = 32
img_width, img_height = 128, 128
data_dir = "/kaggle/input/zimuabcd"
trn_ds = image_dataset_from_directory(
    data_dir,  # training set directory
color_mode="grayscale",
subset="training",
shuffle=True,
seed=123,
validation_split=0.1,
image_size=(img_height, img_width),
batch_size=batch_size,
)
val_ds = image_dataset_from_directory(
data_dir,
color_mode="grayscale",
validation_split=0.1,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
# inspect the class names from the training set
class_names = trn_ds.class_names
print(class_names)
### Below are 9 images from the training dataset.
plt.figure(figsize=(8, 8))
for images, labels in trn_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i] / 255.0)
plt.title(class_names[labels[i]])
plt.axis("off")
for image_batch, labels_batch in trn_ds:
print("训练图片数据集的张量维度", image_batch.shape)
print("标签数据集的张量维度", labels_batch.shape)
break
data_augmentation = Sequential(
[
Rescaling(1.0 / 255),
RandomFlip("horizontal_and_vertical"),
RandomRotation(0.2, fill_mode="constant", fill_value=0),
RandomZoom(0.3),
RandomContrast(0.3),
# Flatten()
]
)
### Below are 9 images from the training dataset with the augmentation pipeline applied.
plt.figure(figsize=(8, 8))
for images, labels in trn_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(images[i])
plt.imshow(augmented_image)
plt.title(class_names[labels[i]])
plt.axis("off")
model = Sequential(
[
# data_augmentation,
Conv2D(32, 3, activation="relu"),
MaxPooling2D(),
Conv2D(32, 3, activation="relu"),
MaxPooling2D(),
Conv2D(32, 3, activation="relu"),
MaxPooling2D(),
Flatten(),
Dropout(0.3),
Dense(128, activation="relu"),
Dense(n_classes),
]
)
model.build(input_shape=(None, img_height, img_width, 1))
model.summary()
model.compile(
optimizer="adam",
loss=SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
# ## 6.2 Train the model with fit() method
model.fit(trn_ds, validation_data=val_ds, epochs=100)
# ## 6.3 Evaluating model performance with evaluate() method
loss, acc = model.evaluate(val_ds, batch_size=batch_size)
print("\nTest accuracy: %.1f%%" % (100.0 * acc))
path = "/kaggle/input/testdata1/three.jpg"
img = cv2.imread(path)
img
for images, labels in trn_ds.take(1):
plt.imshow(images[0] / 255.0)
y_pred = model.predict(images)
print("result:", class_names[np.argmax(y_pred[0])])
# # 7. Overfitting and Regularization
# [Back to Table of Contents](#0.1)
# - A neural network has the property to memorize the characteristics of training data. This is called **overfitting**.
# - In such a case, the network fails to generalize when subject to the test data.
# - To avoid this tendency, the model uses a regularizing layer or function. A commonly used regularizing layer is referred to as a **Dropout layer**.
# - Given a dropout rate (dropout=0.45), the **Dropout layer** randomly removes that fraction of units from participating in the next layer. For example, if the first layer has 256 units, after dropout=0.45 is applied only about (1 - 0.45) * 256 ≈ 140 units from layer 1 participate in layer 2.
# - The Dropout layer makes neural networks robust to unforeseen input data because the network is trained to predict correctly, even if some units are missing.
# - Dropout is not used in the output layer, and it is only active during training; it is disabled at prediction time.
# - Besides dropout, other regularizers such as l1 or l2 can be used. In Keras, the bias, weight and activation output can be regularized per layer.
# - l1 and l2 favor smaller parameter values by adding a penalty function: the penalty is a fraction of the sum of absolute (l1) or squared (l2) parameter values. The penalty forces the optimizer to find small parameter values, which makes the network less sensitive to noise in the input data.
# - So, the penalty function forces the optimizer to find parameter values that are small. Neural networks with small parameter values are more insensitive to the presence of noise from within the input data.
# - So, the l2 weight regularizer with fraction=0.001 can be implemented as:
from keras.regularizers import l2
# Illustrative only: hidden_units and input_size are placeholder values, not defined by the model above
hidden_units, input_size = 256, 784
model.add(Dense(hidden_units, kernel_regularizer=l2(0.001), input_dim=input_size))
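# A minimal, self-contained sketch (not part of the model trained above) showing how the
# Dropout layer and the l2 weight regularizer described in the bullets can be combined.
# The layer sizes (256 units, 784 inputs, 10 outputs) and dropout=0.45 are illustrative
# assumptions taken from the example in the text, not from this notebook's data.
reg_demo = Sequential(
    [
        Dense(256, activation="relu", input_dim=784, kernel_regularizer=l2(0.001)),
        Dropout(0.45),  # randomly drops ~45% of the 256 units, only during training
        Dense(10, activation="softmax"),
    ]
)
reg_demo.summary()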
|
# # 1. Import libraries and device setup
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import random
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import zipfile
import cv2
import copy
import natsort
from PIL import Image
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
from torchinfo import summary
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda:0" if USE_CUDA else "cpu")
device
# # 2. Analyze training data structure
MAIN_INPUT_DIR = "/kaggle/input/street-view-getting-started-with-julia/"
TRAIN_LABELS_CSV = os.path.join(MAIN_INPUT_DIR, "trainLabels.csv")
TRAIN_IMG_ZIP = os.path.join(MAIN_INPUT_DIR, "train.zip")
TEST_IMG_ZIP = os.path.join(MAIN_INPUT_DIR, "test.zip")
train_df = pd.read_csv(TRAIN_LABELS_CSV)
train_df.info()
train_df.columns
train_df.head()
train_df.isnull().sum()
categories = train_df["Class"].dropna().unique()
categories
print("Total number of categories: {}".format(len(categories)))
# **We can confirm there are a total of 6283 image files and 62 classes (digits 0-9, lowercase letters a-z, and capital letters A-Z).**
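# A quick illustrative check of the counts claimed above (not part of the original notebook):
print("image files:", len(train_df), "| classes:", train_df["Class"].nunique())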
# # 3. Data split to train, validation and test set
# # 3-1. Create sub directories to store image files
ABS_PATH = os.path.abspath(os.getcwd())
ABS_PATH
BASE_DIR = os.path.join(ABS_PATH, "street-view-getting-started-with-julia/")
BASE_DIR
if not os.path.exists(BASE_DIR):
os.mkdir(BASE_DIR)
TMP_IMG_DIR = os.path.join(BASE_DIR, "tmp/")
TRAIN_IMG_DIR = os.path.join(BASE_DIR, "train/")
VALID_IMG_DIR = os.path.join(BASE_DIR, "valid/")
TEST_IMG_DIR = os.path.join(BASE_DIR, "test/")
IMG_DIRS = [TMP_IMG_DIR, TRAIN_IMG_DIR, VALID_IMG_DIR, TEST_IMG_DIR]
IMG_DIRS
for dir in IMG_DIRS:
if os.path.exists(dir):
shutil.rmtree(dir)
os.mkdir(dir)
# # 3-2. Create 62 category directories for train and valid dataset
for category in categories:
if category.isalpha(): # ASCII -> UNICODE
category = str(ord(category))
if not os.path.exists(os.path.join(TRAIN_IMG_DIR, category)):
os.mkdir(os.path.join(TRAIN_IMG_DIR, category))
if not os.path.exists(os.path.join(VALID_IMG_DIR, category)):
os.mkdir(os.path.join(VALID_IMG_DIR, category))
# # 3-3. Image file extraction
# Train image files extract
with zipfile.ZipFile(TRAIN_IMG_ZIP) as img_zip:
img_zip.extractall(TMP_IMG_DIR)
# Test image files extract
with zipfile.ZipFile(TEST_IMG_ZIP) as img_zip:
img_zip.extractall(TMP_IMG_DIR)
tmp_train_dir = os.listdir(os.path.join(TMP_IMG_DIR, "train"))
random.shuffle(tmp_train_dir)
print("Total {} files are in train set".format(len(tmp_train_dir)))
tmp_test_dir = os.listdir(os.path.join(TMP_IMG_DIR, "test"))
print("Total {} files are in test set".format(len(tmp_test_dir)))
N_train = int(len(tmp_train_dir) * 0.7)
N_valid = len(tmp_train_dir) - N_train
N_train
N_valid
for idx, img_data in enumerate(tmp_train_dir):
img_source = os.path.join(TMP_IMG_DIR, "train", img_data)
img_id = int(img_data[:-4]) - 1 # Get image id
label = train_df.loc[img_id].Class
if label.isalpha(): # ASCII -> UNICODE
label = str(ord(label))
if idx < N_train:
img_destination = os.path.join(TRAIN_IMG_DIR, label)
else:
img_destination = os.path.join(VALID_IMG_DIR, label)
shutil.move(img_source, img_destination) # move image file
for img_data in tmp_test_dir:
img_source = os.path.join(TMP_IMG_DIR, "test", img_data)
img_destination = os.path.join(TEST_IMG_DIR)
shutil.move(img_source, img_destination) # move image file
shutil.rmtree(TMP_IMG_DIR)
# # 4. Dataset and Dataloader
# **Convert all images to tensor having the same dimension**
data_transforms = {
"train": transforms.Compose(
[
transforms.Resize((20, 20)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [1.0, 1.0, 1.0]),
]
),
"valid": transforms.Compose(
[
transforms.Resize((20, 20)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [1.0, 1.0, 1.0]),
]
),
"test": transforms.Compose(
[
transforms.Resize((20, 20)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [1.0, 1.0, 1.0]),
]
),
}
class TestDataSet(Dataset):
def __init__(self, main_dir, transform):
self.main_dir = main_dir
self.transform = transform
self.img_lst = natsort.natsorted(os.listdir(main_dir))
def __len__(self):
return len(self.img_lst)
def __getitem__(self, idx):
img_data = os.path.join(self.main_dir, self.img_lst[idx])
img = Image.open(img_data).convert("RGB")
img_transformed = self.transform(img)
return img_transformed
train_dataset = datasets.ImageFolder(TRAIN_IMG_DIR, data_transforms["train"])
valid_dataset = datasets.ImageFolder(VALID_IMG_DIR, data_transforms["valid"])
test_dataset = TestDataSet(TEST_IMG_DIR, data_transforms["test"])
class_names = train_dataset.classes
class_names
def show_data(img, title=None):
img_np = img.numpy().transpose(
(1, 2, 0)
) # Convert shape Channel X Height X Width -> Height X Width X Channel
mean = np.array([0.5, 0.5, 0.5])
std = np.array([1.0, 1.0, 1.0])
img_np = std * img_np + mean
img_np = np.clip(img_np, 0, 1)
plt.imshow(img_np)
if title is not None:
plt.title(title)
train_loader = DataLoader(
dataset=train_dataset, batch_size=4, shuffle=True, num_workers=2
)
valid_loader = DataLoader(
dataset=valid_dataset, batch_size=4, shuffle=False, drop_last=True, num_workers=2
)
test_loader = DataLoader(
dataset=test_dataset, batch_size=4, shuffle=False, drop_last=False, num_workers=2
)
# Get sample data as 1st train_dataset
sample_img, sample_label = train_dataset[0]
# Get train data batch
sample_imgs, sample_labels = next(iter(train_loader))
# Create grid from the batch
sample_imgs_merged = torchvision.utils.make_grid(sample_imgs)
show_data(sample_imgs_merged, title=[class_names[x] for x in sample_labels])
# # 5. Define a simple CNN model and train the model
class CNN(nn.Module):
def __init__(self, channel_in, output_dim, ch_size=20):
super().__init__()
self.channel_in = channel_in
self.output_dim = output_dim
self.ch_size = ch_size
def conv2d_size_out(M, K=3, S=1, P=1):
return (M + 2 * P - K) // S + 1
def conv2d_maxpool_size_out(M, K=2, S=2):
return (M - K) // S + 1
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels=channel_in,
out_channels=8,
kernel_size=(3, 3),
stride=(1, 1),
padding=1,
),
nn.BatchNorm2d(8),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.ReLU(),
nn.Dropout(p=0.05),
)
self.ch_size = conv2d_maxpool_size_out(conv2d_size_out(self.ch_size))
self.conv2 = nn.Sequential(
nn.Conv2d(
in_channels=8,
out_channels=16,
kernel_size=(3, 3),
stride=(1, 1),
padding=1,
),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.ReLU(),
nn.Dropout(p=0.05),
)
self.ch_size = conv2d_maxpool_size_out(conv2d_size_out(self.ch_size))
linear_input_size = self.ch_size * self.ch_size * 16
self.fc1 = nn.Sequential(
nn.Linear(linear_input_size, 100),
nn.BatchNorm1d(100),
nn.ReLU(),
nn.Dropout(p=0.05),
)
self.fc2 = nn.Sequential(nn.Linear(100, output_dim), nn.Dropout(p=0.05))
self.apply(self._init_weights)
self._init_final_layer()
def _init_weights(self, module):
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
nn.init.kaiming_uniform_(module.weight.data, nonlinearity="relu")
if module.bias is not None:
nn.init.constant_(module.bias.data, 0)
elif isinstance(module, nn.BatchNorm1d) or isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight.data, 1.0)
nn.init.constant_(module.bias.data, 0.0)
def _init_final_layer(self):
nn.init.xavier_uniform_(self.fc2[0].weight.data)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = torch.flatten(
x, 1
) # Dimension flatten except batch // shape = (batchSize, linear_input_size)
x = self.fc1(x)
x = self.fc2(x)
return x
model = CNN(channel_in=3, output_dim=len(class_names), ch_size=20).to(device)
model.eval()
summary(model, (1, 3, 20, 20))
def cal_accuracy(model, loader, train=True):
model.eval()
correct = 0
if train is True:
num_data = N_train
else:
num_data = N_valid
with torch.no_grad():
for x_loader, y_loader in loader:
x_loader = x_loader.to(device)
y_loader = y_loader.to(device)
z = model(x_loader)
_, yhat = torch.max(z, 1)
correct += (yhat == y_loader.squeeze()).sum().item()
return correct / num_data * 100
learning_rate = 5e-4
N_EPOCHS = 121
def train_model(model, train_loader, valid_loader):
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
best_model_para = copy.deepcopy(model.state_dict())
best_cost = np.inf
best_acc = 0
best_epoch = 0
results = {"train_acc": [], "valid_acc": [], "train_cost": [], "valid_cost": []}
for epoch in range(N_EPOCHS):
model.train()
train_cost = 0
valid_cost = 0
for x_train, y_train in train_loader:
x_train = x_train.to(device)
y_train = y_train.to(device)
z = model(x_train)
_, yhat = torch.max(z.data, 1)
optimizer.zero_grad()
loss = criterion(z, y_train.squeeze())
train_cost = train_cost + loss.item() / len(train_loader)
loss.backward()
optimizer.step()
for x_valid, y_valid in valid_loader:
x_valid = x_valid.to(device)
y_valid = y_valid.to(device)
z = model(x_valid)
loss = criterion(z, y_valid.squeeze())
valid_cost = valid_cost + loss.item() / len(valid_loader)
train_acc = cal_accuracy(model, train_loader, train=True)
valid_acc = cal_accuracy(model, valid_loader, train=False)
if valid_acc > best_acc:
best_model_para = copy.deepcopy(model.state_dict())
best_acc = valid_acc
best_cost = valid_cost
best_epoch = epoch
if epoch % 10 == 0:
print(
"Epoch: {}/{} \t Train accuracy: {:.3f}% \t Valid accuracy: {:.3f}%".format(
epoch + 1, N_EPOCHS, train_acc, valid_acc
)
)
results["train_acc"].append(train_acc)
results["valid_acc"].append(valid_acc)
results["train_cost"].append(train_cost)
results["valid_cost"].append(valid_cost)
print(
"Best validation epoch: {}, accuracy: {:.3f}, cost: {:.3f}".format(
best_epoch, best_acc, best_cost
)
)
return results, best_model_para
results, best_model_para = train_model(model, train_loader, valid_loader)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].plot(results["train_acc"], label="Train")
ax[0].plot(results["valid_acc"], label="Validation")
ax[0].set_xlabel("Epoch")
ax[0].set_ylabel("Accuracy")
ax[0].legend()
ax[1].plot(results["train_cost"], label="Train")
ax[1].plot(results["valid_cost"], label="Validation")
ax[1].set_xlabel("Epoch")
ax[1].set_ylabel("Cost")
ax[1].legend()
fig.tight_layout()
model.load_state_dict(best_model_para, strict=False)
def evaluate_model(model, test_loader):
model.eval()
with torch.no_grad():
pred_result = torch.tensor([])
for x_test in test_loader:
x_test = x_test.to(device)
z = model(x_test)
_, y_preds = torch.max(z, 1)
pred_result = torch.cat((pred_result, y_preds.cpu().flatten()))
return pred_result.numpy().astype(int)
y_preds = evaluate_model(model, test_loader)
y_preds
y_pred_results = [class_names[item] for item in y_preds] # Map to class_names
y_submission = []
for result in y_pred_results:
if int(result) > 9:
result = chr(int(result))
y_submission.append(result)
submission_pd = pd.read_csv(
"/kaggle/input/street-view-getting-started-with-julia/sampleSubmission.csv"
)
submission_pd.head()
submission_pd["Class"] = np.array(y_submission).reshape(-1, 1)
submission_pd.head()
submission_pd.to_csv("submission.csv", index=False)
shutil.rmtree(BASE_DIR)
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/diabetes-dataset/diabetes.csv")
df.head()
df.shape
df.dtypes
df.isnull().sum()
df.duplicated().sum()
df.describe().T
df["Outcome"].value_counts()
features = df.drop("Outcome", axis=1)
target = df["Outcome"].values
from imblearn.over_sampling import RandomOverSampler, SMOTE
sampler = SMOTE(random_state=42)
features, target = sampler.fit_resample(features, target)
from collections import Counter
Counter(target)
class LogisticRegression:
def __init__(self, learning_rate=0.01, n_iters=1500):
self.learning_rate = learning_rate
self.n_iters = n_iters
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def fit(self, X_train, y_train):
rows, columns = X_train.shape
self.weights = np.random.random(columns)
self.bias = 0
for i in range(1, self.n_iters):
linear_prediction = np.matmul(X_train, self.weights) + self.bias
prediction = self.sigmoid(linear_prediction)
error = prediction - y_train
            derivative_weights = np.dot(X_train.T, error) / rows
            derivative_bias = np.sum(error) / rows
            self.weights = self.weights - (self.learning_rate * derivative_weights)
            self.bias = self.bias - (self.learning_rate * derivative_bias)
if i % 100 == 0:
print(f" epoch : {i} error: {np.mean(error)}")
def predict(self, X_test):
linear_prediction = np.matmul(X_test, self.weights) + self.bias
prediction = self.sigmoid(linear_prediction)
return np.round(prediction)
def score(self, X_test, y_test):
preds = self.predict(X_test)
accuracy = sum(preds == y_test) / len(y_test)
return accuracy
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(
features, target, test_size=0.2, random_state=42
)
col_names = X_train.columns.tolist()
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# scores = {}
# for i in [1000, 1500, 2000, 3000]:
# lr = LogisticRegression(n_iters=i)
# lr.fit(X_train, y_train)
# scores[i] = lr.score(X_test, y_test)
# print(scores)
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
# # sklearn implementation
lr = LogisticRegression(max_iter=1000)
lr.fit(X_train, y_train)
list(zip(col_names, lr.coef_.flatten()))
[[1, 2], [3, 4]]
[1, 2, 3, 4]
print(lr.score(X_train, y_train))
print("-" * 100)
print(lr.score(X_test, y_test))
preds = lr.predict(X_test)
cm = metrics.confusion_matrix(y_test, preds)
sns.heatmap(cm, annot=True)
plt.show()
# precision_pos = 84 / (84+15)
# precision_neg = 32 / 55
# recall = 84 / (84 + 23)
# recall
prec = metrics.precision_score(y_test, preds)
rec = metrics.recall_score(y_test, preds)
f1_score = metrics.f1_score(y_test, preds)
f1_score
print(metrics.classification_report(y_test, preds))
# # Cross Validation
from sklearn.model_selection import cross_val_score
cross_v_score = cross_val_score(lr, features, target, cv=10)
print(cross_v_score)
mean_score = np.mean(cross_v_score)
print(mean_score)
# # Hyperparameter Tuning
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
params = {
"penalty": ["l1", "l2", "elasticnet"],
"solver": ["liblinear", "saga", "lbfgs"],
}
grid_search_model = GridSearchCV(lr, params, cv=5)
grid_search_model.fit(X_train, y_train)
grid_search_model.best_params_
grid_search_model.best_estimator_.score(X_test, y_test)
grid_search_model.best_estimator_.predict(X_test)
sum(grid_search_model.best_estimator_.predict(X_test) == y_test) / len(y_test)
# # C Regularization
params = {"C": np.arange(1, 100)}
grid_search_model = GridSearchCV(lr, params, cv=5)
grid_search_model.fit(X_train, y_train)
grid_search_model.best_params_
grid_search_model.best_estimator_.score(X_test, y_test)
# # PipeLine
steps = [
("scaler", MinMaxScaler()),
("pca", PCA(n_components=7)),
("model", LogisticRegression(max_iter=1000)),
]
pipe = Pipeline(steps=steps)
pipe.fit(X_train, y_train)
sum(pipe.predict(X_test) == y_test) / len(y_test)
# # K Nearest neighbors
# 
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
knn.score(X_test, y_test)
scores = {}
for k in range(2, 20):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
scores[k] = knn.score(X_test, y_test)
scores
d = {}
d["name"] = "abu abdelazez"
d
# ## Distance methods
# ### 1 - Euclidean distance `sqrt(sum((a - b)**2))`
# ### 2 - Manhattan distance `|x1 - x2| + |y1 - y2|`
def manhattan(x1, x2, y1, y2):
    dist = np.abs(x1 - x2) + np.abs(y1 - y2)
    return dist
manhattan(-18, 10, -9, -100)
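# For completeness, a minimal Euclidean distance helper matching the formula in the heading
# above, i.e. sqrt(sum((a - b)**2)). This is an illustrative addition, not part of the
# original notebook.
def euclidean(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return np.sqrt(np.sum((a - b) ** 2))
euclidean(np.array([-18, -9]), np.array([10, -100]))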
# # REGULARIZATION
x = np.array([[10, 8], [20, 100]])
print(x.shape)
slopes = np.array([0.8, 0.24])
target = np.array([20, 500])
10 * 0 + 20 * 0
np.matmul(x, slopes)
# x1* 0 + x2 * s2 + x3 * s3 = 1000
#
# Scratch example: Manhattan distance from each row of y to every row of x (KNN-style)
x = np.random.random((10, 5)) * 0.05
y = np.random.normal(0.9, 0.2, size=(3, 5))
for i in range(len(y)):
    dist = np.sum(np.abs(x - y[i, :]), axis=1)
    print(dist.argmin())  # index of the nearest row in x
|
# # Predicting Stroke Events using Machine Learning
# Recently I've been fascinated by machine learning and decided to dive more into this world to learn more about it. I've taken a course on using the Scikit-learn library through Python and with my previous experience (w/pandas, matplotlib, numpy, & seaborn), I wanted to try and tackle an ML project. (I've also upped my markdown game from previous notebooks, which I want to showcase a bit as well.)
# The project I'm going to do in this notebook is to predict whether or not a patient had a stroke based on a number of medical and personal attributes. The intent is to use this ML model to predict the likelihood of a stroke in future patients. This will be a classification model as we're trying to predict whether a patient falls into the category of "Stroke" or "No Stroke". So without further ado, let's get started!
# ## Problem Definition:
# > Given clinical and personal parameters about a patient, can we predict whether or not the patient had a stroke?
# ## Data:
# I'm using the stroke-prediction-dataset by FEDESORIANO which can be found at this link:
# https://www.kaggle.com/datasets/fedesoriano/stroke-prediction-dataset?select=healthcare-dataset-stroke-data.csv
# ## Evaluation metric:
# This data isn't part of a competition, so there isn't a particular threshold that I'm trying to reach with this ML model. Instead I'm just aiming for as high an accuracy score as I can obtain in model predictions. But for the sake of professionalism, we'll define an arbitrary number to shoot for.
# > If the model can reach 90% or higher accuracy at predicting whether or not a patient has had a stroke, then we will consider this project a success.
# ## Features:
# I've copied and pasted the dictionary for features from the original dataset page listed above.
# **Data Dictionary**
# 1) **id**: unique identifier
# 2) **gender**: "Male", "Female" or "Other"
# 3) **age**: age of the patient
# 4) **hypertension**: 0 if the patient doesn't have hypertension, 1 if the patient has hypertension
# 5) **heart_disease**: 0 if the patient doesn't have any heart diseases, 1 if the patient has a heart disease
# 6) **ever_married**: "No" or "Yes"
# 7) **work_type**: "children", "Govt_job", "Never_worked", "Private" or "Self-employed"
# 8) **Residence_type**: "Rural" or "Urban"
# 9) **avg_glucose_level**: average glucose level in blood
# 10) **bmi**: body mass index
# 11) **smoking_status**: "formerly smoked", "never smoked", "smokes" or "Unknown"*
# 12) **stroke**: 1 if the patient had a stroke or 0 if not
# *Note: "Unknown" in smoking_status means that the information is unavailable for this patient
# # Preparing the tools
# I'm going to start by importing all of the python libraries that I will need for the duration of this project.
# EDA and plotting libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Scikit-Learn models (Decided after analysis covered later)
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import (
LogisticRegression,
) # Regression might seem counterintuitive, but we'll address this later
# Model Evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import (
RandomizedSearchCV,
GridSearchCV,
) # Separate lines to keep everything "Pythonic"
from sklearn.metrics import (
confusion_matrix,
classification_report,
plot_confusion_matrix,
)
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
# # Loading Data
#
df = pd.read_csv(
"/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv"
)
# Checking to make sure the import worked
df
# # Exploratory Data Analysis (EDA)
# Before we start training models, I want to look through the data and familiarize myself with it a bit. I want to check for missing values and non-numeric values, as the ML models I'll be using need to work with numeric data. I also want to check to see if there are any outliers that are skewing the dataset. We'll just experiment a bit to get a feel for the data.
# Getting a general feel for the data
df.head()
# I'm curious about the number of stroke(1) v. non-stroke(0) patients in the dataset
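# A minimal sketch of the class-balance check described above (illustrative addition);
# the "stroke" column name comes from the data dictionary listed earlier.
df["stroke"].value_counts()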
|
import re
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import nltk
from nltk.stem import WordNetLemmatizer
import gensim
import matplotlib.pyplot as plt
from gensim.models import Word2Vec
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import learning_curve
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, auc
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import nltk
nltk.download("wordnet")
# nltk.data.path.append('../input/wordnet/')
nazario_df = pd.read_csv("/kaggle/input/nazario-2018-2022/nazario-2018-2022.csv")
fauci_df = pd.read_csv("/kaggle/input/fauci-email-data/fauci-email-body.csv")
enron_df = pd.read_csv("/kaggle/input/maildataset/mail_data.csv")
print("Count for each dataset:")
print("Nazario - " + str(len(nazario_df)))
print("Fauci - " + str(len(fauci_df)))
print("Spam Enron - " + str(enron_df["Category"].value_counts()["spam"]))
print("Genuine Enron - " + str(enron_df["Category"].value_counts()["ham"]))
print(
"Total number of spam: "
+ str(len(nazario_df) + enron_df["Category"].value_counts()["spam"])
)
df = pd.DataFrame(columns=["spam", "message"])
nazario_df["spam"] = 1
nazario_df = nazario_df.rename(columns={"body": "message"})
df_concat = pd.concat([df, nazario_df], ignore_index=True)
# Separate the spam and non-spam emails
enron_df = enron_df.rename(columns={"Category": "spam", "Message": "message"})
enron_df["spam"] = enron_df["spam"].replace("spam", 1)
enron_df["spam"] = enron_df["spam"].replace("ham", 0)
enron_spam = enron_df.loc[enron_df["spam"] == 1]
enron_genuine = enron_df.loc[enron_df["spam"] == 0]
df_concat = pd.concat([df_concat, enron_spam], ignore_index=True)
enron_limited_genuine = enron_genuine.sample(n=889, random_state=42)
df_concat = pd.concat([df_concat, enron_limited_genuine], ignore_index=True)
fauci_df["spam"] = 0
fauci_df = fauci_df.rename(columns={"body": "message"})
fauci_limited_df = fauci_df.sample(n=889, random_state=42)
df_concat = pd.concat([df_concat, fauci_limited_df], ignore_index=True)
print(enron_limited_genuine)
print(df_concat)
# # Clean-up data
# 1. Preprocess code - lemmatisation and tokenisation
# Define lemmatizer
lemmatizer = WordNetLemmatizer()
# Define preprocessing function with lemmatization
def preprocess(text):
if isinstance(text, float): # Check if text is missing or invalid
return [] # Return empty list if text is missing
text = re.sub(r"http\S+", "__url__", text)
text = re.sub(r"www\S+", "__url__", text)
tokens = word_tokenize(text.lower()) # Tokenize text
stop_words = set(stopwords.words("english")) # Get stop words
tokens = [
lemmatizer.lemmatize(t) for t in tokens if t.isalpha() and t not in stop_words
] # Lemmatize and remove non-alphabetic tokens and stop words
return tokens
df_concat["tokens"] = df_concat["message"].apply(preprocess)
print(df_concat)
print(df_concat["spam"].unique())
# Define the range of training-set sizes to evaluate for the learning curve
epoch_sizes = range(10, 100, 10)
model = Word2Vec(df_concat["tokens"], vector_size=100, window=5, min_count=1, workers=4)
def get_word_embeddings(model, tokens):
"""
Get the average word embedding for a list of tokens using a Word2Vec model.
"""
# Get the word embeddings for each token
embeddings = [model.wv[t] for t in tokens if t in model.wv.key_to_index]
# Calculate the average embedding for the document
if embeddings:
avg_embedding = np.mean(embeddings, axis=0)
else:
avg_embedding = np.zeros(model.vector_size)
return avg_embedding
# Vectorize text data
X = np.array([get_word_embeddings(model, tokens) for tokens in df_concat["tokens"]])
y = np.array(df_concat["spam"])
y = y.astype(int)
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Train random forest classifier
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
# Predict labels for test set
y_pred = rf.predict(X_test)
cr = classification_report(y_test, y_pred)
train_sizes, train_scores, test_scores = learning_curve(
rf,
X_train,
y_train,
cv=5,
scoring="accuracy",
train_sizes=epoch_sizes,
shuffle=True,
random_state=42,
)
# Calculate the mean and standard deviation of the training and validation scores
train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
# Calculate the false positive rate, true positive rate, and AUC for the ROC curve
fpr, tpr, thresholds = roc_curve(y_test, rf.predict_proba(X_test)[:, 1])
roc_auc = auc(fpr, tpr)
# Plot the ROC curve
plt.figure()
plt.plot(fpr, tpr, color="darkorange", lw=2, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic")
plt.legend(loc="lower right")
plt.show()
# Plot learning curve
plt.figure()
plt.title("Learning Curve")
plt.xlabel("Training examples")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.plot(train_sizes, train_scores_mean, label="Training score")
plt.plot(train_sizes, test_scores_mean, label="Cross-validation score")
plt.legend(loc="best")
plt.show()
# Print the metrics and report
print("\nClassification Report:\n", cr)
print("\nAUC:", roc_auc)
print("Validation Accuracy:", test_scores_mean[-1])
from sklearn.metrics import roc_curve, plot_roc_curve
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import learning_curve
import matplotlib.pyplot as plt
train_sizes, train_scores, test_scores = learning_curve(
rf, X_train, y_train, train_sizes=np.linspace(0.1, 1.0, 10), cv=5
)
# Plot learning curves
plt.figure()
plt.title("Learning Curves (Random Forest)")
plt.xlabel("Training examples")
plt.ylabel("Accuracy")
plt.grid()
plt.plot(
train_sizes, np.mean(train_scores, axis=1), "o-", color="r", label="Training score"
)
plt.plot(
train_sizes,
np.mean(test_scores, axis=1),
"o-",
color="g",
label="Cross-validation score",
)
plt.legend(loc="best")
plt.show()
|
# # MY First Kaggle!!!
# With help from GitHub Copilot, ChatGPT and Google Search
# Still not finished so...
# Anyway, if you have any questions, feel free to comment!
# Thank you
# # Import Libraries & Font & File
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.font_manager as fm
import seaborn as sns
import plotly.express as px
import os
import warnings
warnings.filterwarnings("ignore")  # Warning removal for cleaner output
from IPython.display import HTML
from IPython.display import display
cellNum = 2
cellDisp = "none" # Other option is 'block'
cell = """
<script>
var divTag = document.getElementsByClassName("input")[%s]
var displaySetting = divTag.style.display;
// Default display is 'block'; set to 'none' to hide the cell's code.
// divTag.style.display = 'block';
divTag.style.display = '%s';
</script>
<!-- <button onclick="javascript:toggleInput(%s)" class="button">Toggle Code</button> -->
""" % (
cellNum,
"none",
cellNum,
)
h = HTML(cell)
display(h)
# Font
# If someone knows how to fix Korean, please let me know
plt.rc("font", family="Noto Sans KR")
mpl.rcParams["axes.unicode_minus"] = False
# Bring file into the notebook
sy = pd.read_csv("/kaggle/input/spotify-and-youtube/Spotify_Youtube.csv")
# # Observe & Analyze Data
# Analyze the heads of the data
sy.head()
# do not abbreviate the columns
pd.set_option("display.max_columns", None)
sy
# ## Cleaning Data
# find out all the column names
sy.columns
# filter rows where Licensed is False
sy[sy["Licensed"] == False]
# drop the columns that are not needed
sy.drop(
["Unnamed: 0", "Uri", "Url_spotify", "Url_youtube", "Comments", "Description"],
axis=1,
inplace=True,
)
sy.head()
# find duplicates in title
sy[sy.duplicated(["Title"])]
# drop the duplicates of title
sy.drop_duplicates(subset=["Title"], keep="first", inplace=True)
sy
# # EDA
#
# make new column name 'official_video_num' and if 'official_video' is true, then 'official_video_num' is 1, else 0
sy["official_video_num"] = sy["official_video"].apply(lambda x: 1 if x == True else 0)
sy
# ## Find some correlations
# compute the correlation matrix of the numeric columns
num_columns = sy.select_dtypes(include=[np.number]).columns.tolist()
corr = sy[num_columns].corr()
plt.figure(figsize=(15, 15))
sns.heatmap(corr, annot=True, cmap="coolwarm")
# Distribution of all numerical values
# all same color
sy_num_columns = sy.select_dtypes(include=[np.number])
sy_num_columns.hist(figsize=(10, 10), bins=50, xlabelsize=8, ylabelsize=8)
# # Finding some interesting aspects
# Top 10 most listened artists in Spotify, sum and group by artist
art_sp = sy.groupby("Artist")["Stream"].sum().sort_values(ascending=False).head(10)
# Top 10 most viewed artists in Youtube, sum and group by artist
art_yt = sy.groupby("Artist")["Views"].sum().sort_values(ascending=False).head(10)
# Top 10 most liked artists in Youtube, sum and group by artist
art_yt_like = sy.groupby("Artist")["Likes"].sum().sort_values(ascending=False).head(10)
# make these three as subplots
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(25, 5))
# figure placement
fig.subplots_adjust(wspace=0.5)
# figure title
fig.suptitle("Top 10 most listened and viewed artists in Spotify and Youtube")
# make graph of the top 10 most listened artists in Spotify, horizontal bar chart
art_sp.plot(kind="barh", ax=ax1, color="#1DB954")
ax1.set_title("Top 10 most listened artists in Spotify")
ax1.invert_yaxis()
ax1.set_xlabel("Stream")
ax1.set_ylabel("Artist")
# make graph of the top 10 most viewed artists in Youtube, horizontal bar chart
art_yt.plot(kind="barh", ax=ax2, color="#FF0000")
ax2.set_title("Top 10 most viewed artists in Youtube")
ax2.invert_yaxis()
ax2.set_xlabel("Views")
ax2.set_ylabel("Artist")
# make graph of the top 10 most liked artists in Youtube, horizontal bar chart
art_yt_like.plot(kind="barh", ax=ax3, color="#282828")
ax3.set_title("Top 10 most liked artists in Youtube")
ax3.invert_yaxis()
ax3.set_xlabel("Likes")
ax3.set_ylabel("Artist")
# For streams and views BTS does not make the top 10, but the like counts are absolutely insane for BLACKPINK and BTS
# Top 10 most listened music in Spotify, sum and group by artist
mus_sp = sy.groupby("Title")["Stream"].sum().sort_values(ascending=False).head(10)
# Top 10 most viewed music in Youtube, sum and group by artist
mus_yt = sy.groupby("Title")["Views"].sum().sort_values(ascending=False).head(10)
# Top 10 most liked music in Youtube, sum and group by artist
mus_yt_like = sy.groupby("Title")["Likes"].sum().sort_values(ascending=False).head(10)
# make these three as subplots
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(25, 5))
# figure placement
fig.subplots_adjust(wspace=1.5)
# figure title
fig.suptitle("Top 10 most listened and viewed music in Spotify and Youtube")
# make graph of the top 10 most listened music in Spotify, horizontal bar chart
# get the title name after the first dash
mus_sp.plot(kind="barh", ax=ax1, color="#1DB954")
ax1.set_title("Top 10 most listened music in Spotify")
ax1.invert_yaxis()
ax1.set_xlabel("Stream")
ax1.set_ylabel(
"Music",
)
# font size y axis
ax1.tick_params(axis="y", labelsize=8)
# make graph of the top 10 most viewed music in Youtube, horizontal bar chart
mus_yt.plot(kind="barh", ax=ax2, color="#FF0000")
ax2.set_title("Top 10 most viewed music in Youtube")
ax2.invert_yaxis()
ax2.set_xlabel("Views")
ax2.set_ylabel("Music")
# font size y axis
ax2.tick_params(axis="y", labelsize=8)
# make graph of the top 10 most liked music in Youtube, horizontal bar chart
mus_yt_like.plot(kind="barh", ax=ax3, color="#282828")
ax3.set_title("Top 10 most liked music in Youtube")
ax3.invert_yaxis()
ax3.set_xlabel("Likes")
ax3.set_ylabel("Music")
# font size y axis
ax3.tick_params(axis="y", labelsize=8)
# Quite different between Youtube and Spotify. Maybe because Youtube is more global and free?
# # Analysis about Stream, Views and Likes
# Top 1000 most streamed artists in Spotify (summed and grouped by artist), then take all of their tracks' information
art_sp_all = (
sy.groupby("Artist")["Stream"].sum().sort_values(ascending=False).head(1000)
)
art_sp_all = sy[sy["Artist"].isin(art_sp_all.index)]
art_sp_all
art_sp_all_mean = art_sp_all.groupby("Artist")[[
"Danceability",
"Energy",
"Key",
"Loudness",
"Speechiness",
"Acousticness",
"Instrumentalness",
"Liveness",
"Valence",
"Tempo",
"Duration_ms",
"Views",
"Likes",
"Stream",
]].mean()
art_sp_all_mean
# sum of artist's 'Danceability','Energy','Key','Loudness','Speechiness','Acousticness','Instrumentalness','Liveness','Valence','Tempo','Duration_ms', 'Views','Likes', 'Stream'
art_sp_all_sum = art_sp_all.groupby("Artist")[[
"Danceability",
"Energy",
"Key",
"Loudness",
"Speechiness",
"Acousticness",
"Instrumentalness",
"Liveness",
"Valence",
"Tempo",
"Duration_ms",
"Views",
"Likes",
"Stream",
]].sum()
art_sp_all_sum
# ungroup the data by artist
art_sp_all_sum = art_sp_all_sum.reset_index()
art_sp_all_sum
# ## Ratio of View
# just take views column
art_sp_all_sum_views = art_sp_all_sum[["Artist", "Views"]]
# add all the views
art_sp_all_sum_views["Views"].sum()
# find the percentage of views
art_sp_all_sum_views["Views_percentage"] = (
art_sp_all_sum_views["Views"] / art_sp_all_sum_views["Views"].sum()
)
# from highest to lowest, sort by views percentage and take the top 10% (100 artists)
art_sp_all_sum_views.sort_values(by=["Views_percentage"], ascending=False).head(100)
# sum 10% of the views
art_sp_all_sum_views_10 = (
art_sp_all_sum_views.sort_values(by=["Views_percentage"], ascending=False)
.head(100)["Views"]
.sum()
)
# sum the views of the lower 90% of artists
art_sp_all_sum_views_90 = (
art_sp_all_sum_views.sort_values(by=["Views_percentage"], ascending=True)
.head(900)["Views"]
.sum()
)
# find the share of total views held by the top 10% of artists
art_sp_all_sum_views.sort_values(by=["Views_percentage"], ascending=False).head(100)[
"Views"
].sum() / art_sp_all_sum_views["Views"].sum()
# ## Ratio of Likes
# just take like column
art_sp_all_sum_likes = art_sp_all_sum[["Artist", "Likes"]]
# add all the like
art_sp_all_sum_likes["Likes"].sum()
# find the percentage of like
art_sp_all_sum_likes["Likes_percentage"] = (
art_sp_all_sum_likes["Likes"] / art_sp_all_sum_likes["Likes"].sum()
)
# from highest to lowest, sort by likes percentage and take the top 10% (100 artists)
art_sp_all_sum_likes.sort_values(by=["Likes_percentage"], ascending=False).head(100)
# sum 10% of the like
art_sp_all_sum_likes_10 = (
art_sp_all_sum_likes.sort_values(by=["Likes_percentage"], ascending=False)
.head(100)["Likes"]
.sum()
)
# sum the likes of the lower 90% of artists
art_sp_all_sum_likes_90 = (
art_sp_all_sum_likes.sort_values(by=["Likes_percentage"], ascending=True)
.head(900)["Likes"]
.sum()
)
# find the share of total likes held by the top 10% of artists
art_sp_all_sum_likes.sort_values(by=["Likes_percentage"], ascending=False).head(100)[
"Likes"
].sum() / art_sp_all_sum_likes["Likes"].sum()
# ## Ratio of Stream
# just take stream column
art_sp_all_sum_stream = art_sp_all_sum[["Artist", "Stream"]]
# add all the stream
art_sp_all_sum_stream["Stream"].sum()
# find the percentage of stream
art_sp_all_sum_stream["Stream_percentage"] = (
art_sp_all_sum_stream["Stream"] / art_sp_all_sum_stream["Stream"].sum()
)
# from highest to lowest, sort by stream percentage and take the top 10% (100 artists)
art_sp_all_sum_stream.sort_values(by=["Stream_percentage"], ascending=False).head(100)
# sum 10% of the stream
art_sp_all_sum_stream_10 = (
art_sp_all_sum_stream.sort_values(by=["Stream_percentage"], ascending=False)
.head(100)["Stream"]
.sum()
)
# sum the streams of the lower 90% of artists
art_sp_all_sum_stream_90 = (
art_sp_all_sum_stream.sort_values(by=["Stream_percentage"], ascending=True)
.head(900)["Stream"]
.sum()
)
# find the share of total streams held by the top 10% of artists
art_sp_all_sum_stream.sort_values(by=["Stream_percentage"], ascending=False).head(100)[
"Stream"
].sum() / art_sp_all_sum_stream["Stream"].sum()
# ## Ratio of Views, Likes and Stream relative to their combined total
# total of all views, likes and streams (art_sp_all_sum_views + art_sp_all_sum_likes + art_sp_all_sum_stream)
art_sp_all_sum_views_likes_stream = (
art_sp_all_sum_views["Views"].sum()
+ art_sp_all_sum_likes["Likes"].sum()
+ art_sp_all_sum_stream["Stream"].sum()
)
art_sp_all_sum_views_likes_stream
# art_sp_all_sum_stream/ art_sp_all_sum_views_likes_stream
art_sp_all_sum_stream_p = (
art_sp_all_sum_stream["Stream"].sum() / art_sp_all_sum_views_likes_stream
)
# art_sp_all_sum_likes/ art_sp_all_sum_views_likes_stream
art_sp_all_sum_likes_p = (
art_sp_all_sum_likes["Likes"].sum() / art_sp_all_sum_views_likes_stream
)
# art_sp_all_sum_views/ art_sp_all_sum_views_likes_stream
art_sp_all_sum_view_p = (
art_sp_all_sum_views["Views"].sum() / art_sp_all_sum_views_likes_stream
)
# make subplot pie charts: the overall split of streams/likes/views, then top-100 artists vs the rest for views, likes and streams
fig, ax = plt.subplots(1, 4, figsize=(20, 5))
fig.subplots_adjust(wspace=0.7)
colors = ["#FF0000", "#1DB954", "skyblue"]
labels = "Stream", "Likes", "Views"
sizes = [art_sp_all_sum_stream_p, art_sp_all_sum_likes_p, art_sp_all_sum_view_p]
explode = (0.05, 0) # slightly offset the first slice
ax[0].pie(
sizes, labels=labels, autopct="%1.1f%%", shadow=False, startangle=90, colors=colors
)
ax[0].axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
ax[0].set_title("All")
labels = "Top 100 Artists", "Other Artists"
sizes = [art_sp_all_sum_views_10, art_sp_all_sum_views_90]
explode = (0.05, 0) # slightly offset the first slice
ax[1].pie(
sizes,
explode=explode,
labels=labels,
autopct="%1.1f%%",
shadow=False,
startangle=90,
colors=colors,
)
ax[1].axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
ax[1].set_title("Views")
labels = "Top 100 Artists", "Other Artists"
sizes = [art_sp_all_sum_likes_10, art_sp_all_sum_likes_90]
explode = (0.05, 0) # slightly offset the first slice
ax[2].pie(
sizes,
explode=explode,
labels=labels,
autopct="%1.1f%%",
shadow=False,
startangle=90,
colors=colors,
)
ax[2].axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
ax[2].set_title("Likes")
labels = "Top 100 Artists", "Other Artists"
sizes = [art_sp_all_sum_stream_10, art_sp_all_sum_stream_90]
explode = (0.05, 0) # slightly offset the first slice
ax[3].pie(
sizes,
explode=explode,
labels=labels,
autopct="%1.1f%%",
shadow=False,
startangle=90,
colors=colors,
)
ax[3].axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
ax[3].set_title("Stream")
plt.show()
# From the pie charts, likes make up a very small proportion of the overall engagement.
# I think this is because views and streams require no effort from the user, while a like takes a deliberate action to be recorded.
# sort by views
art_sp_all_sum_views = art_sp_all_sum.sort_values(by=["Views"], ascending=False)
art_sp_all_sum_views
art_sp_all_sum_views.describe()
# art_sp_all_sum_views top 10%
art_sp_all_sum_views_10 = art_sp_all_sum_views.head(100)
art_sp_all_sum_views_10.describe()
# art_sp_all_sum_views low 90%
art_sp_all_sum_views_90 = art_sp_all_sum_views.tail(900)
art_sp_all_sum_views_90.describe()
# top 10% of artists danceability
art_sp_10_sum_views_d = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Danceability"]
.mean()
)
# other 90% of artists danceability
art_sp_other_sum_views_d = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Danceability"]
.mean()
)
# all artists danceability
art_sp_all_sum_views_d = art_sp_all_sum_views["Danceability"].mean()
# top 10% of artists energy
art_sp_10_sum_views_e = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Energy"]
.mean()
)
# other 90% of artists energy
art_sp_other_sum_views_e = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Energy"]
.mean()
)
# all artists energy
art_sp_all_sum_views_e = art_sp_all_sum_views["Energy"].mean()
# top 10% of artists key
art_sp_10_sum_views_k = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Key"]
.mean()
)
# other 90% of artists key
art_sp_other_sum_views_k = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Key"]
.mean()
)
# all artists key
art_sp_all_sum_views_k = art_sp_all_sum_views["Key"].mean()
# top 10% of artists loudness
art_sp_10_sum_views_l = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Loudness"]
.mean()
)
# other 90% of artists loudness
art_sp_other_sum_views_l = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Loudness"]
.mean()
)
# all artists loudness
art_sp_all_sum_views_l = art_sp_all_sum_views["Loudness"].mean()
# top 10% of artists speechiness
art_sp_10_sum_views_s = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Speechiness"]
.mean()
)
# other 90% of artists speechiness
art_sp_other_sum_views_s = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Speechiness"]
.mean()
)
# all artists speechiness
art_sp_all_sum_views_s = art_sp_all_sum_views["Speechiness"].mean()
# top 10% of artists acousticness
art_sp_10_sum_views_a = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Acousticness"]
.mean()
)
# other 90% of artists acousticness
art_sp_other_sum_views_a = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Acousticness"]
.mean()
)
# all artists acousticness
art_sp_all_sum_views_a = art_sp_all_sum_views["Acousticness"].mean()
# top 10% of artists instrumentalness
art_sp_10_sum_views_i = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Instrumentalness"]
.mean()
)
# other 90% of artists instrumentalness
art_sp_other_sum_views_i = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Instrumentalness"]
.mean()
)
# all artists instrumentalness
art_sp_all_sum_views_i = art_sp_all_sum_views["Instrumentalness"].mean()
# top 10% of artists liveness
art_sp_10_sum_views_live = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Liveness"]
.mean()
)
# other 90% of artists liveness
art_sp_other_sum_views_live = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Liveness"]
.mean()
)
# all artists liveness
art_sp_all_sum_views_live = art_sp_all_sum_views["Liveness"].mean()
# top 10% of artists valence
art_sp_10_sum_views_v = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Valence"]
.mean()
)
# other 90% of artists valence
art_sp_other_sum_views_v = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Valence"]
.mean()
)
# all artists valence
art_sp_all_sum_views_v = art_sp_all_sum_views["Valence"].mean()
# top 10% of artists tempo
art_sp_10_sum_views_t = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Tempo"]
.mean()
)
# other 90% of artists tempo
art_sp_other_sum_views_t = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Tempo"]
.mean()
)
# all artists tempo
art_sp_all_sum_views_t = art_sp_all_sum_views["Tempo"].mean()
# top 10% of artists duration_ms
art_sp_10_sum_views_du = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.head(100)["Duration_ms"]
.mean()
)
# other 90% of artists duration_ms
art_sp_other_sum_views_du = (
art_sp_all_sum_views.sort_values(by=["Views"], ascending=False)
.tail(900)["Duration_ms"]
.mean()
)
# all artists duration_ms
art_sp_all_sum_views_du = art_sp_all_sum_views["Duration_ms"].mean()
# make subplots of bar graphs of all the averages, using the color palette defined earlier
fig, axs = plt.subplots(4, 3, figsize=(15, 15))
fig.suptitle(
"Average Values of Top 100 Artists vs. All Other Artists By Views", fontsize=20
)
# danceability
axs[0, 0].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_d, art_sp_other_sum_views_d, art_sp_all_sum_views_d],
color=colors,
)
axs[0, 0].set_title("Danceability")
# energy
axs[0, 1].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_e, art_sp_other_sum_views_e, art_sp_all_sum_views_e],
color=colors,
)
axs[0, 1].set_title("Energy")
# key
axs[0, 2].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_k, art_sp_other_sum_views_k, art_sp_all_sum_views_k],
color=colors,
)
axs[0, 2].set_title("Key")
# loudness
axs[1, 0].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_l, art_sp_other_sum_views_l, art_sp_all_sum_views_l],
color=colors,
)
axs[1, 0].set_title("Loudness")
# speechiness
axs[1, 1].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_s, art_sp_other_sum_views_s, art_sp_all_sum_views_s],
color=colors,
)
axs[1, 1].set_title("Speechiness")
# acousticness
axs[1, 2].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_a, art_sp_other_sum_views_a, art_sp_all_sum_views_a],
color=colors,
)
axs[1, 2].set_title("Acousticness")
# instrumentalness
axs[2, 0].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_i, art_sp_other_sum_views_i, art_sp_all_sum_views_i],
color=colors,
)
axs[2, 0].set_title("Instrumentalness")
# liveness
axs[2, 1].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_live, art_sp_other_sum_views_live, art_sp_all_sum_views_live],
color=colors,
)
axs[2, 1].set_title("Liveness")
# valence
axs[2, 2].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_v, art_sp_other_sum_views_v, art_sp_all_sum_views_v],
color=colors,
)
axs[2, 2].set_title("Valence")
# tempo
axs[3, 0].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_t, art_sp_other_sum_views_t, art_sp_all_sum_views_t],
color=colors,
)
axs[3, 0].set_title("Tempo")
# duration_ms
axs[3, 1].bar(
["Top 100 Artists", "Other Artists", "All Artists"],
[art_sp_10_sum_views_du, art_sp_other_sum_views_du, art_sp_all_sum_views_du],
color=colors,
)
axs[3, 1].set_title("Duration_ms")
# hide empty plots
axs[3, 2].axis("off")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling as pp
df = pd.read_csv("/kaggle/input/wine-quality-dataset/WineQT.csv")
df.head()
df.pop("Id")
df.head()
df.describe()
df.info()
pp.ProfileReport(df)
sns.pairplot(df, hue="quality", height=3)
df = df.drop_duplicates()
sns.histplot(x=df["fixed acidity"])
sns.histplot(x=df["volatile acidity"])
sns.histplot(x=df["citric acid"])
sns.histplot(x=df["residual sugar"])
sns.histplot(x=df["chlorides"])
df.drop(df[df["chlorides"] >= 0.3].index, inplace=True)
df.describe()
df.info()
df.drop(df[df["residual sugar"] >= 10].index, inplace=True)
sns.histplot(x=df["residual sugar"])
df.info()
sns.histplot(x=df["free sulfur dioxide"])
sns.histplot(x=df["total sulfur dioxide"])
df.drop(df[df["total sulfur dioxide"] >= 250].index, inplace=True)
df.drop(df[df["free sulfur dioxide"] >= 60].index, inplace=True)
df.info()
sns.histplot(x=df["density"])
sns.histplot(x=df["pH"])
sns.histplot(x=df["sulphates"])
df.drop(df[df["sulphates"] >= 1.5].index, inplace=True)
df.info()
sns.histplot(x=df["alcohol"])
df.corr()
# # **Model Building**
df.pop("pH")
df.pop("residual sugar")
df.pop("free sulfur dioxide")
df.corr()
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
Y = df.pop("quality")
X = df
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)
X_train
Y_train
# ## **Random Forest**
rf = RandomForestRegressor(max_depth=15, n_estimators=70)
rf.fit(X_train, Y_train)
rf.score(X_train, Y_train)
rf.score(X_test, Y_test)
|
import pandas as pd
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
dataset = pd.DataFrame(data=data["data"], columns=data["feature_names"])
dataset
dataset.info()
from sklearn.model_selection import train_test_split
X = dataset.copy()
y = data["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(ccp_alpha=0.01)
clf = clf.fit(X_train, y_train)
clf.get_params()
predictions = clf.predict(X_test)
predictions
from sklearn.metrics import accuracy_score
accuracy_score(y_test, predictions)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, predictions, labels=[0, 1])
from sklearn.metrics import precision_score
precision_score(y_test, predictions)
from sklearn.metrics import recall_score
recall_score(y_test, predictions)
feature_names = X.columns
feature_names
clf.feature_importances_
feature_importance = pd.DataFrame(
clf.feature_importances_, index=feature_names
).sort_values(0, ascending=False)
feature_importance
features = list(feature_importance[feature_importance[0] > 0].index)
features
feature_importance.head(10).plot(kind="bar")
from sklearn import tree
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(25, 20))
_ = tree.plot_tree(
clf,
feature_names=feature_names,
class_names={0: "Malignant", 1: "Benign"},
filled=True,
fontsize=12,
)
|
import pandas as pd
df = pd.read_csv("/kaggle/input/cleaned-mentalhealth/dataset.csv")
df = df.dropna()
df.sample()
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df["Subreddit"] = label_encoder.fit_transform(df["Subreddit"])
labels = list(label_encoder.classes_)
df.sample(2)
labels
import numpy as np
from sklearn.model_selection import (
StratifiedKFold,
train_test_split,
RandomizedSearchCV,
)
from sklearn.metrics import accuracy_score, classification_report
import xgboost as xgb
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
import gensim
import pickle
X = df["Sentence"].values
y = df["Subreddit"].values
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42, stratify=df[["Subreddit"]]
)
print(X_train.shape)
import fasttext
embedding_dim = 100 # the size of the embedding vectors
model_path = "/kaggle/input/notebook77d10d8c32/fasttextwordembeddings100d.bin"
ft_model = fasttext.load_model(model_path)
ft_model["Khalid"]
# Convert text data to word embeddings
def get_word_embeddings(sentences):
tokenized_sentences = [word_tokenize(sentence) for sentence in sentences]
embeddings = []
for sentence in tokenized_sentences:
sentence_embeddings = []
for word in sentence:
sentence_embeddings.append(ft_model[word])
embeddings.append(np.mean(sentence_embeddings, axis=0))
return np.array(embeddings)
X_train_embeddings = get_word_embeddings(X_train)
X_test_embeddings = get_word_embeddings(X_test)
print(X_test_embeddings.shape)
# Compute class weights
num_classes = len(np.unique(y_train))
class_weights = {}
for class_id in range(num_classes):
num_positive = np.sum(y_train == class_id)
num_negative = len(y_train) - num_positive
class_weight = num_negative / num_positive
class_weights[class_id] = class_weight
class_weights
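# Sanity check: the loop above builds scale_pos_weight per class as n_negative / n_positive.
# A minimal vectorized sketch of the same computation (assumes class ids are the contiguous
# integers 0..num_classes-1 produced by the LabelEncoder above):
_counts = np.bincount(y_train, minlength=num_classes)
_vectorized_weights = {c: (len(y_train) - _counts[c]) / _counts[c] for c in range(num_classes)}
print(_vectorized_weights)  # should match class_weights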
params = {
"max_depth": [4, 5, 6, 7],
"learning_rate": [0.001, 0.01, 0.05, 0.1, 0.15],
"n_estimators": [600, 1200, 1800, 2400],
"subsample": [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
# 'colsample_bytree': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
# 'colsample_bylevel': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"gamma": [0, 0.1, 0.25, 0.5, 1],
"reg_alpha": [0, 0.01, 0.1, 0.5, 1],
"reg_lambda": [0, 1, 2, 5],
"min_child_weight": [3, 4, 5],
}
# Train one-vs-all XGBoost classifiers with randomized search and class weights
xgb_classifiers = []
for c in range(num_classes):
print(labels[c], ":")
xgb_classifier = xgb.XGBClassifier(
objective="binary:logistic",
n_jobs=-1,
tree_method="gpu_hist",
gpu_id=0,
eval_metric="auc",
scale_pos_weight=class_weights[c],
)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
randomized_search = RandomizedSearchCV(
estimator=xgb_classifier,
param_distributions=params,
n_iter=75,
scoring="f1_weighted",
cv=cv,
n_jobs=-1,
random_state=42,
)
randomized_search.fit(X_train_embeddings, y_train == c)
best_estimator = randomized_search.best_estimator_
filename = labels[c] + "_fasttext.pkl"
pickle.dump(best_estimator, open(filename, "wb"))
xgb_classifiers.append(best_estimator)
# Predict class probabilities for each class
class_probs = [
xgb_classifier.predict_proba(X_test_embeddings)[:, 1]
for xgb_classifier in xgb_classifiers
]
# Choose class with highest probability as prediction
y_pred = np.argmax(class_probs, axis=0)
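# class_probs is a list of num_classes arrays, one per one-vs-all classifier, each of length
# len(X_test_embeddings); stacked it has shape (num_classes, n_samples), so argmax over axis=0
# picks, for every test sample, the classifier with the highest positive probability.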
# Compute classification report for one-vs-all XGBoost classifiers
class_reports = []
num_classes = 6
for c in range(num_classes):
y_true = (y_test == c).astype(int)
y_pred_c = class_probs[c] >= 0.5
target_names = ["Non-" + labels[c], labels[c]]
report = classification_report(y_true, y_pred_c, target_names=target_names)
class_reports.append(report)
overall_report = classification_report(y_test, y_pred, target_names=labels)
# Print individual and overall classification reports
for c in range(num_classes):
print(f"{labels[c]} report:")
print(class_reports[c], "\n")
print("Overall report:")
print(overall_report)
def predict_class(sentence, xgb_classifiers):
# Convert Sentence to np.array
sentence = [sentence]
sentence = np.array(sentence, dtype=object)
# Embed sentence
sentence_embedding = get_word_embeddings(sentence)
# Predict class probabilities for each class
class_probs = [
xgb_classifier.predict_proba(sentence_embedding.reshape(1, -1))[:, 1]
for xgb_classifier in xgb_classifiers
]
# Choose class with highest probability as prediction
class_num = np.argmax(class_probs)
return labels[class_num]
predicted_class = predict_class(
"samuel has a hard time holding conversations and interacting with his friends at school",
xgb_classifiers,
)
print(f"Predicted class: {predicted_class}") # autism
|
#
# In this notebook I use these features for aggregation, for building matrices and for making ALS embeddings:
# - region_name
# - city_name
# - cpe_manufacturer_name
# - cpe_model_name
# - cpe_type_cd
# - cpe_model_os_type
# - date
# - part_of_day
import sys
import os
import warnings
os.environ["OPENBLAS_NUM_THREADS"] = "1"
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import gc
import time
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
import pickle
import scipy
import implicit
import bisect
import sklearn.metrics as m
from catboost import CatBoostClassifier, CatBoostRegressor, Pool
from sklearn.model_selection import train_test_split
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
data = pd.read_feather("/data/dataset_full.feather")
data = pa.Table.from_pandas(data)
# # Baseline
# Baseline by the organizers
data_agg = (
data.select(["user_id", "url_host", "request_cnt"])
.group_by(["user_id", "url_host"])
.aggregate([("request_cnt", "sum")])
)
url_set = set(data_agg.select(["url_host"]).to_pandas()["url_host"])
print(f"{len(url_set)} urls")
url_dict = {url: idurl for url, idurl in zip(url_set, range(len(url_set)))}
usr_set = set(data_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
values = np.array(data_agg.select(["request_cnt_sum"]).to_pandas()["request_cnt_sum"])
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(data_agg.select(["url_host"]).to_pandas()["url_host"].map(url_dict))
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
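# Toy illustration (not part of the pipeline) of how the (value, (row, col)) triplets above
# become a users x urls count matrix: user 0 hit url 1 five times, user 1 hit url 0 twice.
_toy = scipy.sparse.coo_matrix(([5, 2], ([0, 1], [1, 0])), shape=(2, 2))
print(_toy.toarray())  # [[0 5]
                       #  [2 0]]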
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=100,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
nlist=400,
nprobe=20,
)
als.fit(mat)
d_factors = als.item_factors
u_factors = als.user_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
bace_emb = pd.DataFrame(d_factors)
bace_emb["user_id"] = bace_emb.index.map(inv_usr_map)
bace_emb.to_csv("data/bace_preprocessing/df_bace.csv", index=False)
del data_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del bace_emb
gc.collect()
# ### Saving url embeddings for url encoding
def save(obj, path, verbose=True):
if verbose:
print("Saving object to {}".format(path))
with open(path, "wb") as obj_file:
pickle.dump(obj, obj_file, protocol=pickle.HIGHEST_PROTOCOL)
if verbose:
print("Object saved to {}".format(path))
pass
url_factor_features_dict = {}
url_factor_features_dict["feature_names"] = []
for i in range(len(u_factors[0])):
url_factor_features_dict["feature_names"].append("url_factor_{}".format(i))
uniq_urls = list(
sorted(list(set(data.select(["url_host"]).to_pandas()["url_host"].values)))
)
url_id_dict = {}
for i in tqdm(range(len(uniq_urls)), desc="Building url --> id mapping"):
url_id_dict[uniq_urls[i]] = i
inverted_url_id_dict = {v: k for k, v in url_id_dict.items()}
for i in tqdm(range(len(u_factors)), desc="Building url factor features dict"):
url_id = inverted_url_id_dict[i]
url_factor_features = u_factors[i]
url_factor_features_dict[url_id] = url_factor_features
save(url_factor_features_dict, "data/utils/url_only_factor_features_dict.pkl")
# # ALS embeddings
# ## Count of days that users visit url_hosts
# I count the number of distinct days on which each user visited each url_host and use it as the weight in the sparse matrix for the ALS embedding
data_agg = (
pa.Table.from_pandas(
data.select(["user_id", "url_host", "date"]).to_pandas().drop_duplicates()
)
.group_by(["user_id", "url_host"])
.aggregate([("date", "count")])
)
values = np.array(data_agg.select(["date_count"]).to_pandas()["date_count"])
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(data_agg.select(["url_host"]).to_pandas()["url_host"].map(url_dict))
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
date_emb = pd.DataFrame(d_factors)
date_emb["user_id"] = date_emb.index.map(inv_usr_map)
date_emb.to_csv("data/bace_preprocessing/date_emb.csv", index=False)
del data_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del date_emb
del url_set
del url_dict
del usr_set
del usr_dict
gc.collect()
# ## Region
# I count the number of requests for each user from each region and use it as the weight in the sparse matrix for the ALS embedding
region_agg = (
data.select(["user_id", "region_name", "request_cnt"])
.group_by(["user_id", "region_name"])
.aggregate([("request_cnt", "count")])
)
usr_set = set(region_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
region_set = set(region_agg.select(["region_name"]).to_pandas()["region_name"])
print(f"{len(region_set)} regions")
region_dict = {url: idurl for url, idurl in zip(region_set, range(len(region_set)))}
values = np.array(
region_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(region_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
region_agg.select(["region_name"]).to_pandas()["region_name"].map(region_dict)
)
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
region_emb = pd.DataFrame(d_factors)
region_emb["user_id"] = region_emb.index.map(inv_usr_map)
region_emb.to_csv("data/bace_preprocessing/region_emb.csv", index=False)
del region_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del region_emb
del region_set
del region_dict
gc.collect()
# ## City
# I count the number of requests for each user from each city and use it as the weight in the sparse matrix for the ALS embedding
city_agg = (
data.select(["user_id", "region_name", "city_name", "request_cnt"])
.group_by(["user_id", "region_name", "city_name"])
.aggregate([("request_cnt", "count")])
)
city_agg = city_agg.to_pandas()
city_agg["city_name"] = (
city_agg["region_name"].astype("string")
+ " "
+ city_agg["city_name"].astype("string")
)
city_agg = city_agg.drop("region_name", axis=1)
city_agg
city_agg = pa.Table.from_pandas(city_agg)
city_set = set(city_agg.select(["city_name"]).to_pandas()["city_name"])
print(f"{len(city_set)} cities")
city_dict = {url: idurl for url, idurl in zip(city_set, range(len(city_set)))}
values = np.array(
city_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(city_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(city_agg.select(["city_name"]).to_pandas()["city_name"].map(city_dict))
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
city_emb = pd.DataFrame(d_factors)
city_emb["user_id"] = city_emb.index.map(inv_usr_map)
city_emb.to_csv("data/bace_preprocessing/city_emb.csv", index=False)
del city_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del city_emb
del city_set
del city_dict
gc.collect()
# ## Model
# I count the number of requests for each user from each model and use it as the weight in the sparse matrix for the ALS embedding
model_agg = (
data.select(["user_id", "cpe_model_name", "request_cnt"])
.group_by(["user_id", "cpe_model_name"])
.aggregate([("request_cnt", "count")])
)
model_set = set(model_agg.select(["cpe_model_name"]).to_pandas()["cpe_model_name"])
print(f"{len(model_set)} cities")
model_dict = {url: idurl for url, idurl in zip(model_set, range(len(model_set)))}
values = np.array(
model_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(model_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
model_agg.select(["cpe_model_name"]).to_pandas()["cpe_model_name"].map(model_dict)
)
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
u_factors = als.user_factors
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
model_emb = pd.DataFrame(d_factors)
model_emb["user_id"] = model_emb.index.map(inv_usr_map)
model_emb.to_csv("data/bace_preprocessing/model_emb.csv", index=False)
del model_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del model_emb
del model_set
del model_dict
del usr_set
del usr_dict
gc.collect()
# # Matrix
# I encode these features with the count and sum of request_cnt
# ## PartOfDay
data_agg = (
data.select(["user_id", "part_of_day", "request_cnt"])
.group_by(["user_id", "part_of_day"])
.aggregate([("request_cnt", "sum"), ("request_cnt", "count")])
)
usr_set = set(data_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
region_set = set(data_agg.select(["part_of_day"]).to_pandas()["part_of_day"])
print(f"{len(region_set)} part_of_days")
region_dict = {url: idurl for url, idurl in zip(region_set, range(len(region_set)))}
values = np.array(
data_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
data_agg.select(["part_of_day"]).to_pandas()["part_of_day"].map(region_dict)
)
count_mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
).toarray()
usr_set = set(data_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
region_set = set(data_agg.select(["part_of_day"]).to_pandas()["part_of_day"])
print(f"{len(region_set)} part_of_days")
region_dict = {url: idurl for url, idurl in zip(region_set, range(len(region_set)))}
values = np.array(data_agg.select(["request_cnt_sum"]).to_pandas()["request_cnt_sum"])
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
data_agg.select(["part_of_day"]).to_pandas()["part_of_day"].map(region_dict)
)
sum_mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
).toarray()
inv_usr_map = {v: k for k, v in usr_dict.items()}
count_mat = pd.DataFrame(count_mat)
count_mat["user_id"] = count_mat.index.map(inv_usr_map)
sum_mat = pd.DataFrame(sum_mat)
sum_mat["user_id"] = sum_mat.index.map(inv_usr_map)
count_mat = count_mat.merge(
sum_mat, on="user_id", how="inner", suffixes=("count", "sum")
)
count_mat.to_csv("data/bace_preprocessing/part_of_day.csv", index=False)
del data_agg
del values
del rows
del cols
del sum_mat
del count_mat
del inv_usr_map
del region_set
del region_dict
del usr_set
del usr_dict
gc.collect()
# # AGG
# I aggregate counts and sums of request_cnt by feature and user, then compute the mean, median, min, max and standard deviation of them for each user
# ### date
date_agg = (
data.select(["user_id", "date", "request_cnt"])
.group_by(["user_id", "date"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("date", "count"),
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
date_agg.to_pandas().to_csv("data/bace_preprocessing/date_agg.csv", index=False)
del date_agg
gc.collect()
# ### url_host
url_agg = (
data.select(["user_id", "url_host", "request_cnt"])
.group_by(["user_id", "url_host"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
url_agg.to_pandas().to_csv("data/bace_preprocessing/url_agg.csv", index=False)
del url_agg
gc.collect()
# ### region
region_agg = (
data.select(["user_id", "region_name", "request_cnt"])
.group_by(["user_id", "region_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
region_agg.to_pandas().to_csv("data/bace_preprocessing/region_agg.csv", index=False)
del region_agg
gc.collect()
# ### city
city_agg = (
data.select(["user_id", "region_name", "city_name", "request_cnt"])
.group_by(["user_id", "region_name", "city_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
city_agg.to_pandas().to_csv("data/bace_preprocessing/city_agg.csv", index=False)
del city_agg
gc.collect()
# ### model
model_agg = (
data.select(["user_id", "cpe_manufacturer_name", "cpe_model_name", "request_cnt"])
.group_by(["user_id", "cpe_manufacturer_name", "cpe_model_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
model_agg.to_pandas().to_csv("data/bace_preprocessing/model_agg.csv", index=False)
del model_agg
gc.collect()
# ### manufacturer_name
manuf_agg = (
data.select(["user_id", "cpe_manufacturer_name", "request_cnt"])
.group_by(["user_id", "cpe_manufacturer_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
manuf_agg.to_pandas().to_csv("data/bace_preprocessing/manuf_agg.csv", index=False)
del manuf_agg
gc.collect()
# ### cpe_type
cpe_agg = (
data.select(["user_id", "cpe_type_cd", "request_cnt"])
.group_by(["user_id", "cpe_type_cd"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
cpe_agg.to_pandas().to_csv("data/bace_preprocessing/cpe_agg.csv", index=False)
del cpe_agg
gc.collect()
# ### os_type
os_agg = (
data.select(["user_id", "cpe_model_os_type", "request_cnt"])
.group_by(["user_id", "cpe_model_os_type"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
os_agg.to_pandas().to_csv("data/bace_preprocessing/os_agg.csv", index=False)
del os_agg
gc.collect()
|
import os
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.layers import (
Input,
Activation,
BatchNormalization,
Dropout,
Lambda,
Conv2D,
Conv2DTranspose,
MaxPooling2D,
concatenate,
)
from tensorflow.keras import backend as K
plt.style.use("ggplot")
# # Utilities
def plot_from_img_path(rows, columns, list_img_path, list_mask_path):
fig = plt.figure(figsize=(12, 12))
for i in range(1, rows * columns + 1):
fig.add_subplot(rows, columns, i)
img_path = list_img_path[i]
mask_path = list_mask_path[i]
image = cv2.imread(img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(mask_path)
plt.imshow(image)
plt.imshow(mask, alpha=0.4)
plt.show()
def dice_coefficients(y_true, y_pred, smooth=100):
y_true_flatten = K.flatten(y_true)
y_pred_flatten = K.flatten(y_pred)
intersection = K.sum(y_true_flatten * y_pred_flatten)
union = K.sum(y_true_flatten) + K.sum(y_pred_flatten)
return (2 * intersection + smooth) / (union + smooth)
def dice_coefficients_loss(y_true, y_pred, smooth=100):
return -dice_coefficients(y_true, y_pred, smooth)
def iou(y_true, y_pred, smooth=100):
intersection = K.sum(y_true * y_pred)
sum = K.sum(y_true + y_pred)
iou = (intersection + smooth) / (sum - intersection + smooth)
return iou
def jaccard_distance(y_true, y_pred):
y_true_flatten = K.flatten(y_true)
y_pred_flatten = K.flatten(y_pred)
return -iou(y_true_flatten, y_pred_flatten)
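# Quick numeric check of Dice and IoU on toy binary masks (pure NumPy; the smoothing constant
# above only guards against division by zero on empty masks, so it is ignored here):
_yt = np.array([1.0, 1.0, 0.0, 0.0])
_yp = np.array([1.0, 0.0, 0.0, 0.0])
_inter = np.sum(_yt * _yp)                            # 1 overlapping pixel
print(2 * _inter / (np.sum(_yt) + np.sum(_yp)))       # Dice = 2/3
print(_inter / (np.sum(_yt) + np.sum(_yp) - _inter))  # IoU  = 1/2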
# # Model
def unet(input_size=(256, 256, 3)):
inputs = Input(input_size)
# First DownConvolution / Encoder Leg will begin, so start with Conv2D
conv1 = Conv2D(filters=64, kernel_size=(3, 3), padding="same")(inputs)
bn1 = Activation("relu")(conv1)
conv1 = Conv2D(filters=64, kernel_size=(3, 3), padding="same")(bn1)
bn1 = BatchNormalization(axis=3)(conv1)
bn1 = Activation("relu")(bn1)
pool1 = MaxPooling2D(pool_size=(2, 2))(bn1)
conv2 = Conv2D(filters=128, kernel_size=(3, 3), padding="same")(pool1)
bn2 = Activation("relu")(conv2)
conv2 = Conv2D(filters=128, kernel_size=(3, 3), padding="same")(bn2)
bn2 = BatchNormalization(axis=3)(conv2)
bn2 = Activation("relu")(bn2)
pool2 = MaxPooling2D(pool_size=(2, 2))(bn2)
conv3 = Conv2D(filters=256, kernel_size=(3, 3), padding="same")(pool2)
bn3 = Activation("relu")(conv3)
conv3 = Conv2D(filters=256, kernel_size=(3, 3), padding="same")(bn3)
bn3 = BatchNormalization(axis=3)(conv3)
bn3 = Activation("relu")(bn3)
pool3 = MaxPooling2D(pool_size=(2, 2))(bn3)
conv4 = Conv2D(filters=512, kernel_size=(3, 3), padding="same")(pool3)
bn4 = Activation("relu")(conv4)
conv4 = Conv2D(filters=512, kernel_size=(3, 3), padding="same")(bn4)
bn4 = BatchNormalization(axis=3)(conv4)
bn4 = Activation("relu")(bn4)
pool4 = MaxPooling2D(pool_size=(2, 2))(bn4)
conv5 = Conv2D(filters=1024, kernel_size=(3, 3), padding="same")(pool4)
bn5 = Activation("relu")(conv5)
conv5 = Conv2D(filters=1024, kernel_size=(3, 3), padding="same")(bn5)
bn5 = BatchNormalization(axis=3)(conv5)
bn5 = Activation("relu")(bn5)
""" Now UpConvolution / Decoder Leg will begin, so start with Conv2DTranspose
The gray arrows (in the above image) indicate the skip connections that concatenate the encoder feature map with the decoder, which helps the backward flow of gradients for improved training. """
up6 = concatenate(
[
Conv2DTranspose(512, kernel_size=(2, 2), strides=(2, 2), padding="same")(
bn5
),
conv4,
],
axis=3,
)
""" After every concatenation we again apply two consecutive regular convolutions so that the model can learn to assemble a more precise output """
conv6 = Conv2D(filters=512, kernel_size=(3, 3), padding="same")(up6)
bn6 = Activation("relu")(conv6)
conv6 = Conv2D(filters=512, kernel_size=(3, 3), padding="same")(bn6)
bn6 = BatchNormalization(axis=3)(conv6)
bn6 = Activation("relu")(bn6)
up7 = concatenate(
[
Conv2DTranspose(256, kernel_size=(2, 2), strides=(2, 2), padding="same")(
bn6
),
conv3,
],
axis=3,
)
conv7 = Conv2D(filters=256, kernel_size=(3, 3), padding="same")(up7)
bn7 = Activation("relu")(conv7)
conv7 = Conv2D(filters=256, kernel_size=(3, 3), padding="same")(bn7)
bn7 = BatchNormalization(axis=3)(conv7)
bn7 = Activation("relu")(bn7)
up8 = concatenate(
[
Conv2DTranspose(128, kernel_size=(2, 2), strides=(2, 2), padding="same")(
bn7
),
conv2,
],
axis=3,
)
conv8 = Conv2D(filters=128, kernel_size=(3, 3), padding="same")(up8)
bn8 = Activation("relu")(conv8)
conv8 = Conv2D(filters=128, kernel_size=(3, 3), padding="same")(bn8)
bn8 = BatchNormalization(axis=3)(conv8)
bn8 = Activation("relu")(bn8)
up9 = concatenate(
[
Conv2DTranspose(64, kernel_size=(2, 2), strides=(2, 2), padding="same")(
bn8
),
conv1,
],
axis=3,
)
conv9 = Conv2D(filters=64, kernel_size=(3, 3), padding="same")(up9)
bn9 = Activation("relu")(conv9)
conv9 = Conv2D(filters=64, kernel_size=(3, 3), padding="same")(bn9)
bn9 = BatchNormalization(axis=3)(conv9)
bn9 = Activation("relu")(bn9)
conv10 = Conv2D(filters=1, kernel_size=(1, 1), activation="sigmoid")(bn9)
return Model(inputs=[inputs], outputs=[conv10])
#
# # Model Training
#
from tqdm import tqdm_notebook, tnrange
from glob import glob
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from sklearn.model_selection import train_test_split
from skimage.color import rgb2gray
from tensorflow.keras.layers import (
Input,
Activation,
BatchNormalization,
Dropout,
Lambda,
Conv2D,
Conv2DTranspose,
MaxPooling2D,
concatenate,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Setting size parameters of images
im_width = 256
im_height = 256
# # Loading Dataset
image_filenames_train = []
mask_files = glob("../input/lgg-mri-segmentation/kaggle_3m/*/*_mask*")
for i in mask_files:
image_filenames_train.append(i.replace("_mask", ""))
print(image_filenames_train[:10])
len(image_filenames_train)
# # Plotting the Collected Dataset
plot_from_img_path(4, 4, image_filenames_train, mask_files)
# # Splitting the Dataset
df = pd.DataFrame(
data={"image_filenames_train": image_filenames_train, "mask": mask_files}
)
df_train, df_test = train_test_split(df, test_size=0.1)
# Further split this val and train
df_train, df_val = train_test_split(df_train, test_size=0.2)
print(df_train.shape)
print(df_test.shape)
print(df_val.shape)
# # Data Generator
# # Data Augmentation
def train_generator(
data_frame,
batch_size,
augmentation_dict,
image_color_mode="rgb",
mask_color_mode="grayscale",
image_save_prefix="image",
mask_save_prefix="mask",
save_to_dir=None,
target_size=(256, 256),
seed=1,
):
"""
can generate image and mask at the same time use the same seed for
image_datagen and mask_datagen to ensure the transformation for image
and mask is the same if you want to visualize the results of generator,
set save_to_dir = "your path"
"""
image_datagen = ImageDataGenerator(**augmentation_dict)
mask_datagen = ImageDataGenerator(**augmentation_dict)
image_generator = image_datagen.flow_from_dataframe(
data_frame,
x_col="image_filenames_train",
class_mode=None,
color_mode=image_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=image_save_prefix,
seed=seed,
)
mask_generator = mask_datagen.flow_from_dataframe(
data_frame,
x_col="mask",
class_mode=None,
color_mode=mask_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=mask_save_prefix,
seed=seed,
)
train_gen = zip(image_generator, mask_generator)
# Final return Tuple after image Normalization and Diagnostics
for img, mask in train_gen:
img, mask = normalize_and_diagnose(img, mask)
yield (img, mask)
def normalize_and_diagnose(img, mask):
img = img / 255
mask = mask / 255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return (img, mask)
EPOCHS = 100
BATCH_SIZE = 32
learning_rate = 1e-4
smooth = 100
model = unet()
model.summary()
train_generator_param = dict(
rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode="nearest",
)
train_gen = train_generator(
df_train, BATCH_SIZE, train_generator_param, target_size=(im_height, im_width)
)
test_gen = train_generator(
df_val, BATCH_SIZE, dict(), target_size=(im_height, im_width)
)
model = unet(input_size=(im_height, im_width, 3))
decay_rate = learning_rate / EPOCHS
opt = Adam(
lr=learning_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=decay_rate,
amsgrad=False,
)
model.compile(
optimizer=opt,
loss=dice_coefficients_loss,
metrics=["binary_accuracy", iou, dice_coefficients],
)
callbacks = [ModelCheckpoint("unet.hdf5", verbose=1, save_best_only=True)]
history = model.fit(
train_gen,
steps_per_epoch=len(df_train) / BATCH_SIZE,
epochs=EPOCHS,
callbacks=callbacks,
validation_data=test_gen,
validation_steps=len(df_val) / BATCH_SIZE,
)
history_post_training = history.history
train_dice_coeff_list = history_post_training["dice_coefficients"]
test_dice_coeff_list = history_post_training["val_dice_coefficients"]
train_jaccard_list = history_post_training["iou"]
test_jaccard_list = history_post_training["val_iou"]
train_loss_list = history_post_training["loss"]
test_loss_list = history_post_training["val_loss"]
plt.figure(1)
plt.plot(test_loss_list, "b-")
plt.plot(train_loss_list, "r-")
plt.xlabel("iterations")
plt.ylabel("loss")
plt.title("loss graph", fontsize=12)
plt.figure(2)
plt.plot(train_dice_coeff_list, "b-")
plt.plot(test_dice_coeff_list, "r-")
plt.xlabel("iterations")
plt.ylabel("accuracy")
plt.title("Accuracy graph", fontsize=12)
plt.show()
# # Loading Trained Model
model = load_model(
"unet.hdf5",
custom_objects={
"dice_coefficient_loss": dice_coefficients_loss,
"iou": iou,
"dice_coefficient": dice_coefficients,
},
)
test_gen = train_generator(
df_test, BATCH_SIZE, dict(), target_size=(im_height, im_width)
)
results = model.evaluate(test_gen, steps=len(df_test) / BATCH_SIZE)
print("Test Loss ", results[0])
print("Test IoU ", results[1])
print("Test Dice Coefficient ", results[2])
# # Plotting Predicted Masks Segmentation results from the Test Image set
for i in range(20):
index = np.random.randint(1, len(df_test.index))
img = cv2.imread(df_test["image_filenames_train"].iloc[index])
img = cv2.resize(img, (im_height, im_width))
img = img / 255
# print(imgs.shape) (256, 256 , 3)
img = img[np.newaxis, :, :, :]
# print(img.shape) # (1, 256, 256, 3)
predicted_img = model.predict(img)
plt.figure(figsize=(12, 12))
plt.subplot(1, 3, 1)
plt.imshow(np.squeeze(img))
plt.title("Original Image")
plt.subplot(1, 3, 2)
plt.imshow(np.squeeze(cv2.imread(df_test["mask"].iloc[index])))
plt.title("Original Mask")
plt.subplot(1, 3, 3)
plt.imshow(np.squeeze(predicted_img) > 0.5)
plt.title("Prediction")
plt.show()
|
import pandas as pd
import numpy as np
url = "https://www.news18.com/cricketnext/ipl-auction-2023/"
initial = pd.read_html(url)
initial
# **GT Dataframe**
initial[1]
gujarat_titans = initial[1]
gujarat_titans["Team"] = "Gujarat Titans"
gujarat_titans.rename(columns={"2023 Squad GT": "Player"}, inplace=True)
gujarat_titans
# **CSK Dataframe**
initial[2]
CSK = initial[2]
CSK["Team"] = "Chennai Super Kings"
CSK.rename(columns={"2023 Squad CSK": "Player"}, inplace=True)
CSK
final = pd.concat([gujarat_titans, CSK], ignore_index=True)
final
initial[3]
# **DC Dataframe**
DC = initial[3]
DC["Team"] = "Delhi Capitals"
DC.rename(columns={"2023 Squad DC": "Player"}, inplace=True)
DC
final = pd.concat([final, DC], ignore_index=True)
final
# **KKR Dataframe**
initial[4]
KKR = initial[4]
KKR["Team"] = "Kolkata Knight Riders"
KKR.rename(columns={"2023 Squad KKR": "Player"}, inplace=True)
KKR
final = pd.concat([final, KKR], ignore_index=True)
final
# **PBKS Dataframe**
initial[5]
PBKS = initial[5]
PBKS["Team"] = "Punjab Kings"
PBKS.rename(columns={"2023 Squad PBKS": "Player"}, inplace=True)
PBKS
final = pd.concat([final, PBKS], ignore_index=True)
final
# **LSG Dataframe**
initial[6]
LSG = initial[6]
LSG["Team"] = "Lucknow Super Giants"
LSG.rename(columns={"2023 Squad LSG": "Player"}, inplace=True)
LSG
final = pd.concat([final, LSG], ignore_index=True)
final
# **MI Dataframe**
initial[7]
MI = initial[7]
MI["Team"] = "Mumbai Indians"
MI.rename(columns={"2023 Squad MI": "Player"}, inplace=True)
MI
final = pd.concat([final, MI], ignore_index=True)
final
# **RCB Dataframe**
initial[8]
RCB = initial[8]
RCB["Team"] = "Royal Challengers Banglore"
RCB.rename(columns={"2023 Squad RCB": "Player"}, inplace=True)
RCB
final = pd.concat([final, RCB], ignore_index=True)
final
# **RR Dataframe**
initial[9]
RR = initial[9]
RR["Team"] = "Rajasthan Royals"
RR.rename(columns={"2023 Squad RR": "Player"}, inplace=True)
RR
final = pd.concat([final, RR], ignore_index=True)
final
# **SRH Dataframe**
initial[10]
SRH = initial[10]
SRH["Team"] = "Sunrisers Hyderabad"
SRH.rename(columns={"2023 Squad SRH": "Player"}, inplace=True)
SRH
final = pd.concat([final, SRH], ignore_index=True)
final
# **Adding Information of unsold players**
initial[11]
unsold = initial[11]
unsold.rename(columns={"Players": "Player"}, inplace=True)
unsold.rename(columns={"Base Price IN ₹": "Base Price"}, inplace=True)
unsold.drop("Base Price IN $", axis=1, inplace=True)
unsold["COST IN ₹ (CR.)"] = np.nan
unsold["Cost IN $ (000)"] = np.nan
unsold["Team"] = "Unsold"
# Reordering the columns (unsold)
unsold = unsold[
[
"Player",
"Base Price",
"TYPE",
"COST IN ₹ (CR.)",
"Cost IN $ (000)",
"2022 Squad",
"Team",
]
]
final = pd.concat([final, unsold], ignore_index=True)
final
# **Exporting the Final Dataframe as CSV File**
final.to_csv("ipl_2023_dataset.csv")
|
# Data was built using "suicide-watch" by NIKHILESWAR KOMATI under the
# CC BY-SA 4.0 licence: https://creativecommons.org/licenses/by-sa/4.0/
# in this notebook, I will be building a Bayesian model to help classify the depression
# data into levels
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# suicide data, originally without sentiment, added to improve
# df_1 = pd.read_csv('/kaggle/input/processed-suicide-data/processedSuicidedata_standard.csv')
df_1 = pd.read_csv(
"/kaggle/input/suicide-processed-with-sentiment/processedSuicidedata_sentiment.csv"
)
# read in the data with the normal features
df_2 = pd.read_csv(
"/kaggle/input/depression-data-tfidf-sentiment-analysis/depression_data_TFIDF_sent.csv"
)
# read in the data with the extra features
df_3 = pd.read_csv(
"/kaggle/input/tfidf-sentiment-w-extra-features/depression_data_TFIDF_sent_extra_features (0).csv"
)
# First, we need to do sentiment analysis using VADER on the suicide data and add the result as an attribute.
# install Vader
#!pip install vaderSentiment
# getting the sentiment first; this will be commented out
"""from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sent_analyser = SentimentIntensityAnalyzer()
sample_num = df_1.shape[0]
score_vector_1 = []
for i in range(sample_num):
score = sent_analyser.polarity_scores(df_1.at[i,'text'])
score_vector_1.append(score['compound'])
df_1.loc[:,'sentiment'] = score_vector_1
df_1
"""
# Train the model using a Bayesian approach and check accuracy and F-score with 10-fold cross validation, after saving the data for later use. Older code will be commented out.
# df_1.to_csv('processedSuicidedata_sentiment.csv',index=False)
# df_1
x_col = df_1.columns[2:285]
X = df_1[x_col]
y_col = df_1.columns[286]
Y = df_1[y_col]
# 10-fold cross validation for metric refinement (using GaussianNB, MultinomialNB, and SVM):
from sklearn.model_selection import KFold
# from sklearn.naive_bayes import GaussianNB
# from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
ConfusionMatrixDisplay,
f1_score,
)
# model = GaussianNB()
model = svm.SVC()
# taken from documentation
kf = KFold(n_splits=10, shuffle=True, random_state=1000)
for train, test in kf.split(X):
# print("%s %s" % (train, test))
# split X and Y into X_train, Y_train, X_test, Y_test
X_train = X.loc[train]
Y_train = Y.loc[train]
X_test = X.loc[test]
Y_test = Y.loc[test]
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
accuracy = accuracy_score(Y_test, Y_pred)
    F1 = f1_score(Y_test, Y_pred, average="weighted")
labels = ["suicide", "non-suicide"]
CM = confusion_matrix(Y_test, Y_pred, labels=labels)
disp = ConfusionMatrixDisplay(confusion_matrix=CM, display_labels=labels)
disp.plot()
print("Accuracy:" + str(accuracy))
print("F1 score:" + str(F1))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Load the dataset
file_path = "/kaggle/input/loan-data/loan_data.csv" # Replace with the path to your dataset file
df = pd.read_csv(file_path)
# Display the first 5 rows of the dataset
print(df.head())
# Check the shape of the dataset
print(f"Dataset shape: {df.shape}")
# Get summary statistics for the dataset
print(df.describe())
# Check for missing values
print(df.isnull().sum())
# Check data types
print(df.dtypes)
# Visualize the distribution of the target variable ('not.fully.paid')
sns.countplot(x="not.fully.paid", data=df)
plt.show()
# Visualize the distribution of the 'purpose' variable
plt.figure(figsize=(12, 6))
sns.countplot(y="purpose", data=df, hue="not.fully.paid")
plt.show()
# Visualize the distribution of the 'fico' variable
plt.figure(figsize=(10, 6))
sns.histplot(df["fico"], bins=30, kde=True)
plt.show()
# Analyze the correlation between the features
corr = df.corr()
plt.figure(figsize=(12, 10))
sns.heatmap(corr, annot=True, cmap="coolwarm")
plt.show()
# Pairplot to visualize relationships between features (optional, might be slow for large datasets)
# sns.pairplot(df, hue='not.fully.paid')
# plt.show()
# # FE
from sklearn.preprocessing import StandardScaler
def feature_engineering(df):
# Create a new feature 'income_to_debt' by dividing 'log.annual.inc' by 'dti'
df["income_to_debt"] = df["log.annual.inc"] / df["dti"]
# Create a new feature 'credit_utilization' by dividing 'revol.bal' by 'revol.util'
df["credit_utilization"] = df["revol.bal"] / df["revol.util"]
# Create a new feature 'fico_to_income' by dividing 'fico' by 'log.annual.inc'
df["fico_to_income"] = df["fico"] / df["log.annual.inc"]
# Create a new feature 'installment_to_income' by dividing 'installment' by 'log.annual.inc'
df["installment_to_income"] = df["installment"] / df["log.annual.inc"]
# Fill in any missing or infinite values generated during feature creation
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.fillna(0, inplace=True)
return df
# Perform feature engineering on the dataset
df = feature_engineering(df)
# Display the first 5 rows of the transformed dataset
print(df.head())
# # Preprocessing and Balancing the Data
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
def preprocess_data(df):
# Handle missing values
    df.fillna(df.median(numeric_only=True), inplace=True)  # Using the median to fill missing values in numeric columns
# Calculate additional features
df["installment_to_income_ratio"] = df["installment"] / df["log.annual.inc"]
df["credit_history"] = (df["days.with.cr.line"] / 365).round()
# Drop unnecessary columns
df = df.drop(["credit.policy", "days.with.cr.line"], axis=1)
# Encode categorical features
categorical_features = ["purpose"]
one_hot_encoder = OneHotEncoder(sparse=False, drop="first")
one_hot_encoded = one_hot_encoder.fit_transform(df[categorical_features])
encoded_df = pd.DataFrame(
one_hot_encoded, columns=one_hot_encoder.get_feature_names(categorical_features)
)
df = pd.concat([df.drop(categorical_features, axis=1), encoded_df], axis=1)
# Scale numerical features
numerical_features = [
"int.rate",
"installment",
"log.annual.inc",
"dti",
"fico",
"revol.bal",
"revol.util",
"inq.last.6mths",
"delinq.2yrs",
"pub.rec",
"installment_to_income_ratio",
"credit_history",
]
scaler = StandardScaler()
df[numerical_features] = scaler.fit_transform(df[numerical_features])
return df
# Perform feature engineering (if not already done)
df = feature_engineering(df)
# Preprocess the dataset
preprocessed_df = preprocess_data(df)
# Split the dataset into training and testing sets
X = preprocessed_df.drop("not.fully.paid", axis=1)
y = preprocessed_df["not.fully.paid"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Display the first 5 rows of the preprocessed dataset
print(preprocessed_df.head())
from imblearn.over_sampling import SMOTE
smote = SMOTE(random_state=42)
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
loan_df = pd.concat([X_train_smote, y_train_smote], axis=1)
loan_df["not.fully.paid"].value_counts()
# # Model Selection
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import cross_val_score
# Define the models
models = {
"Random Forest": RandomForestClassifier(random_state=42),
"CatBoost": CatBoostClassifier(random_state=42, verbose=0),
"LightGBM": LGBMClassifier(random_state=42),
}
# Function to train and evaluate each model
def evaluate_models(models, X_train, y_train, X_test, y_test):
for name, model in models.items():
print(f"Evaluating {name}...")
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
# Using cross-validation for a more reliable performance estimation
cv_accuracy = np.mean(
cross_val_score(model, X_train, y_train, cv=5, scoring="accuracy")
)
cv_f1 = np.mean(cross_val_score(model, X_train, y_train, cv=5, scoring="f1"))
print(f" Accuracy: {accuracy:.4f}")
print(f" F1-score: {f1:.4f}")
print(f" Cross-validated Accuracy: {cv_accuracy:.4f}")
print(f" Cross-validated F1-score: {cv_f1:.4f}")
print()
# Evaluate the models
evaluate_models(models, X_train_smote, y_train_smote, X_test, y_test)
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.pipeline import Pipeline
from sklearn.base import ClassifierMixin
import joblib
# Define a combined classifier
class CombinedClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, clf1, clf2):
self.clf1 = clf1
self.clf2 = clf2
        self.n_features_in_ = None  # add this line to track the number of input features
def fit(self, X, y):
self.clf1.fit(X, y)
self.clf2.fit(X, y)
        self.n_features_in_ = X.shape[1]  # add this line to record the number of input features
return self
def predict(self, X):
y_pred1 = self.clf1.predict(X)
y_pred2 = self.clf2.predict(X)
y_pred = np.round((y_pred1 + y_pred2) / 2).astype(int)
return y_pred
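# Tie-break note: when the two base classifiers disagree, (1 + 0) / 2 = 0.5 and np.round
# uses round-half-to-even, so np.round(0.5) == 0 -- disagreements resolve to class 0.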
# Define the models
pipelines = {
"CombinedClassifier": Pipeline(
[
(
"classifier",
CombinedClassifier(
RandomForestClassifier(random_state=42),
CatBoostClassifier(random_state=42, verbose=0),
),
)
]
)
}
# Hyperparameters to be tuned
params = {
"CombinedClassifier": {
"classifier__clf1__n_estimators": [500],
"classifier__clf1__max_depth": [None],
"classifier__clf2__iterations": [750],
"classifier__clf2__learning_rate": [0.2],
}
}
# Evaluation metrics
scoring = ["accuracy", "precision", "recall", "f1"]
# Function to perform hyperparameter tuning and cross-validated evaluation
def evaluate_pipelines(pipelines, params, X_train, y_train):
best_models = {}
for name, pipeline in pipelines.items():
print(f"Evaluating {name}...")
# Hyperparameter tuning using GridSearchCV
grid_search = GridSearchCV(
pipeline,
params[name],
cv=5,
scoring=scoring,
refit="accuracy",
verbose=1,
n_jobs=-1,
)
grid_search.fit(X_train, y_train)
best_params = grid_search.best_params_
print(f" Best Parameters: {best_params}")
# Retrain the model with the best parameters
best_model = pipeline.set_params(**best_params)
best_model.fit(X_train, y_train)
best_models[name] = best_model
# Cross-validated evaluation
cv_results = cross_validate(
best_model, X_train, y_train, cv=5, scoring=scoring, n_jobs=-1
)
for metric in scoring:
mean_score = np.mean(cv_results[f"test_{metric}"])
print(f" Cross-validated {metric}: {mean_score:.4f}")
print()
return best_models
# Evaluate the pipelines and obtain the best models
best_models = evaluate_pipelines(pipelines, params, X_train_smote, y_train_smote)
# Save the best-performing models
for name, model in best_models.items():
joblib.dump(model, f"{name}_best_model.pkl")
import joblib
# Save the best model to disk
joblib.dump(model, "loan_classifier.joblib")
"""
import numpy as np
def check_feature_count(input_data, trained_model):
input_data = pd.get_dummies(input_data)
trained_model = pd.get_dummies(trained_model)
input_feature_count = input_data.shape[1]
    # get the classifier from the last pipeline step
final_step_classifier = trained_model.named_steps['classifier']
    # check whether the classifier has the n_features_in_ attribute
if hasattr(final_step_classifier, 'n_features_in_'):
trained_feature_count = final_step_classifier.n_features_in_
else:
print("分类器没有n_features_in_属性,无法检查特征数。")
return False
if input_feature_count == trained_feature_count:
return True
else:
return False
# Example
input_data = df
trained_model = pipelines # assuming the model has already been loaded
if check_feature_count(input_data, trained_model):
prediction = trained_model.predict(input_data)
else:
print("请确保输入数据具有与训练数据相同的特征数")
"""
"""
import gradio as gr
import joblib
# Load the trained model
model = joblib.load("loan_classifier.joblib")
def predict_loan_status(
int_rate,
installment,
log_annual_inc,
dti,
fico,
revol_bal,
revol_util,
inq_last_6mths,
delinq_2yrs,
pub_rec,
installment_to_income_ratio,
credit_history,
):
input_dict = {
"int.rate": int_rate,
"installment": installment,
"log.annual.inc": log_annual_inc,
"dti": dti,
"fico": fico,
"revol.bal": revol_bal,
"revol.util": revol_util,
"inq.last.6mths": inq_last_6mths,
"delinq.2yrs": delinq_2yrs,
"pub.rec": pub_rec,
"installment_to_income_ratio": installment_to_income_ratio,
"credit_history": credit_history,
}
# Convert the dictionary to a 2D array
input_array = [list(input_dict.values())]
prediction = model.predict(input_array)[0]
if prediction == 0:
return "Loan fully paid"
else:
return "Loan not fully paid"
inputs = [
gr.Slider(0.06, 0.23, step=0.01, label="Interest Rate"),
gr.Slider(100, 950, step=10, label="Installment"),
gr.Slider(7, 15, step=0.1, label="Log Annual Income"),
gr.Slider(0, 40, step=1, label="DTI Ratio"),
gr.Slider(600, 850, step=1, label="FICO Score"),
gr.Slider(0, 120000, step=1000, label="Revolving Balance"),
gr.Slider(0, 120, step=1, label="Revolving Utilization"),
gr.Slider(0, 10, step=1, label="Inquiries in Last 6 Months"),
gr.Slider(0, 20, step=1, label="Delinquencies in Last 2 Years"),
gr.Slider(0, 10, step=1, label="Public Records"),
gr.Slider(0, 5, step=0.1, label="Installment to Income Ratio"),
gr.Slider(0, 1, step=0.01, label="Credit History"),
]
outputs = [gr.Label(num_top_classes=2)]
title = "Loan Approval Classifier"
description = (
"Enter the details of the loan applicant to check if the loan is approved or not."
)
gr.Interface(
fn=predict_loan_status,
inputs=inputs,
outputs=outputs,
title=title,
description=description,
).launch()
"""
|
import numpy as np
import pandas as pd
from skimage.morphology import erosion
from skimage.morphology import dilation
import os
import cv2
import matplotlib.pyplot as plt
import skimage
from skimage import io, filters
image = skimage.io.imread(
"/kaggle/input/assignment-2-computer-vision/chemical/inchi10.png", as_gray=True
)
plt.imshow(image, cmap=plt.get_cmap("gray"))
kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
kernel2 = np.array([[1, 1], [1, 1]])
plt.imshow(
(dilation(dilation(erosion(image, kernel), kernel2), kernel2)),
cmap=plt.get_cmap("gray"),
)
img = cv2.imread(
"/kaggle/input/assignment-2-computer-vision/chemical/inchi10.png",
cv2.IMREAD_GRAYSCALE,
)
# Define mean filter
mean_kernel = np.ones((3, 3), np.float32) / 9
# Apply filters
mean_filtered = cv2.filter2D(img, -1, mean_kernel)
gaussian_filtered = cv2.GaussianBlur(img, (3, 3), 1)
median_filtered = cv2.medianBlur(img, 3)
bilateral_filtered = cv2.bilateralFilter(img, 250, 250, 250)
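# cv2.bilateralFilter(src, d, sigmaColor, sigmaSpace): a 250-pixel neighbourhood diameter with
# large sigmas gives very aggressive edge-preserving smoothing (and is much slower than the other filters).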
# Display images
fig, axs = plt.subplots(2, 3, figsize=(10, 5))
axs[0][0].imshow(img, cmap="gray")
axs[0][0].set_title("Original Image")
axs[0][1].imshow(mean_filtered, cmap="gray")
axs[0][1].set_title("Mean Filtered Image")
axs[1][0].imshow(gaussian_filtered, cmap="gray")
axs[1][0].set_title("Gaussian Filtered Image")
axs[1][1].imshow(median_filtered, cmap="gray")
axs[1][1].set_title("Median Filtered Image")
axs[1][2].imshow(bilateral_filtered, cmap="gray")
axs[1][2].set_title("Bilateral Filtered Image")
plt.show()
img = cv2.imread(
"/kaggle/input/assignment-2-computer-vision/chemical/inchi10.png",
cv2.IMREAD_GRAYSCALE,
)
plt.figure(figsize=(8, 3), constrained_layout=False)
img_fft = np.fft.fft2(img)
img_fftshift = np.fft.fftshift(img_fft)
img_ifftshift = np.fft.ifftshift(img_fftshift)
img_ifft = np.fft.ifft2(img_ifftshift)
plt.subplot(231), plt.imshow(img, "gray"), plt.title("Original Image")
plt.subplot(232), plt.imshow(np.log(1 + np.abs(img_fft)), "gray"), plt.title(
"Spectrum, FFT"
)
plt.subplot(233), plt.imshow(np.log(1 + np.abs(img_fftshift)), "gray"), plt.title(
"Centered Spectrum"
)
plt.subplot(234), plt.imshow(np.log(1 + np.abs(img_ifftshift)), "gray"), plt.title(
"Decentralized IFFT"
)
plt.subplot(235), plt.imshow(np.abs(img_ifft), "gray"), plt.title("Reversed Image")
plt.show()
from math import exp, sqrt
def distance(point1, point2):
return sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)
def gaussianLP(D0, imgShape):
base = np.zeros(imgShape[:2])
rows, cols = imgShape[:2]
center = (rows / 2, cols / 2)
for x in range(cols):
for y in range(rows):
base[y, x] = exp(((-distance((y, x), center) ** 2) / (2 * (D0**2))))
return base
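# The nested loops above evaluate H(u, v) = exp(-D(u, v)^2 / (2 * D0^2)) pixel by pixel.
# An equivalent vectorized sketch (an alternative, not used by the cells below):
def gaussianLP_vectorized(D0, imgShape):
    rows, cols = imgShape[:2]
    y, x = np.ogrid[:rows, :cols]
    dist_sq = (y - rows / 2) ** 2 + (x - cols / 2) ** 2
    return np.exp(-dist_sq / (2 * D0**2))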
def try_d0s_lp(d0):
plt.figure(figsize=(25, 5), constrained_layout=False)
plt.subplot(161), plt.imshow(img, "gray"), plt.title("Original Image")
original = np.fft.fft2(img)
plt.subplot(162), plt.imshow(np.log(1 + np.abs(original)), "gray"), plt.title(
"Spectrum"
)
center = np.fft.fftshift(original)
plt.subplot(163), plt.imshow(np.log(1 + np.abs(center)), "gray"), plt.title(
"Centered Spectrum"
)
LowPassCenter = center * gaussianLP(d0, img.shape)
plt.subplot(164), plt.imshow(np.log(1 + np.abs(LowPassCenter)), "gray"), plt.title(
"Centered Spectrum multiply Low Pass Filter"
)
LowPass = np.fft.ifftshift(LowPassCenter)
plt.subplot(165), plt.imshow(np.log(1 + np.abs(LowPass)), "gray"), plt.title(
"Decentralize"
)
inverse_LowPass = np.fft.ifft2(LowPass)
plt.subplot(166), plt.imshow(np.abs(inverse_LowPass), "gray"), plt.title(
"Processed Image"
)
plt.suptitle("D0:" + str(d0), fontweight="bold")
plt.subplots_adjust(top=1.1)
plt.show()
for i in [100, 50, 30, 20, 10]:
try_d0s_lp(i)
def gaussianHP(D0, imgShape):
base = np.zeros(imgShape[:2])
rows, cols = imgShape[:2]
center = (rows / 2, cols / 2)
for x in range(cols):
for y in range(rows):
base[y, x] = 1 - exp(((-distance((y, x), center) ** 2) / (2 * (D0**2))))
return base
def try_d0s_hp(d0):
plt.figure(figsize=(25, 5), constrained_layout=False)
plt.subplot(161), plt.imshow(img, "gray"), plt.title("Original Image")
original = np.fft.fft2(img)
plt.subplot(162), plt.imshow(np.log(1 + np.abs(original)), "gray"), plt.title(
"Spectrum"
)
center = np.fft.fftshift(original)
plt.subplot(163), plt.imshow(np.log(1 + np.abs(center)), "gray"), plt.title(
"Centered Spectrum"
)
HighPassCenter = center * gaussianHP(d0, img.shape)
plt.subplot(164), plt.imshow(np.log(1 + np.abs(HighPassCenter)), "gray"), plt.title(
"Centered Spectrum multiply High Pass Filter"
)
HighPass = np.fft.ifftshift(HighPassCenter)
plt.subplot(165), plt.imshow(np.log(1 + np.abs(HighPass)), "gray"), plt.title(
"Decentralize"
)
inverse_HighPass = np.fft.ifft2(HighPass)
plt.subplot(166), plt.imshow(np.abs(inverse_HighPass), "gray"), plt.title(
"Processed Image"
)
plt.suptitle("D0:" + str(d0), fontweight="bold")
plt.subplots_adjust(top=1.1)
plt.show()
for i in [100, 50, 30, 20, 10]:
try_d0s_hp(i)
path = "/kaggle/input/assignment-2-computer-vision/speckle/"
# List all files in the directory
files = os.listdir(path)
# Create an empty list to store the images
images = []
fig, axs = plt.subplots(1, 6, figsize=(15, 15))
cnt = 0
# Loop through each file in the directory
for file in files:
# Read the image using cv2.imread()
img = cv2.imread(path + file, cv2.IMREAD_GRAYSCALE)
# Append the image to the list
images.append(img)
axs[cnt].imshow(img, cmap="gray")
cnt = cnt + 1
plt.show()
fig, axs = plt.subplots(1, 6, figsize=(15, 15))
cnt = 0
for img in images:
img_filtered = cv2.GaussianBlur(img, (9, 9), 25)
axs[cnt].imshow(img_filtered, cmap="gray")
cnt = cnt + 1
plt.show()
import pydicom as dicom
import matplotlib.pylab as plt
import imageio
import re
def sort_images(image_list):
regex_pattern = r"Image-(\d+)\.dcm"
return sorted(image_list, key=lambda x: int(re.search(regex_pattern, x).group(1)))
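# e.g. ["Image-10.dcm", "Image-2.dcm"] sorts to ["Image-2.dcm", "Image-10.dcm"], because the
# numeric capture group is compared as an int rather than lexicographically as a string.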
def gatherFolderImageToGif(patientId, screenType):
folder_path = f"/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification/test/{patientId}/{screenType}"
files = os.listdir(folder_path)
sorted_list = sort_images(files)
files = sorted_list
images = []
for file in files:
ds = dicom.dcmread(os.path.join(folder_path, file))
images.append(ds.pixel_array)
output_file = f"output-{patientId}-{screenType}.gif" # or 'output.mp4' for video
imageio.mimsave(output_file, images)
from IPython.display import HTML
gatherFolderImageToGif("00001", "FLAIR")
gatherFolderImageToGif("00001", "T1w")
gatherFolderImageToGif("00001", "T1wCE")
gatherFolderImageToGif("00001", "T2w")
gif_path1 = "output-00001-FLAIR.gif"
gif_path2 = "output-00001-T1w.gif"
gif_path3 = "output-00001-T1wCE.gif"
gif_path4 = "output-00001-T2w.gif"
html_code = f'<div style="display: flex; flex-wrap: wrap;"><img src="{gif_path1}"> <img src="{gif_path2}"> <img src="{gif_path3}"> <img src="{gif_path4}"></div>'
HTML(html_code)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
gender_submission_df = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
train_df
test_df
gender_submission_df
for column in train_df.columns:
print(column)
print(train_df[column].value_counts())
print("_________________________________________________________________________")
X = train_df.loc[:, ["Pclass", "Age", "SibSp", "Parch"]]
X
y = train_df.loc[:, "Survived"]
y
X.isna().sum()
X["Age"] = X["Age"].fillna(-1)
X["Age"]
print(X.isna().sum())
print(X.isnull().sum())
X
print(X.max())
print("________________________________________________________________________")
print(X.min())
print("________________________________________________________________________")
print(X.mean())
print("________________________________________________________________________")
print(X.median())
print("________________________________________________________________________")
print(y.max())
print("________________________________________________________________________")
print(y.min())
print("________________________________________________________________________")
print(y.mean())
print("________________________________________________________________________")
print(y.median())
print("________________________________________________________________________")
def get_train_and_test_set(X, y):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, train_size=0.8
)
print(X.shape, X_train.shape, X_test.shape, pd.concat([X_train, X_test]).shape)
print(y.shape, y_train.shape, y_test.shape, pd.concat([y_train, y_test]).shape)
return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = get_train_and_test_set(X, y)
def get_predictions(X_train, y_train, X_test):
clf = RandomForestClassifier(n_estimators=10)
clf = clf.fit(X_train, y_train)
predictions = pd.Series(clf.predict(X_test))
# print(X_, predictions)
return predictions
predictions = get_predictions(X_train, y_train, X_test)
predictions
y_test.index = range(y_test.shape[0])
y_test
predictions
comparison = predictions.compare(y_test)
comparison
def get_accuracy_metrics(predictions, y_test):
# predictions_copy = predictions.copy()
# y_test_copy = y_test.copy()
# # Since after the test-train split, the indices are incorrect
# y_test_copy.index = range(y_test_copy.shape[0])
# print(predictions_copy, y_test_copy)
# comparsion = predictions_copy.compare(y_test_copy)
# num_wrongs = comparision.shape[0]
# num_total = predictions_copy.shape[0]
# num_corrects = num_total - num_wrongs
# accuracy = num_corrects / num_total
# return num_total, num_wrongs, accuracy
def get_accuracy_helper(predictions, y_test):
num_correct = 0
num_wrong = 0
y_test_copy = y_test.copy()
y_test_copy.index = predictions.index
        for i in range(len(predictions)):
if y_test_copy[i] == predictions[i]:
num_correct += 1
else:
num_wrong += 1
num_total = num_correct + num_wrong
accuracy = num_correct / num_total
return num_correct, num_wrong, accuracy
total_correct, total_wrong, total_accuracy = 0, 0, 0
num_averaging = 20
# To average the metrics
for i in range(num_averaging):
num_correct, num_wrong, accuracy = get_accuracy_helper(predictions, y_test)
total_correct += num_correct
total_wrong += num_wrong
total_accuracy += accuracy
# return np.array((total_correct, total_wrong, total_accuracy)) / 20
return (
total_correct / num_averaging,
total_wrong / num_averaging,
total_accuracy / num_averaging,
)
get_accuracy_metrics(predictions, y_test)
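# Cross-check (not part of the original flow): sklearn's accuracy_score should agree with the
# hand-rolled helper above once the indices are aligned the same way.
from sklearn.metrics import accuracy_score

y_test_aligned = y_test.copy()
y_test_aligned.index = predictions.index
print("sklearn accuracy:", accuracy_score(y_test_aligned, predictions))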
X = train_df.loc[:, ["Pclass", "Age", "SibSp", "Parch", "Sex"]]
X
def clean_data(X, y, replacements):
X_copy = X.copy()
y_copy = y.copy()
print("BEFORE")
print("------------------")
print(X_copy.isna().sum())
print(X_copy.isnull().sum())
for replacement in replacements:
X_copy[replacement["col"]] = X_copy[replacement["col"]].fillna(
replacement["value"]
)
print("\n")
print("AFTER")
print("------------------")
print(X_copy.isna().sum())
print(X_copy.isnull().sum())
return X_copy, y_copy
X, y = clean_data(X, y, [{"col": "Age", "value": -1}])
X_train, X_test, y_train, y_test = get_train_and_test_set(X, y)
X_train
def ordinally_encode_categorical_cols(categorical_cols, X_train, X_test):
# Apply ordinal encoder to each column with categorical data
ordinal_encoder = OrdinalEncoder()
X_train[categorical_cols] = ordinal_encoder.fit_transform(X_train[categorical_cols])
X_test[categorical_cols] = ordinal_encoder.transform(X_test[categorical_cols])
ordinally_encode_categorical_cols(["Sex"], X_train, X_test)
X_train
X_train["Sex"].value_counts()
X_test
predictions = get_predictions(X_train, y_train, X_test)
get_accuracy_metrics(predictions, y_test)
# ---
# Confirming that the accuracy drops when using only one input feature
X = train_df.loc[:, ["Pclass"]]
X
X, y = clean_data(X, y, [])
X_train, X_test, y_train, y_test = get_train_and_test_set(X, y)
X_train
predictions = get_predictions(X_train, y_train, X_test)
get_accuracy_metrics(predictions, y_test)
# The accuracy really did drop quite a bit. So now I'm sure that the code is working correctly.
# Thus, I can now test with more features.
# -----------------------------------
# Testing with features: `['Pclass', 'Age', 'SibSp', 'Parch']`
X = train_df.loc[:, ["Pclass", "Age", "SibSp", "Parch"]]
y = train_df.loc[:, "Survived"]
X
y
X["Age"] = X["Age"].fillna(-1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, train_size=0.8)
X_test
y_test
y_test.index
clf = RandomForestClassifier(n_estimators=10)
clf = clf.fit(X_train, y_train)
predictions = pd.Series(clf.predict(X_test))
predictions
get_accuracy_metrics(predictions, y_test)
# -------------------
# Testing with the same feature as before, but also including `Sex`
X = train_df.loc[:, ["Pclass", "Age", "SibSp", "Parch", "Sex"]]
y = train_df.loc[:, "Survived"]
X
y
X, y = clean_data(X, y, [{"col": "Age", "value": -1}])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, train_size=0.8)
X_train
ordinally_encode_categorical_cols(["Sex"], X_train, X_test)
X_train
predictions = get_predictions(X_train, y_train, X_test)
get_accuracy_metrics(predictions, y_test)
# -------------
# Testing with the same feature as before, but also including `Fare`
X = train_df.loc[:, ["Pclass", "Age", "SibSp", "Parch", "Sex", "Fare"]]
y = train_df.loc[:, "Survived"]
X
y
X, y = clean_data(X, y, [{"col": "Age", "value": -1}])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, train_size=0.8)
X_train
ordinally_encode_categorical_cols(["Sex"], X_train, X_test)
predictions = get_predictions(X_train, y_train, X_test)
get_accuracy_metrics(predictions, y_test)
# ---
# Testing with the same feature as before, but also including `Embarked`
X = train_df.loc[:, ["Pclass", "Age", "SibSp", "Parch", "Sex", "Fare", "Embarked"]]
y = train_df.loc[:, "Survived"]
X
y
X, y = clean_data(
X, y, [{"col": "Age", "value": -1}, {"col": "Embarked", "value": "N/A"}]
)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, train_size=0.8)
X_train
ordinally_encode_categorical_cols(["Sex", "Embarked"], X_train, X_test)
X_train
X_train["Embarked"].value_counts()
predictions = get_predictions(X_train, y_train, X_test)
get_accuracy_metrics(predictions, y_test)
|
# Data Manipulation Libraries
import pandas as pd
import numpy as np
import csv
# EDA libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Date libraries
import calendar
from datetime import datetime
# Jupyter Notebook Configuration
sns.set()
import warnings
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
ecom = pd.read_csv("/kaggle/input/ecommerce-data/data.csv", encoding="unicode_escape")
ecom.head()
ecom.shape
ecom.info()
ecom["InvoiceDate"] = pd.to_datetime(ecom["InvoiceDate"])
ecom.info()
# Number of orders
ecom.InvoiceNo.nunique()
# Total quantity purchased
ecom.Quantity.sum()
ecom["InvoiceRevenue"] = ecom["Quantity"] * ecom["UnitPrice"]
ecom.head()
# Quantiles and ranking (optional, but I thought it was nice to show the manager)
ecom["Quantiles"] = pd.qcut(ecom.InvoiceRevenue, 5)
ecom["Percentile_Rank"] = ecom.InvoiceRevenue.rank(pct=True)
ecom["Rank"] = ecom.InvoiceRevenue.rank()
ecom.head()
ecom.describe()
# Checking the top revenues (helps to understand the rankings created)
ecom.sort_values(by="InvoiceRevenue", ascending=False).head()
# Finally, on to the RFM analysis...
# Creating a new dataframe to get recency
df_recency = (
ecom[["InvoiceDate", "CustomerID"]] # Filtrei somente as colunas que vou usar
.groupby("CustomerID")["InvoiceDate"] # Agrupei por consumidor x data
.agg("max") # Peguei a compra mais recente dele
.reset_index()
)
df_recency.head()
# use the latest date as the reference
cur_time = df_recency.InvoiceDate.max()
cur_time
# function to calculate the difference between the dates (in months)
def month_diff(cur_time, datetime_val):
"""Calcula quantos meses entre a nossa data de referencia e a da última compra do usuário"""
return 12 * (cur_time.year - datetime_val.year) + (
cur_time.month - datetime_val.month
)
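# Worked example with illustrative dates (not taken from the dataset):
# 12 * (2011 - 2011) + (12 - 10) = 2 months.
print(month_diff(pd.Timestamp("2011-12-09"), pd.Timestamp("2011-10-15")))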
# create the recency column in months
df_recency["Recency"] = df_recency.InvoiceDate.apply(
lambda datetime_val: month_diff(cur_time, datetime_val)
)
df_recency.head()
# To get a sense of the recency distribution, let's look at the descriptive statistics:
df_recency.describe()
# and we can plot the frequency chart
df_recency["Recency"].value_counts().sort_index().plot.bar()
# Drop the date column, since it is no longer needed
df_recency = df_recency.drop("InvoiceDate", axis=1)
# Done, we now have each customer's recency. Next, let's get frequency and monetary value!
# Keep only the relevant columns: we just want to count purchases (frequency) and get spending (monetary)
df_fm = ecom[["CustomerID", "InvoiceNo", "InvoiceRevenue"]]
# Count the orders and take the average spend per purchase
df_fm = (
df_fm.groupby("CustomerID")[["InvoiceNo", "InvoiceRevenue"]]
.agg(
{
"InvoiceNo": "count",
"InvoiceRevenue": "mean",
}
)
.reset_index()
)
df_fm = df_fm.rename(columns={"InvoiceNo": "Frequency", "InvoiceRevenue": "Monetary"})
# Round the values
df_fm.Monetary = df_fm.Monetary.round(2)
# final dataframe
df_fm.head()
# Now we have everything; we just need to merge the datasets:
# join to have the final data df
df_rfv = df_recency.merge(df_fm, on="CustomerID", how="left")
df_rfv = df_rfv.fillna(0)
df_rfv.head()
# # RFM SEGMENTATION VIA CLUSTER
from sklearn.preprocessing import StandardScaler
# feature columns
feature_cols = ["Recency", "Frequency", "Monetary"]
# standardized df for training
standardized_data = df_rfv.copy()
# standardization
scaler = StandardScaler()
scaler.fit(df_rfv[feature_cols])
standardized_features = scaler.transform(df_rfv[feature_cols])
standardized_data[feature_cols] = standardized_features
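# Optional sanity check: after StandardScaler each feature should have mean ~0 and std ~1.
print(standardized_data[feature_cols].mean().round(3))
print(standardized_data[feature_cols].std().round(3))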
# fit K-means clustering on various Ks
from sklearn.cluster import KMeans
# kmeans_kwargs = {"init": "k-means++", "n_init": 3}
sse = []
for k in range(1, 15):
kmeans = KMeans(n_clusters=k) # , **kmeans_kwargs)
kmeans.fit(standardized_data[feature_cols])
sse.append(kmeans.inertia_)
# Elbow method plot
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
plt.plot(range(1, 15), sse)
# plt.xticks(range(1, 10))
plt.xlabel("Number of Clusters")
plt.ylabel("Sum of Squared Error")
plt.show()
from sklearn.metrics import silhouette_score
sil = []
kmax = 10
# dissimilarity would not be defined for a single cluster, thus, minimum number of clusters should be 2
for k in range(2, kmax + 1):
    kmeans = KMeans(n_clusters=k).fit(standardized_data[feature_cols])
    labels = kmeans.labels_
    sil.append(
        silhouette_score(standardized_data[feature_cols], labels, metric="euclidean")
    )
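# Optional: plot the silhouette scores per k as well (a sketch; higher is generally better).
plt.figure(figsize=(10, 6))
plt.plot(range(2, kmax + 1), sil, marker="o")
plt.xlabel("Number of Clusters")
plt.ylabel("Silhouette Score")
plt.show()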
kmeans = KMeans(n_clusters=9)
kmeans.fit(standardized_data[feature_cols])
df_rfv["cluster"] = kmeans.labels_
df_rfv.head()
# Let's check the metrics per cluster
centroid_df = (
    df_rfv.groupby("cluster")[["Recency", "Frequency", "Monetary", "CustomerID"]]
    .agg(
        {
            "Recency": "mean",
            "Frequency": "mean",
            "Monetary": "mean",
            "CustomerID": "nunique",
        }
    )
    .reset_index()
)
centroid_df = centroid_df.rename(columns={"CustomerID": "NumBuyers"})
centroid_df.sort_values(by="Recency", ascending=True)
# Naming the groups
seg_map = [
    "Hibernating",
    "At Risk",
    "Can't Lose",
    "About to Sleep",
    "Need Attention",
    "Loyal Customers",
    "Promising",
    "New Customers",
    "Potential Loyalists",
    "Champions",
]
# Only as many names as clusters are used (the model above was fit with 9 clusters)
centroid_df["Segment"] = seg_map[: len(centroid_df)]
centroid_df = centroid_df.rename(columns={"cluster": "Cluster"})
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
# # Data understanding and preparation
# Read the data
df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
df.head()
num_of_obj, _ = df.shape
df.duplicated().sum()
df.info()
df.describe()
data_viz = df.dropna().copy()
data_viz["VIP"] = data_viz["VIP"].astype(int)
data_viz["Transported"] = data_viz["Transported"].astype(int)
sns.pairplot(
data_viz.drop(
["PassengerId", "HomePlanet", "CryoSleep", "Cabin", "Destination", "Name"],
axis=1,
),
hue="Transported",
)
def func_to_analisys_na(data_frame):
list_with_columns_with_na = []
list_with_count_of_na = []
list_with_fraction_of_na = []
list_with_types_of_columns = []
try:
for column in data_frame.columns:
num_of_miss_values = data_frame[column].isna().sum()
if num_of_miss_values:
                fraction_of_null = round(num_of_miss_values / len(data_frame), 3)
list_with_columns_with_na.append(column)
list_with_count_of_na.append(num_of_miss_values)
list_with_fraction_of_na.append(fraction_of_null)
list_with_types_of_columns.append(data_frame[column].dtype)
except:
print("Check ur data!!!")
df_to_return = pd.DataFrame(
{
"columns": list_with_columns_with_na,
"count_of_na": list_with_count_of_na,
"fraction_of_na": list_with_fraction_of_na,
"type": list_with_types_of_columns,
}
)
df_to_return = df_to_return.sort_values("count_of_na", ascending=False).reset_index(
drop=True
)
return df_to_return
for column in df.select_dtypes(include=["float64"]):
df.hist(column, bins=80)
print()
df.pivot_table(index="CryoSleep")
names_of_columns = func_to_analisys_na(df)
names_of_columns
def split_cabin(x):
if len(str(x).split("/")) < 3:
return ["Missing", "Missing", "Missing"]
else:
return str(x).split("/")
def preporations(data):
data.drop("Name", axis=1, inplace=True)
data["ShoppingMall"].fillna(0, inplace=True)
data["VRDeck"].fillna(0, inplace=True)
data["FoodCourt"].fillna(0, inplace=True)
data["Spa"].fillna(0, inplace=True)
data["RoomService"].fillna(0, inplace=True)
data["TempCabin"] = data["Cabin"].apply(lambda x: split_cabin(x))
data["Deck"] = data["TempCabin"].apply(lambda x: x[0])
data["Side"] = data["TempCabin"].apply(lambda x: x[2])
data.drop(["TempCabin", "Cabin"], axis=1, inplace=True)
data["VIP"].fillna("Missing", inplace=True)
data["HomePlanet"].fillna("Missing", inplace=True)
data["Destination"].fillna("Missing", inplace=True)
data["Age"].fillna(data["Age"].median(), inplace=True)
data["Category_age"] = data["Age"].apply(lambda x: x // 10)
data.drop("Age", axis=1, inplace=True)
data.loc[
(data["CryoSleep"].isna())
& (
data["ShoppingMall"]
+ data["VRDeck"]
+ data["FoodCourt"]
+ data["Spa"]
+ data["RoomService"]
== 0
),
"CryoSleep",
] = data.loc[
(data["CryoSleep"].isna())
& (
data["ShoppingMall"]
+ data["VRDeck"]
+ data["FoodCourt"]
+ data["Spa"]
+ data["RoomService"]
== 0
),
"CryoSleep",
].fillna(
True
)
data["CryoSleep"].fillna(False, inplace=True)
def combo_info(data):
print("First 5 indexes:")
display(cdf.head())
print()
print("func_to_analisys_na:")
    display(func_to_analisys_na(data))
print()
print("Info:")
display(data.info())
cdf = df.copy()
preporations(cdf)
combo_info(cdf)
# # Models
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
roc_curve,
roc_auc_score,
)
X = cdf.drop(["Transported", "PassengerId"], axis=1)
X = pd.get_dummies(X)
y = cdf["Transported"]
tsn = TSNE(n_components=3, init="pca", random_state=7)
d_data = tsn.fit_transform(X)
sns.scatterplot(d_data[:, 0], d_data[:, 2], hue=y)
plt.show()
sns.scatterplot(d_data[:, 1], d_data[:, 2], hue=y)
plt.show()
x_train, x_test, y_train, y_test = train_test_split(
d_data, y, test_size=0.3, random_state=7
)
print("x_train:", x_train.shape, "y_train:", y_train.shape)
from lazypredict.Supervised import LazyClassifier
clf = LazyClassifier(
verbose=0,
ignore_warnings=True,
custom_metric=None,
predictions=False,
random_state=12,
classifiers="all",
)
models, predictions = clf.fit(x_train, x_test, y_train, y_test)
models
# from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import KFold
def stats_of_test(model):
predict = model.predict(x_test)
accuracy = accuracy_score(y_test, predict)
precision = precision_score(y_test, predict)
recall = recall_score(y_test, predict)
return [accuracy, precision, recall]
def train_model(model, splitter, x_data, y_data):
list_with_acc = []
list_with_pre = []
list_with_rec = []
for train_index, test_index in splitter.split(x_data):
        # split into train/test folds
x_train, x_test = x_data[train_index], x_data[test_index]
y_train, y_test = y_data[train_index], y_data[test_index]
        # fit the supplied model
model.fit(x_train, y_train)
predict = model.predict(x_test)
accuracy = accuracy_score(y_test, predict)
precision = precision_score(y_test, predict)
recall = recall_score(y_test, predict)
list_with_acc.append(accuracy)
list_with_pre.append(precision)
list_with_rec.append(recall)
return [
sum(list_with_acc) / len(list_with_acc),
sum(list_with_pre) / len(list_with_pre),
sum(list_with_rec) / len(list_with_rec),
]
def plot_roc_auc(model):
y_pred = model.predict_proba(x_test)[:, 1]
fpr, tpr, _ = roc_curve(y_test, y_pred)
auc = round(roc_auc_score(y_test, y_pred), 4)
plt.figure(figsize=(15, 10))
plt.plot(fpr, tpr, label="Stacking, AUC=" + str(auc))
plt.plot([0, 1], [0, 1])
plt.legend()
plt.show()
gbc_model = GradientBoostingClassifier().fit(x_train, y_train.astype("int8"))
stats_of_test(gbc_model)
plot_roc_auc(gbc_model)
from catboost import CatBoostClassifier
cat_model = CatBoostClassifier(loss_function="Logloss", verbose=0).fit(
x_train, y_train.astype("int8")
)
stats_of_test(cat_model)
cat_model.get_feature_importance(prettified=True)
plot_roc_auc(cat_model)
import xgboost as xgb
xgb_model = xgb.XGBClassifier(n_estimators=10).fit(x_train, y_train)
stats_of_test(xgb_model)
plot_roc_auc(xgb_model)
from lightgbm import LGBMClassifier
lgbm = LGBMClassifier(random_state=1)
lgbm.fit(x_train, y_train.astype("int8"))
stats_of_test(lgbm)
plot_roc_auc(lgbm)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(x_train, y_train.astype("int8"))
stats_of_test(knn)
plot_roc_auc(knn)
from sklearn.ensemble import BaggingClassifier
baggingClf = BaggingClassifier(
base_estimator=CatBoostClassifier(verbose=0), n_estimators=50, random_state=12
)
baggingClf.fit(x_train, y_train.astype("int8").ravel())
stats_of_test(baggingClf)
plot_roc_auc(baggingClf)
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
estimators = [
("lr", LogisticRegression()),
("boosting", CatBoostClassifier(verbose=0)),
("knn", KNeighborsClassifier()),
("lgbm", LGBMClassifier()),
]
stackingClf = StackingClassifier(
estimators=estimators, final_estimator=SVC(probability=True)
)
stackingClf.fit(x_train, y_train.astype("int8").ravel())
stats_of_test(stackingClf)
plot_roc_auc(stackingClf)
from sklearn.ensemble import RandomForestClassifier
estimators_1 = [
("lr", LogisticRegression()),
("boosting", CatBoostClassifier(verbose=0)),
("knn", KNeighborsClassifier()),
("lgbm", LGBMClassifier()),
("forest", RandomForestClassifier()),
]
stackingClf_1 = StackingClassifier(
estimators=estimators_1, final_estimator=SVC(probability=True)
)
stackingClf_1.fit(x_train, y_train.astype("int8").ravel())
stats_of_test(stackingClf_1)
plot_roc_auc(stackingClf_1)
estimators_2 = [
("lr", LogisticRegression()),
("boosting", GradientBoostingClassifier()),
("forest", RandomForestClassifier()),
("knn", KNeighborsClassifier()),
("cat", CatBoostClassifier(verbose=0)),
]
stackingClf_2 = StackingClassifier(
estimators=estimators_2, final_estimator=SVC(probability=True)
)
stackingClf_2.fit(x_train, y_train.astype("int8").ravel())
stats_of_test(stackingClf_2)
plot_roc_auc(stackingClf_2)
estimators_3 = [
("lr", LogisticRegression()),
("boosting", GradientBoostingClassifier()),
("forest", RandomForestClassifier()),
("knn", KNeighborsClassifier()),
("lgbm", LGBMClassifier()),
]
stackingClf_3 = StackingClassifier(
estimators=estimators_3, final_estimator=SVC(probability=True)
)
stackingClf_3.fit(x_train, y_train.astype("int8").ravel())
stats_of_test(stackingClf_3)
plot_roc_auc(stackingClf_3)
import pickle
with open("gradientboosted.pkl", "wb") as f:
pickle.dump(stackingClf_2, f)
with open("gradientboosted.pkl", "rb") as f:
reloaded_model = pickle.load(f)
# # Test dataframe
test_df = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
test_df.head()
test_cdf = test_df.copy()
preporations(test_cdf)
combo_info(test_cdf)
test_X = pd.get_dummies(test_cdf.drop("PassengerId", axis=1))
test_d_data = tsn.fit_transform(test_X)
end_predict = reloaded_model.predict(test_d_data)
pred = list(map(lambda x: False if x == 0 else True, end_predict))
output = pd.DataFrame({"PassengerId": test_cdf.PassengerId, "Transported": pred})
output.to_csv("submission.csv", index=False)
|
## Start by importing the relevant python libraries; pandas, seaborn and matplotlib
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# # Feature abbrevations and full description
# #### Age: Patients Age in years (Numeric)
# #### Sex: Gender (Male : 1; Female : 0) (Nominal)
# #### cp: Type of chest pain experienced by patient. This term categorized into 4 category.
# 0 typical angina, 1 atypical angina, 2 non- anginal pain, 3 asymptomatic (Nominal)
# #### trestbps: patient's level of blood pressure at resting mode in mm/HG (Numerical)
# #### chol: Serum cholesterol in mg/dl (Numeric)
# #### fbs: Blood sugar levels on fasting > 120 mg/dl represents as 1 in case of true and 0 as false (Nominal)
# #### restecg: Result of electrocardiogram while at rest are represented in 3 distinct values
# 0 : Normal 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of >
# 0.05 mV) 2: showing probable or definite left ventricular hypertrophyby Estes' criteria (Nominal)
# #### thalach: Maximum heart rate achieved (Numeric)
# #### exang: Angina induced by exercise 0 depicting NO 1 depicting Yes (Nominal)
# #### oldpeak: Exercise induced ST-depression in relative with the state of rest (Numeric)
# #### slope: ST segment measured in terms of slope during peak exercise
# 0: up sloping; 1: flat; 2: down sloping(Nominal)
# #### ca: The number of major vessels (0–3)(nominal)
# #### thal: A blood disorder called thalassemia
# 0: NULL 1: normal blood flow 2: fixed defect (no blood flow in some part of the heart) 3: reversible defect (a blood flow is observed but it is not normal(nominal)
# #### target: It is the target variable which we have to predict 1 means patient is suffering from heart disease and 0 means patient is normal.
data = pd.read_csv("/kaggle/input/heart-disease-statlog/Heart_disease_statlog.csv")
data.head()
# ## Data cleaning
# #### Search for missing data, zeros or duplicates
data.isna().sum()
# Result shows there is no missing data.
data.duplicated().sum()
# No duplicated data as well
(data == 0).sum()
# Figures look realistic since the blood pressure and cholesterol values are non-zero
# Next we analyse the data types
data.dtypes
data.shape
# data types look properly assigned with 14 columns and 270 rows
# Next we use seaborn to see the data distribution
sns.boxplot(data=data[["age", "trestbps", "chol", "thalach"]])
plt.xticks(rotation=50)
plt.show()
sns.set(rc={"figure.figsize": (10, 5)})
fig, axes = plt.subplots(2, 4)
sns.countplot(data=data, x="sex", ax=axes[0, 0]).set(title="Gender")
sns.countplot(data=data, x="cp", ax=axes[0, 1]).set(title="CP")
sns.countplot(data=data, x="fbs", ax=axes[0, 2]).set(title="FBS")
sns.countplot(data=data, x="restecg", ax=axes[0, 3]).set(title="RESTECG")
sns.countplot(data=data, x="exang", ax=axes[1, 0])
sns.countplot(data=data, x="slope", ax=axes[1, 1])
sns.countplot(data=data, x="ca", ax=axes[1, 2])
sns.countplot(data=data, x="thal", ax=axes[1, 3])
# or use fig.set_size_inches(row,column)
plt.show()
# Let's determine the percentage of each category in selected columns
perc_gender = (data["sex"].value_counts()) / len(data) * 100
print(perc_gender.round())
perc_restecg = (data["ca"].value_counts()) / len(data) * 100
print(perc_restecg.round())
# ### Summary of Countplot results
# - GENDER (sex): 68% of the participants are male, 32% are female
# - CHEST PAIN (cp): 7% of the 270 individuals experienced Typical Angina, 16% experienced Atypical Angina, 29% had Non-Anginal Pain and the rest were Asymptomatic.
# - FASTING BLOOD SUGAR (fbs): 85% of individuals had a fasting blood sugar level of 120 mg/dl or below (fbs = 0).
# - Resting Electrocardiographic Results (restecg): 49% had a Normal result, 51% had Left ventricular hypertrophy while the rest had ST-T wave abnormality.
# - Exercise induced angina (exang): 67% did not experience exercise induced angina, while 33% did.
# - Number of major vessels affected (ca): 59% had 0 affected vessels, 21% had 1, 12% had 2, while 7% had 3.
# #### The next step is to find a correlation between various paramters
# create a heatmap using Seaborn
sns.heatmap(data.corr(), cmap="coolwarm", annot=True)
# -- Correlation result shows no significant correlation between 13 parameters.
# ### Next step is to predict whether an individual is suffering from heart disease or not using SKLEARN
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X = data.drop(columns="target")
y = data["target"]
# Let's split our data using the standard train_test_split function
# -- then we standardize
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
scale = StandardScaler().fit(X_train)
X_train_st = scale.transform(X_train)
X_test_st = scale.transform(X_test)
# we can also normalize......
# and compare the original results, standardized and normalized data to enhance accuracy
norm = MinMaxScaler().fit(X_train)
X_train_norm = norm.transform(X_train)
X_test_norm = norm.transform(X_test)
score = []
X_trains = [X_train, X_train_norm, X_train_st]
X_tests = [X_test, X_test_norm, X_test_st]
# Using the Support Vector Machine SVM we can predict the model accuracy
for X_train_set, X_test_set in zip(X_trains, X_tests):
svc = SVC()
svc.fit(X_train_set, y_train)
y_pred = svc.predict(X_test_set)
score.append(accuracy_score(y_pred, y_test))
pd.DataFrame({"score": score}, index=["Original", "Normalized", "Standardized"])
# Result shows a standardized data is most accurate in this case (SVM)
# #### Let's try again using the XGB classifier
score_b = []
for X_train_set, X_test_set in zip(X_trains, X_tests):
xgc = XGBClassifier()
xgc.fit(X_train_set, y_train)
y_pred = xgc.predict(X_test_set)
score_b.append(accuracy_score(y_test, y_pred))
pd.DataFrame({"score": score_b}, index=["Original", "Normalized", "Standardized"])
# Original data seems most accurate with XGboost
# #### This result is however less accurate than the standardized form of SVC used earlier
# #### So.... let's try once more with the RandomForestClassifier....
score_c = []
for X_train_set, X_test_set in zip(X_trains, X_tests):
rfc = RandomForestClassifier()
rfc.fit(X_train_set, y_train)
y_pred = rfc.predict(X_test_set)
score_c.append(accuracy_score(y_test, y_pred))
pd.DataFrame({"score": score_c}, index=["Original", "Normalized", "Standardized"])
# - Well, the results show that the standardized form of SVC and Random Forest are quite similar and most accurate, with 87% accuracy
# ### Next lets ascertain which of the features used in the prediction are most relevant using 'permutation importance'
perm_importance = permutation_importance(xgc, X_train_set, y_train)
sort = perm_importance.importances_mean.argsort()
df = pd.DataFrame(
perm_importance.importances_mean[sort], X_test.columns[sort], columns=["Value"]
)
# Plot the mean importance of each feature (not the argsort indices)
dt = pd.Series(perm_importance.importances_mean, index=X_test.columns)
dt = dt.sort_values(ascending=True)
dt.plot.bar()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Importing all the important libraries
import pandas as pd
from sklearn.datasets import load_diabetes
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import warnings
warnings.simplefilter(action="ignore")
df = load_diabetes()
# description of the dataset
df["DESCR"]
cols = df.feature_names
target = df.target
data = df.data
data = pd.DataFrame(columns=cols, data=data)
data.head()
data.describe()
data.shape
data.info()
data.isnull().sum()
sns.set_style("darkgrid")
sns.pairplot(data)
plt.title("Scatter plot between BMI and BP of diabetes paitent")
plt.xlabel("BMI")
plt.ylabel("BP")
sns.scatterplot(x="bmi", y="bp", hue="sex", size="age", data=data, palette="coolwarm")
plt.legend(loc="center left", bbox_to_anchor=(1.0, 0.5))
# the feature values are already scaled/encoded, which is why they appear as floats.
target.shape
target = pd.DataFrame(data=target)
target.head()
target.isnull().sum()
# creating the object of the model
lr = LinearRegression()
# splitting the model
x_train, x_test, y_train, y_test = train_test_split(
data, target, test_size=0.25, random_state=105
)
lr.fit(x_train, y_train)
pred = lr.predict(x_test)
print("MSE:", metrics.mean_squared_error(y_test, pred))
print("MAE:", metrics.mean_absolute_error(y_test, pred))
print("R2-Score:", (metrics.r2_score(y_test, pred) * 100))
input_data = np.array(
[
[
-0.016412,
0.050680,
0.127443,
0.097616,
0.016318,
0.017475,
-0.021311,
0.034309,
0.034864,
0.003064,
]
]
)
# The R2-score is quite low, which means the accuracy of the model is not great.
lr.predict(input_data)
plt.figure(figsize=(5, 4))
plt.scatter(x=y_test, y=pred, color="purple")
plt.title("Scatter plot of predicted and true y value")
plt.xlabel("Predicted value")
plt.ylabel("True y value")
res = pd.DataFrame(y_test - pred)
res
plt.title("Residuals")
sns.distplot(res, bins=20, color="purple")
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import VarianceThreshold, SelectFromModel
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.utils import compute_class_weight
from sklearn.metrics import (
ConfusionMatrixDisplay,
RocCurveDisplay,
classification_report,
accuracy_score,
)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import lightgbm as lgb
import warnings
warnings.filterwarnings("ignore")
# ## Load datasets
train_original = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
train_e12 = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test_e12 = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
submit = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
train_e12.head()
# to_add = train_original[train_original["target"] == 1]
# ## Check for missing data
train_e12.info()
# train_e12 = pd.concat([train_e12,to_add],axis=0)
# train_e12
# ## Is the label balanced?
sns.countplot(data=train_e12, x="target")
plt.show()
# ## Calculate weights
classes = train_e12["target"].unique()
classweight = compute_class_weight(
class_weight="balanced", classes=classes, y=train_e12["target"]
)
classweight_dict = {}
for idx, weight in enumerate(classweight):
classweight_dict[idx] = weight / classweight[0]
classweight_dict
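# Toy example (illustrative only): for an imbalanced label vector, compute_class_weight gives the
# minority class a proportionally larger weight, which the dict above then normalizes relative to class 0.
toy_y = np.array([0, 0, 0, 1])
print(compute_class_weight(class_weight="balanced", classes=np.array([0, 1]), y=toy_y))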
# ## Create data and labels
X = train_e12.drop(["id", "target"], axis=1)
y = train_e12["target"]
X_test = test_e12.drop("id", axis=1)
# ## Remove constant and quasi-constant features
sel = VarianceThreshold(threshold=0.01)
sel.fit(X)
features_to_drop = X.columns[~sel.get_support()]
features_to_drop
X = X.drop(features_to_drop, axis=1)
X_test = X_test.drop(features_to_drop, axis=1)
# ## Remove duplicated features
duplicated_feat = []
for i in range(0, len(X.columns)):
col_1 = X.columns[i]
for col_2 in X.columns[i + 1 :]:
if X[col_1].equals(X[col_2]):
duplicated_feat.append(col_2)
print(f"Duplicated features: {duplicated_feat}")
# ## Remove correlated features
def correlation(dataset, threshold=0.8):
col_corr = set()
corr_matrix = dataset.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if abs(corr_matrix.iloc[i, j]) > threshold:
colname = corr_matrix.columns[i]
col_corr.add(colname)
return col_corr
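# Tiny illustration on a synthetic frame: "b" is a scaled copy of "a", so it gets flagged.
demo = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8], "c": [4, 1, 3, 2]})
print(correlation(demo, 0.8))  # expected: {'b'}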
corr_features = correlation(X, 0.8)
print(f"Correlated Features: {corr_features}")
X = X.drop(corr_features, axis=1)
X_test = X_test.drop(corr_features, axis=1)
# ## Feature Importance
rfr = SelectFromModel(
RandomForestClassifier(n_estimators=10, random_state=49), threshold=0.05
)
rfr.fit(X, y)
imp = rfr.estimator_.feature_importances_
df = pd.DataFrame(imp, index=X.columns, columns=["Importance"])
fig, ax = plt.subplots(figsize=(10, 6))
sorted_idx = imp.argsort()
ax.barh(
df.index[sorted_idx],
df["Importance"][sorted_idx],
height=0.8,
facecolor="grey",
alpha=0.8,
edgecolor="k",
)
ax.set_xlabel("Importance score")
ax.set_title("Permutation feature importance")
plt.gca().invert_yaxis()
fig.tight_layout()
plt.show()
to_drop = list(X.columns[~(rfr.get_support())])
print(f"Suggest to drop: {to_drop}")
# ## Add/drop features
def add_features(df):
# df["calc_urea_ratio"] = df["calc"] / df["urea"]
df["calc_osmo_ratio"] = df["calc"] / df["osmo"]
df["calc_ph_ratio"] = df["calc"] / df["ph"]
df["calc_cond_ratio"] = df["calc"] / df["cond"]
# df["calc_gravity_ratio"] = df["calc"] / df["gravity"]
df = df.drop(to_drop, axis=1)
return df
X = add_features(X)
X_test = add_features(X_test)
# ## Scale data
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
X_test = scaler.transform(X_test)
# ## Split data in train and validation set
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.25, random_state=49, stratify=y
)
# ## Model selection, including gridsearch
dtc = DecisionTreeClassifier(
class_weight=classweight_dict, max_depth=3, max_features=None, random_state=49
)
rfc = RandomForestClassifier(
class_weight=classweight_dict,
max_depth=2,
max_features="auto",
n_estimators=80,
random_state=49,
)
svc = SVC(
C=0.1,
class_weight=classweight_dict,
degree=2,
gamma="auto",
kernel="sigmoid",
probability=True,
random_state=49,
)
lgbc = lgb.LGBMClassifier(
class_weight=classweight_dict,
learning_rate=0.01,
max_depth=2,
n_estimators=100,
num_leaves=4,
objective="binary",
random_state=49,
)
lr = LogisticRegression(
penalty="l1",
l1_ratio=0.01,
solver="saga",
C=0.1,
class_weight=classweight_dict,
random_state=49,
)
estimators = [
("dtc", dtc),
("rfc", rfc),
("svc", svc),
("lgbc", lgbc),
("lr", lr),
]
# ## Evaluate models to create weighted ensemble
def evaluate_models(models, X_train, X_val, y_train, y_val):
scores = list()
for name, model in models:
model.fit(X_train, y_train)
yhat = model.predict(X_val)
acc = accuracy_score(y_val, yhat)
scores.append(acc)
return scores
scores = evaluate_models(estimators, X_train, X_valid, y_train, y_valid)
scores
# ## Use VotingClassifier and apply weights
params = {
"voting": ["soft", "hard"],
}
# params = {
# "class_weight": [classweight_dict],
# "kernel": ["rbf","linear","sigmoid","poly"],
# "degree": [2,3],
# "C": [0.01,0.1,1,2],
# "gamma": ['scale', 'auto'],
# "random_state": [49]
# }
model = VotingClassifier(estimators=estimators, weights=scores, flatten_transform=True)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=49)
grid = GridSearchCV(
estimator=model, param_grid=params, scoring="roc_auc", cv=cv, verbose=1
)
grid.fit(X_train, y_train)
best_params = grid.best_params_
best_model = grid.best_estimator_
print(f"Best params: {best_params}")
# ## Make predictions on validation set
y_pred = best_model.predict(X_valid)
print(classification_report(y_valid, y_pred))
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
ConfusionMatrixDisplay.from_predictions(y_valid, y_pred, ax=ax[0])
RocCurveDisplay.from_predictions(y_valid, y_pred, ax=ax[1])
plt.tight_layout()
plt.show()
# ## Define final model with tuned hyperparameters
final_model = model.set_params(**best_params).fit(X, y)
results = final_model.predict(X_test)
submit["target"] = results
# ## Submit results
submit.to_csv("bbg007_submission.csv", index=False)
|
# **GREATEST AMONG 3 VALUES**
a = int(input("enter the 1st number"))
b = int(input("enter the 2 nd number"))
c = int(input("enter the 3rd number"))
if a > b and a > c:
    print("1st number is bigger")
elif b > a and b > c:
    print("2nd number is bigger")
elif c > a and c > b:
    print("3rd number is bigger")
elif a == b == c == 0:
    print("all 3 numbers are zero")
else:
print("3 numbers are equal")
# **another way of writing**
a = int(input("enter the 1st number"))
b = int(input("enter the 2nd number"))
c = int(input("enter the 3rd number"))
largest = a  # use a descriptive name instead of shadowing the built-in max()
if b > largest:
    largest = b
if c > largest:
    largest = c
print("the greatest number is", largest)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Load dataset
# #https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer.html?highlight=load_breast#sklearn.datasets.load_breast_cancer
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer() # Another Ways:X, y = load_breast_cancer(return_X_y=True)
X = cancer.data  # NumPy array of shape (569, 30); the column names are in cancer.feature_names
Y = cancer.target
# print(X)  # Converting between DataFrame and ndarray objects: https://blog.csdn.net/weixin_43145427/article/details/124379972
print(f"The shape of cancer dataset is {X.shape}")
print(f"The shape of label is {Y.shape}")
print(cancer.feature_names) # 30 Features
# print(cancer.feature_names.shape)
# print(cancer.DESCR)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
print(X_train)
# # Standardization (z-score scaling)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
print(X_train)
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
# Build the SVC model
model = SVC()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"The accuracy of the SVC model is {accuracy*100}%")
print(f"The report of the SVC model is \n {classification_report(y_test, y_pred)}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv(
"/kaggle/input/jobathon-analytics-vidhya-health-insurance/Train.csv", index_col="ID"
)
test = pd.read_csv(
"/kaggle/input/jobathon-analytics-vidhya-health-insurance/Test.csv", index_col="ID"
)
print(train.shape)
print(test.shape)
train.isna().sum()
test.isna().sum()
train.head()
combined = pd.concat([train, test])
combined.head()
combined.shape
combined.Response.unique()
combined.Region_Code.nunique()
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(16, 10))
corr = train.corr()
sns.heatmap(corr.abs(), xticklabels=corr.columns, yticklabels=corr.columns, annot=True)
cat_cols = combined.columns[combined.dtypes == "object"]
cat_cols
combined.City_Code.nunique()
combined.Accomodation_Type.unique()
combined.Accomodation_Type = combined.Accomodation_Type.map(
{"Rented": 0, "Owned": 1}
).astype("uint8")
combined.Accomodation_Type.unique()
combined.Is_Spouse.unique()
combined.Is_Spouse = combined.Is_Spouse.map({"No": 0, "Yes": 1}).astype("uint8")
combined.Is_Spouse.unique()
combined[combined["Holding_Policy_Type"].isna()]["Holding_Policy_Duration"].unique()
combined["Holding_Policy_Type"].unique()
combined[combined["Holding_Policy_Duration"] == 0]["Holding_Policy_Duration"]
combined["Holding_Policy_Type"] = combined["Holding_Policy_Type"].fillna(-99)
combined["Holding_Policy_Type"].unique()
combined[combined["Holding_Policy_Duration"] == 0].shape
combined["Holding_Policy_Duration"].unique()
combined["Holding_Policy_Duration"].fillna(-1, inplace=True)
combined["Holding_Policy_Duration"].unique()
combined["Holding_Policy_Duration"].replace("14+", "99", inplace=True)
combined["Holding_Policy_Duration"].unique()
combined["Holding_Policy_Duration"] = (
combined["Holding_Policy_Duration"].astype("float64").astype("int8")
)
combined["Holding_Policy_Duration"].unique()
cat_cols = combined.columns[combined.dtypes == "object"]
cat_cols
combined["Health Indicator"].unique()
combined["Missing_Health_Indicator"] = combined["Health Indicator"].isna()
combined.head()
combined[
(combined["Reco_Insurance_Type"] == "Individual")
& (combined["Upper_Age"] != combined["Lower_Age"])
]
sns.violinplot(x="Reco_Insurance_Type", y="Lower_Age", data=combined)
sns.violinplot(x="Reco_Insurance_Type", y="Upper_Age", data=combined)
sns.countplot(x="Reco_Insurance_Type", hue="Response", data=combined)
combined["Health_Indicator"] = combined["Health Indicator"].fillna("U")
sns.countplot(x="Health_Indicator", hue="Response", data=combined)
def process_age(row):
if row["Upper_Age"] == row["Lower_Age"]:
return row["Lower_Age"]
else:
return row["Upper_Age"] + row["Lower_Age"]
# combined["Total_Age"] = combined.apply(process_age, axis=1)
# sns.catplot(x="Total_Age",y="Reco_Policy_Premium",data=combined)
sns.boxplot(x="Health_Indicator", y="Reco_Policy_Premium", data=combined)
combined["Health_Indicator"].unique()
sns.countplot(x="Response", data=combined)
combined.Reco_Insurance_Type.unique()
combined.Reco_Insurance_Type = combined.Reco_Insurance_Type.map(
{"Individual": 1, "Joint": 2}
).astype("uint8")
combined.Health_Indicator = combined.Health_Indicator.map(
{
"U": 0,
"X1": 1,
"X2": 2,
"X3": 3,
"X4": 4,
"X5": 5,
"X6": 6,
"X7": 7,
"X8": 8,
"X9": 9,
}
).astype("uint8")
import category_encoders as ce
from category_encoders.cat_boost import CatBoostEncoder
cols = list(combined.columns)
cols.remove("Response")
cols
# combined.City_Code.unique()
# combined.City_Code = combined.City_Code.astype('category').cat.codes
# policy_encoder = ce.HashingEncoder(cols=["Reco_Policy_Cat"], n_components=5)
# combined = policy_encoder.fit_transform(combined)
def process_city_region(row):
city_code = str(row["City_Code"])
region = str(len(str(row["Region_Code"])))
return int(city_code + region)
# combined["City_Region"] = combined.apply(process_city_region, axis=1).astype("uint8")#
# oh_features = ["Reco_Policy_Cat", "Holding_Policy_Type"]
# combined_dropped = pd.get_dummies(combined, columns=oh_features, prefix=oh_features, drop_first=True)
combined_dropped = combined.drop(["Health Indicator"], axis=1)
combined_dropped.dtypes
from numpy import mean
# combined_dropped["Mean_Age"] = combined[["Lower_Age","Upper_Age"]].apply(mean, axis=1)
combined_dropped.dtypes
y = train.Response.astype("uint8")
combined_dropped.drop(["Response"], inplace=True, axis=1)
X = combined_dropped.iloc[:50882]
test_data = combined_dropped.iloc[50882:]
city_encoder = ce.LeaveOneOutEncoder(cols=["City_Code", "Region_Code"])
X = city_encoder.fit_transform(X, y)
test_data = city_encoder.transform(test_data)
X.head()
# X.drop(["Upper_Age", "Lower_Age"], inplace=True, axis=1)
X.dtypes
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
model = XGBClassifier(
objective="binary:logistic",
use_label_encoder=False,
tree_method="gpu_hist",
)
scores = cross_val_score(model, X, y, scoring="accuracy", cv=cv)
mean(scores) * 100
scores
from sklearn.model_selection import RandomizedSearchCV
params = {
"learning_rate": [0.03, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30],
"max_depth": [4, 5, 6, 8, 10, 12, 13, 15],
"min_child_weight": [1, 3, 5, 7, 9, 10, 12, 13, 15],
"gamma": [0.0, 0.1, 0.2, 0.3, 0.4],
"max_bin": [512],
"colsample_bytree": [0.3, 0.4, 0.5, 0.7, 0.8, 0.85, 0.9],
"subsample": [0.3, 0.4, 0.5, 0.7, 0.8, 0.85, 0.9],
"n_estimators": [300, 500, 750, 1000, 1250, 1500, 1750, 2000],
}
random_search = RandomizedSearchCV(
model,
param_distributions=params,
n_iter=5,
scoring="roc_auc",
n_jobs=-1,
cv=5,
verbose=3,
)
random_search.fit(X, y)
random_search.best_params_
random_search.best_estimator_
xgb_classifier = XGBClassifier(
base_score=0.5,
booster="gbtree",
colsample_bylevel=1,
colsample_bynode=1,
colsample_bytree=0.4,
gamma=0.4,
gpu_id=0,
importance_type="gain",
interaction_constraints="",
learning_rate=0.3,
max_bin=512,
max_delta_step=0,
max_depth=10,
min_child_weight=3,
monotone_constraints="()",
n_estimators=1500,
n_jobs=2,
num_parallel_tree=1,
random_state=0,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
subsample=0.5,
tree_method="gpu_hist",
use_label_encoder=False,
validate_parameters=1,
verbosity=None,
)
from sklearn.model_selection import cross_val_score
score = cross_val_score(xgb_classifier, X, y, cv=5)
score
xgb_classifier.fit(X, y)
predictions = xgb_classifier.predict(test_data)
output = pd.DataFrame({"ID": test_data.index, "Response": predictions})
output.to_csv("xgb5.csv", index=False)
import lightgbm as lgb
from lightgbm import LGBMClassifier
params = {
"learning_rate": 0.03,
"num_leaves": 89,
"boosting_type": "gbdt",
"objective": "binary",
"metric": "binary_logloss,auc",
"max_depth": 6,
"feature_fraction": 0.85,
"bagging_freq": 10,
"bagging_fraction": 0.85,
# "n_estimators":2500,
"max_bin": 255,
"subsample_for_bin": 50000,
"min_data_in_leaf": 75,
"min_sum_hessian_in_leaf": 5.0,
"device": "gpu",
"verbose": -1,
"gpu_platform_id": 0,
"gpu_device_id": 0
# "early_stopping_rounds":10
}
params1 = {
"learning_rate": 0.01,
"num_leaves": 120,
"boosting_type": "gbdt",
"objective": "binary",
"metric": "binary_logloss,auc",
"max_depth": 8,
"n_estimators": 500,
"max_bin": 75,
"subsample_for_bin": 10000,
"min_data_in_leaf": 60,
"min_sum_hessian_in_leaf": 7.5,
"min_split_gain": 1,
"min_child_weight": 2,
"min_child_samples": 9,
"subsample": 0.995,
"alpha": 0.001,
"device": "gpu",
"gpu_platform_id": 0,
"gpu_device_id": 0,
"verbose": -1,
}
dftrainLGB = lgb.Dataset(data=X, label=y)
history = lgb.cv(train_set=dftrainLGB, params=params)
for metric in history:
print(metric, history[metric][-1] * 100)
lgb_model = LGBMClassifier(**params)
lgb_model.fit(X, y)
preds = lgb_model.predict(test_data)
output = pd.DataFrame({"ID": test_data.index, "Response": preds})
output.to_csv("lgb2.csv", index=False)
from sklearn.metrics import roc_auc_score
y_pred = lgb_model.predict(X)
print(roc_auc_score(y, y_pred) * 100)  # y_true first, then the predictions (this is the training AUC)
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy.stats import skew
from sklearn.preprocessing import PowerTransformer
from sklearn.linear_model import LinearRegression
import pylab as p
from sklearn.metrics import accuracy_score
# # 1. Importing Modules
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
sample = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
sample.head()
sample = sample.drop(columns=["id"])
y_test = sample.iloc[:, 0]
test.head(10)
test.shape
train.head(10)
train = train.drop(columns=["id"])
train.head(10)
train.shape
train.corr()["target"]
train.describe()
train.info()
train.columns
# # 2. Outlier Detection
#
sns.boxplot(train["gravity"])
# **Removing Outliers from gravity**
train = train[train["gravity"] < 1.036]
sns.boxplot(train["gravity"])
sns.boxplot(train["ph"])
train = train[train["ph"] < 7.1]
sns.boxplot(train["ph"])
sns.boxplot(train["osmo"])
sns.boxplot(train["cond"])
sns.boxplot(train["urea"])
sns.boxplot(train["calc"])
# # 3. Identifying the distributions
fig = sm.qqplot(train["gravity"], line=45, fit=True)
fig = sm.qqplot(train["osmo"], line=45, fit=True)
fig = sm.qqplot(train["urea"], line=45, fit=True)
fig = sm.qqplot(train["calc"], line=45, fit=True)
fig = sm.qqplot(train["cond"], line=45, fit=True)
# **Data is not perfectly normally distributed**
# **converting it into normally distributed data**
train.head(10)
x = train.iloc[:, :-1]
x
y = train.iloc[:, -1]
y
pt = PowerTransformer(method="yeo-johnson")
x = pt.fit_transform(x)
print("\nSkewness for data : ", skew(x))
x.shape
y1 = 1.0 / (np.sqrt(2.0 * np.pi)) * np.exp(-0.5 * (x) ** 2)
p.plot(x, y1, "*")
# **Now the data is approximately normally distributed**
# # 4. Training the model
lr = LinearRegression()
lr.fit(x, y)
x_test = pt.transform(test.iloc[:, 1:])  # apply the same power transform used on the training features
# # 5. Predictions
y_pred = lr.predict(x_test)
np.savetxt("Submission.csv", y_pred, delimiter=",")
submission = pd.read_csv("/kaggle/working/Submission.csv")
submission.head()
submission.columns
|
# # Doing the data manipulation and feature engineering from project 2, comparing Pandas and Polars
import polars as pl
import pandas as pd
import numpy as np
import time
from datetime import timedelta
from mlforecast import MLForecast
from statsforecast import StatsForecast
from sklearn.preprocessing import OrdinalEncoder
from numba import njit
from window_ops.rolling import rolling_mean, rolling_std
import lightgbm as lgb
import random
random.seed(415)
data_path = "/kaggle/input/sales-prices-data/"
# ## Reading in sales and price data and merging
# ### Pandas
start_time = time.monotonic()
# Load sales data
data_pd = pd.read_parquet(f"{data_path}/sales_data.parquet")
# Load Prices data
prices_pd = pd.read_parquet(f"{data_path}/prices.parquet")
prices_pd = prices_pd.reset_index(drop=False)
# Merge prices data
data_with_prices_pd = pd.merge(
data_pd.reset_index(), prices_pd, how="left", on=["date", "store_id", "item_id"]
).set_index(["date", "id"])
end_time = time.monotonic()
pd_elapsed = end_time - start_time
print(f"Time taken = {round(pd_elapsed, 2)} s")
# ### Polars
start_time = time.monotonic()
# Load sales data
data_pl = pl.read_parquet(f"{data_path}/sales_data.parquet")
# Load Prices data
prices_pl = pl.read_parquet(f"{data_path}/prices.parquet")
# Merge prices data
data_with_prices_pl = data_pl.join(
prices_pl, on=["date", "store_id", "item_id"], how="left"
)
end_time = time.monotonic()
pl_elapsed = end_time - start_time
print(f"Time taken = {round(pl_elapsed, 2)} s")
print(f"Polars time improvement: {int(100 * (pd_elapsed - pl_elapsed)/pd_elapsed)} %")
# ## Mean sales per day
# ### Pandas
start_time = time.monotonic()
mean_sales_df = (
data_with_prices_pd.groupby(["dept_id", "item_id", "store_id"])
.sales.mean()
.to_frame()
.reset_index()
)
end_time = time.monotonic()
pd_elapsed = end_time - start_time
print(f"Time taken = {round(pd_elapsed, 2)} s")
# ### Polars
start_time = time.monotonic()
mean_sales_df = (
data_with_prices_pl.lazy()
.groupby(["dept_id", "item_id", "store_id"])
.agg(pl.col("sales").mean())
.sort("sales", descending=True)
.collect()
)
end_time = time.monotonic()
pl_elapsed = end_time - start_time
print(f"Time taken = {round(pl_elapsed, 2)} s")
print(f"Polars time improvement: {int(100 * (pd_elapsed - pl_elapsed)/pd_elapsed)} %")
# ## Filter out entries before first sale
# ### Pandas
start_time = time.monotonic()
print("Length of unfiltered dataframe: ", len(data_with_prices_pd))
data_with_prices_pd = data_with_prices_pd[
data_with_prices_pd.groupby("id").sales.cumsum() > 0
]
print("Length of filtered dataframe: ", len(data_with_prices_pd))
end_time = time.monotonic()
pd_elapsed = end_time - start_time
print(f"Time taken = {round(pd_elapsed, 2)} s")
# ### Polars
start_time = time.monotonic()
print("Length of unfiltered dataframe: ", len(data_with_prices_pl))
data_with_prices_pl = (
data_with_prices_pl.lazy()
.with_columns(pl.col("sales").cumsum().over("id").alias("cum_sales"))
.filter(pl.col("cum_sales") != 0)
.drop("cum_sales")
.collect()
)
print("Length of filtered dataframe: ", len(data_with_prices_pl))
end_time = time.monotonic()
pl_elapsed = end_time - start_time
print(f"Time taken = {round(pl_elapsed, 2)} s")
print(f"Polars time improvement: {int(100 * (pd_elapsed - pl_elapsed)/pd_elapsed)} %")
# Observation - Perhaps there's a better/faster way to do this in Polars, but this was the only operation I noticed that was slower in Polars than in Pandas.
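# Hedged sketch of a possible shortcut (untested here): Polars also allows window expressions
# directly inside filter(), which would avoid materializing and dropping the temporary column.
# data_with_prices_pl = data_with_prices_pl.filter(
#     pl.col("sales").cumsum().over("id") > 0
# )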
# ## Data prep for lightGBM model
# ### Pandas
start_time = time.monotonic()
val_pd = (
data_with_prices_pd.reset_index()
.groupby("id")
.tail(28)
.rename(
columns={
"date": "ds",
"id": "unique_id",
"sales": "y",
}
)
)
train_pd = (
data_with_prices_pd.reset_index()
.drop(val_pd.index)
.rename(
columns={
"date": "ds",
"id": "unique_id",
"sales": "y",
}
)
)
# label encode categorical features
cat_feats = ["unique_id", "item_id", "dept_id", "cat_id"]
enc_cat_feats = [f"{feat}_enc" for feat in cat_feats]
encoder = OrdinalEncoder()
train_pd[enc_cat_feats] = encoder.fit_transform(train_pd[cat_feats])
val_pd[enc_cat_feats] = encoder.transform(val_pd[cat_feats])
numeric_features = ["sell_price"]
reference_cols = ["unique_id", "ds", "y"]
features = reference_cols + enc_cat_feats + numeric_features
train_pd = train_pd[features]
val_pd = val_pd[features]
end_time = time.monotonic()
pd_elapsed = end_time - start_time
print(f"Time taken = {round(pd_elapsed, 2)} s")
# ### Polars
start_time = time.monotonic()
# OrdinalEncoder doesn't like my Polars DFs. Was stuck here, but got some inspiration from another student (props to Brian Tani!)
# label encode categorical features
data_pl_prepped = data_with_prices_pl.rename(
{
"date": "ds",
"id": "unique_id",
"sales": "y",
}
)
cat_feats = ["unique_id", "item_id", "dept_id", "cat_id"]
enc_cat_feats = [f"{feat}_enc" for feat in cat_feats]
for col, enc_col in zip(cat_feats, enc_cat_feats):
data_pl_prepped = data_pl_prepped.with_columns(
pl.col(col).cast(pl.Categorical).to_physical().cast(pl.Int32).alias(enc_col)
)
numeric_features = ["sell_price"]
reference_cols = ["unique_id", "ds", "y"]
features = reference_cols + enc_cat_feats + numeric_features
data_pl_prepped = data_pl_prepped[features]
# Split into val and train
val_start_date = data_pl_prepped["ds"].max() - timedelta(days=28)
val_pl = data_pl_prepped.filter(pl.col("ds") >= val_start_date).to_pandas()
train_pl = data_pl_prepped.filter(pl.col("ds") < val_start_date).to_pandas()
end_time = time.monotonic()
pl_elapsed = end_time - start_time
print(f"Time taken = {round(pl_elapsed, 2)} s")
# Note: Converted to pandas at the end as I don't think the model will accept Polars DFs. I couldn't get it to work anyway...
print(f"Polars time improvement: {int(100 * (pd_elapsed - pl_elapsed)/pd_elapsed)} %")
# ## Define forecasting and plotting functions to check the result
@njit
def rollingmean7d(x):
return rolling_mean(x, window_size=7)
@njit
def rollingmean28d(x):
return rolling_mean(x, window_size=28)
@njit
def rollingmean180d(x):
return rolling_mean(x, window_size=180)
@njit
def rollingstd7d(x):
return rolling_std(x, window_size=7)
@njit
def rollingstd28d(x):
return rolling_std(x, window_size=28)
@njit
def rollingstd180d(x):
return rolling_std(x, window_size=180)
def run_model(train, val):
model_params = {
"verbose": -1,
"num_leaves": 256,
"n_estimators": 50,
"objective": "tweedie",
"tweedie_variance_power": 1.1,
}
models = [
lgb.LGBMRegressor(**model_params),
]
fcst = MLForecast(
models=models,
freq="D",
lags=[
7,
28,
180,
],
lag_transforms={
7: [rollingmean7d, rollingstd7d],
28: [rollingmean28d, rollingstd28d],
180: [rollingmean180d, rollingstd180d],
},
date_features=["dayofweek"],
)
fcst.fit(
train,
id_col="unique_id",
time_col="ds",
target_col="y",
dropna=False,
static_features=enc_cat_feats,
)
return fcst.predict(28, dynamic_dfs=[val[["ds", "unique_id", "sell_price"]]])
def plot_predictions(train, val, predictions):
# plot the last 45 days of the training set, the validation set, and the predictions
plot_data = pd.concat(
[
train.groupby("unique_id").tail(45)[["unique_id", "ds", "y"]],
val[["unique_id", "ds", "y"]],
predictions,
]
)
# for some reason, MLForecast doesn't have this awesome plotting method!
StatsForecast.plot(plot_data)
# ## Testing (Pandas)
predictions_pd = run_model(train_pd, val_pd)
plot_predictions(train_pd, val_pd, predictions_pd)
# ## Testing (Polars)
predictions_pl = run_model(train_pl, val_pl)
plot_predictions(train_pl, val_pl, predictions_pl)
|
# ## Exploratory Data Analysis | Reinforcing Skills
# Let's import the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
# **NumPy: NumPy is a library used for numerical computation in the Python programming language. It was developed to perform fast and efficient operations on large, multi-dimensional arrays and matrices, and it is frequently used in mathematical, scientific, and data analytics applications.**
# **Seaborn: Seaborn is a statistical graphics library used for data visualization in Python. It is built on top of matplotlib and offers a higher-level interface that makes visualization easier and faster, along with themes, color palettes, and plot types tailored to statistical graphics.**
# **Pandas: Pandas is a library used for data analytics and data processing in Python. It provides high-performance, user-friendly data structures (Series and DataFrames) and simplifies data manipulation, cleaning, transformation, and analysis. It is commonly used in data analytics, data science, and machine learning projects.**
# Let's load the dataset from our current directory, turn it into a DataFrame, and assign it to the variable df. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
# Display the first 5 observations of the DataFrame.
df.head()
# Let's see how many features and how many observations the DataFrame consists of.
# The output is in the form (number of rows, number of columns).
# Each row represents an observation, while each column represents a feature.
df.shape
# Let's display the data types of the variables in the DataFrame and its memory usage.
df.info()
# This gives access to detailed information about the DataFrame.
# The output includes, for each column, its name, data type, number of non-null values, memory usage, and the total number of rows.
# Shows the data types of the columns.
df.dtypes
# Let's display the basic statistics of the numerical variables in the DataFrame.
# Reasoning from the mean and standard deviation values, let's form an idea of how much variance each variable has.
df.describe()
# ***Variance is the square of the standard deviation.
# It expresses how the data points are spread around the mean and determines the width or spread of the distribution. A higher variance means the data points lie farther from the mean, giving a wider distribution, while a lower variance means they lie closer to the mean, giving a narrower distribution.***
# ****For the sepal_length variable the mean is 5.843333 and the standard deviation is 0.828066. Its variance can be considered moderate.****
# ****For the sepal_width variable the mean is 3.054000 and the standard deviation is 0.433594. Its variance can be considered low.****
# ****For the petal_length variable the mean is 3.758667 and the standard deviation is 1.764420. Its variance can be considered high.****
# ****For the petal_width variable the mean is 1.198667 and the standard deviation is 0.763161. Its variance can be considered moderate.****
#
# ****Based on these observations, the petal_length variable has a higher variance than the other variables.****
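# A quick way to confirm that ranking (a minimal sketch; assumes the df loaded above):
print(df.var(numeric_only=True).sort_values(ascending=False))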
# Let's observe how many missing values each feature of the DataFrame contains.
# Counts the missing values (NaN or null) in each column, i.e. each feature.
df.isnull().sum()
# Total number of missing values across all features of the DataFrame
df.isnull().sum().sum()
# Let's draw a correlation matrix to show whether the numerical variables are correlated, and reason about the correlation coefficients.
# Between which two variables is the strongest positive relationship?
df.corr()
# ****The existence, direction, and strength of the relationship between two or more
# variables are determined with correlation analysis.
# If two variables are examined, the correlation coefficient is used.
# If there are more than two, multiple or partial correlation coefficients are examined.
# The correlation coefficient takes values in the range -1 < r < 1.****
# ****There is a strong positive (same-direction) relationship between petal_length and petal_width. The closer the coefficient gets to 1, the stronger the relationship.****
# Let's draw a heatmap to read the correlation coefficients more easily.
kolerasyon_matrisi = df.corr()
sns.heatmap(kolerasyon_matrisi, annot=True, cmap="coolwarm")
# ****sns.heatmap: used to visualize the correlation matrix.
# When annot is True, the correlation coefficients are written inside the cells.
# cmap='coolwarm' sets the color map; the 'coolwarm' map transitions between cool and warm colors.****
# ****viridis: a color scale in green and blue tones****
# ****plasma: a color scale in purple and red tones****
# ****magma: a color scale in black and yellow tones****
# ****inferno: a color scale in black and orange tones****
# ****cividis: a color scale in yellow and black tones****
# ****RdYlBu: a color scale running from red through yellow to blue****
# ****BuPu: a color scale running from blue to purple****
# Let's display the unique values of our DataFrame's target variable ("species", referred to as "variety" in the exercise prompts).
df.species.unique()
# **The unique values of the target variable show which distinct classes exist in the dataset.**
# Let's display how many unique values the target variable contains.
df.species.nunique()
# We can see that the sepal_width and sepal_length variables in the DataFrame are continuous. Let's first use a scatterplot to visualize these two continuous variables.
sns.scatterplot(data=df, x="sepal_width", y="sepal_length")
# **Continuous variables are numerical variables that can take any value, including fractional, decimal, or real numbers.**
# To examine the same two variables from a different angle together with their frequencies, let's visualize them with a jointplot.
sns.jointplot(data=df, x="sepal_width", y="sepal_length")
# Combines a scatter plot, histogram, and density plot to show the distribution of the two variables in more detail.
# Let's visualize the same two variables with a scatterplot again, but this time break them down by the target variable using the hue parameter.
# Could the three colors be clustered using the sepal variables? Let's think about how separable they are.
sns.scatterplot(data=df, x="sepal_width", y="sepal_length", hue="species")
# Let's use the value_counts() function to check how balanced our DataFrame is.
df.species.value_counts()
# **This output shows that the three species "setosa", "versicolor", and "virginica" each appear 50 times in the DataFrame.**
# Draw a violin plot to examine the distribution of the sepal_width variable.
# What does this distribution tell us? Can we say it is a normal distribution?
sns.violinplot(data=df, x="sepal_width")
# **It can be used to compare the data distributions of different categories, to examine properties such as symmetry or skewness, to visualize density information, and to take a deeper look into the data.**
# **The arithmetic mean, mode, and median are equal to each other.
# The maximum point of the curve is the arithmetic mean (and therefore also the mode and median).
# The curve is symmetric about the arithmetic mean.
# Since it is symmetric, we can call it a normal distribution.**
# To understand it better, let's draw a distplot of sepal_width.
# sns.distplot(df['sepal_width'])
# distplot has been removed from the current version of the seaborn library.
sns.histplot(df["sepal_width"])
# ****sns.histplot draws the histogram of the "sepal_width" variable. A histogram is a chart type used to visualize the distribution of a variable: the horizontal axis represents the values of the variable, while the vertical axis represents their frequency (how many times they occur) or density.****
# ****This is a useful chart type for visualizing the distribution of the "sepal_width" variable and understanding which ranges its values are concentrated in.****
# Let's visualize the distribution of sepal_length for the three flower species with three violin plots in a single line.
sns.violinplot(data=df, x="species", y="sepal_length")
# How many observations does our DataFrame contain for each flower species?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually, let's pass the species column to the sns.countplot() function.
sns.countplot(data=df, x="species")
# **Shows how many times each class is repeated, i.e. how many samples in the DataFrame belong to each class. It is therefore used to visually understand the distribution and class frequencies of the "species" variable.**
# Let's visualize the sepal_length and sepal_width variables with sns.jointplot and examine the distribution and the regions where its frequency is high.
sns.jointplot(data=df, x="sepal_width", y="sepal_length")
# **Draws a joint plot consisting of a scatter plot showing the relationship between the two variables together with their histograms. This can be used to visually examine the distribution, densities, and relationship of "sepal_width" and "sepal_length".**
# **In the joint plot, the regions where the points are dense represent the regions where the frequencies are high.**
# Let's add the kind="kde" parameter to the visualization from the previous cell. This turns the point-based view into a density-oriented one.
sns.jointplot(data=df, x="sepal_width", y="sepal_length", kind="kde")
# **The contours are sparser in regions of low density and closer together in regions of high density.**
# Let's plot the distributions of the petal_length and petal_width variables with a scatterplot.
sns.scatterplot(data=df, x="petal_length", y="petal_width")
# **A scatter plot is a chart made up of points that is used to determine the relationship between two different values.**
# Let's add a third dimension to the same visualization with the hue parameter.
sns.scatterplot(data=df, x="petal_length", y="petal_width", hue="species")
# Let's implement the sns.lmplot() visualization with the petal_length and petal_width variables, and answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(data=df, x="petal_length", y="petal_width")
# The closer the points are to the line, the stronger the relationship.
# To reinforce the answer to this question, let's print the correlation coefficient between the two variables.
kor_katsayi = df["petal_length"].corr(df["petal_width"])
print(kor_katsayi)
# Let's create a new total length feature by adding petal_length and sepal_length.
df["total_length"] = df["petal_length"] + df["sepal_length"]
# **We added a new column named "total_length" by summing the "petal_length" and "sepal_length" columns and assigned the total length values to it.**
# Let's print the mean of total_length.
total_length_ort = df["total_length"].mean()
print(total_length_ort)
# Let's print the standard deviation of total_length.
std_total_length = df["total_length"].std()
print(std_total_length)
# Let's print the maximum value of sepal_length.
max_deger = df["sepal_length"].max()
print(max_deger)
# Let's print the observations where sepal_length is greater than 5.5 and the species is setosa.
sonuc = df[(df["species"] == "Iris-setosa") & (df["sepal_length"] > 5.5)]
print(sonuc)
# Let's print only the sepal_length and sepal_width variables (and their values) of the observations where petal_length is less than 5 and the species is virginica.
sonuc = df[(df["species"] == "Iris-virginica") & (df["petal_length"] < 5)]
sonuc[["sepal_length", "sepal_width"]]
# Let's group by our target variable species and display the means of our variables.
df.groupby("species").mean()
# Let's group by the target variable species and print the standard deviation of only the petal_length variable.
std = df.groupby("species")["petal_length"].std()
print(std)
|
# ## Loading data
import pandas as pd
from sklearn.metrics import mean_squared_error
# Load the dataset
data = pd.read_csv("/kaggle/input/co2-emissions/CO2 Emissions.csv")
# Print the first 5 rows of the dataset
print(data.head())
# ## Explore the data
print(data.head())
print(data.describe())
# ## Divide the data into the training and test datasets
from sklearn.model_selection import train_test_split
X = data[
[
"Engine Size(L)",
"Cylinders",
"Fuel Consumption City (L/100 km)",
"Fuel Consumption Hwy (L/100 km)",
"Fuel Consumption Comb (mpg)",
"Fuel Consumption Comb (mpg)",
]
]
y = data["CO2 Emissions(g/km)"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# ## Normalizing / Scaling Data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# ## Implement the DL model
from keras.models import Sequential
from keras.layers import Dense
model = Sequential(
[
Dense(64, activation="relu", input_shape=(X_train.shape[1],)),
Dense(32, activation="relu"),
Dense(1),
]
)
model.compile(optimizer="adam", loss="mean_squared_error")
# ## Compile the model
from keras.optimizers import Adam
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="mse")
# ## Train the best model:
history = model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2)
# ## Evaluate the quality of the trained (fitted) model using the test dataset
loss = model.evaluate(X_test, y_test)
print("Test Loss:", loss)
# ## Make predictions using the trained model
predictions = model.predict(X_test)
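# The MSE loss above is in squared units of g/km; reporting the RMSE puts the error back on
# the original scale (a small sketch using the mean_squared_error imported at the top):
import numpy as np
rmse = np.sqrt(mean_squared_error(y_test, predictions.ravel()))
print("Test RMSE (g/km):", rmse)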
|
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/the-boston-houseprice-data/boston.csv")
df.shape
df.columns
df.head(10)
print(df.info())
print(df.isnull().sum())
for column in df.columns:
print(f"{column} unique values:")
print(df[column].unique())
print(f"{column} missing values:")
print(df[column].isnull().sum())
df["MEDV"].value_counts()
import matplotlib.pyplot as plt
# plot a scatter plot between two variables
sns.scatterplot(x="RM", y="MEDV", data=df)
# show the plot
plt.show()
# plot a scatter plot with 'MEDV' as the hue parameter
sns.scatterplot(x="RM", y="LSTAT", hue="MEDV", data=df)
# show the plot
plt.show()
X = df.drop("MEDV", axis=1)
y = df["MEDV"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
from sklearn.neighbors import KNeighborsRegressor
# Create KNN regressor with n_neighbors = 5
knn = KNeighborsRegressor(n_neighbors=5)
# Fit the model to the training data
knn.fit(X_train, y_train)
# Make predictions on the testing data
y_pred = knn.predict(X_test)
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_pred)
print("R-squared score:", r2)
my_test = [
{
"CRIM": 0.02731,
"ZN": 0.0,
"INDUS": 7.07,
"CHAS": 0,
"NOX": 0.469,
"RM": 6.421,
"AGE": 78.9,
"DIS": 4.9671,
"RAD": 2,
"TAX": 242,
"PTRATIO": 17.8,
"B": 396.9,
"LSTAT": 9.14,
}
]
test_df = pd.DataFrame(my_test)
test_df = scaler.transform(test_df)
print(knn.predict(test_df))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### Going through data
# train_metadata.csv
train_metadata = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
train_metadata.head()
# Finding amount of missing data
null_values = train_metadata.isna().sum()
print(null_values)
# No. of species present in dataset
import matplotlib.pyplot as plt
species_frequency = train_metadata["primary_label"].value_counts()
print(len(species_frequency.values))
plt.bar(species_frequency.index, species_frequency.values)
Bird_Taxonomy = pd.read_csv("/kaggle/input/birdclef-2023/eBird_Taxonomy_v2021.csv")
Bird_Taxonomy.head()
null_values = Bird_Taxonomy.isna().sum()
print(null_values)
sample_submission = pd.read_csv("/kaggle/input/birdclef-2023/sample_submission.csv")
sample_submission.head()
# One-Hot Encoding can be used
# list of all found species from metadata
species = train_metadata["primary_label"].unique().tolist()
len(species)
# adding full filepath column to metadata dataframe
# This can help us to access files of audio easily
train_metadata["filename"] = train_metadata["filename"].apply(
lambda x: "/kaggle/input/birdclef-2023/train_audio/" + x
)
train_metadata.columns
train_metadata_2 = train_metadata.drop(
[
"secondary_labels",
"type",
"latitude",
"longitude",
"scientific_name",
"common_name",
"author",
"license",
"rating",
"url",
],
axis=1,
)
# #### Preprocessing
import librosa
from scipy import signal
from matplotlib import pyplot as plt
# __Librosa__ is a powerful Python library built for working with audio and analyzing it. It is the starting point for working with audio data at scale, for applications ranging from detecting a person's voice to extracting personal characteristics from audio.
# __SciPy__ is a scientific computation library that uses NumPy underneath. SciPy stands for Scientific Python.
# To convert an .ogg file to a waveform
def ogg_to_wave(filename):
ogg, sample_rate = librosa.load(filename)
int_16 = (ogg * 32767).astype(np.int16)
return int_16, sample_rate
# __ogg__ is a NumPy array that contains audio samples with float values between -1 and 1, representing the _amplitude_ of the audio signal at different points in time.
# __(ogg * 32767)__ scales the audio samples so that they fall within the range of -32767 to 32767, which is the range of values that can be represented by a _16-bit_ signed integer.
# __astype(np.int16)__ converts the scaled audio samples to 16-bit signed integer data type. This conversion is necessary because most audio processing libraries and tools work with audio data represented as 16-bit integers rather than floating point values.
# The resulting __int_16__ array contains the audio samples in the 16-bit integer format that can be used for further audio processing and analysis.
# Take in the created waveform and create a spectrogram matrix for the neural network input layer
def wave_to_spec(waveform, sample_rate):
freq, time, spectrogram = signal.spectrogram(
waveform, sample_rate, window="hann", nperseg=256, noverlap=128
)
return freq, time, spectrogram
# A __spectrogram__ is a 2D representation of a sound signal that shows how the frequency content of a signal changes over time. It is essentially a graph of the signal's frequency spectrum, with time on the x-axis, frequency on the y-axis, and the magnitude of each frequency component represented by a color or brightness value.
# A spectrogram matrix is a matrix representation of a spectrogram, where each element of the matrix represents the amplitude or energy of a frequency component at a particular time. Spectrogram matrices are commonly used as input data for machine learning models that process audio signals.
# The function uses the __spectrogram()__ function from the __scipy.signal__ library to calculate the spectrogram. The spectrogram() function calculates the short-term Fourier transform of the waveform and returns the frequency, time, and spectrogram arrays.
# The __window__ parameter specifies the type of window function to be used. Here, the __"hann"__ window is used. The __nperseg__ parameter specifies the length of each segment of the signal to be used for calculating the _Fourier transform_. The __noverlap__ parameter specifies the number of samples of overlap between the segments.
# The function returns the frequency, time, and spectrogram arrays. The spectrogram array is a 2D matrix that contains the magnitude of the frequency components at each time segment of the waveform. It is commonly used as an input to neural networks for audio processing tasks such as speech recognition or music genre classification.
# function assembly for preprocessing data
def audio_preprocessing(filepath):
waveform, sample_rate = ogg_to_wave(filepath)
freq, time, spectrogram = wave_to_spec(waveform, sample_rate)
return freq, time, spectrogram
# Test ogg_to_wave function
wave_test, wave_test_sample_rate = ogg_to_wave(
"/kaggle/input/birdclef-2023/train_audio/afrgrp1/XC126598.ogg"
)
plt.plot(wave_test)
plt.show()
print(f"Wave form has {len(wave_test)} points.")
print(f"Wave form sample rate is {wave_test_sample_rate}Hz.")
# Test wave_to_spec function
f, t, s = wave_to_spec(wave_test, wave_test_sample_rate)
# Plotting out spectrogram
plt.pcolormesh(t, f, 10 * np.log10(s), cmap="viridis")
plt.xlabel("Time(s)")
plt.ylabel("Frequency (Hz)")
plt.title("Audio Spectrogram Representation")
plt.show()
# Test of full audio preprocessing function pipeline
f, t, s = audio_preprocessing(
"/kaggle/input/birdclef-2023/train_audio/afpwag1/XC131306.ogg"
)
plt.pcolormesh(t, f, 10 * np.log10(s), cmap="viridis")
plt.xlabel("Time(s)")
plt.ylabel("Frequency (Hz)")
plt.title("Audio Spectrogram Representation")
plt.show()
# Till now, we created a pipeline that takes filepath and return its spectrogram
test = train_metadata_2.drop_duplicates(subset=["primary_label"], keep=False)
test.head()
# replacing filename with spectrogram representation
test["spectrogram"] = test["filename"].apply(lambda x: audio_preprocessing(x)[2])
test.drop("filename", axis=1, inplace=True)
# The code __metadata.drop_duplicates(subset=["primary_label"], keep=False)__ drops duplicate rows from the metadata DataFrame based on the "primary_label" column.
# The __keep=False__ parameter indicates that all duplicates should be dropped, rather than keeping the first or last occurrence.
test.head()
test.reset_index(inplace=True)
# one-hot encoding species column
species = pd.get_dummies(test["primary_label"]).astype("float64")
# dropping primary_label column (will be replaced with OH encoded columns)
test = test.drop(["primary_label"], axis=1)
test = test.join(species)
test.head()
# Combine all functions to create a single function 'preprocessing'
# will take in .csv filepath (formatting expected to reflect that described earlier)
def preprocessing(data):
# creating dataframe
df = pd.read_csv(data).head(5)
# creating spectrogram column
file_prefix = "/kaggle/input/birdclef-2023/train_audio/"
df["input"] = df["filename"].apply(
lambda x: audio_preprocessing(file_prefix + x)[2]
)
# dropping other columns
df = df[["input", "primary_label"]]
# one-hot encoding primary_label column
species = pd.get_dummies(df["primary_label"]).astype("float64")
df = df.drop("primary_label", axis=1)
df = df.join(species)
return df
# Check whether above function works or not
example = preprocessing("/kaggle/input/birdclef-2023/train_metadata.csv")
example.head()
# #### Creation of array for audiofile
# array that will store audiofile objects
audio_files = []
# audiofile class
class AudioFile:
def __init__(self, filename):
self.filename = filename
self.freqs, self.time, self.spectrogram = audio_preprocessing(filename)
# method to output objects spectrogram to terminal
def plot(self):
plt.pcolormesh(
self.time, self.freqs, 10 * np.log10(self.spectrogram), cmap="viridis"
)
plt.xlabel("Time(s)")
plt.ylabel("Frequency (Hz)")
plt.title(f"Spectrogram for {self.filename[40:-1]}")
plt.show()
# Loading in small subset of training metadata
train_metadata_3 = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
train_metadata_3 = train_metadata_3.head(10)
train_metadata_3.info()
# Looping through dataframe and creating audio objects tied to audio in 'filename' column
for key, value in train_metadata_3["filename"].items():
audio_files.append(AudioFile("/kaggle/input/birdclef-2023/train_audio/" + value))
# plotting out all spectrograms in array
for file in audio_files:
file.plot()
# Now, we created a pipeline to convert all .ogg files into spectrogram. It can be useful to train and test audio files by converting them into class objects.
# > Interesting way of generating report from the data
from pandas_profiling import ProfileReport
# Generating EDA report
report = ProfileReport(train_metadata, title="BirdCLEF 2023 Training Metadata")
report
# Filepaths
# Convert the filename column into a numpy array of filepath strings
X = np.asarray(train_metadata["filename"])
# One-hot encoding
y = np.asarray(pd.get_dummies(train_metadata["primary_label"]))
# This function takes a filepath, reads the file, and returns the audio
# decoded as a 16-bit integer waveform tensor (downsampled to 16 kHz mono).
def load_audio_file(file_path):
# Read audio file contents into a tensor
audio_binary = tf.io.read_file(file_path)
# Decode audio binary into a numpy array
audio, sr = librosa.load(io.BytesIO(audio_binary.numpy()), sr=16000, mono=True)
audio = audio * 32768.0
audio = tf.cast(audio, tf.int16)
return audio
# The __sr__ parameter is used to specify the desired sampling rate of the output waveform, which is set to _16000_ samples per second in this case.
# The __mono__ parameter is set to _True_, which means that the output waveform will be converted to mono by averaging the samples across all channels, if the input audio has multiple channels.
# The __BytesIO__ function from the io library is used to convert the binary data (audio_binary) into a stream of bytes that can be read by librosa.load.
# The numpy() method is used to convert the binary data (audio_binary) from a TensorFlow tensor to a NumPy array, which is required by BytesIO.
import tensorflow as tf
import io
X[0]
# Testing dataloading function
X_test = load_audio_file(X[0])
print(type(X_test))
print(X_test.shape)
# Plotting Waveform
plt.plot(X_test)
plt.title("Bird Call Waveform (16 kHz Downsample)")
plt.xlabel("Time (16,000 = 1 second)")
plt.ylabel("Waveform Amplitude")
plt.show()
def preprocess(filepath, labels):
# Loading in audio
waveform = tf.py_function(load_audio_file, [filepath], tf.int16)
# Normalize waveform to [-1, 1]
waveform = tf.cast(waveform, tf.float32) / 32768.0
# Getting first 3 seconds of clip
waveform = waveform[:48000]
# Add zero padding
zero_padding = tf.zeros([48000] - tf.shape(waveform), dtype=tf.float32)
wav = tf.concat([zero_padding, waveform], 0)
# Spectrogram
stft = tf.signal.stft(wav, frame_length=320, frame_step=32)
spectrogram = tf.abs(stft)
spectrogram = tf.expand_dims(spectrogram, axis=2)
return spectrogram, labels
import random
# Generating random index to test
test_index = random.randint(0, len(X) - 1)
test_index
# Should return spectrogram and numpy array of classes
X_function_test, y_function_test = preprocess(X[test_index], y[test_index])
print(f"Spectrogram is: {X_function_test} \n")
print(f"Numpy array is: {y_function_test} ")
print(f"Spectrogram shape: {X_function_test.shape}")
print(f"Spectrogram type: {type(X_function_test)}")
print(f"Classes shape: {y_function_test.shape}")
print(f"Classes type: {type(y_function_test)}")
# Plot spectrogram
plt.figure(figsize=(4, 4))
plt.imshow(
tf.transpose(X_function_test)[0], aspect="auto", origin="lower", cmap="viridis"
)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.title("Bird Call Spectrogram (Downsampled to 16 kHz)")
plt.show()
# #### Transform the data
# Creating dataset from 'train_metadata.csv' data
data = tf.data.Dataset.from_tensor_slices((X, y))
# Full data pipeline
data = data.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
data = data.cache()
data = data.shuffle(buffer_size=100)
data = data.batch(batch_size=16)
data = data.prefetch(8)
# Calculating batch numbers for train and test
batches = len(data)
print(batches)
print(batches * (0.8))
print(batches - (batches * (0.8)))
# 847 Training batches
# 212 Test batches
train_1 = data.take(200)
train_2 = data.skip(200).take(200)
train_3 = data.skip(400).take(200)
train_4 = data.skip(600).take(247)
test = data.skip(847).take(212)
# verify data generator
# Should output ([batch_size], [element dimension]) for both input and output
spectrograms, labels = train_1.as_numpy_iterator().next()
# 16 examples of spectrograms
spectrograms.shape
# 16 examples of species label data
labels.shape
# ### Model Creation
# TensorFlow CNN Model Dependencies
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D
# cnn_model.add(LeakyReLU(alpha=0.1))
model = Sequential()
model.add(Conv2D(16, (3, 3), activation="relu", input_shape=(1491, 257, 1)))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(16, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(264, activation="relu"))
model.add(Dense(264, activation="softmax"))
model.summary()
model.compile(
"Adam",
loss="CategoricalCrossentropy",
metrics=[tf.keras.metrics.CategoricalAccuracy()],
)
from tensorflow.keras.callbacks import EarlyStopping
# Stops model if it is not steadily dropping loss val_loss function return
earlyStop = EarlyStopping(monitor="val_loss", patience=3, restore_best_weights=True)
# Fitting model to train_1
history_train_1 = model.fit(
train_1, epochs=100, validation_data=test, callbacks=[earlyStop]
)
|
# ## Business Understanding
# Business Understanding involves understanding the problem at hand, identifying the target audience, and setting goals and success criteria for the project.
# ### 1. Define the problem statement:
# The problem is to classify images of people doing yoga poses into different categories using deep learning techniques. This is an image recognition problem that belongs to the field of machine learning.
# ### 2. Identify the target audience:
# Yoga instructors, fitness enthusiasts, and individuals interested in yoga can benefit from this project. They can use the model to classify yoga poses accurately, which can help them in their practice.
# ### 3. Determine the goals and success criteria:
# The goal is to develop a model that can accurately classify yoga poses. Success criteria will be evaluated based on the accuracy of the model. The success criteria will depend on the specific use case, but generally, a high accuracy rate is desired.
# ### 4. Defining the data requirements:
# We need a dataset of labeled images of people performing yoga poses. The dataset should contain a variety of yoga poses and images of different people performing those poses.
# ### 5. Identify potential data sources:
# Luckily we are provided with data: about 258 MB of yoga images; sample images are shown in the Data Understanding section below.
# # Data Understanding & Data Preparation
# ### 1. Data Understanding:
# - Explore the data and get a feel for what it contains.
# - Check the number of images in the dataset and the number of classes.
# - Analyze the distribution of images across different classes.
# - Check the size and resolution of the images.
# - Visualize some sample images to get an idea of what the data looks like.
# load the necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# libraries for images dataset
import glob
import os
import cv2
from PIL import Image
from tqdm import tqdm
from tabulate import tabulate
# dataset path
data_path = "..\\data\\YOGA\\content\\cleaned\\DATASET"
# Define the path to the training and test data folders
train_folder = os.path.join(data_path, "TRAIN")
test_folder = os.path.join(data_path, "TEST")
# Define the list of classes
class_names = os.listdir(train_folder)
# Print the number of classes
print("Number of classes:", len(class_names))
# Create a dictionary to store the number of images per class
train_counts = {}
test_counts = {}
# Loop through the classes and count the number of images in the training set
for class_name in class_names:
train_counts[class_name] = len(os.listdir(os.path.join(train_folder, class_name)))
test_counts[class_name] = len(os.listdir(os.path.join(test_folder, class_name)))
# Create a list of tuples containing the counts for each class
class_counts = [
(class_name, train_counts[class_name], test_counts[class_name])
for class_name in class_names
]
# Add a row with the total count of images in both the training and test sets
total_count = ("Total", sum(train_counts.values()), sum(test_counts.values()))
class_counts.append(total_count)
# Print the class counts for the training and test sets side-by-side as a table
print(
tabulate(
class_counts, headers=["Class", "Train Count", "Test Count"], tablefmt="orgtbl"
)
)
# Create a bar plot for the number of images per class in the training set
sns.set_style("whitegrid")
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
sns.barplot(x=list(train_counts.keys()), y=list(train_counts.values()))
plt.xlabel("Class")
plt.ylabel("Number of images")
plt.title("Training set")
# Create a bar plot for the number of images per class in the test set
plt.subplot(1, 2, 2)
sns.barplot(x=list(test_counts.keys()), y=list(test_counts.values()))
plt.xlabel("Class")
plt.ylabel("Number of images")
plt.title("Test set")
# Show the plot
plt.show()
# Define the list of classes
class_names = os.listdir(train_folder)
# Set the random seed for reproducibility
np.random.seed(0)
# Create a figure to display the images
fig, axs = plt.subplots(1, len(class_names), figsize=(15, 5))
# Loop through the classes and display a random image from each class
for i, class_name in enumerate(class_names):
# Get the list of image files for this class
img_files = os.listdir(os.path.join(train_folder, class_name))
# Choose a random image file
img_file = np.random.choice(img_files)
# Load the image
img = cv2.imread(os.path.join(train_folder, class_name, img_file))
# Convert the image from BGR to RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Display the image
axs[i].imshow(img)
axs[i].set_title(class_name)
axs[i].axis("off")
# Show the plot
plt.show()
# ### 2. Data preprocessing
# Preprocess an image by resizing it to a standard size (96x96), converting it to grayscale or RGB based on the architecture chosen, and normalizing the pixel values of the image to a range of 0-1.
def preprocess_image(img_path, grayscale=False):
"""
Args:
- img_path: Path to the image file.
- grayscale: Whether to convert the image to grayscale (default: False).
Returns:
- Preprocessed image as a numpy array.
"""
# Load the image using OpenCV.
img = cv2.imread(img_path)
# Resize the image to a standard size (96x96).
img = cv2.resize(img, (96, 96))
# Convert the image to grayscale or RGB based on the architecture chosen.
if grayscale:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = np.expand_dims(img, axis=-1)
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Normalize the pixel values of the image to a range of 0-1.
img = img.astype("float32") / 255.0
return img
# Define the path to the data folders.
data_path = "..\\data\\YOGA\\content\\cleaned\\DATASET"
train_folder = os.path.join(data_path, "TRAIN")
test_folder = os.path.join(data_path, "TEST")
# Define the list of classes.
pose_classes = ["downdog", "goddess", "plank", "tree", "warrior2"]
# Define the size of the images
img_size = 96
# Define whether to use grayscale images or RGB images
grayscale = True
# Load the training data
train_images = []
train_labels = []
for i, pose_class in enumerate(pose_classes):
class_folder = os.path.join(train_folder, pose_class)
for j, img_file in enumerate(
tqdm(os.listdir(class_folder), desc=f"Training data ({pose_class})")
):
img_path = os.path.join(class_folder, img_file)
img = preprocess_image(img_path, grayscale=grayscale)
train_images.append(img)
train_labels.append(i)
# Load the test data
test_images = []
test_labels = []
for i, pose_class in enumerate(pose_classes):
class_folder = os.path.join(test_folder, pose_class)
for j, img_file in enumerate(
tqdm(os.listdir(class_folder), desc=f"Test data ({pose_class})")
):
img_path = os.path.join(class_folder, img_file)
img = preprocess_image(img_path, grayscale=grayscale)
test_images.append(img)
test_labels.append(i)
# Convert the lists of images and labels to numpy arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
test_images = np.array(test_images)
test_labels = np.array(test_labels)
# Print the shapes of the train and test data
print("Train data shape:", train_images.shape, train_labels.shape) # type: ignore
print("Test data shape:", test_images.shape, test_labels.shape) # type: ignore
# The `train_images.shape` output `(1075, 96, 96, 1)` means that the training set contains 1075 images, where each image has a height and width of `96 pixels`, and a depth of 1. The depth of 1 indicates that the images are in `grayscale`.
# Similarly, the `test_images` array also has a shape of `(466, 96, 96, 1)` which means that the test set contains 466 images, with each image having a height and width of 96 pixels and a depth of 1.
# Define a dictionary to map pose class indices to their names
pose_class_names = {0: "downdog", 1: "goddess", 2: "plank", 3: "tree", 4: "warrior2"}
# Select a random image from each class
random_images = []
for i in range(len(pose_classes)):
class_indices = np.where(train_labels == i)[0]
random_index = np.random.choice(class_indices)
random_image = train_images[random_index]
random_images.append(random_image)
# Plot the random images
fig, axs = plt.subplots(1, len(pose_classes), figsize=(15, 15))
for i in range(len(pose_classes)):
    axs[i].imshow(random_images[i], cmap="gray" if grayscale else None)
axs[i].set_title(pose_class_names[i])
axs[i].axis("off")
plt.show()
# - post preprocessing examples
# # Data Modeling & Model Evaluation
# For image classification problems, `Convolutional Neural Networks` (CNNs) are a popular choice of models. They have the ability to learn spatial features and patterns from images by using convolutional layers. Additionally, pooling layers can be used to reduce the spatial dimensions of the feature maps, and fully connected layers can be used for classification.
# We can use various types of `CNN architectures` such as `LeNet`, `AlexNet`, `VGG`, `ResNet`, and `Inception`. The choice of architecture will depend on the complexity of the problem, the size of the dataset, and the available computing resources.
# Another important aspect of model selection is `regularization techniques` such as `Dropout`, `L1/L2 regularization`, and `early stopping`. These techniques can help to prevent overfitting of the model to the training data and improve generalization performance on unseen test data.
# In terms of `optimization techniques`, we can use various optimization algorithms such as Stochastic Gradient Descent `SGD`, `Adam`, and `RMSprop`. The choice of optimizer will depend on the specific problem and architecture.
# We can also use various `evaluation metrics` such as `accuracy`, `precision`, `recall`, `F1-score`, and `confusion matrix` to evaluate the performance of our model. We will aim to maximize the accuracy and F1-score while minimizing the loss function during training.
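# As a sketch of how the regularization techniques mentioned above could be wired into the same
# Sequential API (the Dropout rate and L2 strength here are assumptions, not part of the model
# actually trained below):
from tensorflow.keras import layers, models, regularizers
def build_regularized_cnn(img_size=96, n_classes=5):
    # Same overall shape as the simple CNN below, with L2 weight decay and a Dropout layer added
    reg = regularizers.l2(1e-4)
    return models.Sequential(
        [
            layers.Conv2D(32, (3, 3), activation="relu", kernel_regularizer=reg, input_shape=(img_size, img_size, 1)),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation="relu", kernel_regularizer=reg),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu", kernel_regularizer=reg),
            layers.Dropout(0.5),  # randomly zeroes half of the activations during training
            layers.Dense(n_classes, activation="softmax"),
        ]
    )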
# loading necessary libraries
import tensorflow as tf
from tensorflow.keras import models, layers, optimizers
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report
# #### 1. Simple CNN:
# We will define our model architecture using the Sequential API in Keras: a simple CNN with 2 convolutional layers and 2 fully connected (dense) layers. Regularization here comes from early stopping rather than dropout.
# Define the model architecture
# Initialize a sequential model
model = models.Sequential()
# Add a 2D convolutional layer with 32 filters, a 3x3 kernel size, ReLU activation function, and input shape of (img_size, img_size, 1) for grayscale or (img_size, img_size, 3) for RGB
model.add(
layers.Conv2D(32, (3, 3), activation="relu", input_shape=(img_size, img_size, 1))
)
# Add a max pooling layer with a pool size of 2x2
model.add(layers.MaxPooling2D((2, 2)))
# Add another 2D convolutional layer with 64 filters and a 3x3 kernel size
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
# Add another max pooling layer
model.add(layers.MaxPooling2D((2, 2)))
# Flatten the output of the convolutional layers
model.add(layers.Flatten())
# Add a dense layer with 64 neurons and ReLU activation function
model.add(layers.Dense(64, activation="relu"))
# Add a final dense layer with a number of neurons equal to the number of classes,
# and softmax activation function to output the probability of each class
model.add(layers.Dense(len(pose_classes), activation="softmax"))
# We will then compile the model by specifying the loss function, optimizer, and evaluation metric.
model.compile(
optimizer=optimizers.Adam(), # Compile the model with the Adam optimizer
loss="sparse_categorical_crossentropy", # Use sparse_categorical_crossentropy as the loss function
metrics=["accuracy"],
) # Use accuracy as the metric for evaluation
# We will also define an EarlyStopping callback to monitor the validation loss and stop training if it does not improve for a certain number of epochs.
# The `patience` parameter defines the number of epochs the training can continue with no improvement in validation loss before stopping.
# The `monitor` parameter specifies the metric to monitor for early stopping, which is validation loss in this case
early_stop = EarlyStopping(patience=3, monitor="val_loss")
# Next, we will fit the model on the training data and validate it on the test data. We will use a batch size of 25 and train the model for 20 epochs.
history = model.fit(
train_images,
train_labels,
epochs=20,
batch_size=25,
validation_split=0.2,
callbacks=[early_stop],
)
# Finally, we will evaluate the model on the test data and print the classification report.
test_loss, test_acc = model.evaluate(test_images, test_labels)
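# The cell above mentions a classification report; a minimal sketch of it
# (assumes the fitted `model` and the arrays built earlier in this notebook):
pred_labels = np.argmax(model.predict(test_images), axis=1)
print(classification_report(test_labels, pred_labels, target_names=pose_classes))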
# ### evaluation interpretation:
# The evaluation shows that the model has achieved an accuracy of 0.7468 and a loss of 0.8446. The accuracy metric measures the proportion of correctly classified images, while the loss metric measures how well the model is able to fit the training data.
# The rating of the model depends on the context in which it is being used and the specific requirements of the task. However, in general, an accuracy of 0.7468 may be considered moderate, but could be improved. Similarly, a loss of 0.8446 may be considered relatively high, indicating that the model is not fitting the training data very well.
# Therefore, I would rate this model as 5 out of 10, as it has some potential but also has room for improvement.
sns.set_style("darkgrid")
plt.figure(figsize=(16, 4))
# Plot the training and validation accuracy curves
plt.subplot(1, 2, 1)
plt.plot(epochs, acc, label="Training accuracy")
plt.plot(epochs, val_acc, label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
# Plot the training and validation loss curves
plt.subplot(1, 2, 2)
plt.plot(epochs, loss, label="Training loss")
plt.plot(epochs, val_loss, label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Reading from train and test files
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data.head()
# ## Import the packages
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import mean_squared_error
# ## Feature engineering
# Performing ordinal encoding on sex of a passenger
sex_map = {"male": 0, "female": 1}
train_data["Sex"] = train_data["Sex"].replace(sex_map)
test_data["Sex"] = test_data["Sex"].replace(sex_map)
train_data.head()
# Useful features
features = ["Pclass", "Sex", "SibSp"]
# Splitting the label into variable 'y'
y = train_data["Survived"]
X = train_data[features]
# ## Training-Validation sets
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.8, random_state=1
)
# ## Training the kNN model
def kNNTrain(X_train, X_valid, y_train, y_valid, n):
model = KNeighborsClassifier(n_neighbors=n)
model.fit(X_train, y_train)
pred = model.predict(X_valid)
mse = mean_squared_error(y_valid, pred)
return mse
# Call the function kNNTrain
neighbors = [3, 5, 7]
for x in neighbors:
error = kNNTrain(X_train, X_valid, y_train, y_valid, x)
print(f"kNN with k = {x} has MSE = {error}")
# kNN with k=3 has lowest mean squared error.
# Now, properly train model with entire training dataset.
# Training final model with k=3
final_model = KNeighborsClassifier(n_neighbors=3)
final_model.fit(X, y)
# Test set with the needed features
X_test = test_data[features]
# Predict with trained kNN model
final_preds = final_model.predict(X_test)
# ## Creating submission file
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": final_preds})
output.to_csv("finalsub.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Basic Data Science and ML Pipeline
# ## OSEMN Pipeline
# * O - Obtaining our data
# * S - Scrubbing / Cleaning our data
# * E - Exploring / Visualizing our data will allow us to find patterns and trends
# * M - Modeling our data will give us our predictive power as a wizard
# * N - Interpreting our data
# Loading the dataset
data = pd.read_csv(
"/kaggle/input/apartment-cost-in-new-york-city/apartment_cost_list.csv"
)
# Print the first 5 rows of the dataframe.
data.head()
data.info(verbose=True)
data = data.drop(["Curb Cut", "Horizontal Enlrgmt", "Vertical Enlrgmt"], axis=1)
data.info()
data = data.dropna()
data.info()
data.head()
data["Job Type"].value_counts()
data["Job #"].value_counts()
data.duplicated().sum()
data = data.drop_duplicates()
data.shape
data.describe()
# **DataFrame.describe()** method generates descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding NaN values. This method tells us a lot about a dataset. One important thing is that describe() deals only with numeric values: if a column contains categorical values, it is ignored and a summary is shown for the other columns, unless the parameter include="all" is passed.
# Now, let's understand the statistics that are generated by the describe() method
# * count tells us the number of NoN-empty rows in a feature.
# * mean tells us the mean value of that feature.
# * std tells us the Standard Deviation Value of that feature.
# * min tells us the minimum value of that feature.
# * 25%, 50%, and 75% are the percentile/quartile of each features. This quartile information helps us to detect Outliers.
# * max tells us the maximum value of that feature.
data.describe().T
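# As noted above, passing include="all" folds the categorical columns into the summary as well
# (a quick sketch):
print(data.describe(include="all").T)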
p = data.hist(figsize=(20, 20))
data["Bin #"].value_counts()
data["Proposed Zoning Sqft"].value_counts()
data["Enlargement SQ Footage"].value_counts()
data["Borough"].value_counts()
data["House #"].value_counts()
data["Community - Board"].value_counts()
data = data.drop(["Job #", "Proposed Zoning Sqft", "Enlargement SQ Footage"], axis=1)
data.shape
data.head()
data["Job Description"].value_counts()
data["Fully Permitted"].value_counts()
|
# # Lecture Summary
# ----------------------
# ## 07. SHAP & RFECV
# #### What is SHAP?
# One of the *Local Model-Agnostic Methods*, which explains each individual prediction.
# The contribution of each feature is measured by comparing the result with and without that feature under otherwise identical conditions, which shows how strongly that feature influenced the prediction (the magnitude of the feature attributions).
# #### vs. Permutation Feature Importance
# Permutation Feature Importance (PFI) is a *Global Model-Agnostic Method* that describes the overall behavior of a machine learning model.
# PFI shuffles (permutes) the values of a given feature and judges the feature to be more important the further the predictions drift from the true answers after shuffling.
# #### RFECV
# RFE repeatedly retrains the model, removing the least important features until the desired number (N) of features remains.
# Because choosing that N arbitrarily is difficult, RFECV uses cross-validation to find the optimal value: you specify the minimum number of features to keep and how many to remove at each step, and it returns the best result.
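# A minimal RFECV sketch (the estimator and the synthetic data here are assumptions,
# not the competition setup used below):
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
X_demo, y_demo = make_classification(n_samples=250, n_features=300, n_informative=10, random_state=42)
rfecv_demo = RFECV(
    LogisticRegression(penalty="l1", solver="liblinear", C=0.1),
    step=10,  # number of features removed at each iteration
    min_features_to_select=5,  # lower bound on the number of features kept
    cv=StratifiedKFold(5),
    scoring="roc_auc",
)
rfecv_demo.fit(X_demo, y_demo)
print("Optimal number of features:", rfecv_demo.n_features_)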
# ## 08. Comparing Models
# Shortlist the classification models worth trying and check their performance through the steps below:
# 1. Build a model with the default hyperparameters.
# 2. Select the best features based on the model from step 1 and run GridSearchCV on those features.
# 3. Train the model with the best estimator and check its scores.
# ## 09. Feature Engineering and Prediction
# Further improve model performance:
# - create and test new features
# - scale the dataset
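# A minimal sketch of the scaling step mentioned above (the stand-in DataFrame is an
# assumption; in practice this would be applied to the competition features):
import pandas as pd
from sklearn.preprocessing import StandardScaler
demo_df = pd.DataFrame({"f0": [1.0, 2.0, 3.0, 4.0], "f1": [10.0, 40.0, 20.0, 30.0]})
scaled_demo = pd.DataFrame(StandardScaler().fit_transform(demo_df), columns=demo_df.columns)
print(scaled_demo)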
# # Quiz
# ---------------------------
#
# Libraries
import numpy as np
import pandas as pd
from scipy import stats
pd.set_option("max_columns", None)
import json
import ast
import time
import datetime
import os
from operator import itemgetter
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
# Related blog post by Wooil Jeong: https://wooiljeong.github.io/python/python_plotly/
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.model_selection import (
train_test_split,
StratifiedKFold,
KFold,
cross_val_score,
GridSearchCV,
RepeatedStratifiedKFold,
)
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.feature_selection import (
GenericUnivariateSelect,
SelectPercentile,
SelectKBest,
f_classif,
mutual_info_classif,
RFECV,
)
import xgboost as xgb
import lightgbm as lgb
from sklearn.svm import SVC
from sklearn.neighbors import NearestNeighbors
from sklearn.ensemble import AdaBoostClassifier
from xgboost import XGBClassifier
from sklearn import linear_model
import statsmodels.api as sm
import eli5
from eli5.sklearn import PermutationImportance
import shap
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
import warnings
warnings.filterwarnings("ignore")
path = "/kaggle/input/dont-overfit-ii"
train = pd.read_csv(f"{path}/train.csv")
test = pd.read_csv(f"{path}/test.csv")
X_train = train.drop(["id", "target"], axis=1)
y_train = train["target"]
X_test = test.drop(["id"], axis=1)
folds = StratifiedKFold(n_splits=20, shuffle=True, random_state=42)
repeated_folds = RepeatedStratifiedKFold(n_splits=10, n_repeats=5, random_state=42)
def train_model(X_train, y_train, X_test, folds=folds, model=None):
prediction = np.zeros(len(X_test))
scores_train = []
scores_valid = []
for fold_n, (train_index, valid_index) in enumerate(folds.split(X_train, y_train)):
X_train_fold, X_valid_fold = X_train[train_index], X_train[valid_index]
y_train_fold, y_valid_fold = y_train[train_index], y_train[valid_index]
model.fit(X_train_fold, y_train_fold)
y_pred_train = model.predict(X_train_fold).reshape(
-1,
)
train_score = roc_auc_score(y_train_fold, y_pred_train)
scores_train.append(train_score)
y_pred_valid = model.predict(X_valid_fold).reshape(
-1,
)
valid_score = roc_auc_score(y_valid_fold, y_pred_valid)
scores_valid.append(valid_score)
y_pred = model.predict_proba(X_test)[:, 1]
prediction += y_pred
prediction /= folds.get_n_splits()
print(
f"Mean train auc: {np.mean(scores_train):.4f}, std: {np.std(scores_train):.4f}."
)
print(
f"Mean valid auc: {np.mean(scores_valid):.4f}, std: {np.std(scores_valid):.4f}."
)
return scores_valid, prediction
# # SHAP
# Based on the graph obtained by running the code below, answer the following questions.
# 1. Which top feature has a positive relationship with the prediction?
# 2. Which five features have the strongest negative relationship?
# 3. Using SHAP, select a total of 10 features to use for model testing (variable name: best_features)
#
print("--- Default Model ---")
model = linear_model.LogisticRegression(
class_weight="balanced", penalty="l1", C=0.1, solver="liblinear"
)
scores, prediction = train_model(
X_train.values, y_train, X_test, folds=repeated_folds, model=model
)
print("--- SHAP graph ---")
explainer = shap.LinearExplainer(model, X_train)
shap_values = explainer.shap_values(X_train)
shap.summary_plot(shap_values, X_train)
# shap.summary_plot(shap_values, X_train, max_display=40)  # the max_display argument sets how many features are shown at most
# # **! Spoiler warning !**
# The answers are written in the next cell
# -----------------
# ### Answers
# ----------------
# 1. Which top feature has a positive relationship with the prediction?
# 1. Feature 127 (the larger the feature value, the more the model predicts 1, so it can be seen as positively related.)
#
# 1. Which five features have the strongest negative relationship?
# 1. Features 176, 135, 16, 59, 199 (the larger these feature values, the more the model predicts 0, so they can be seen as negatively related.)
#
# 1. Using SHAP, select a total of 10 features to use for model testing
# > vals= np.abs(shap_values).mean(0)
# feature_importance = pd.DataFrame(list(zip(X_train.columns,vals)),columns=['col_name','feature_importance_vals'])
# feature_importance.sort_values(by=['feature_importance_vals'],ascending=False,inplace=True)
# best_features = feature_importance.head(10).columns
# display(feature_importance.head(10))
vals = np.abs(shap_values).mean(0)
feature_importance = pd.DataFrame(
list(zip(X_train.columns, vals)), columns=["col_name", "feature_importance_vals"]
)
feature_importance.sort_values(
by=["feature_importance_vals"], ascending=False, inplace=True
)
best_features = feature_importance.head(10).col_name.tolist()  # names of the top-10 features, not the DataFrame's column labels
display(feature_importance.head(10))
def get_best_feat_shap(model, X_train, model_type, show=False):
if model_type == "linear":
explainer = shap.LinearExplainer(model, X_train)
elif model_type == "tree":
explainer = shap.TreeExplainer(model, X_train)
elif model_type == "kernel":
explainer = shap.KernelExplainer(model, X_train)
shap_values = explainer.shap_values(X_train)
vals = np.abs(shap_values).mean(0)
feature_importance = pd.DataFrame(
list(zip(X_train.columns, vals)),
columns=["col_name", "feature_importance_vals"],
)
feature_importance.sort_values(
by=["feature_importance_vals"], ascending=False, inplace=True
)
best_features = list(
feature_importance[feature_importance.feature_importance_vals > 0.1]
.head(10)
.col_name
)
if show:
display(feature_importance.head(10))
return best_features
best_features = get_best_feat_shap(model, X_train, model_type="linear", show=True)
print("best features : ", best_features)
# # Comparing Models
# Steps to try:
# * train the model with default parameters and check the baseline score
# * select the best features (using shap, falling back to eli5 when shap is not applicable)
# * run a grid search
# * train the best model and check the score again
# logistic regression
model = linear_model.LogisticRegression(
class_weight="balanced", penalty="l1", C=0.1, solver="liblinear"
)
print("Default scores")
scores, prediction = train_model(
X_train.values, y_train, X_test, folds=folds, model=model
)
print()
top_features = get_best_feat_shap(model, X_train, model_type="linear", show=True)
X_train_selected = train[top_features]
y_train = train["target"]
X_test_selected = test[top_features]
lr = linear_model.LogisticRegression(max_iter=1000)
parameter_grid = {
"class_weight": ["balanced", None],
"penalty": ["l2", "l1"],
"C": [0.001, 0.05, 0.08, 0.01, 0.1, 1.0, 10.0],
"solver": ["liblinear"],
}
grid_search = GridSearchCV(
lr, param_grid=parameter_grid, cv=folds, scoring="roc_auc", n_jobs=-1
)
grid_search.fit(X_train_selected, y_train)
print(f"Best score of GridSearchCV: {grid_search.best_score_}")
print(f"Best parameters: {grid_search.best_params_}")
print()
scores_logreg, prediction = train_model(
X_train_selected.values,
y_train,
X_test_selected,
folds=repeated_folds,
model=grid_search.best_estimator_,
)
model = linear_model.SGDClassifier(
eta0=1, max_iter=1000, tol=0.0001, loss="modified_huber"
)
print("Default scores")
scores, prediction = train_model(
X_train.values, y_train, X_test, folds=folds, model=model
)
print()
top_features = get_best_feat_shap(model, X_train, model_type="linear", show=True)
X_train_selected = train[top_features]
y_train = train["target"]
X_test_selected = test[top_features]
sgd = linear_model.SGDClassifier(eta0=1, max_iter=1000, tol=0.0001)
parameter_grid = {
"loss": ["log", "modified_huber"],
"penalty": [
"l1",
"l2",
"elasticnet",
    ],  # l1 can give feature selection / sparsity that elasticnet and l2 cannot achieve
    "alpha": [0.001, 0.01, 0.1, 0.5],  # constant that multiplies the regularization term
"l1_ratio": [0, 0.15, 0.5, 1.0],
"learning_rate": ["optimal", "invscaling", "adaptive"],
}
grid_search = GridSearchCV(
sgd, param_grid=parameter_grid, cv=folds, scoring="roc_auc", n_jobs=-1
)
grid_search.fit(X_train_selected, y_train)
print(f"Best score of GridSearchCV: {grid_search.best_score_}")
print(f"Best parameters: {grid_search.best_params_}")
print()
scores_sgd, prediction = train_model(
X_train_selected.values,
y_train,
X_test_selected,
folds=repeated_folds,
model=grid_search.best_estimator_,
)
model = AdaBoostClassifier()
print("Default scores")
scores, prediction = train_model(
X_train.values, y_train, X_test, folds=folds, model=model
)
print()
top_features = [
i[1:]
for i in eli5.formatters.as_dataframe.explain_weights_df(model).feature
if "BIAS" not in i
]
X_train_selected = train[top_features]
y_train = train["target"]
X_test_selected = test[top_features]
abc = AdaBoostClassifier()
parameter_grid = {
"n_estimators": [5, 10, 20, 50, 100],
"learning_rate": [0.001, 0.01, 0.1, 1.0, 10.0],
}
grid_search = GridSearchCV(
abc, param_grid=parameter_grid, cv=folds, scoring="roc_auc", n_jobs=-1
)
grid_search.fit(X_train_selected, y_train)
print(f"Best score of GridSearchCV: {grid_search.best_score_}")
print(f"Best parameters: {grid_search.best_params_}")
print()
scores_abc, prediction = train_model(
X_train_selected.values,
y_train,
X_test_selected,
folds=repeated_folds,
model=grid_search.best_estimator_,
)
plt.figure(figsize=(12, 8))
scores_df = pd.DataFrame({"LogisticRegression": scores_logreg})
scores_df["SGDClassifier"] = scores_sgd
scores_df["AdaBoostClassifier"] = scores_abc
sns.boxplot(data=scores_df)
plt.xticks(rotation=45)
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.utils import pad_sequences
from keras.layers import (
Embedding,
Dropout,
Dense,
LSTM,
GRU,
Bidirectional,
SpatialDropout1D,
Conv1D,
GlobalMaxPool1D,
)
from keras.models import Sequential
import matplotlib.pyplot as plt
from sklearn.metrics import (
confusion_matrix,
classification_report,
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_curve,
roc_auc_score,
)
import itertools
import os
import seaborn as sns
# # Hyperparameters
MAX_LEN = 100
DROPOUT = 0.2
EPOCHS = 1
BATCH_SIZE = 100
EMB_DIR = "/kaggle/input/pashto-word-embeddings"
DATASET = "/kaggle/input/pold-dataset/dataset.csv"
# # Dataset
df = pd.read_csv(DATASET)
df = df[:3000]
X_train, X_test, y_train, y_test = train_test_split(
df.text, df.label, test_size=0.20, random_state=42, stratify=df.label
)
X_test, X_val, y_test, y_val = train_test_split(
X_test, y_test, test_size=0.5, random_state=42, stratify=y_test
)
print("Train: ", len(y_train))
print("Test: ", len(y_test))
print("Val: ", len(y_val))
# # Tokenization
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
vocab_size = len(tokenizer.word_index) + 1
print("Vocab Size: ", vocab_size)
# # Padding
seq_X_Train = tokenizer.texts_to_sequences(X_train)
seq_X_test = tokenizer.texts_to_sequences(X_test)
seq_X_val = tokenizer.texts_to_sequences(X_val)
X_train_padded = pad_sequences(seq_X_Train, maxlen=MAX_LEN)
X_test_padded = pad_sequences(seq_X_test, maxlen=MAX_LEN)
X_val_padded = pad_sequences(seq_X_val, maxlen=MAX_LEN)
print("X_train_padded", X_train_padded.shape)
print("X_test_padded", X_test_padded.shape)
print("X_test_padded", X_val_padded.shape)
# # Training
def train(emb, clf, epochs, dropout):
MODEL_NAME = f"{emb}+{clf}"
f = open(f"{EMB_DIR}/{emb}.txt", "r", encoding="utf8")
lines = f.readlines()
f.close()
embedding_dict = {}
for line in lines:
values = line.split()
word = values[0]
vec = np.array(values[1:], "float32")
embedding_dict[word] = vec
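    # NOTE (descriptive comment): MAX_LEN is reused below as the embedding dimension,
    # which assumes the pretrained vectors in the .txt file are 100-dimensional.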
embedding_matrix = np.zeros((vocab_size, MAX_LEN))
for word, i in tokenizer.word_index.items():
if word in embedding_dict:
embedding_matrix[i] = embedding_dict[word]
model = Sequential()
model.add(
Embedding(
vocab_size,
MAX_LEN,
weights=[embedding_matrix],
input_length=MAX_LEN,
trainable=False,
)
)
model.add(Dropout(dropout))
if clf == "CNN":
model.add(Conv1D(100, 3))
model.add(GlobalMaxPool1D())
model.add(Dropout(dropout))
if clf == "LSTM":
model.add(LSTM(100))
if clf == "BiLSTM":
model.add(Bidirectional(LSTM(100, dropout=dropout)))
if clf == "GRU":
model.add(GRU(100, dropout=dropout))
if clf == "BiGRU":
model.add(Bidirectional(GRU(100, dropout=dropout)))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(
X_train_padded,
y_train,
batch_size=BATCH_SIZE,
epochs=epochs,
validation_data=[X_val_padded, y_val],
verbose=1,
)
model.save(f"models/{MODEL_NAME}/model.h5")
preds = model.predict(X_test_padded, verbose=0)
preds = np.rint(preds.flatten())
prec = round(precision_score(y_test, preds, average="macro") * 100, 2)
rec = round(recall_score(y_test, preds, average="macro") * 100, 2)
f1 = round(f1_score(y_test, preds, average="macro") * 100, 2)
acc = round(accuracy_score(y_test, preds) * 100, 2)
result = [emb, clf, prec, rec, f1, acc]
return result, history.history
embdd = ["GloVe", "Word2Vec", "fastText"]
classifiers = ["CNN", "LSTM", "BiLSTM", "GRU", "BiGRU"]
results = []
histories = []
for emb in embdd:
for clf in classifiers:
print(f"\n{emb} + {clf} ------------------------")
result, history = train(emb, clf, EPOCHS, DROPOUT)
results.append(result)
histories.append(history)
# # Results
df_results = pd.DataFrame(results, columns=["Emb", "Clf", "Pre", "Rec", "F1", "Acc"])
df_results.to_csv("results.txt", sep="\t", index=False)
pd.DataFrame(
    {"model": df_results["Emb"] + "+" + df_results["Clf"], "history": histories}
).to_csv("histories.txt", sep="\t", index=False)
df = pd.DataFrame(df_results, columns=["Emb", "Clf", "Pre", "Rec", "F1", "Acc"])
df["model"] = df["Emb"] + "+" + df["Clf"]
df = df.sort_values(by=["F1"])
print(df_results)
# # Bar Graph
# Build the grouped bar chart from the sorted results dataframe
models = df["model"].tolist()
pre, rec, f1, acc = df["Pre"], df["Rec"], df["F1"], df["Acc"]
barWidth = 0.1
fig = plt.subplots(figsize=(10, 3))
br1 = np.arange(len(f1))
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
br4 = [x + barWidth for x in br3]
plt.bar(br1, pre, width=barWidth)
plt.bar(br2, rec, width=barWidth)
plt.bar(br3, f1, width=barWidth)
plt.bar(br4, acc, width=barWidth)
plt.xlabel("Model", fontsize=9)
plt.ylabel("Score", fontsize=9)
plt.xticks([r + barWidth for r in range(len(f1))], models)
plt.ylim(80, 95)
plt.xticks(rotation=80)
plt.legend(["P", "R", "F1", "Acc"])
plt.show()
|
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
# Read the dataset
file_path = "../input/cancer/data.csv"
data1 = pd.read_csv(file_path)
data = data1[["diagnosis", "radius_mean", "texture_mean"]].copy()
# Choose features and label
k1, k2, k3 = "radius_mean", "texture_mean", "diagnosis"
X = data[[k1, k2]]
y = data[k3]
# Use numbers to represent different types
y[y == "M"] = 0
y[y == "B"] = 1
# Change the data type
y = y.astype(dtype=np.uint8)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
for max_leaf_nodes in [10, 20, 30, 40, 50]:
model1 = DecisionTreeClassifier(max_leaf_nodes=max_leaf_nodes, random_state=0)
model1.fit(X_train, y_train)
pred = model1.predict(X_test)
print(mean_absolute_error(y_test, pred))
# Of the options listed, choose 10 as the value of 'max_leaf_nodes'
X.shape
# Build the model
model = DecisionTreeClassifier(max_leaf_nodes=10, random_state=0)
# Split X, y into 5 groups respectively
index1 = list(range(0, 114))
index2 = list(range(114, 228))
index3 = list(range(228, 342))
index4 = list(range(342, 456))
index5 = list(range(456, 569))
X1, X2, X3, X4, X5 = (
X.iloc[index1],
X.iloc[index2],
X.iloc[index3],
X.iloc[index4],
X.iloc[index5],
)
y1, y2, y3, y4, y5 = (
y.iloc[index1],
y.iloc[index2],
y.iloc[index3],
y.iloc[index4],
y.iloc[index5],
)
# Manually implement 'k-fold cross validation', in which k = 5
# Loop 1
X_train_1 = pd.concat([X2, X3, X4, X5])
y_train_1 = pd.concat([y2, y3, y4, y5])
model.fit(X_train_1, y_train_1)
pred1 = model.predict(X1)
error_1 = mean_absolute_error(y1, pred1)
# Loop 2
X_train_2 = pd.concat([X1, X3, X4, X5])
y_train_2 = pd.concat([y1, y3, y4, y5])
model.fit(X_train_2, y_train_2)
pred2 = model.predict(X2)
error_2 = mean_absolute_error(y2, pred2)
# Loop 3
X_train_3 = pd.concat([X1, X2, X4, X5])
y_train_3 = pd.concat([y1, y2, y4, y5])
model.fit(X_train_3, y_train_3)
pred3 = model.predict(X3)
error_3 = mean_absolute_error(y3, pred3)
# Loop 4
X_train_4 = pd.concat([X1, X2, X3, X5])
y_train_4 = pd.concat([y1, y2, y3, y5])
model.fit(X_train_4, y_train_4)
pred4 = model.predict(X4)
error_4 = mean_absolute_error(y4, pred4)
# Loop 5
X_train_5 = pd.concat([X1, X2, X3, X4])
y_train_5 = pd.concat([y1, y2, y3, y4])
model.fit(X_train_5, y_train_5)
pred5 = model.predict(X5)
error_5 = mean_absolute_error(y5, pred5)
average = (error_1 + error_2 + error_3 + error_4 + error_5) / 5
print("average error is", average)
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
# Plot the ROC curve for each of the five folds, using the model from the last fit above
for i, (X_fold, y_fold) in enumerate(zip([X1, X2, X3, X4, X5], [y1, y2, y3, y4, y5]), start=1):
    y_prob = model.predict_proba(X_fold)[:, 1]
    fpr, tpr, thre = roc_curve(y_fold, y_prob)
    plt.title(f"Receiver Operating Characteristic-{i}")
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.plot(fpr, tpr)
    plt.show()
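# A hedged follow-up: each curve above can also be summarised numerically by its area
# under the curve (AUC), again using the model from the last fit.
from sklearn.metrics import roc_auc_score

for i, (X_fold, y_fold) in enumerate(zip([X1, X2, X3, X4, X5], [y1, y2, y3, y4, y5]), start=1):
    print(f"Fold {i} AUC: {roc_auc_score(y_fold, model.predict_proba(X_fold)[:, 1]):.3f}")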
|
import numpy as np
import cupy as cp
import pandas as pd
from sklearn import preprocessing
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from scipy.sparse.linalg import eigsh
from scipy.spatial.distance import pdist, squareform
from sklearn.cluster import KMeans
col_names = [
"duration",
"protocol_type",
"service",
"flag",
"src_bytes",
"dst_bytes",
"land",
"wrong_fragment",
"urgent",
"hot",
"num_failed_logins",
"logged_in",
"num_compromised",
"root_shell",
"su_attempted",
"num_root",
"num_file_creations",
"num_shells",
"num_access_files",
"num_outbound_cmds",
"is_host_login",
"is_guest_login",
"count",
"srv_count",
"serror_rate",
"srv_serror_rate",
"rerror_rate",
"srv_rerror_rate",
"same_srv_rate",
"diff_srv_rate",
"srv_diff_host_rate",
"dst_host_count",
"dst_host_srv_count",
"dst_host_same_srv_rate",
"dst_host_diff_srv_rate",
"dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate",
"dst_host_serror_rate",
"dst_host_srv_serror_rate",
"dst_host_rerror_rate",
"dst_host_srv_rerror_rate",
"label",
]
# # Pre-Processing
train_df = pd.read_csv(
"/kaggle/input/kdd-cup-1999-data/kddcup.data_10_percent.gz",
compression="gzip",
header=None,
names=col_names,
sep=",",
quotechar='"',
)
train2_df = pd.read_csv(
"/kaggle/input/kdd-cup-1999-data/kddcup.data.gz",
compression="gzip",
header=None,
names=col_names,
sep=",",
quotechar='"',
)
test_df = pd.read_csv(
"/kaggle/input/kdd-cup-1999-data/corrected.gz",
compression="gzip",
header=None,
names=col_names,
sep=",",
quotechar='"',
)
train_df.head()
train_df = train_df.dropna()
train_df = train_df.dropna(axis=1)
train2_df = train2_df.dropna()
train2_df = train2_df.dropna(axis=1)
test_df = test_df.dropna()
test_df = test_df.dropna(axis=1)
train2_df
train_df[train_df.duplicated()]
train_df = train_df.drop_duplicates()
train2_df = train2_df.drop_duplicates()
test_df = test_df.drop_duplicates()
frames = [train_df, test_df]
df = pd.concat(frames, ignore_index=True)
# # Convert categorical data to numeric
label_encoder = preprocessing.LabelEncoder()
df["protocol_type"] = label_encoder.fit_transform(df["protocol_type"])
df["service"] = label_encoder.fit_transform(df["service"])
df["flag"] = label_encoder.fit_transform(df["flag"])
df["label"] = label_encoder.fit_transform(df["label"])
# # check redundant columns
for col in df.columns:
print(df[col].value_counts())
print("============")
train_df["label"].value_counts()
# # By inspection, num_outbound_cmds has only one value, so it has no effect on the model; drop it
df = df.drop(["num_outbound_cmds"], axis=1)
# # showing correlation matrix
cor = df.corr()
plt.figure(figsize=(35, 35))
import seaborn as sns
sns.heatmap(cor, annot=True, cmap="coolwarm")
# # Some features have nearly equal correlation, so they carry the same information; drop features with corr >= 99%
import warnings
warnings.filterwarnings("ignore")
upper_tri = cor.where(np.triu(np.ones(cor.shape), k=1).astype(bool))
to_drop = [column for column in upper_tri.columns if any(upper_tri[column] >= 0.99)]
df = df.drop(columns=to_drop, axis=1)
train_df = df.iloc[:145586]
test_df = df.iloc[145586:]
# # Dropping labels column
train_df_unlabeled = train_df.drop(["label"], axis=1)
test_df_unlabeled = test_df.drop(["label"], axis=1)
# # As some columns span a large range of values and others a small one, we will scale the data
new_cols = train_df_unlabeled.columns
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
train_df_unlabeled = scaler.fit_transform(train_df_unlabeled)
test_df_unlabeled = scaler.transform(test_df_unlabeled)  # reuse the scaler fitted on the training data
train_df_unlabeled = pd.DataFrame(train_df_unlabeled, columns=new_cols)
test_df_unlabeled = pd.DataFrame(test_df_unlabeled, columns=new_cols)
# # K-Means
def euclidean(point, data):
return np.sqrt(np.sum((point - data) ** 2, axis=1))
import random
from tqdm import tqdm
class K_Means:
def __init__(self, k=7, max_iter=300):
self.k = k
self.max_iter = max_iter
def fit(self, data):
self.centroids = {}
self.centroids[0] = random.choice(data)
# Initializing centroids with Kmeans++
for i in range(1, self.k):
dists = np.sum(
[euclidean(centroid, data) for centroid in self.centroids.values()],
axis=0,
)
dists /= np.sum(dists)
(new_centroid_idx,) = np.random.choice(range(len(data)), size=1, p=dists)
self.centroids[i] = data[new_centroid_idx]
for iter in tqdm(range(self.max_iter)):
            # classifications maps each centroid index to the data points assigned to it
            self.classifications = {}
            # indeces maps each centroid index to the row indices of those points
            self.indeces = {}
            for i in range(self.k):
                self.classifications[i] = []  # empty list to collect this centroid's data points
                self.indeces[i] = []  # empty list to collect this centroid's point indices
for i, featureset in enumerate(data):
distances = [
np.linalg.norm(featureset - self.centroids[centroid])
for centroid in self.centroids
] ## calculate distance between the point and each centroid
classification = np.argmin(distances) # get the minimum distance
self.classifications[classification].append(
featureset
) ## add data point to the corresponding centroid
self.indeces[classification].append(i)
prev_centroids = dict(self.centroids)
for classification in self.classifications:
self.centroids[classification] = np.mean(
self.classifications[classification], axis=0
) ## update centroids according to clusters
for centroid in self.centroids:
if np.isnan(self.centroids[centroid]).any():
print("YA SATEER")
self.centroids[centroid] = prev_centroids[centroid]
if prev_centroids.keys() == self.centroids.keys() and all(
np.allclose(prev_centroids[k], self.centroids[k])
for k in prev_centroids.keys()
):
break
def predict(self, data): # takes a data point
distances = [
np.linalg.norm(data - self.centroids[centroid])
for centroid in self.centroids
]
classification = distances.index(min(distances))
return classification # return the assigned centroid
km = K_Means(23)
km.fit(train_df_unlabeled.to_numpy())
test = []
for i in range(23):
row = []
for j in tqdm(range(len(km.classifications[i]))):
index = km.indeces[i][j]
label = train_df.iloc[index, 35]
row.append(label)
test.append(row)
from collections import Counter
majority = {}
for i in range(23):
print(Counter(test[i]))
print(int(list(Counter(test[i]).most_common())[0][0]))
majority[i] = int(list(Counter(test[i]).most_common())[0][0])
list(set(majority.values()))
y_true = []
y_pred = []
from IPython.display import clear_output
for index, entry in enumerate(test_df_unlabeled.to_numpy()):
if test_df.iloc[index, 35] in list(set(majority.values())):
y_true.append(test_df.iloc[index, 35])
y_pred.append(km.predict(entry))
else:
continue
y_predict = []
for i in range(len(y_pred)):
y_pred[i] = majority[y_pred[i]]
from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score
macro_precision = precision_score(y_true, y_pred, average="macro")
print("Macro-averaged precision:", macro_precision)
macro_f1 = f1_score(y_true, y_pred, average="macro")
print("Macro-averaged F1 score:", macro_f1)
accuracy_score(y_true, y_pred)
test_df[test_df.duplicated()]
test_df.head()
test_df["label"].value_counts()
label_encoder = preprocessing.LabelEncoder()
train_df["protocol_type"] = label_encoder.fit_transform(train_df["protocol_type"])
test_df["protocol_type"] = label_encoder.fit_transform(test_df["protocol_type"])
train_df["service"] = label_encoder.fit_transform(train_df["service"])
test_df["service"] = label_encoder.fit_transform(test_df["service"])
train_df["flag"] = label_encoder.fit_transform(train_df["flag"])
test_df["flag"] = label_encoder.fit_transform(test_df["flag"])
train_df["label"] = label_encoder.fit_transform(train_df["label"])
test_df["label"] = label_encoder.fit_transform(test_df["label"])
train2_df["protocol_type"] = label_encoder.fit_transform(train2_df["protocol_type"])
train2_df["service"] = label_encoder.fit_transform(train2_df["service"])
train2_df["flag"] = label_encoder.fit_transform(train2_df["flag"])
train2_df["label"] = label_encoder.fit_transform(train2_df["label"])
train_df["protocol_type"].value_counts()
train2_df.head()
y = train2_df["label"]
X = train2_df.drop(columns=["label"])
y
X
from sklearn.model_selection import train_test_split
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.0025, random_state=42, stratify=y
)
# # Take only 0.25% of the data in the new training set
# X_train, _, y_train, _ = train_test_split(X_train, y_train, train_size=0.0025, random_state=42, stratify=y_train)
y_train.value_counts()
# # APPLYING NORMALIZED CUT
import numpy as np
from sklearn.cluster import KMeans
def spectral_clustering(df, n_clusters):
# Compute the similarity matrix
X = df.values
X_normalized = X / np.linalg.norm(X, axis=1, keepdims=True)
similarity_matrix = X_normalized @ X_normalized.T
# Compute the degree matrix
degree_matrix = np.diag(similarity_matrix.sum(axis=1))
# Compute the Laplacian matrix
laplacian_matrix = degree_matrix - similarity_matrix
    # Compute the eigenvectors and eigenvalues of the (symmetric) Laplacian matrix
    eigenvalues, eigenvectors = np.linalg.eigh(laplacian_matrix)
    # Find the indices of the eigenvectors corresponding to the smallest eigenvalues
    indices = np.argsort(eigenvalues)[:n_clusters]
    # Use those eigenvectors as the spectral embedding to cluster the data
    clusters = eigenvectors[:, indices]
clusters_normalized = clusters / np.linalg.norm(clusters, axis=1, keepdims=True)
# Fit KMeans on the normalized clusters
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(clusters_normalized)
# Predict the labels for the original data points
labels = kmeans.predict(clusters_normalized)
return labels, clusters_normalized
y_pred, clusters = spectral_clustering(X_train, n_clusters=23)
pd.DataFrame(clusters)
pd.DataFrame(y_pred).value_counts()
from sklearn.metrics import silhouette_score
# Evaluate clustering performance on new data
new_silhouette_score = silhouette_score(clusters, y_pred)
new_silhouette_score
from sklearn.cluster import KMeans
from sklearn.metrics import classification_report
# kmeans = KMeans(n_clusters=3)
# kmeans.fit(X)
# y_pred = kmeans.predict(X)
# cluster_labels = kmeans.labels_
# Compute the majority label for each cluster
majority_labels = []
for i in range(23):
cluster_indices = np.where(y_pred == i)[0]
cluster_labels = y_train.iloc[cluster_indices]
majority_label = np.bincount(cluster_labels).argmax()
majority_labels.append(majority_label)
print("Majority labels for each cluster:", majority_labels)
# Map each data point in the test set to its corresponding cluster label
train_cluster_map = y_pred.reshape(-1, 1)
# Use the cluster map to assign the majority label to each data point
train_major_labels = np.take(majority_labels, train_cluster_map).reshape(-1)
train_major_labels
from sklearn.metrics import f1_score, recall_score, precision_score
# Calculate the F1 score and recall for each class based on the testing set
test_f1_scores = f1_score(y_train, train_major_labels, average="weighted")
test_recall_scores = recall_score(y_train, train_major_labels, average="weighted")
test_precision_scores = precision_score(y_train, train_major_labels, average="weighted")
print(f"Testing F1 scores: {test_f1_scores}")
print(f"Testing recall scores: {test_recall_scores}")
print(f"Testing precision scores: {test_precision_scores}")
from collections import Counter
from sklearn.cluster import KMeans
# Compute the joint probability of true label y and cluster label c
def compute_joint_probability(majority_labels, y_train, X_train, n_clusters):
kmeans = KMeans(n_clusters=n_clusters, random_state=42)
kmeans.fit(X_train)
n_samples = len(y_train)
joint_prob = np.zeros((n_clusters, len(np.unique(y_train))))
for i in range(n_samples):
cluster = kmeans.labels_[i]
        label = y_train.iloc[i]  # positional indexing: y_train keeps its original index after the split
joint_prob[cluster, label] += 1
joint_prob /= n_samples
return joint_prob
# Compute the conditional probability of true label y given cluster label c
def compute_conditional_probability(joint_prob):
conditional_prob = joint_prob / np.sum(joint_prob, axis=1)[:, np.newaxis]
return conditional_prob
# Compute the conditional entropy of the cluster labels given the true labels
def compute_conditional_entropy(conditional_prob):
entropy = 0
for i in range(conditional_prob.shape[0]):
for j in range(conditional_prob.shape[1]):
p = conditional_prob[i, j]
if p > 0:
entropy -= p * np.log2(p)
return entropy
# Compute the joint probability of true label y and cluster label c in the training set
joint_prob_train = compute_joint_probability(train_major_labels, y_train, X_train, 23)
# Compute the conditional probability of true label y given cluster label c in the training set
conditional_prob_train = compute_conditional_probability(joint_prob_train)
# Compute the conditional entropy of the cluster labels given the true labels in the training set
H_Y_given_C_train = compute_conditional_entropy(conditional_prob_train)
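# For reference, the standard conditional entropy is
#   $H(Y \mid C) = -\sum_{c}\sum_{y} p(c, y)\,\log_2 p(y \mid c)$,
# i.e. each term is weighted by the joint probability p(c, y). The helper above instead sums
# $-p(y \mid c)\log_2 p(y \mid c)$ directly, so it adds up the per-cluster entropies without
# the p(c) weighting.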
|
# The dataset we are using here contains two data files about two marketing campaigns (Control Campaign and Test Campaign).
import pandas as pd
import datetime
from datetime import date, timedelta
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "plotly_white"
control_data = pd.read_csv(
"/kaggle/input/example-dataset-for-ab-test/control_group.csv", sep=";"
)
test_data = pd.read_csv(
"/kaggle/input/example-dataset-for-ab-test/test_group.csv", sep=";"
)
control_data.head(2)
test_data.head(2)
# prepare the Data
# * Update column name
# * Search for null value
# * Replace null value with mean value
control_data.columns = [
"Campaign Name",
"Date",
"Amount Spent",
"Number of Impressions",
"Reach",
"Website Clicks",
"Searches Received",
"Content Viewed",
"Added to Cart",
"Purchases",
]
test_data.columns = [
"Campaign Name",
"Date",
"Amount Spent",
"Number of Impressions",
"Reach",
"Website Clicks",
"Searches Received",
"Content Viewed",
"Added to Cart",
"Purchases",
]
control_data.head(2)
test_data.head(2)
control_data.isnull().sum()
test_data.isnull().sum()
control_data["Number of Impressions"].fillna(
value=control_data["Number of Impressions"].mean(), inplace=True
)
control_data["Reach"].fillna(value=control_data["Reach"].mean(), inplace=True)
control_data["Website Clicks"].fillna(
value=control_data["Website Clicks"].mean(), inplace=True
)
control_data["Searches Received"].fillna(
value=control_data["Searches Received"].mean(), inplace=True
)
control_data["Content Viewed"].fillna(
value=control_data["Content Viewed"].mean(), inplace=True
)
control_data["Added to Cart"].fillna(
value=control_data["Added to Cart"].mean(), inplace=True
)
control_data["Purchases"].fillna(value=control_data["Purchases"].mean(), inplace=True)
control_data.isnull().sum()
control_data.info()
control_data.describe()
test_data.info()
test_data.describe()
# Merge both Dataset
ab_data = control_data.merge(test_data, how="outer").sort_values(["Date"])
ab_data = ab_data.reset_index(drop=True)
ab_data.head()
# have a look at whether the dataset has an equal number of samples for both campaigns
ab_data["Campaign Name"].value_counts()
# A/B Testing to Find the Best Marketing Strategy
figure = px.scatter(
data_frame=ab_data,
x="Number of Impressions",
y="Amount Spent",
size="Amount Spent",
color="Campaign Name",
trendline="ols",
)
figure.show()
# Control Campaign shows more impressions according to the amount spent on both campaigns
# Number of searches performed on the website from both campaigns
label = ["Total Searches from Control Campaign", "Total Searches from Test Campaign"]
counts = [sum(control_data["Searches Received"]), sum(test_data["Searches Received"])]
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Control Vs Test: Searches")
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
# Audience of the control campaign viewed more content than the test campaign.
# Although the difference is not large, and the control campaign had fewer website clicks, its engagement on the website is higher than that of the test campaign
# the number of products added to the cart from both campaigns
label = [
"Products Added to Cart from Control Campaign",
"Products Added to Cart from Test Campaign",
]
counts = [sum(control_data["Added to Cart"]), sum(test_data["Added to Cart"])]
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Control Vs Test: Added to Cart")
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
# Despite low website clicks more products were added to the cart from the control campaign
# let’s have a look at the amount spent on both campaigns
label = ["Amount Spent in Control Campaign", "Amount Spent in Test Campaign"]
counts = [sum(control_data["Amount Spent"]), sum(test_data["Amount Spent"])]
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Control Vs Test: Amount Spent")
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
# The amount spent on the test campaign is higher than the control campaign.
# But since the control campaign resulted in more content views and more products in the cart, it is more efficient than the test campaign.
# let’s have a look at the purchases made by both campaigns
label = ["Purchases Made by Control Campaign", "Purchases Made by Test Campaign"]
counts = [sum(control_data["Purchases"]), sum(test_data["Purchases"])]
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Control Vs Test: Purchases")
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
# There’s only a difference of around 1% in the purchases made from both ad campaigns.
# As the Control campaign resulted in more sales in less amount spent on marketing, the control campaign wins here!
# Let’s analyze some metrics to find which ad campaign converts more.
# Let's look at the relationship between the number of website clicks and content viewed from both campaigns
figure = px.scatter(
data_frame=ab_data,
x="Content Viewed",
y="Website Clicks",
size="Website Clicks",
color="Campaign Name",
trendline="ols",
)
figure.show()
# The website clicks are higher in the test campaign, but the engagement from website clicks is higher in the control campaign
# analyze the relationship between the amount of content viewed and the number of products added to the cart
figure = px.scatter(
data_frame=ab_data,
x="Added to Cart",
y="Content Viewed",
size="Added to Cart",
color="Campaign Name",
trendline="ols",
)
figure.show()
# Control Campaign wins again
# the relationship between the number of products added to the cart and the number of sales
figure = px.scatter(
data_frame=ab_data,
x="Purchases",
y="Added to Cart",
size="Purchases",
color="Campaign Name",
trendline="ols",
)
figure.show()
# The conversion rate of the test campaign is higher.
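# A hedged closing check (not part of the original comparison): a Welch two-sample t-test on
# daily Purchases, assuming scipy is available and that Purchases has no missing values in
# either group, to see whether the difference between the two campaigns is statistically significant.
from scipy import stats as scipy_stats

t_stat, p_value = scipy_stats.ttest_ind(
    control_data["Purchases"], test_data["Purchases"], equal_var=False
)
print(f"Welch t-test on Purchases: t = {t_stat:.3f}, p = {p_value:.3f}")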
|
# # Purpose of this notebook
# - Provide a cheat sheet of EDA.
# - We will not focus on the profound knowledge of EDA here.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import yfinance
# to ignore some warnings in python when fitting models ARMA or ARIMA
import warnings
warnings.filterwarnings("ignore")
# display all the columns and rows
pd.set_option("display.max_rows", 300)
pd.set_option("display.max_colwidth", None),
# set figure size for single graphs
# plt.rcParams['figure.figsize'] = [15, 6]
# # Import Data
df1 = pd.read_csv("/kaggle/input/car-prices-market/used_car_prices.csv")
df2 = pd.read_csv("/kaggle/input/car-prices-market/new_cars_prices.csv")
# df2 = pd.read_csv("/kaggle/input/car-prices-market/new_cars_prices.csv", sheet_name='new_cars_prices', skiprows=None)
df = yfinance.download(
tickers="GC=F", start="2020-01-30", end="2022-01-30", group_by="column"
)[["Adj Close"]]
df1.head()
df2.head()
df.head()
# ## Overview
df1.info()
# describe with selected statistics only, e.g. count and top
df1.describe().loc[["count", "top"]]
df1.shape
df1.dtypes
# ## Column names, index changing
df1.columns
# change the name of columns
df1.columns = [
"web_scraper_order",
"Car Model",
"Month/Year",
"Average price",
"Minimum price",
"Maximum price",
]
# Another way
df1.rename(
columns={
"web-scraper-order": "web_scraper_order",
"Car Model": "car_model",
"Month/Year": "month_year",
"Average price": "average_price",
"Minimum price": "minimum_price",
"Maximum price": "maximum_price",
},
inplace=True,
errors="ignore",
) # can set raise error if needed
df.reset_index(
inplace=True,
# drop = True
)
# ## Unique values, Value counts
# Count the number of distinct values in each column (across rows)
df1.nunique(axis=0, dropna=True)
# Count the number of distinct values in each row (across columns)
df1.nunique(axis=1)
df1.car_model.unique()
df1["car_model"].value_counts()
# ## Null values
df1.isnull().values.any()
df1.isnull().sum().sort_values(ascending=False)
# print out the rows with null values
df_null_row = df1[df1.isna().any(axis=1)]
df_null_row.head()
df1.dropna(inplace=True)
# Drop null values with conditions
# Drop the rows where at least one element is missing
# df1.dropna(inplace=True)
# Define in which columns to look for missing values.
# df.dropna(subset=['name', 'toy'])
# Drop the columns where at least one element is missing.
# df.dropna(axis='columns')
# Drop the rows where all elements are missing.
# df.dropna(how='all')
# drop null values based on a threshold
# df.dropna(thresh = len(df) * 0.2, axis = 1, inplace = True)
# ## Date and time
# fill all rows in a column with the same values
df["today"] = "2023-05-02"
from datetime import datetime
# convert
df["today"] = pd.to_datetime(df["today"])
# Calculate days between 2 columns of datetime
df["days_count"] = (pd.Timestamp("today") - df["Date"]).dt.days
df.head()
# # String, text
list_col = ["average_price", "minimum_price", "maximum_price"]
for i in list_col:
df1[i] = df1[i].str.strip("EGP")
df1[i] = df1[i].str.replace(",", "")
df1[i] = df1[i].str.strip(" ")
df1[i] = df1[i].astype("int")
df1.dtypes
# other
t = "2023-03"
t.split("-")[0]
list_year = []
for i in df1["month_year"]:
a = i.split("-")[0]
list_year.append(a)
df1["year"] = list_year
df1.head()
# # Loop, concat loop, lamba
# Split Car Full Data into Brand, Model, And Year of The Model
model = df1["car_model"].apply(
lambda x: "".join((x.split()[1], " ", x.split()[2]))
if len(x.split()) > 3
else x.split()[1]
)
year = df1["car_model"].apply(lambda x: x.split()[-1])
brand = df1["car_model"].apply(lambda x: x.split()[0])
df1["brand"], df1["model"], df1["model_year"] = brand, model, year
# total rows of 2 dataframes
df1.shape[0] + df2.shape[0]
# total columns of 2 dataframes
df1.shape[1] + df2.shape[1]
# Take a small part of dataframe to test
df1_test = df1.head(3)[["web_scraper_order", "car_model"]]
df2_test = df2.head(3)[["web-scraper-order", "Car Model"]]
df1_test
df2_test
# concat 2 dataframes with the same number of columns and same names
pd.concat(
[
df1_test,
df2_test.rename(
columns={"web-scraper-order": "web_scraper_order", "Car Model": "car_model"}
),
],
ignore_index=True,
)
# merge data using a column
# add a random columns for testing
df1_test["color"] = ["red", "blue", "green"]
df2_test["color"] = ["blue", "red", "red"]
print(df1_test)
print(df2_test)
df1_test.merge(df2_test, how="inner", on="color")
df1_test.merge(df2_test, how="outer", on="color")
# # Simple plots with df.plot()
# ## Line graph
df["Adj Close"].plot().set_ylabel("Gold prices")
# ## Histogram and pie chart
df["Adj Close"].plot(kind="hist", bins=20)
fig, axs = plt.subplots(1, 2, figsize=(15, 6))
df1["average_price"].hist(bins=10, ax=axs[0])
axs[0].set_title("average_price", fontsize=16)
axs[0].set_ylabel("Frequency")
axs[0].set_xlabel("EGP")
df1["year"].value_counts().plot.pie(ax=axs[1], autopct="%.2f%%")
axs[1].set_title("Year#", fontsize=16)
plt.show()
# ## Bar chart
# in percentage
df1["year"].value_counts(normalize=True).mul(100).plot.bar()
# ## Boxplot
df1.boxplot(
column=["minimum_price", "maximum_price"],
grid=True,
)
# # Relationships among variables via plots
# # Univariate
df.cov()
df.var()
df.std()
# skewness and kurtosis
print("Skewness: %f" % df["Adj Close"].skew())
print("Kurtosis: %f" % df["Adj Close"].kurt())
# histogram
sns.distplot(df["Adj Close"])
# probability plot
res = stats.probplot(df["Adj Close"], plot=plt)
# # Bivariate
# ## Numeric - Numeric
# - Correlation Matrix
# - Scatterplot
# ### Correlation Matrix
# correlation matrix
cm = df1.corr()
sns.heatmap(cm, cbar=True, annot=True, square=True, fmt=".2f", annot_kws={"size": 10})
# ### Scatterplot
sns.pairplot(data=df1)
# ## Category - Category
pd.crosstab(df1["brand"], df1["year"]).plot(kind="bar", stacked=True)
plt.title("Year vs Model", fontsize=16)
plt.ylabel("Frequency")
plt.show()
# ## Category - Numeric
sns.boxplot(
x="year",
y="minimum_price",
data=df1,
palette="Accent", # color='g'
)
# # Fill, slice values
df1[["brand", "model", "average_price"]].head()
df1.loc[6:8]
# # Convert df, list, set, zip and mapping
x = df1["car_model"].unique().tolist()
# x
# zip and map
y = ["Category A", "Category B", "Category C"]
z = dict(zip(x, y))
df1["Category"] = df1["car_model"].map(z)
df1[:15]
s = pd.Series(["1.0", "2", -3])
pd.to_numeric(s, downcast="float")
# # For finance
# annualized
df.cov() * 252
df.std() * np.sqrt(252)
# updating
# # Combinations
from itertools import combinations, combinations_with_replacement
# Get all combinations of the list ['Beer', 'Whiskey', 'CubaLibre']
# taken 3 at a time
comb = combinations(["Beer", "Whiskey", "CubaLibre"], 3)
# Print the obtained combinations
for i in list(comb):
print(i)
comb = combinations_with_replacement(["Beer", "Whiskey", "CubaLibre"], 3)
# Print the obtained combinations
for i in list(comb):
print(i)
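# For reference (a quick check on the counts printed above): choosing 3 items out of 3 without
# replacement gives C(3, 3) = 1 combination, while with replacement there are
# C(3 + 3 - 1, 3) = C(5, 3) = 10 combinations.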
# # Export to excel or csv, modify available file
# df1.to_csv("old_cars_df.csv", index=False)
# df2.to_excel("new_cars_df.xlsx", sheet_name='Details', index=False)
# with pd.ExcelWriter('new_cars_df.xlsx', engine='openpyxl', mode='a') as writer:
# df2.to_excel(writer, sheet_name='Customer2')
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
data = pd.read_csv(
"/kaggle/input/hierarchical-clustering-for-customer-data/segmented_customers.csv"
)
data
plt.scatter(data["Annual Income (k$)"], data["Spending Score (1-100)"], s=10)
plt.xlabel("Annual Income")
plt.ylabel("Spending_Score")
plt.show()
IncomeVsSpending = data[["Annual Income (k$)", "Spending Score (1-100)"]]
No_of_clusters = [2, 3, 4, 5, 6, 7, 8, 9]
Avg_silhouette_score = []
Sum_of_squared_distances = []
for cluster_size in No_of_clusters:
# defining the kmeans algorithmn and fitting with data for different no of clusters
kmeans = KMeans(n_clusters=cluster_size, init="k-means++")
kmeans.fit(IncomeVsSpending)
identified_clusters = kmeans.fit_predict(IncomeVsSpending)
    data_with_clusters = IncomeVsSpending.copy()  # copy so the cluster labels do not leak into later fits
    data_with_clusters["clusters"] = identified_clusters
plt.title(f"Cluster_size = {cluster_size}")
plt.scatter(
data_with_clusters["Annual Income (k$)"],
data_with_clusters["Spending Score (1-100)"],
c=data_with_clusters["clusters"],
cmap="rainbow",
)
plt.show()
cluster_labels = kmeans.labels_
# silhouette_score
Avg_silhouette_score.append(
silhouette_score(X=IncomeVsSpending, labels=cluster_labels)
)
# Elbow method
Sum_of_squared_distances.append(kmeans.inertia_)
# Elbow method to find the optimum number of clusters
plt.plot(No_of_clusters, Sum_of_squared_distances, "bx-")
plt.title("Elbow Method")
plt.xlabel("No_of_clusters")
plt.ylabel("Reduction in variance")
plt.show()
# silhouette_score method to find the optimum nuber of clusters
plt.plot(No_of_clusters, Avg_silhouette_score, "bx-")
plt.title("silhouette_score_method")
plt.xlabel("No_of_clusters")
plt.ylabel("silhouette_score")
plt.show()
max_idx = pd.Series(Avg_silhouette_score).idxmax()
optimal_cluster = No_of_clusters[max_idx]
print(f"Optimum number of clusters, {optimal_cluster}")
|
# # House Price Prediction
# ## Problem Statement:
# Consider a real estate company that has a dataset containing the prices of properties in the Delhi region. It wishes to use the data to optimise the sale prices of the properties based on important factors such as area, bedrooms, parking, etc.
# Essentially, the company wants:
# - To identify the variables affecting house prices, e.g. area, number of rooms, bathrooms, etc.
# - To create a linear model that quantitatively relates house prices with variables such as number of rooms, area, number of bathrooms, etc.
# - To know the accuracy of the model, i.e. how well these variables can predict house prices.
# importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
# ## Data Understanding
# import the housing dataset
df = pd.read_csv("/kaggle/input/sample-housing-data/Housing.csv")
# checking how dataframe looks
df.head()
# checking the shape of data
df.shape
# checking the info for dataset
df.info()
# ```
# No null value found
# ```
# checking for duplicate rows
df.duplicated().sum()
# ```
# No duplicate rows found
# ```
# checking statistical info for categorical data
df.select_dtypes("object").describe()
# checking statistical info for numerical data
df.describe()
# ## Data Cleaning
# ```
# ['mainroad','guestroom','basement','hotwaterheating','airconditioning','prefarea']
# These variables are binary categorial variables. We will convert yes to 1 and no 0 as linear regression model only deals with numeric values.
# ```
# converting yes to 1 and no 0 for binary categorical features.
binary_categorical_vars = [
"mainroad",
"guestroom",
"basement",
"hotwaterheating",
"airconditioning",
"prefarea",
]
df[binary_categorical_vars] = df[binary_categorical_vars].apply(
lambda x: x.map({"yes": 1, "no": 0})
)
# checking the dataframe
df.head()
# ## Bivariate Analysis
def create_bar_chart(feature):
sns.barplot(data=df, x=feature, y="price")
plt.show()
categorical_vars = [
"bedrooms",
"bathrooms",
"stories",
"mainroad",
"guestroom",
"basement",
"hotwaterheating",
"airconditioning",
"parking",
"prefarea",
"furnishingstatus",
]
for feature in categorical_vars:
create_bar_chart(feature)
sns.pairplot(data=df, x_vars=["area"], y_vars="price")
plt.show()
plt.figure(figsize=(20, 15))
sns.heatmap(df.corr(), annot=True)
plt.show()
# ### Model Preparation
df.head()
# creating dummy variable furnishingstatus
furnishing_status_dummy = pd.get_dummies(data=df["furnishingstatus"], drop_first=True)
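# drop_first=True keeps k-1 dummy columns, which avoids perfect multicollinearity
# (the "dummy variable trap") once a constant/intercept is included in the model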
furnishing_status_dummy.head()
# adding new columns to main dataframe
df = pd.concat([df, furnishing_status_dummy], axis="columns")
df.head()
# removing furnishingstatus feature
df.drop("furnishingstatus", inplace=True, axis="columns")
# diving the dataset into train test
df_train, df_test = train_test_split(df, train_size=0.7, random_state=100)
# printing shape for train and test dataset
print(df_train.shape)
print(df_test.shape)
df_train.describe()
# scaling the variables
vars_to_scale = ["price", "area", "bedrooms", "bathrooms", "stories", "parking"]
scaler = MinMaxScaler()
df_train[vars_to_scale] = scaler.fit_transform(df_train[vars_to_scale])
# checking the dataframe after scaling
df_train.describe()
# making dependent and independent variables
y_train = df_train.pop("price")
X_train = df_train
# function to calculate vif
def calculate_vif(X_train):
vif = pd.DataFrame()
vif["Features"] = X_train.columns
vif["Vif"] = [
variance_inflation_factor(X_train.values, i) for i in range(X_train.shape[1])
]
vif = vif.sort_values(by="Vif", ascending=False)
return vif
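# For reference, the VIF of predictor j is $\mathrm{VIF}_j = 1 / (1 - R_j^2)$, where $R_j^2$
# comes from regressing x_j on the remaining predictors; values well above roughly 5-10 are
# commonly read as signs of strong multicollinearity.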
# creating a function make model
def create_model(y_train, X_train):
X_train_rfe = sm.add_constant(X_train)
lr = sm.OLS(y_train, X_train_rfe).fit()
return (X_train_rfe, lr)
# creating 1st model
X_train_rfe_1, lr_1 = create_model(y_train, X_train)
lr_1.summary()
X_train_rfe_1.pop("const")
calculate_vif(X_train_rfe_1)
# ```
# Removing 'semi-furnished' due to high p value
# ```
X_train.drop("semi-furnished", inplace=True, axis="columns")
# model 2
X_train_rfe_2, lr_2 = create_model(y_train, X_train)
lr_2.summary()
X_train_rfe_2.pop("const")
calculate_vif(X_train_rfe_2)
# ```
# removing 'bedrooms' due to its high p value and VIF
# ```
X_train.drop("bedrooms", inplace=True, axis="columns")
# model 3
X_train_rfe_3, lr_3 = create_model(y_train, X_train)
lr_3.summary()
X_train_rfe_3.pop("const")
calculate_vif(X_train_rfe_3)
# ```
# removing 'basement' as its p value is greater than 0.005
# ```
X_train.drop("basement", inplace=True, axis="columns")
# model 4
X_train_rfe_4, lr_4 = create_model(y_train, X_train)
lr_4.summary()
X_train_rfe_4.pop("const")
calculate_vif(X_train_rfe_4)
# ```
# model 4 (lr_4) is the final model
# ```
# ### Residual Analysis
X_train = sm.add_constant(X_train)
y_train_pred = lr_4.predict(X_train)
# checking normality of error terms
sns.histplot(x=(y_train - y_train_pred), kde=True)
plt.show()
# ```
# Error terms are normally distributed
# ```
# Homoscedasticity check (There should be no visible pattern in residual values.)
sns.scatterplot(x=y_train_pred, y=(y_train - y_train_pred))
plt.show()
# ```
# There is no visible pattern in residual values.
# ```
# ### Prediction on data
# scaling the test dataset
df_test[vars_to_scale] = scaler.transform(df_test[vars_to_scale])
# creating a list of final variables
final_vars = list(lr_4.params.index)
final_vars.remove("const")
final_vars.append("price")
final_vars
# selecting only the final variables from the test dataset
df_test = df_test[final_vars]
df_test.head()
df_test.describe()
# creating dependent and independent variables
y_test = df_test.pop("price")
X_test = df_test
# predicting the values using lr_4 model
X_test = sm.add_constant(X_test)
y_test_pred = lr_4.predict(X_test)
# checking the regression line
sns.regplot(x=(y_test), y=(y_test - y_test_pred), fit_reg=True)
plt.show()
# checking the r2 score for training and testing
print("Training r2 score: ", r2_score(y_true=y_train, y_pred=y_train_pred))
print("Testing r2 score: ", r2_score(y_true=y_test, y_pred=y_test_pred))
# checking the mean squared error for training and testing
print(
"Training mean squared error: ",
mean_squared_error(y_true=y_train, y_pred=y_train_pred),
)
print(
"Testing mean squared error: ",
mean_squared_error(y_true=y_test, y_pred=y_test_pred),
)
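# For reference: $R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}$ and
# $\mathrm{MSE} = \frac{1}{n}\sum_i (y_i - \hat{y}_i)^2$, which is what r2_score and
# mean_squared_error report above.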
# final params with coeff
lr_4.params
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Library
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/credit-card-customers/BankChurners.csv")
df.head()
# # Dropping first and last two column
df.drop(
[
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1",
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2",
"CLIENTNUM",
],
axis=1,
inplace=True,
)
df.head()
# # Checking for null values
df.isnull().sum()
# # Mapping Attrition_Flag
df["Attrition_Flag"] = df["Attrition_Flag"].map(
{"Existing Customer": 1, "Attrited Customer": 0}
)
# # Correlation
sns.heatmap(df.corr())
# # Removing Unwanted columns
df = df.drop(columns="Avg_Open_To_Buy")
df = df.drop(columns="Credit_Limit")
# # Balance / Imbalance
df["Attrition_Flag"].value_counts()
# # One Hot Encoding
df = pd.get_dummies(df, drop_first=True)
# # Dependent and Independent Variable
x = df.drop(columns="Attrition_Flag")
y = df["Attrition_Flag"]
# # Test set and Train set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=10)
rf.fit(x_train, y_train)
y_pred = rf.predict(x_test)
y_pred
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score
accuracy_score(y_test, y_pred)
confusion_matrix(y_test, y_pred)
precision_score(y_test, y_pred)
sns.distplot(y_test - y_pred)
|
# 1. Open the webpage of the book “The Elements of Statistical Learning”, go to the “Data”
# section and download the info and data files for the dataset called Prostate
# - Hint: https://web.stanford.edu/~hastie/ElemStatLearn/
# 2. Open the file prostate.info.txt
# - How many predictors are present in the dataset?
#
# There are eight predictors.
#
# - What are their names?
# - lcavol
# - lweight
# - age
# - lbph
# - svi
# - lcp
# - gleason
# - pgg45
#
# - How many responses are present in the dataset?
#
# There is just one response.
# - What are their names?
#
# Its name is lpsa.
# - How did the authors split the dataset in training and test set?
#
# The authors randomly split the dataset into a training set of size 67 and a test set of size 30.
# - Hint: please, refer also to Section 3.2.1 (page 49) of the book “The Elements of Statistical Learning” to gather this information
# 3. Open the file prostate.data by a text editor or a spreadsheet and have a quick look at the data
# - How many observations are present?
#
# There are 97 observations.
# - Which is the symbol used to separate the columns?
#
# A tab character is used to separate the columns (hence sep="\t" when reading the file below).
# 4. Open Kaggle, generate a new notebook and give it the name “SL_EX2_ProstateCancer_Surname”
# 5. Add the dataset prostate.data to the kernel
# - Hint: See the Add Dataset button on the right
# - Hint: use import option “Convert tabular files to csv”
# 6. Run the first cell of the notebook to check if the data file is present in folder /kaggle/input
# 7. Add to the first cell new lines to load the following libraries: seaborn, matplotlib.pyplot,
# sklearn.linear_model.LinearRegression
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn
import matplotlib.pyplot
from sklearn.linear_model import LinearRegression
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# 8. Add a Markdown cell on top of the notebook, copy and paste in it the text of this exercise
# and provide in the same cell the answers to the questions that you get step-by-step.
# 9. Load the Prostate Cancer dataset into a Pandas DataFrame variable called data
data = pd.read_csv("/kaggle/input/prostate/prostate.csv", sep="\t") # \s+
# • How can you say Python to use the right separator between columns?
# Through sep='\t'
# 10. Display the number of rows and columns of variable data
print("The number of rows is:", data.shape[0])
print("The number of columns is:", data.shape[1])
# 11. Show the first 5 rows of the dataset
data.head()
# 12. Remove the first column of the dataset which contains observation indices
#
data = data.drop(["Unnamed: 0"], axis=1)
# 13. Save column train in a new variable called train and having type Series (the Pandas data structure used to represent DataFrame columns), then drop the column train from the data DataFrame
train = data["train"].copy()
train1 = data[["train"]].copy()
data = data.drop(["train"], axis=1)
# 14. Save column lpsa in a new variable called lpsa and having type Series (the Pandas data structure used to represent DataFrame columns), then drop the column lpsa from the data DataFrame and save the result in a new DataFrame called predictors
lpsa = data["lpsa"].copy()
predictors = data.drop(["lpsa"], axis=1)
# • How many predictors are available?
print(predictors.shape[1], " predictors are available.")
# 15. Check the presence of missing values in the data variable
data.info()
# - How many missing values are there? In which columns?
#
# There are no missing values
#
# - Which types do the variables have?
# 16. Show histograms of all variables in a single figure
# • Use argument figsize to enlarge the figure if needed
data.hist(figsize=(10, 10))
# 17. Show the basic statistics (min, max, mean, quartiles, etc. for each variable) in data
data.describe()
# 18. Generate a new DataFrame called dataTrain and containing only the rows of data in which the train variable has value “T”
# • Hint: use the loc attribute of DataFrame to access a groups of rows and columns by label(s) or boolean arrays
train.head()
train1["train"] = train1["train"].map({"T": 1, "F": 0}).astype(int)
dataTrain = data.loc[train1["train"] == 1]
# • How many rows and columns does dataTrain have?
print("The number of rows is:", dataTrain.shape[0])
print("The number of columns is:", dataTrain.shape[1])
# 19. Generate a new DataFrame called dataTest and containing only the rows of data in which the train variable has value “F”
dataTest = data.loc[train1["train"] == 0]
# • How many rows and columns does dataTest have?
print("The number of rows is:", dataTest.shape[0])
print("The number of columns is:", dataTest.shape[1])
# 20. Generate a new Series called lpsaTrain and containing only the values of variable lpsa in which the train variable has value “T”
lpsaTrain = data["lpsa"].loc[train1["train"] == 1].copy()
# • How many values does lpsaTrain have?
print(len(lpsaTrain))
# 21. Generate a new Series called lpsaTest and containing only the values of variable lpsa in which the train variable has value “F”
lpsaTest = data["lpsa"].loc[train1["train"] == 0].copy()
# • How many values does lpsaTest have?
print(len(lpsaTest))
# 22. Show the correlation matrix among all the variables in dataTrain
# • Hint: use the correct method in DataFrame
# • Hint: check if the values in the matrix correspond to those in Table 3.1 of the book
correlation_matrix = dataTrain.corr()
correlation_matrix
# 23. Drop the column lpsa from the dataTrain DataFrame and save the result in a new DataFrame called predictorsTrain
predictorsTrain = dataTrain.drop(["lpsa"], axis=1)
# 24. Drop the column lpsa from the dataTest DataFrame and save the result in a new DataFrame called predictorsTest
predictorsTest = dataTest.drop(["lpsa"], axis=1)
# 25. Generate a new DataFrame called predictorsTrain_std and containing the standardized variables of DataFrame predictorsTrain
# - Hint: compute the mean of each column and save them in variable predictorsTrainMeans
#
# - Hint: compute the standard deviation of each column and save them in variable predictorsTrainStd
#
# - Hint: compute the standardization of each variable by the formula (predictorsTrain - predictorsTrainMeans) / predictorsTrainStd
predictorsTrain_std = (predictorsTrain - predictorsTrain.mean()) / predictorsTrain.std()
"""with OLS there is no need to standardize the model (without we could stress better the fact that a variable is
more important in terms of correlation),
if we use ridge, lasso we must use it because it would weight too much if the variables are too far away
from each other
"""
predictorsTrain_std.head()
# 26. Show the histogram of each variables of predictorsTrain_std in a single figure
# - Use argument figsize to enlarge the figure if needed
# - Hint: which kind of difference can you see in the histograms?
predictorsTrain_std.hist(figsize=(10, 10))
# 27. Generate a linear regression model using predictorsTrain_std as dependent variables and lpsaTrain as independent variable
# - Hint: find a function for linear regression model learning in sklearn (fit)
X = predictorsTrain_std
Y = lpsaTrain
reg = LinearRegression().fit(X, Y)  # create the estimator and fit it to the training data in one step
#
# - How do you set parameter fit_intercept? Why?
reg_intercept = LinearRegression(fit_intercept=False)
reg_intercept.fit(predictorsTrain_std, lpsaTrain)
# Default fit_intercept = True: the intercept is estimated from the data.
# fit_intercept = False forces the intercept to 0, which is only appropriate when the target is
# already centred; lpsa is not centred here, so keeping the default True is preferable.
#
#
# - How do you set parameter normalize? Why? Can this parameter be used to simplify the generation of the predictor matrix?
reg_normalize = LinearRegression(normalize=True)
reg_normalize.fit(predictorsTrain_std, lpsaTrain)
# Default is normalize = False.
# If True, the regressors X are rescaled before regression, so the parameter can indeed be used
# to simplify the generation of the predictor matrix (the raw predictorsTrain could be passed
# directly instead of building predictorsTrain_std by hand).
# This parameter is ignored when fit_intercept is set to False.
# reg_normalize = LinearRegression(normalize=True)
# reg_normalize.fit(predictorsTrain, lpsaTrain)
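# Note: the `normalize` argument was deprecated in scikit-learn 1.0 and removed in 1.2, so the
# call above only works on older versions. A sketch of the usual replacement is to scale inside
# a Pipeline (standardization is not numerically identical to the old `normalize=True`
# behaviour, which rescaled by the l2 norm, but it plays the same role):
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
reg_scaled = make_pipeline(StandardScaler(), LinearRegression())
reg_scaled.fit(predictorsTrain, lpsaTrain)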
# 28. Show the parameters of the linear regression model computed above. Compare the
# parameters with those shown in Table 3.2 of the book (page 50)
reg.coef_
# >|lcavol|lweight|age|lbph|svi|lcp|gleason|pgg45
# |:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
# |0.68|0.26|-0.14|0.21|0.31|-0.29|-0.02|0.27|
# The first two coefficients are a little bit smaller than the ones computed by the book.
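# Convenience view (not part of the exercise text): label the coefficients with the predictor
# names so they are easier to compare with Table 3.2 of the book.
print(pd.Series(reg.coef_, index=predictorsTrain_std.columns).round(3))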
# 29. Compute the coefficient of determination of the prediction
reg.score(X, Y)
# 30. Compute the standard errors, the Z scores (Student’s t statistics) and the related p-values
# • Hint: use library statsmodels instead of sklearn
# • Hint: compare the results with those in Table 3.2 of the book (page 50)
import statsmodels.api as sm
Xc = sm.add_constant(
    predictorsTrain
)  # add a column of ones so the OLS model includes an intercept term
model = sm.OLS(lpsaTrain, Xc).fit()
print(model.summary())
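# A compact view of the quantities requested in step 30, using attributes that the fitted
# statsmodels OLS results object exposes (params, bse, tvalues, pvalues); this is only a
# convenience table for the comparison with Table 3.2.
coef_table = pd.DataFrame(
    {
        "coefficient": model.params,
        "std error": model.bse,
        "Z score": model.tvalues,
        "p-value": model.pvalues,
    }
)
print(coef_table.round(3))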
# Exercise 3: Analysis of Prostate Cancer dataset – variable subset selection
# Please, execute the following tasks and provide answers to the proposed questions.
# 1. Open your kernel SL_EX2_ProstateCancer_Surname in Kaggle
# 2. Generate a copy called SL_EX3_SubsetSelection_Surname by the Fork button
# 3. Starting from the ols models achieved in the last steps, perform best-subset selection.
# - Generate one model (using only the 67 samples in the training set) for each combination of the 8 variables available
# - For each model compute the RSS on training and test set, the number of variables and the $R^2$ of the model
# - Save these numbers in suitable data structures
# - Suggestion: use itertools.combinations (https://docs.python.org/3/library/itertools.html#itertools.combinations) to generate all variable combinations
# - Suggestion: consider using the statsmodels library(https://www.statsmodels.org/stable/index.html) to generate the models. It provides direct access to evaluation measures as Sum of Squared Residuals (SSR), R^2, AIC, BIC
from sklearn.metrics import mean_squared_error
from itertools import combinations
def fit_linear_reg(X, Y):
# Fit linear regression model and return RSS and R squared values
model_k = LinearRegression(fit_intercept=True).fit(X, Y)
RSS = mean_squared_error(Y, model_k.predict(X)) * len(Y)
R_squared = model_k.score(X, Y)
return RSS, R_squared
from tqdm import tnrange, tqdm_notebook
X = predictorsTrain
Y = lpsaTrain
X1 = predictorsTest
Y1 = lpsaTest
k = 8
a = ((lpsaTrain - lpsaTrain.mean()) ** 2).sum()
b = ((lpsaTest - lpsaTest.mean()) ** 2).sum()
RSS_list, RSS_lista, R_squared_list, feature_list = (
    [a],
    [b],
    [0],
    ["no variables"],
)  # start from the null model: its RSS is the total sum of squares around the mean
num_variables = [0]
# Looping over k = 1 to k = 8 features in X
for k in tnrange(1, len(X.columns) + 1):
    # Looping over all possible combinations: from 8 choose k
    for combo in combinations(X.columns, k):
        cols = list(combo)
        model_k = LinearRegression().fit(X[cols], Y)  # each candidate model is fit on the training set only
        RSS_list.append(((Y - model_k.predict(X[cols])) ** 2).sum())
        R_squared_list.append(model_k.score(X[cols], Y))
        # evaluate the same train-fitted model on the held-out test set
        RSS_lista.append(((Y1 - model_k.predict(X1[cols])) ** 2).sum())
        feature_list.append(combo)
        num_variables.append(len(combo))
# Store in DataFrame
df = pd.DataFrame(
{
"num_variables": num_variables,
"Rss_train": RSS_list,
"Rss_test": RSS_lista,
"R_squared": R_squared_list,
"variables": feature_list,
}
)
df.head()
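# A small follow-up sketch (not required by the exercise text): for each subset size, pick the
# variable combination with the lowest training RSS from the table built above.
best_per_size = df.loc[df.groupby("num_variables")["Rss_train"].idxmin()]
print(best_per_size[["num_variables", "Rss_train", "Rss_test", "R_squared", "variables"]])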
"""
import scipy.special as sp
import numpy as np
import itertools
varIndices=range(0,8)
nTotVars=len(varIndices)
nTotModels=0
for k in range(0, nTotVars+1):
print(k)
nTotmodels=nTotModels+sp.binom(nTotVars, k)
nTotModels = int(nTotModels) #we turn it into an integer
models_NVars = np.zeros(nTotModels)
models_AICs=np.zeros(nTotModels)
models_BICs=np.zeros(nTotModels)
models_R2=np.zeros(nTotModels)
models_SSR=np.zeros(nTotModels)
models_RMSE=np.zeros(nTotModels)
models_parIs=[None]+(nTotModels)
i=0
for k in range(0, nTotVariables+1):
selVars = =np.zeros(k+1)
for subset in itertools.compbinations(varIndices, k):
selVars[list(range(1,k+1))]=np.asarray(subset)+1
selVars=selVars.astype(int)
X2_sel=X2.iloc[:,selVars]
"""
# 4. Generate a chart having the subset size in the x-axis and the RSS for the training set of all models generated at step 3 in the y-axis
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 8))
plt.title("RSS")
# one red dot per model, placed on the x-axis at its subset size
plt.plot(df["num_variables"], df["Rss_train"], "ro")
plt.xlabel("Flexibility")
plt.ylabel("RSS")
plt.show()
# 5. Generate a chart having the subset size in the x-axis and the $R^2$ of all models generated at step 3 in the y-axis
plt.figure(figsize=(10, 8))
plt.title("R_squared")
plt.plot(df["num_variables"], df["R_squared"], "ro")
plt.xlabel("Flexibility")
plt.ylabel("R_squared")
plt.show()
# 6. Generate a chart having the subset size in the x-axis and the RSS for the test set of all models generated at step 3 in the y-axis (other performance measures can be used, e.g., AIC or BIC)
plt.figure(figsize=(10, 8))
plt.title("Rss_test")
# test-set RSS of every model, grouped by subset size
plt.plot(df["num_variables"], df["Rss_test"], "ro")
plt.xlabel("Flexibility")
plt.ylabel("Rss_test")
plt.show()
# 7. Perform forward selection
# - Start from the empty model
# - Add at each step the variable that minimizes the RSS on the training set (other performance measures can be used, e.g., what happens if you use the RSS on the test set?)
# df['variables'].loc[df['Rss_train'] == rss1.min()]
remaining_features = list(X.columns.values)
features = []
RSS_list, R_squared_list = [np.inf], [np.inf] # Due to 1 indexing of the loop...
features_list = dict()
for i in range(1, k + 1):
best_RSS = np.inf
for combo in combinations(remaining_features, 1):
RSS = fit_linear_reg(X[list(combo) + features], Y) # Store temp result
if RSS[0] < best_RSS:
best_RSS = RSS[0]
best_R_squared = RSS[1]
best_feature = combo[0]
# Updating variables for next loop
features.append(best_feature)
remaining_features.remove(best_feature)
# Saving values for plotting
RSS_list.append(best_RSS)
R_squared_list.append(best_R_squared)
features_list[i] = features.copy()
print("Forward stepwise subset selection")
print("Number of features |", "Features |", "RSS")
display([(i, features_list[i], round(RSS_list[i])) for i in range(1, 9)])
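# Sketch for the question raised in the forward-selection instructions (what happens if you use
# the RSS on the test set?): refit the feature set chosen at each step on the training data and
# measure its RSS on the held-out test set.
test_RSS_forward = []
for i in range(1, 9):
    cols = features_list[i]
    model_i = LinearRegression().fit(X[cols], Y)
    resid = Y1 - model_i.predict(X1[cols])
    test_RSS_forward.append((i, cols, round(float((resid**2).sum()), 1)))
display(test_RSS_forward)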
|
# ## Model : Attention-Unet
# ***
# Dataset: Echonet and Echonet-Pediatric
# ### MultiResUnet
# - [MultiResUNet: Rethinking the U-Net architecture for multimodal biomedical image segmentation](https://arxiv.org/pdf/1902.04049.pdf)
# - [MultiResnet Blog](https://sh-tsang.medium.com/review-multiresunet-rethinking-u-net-biomedical-image-segmentation-4f0f21a4ed84)
#
# References:
# - Original Code : pytorch - [nibtehaz](https://github.com/nibtehaz/MultiResUNet/blob/master/pytorch/MultiResUNet.py)
# - Other Code : pytorch - [nikhilroxtomar](https://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/PyTorch/multiresunet.py)
# ### MultiResUnet Architecture
# 
# 
# ### Residual Path
# - The residual path reduces the semantic gap between encoder and decoder features by combining 3x3 and 1x1 convolution filters (a minimal sketch follows below)
# - The number of repetitions depends on the depth of the corresponding layer in the architecture
# 
# ### Residual Block
# 
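# The residual path described above is not part of the Attention-UNet implemented below; the
# following is only a minimal, assumption-based sketch of the idea (channel counts and block
# length are hypothetical): each block combines a 3x3 convolution with a parallel 1x1
# convolution shortcut, repeated once per level of depth of the skip connection.
import torch
import torch.nn as nn
class ResPathSketch(nn.Module):
    def __init__(self, channels, length):
        super().__init__()
        self.blocks = nn.ModuleList(
            [
                nn.ModuleDict(
                    {
                        "conv3": nn.Conv2d(channels, channels, kernel_size=3, padding=1),
                        "conv1": nn.Conv2d(channels, channels, kernel_size=1),
                        "bn": nn.BatchNorm2d(channels),
                    }
                )
                for _ in range(length)
            ]
        )
    def forward(self, x):
        for block in self.blocks:
            # 3x3 branch plus 1x1 shortcut, then batch norm and ReLU
            x = torch.relu(block["bn"](block["conv3"](x) + block["conv1"](x)))
        return x
# e.g. ResPathSketch(64, 4) would process the shallowest (longest) skip connection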
# ### Libraries
import torch
import torchvision
import torchmetrics
import torch.nn as nn
from torchvision.transforms import transforms
import torch.nn.functional as F
from torchmetrics.classification import (
Accuracy,
Precision,
Recall,
F1Score,
JaccardIndex,
Dice,
)
from torchmetrics import MetricCollection
import torch.optim as optim
import tqdm
from tqdm import tqdm
import cv2
import os
import shutil
import argparse
import time
import random
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ### Model Architecture
import torch
import torch.nn as nn
import torchvision.transforms.functional as TF
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(DoubleConv, self).__init__()
self.double_conv = nn.Sequential(
nn.Conv2d(
in_channels, out_channels, kernel_size, stride, padding, bias=False
),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
nn.Conv2d(
out_channels, out_channels, kernel_size, stride, padding, bias=False
),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
)
def forward(self, x):
return self.double_conv(x)
class DownSample(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(DownSample, self).__init__()
self.double_conv = DoubleConv(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = self.double_conv(x)
return x, self.max_pool(x)
class GatingSignal(nn.Module):
def __init__(self, in_channels, out_channels):
super(GatingSignal, self).__init__()
self.gating_signal = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
)
def forward(self, x):
return self.gating_signal(x)
class AttentionGate(nn.Module):
def __init__(
self, skip_channels, gating_channels, inter_channels=None
): # gating channels > skip channels, gating size < skip size
super(AttentionGate, self).__init__()
if not inter_channels:
inter_channels = skip_channels // 2
self.W_g = nn.Sequential(
nn.Conv2d(gating_channels, inter_channels, kernel_size=1, bias=True),
nn.BatchNorm2d(inter_channels),
)
self.W_x = nn.Sequential(
nn.Conv2d(
skip_channels, inter_channels, kernel_size=2, stride=2, bias=False
),
nn.BatchNorm2d(inter_channels),
)
self.sigma_1 = nn.ReLU(True)
# getting the attention mask
self.psi = nn.Sequential(
nn.Conv2d(inter_channels, 1, kernel_size=1, bias=True), nn.BatchNorm2d(1)
)
self.sigma_2 = nn.Sigmoid()
self.resampler = nn.Upsample(scale_factor=2, mode="bilinear")
def forward(self, x, g):
g_prime = self.W_g(g)
x_prime = self.W_x(x)
g_prime = TF.resize(g_prime, size=x_prime.shape[2:])
attention_mask = self.sigma_2(self.psi(self.sigma_1(g_prime + x_prime)))
resampler_out = self.resampler(attention_mask)
x = TF.resize(x, size=resampler_out.shape[2:])
return resampler_out * x
class UpSample(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
mode="nearest",
):
super(UpSample, self).__init__()
if mode == "transpose":
self.upsample = nn.ConvTranspose2d(
in_channels, in_channels // 2, kernel_size=2, stride=2
)
elif mode == "nearest":
self.upsample = nn.Sequential(
nn.Upsample(scale_factor=2, mode="nearest"),
nn.Conv2d(
in_channels,
in_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
)
self.double_conv = DoubleConv(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
def forward(self, x, skip):
x = self.upsample(x)
if x.shape != skip.shape:
x = TF.resize(x, size=skip.shape[2:])
x = torch.cat([skip, x], dim=1)
return self.double_conv(x)
class AttentionUNet(nn.Module):
def __init__(
self,
in_channels=3,
out_channels=1,
channels=[64, 128, 256, 512],
get_attention_maps=False,
):
super(AttentionUNet, self).__init__()
self.get_attention_maps = get_attention_maps
# Down sample blocks
down_blocks = []
in_chan = in_channels
for out_chan in channels:
down_blocks.append(DownSample(in_chan, out_chan))
in_chan = out_chan
self.downs = nn.Sequential(*down_blocks)
# Bottleneck block
self.bottleneck = DoubleConv(channels[-1], channels[-1] * 2)
# Up sample blocks
up_blocks = []
gating_signal_blocks = []
attention_blocks = []
in_chan = channels[-1] * 2
for out_chan in reversed(channels):
gating_signal_blocks.append(GatingSignal(in_chan, out_chan))
attention_blocks.append(AttentionGate(out_chan, out_chan))
up_blocks.append(UpSample(in_chan, out_chan))
in_chan = out_chan
self.ups = nn.Sequential(*up_blocks)
self.gating_signals = nn.Sequential(*gating_signal_blocks)
self.attentions = nn.Sequential(*attention_blocks)
# Final conv layer
self.final = nn.Conv2d(channels[0], out_channels, kernel_size=1)
def forward(self, x):
skip = []
attention_maps = []
for down in self.downs:
s, x = down(x)
skip.append(s)
x = self.bottleneck(x)
skip = skip[::-1]
for i in range(len(self.ups)):
gating_signal = self.gating_signals[i](x)
attention_out = self.attentions[i](skip[i], gating_signal)
x = self.ups[i](x, attention_out)
attention_maps.append(attention_out)
x = self.final(x)
if self.get_attention_maps:
return x, attention_maps
return x
def test():
x = torch.randn((1, 3, 112, 112))
model = AttentionUNet(in_channels=3, out_channels=1)
pred = model(x)
print(pred.shape)
# assert pred.shape == x.shape
if __name__ == "__main__":
test()
# ### Essential Functions
# #### Preprocessing functions
preprocessing = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
"""
set of transformations on frame, mask
"""
def rotate(frame, mask):
angle = random.uniform(10.5, 50.5)
frame = torchvision.transforms.functional.rotate(
torch.from_numpy(frame).permute(2, 0, 1), angle
)
mask = torchvision.transforms.functional.rotate(
torch.from_numpy(mask).unsqueeze(0), angle
)
return frame.permute(1, 2, 0).numpy(), mask.squeeze(0).numpy()
def noise(frame):
noise = np.random.randint(-40, 40, size=frame.shape)
frame = np.clip(frame + noise, 0, 255)
return frame
def crop_in(frame, mask):
h, w, _ = frame.shape
a = np.random.randint(10, 20)
b = np.random.randint(10, 20)
frame = torchvision.transforms.functional.crop(
torch.from_numpy(frame).permute(2, 0, 1), a, b, h, w
)
mask = torchvision.transforms.functional.crop(
torch.from_numpy(mask).unsqueeze(0), a, b, h, w
)
return frame.permute(1, 2, 0).numpy(), mask.squeeze(0).numpy()
def flip(frame, mask):
frame = cv2.flip(frame, 1)
mask = cv2.flip(mask, 1)
return frame, mask
def transformation(frame, mask):
    choice = np.random.randint(0, 6)  # 0-5: values 2-3 -> noise + crop + rotate, 5 -> horizontal flip, otherwise unchanged
    if choice in range(2, 4):
        noised = noise(frame)
        cframe, cmask = crop_in(noised, mask)
        return rotate(cframe, cmask)
    if choice == 5:
        return flip(frame, mask)
    return frame, mask
# #### Training functions
class MyDiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(MyDiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
# comment out if your model contains a sigmoid or equivalent activation layer
inputs = torch.sigmoid(inputs)
# flatten label and prediction tensors
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
return 1 - dice
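# Note: MyDiceLoss is defined here, but the Trainer below optimizes BCEWithLogitsLoss on its
# own. A minimal sketch (an assumption, not what this notebook actually trains with) of
# combining the two terms into one objective:
def bce_dice_loss(prediction, target, _bce=nn.BCEWithLogitsLoss(), _dice=MyDiceLoss()):
    # both terms take raw logits; MyDiceLoss applies its own sigmoid internally
    return _bce(prediction, target) + _dice(prediction, target)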
def extract_item(d):
new_d = copy.deepcopy(d)
for k, v in d.items():
if type(v) == torch.Tensor:
new_d[k] = v.item()
return new_d
def update(epoch, loss, metric):
    # the loss column in the results DataFrames is named "bcelogitloss", so use that key here
    temp = {"epoch": epoch, "bcelogitloss": loss}
    metric = extract_item(metric)
    temp.update(metric.items())
    return temp
def update_test(loss, metric):
    temp = {"bcelogitloss": loss}
    metric = extract_item(metric)
    temp.update(metric.items())
    return temp
def clearit(temp):
for k in temp.keys():
temp[k].clear()
return temp
def load_checkpoint(path, device_type):
return torch.load(path, map_location=device_type)
def save_checkpoint(model, optimizer, epoch, combined_loss, save_path):
checkpoint = {
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"epoch": epoch,
"combined_loss": combined_loss,
}
return torch.save(checkpoint, save_path + f"checkpoint_{epoch}.pt")
def save_model(model, optimizer, epoch, combined_loss, save_path):
final = {
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"epoch": epoch,
"combined_loss": combined_loss,
}
return torch.save(final, save_path + "model.pth")
# ### Data Preparation
dataset_path = "/kaggle/input/echonet-frames-masks-dataset/Echonet-Frames-Masks-Dataset"
class EchonetDataset(torch.utils.data.Dataset):
def __init__(self, frame_path, mask_path, transform=None):
self.frame_path = [
os.path.join(frame_path, frames_id)
for frames_id in sorted(os.listdir(frame_path))
]
self.mask_path = [
os.path.join(mask_path, mask_id)
for mask_id in sorted(os.listdir(mask_path))
]
self.preprocessing = preprocessing
self.transformation = transformation
self.transform = transform
def __getitem__(self, index):
# get items
frame = cv2.imread(self.frame_path[index])
mask = cv2.imread(self.mask_path[index], -1)
if self.transform:
frame, mask = self.transformation(frame, mask)
frame = self.preprocessing(frame).float()
mask = self.preprocessing(mask).float()
return frame, mask
def __len__(self):
return len(self.frame_path)
TrainDataset = EchonetDataset(
dataset_path + "/train/frames", dataset_path + "/train/mask", transform=False
)
ValidDataset = EchonetDataset(
dataset_path + "/valid/frames", dataset_path + "/valid/mask", transform=False
)
# dataloaders -> Train and Valid
TrainDataLoader = torch.utils.data.DataLoader(TrainDataset, batch_size=32, shuffle=True)
ValidDataLoader = torch.utils.data.DataLoader(ValidDataset, batch_size=32, shuffle=True)
frame, mask = next(iter(TrainDataset))
plt.imshow(frame.permute(1, 2, 0))
plt.imshow(mask.permute(1, 2, 0), cmap="gray", alpha=0.5)
# ### Model and Parameters
model = AttentionUNet()
model = model.to("cuda:0")
ck_path = "/kaggle/working/model/"
epochs = 50
resume = False
device = True
device_type = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# ### Training of Model
os.mkdir("/kaggle/working/model")
def Trainer(
model,
train_dataloader,
valid_dataloader,
optimizer,
criterion,
epochs,
resume=False,
ck_path="",
device=True,
device_type="cuda:0",
):
start_epoch = 0
save_path = "/kaggle/working/model/"
training = pd.DataFrame(
columns=["epoch", "bcelogitloss", "dicecoeff", "acc", "prec", "rec", "f1", "ji"]
)
validation = pd.DataFrame(
columns=["epoch", "bcelogitloss", "dicecoeff", "acc", "prec", "rec", "f1", "ji"]
)
train_loss = 0.0
valid_loss = 0.0
train_avg_loss = 0.0
valid_avg_loss = 0.0
tmetric = MetricCollection(
{
"dicecoeff": Dice(),
"acc": Accuracy(task="binary"),
"prec": Precision(task="binary"),
"rec": Recall(task="binary"),
"f1": F1Score(task="binary"),
"ji": JaccardIndex(task="binary"),
}
)
vmetric = MetricCollection(
{
"dicecoeff": Dice(),
"acc": Accuracy(task="binary"),
"prec": Precision(task="binary"),
"rec": Recall(task="binary"),
"f1": F1Score(task="binary"),
"ji": JaccardIndex(task="binary"),
}
)
tmetric, vmetric = tmetric.to(device_type), vmetric.to(device_type)
    if resume:
        ckpt = load_checkpoint(ck_path, device_type)  # load_checkpoint expects both the path and the device
        model.load_state_dict(ckpt["model_state_dict"])
        start_epoch = ckpt["epoch"]
        optimizer.load_state_dict(ckpt["optimizer_state_dict"])  # restore the optimizer that was passed in
        print("last checkpoint restored")
for epoch in range(start_epoch, epochs):
model.train()
train_loss = 0.0
valid_loss = 0.0
train_avg_loss = 0.0
valid_avg_loss = 0.0
local = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
start_time = time.time()
for i, data in local:
frame, mask = data
if device:
frame, mask = frame.to(device_type), mask.to(device_type)
prepare_time = start_time - time.time()
optimizer.zero_grad()
prediction = model(frame)
tbce_logitloss = criterion(prediction, mask)
tbce_logitloss.backward()
optimizer.step()
train_loss += tbce_logitloss.item()
tmetric.update(prediction, mask.type(torch.int64))
process_time = start_time - time.time() - prepare_time
compute_efficiency = process_time / (process_time + prepare_time)
local.set_description(
f"Training: Compute efficiency: {compute_efficiency:.2f}, loss: {tbce_logitloss.item():.5f}, epoch: {epoch}/{epochs}"
)
start_time = time.time()
train_avg_loss = train_loss / len(train_dataloader)
tmetric_evaluation = tmetric.compute()
tdict = update(epoch, train_avg_loss, tmetric_evaluation)
training.loc[len(training)] = tdict
print(
f'Training: \n\
- epoch: {epoch} \n\
- BCElogitloss: {train_avg_loss:.4f} \n\
- dicecoeff: {tdict["dicecoeff"]:.3f} \n\
- Accuracy:{tdict["acc"]:.3f} \n\
- Precision:{tdict["prec"]:.3f} \n\
- Recall:{tdict["rec"]:.3f} \n\
- F1Score:{tdict["f1"]:.3f} \n\
- JaccardIndex:{tdict["ji"]:.3f}'
)
model.eval()
with torch.no_grad():
valid_local = tqdm(enumerate(valid_dataloader), total=len(valid_dataloader))
for i, valid_data in valid_local:
vframe, vmask = valid_data
if device:
vframe, vmask = vframe.to(device_type), vmask.to(device_type)
vprediction = model(vframe)
tbce_logitloss = criterion(vprediction, vmask)
valid_loss += tbce_logitloss.item()
vmetric.update(vprediction, vmask.type(torch.int64))
process_time = start_time - time.time() - prepare_time
compute_efficiency = process_time / (process_time + prepare_time)
valid_local.set_description(
f"Training: Compute efficiency: {compute_efficiency:.2f}, loss: {tbce_logitloss.item():.5f}, epoch: {epoch}/{epochs}"
)
start_time = time.time()
valid_avg_loss = valid_loss / len(valid_dataloader)
vmetric_evaluation = vmetric.compute()
vdict = update(epoch, valid_avg_loss, vmetric_evaluation)
validation.loc[len(validation)] = vdict
print(
f'Validation: \n\
- epoch: {epoch} \n\
- BCElogitloss: {valid_avg_loss:.4f} \n\
- dicecoeff: {vdict["dicecoeff"]:.3f} \n\
- Accuracy:{vdict["acc"]:.3f} \n\
- Precision:{vdict["prec"]:.3f} \n\
- Recall:{vdict["rec"]:.3f} \n\
- F1Score:{vdict["f1"]:.3f} \n\
- JaccardIndex:{vdict["ji"]:.3f}'
)
print(
f"---------------------------------Epoch {epoch} Completed----------------------------------------------------------------"
)
if (epoch + 1) % 5 == 0:
save_checkpoint(model, optimizer, epoch + 1, valid_avg_loss, save_path)
save_model(model, optimizer, epochs, valid_avg_loss, save_path)
return model, training, validation
model, training_csv, validation_csv = Trainer(
model,
TrainDataLoader,
ValidDataLoader,
    optimizer,
criterion,
epochs=epochs,
resume=False,
ck_path=ck_path,
device=device,
device_type=device_type,
)
TestDataset = EchonetDataset(
dataset_path + "/test/frames", dataset_path + "/test/mask", transform=False
)
TestDataLoader = torch.utils.data.DataLoader(TestDataset, batch_size=32, shuffle=True)
test_model = AttentionUNet()
test_model.load_state_dict(
torch.load("/kaggle/working/model/model.pth", map_location="cuda:0")[
"model_state_dict"
]
)
test_model = test_model.to("cuda:0")
def Tester(model, test_dataloader, criterion, device=True, device_type="cuda:0"):
testing = pd.DataFrame(
columns=["bcelogitloss", "dicecoeff", "acc", "prec", "rec", "f1", "ji"]
)
tloss = 0.0
tmetric = MetricCollection(
{
"dicecoeff": Dice(),
"acc": Accuracy(task="binary"),
"prec": Precision(task="binary"),
"rec": Recall(task="binary"),
"f1": F1Score(task="binary"),
"ji": JaccardIndex(task="binary"),
}
)
tmetric = tmetric.to(device_type)
model.eval()
with torch.no_grad():
test_local = tqdm(enumerate(test_dataloader), total=len(test_dataloader))
for i, data in test_local:
frame, mask = data
if device:
frame, mask = frame.to(device_type), mask.to(device_type)
prediction = model(frame)
tloss_bce = criterion(prediction, mask)
tloss += tloss_bce.item()
tmetric.update(prediction, mask.type(torch.int64))
test_avg_loss = tloss / len(test_dataloader)
print(tmetric)
tmetric_evaluation = tmetric.compute()
    tdict = update_test(test_avg_loss, tmetric_evaluation)
testing.loc[len(testing)] = tdict
print(
f'Testing: \n\
- BCElogitloss: {test_avg_loss:.4f} \n\
- dicecoeff: {tdict["dicecoeff"]:.3f} \n\
- Accuracy:{tdict["acc"]:.3f} \n\
- Precision:{tdict["prec"]:.3f} \n\
- Recall:{tdict["rec"]:.3f} \n\
- F1Score:{tdict["f1"]:.3f} \n\
- JaccardIndex:{tdict["ji"]:.3f}'
)
return testing
testing_csv = Tester(test_model, TestDataLoader, criterion, device, device_type)
testing_csv.to_csv(
"/kaggle/working/attentionunet_withBCELogitloss_testing.csv", index=False
)
fr, mk = next(iter(TestDataLoader))  # sample a test batch to visualize
fr = fr.to("cuda")
output = test_model(fr)
fr.shape[0]
fig, axis = plt.subplots(fr.shape[0], 3, figsize=(40, 20), constrained_layout=True)
for i in range(fr.shape[0]):
    for j in range(3):
        axis[i][j].axis("off")
    axis[i][0].imshow(fr[i].permute(1, 2, 0).cpu().detach().numpy())  # input frame
    axis[i][1].imshow(mk[i].squeeze(0), cmap="gray")  # ground-truth mask
    axis[i][2].imshow(
        output[i].squeeze(0).cpu().detach().numpy() > 0.9, cmap="gray"
    )  # thresholded prediction
# fr and mk were sampled above, before running the model
plt.imshow(mk[1].squeeze(0))
plt.imshow(fr[0].squeeze(0).permute(1, 2, 0).cpu().detach().numpy())
plt.imshow(
test_model(fr)[0].squeeze(0).squeeze(0).cpu().detach().numpy() > 0.9,
alpha=0.5,
cmap="gray",
)
test_model = AttentionUNet()
test_model.load_state_dict(
torch.load("/kaggle/working/model/model.pth", map_location="cuda:0")[
"model_state_dict"
]
)
frame, mask = cv2.imread(
"/kaggle/input/echonet-frames-masks-dataset/Echonet-Frames-Masks-Dataset/test/frames/0X1005D03EED19C65B_24.png"
), cv2.imread(
"/kaggle/input/echonet-frames-masks-dataset/Echonet-Frames-Masks-Dataset/test/mask/0X1005D03EED19C65B_24.png"
)
output = test_model(preprocessing(frame).unsqueeze(0))
plt.imshow(frame)
plt.imshow(frame)
plt.imshow(
output.squeeze(0).squeeze(0).detach().cpu().numpy() > 0.9, cmap="gray", alpha=0.3
)
plt.imshow(frame)
plt.imshow(mask, cmap="gray", alpha=0.5)
shutil.make_archive("Attention-Unet-BCELogitLoss", "zip", "/kaggle/working/model")
import gc
torch.cuda.empty_cache()
gc.collect()
training_csv.to_csv(
"/kaggle/working/attentionunet_withBCELogitloss_training.csv", index=False
)
validation_csv.to_csv(
"/kaggle/working/attentionunet_withBCELogitloss_validation.csv", index=False
)
# ### Testing of Model
def Tester(model, test_dataloader, criterion, device=True, device_type="cuda:0"):
    # variant of the tester that expects `criterion` to be a list of three losses:
    # [dice loss, binary cross entropy, binary focal loss]
    testing = pd.DataFrame(
        columns=["diceloss", "bce", "bfl", "acc", "prec", "rec", "f1", "ji"]
    )
    tloss = {"dice": 0.0, "bce": 0.0, "bfl": 0.0}
    tmetric = MetricCollection(
        {
            "acc": Accuracy(task="binary"),
            "prec": Precision(task="binary"),
            "rec": Recall(task="binary"),
            "f1": F1Score(task="binary"),
            "ji": JaccardIndex(task="binary"),
        }
    )
    tmetric = tmetric.to(device_type)
    sig = nn.Sigmoid()
    model.eval()
    with torch.no_grad():
        for data in test_dataloader:
            frame, mask = data
            if device:
                frame, mask = frame.to(device_type), mask.to(device_type)
            prediction = model(frame)
            loss_dice = criterion[0](prediction, mask)
            loss_bce = criterion[1](sig(prediction), mask)
            loss_bfl = criterion[2](sig(prediction), mask)
            tloss["dice"] += loss_dice.item()
            tloss["bce"] += loss_bce.item()
            tloss["bfl"] += loss_bfl.item()
            tmetric.update(prediction, mask.type(torch.int64))
    test_avg_loss = {
        "dice": tloss["dice"] / len(test_dataloader),
        "bce": tloss["bce"] / len(test_dataloader),
        "bfl": tloss["bfl"] / len(test_dataloader),
    }
    tdict = extract_item(tmetric.compute())
    testing.loc[len(testing)] = {
        "diceloss": test_avg_loss["dice"],
        "bce": test_avg_loss["bce"],
        "bfl": test_avg_loss["bfl"],
        **tdict,
    }
    print(
        f'Test set: Dice Loss: {test_avg_loss["dice"]:.3f}, BinaryCrossEntropy Loss: {test_avg_loss["bce"]:.3f}, BinaryFocalLoss: {test_avg_loss["bfl"]:.3f}, \
Accuracy:{tdict["acc"]:.4f}, Precision:{tdict["prec"]:.3f}, Recall:{tdict["rec"]:.3f}, F1Score:{tdict["f1"]:.3f}, JaccardIndex:{tdict["ji"]:.3f}'
    )
    return testing
|
import pandas as pd
import numpy as np
import panel as pn
pn.extension("tabulator")
import dask.dataframe as dd
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/train.csv", low_memory=False
)
test = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/test.csv")
sample_sub = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/sample_submission.csv"
)
stores = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/stores.csv")
items = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/items.csv")
transactions = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/transactions.csv"
)
oil = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/oil.csv")
holiday = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/holidays_events.csv"
)
print("Shape of train:", train.shape)
print("Shape of test:", test.shape)
print("Shape of stores:", stores.shape)
print("Shape of items:", items.shape)
print("Shape of transactions:", transactions.shape)
print("Shape of oil:", oil.shape)
print("Shape of holiday:", holiday.shape)
print("size before:", train["date"].memory_usage(deep=True) * 1e-6)
train["date"] = pd.to_datetime(train["date"])
print("size after: ", train["date"].memory_usage(deep=True) * 1e-6)
store_number = (
stores.loc[
(stores["city"] == "Santo Domingo")
| (stores["city"] == "Quito")
| (stores["city"] == "Daule")
]
)["store_nbr"].tolist()
print("Stores which are present in these 3 citites:", "\n", store_number)
item_number = (
items.loc[(items["family"] == "PERSONAL CARE") | (items["family"] == "DAIRY")]
)["item_nbr"].tolist()
train_subset = train[
train["store_nbr"].isin(store_number) & train["item_nbr"].isin(item_number)
]
print(train_subset.shape)
train_subset.head()
train_subset = pd.merge(train_subset, stores, on="store_nbr", how="left")
train_subset.head()
train_subset = pd.merge(train_subset, items, on="item_nbr", how="left")
train_subset.head()
oil["date"] = pd.to_datetime(oil["date"])
train_subset = pd.merge(train_subset, oil, on="date", how="left")
train_subset.head()
holiday["type"] = holiday["type"].replace(
["Additional", "Bridge", "Event", "Transfer"], "Holiday"
)
mask = holiday["transferred"] == True
holiday["type"][mask] = "Work Day"
print(holiday["type"].value_counts())
holiday["date"] = pd.to_datetime(holiday["date"])
train_subset = pd.merge(train_subset, holiday, on="date", how="left")
train_subset = train_subset.drop(
["locale", "locale_name", "description", "transferred"], axis=1
)
train_subset.head()
train_subset = train_subset.rename(
columns={"type_y": "day_type", "type_x": "type", "dcoilwtico": "oil_price"}
)
train_subset.head()
train_subset.isnull().sum().sort_values(ascending=False)
train_subset["day_type"] = train_subset["day_type"].fillna("Work Day")
train_subset["oil_price"] = train_subset["oil_price"].fillna(axis=0, method="ffill")
train_subset["onpromotion"] = train_subset["onpromotion"].fillna("Not Mentioned")
del oil
del holiday
del items
del stores
train_subset["date"] = pd.to_datetime(train_subset["date"])
train_subset["Month"] = train_subset["date"].dt.strftime("%B")
train_subset["Year"] = train_subset["date"].dt.strftime("%Y")
train_subset
train_subset["day"] = train_subset["date"].dt.day
train_subset["quarter"] = train_subset["date"].dt.quarter
train_subset["month"] = train_subset["date"].dt.month
train_subset["year"] = train_subset["date"].dt.year
train_subset.head()
train_subset = train_subset.drop(
[
"city",
"state",
"perishable",
"type",
"cluster",
"class",
"date",
"Month",
"Year",
],
axis=1,
)
train_subset.head()
train_subset["onpromotion"] = train_subset["onpromotion"].replace(True, 1)
train_subset["onpromotion"] = train_subset["onpromotion"].replace(False, 0)
train_subset["onpromotion"] = train_subset["onpromotion"].replace("Not Mentioned", 2)
train_subset["family"] = train_subset["family"].replace("PERSONAL CARE", 0)
train_subset["family"] = train_subset["family"].replace("DAIRY", 1)
train_subset["day_type"] = train_subset["day_type"].replace("Holiday", 0)
train_subset["day_type"] = train_subset["day_type"].replace("Work Day", 1)
# train1
train1 = train_subset.drop(["unit_sales"], axis=1)
train1.head()
# train2
train2 = train_subset.drop(
[
"id",
"store_nbr",
"item_nbr",
"onpromotion",
"family",
"oil_price",
"day_type",
"month",
"year",
"day",
"quarter",
],
axis=1,
)
train2.head()
import xgboost as xgb
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
import time
# The dataset is divided into two parts
X_train, X_test, y_train, y_test = train_test_split(
train1, train2, test_size=0.2, random_state=42
)
# An XGBoost model is created with basic parameters
xgb_model = xgb.XGBRegressor(
objective="reg:squarederror",
colsample_bytree=0.3,
learning_rate=0.1,
max_depth=10,
alpha=10,
n_estimators=1000,
)
# The model is trained on the train set
start_time = time.time()
xgb_model.fit(X_train, y_train)
end_time = time.time()
# The model is used to predict on the test set
y_pred = xgb_model.predict(X_test)
# The performance of the model is evaluated using three metrics: Root Mean Squared Error (RMSE), Mean Absolute Error (MAE), and R2 score
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print("RMSE:", rmse)
r2 = r2_score(y_test, y_pred)
print("R2 score:", r2)
mae = mean_absolute_error(y_test, y_pred)
print("MAE:", mae)
training_time = end_time - start_time
print("Training time:", training_time)
|
import pandas as pd
import numpy as np
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import xgboost as xgb
from sklearn.metrics import mean_squared_error
color_pal = sns.color_palette()
plt.style.use("fivethirtyeight")
print("Setup Complete")
# Path of the file to read
pricelab_filepath = "/kaggle/input/price-data/price_data.csv"
# Read the file into a DataFrame called df
df = pd.read_csv(pricelab_filepath, parse_dates=True)
# Use the Date column as a DatetimeIndex
df = df.set_index("Date")
df.index = pd.to_datetime(df.index)
# df.tail()
# Final DataFrame
final = pd.DataFrame(
pd.date_range(start="2016-01-17", end="2022-02-28"), columns=["Date"]
)
final["Price"] = ""
final = final.set_index("Date")
final.index = pd.to_datetime(final.index)
final.head()
# Summary statistics help flag outliers:
# the mean is sensitive to outliers, and the fact that the mean is small compared to the
# max value suggests the max value is an outlier.
df.describe()
# **Outlier Finding using Histogram/ BoxPlot/ Scatterplot**
# create a histogram
fig = px.histogram(df, x="Price")
fig.show()
# create a box plot
box_fig = px.box(df, y="Price")
box_fig.show()
# Scatter Plots
# line plots
df.plot(linewidth=0.5)
# Using Dots instead of lines to better segregate consecutive values
df.plot(marker=".", alpha=0.5, linestyle="None", figsize=(11, 9), subplots=True)
df.plot(style=".", figsize=(15, 6), color=color_pal[1], title="Hotel Prices")
plt.show()
# **Train/Test Splits**
train = df.loc[df.index < "01-01-2015"]
test = df.loc[df.index >= "01-01-2015"]
fig, ax = plt.subplots(figsize=(15, 5))
train.plot(ax=ax, label="Training Set", title="Data Train/Test Split")
test.plot(ax=ax, label="Test Set")
ax.axvline("01-01-2015", color="black", ls="--")
ax.legend(["Training Set", "Test Set"])
plt.show()
df.loc[(df.index > "01-01-2012") & (df.index < "02-01-2012")].plot(
figsize=(15, 5), title="Month Of Data"
)
plt.show()
# **Feature Creation**
def create_features(df):
"""
Create time series features based on time series index.
"""
df = df.copy()
df["quarter"] = df.index.quarter
df["month"] = df.index.month
df["year"] = df.index.year
df["day"] = df.index.day
return df
df = create_features(df)
# df.axes
# df.head()
# df.tail()
# **Visualize our Feature / Target Relationship**
fig, ax = plt.subplots(figsize=(10, 8))
sns.boxplot(data=df, x="month", y="Price", palette="Blues")
ax.set_title("Prices by Month")
plt.show()
# **Create our Model**
train = create_features(train)
test = create_features(test)
final = create_features(final)
FEATURES = ["day", "quarter", "month", "year"]
TARGET = "Price"
X_train = train[FEATURES]
y_train = train[TARGET]
X_test = test[FEATURES]
y_test = test[TARGET]
X_final = final[FEATURES]
y_final = final[TARGET]
reg = xgb.XGBRegressor(
base_score=0.5,
booster="gbtree",
n_estimators=2000,
early_stopping_rounds=50,
objective="reg:linear",
max_depth=3,
learning_rate=0.01,
)
reg.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=100)
# **Feature Importance**
fi = pd.DataFrame(
data=reg.feature_importances_, index=reg.feature_names_in_, columns=["importance"]
)
fi.sort_values("importance").plot(kind="barh", title="Feature Importance")
plt.show()
# **Forecast on Test**
test["prediction"] = reg.predict(X_test)
df = df.merge(test[["prediction"]], how="left", left_index=True, right_index=True)
ax = df[["Price"]].plot(figsize=(15, 5))
df["prediction"].plot(ax=ax, style=".")
plt.legend(["Truth Data", "Predictions"])
ax.set_title("Raw Dat and Prediction")
plt.show()
ax = df.loc[(df.index > "01-01-2015") & (df.index < "01-01-2016")]["Price"].plot(
figsize=(15, 5), title="Year Of Data"
)
df.loc[(df.index > "01-01-2015") & (df.index < "01-01-2016")]["prediction"].plot(
style="."
)
plt.legend(["Truth Data", "Prediction"])
plt.show()
# **Score (RMSE)**
score = np.sqrt(mean_squared_error(test["Price"], test["prediction"]))
print(f"RMSE Score on Test set: {score:0.2f}")
# **Calculate Error**
# * Look at the worst and best predicted days
test["error"] = np.abs(test[TARGET] - test["prediction"])
test["date"] = test.index.date
test.groupby(["date"])["error"].mean().sort_values(ascending=False).head(10)
# **Predicting Future Values**
final["prediction"] = reg.predict(X_final)
final["prediction"].plot(ax=ax)
final.tail(28)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import altair as alt # graphs
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/ftc-cigarettes/cigs.csv")
# # Data Wrangling
df.head()
df.info()
df.describe()
# Checking total number of rows and columns
df.shape
# Checking for duplicate values
df.duplicated().sum()
# Removing duplicate rows
df = df.drop_duplicates()
df.isnull().sum()
# Changing 'NaN' values to 'Unflavored' for 'flavor' column
df["flavor"] = df["flavor"].fillna("Unflavored")
# Verifying the change in the dataframe
df.head()
# ## Distributions
alt.Chart(df).mark_bar(color="red").encode(
x=alt.X("nic", title="Nicotine (mg)", bin=True), y="count()"
).properties(title="Histogram of Nicotine Concentration")
alt.Chart(df).mark_bar(color="black").encode(
x=alt.X("CO", title="CO (mg)", bin=True), y="count()"
).properties(title="Histogram of CO Concentration")
alt.Chart(df).mark_bar(color="brown").encode(
x=alt.X("tar", title="Tar (mg)", bin=True), y="count()"
).properties(title="Histogram of Tar Concentration")
# ## Correlations
df.corr()
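# The scatter-plot subtitles below quote specific pairwise correlations; they can also be read
# off individually from the matrix above, for example:
print(round(df["tar"].corr(df["nic"]), 3))  # nicotine vs tar
print(round(df["CO"].corr(df["nic"]), 3))  # nicotine vs CO
print(round(df["tar"].corr(df["CO"]), 3))  # CO vs tar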
alt.Chart(df).mark_circle().encode(
x=alt.X("tar", title="Tar (mg)"),
y=alt.Y("nic", title="Nicotine (mg)"),
tooltip=["Brand_Name", "flavor"],
).properties(
title={
"text": "Relationship Between Nicotine and Tar",
"subtitle": "Correlation = 0.918",
}
)
alt.Chart(df).mark_circle().encode(
x=alt.X("CO", title="CO (mg)"), y=alt.Y("nic", title="Nicotine (mg)")
).properties(
title={
"text": "Relationship Between Nicotine and CO",
"subtitle": "Correlation = 0.700",
}
)
alt.Chart(df).mark_circle().encode(
x=alt.X("tar", title="Tar (mg)"), y=alt.Y("CO", title="CO (mg)")
).properties(
title={"text": "Relationship Between CO and Tar", "subtitle": "Correlation = 0.824"}
)
|
# # Introduction
# In this Notebook, we provide you a step-by-step guide in learning about a data set, handling common data quality issues, and performing preprocessing. We will rely on [pandas](https://pandas.pydata.org/), a popular data analysis and manipulation library for Python.
# We'll use a variant of a popular learning dataset every practitioner should be familiar with: Wine Magazine wine reviews.
# # Step 1: Acquire Data
# Using the Notebook sidebar, use the _Add Data_ button to search for the data set with the following URL, https://www.kaggle.com/datasets/pabloa/wine-magazine, and is titled Wine Magazine by Pablo Albani. (Note that this is a different dataset than the popular and semi-official [Wine Reviews](https://www.kaggle.com/datasets/zynicide/wine-reviews) data set.)
# When complete, close the _Add Data_ panel and confirm that the data set appears in the sidebar under _Data - Input_.
# # Step 2: Import **pandas**
# While we could perform much of our data analysis and preprocessing from scratch with our own Python code, **[pandas](https://pandas.pydata.org/)** provides us with objects and functions for loading, analyzing and manipulating data. **pandas** is short for "Panel Data Analysis," and was created by Wes McKinney in 2008.
# Please: Do not just copy and paste the sample code in this notebook. Practice typing the code we provide in each code cell.
# In the next code cell, import the **pandas** library, and, per convention, name it `pd` for short.
# ```python
# import pandas as pd
# ```
# Be sure to run the cell when you are done.
# # Step 3: Get to Know **pandas**
# **pandas** provides us with two important classes: DataFrame and Series. You can think of a DataFrame as a table, that we construct with columns of data, where each column has a title and a list of values. Try constructing a DataFrame with a Python dictionary, and inspecting it.
# ```python
# pd.DataFrame({'Yes': [19, 21], 'No': [78, 2], 'Maybe': [3, 4]})
# ```
# Here we have passed the DataFrame constructor a dictionary containing the key-value pairs of "column label" and column data, which were simple Python lists.
# Behind the scenes, however, a DataFrame takes the data we give it and constructs each column as a Series. You can think of a Series as a column of data, but a Series can also just be any sequence of data. Try creating a simple Series and inspecting it.
# ```python
# pd.Series([8, 6, 7, 5, 3, 0, 9])
# ```
# ### 💡 Knowledge Check 1
# Q1. Recall our Exploration on types of data. Notice the `dtype` component of the code cell's output. What is the data type of the values in the Series above?
# _Write your answer here_
# Q2. Modify the code above, by replacing the `9` with the string `'widget'` and re-run the code cell. What is the data type now?
# _Write your answer here_
# If you only master one Python library for data analysis and machine learning, we encourage you to master every aspect of **pandas**. The time-saving features it provides have great value!
# For now, let's use pandas to open and explore our data set.
# # Step 4: Load Data from a File into a DataFrame
# Let's use **pandas** to create a DataFrame from the data in a CSV file, using the `read_csv` method. (CSV files are humble and ubiquitous, and easily read, but pandas can load data from other sources too.)
# ```python
# wine_reviews = pd.read_csv(path_to_file, sep=';')
# ```
# Notice that we will pass a `sep` argument to `read_csv` - this is because the data author chose to use semicolons instead of commas between values. Without this argument, pandas would have a problem parsing the CSV file.
# Try loading the file in the next code cell. We have defined a variable for the path, or location, of the wine magazine data you added to this notebook. Use it when invoking `read_csv`.
wine_reviews_path = "../input/wine-magazine/winemag-data_first150k.csv"
# # Step 5: Explore the Data Set
# Recall that two of the first characteristics of a dataset we should discover are the size, or number of data objects, and the width, or the number of dimensions. We can obtain that information with the `shape` attribute.
# ```python
# wine_reviews.shape
# ```
# ### 💡 Knowledge Check 2
# 1. How many data objects are in the data set, and how many dimensions are there?
# _Write your answer here_
# Now, let's inspect the `wine_reviews` DataFrame. Because a Jupyter Notebook will always help us inspect the last line of code in a code cell, we can just write one statement with `wine_reviews` in it.
# ```python
# wine_reviews
# ```
# Notice how the DataFrame includes an unlabeled index column, to identify each data object.
# ### 💡 Knowledge Check 3
# Recall the importance of handling missing data, and common techniques for handling missing data.
# 1. Look carefully at the rendering of the DataFrame above, what do you believe the NaN indicators mean in two of the columns of data?
# _Write your answer here_
# 2. How might you react to the missing data in the **designation** column, and why?
# _Write your answer here_
# 3. How might you address the problem of a large number of missing values in the **region_2** column, and why?
# _Write your answer here_
# Now that we know the size and dimensions of the data set, and that we have seen a sampling of the data objects, let us further characterize the data set with summary statistics. While we can do this ourselves with our own Python code, **pandas** makes this straightforward.
# ```python
# wine_reviews.describe()
# ```
# Notice how `describe` computes the summary statistics only for numeric features. Notice the different _count_ values - what do the differences in these numbers indicate?
# To explore and summarize the categorical data in the data set, we can use **pandas** grouping features. We can use the `value_counts` method to summarize the categorical attributes. Let's start with country.
# ```python
# wine_reviews.value_counts('country')
# ```
# ### 💡 Knowledge Check 4
# Using the`value_counts` method, answer the following questions.
# 1. How many countries are represented in the data set?
# _Write your answer here_
# 2. How many provinces are represented in the data set?
# _Write your answer here_
# 3. How many varieties are represented in the data set?
# _Write your answer here_
# # Step 6: Preprocessing
# ## Handling missing values
# Based on our inspection so far, let us choose to remove the **region_2** feature, and remove any data objects that have a missing value. We can remove a column from a DataFrame using Python's `del` keyword and the syntactic sugar of dictionary-like syntax that **pandas** provides, via `del frame['col_name']`. We can remove all rows that contain at least one missing value with the DataFrame `dropna` method by passing it the named parameter `axis=0`.
# Then, let's inspect the shape of the data after this processing, with `shape`.
# ```python
# del wine_reviews['region_2']
# wine_reviews = wine_reviews.dropna(axis=0)
# wine_reviews.shape
# ```
# Note: if you encounter a KeyError: 'region_2', then "Run All" the code in the notebook.
# This is because the `del` operation mutates the DataFrame "in-place."
# Notice how there is one less dimension in the dataset, and we are also left with only 73,440 data objects that have no missing values.
# Let's use `describe` again to see the summary statistics again of this slightly 'cleaner' data set.
# ```python
# wine_reviews.describe()
# ```
# Let's view the first five rows of the DataFrame with the `head` method.
# ```python
# wine_reviews.head()
# ```
# ## Creating a Feature
# Our dataset has two interesting dimensions: _points_ and *last_year_points*. One might imagine an experiment where we would like to determine if considering the difference between the current year _points_ and the *last_year_points* might enhance the prediction accuracy of a trained model. In order to train a model with the difference between the current year _points_ and the *last_year_points*, it would be handy if we had a new feature, *point_change* in our dataset, computed prior to training.
# There are many "syntactically sexy" ways to use *pandas* to synthesize new features. One of these methods involves adding a new column using the `assign` method, and passing `assign` an expression to evaluate. (Behind the scenes, a `map` operation and `lambda` expression are used to compute the new values for every entry in the data set.)
# ```python
# wine_reviews = wine_reviews.assign(point_change = wine_reviews.points - wine_reviews.last_year_points)
# wine_reviews.head()
# ```
# ### 💡 Knowledge Check 5
# Apply the ideas and examples in this notebook to create a new column in the dataset, named **points_normalized**. Ensure that the values in this new column represent the points values normalized between 0 and 1.
# Hint: There are important clues in the data exploration within this Notebook about how you might want to implement this.
# Implement your solution in the following code cell, and be sure to invoke `wine_reviews.head()` at the end, so we can inspect your new column.
#
# Write your answer here. Be sure to conclude with `wine_reviews.head()`
|
# # **UW Madison GI Tract Image Segmentation**
# Authors:
# * Marena Jestin
# * Alice Latino
# * Maria Rausa
# The goal of the project is to build a model that automatically segments the stomach and intestines in MRI scans.
# The algorithm relies on this dataset to develop deep learning solutions that help cancer patients receive better care.
# Loading the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from PIL import Image
from skimage.transform import resize
# # Loading the paths
# The training annotations are provided as RLE-encoded masks, and the images are 16-bit grayscale PNGs.
DATA_DIR = "/kaggle/input/uw-madison-gi-tract-image-segmentation"
TRAIN_DIR = os.path.join(DATA_DIR, "train")
TRAIN_CSV = os.path.join(DATA_DIR, "train.csv")
train_df = pd.read_csv(TRAIN_CSV)
path_images = []
for dirname, _, filenames in os.walk(TRAIN_DIR):
for filename in filenames:
path = os.path.join(dirname, filename)
path_images.append(path)
print(len(path_images))
print(path_images[0])
# convert the RLE masks to strings
train_df["segmentation"] = train_df["segmentation"].astype("str")
# show the first 10 rows of train.csv
train_df.head(10)
# # RLE encode and decode functions
# * **rle_encode()** encodes a mask from PNG format into RLE format; it is used later to save the predicted masks in submission.csv.
# * **rle_decode()** decodes a mask from RLE format back into PNG format.
# Run-length encoding
def rle_encode(img):
pixels = img.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return " ".join(str(x) for x in runs)
def rle_decode(mask_rle, shape, color=1):
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros((shape[0] * shape[1], shape[2]), dtype=np.float32)
for lo, hi in zip(starts, ends):
img[lo:hi] = color
return img.reshape(shape)
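# Quick sanity check (a sketch, not part of the original pipeline): decoding an RLE string and
# re-encoding the resulting mask should return the original string.
example_rle = "1 3 10 2"  # hypothetical run-length pairs: 3 pixels starting at 1, 2 pixels starting at 10
example_mask = rle_decode(example_rle, shape=(4, 4, 1))
assert rle_encode(example_mask.reshape(4, 4)) == example_rle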
# # Creating the paths for the masks
# Paths are created to store the masks, which are decoded, resized and saved.
# The masks actually present in train.csv are 33,913, split into:
# * class **stomach**: 8,627,
# * class **large_bowel**: 14,085,
# * class **small_bowel**: 11,201.
# The remaining "nan" masks are decoded as all-zero masks (black masks).
masks_paths = []
large_bowel_paths = []
small_bowel_paths = []
stomach_paths = []
for id, classe, segmentation in zip(
train_df["id"], train_df["class"], train_df["segmentation"]
):
columns = id.split("_")
case = columns[0]
case_day = case + "_" + columns[1]
id_img = columns[2] + "_" + columns[3]
name_img = os.listdir(TRAIN_DIR + "/" + case + "/" + case_day + "/scans")[0]
name_img = name_img.split("_")
path2 = (
"/kaggle/working/uw-madison-gi-tract-image-segmentation/train/"
+ case
+ "/"
+ case_day
+ "/masks/"
)
classe = classe.replace("_", "")
path_final = path2 + id_img + "_" + classe + ".png"
masks_paths.append(path_final)
mask_rle = segmentation.strip()
    # if the mask is "nan", create an all-zero mask (black mask)
if mask_rle == "nan":
mask = Image.fromarray(np.zeros((256, 256), dtype=np.uint8))
    # otherwise decode it with the rle_decode() function to obtain a .png mask
else:
height = int(name_img[2])
width = int(name_img[3])
mask = rle_decode(mask_rle, shape=(width, height, 1), color=1).reshape(
width, height
)
mask = resize(mask, (256, 256))
if classe == "largebowel":
large_bowel_paths.append(path_final)
elif classe == "smallbowel":
small_bowel_paths.append(path_final)
else:
stomach_paths.append(path_final)
    # if the specified path does not exist, create it
if not os.path.exists(path2):
os.makedirs(path2)
mask_png = Image.fromarray(np.uint8(mask), mode="L")
    # save the resized masks as .png files in the corresponding paths
mask_png.save(path_final)
print("Numero totale di maschere: ", len(masks_paths))
print("Numero di maschere (non 'nan') --> Large bowel: ", len(large_bowel_paths))
print("Numero di maschere (non 'nan') --> Small bowel: ", len(small_bowel_paths))
print("Numero di maschere (non 'nan') --> Stomach: ", len(stomach_paths))
print(masks_paths[0])
print(large_bowel_paths[0])
print(small_bowel_paths[0])
print(stomach_paths[0])
# # Sampling
# For mask sampling, a subset of the masks belonging to the **large_bowel** class is selected to train the model.
# A different class (large_bowel, small_bowel, or stomach) can be chosen simply by changing the value assigned to the *classe* variable.
# mask sampling
# The masks and their corresponding paths are collected here; they will be used to train the model
sampled_paths_mask = []
sampled_masks = []
classe = "large_bowel"
if classe == "large_bowel":
for i in range(0, len(large_bowel_paths), 7):
mask_path = large_bowel_paths[i]
sampled_masks.append(np.array(Image.open(mask_path)))
sampled_paths_mask.append(mask_path)
elif classe == "small_bowel":
for i in range(0, len(small_bowel_paths), 6):
mask_path = small_bowel_paths[i]
sampled_masks.append(np.array(Image.open(mask_path)))
sampled_paths_mask.append(mask_path)
else:
for i in range(0, len(stomach_paths), 5):
mask_path = stomach_paths[i]
sampled_masks.append(np.array(Image.open(mask_path)))
sampled_paths_mask.append(mask_path)
print(f"{len(sampled_masks)} maschere campionate della classe {classe}.")
print(len(sampled_paths_mask))
# # Loading the images
# Here, the images are matched to the sampled masks of the training set.
# collect the images corresponding to the sampled masks
sampled_imgs = []
sampled_imgs_paths = []
for path in sampled_paths_mask:
subfolder = path.split("/")
case = subfolder[5]
case_day = subfolder[6]
id_mask = subfolder[-1].split("_")
id_mask = id_mask[0] + "_" + id_mask[1]
path_img = TRAIN_DIR + "/" + case + "/" + case_day + "/scans"
name_img_list = os.listdir(path_img)
for name in name_img_list:
name_img2 = name.split("_")
name_img2 = name_img2[0] + "_" + name_img2[1]
if id_mask == name_img2:
path_final = path_img + "/" + name
sampled_imgs_paths.append(path_final)
sampled_imgs.append(np.array(Image.open(path_final).resize((256, 256))))
images_np = np.asarray(sampled_imgs)
masks_np = np.asarray(sampled_masks)
print(len(sampled_imgs))
print(len(sampled_masks))
img_max = images_np.max()
mask_max = masks_np.max()
print(img_max, mask_max)
# # Image normalization
# Image normalization consists of converting the images into a numpy array in which every value lies between 0 and 1.
x = np.asarray(images_np, dtype=np.float32) / img_max
y = np.asarray(masks_np, dtype=np.float32) / mask_max
print(x.shape, y.shape)
x = x.reshape(x.shape[0], x.shape[1], x.shape[2], 1)
y = y.reshape(y.shape[0], y.shape[1], y.shape[2], 1)
print(x.shape, y.shape)
from keras_unet.utils import plot_imgs
plot_imgs(images_np, masks_np, nm_img_to_plot=10, figsize=4)
# # Dataset split
# The sampled items are split into:
# * 80% training set
# * 20% validation set
from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(
x, y, test_size=0.2, random_state=0
)
print("x_train: ", x_train.shape)
print("y_train: ", y_train.shape)
print("x_val: ", x_valid.shape)
print("y_val: ", y_valid.shape)
# # Data augmentation
# The number of available samples is extended by applying a series of transformations:
# * rotation
# * translation (along both the x and y axes)
# * zoom
# so that the network learns the data better and generalizes more effectively.
from keras_unet.utils import get_augmented
data_augmented = get_augmented(
x_train,
y_train,
batch_size=32,
data_gen_args=dict(
rotation_range=5.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=40,
zoom_range=0.2,
fill_mode="reflect",
),
)
input_shape = x_train[0].shape
print(input_shape)
# # **U-Net and Training**
# The neural network architecture used is U-Net.
# This architecture was developed for medical image segmentation and is based on the encoder-decoder principle.
# The U-Net network consists of:
# * an encoding path, in which the image is gradually reduced in size through convolutions and max-pooling
# * a decoding path, in which the image is reconstructed to its original size through convolutions and up-sampling.
# U-net
from keras_unet.models import custom_unet
model = custom_unet(
input_shape,
    filters=32,  # number of convolution filters
    use_batch_norm=True,  # apply batch normalization after each convolution layer
    dropout=0.3,  # fraction of activation units dropped to reduce overfitting
    dropout_change_per_layer=0.0,  # additional dropout applied at each successive layer
    num_layers=4,  # number of convolution layers (depth of the U-Net)
)
# network structure
model.summary()
# ModelCheckpoint saves the best model obtained during training to the file model_filename, only when the monitored metric improves with respect to the previous training epoch. The goal is to keep the best model.
from keras.callbacks import ModelCheckpoint
model_filename = "/kaggle/working/best_model_" + classe + ".h5"
callback_checkpoint = ModelCheckpoint(
    model_filename,  # output file
    verbose=0,  # do not print any message to the console when saving
    monitor="val_loss",  # metric to monitor
save_best_only=True,
)
# compile configures the U-Net model for training, defining the optimizer, the loss function, and the metrics used to train the model
from tensorflow.keras.optimizers import Adam
from keras_unet.metrics import iou, iou_thresholded, dice_coef
model.compile(
optimizer=Adam(),
loss="binary_crossentropy",
metrics=[iou, iou_thresholded, dice_coef],
)
# fit trains the model and stores its training history in a history object
history = model.fit(
data_augmented,
batch_size=32,
    steps_per_epoch=63,  # number of training steps per epoch: number of training samples / batch_size
epochs=60,
validation_data=(x_valid, y_valid),
    callbacks=[callback_checkpoint],  # callback that saves the best model
)
from keras_unet.utils import plot_segm_history
# visualization of the training and validation metrics
plot_segm_history(history)
# # **Prediction**
# load the weights of the best model saved during training
model.load_weights(model_filename)
# prediction: produce the predicted segmentation masks for every image in the validation dataset
y_pred = model.predict(x_valid)
plot_imgs(x_valid, y_valid, y_pred, nm_img_to_plot=10)
# # Test set
# For the test set, 1000 images are taken that appear in neither the training set nor the validation set, to check whether the predictions are also correct on images the network has never seen before.
dim_test = 1000
x_test = []  # test images
x_test_path = []  # paths of the test images
y_test = []  # test masks
y_test_path = []  # paths of the test masks
for path in path_images:
if path not in sampled_imgs_paths and len(x_test) < dim_test:
x_test_path.append(path)
img = np.array(Image.open(path).resize((256, 256)))
x_test.append(img)
print(len(x_test)) # 1000
print(len(x_test_path)) # 1000
for path in x_test_path:
TRAIN = TRAIN_DIR.replace("input", "working")
path = path.split("/")
case = path[5]
case_day = path[6]
id_img = path[-1]
id_img = id_img.split("_")
new_classe = classe.replace("_", "")
id_mask = id_img[0] + "_" + id_img[1] + "_" + new_classe + ".png"
path_mask = TRAIN + "/" + case + "/" + case_day + "/masks/" + id_mask
mask = np.array(Image.open(path_mask))
y_test.append(mask)
path_for_submission = case + "_" + case_day + "_" + id_img[0] + "_" + id_img[1]
y_test_path.append(path_for_submission)
x_test = np.asarray(x_test)
y_test = np.asarray(y_test)
x_test_max = x_test.max()
y_test_max = y_test.max()
print(x_test_max, y_test_max)
x_test = np.asarray(x_test, dtype=np.float32) / x_test_max
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
y_test = np.asarray(y_test, dtype=np.float32) / y_test_max
y_test = y_test.reshape(y_test.shape[0], y_test.shape[1], y_test.shape[2], 1)
y_pred_test = model.predict(x_test)
plot_imgs(x_test, y_test, y_pred_test, nm_img_to_plot=10)
y_test.shape
# # The submission.csv file
# For each image in the test set a mask is predicted; the mask is then converted to RLE format so that it can be saved.
# The data are organized in a DataFrame with 3 columns: id, class, and prediction.
# The DataFrame is then saved to the submission.csv file.
#
path_submission = "/kaggle/working/submission.csv"
df = pd.DataFrame(columns=["id", "class", "prediction"])
i = 0
for path, pred in zip(y_test_path, y_pred_test):
val_max = pred.max()
mask_pred = (pred > (val_max * 0.75)) * 1
mask_rle = rle_encode(mask_pred)
df.loc[i] = [path, classe, mask_rle]
i = i + 1
df.to_csv(path_submission, index=False)
test_df = pd.read_csv(path_submission)
# Show the first 10 rows of the submission.csv file
test_df[0:10]
|
# # INDEX
# 1. Data Exploration
# 2. Feature Engineering
# 3. Models
# 4. Prediction
# ## 1. Explore the Data
import numpy as np
import pandas as pd
train = pd.read_csv(
r"/kaggle/input/test-competition-2783456756923/airline_tweets_train.csv"
)
test = pd.read_csv(
r"/kaggle/input/test-competition-2783456756923/airline_tweets_test.csv"
)
test.describe()
# ## 2. Feature Engineering
# **A) Feature Extraction**
# **B) Feature Selection**
# **C) Text Cleaning & Lemmatization**
# **D) Feature Encoding**
# **E) Test Set Preprocessing**
# ### A) Feature Extraction
# Here we are generating some simple variables from the text:
# * **'tweet_length':** Length of the tweet, in number of characters
# * **'tweet_words':** Number of words in the tweet
# * **'avg_word_length':** Average length of words (tweet_length/tweet_words)
# * **'has_retweets':** Binary variable indicating if tweet has been retweeted
train["tweet_length"] = train.text.apply(lambda x: len(x))
train["tweet_words"] = train.text.apply(lambda x: len(x.split()))
train["avg_word_length"] = train["tweet_length"] / train["tweet_words"]
train["has_retweets"] = np.where(train.retweet_count > 0, 1, 0)
train["user_timezone"] = np.where(
train.user_timezone.isnull(), "unknown", train.user_timezone
)
# We will also generate two more variables which are a bit more complex:
# * **'num_accounts':** How many twitter accounts are mentioned (@Delta, @pierre,...)
# * **'has_numbers':** Binary variable indicating if tweet contains numbers
def num_accounts(text):
# Split the text into words
words = text.split()
# Count the number of words starting with '@'
count = sum(1 for word in words if word.startswith("@"))
return count
train["num_accounts"] = train["text"].apply(num_accounts)
import re
def contains_numbers(string):
match = re.search(r"\d", string)
if match:
return 1
else:
return 0
train["has_numbers"] = train["text"].apply(contains_numbers)
train.head()
# **TASK:**
# 1. Think of an example of a new variable that may help identify the sentiment, e.g.:
# * Number of times a specific character appears (!, ?)
# * Any linear combination of existing variables
# 2. Code this new variable (with the help of GPT if needed)
# Generate a new variable
# train['name of your variable'] = your code here
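# For instance (an illustrative sketch; `num_exclamations` is just a hypothetical name), the number of exclamation marks may correlate with strong sentiment:
train["num_exclamations"] = train["text"].apply(lambda x: x.count("!"))
# If you keep such a variable, add it to `numeric_cols` below and compute it for `test` as well.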
# ### B) Feature Selection
numeric_cols = [
"retweet_count",
"tweet_length",
"tweet_words",
"avg_word_length",
"has_retweets",
"has_numbers",
"num_accounts"
# ,'name of your variable'
]
# Split Data into Features(X) and Target(Y)
x_train_numeric = train[numeric_cols]
y_train = train[["airline_sentiment"]]
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(
random_state=1, max_depth=4, max_leaf_nodes=10, min_samples_split=60
)
model_tree = clf.fit(x_train_numeric, y_train)
import matplotlib.pyplot as plt
from sklearn import tree
fig = plt.figure(figsize=(15, 10))
_ = tree.plot_tree(
clf,
feature_names=x_train_numeric.columns,
class_names=["negative", "positive", "neutral"],
filled=True,
)
# Select the 3 most important features according to the Decision Tree
selected_cols = ["tweet_words", "avg_word_length", "has_numbers"]
# ### C) Text Cleaning & Lemmatization
import nltk
# nltk.download("stopwords")
# nltk.download('wordnet')
import string
from nltk.corpus import stopwords
stopwords = stopwords.words("english")
# Function to clean text
def cleaner(text):
text = "".join([word for word in text if word not in string.punctuation])
text = "".join([word for word in text if not word.isnumeric()])
text = text.lower()
text = " ".join([word for word in text.split() if word not in stopwords])
return text
string.punctuation
original_text = "@VirginAmerica thanks to your outstanding crew who moved mountains to get me home to San Francisco tonight!. Our flight was fantastic"
clean_text = cleaner(original_text)
print("Original Text:")
print(original_text)
print("Clean Text:")
print(clean_text)
import stanza
def lemmatize(text):
nlp = stanza.Pipeline(lang="en", processors="tokenize,mwt,pos,lemma", verbose=False)
doc = nlp(text)
lemm_text = [word.lemma for sent in doc.sentences for word in sent.words]
try:
lemm_text = " ".join(lemm_text)
except:
lemm_text = ""
return lemm_text
def word_types(text):
nlp = stanza.Pipeline(lang="en", processors="tokenize,mwt,pos,lemma", verbose=False)
doc = nlp(text)
lemm_text = [word.upos for sent in doc.sentences for word in sent.words]
try:
lemm_text = " ".join(lemm_text)
except:
lemm_text = ""
return lemm_text
nlp = stanza.Pipeline(lang="en", processors="tokenize,mwt,pos,lemma", verbose=False)
" ".join([word.lemma for sent in nlp(original_text).sentences for word in sent.words])
# Types of Words:
# * ADJ: adjective
# * ADP: adposition
# * ADV: adverb
# * AUX: auxiliary
# * CCONJ: coordinating conjunction
# * DET: determiner
# * INTJ: interjection
# * NOUN: noun
# * NUM: numeral
# * PART: particle
# * PRON: pronoun
# * PROPN: proper noun
# * PUNCT: punctuation
# * SCONJ: subordinating conjunction
# * SYM: symbol
# * VERB: verb
# * X: other
print(original_text)
print(clean_text)
print(lemmatize(clean_text))
print(word_types(clean_text))
train["clean_text"] = train["text"].apply(cleaner)
# train['lemm_text'] = train['clean_text'].apply(lambda x: lemmatize(x))
# ### D) Feature Encoding
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=20)
X = vectorizer.fit_transform(train.clean_text.values.astype(str))
x_train_words = pd.DataFrame(
X.toarray(), columns=vectorizer.get_feature_names_out(), index=train.index
)
print(x_train_words.shape)
x_train_words.head()
x_train_final = pd.concat([x_train_numeric, x_train_words], axis=1)
x_train_final.head()
# ### E) Test Set Preprocessing
# We must perform the same processing on the testing dataset
# Generate Simple Columns
test["tweet_length"] = test.text.apply(lambda x: len(x))
test["tweet_words"] = test.text.apply(lambda x: len(x.split()))
test["avg_word_length"] = test["tweet_length"] / test["tweet_words"]
test["has_retweets"] = np.where(test.retweet_count > 0, 1, 0)
test["user_timezone"] = np.where(
test.user_timezone.isnull(), "unknown", test.user_timezone
)
# Generate Complex Columns
test["num_accounts"] = test["text"].apply(num_accounts)
test["has_numbers"] = test["text"].apply(contains_numbers)
# Generate your specific column
# test['your column name'] = your code here
# Select numeric columns
x_test_numeric = test[numeric_cols]
# Clean & Lemmatize the text
test["clean_text"] = test["text"].apply(cleaner)
# train['lemm_text'] = train['clean_text'].apply(lambda x: lemmatize(x))
# Vectorize the text
X = vectorizer.transform(test["clean_text"].values.astype(str))
x_test_words = pd.DataFrame(
X.toarray(), columns=vectorizer.get_feature_names_out(), index=test.index
)
x_test_final = pd.concat([x_test_numeric, x_test_words], axis=1)
x_test_final.head()
# Separate target
y_test = test[["airline_sentiment"]]
# Summary of variables
print("Shape of the training Dataset")
print("Features (X): ", x_train_final.shape)
print("Target (Y): ", y_train.shape)
print("Shape of the testing Dataset")
print("Features (X): ", x_test_final.shape)
print("Target (Y): ", y_test.shape)
# ## Models
#
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(
random_state=1, n_estimators=50, min_samples_split=2, max_depth=10
)
model_rf = clf.fit(x_train_final, y_train)
y_test = clf.predict(x_test_final)
y_test.shape
submission_file = test.copy()
submission_file = submission_file[["Id"]]
submission_file["Category"] = y_test
submission_file.head()
submission_file.to_csv(r"submission.csv", index=False)
|
# # semi supervised learning
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torch.utils.data import DataLoader
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = nn.Conv2d(20, 40, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(640, 150)
self.fc2 = nn.Linear(150, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = x.view(-1, 1, 28, 28)
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 640)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
x = self.log_softmax(x)
return x
net = Net().cuda()
from tqdm import tqdm_notebook
T1 = 100
T2 = 700
af = 3
def alpha_weight(step):
if step < T1:
return 0.0
elif step > T2:
return af
else:
return ((step - T1) / (T2 - T1)) * af
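# A quick sanity check of the schedule: the pseudo-label weight is 0 up to step T1, ramps linearly between T1 and T2, and then stays at af.
for s in (0, 100, 400, 700, 1000):
    print(s, alpha_weight(s))  # 0.0, 0.0, 1.5, 3.0, 3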
def semisup_train(model, train_loader, unlabeled_loader, test_loader):
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
EPOCHS = 100
# Instead of using current epoch we use a "step" variable to calculate alpha_weight
# This helps the model converge faster
step = 100
model.train()
for epoch in tqdm_notebook(range(EPOCHS)):
for batch_idx, x_unlabeled in enumerate(unlabeled_loader):
# Forward Pass to get the pseudo labels
x_unlabeled = x_unlabeled[0]
x_unlabeled = x_unlabeled.cuda()
model.eval()
output_unlabeled = model(x_unlabeled)
_, pseudo_labeled = torch.max(output_unlabeled, 1)
model.train()
# Now calculate the unlabeled loss using the pseudo label
output = model(x_unlabeled)
unlabeled_loss = alpha_weight(step) * F.nll_loss(output, pseudo_labeled)
            # Backpropagate
optimizer.zero_grad()
unlabeled_loss.backward()
optimizer.step()
# For every 50 batches train one epoch on labeled data
if batch_idx % 50 == 0:
# Normal training procedure
for batch_idx, (X_batch, y_batch) in enumerate(train_loader):
X_batch = X_batch.cuda()
y_batch = y_batch.cuda()
output = model(X_batch)
labeled_loss = F.nll_loss(output, y_batch)
optimizer.zero_grad()
labeled_loss.backward()
optimizer.step()
# Now we increment step by 1
step += 1
import torch
from torchvision.datasets import ImageFolder
from torch.utils.data import Subset
from sklearn.model_selection import train_test_split
from torchvision.transforms import Compose, ToTensor, Resize
from torch.utils.data import DataLoader
def train_val_dataset(dataset, val_split=0.2):
train_idx, val_idx = train_test_split(
list(range(len(dataset))), test_size=val_split
)
return Subset(dataset, train_idx), Subset(dataset, val_idx)
train_data = torchvision.datasets.MNIST(
root="./mnist",
train=True,
transform=torchvision.transforms.ToTensor(),
    # scale the grayscale values from 0-255 to 0-1
download=True,
)
train_data, test_data = train_val_dataset(train_data, val_split=0.2)
label_data, unlabel_data = train_val_dataset(train_data, val_split=0.5)
label_loader = DataLoader(dataset=label_data, batch_size=128, shuffle=True)
unlabel_loader = DataLoader(dataset=unlabel_data, batch_size=128, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=128, shuffle=True)
# semisup_train(net, label_loader, unlabel_loader, test_loader)
# # transfer learning
# https://www.kaggle.com/code/carloalbertobarbano/vgg16-transfer-learning-pytorch
# # lifelong learning EWC
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import autograd
import numpy as np
from torch.utils.data import DataLoader
class ElasticWeightConsolidation:
def __init__(self, model, crit, lr=0.001, weight=1000000):
self.model = model
self.weight = weight
self.crit = crit
self.optimizer = optim.Adam(self.model.parameters(), lr)
def _update_mean_params(self):
for param_name, param in self.model.named_parameters():
_buff_param_name = param_name.replace(".", "__")
self.model.register_buffer(
_buff_param_name + "_estimated_mean", param.data.clone()
)
def _update_fisher_params(self, current_ds, batch_size, num_batch):
dl = DataLoader(current_ds, batch_size, shuffle=True)
# dl = current_ds
log_liklihoods = []
# qerror_losses = []
for i, (data, target) in enumerate(dl):
if i > num_batch:
break
data = data.cuda()
output = F.log_softmax(self.model(data), dim=1)
log_liklihoods.append(output[:, target])
# loss = self.crit(output, target)
# qerror_losses.append(loss)
log_likelihood = torch.cat(log_liklihoods).mean()
grad_log_liklihood = autograd.grad(log_likelihood, self.model.parameters())
_buff_param_names = [
param[0].replace(".", "__") for param in self.model.named_parameters()
]
for _buff_param_name, param in zip(_buff_param_names, grad_log_liklihood):
self.model.register_buffer(
_buff_param_name + "_estimated_fisher", param.data.clone() ** 2
)
def register_ewc_params(self, dataset, batch_size, num_batches):
self._update_fisher_params(dataset, batch_size, num_batches)
self._update_mean_params()
def _compute_consolidation_loss(self, weight):
try:
losses = []
for param_name, param in self.model.named_parameters():
_buff_param_name = param_name.replace(".", "__")
estimated_mean = getattr(
self.model, "{}_estimated_mean".format(_buff_param_name)
)
estimated_fisher = getattr(
self.model, "{}_estimated_fisher".format(_buff_param_name)
)
losses.append((estimated_fisher * (param - estimated_mean) ** 2).sum())
return (weight / 2) * sum(losses)
except AttributeError:
return 0
def forward_backward_update(self, target, input_data):
output = self.model(input_data)
print(output, target)
loss = self.crit(output, target)
loss += self._compute_consolidation_loss(self.weight)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss
def save(self, filename):
torch.save(self.model, filename)
def load(self, filename):
self.model = torch.load(filename)
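# For reference, `_compute_consolidation_loss` above implements the standard EWC penalty
# (lambda / 2) * sum_i F_i * (theta_i - theta_i*)^2, where F_i is the diagonal Fisher information
# estimate registered per parameter, theta_i* is the parameter value stored after the previous task,
# and lambda is the `weight` argument of the class; `forward_backward_update` adds it to the task loss.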
def list_of_groups(init_list, childern_list_len):
"""
init_list为初始化的列表,childern_list_len初始化列表中的几个数据组成一个小列表
:param init_list:
:param childern_list_len:
:return:
"""
list_of_group = zip(*(iter(init_list),) * childern_list_len)
end_list = [list(i) for i in list_of_group]
count = len(init_list) % childern_list_len
    if count != 0:
        end_list.append(init_list[-count:])
return end_list
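# A quick usage example: split a 5-element list into groups of 2.
print(list_of_groups(list(range(5)), 2))  # [[0, 1], [2, 3], [4]]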
train_group = list_of_groups(list(range(len(train_data))), int(len(train_data) / 10))
ewc_model = ElasticWeightConsolidation(net, torch.nn.CrossEntropyLoss())
for i, train_idx in enumerate(train_group):
print("dataset", i)
train_ds = Subset(train_data, train_idx)
train_loader = DataLoader(dataset=train_ds, batch_size=2, shuffle=True)
for epoch in tqdm_notebook(range(1)):
for batch_idx, (x, y) in enumerate(train_loader):
x = x.cuda()
y = y.cuda()
ewc_model.forward_backward_update(y, x)
break
ewc_model.register_ewc_params(train_ds, 2, 100)
|
# ## 1. Introduction
# Name: Alec Daalman
# Username: AlecDaalman
# Leaderboard rank:
# ## 2. Data
# ### 2.1 Dataset
# In this section, we load and explore the dataset.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../input"))
train = pd.read_csv(
"../input/LANL-Earthquake-Prediction/train.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
# ### 2.2 Data Exploration
# Explore the features and target variables of the dataset. Think about making some scatter plots, box plots, histograms or printing the data, but feel free to choose any method that suits you.
# What do you think is the right performance metric to use for this dataset? Clearly explain which performance metric you choose and why.
# Algorithmic bias can be a real problem in Machine Learning. So based on this, should we use the Race and the Sex features in our machine learning algorithm? Explain what you believe.
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 1, figsize=(20, 12))
ax[0].scatter(train.index[:10000000], train.acoustic_data[:10000000], c="darkred")
ax[0].set_title("Acoustic data of 10 Mio rows")
ax[0].set_xlabel("Index")
ax[0].set_ylabel("Quaketime in ms")
ax[1].scatter(train.index[:10000000], train.time_to_failure[:10000000], c="darkred")
ax[1].set_title("Quaketime of 10 Mio rows")
ax[1].set_xlabel("Index")
ax[1].set_ylabel("Acoustic signal")
# split_size = 150_000
# box_acoustic = [[] for i in range(int(np.floor(train.shape[0] / split_size)))]
# for i in range(int(np.floor(train.shape[0] / split_size))):
# box_acoustic[i] = train.acoustic_data[i*split_size:(i+1)*split_size]
# ax[2].boxplot(np.asarray(box_acoustic)[:int(np.floor(len(box_acoustic)/1000)),:]);
acoustic_fft = np.fft.fft(np.asarray(train.acoustic_data))
print(acoustic_fft)
fig, ax = plt.subplots(2, 1, figsize=(20, 12))
ax[0].plot(np.abs(acoustic_fft))  # plot the magnitude of the FFT (the raw values are complex)
# ### 2.3 Data Preparation
# This dataset hasn't been cleaned yet, meaning that some attributes (features) are in numerical format and some are in categorical format. Moreover, there are missing values as well. However, all of Scikit-learn's implementations of these algorithms expect numerical features. Check each feature to see whether it is categorical and use a method to transform such features to numerical values. For the numerical data, handle the missing values and normalize the data.
# Note that you are only allowed to use training data for preprocessing but you then need to perform similar changes on test data too.
# You can use [pipelining](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) to help with the preprocessing.
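# As a rough sketch of that idea (the numeric feature DataFrames named below are placeholders, not variables defined in this notebook), a pipeline can bundle imputation and scaling so the same fitted transforms are reused on the test data:
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline(
    [
        ("imputer", SimpleImputer(strategy="median")),  # fill missing values with the median
        ("scaler", StandardScaler()),  # standardize to zero mean and unit variance
    ]
)
# x_train_prepared = num_pipeline.fit_transform(x_train_numeric)  # fit on training data only
# x_test_prepared = num_pipeline.transform(x_test_numeric)  # reuse the fitted pipeline on the test data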
# ##### 2.3.1 Feature extraction
from sklearn.model_selection import train_test_split
rows = 150_000
segments = int(np.floor(train.shape[0] / rows))
x = pd.DataFrame(
index=range(segments), dtype=np.float64, columns=["ave", "std", "max", "min"]
)
y = pd.DataFrame(index=range(segments), dtype=np.float64, columns=["time_to_failure"])
for segment in range(segments):
samples = train.acoustic_data[segment * rows : (segment + 1) * rows]
x.loc[segment, "ave"] = np.mean(samples)
x.loc[segment, "std"] = np.std(samples)
x.loc[segment, "max"] = np.max(samples)
x.loc[segment, "min"] = np.min(samples)
y.loc[segment, "time_to_failure"] = train.time_to_failure[(segment + 1) * rows]
|
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
data = pd.read_csv(
"/kaggle/input/cross-site-scripting-xss-dataset-for-deep-learning/XSS_dataset.csv"
)
# Converting HTML code to plain text
data["Sentence"] = data["Sentence"].replace("<[^<]+?>", "", regex=True)
def train_and_evaluate(split_ratio):
# Splitting the data into training and testing sets
train_data = data[: int(len(data) * split_ratio)]
test_data = data[int(len(data) * split_ratio) :]
# Initializing the count vectorizer
vectorizer = CountVectorizer(stop_words="english")
# Creating the feature matrix for the training data
train_features = vectorizer.fit_transform(train_data["Sentence"])
# Fitting a logistic regression model to the training data
model = LogisticRegression()
model.fit(train_features, train_data["Label"])
# Creating the feature matrix for the testing data and making predictions on the testing data
test_features = vectorizer.transform(test_data["Sentence"])
preds = model.predict(test_features)
preds_proba = model.predict_proba(test_features)[:, 1]
# Evaluating the model's accuracy
accuracy = accuracy_score(test_data["Label"], preds)
print(
f"Accuracy (split ratio {int(split_ratio * 100)}:{int((1 - split_ratio) * 100)}): {accuracy}"
)
# Calculating ROC curve data and AUC score
fpr, tpr, _ = roc_curve(test_data["Label"], preds_proba)
auc = roc_auc_score(test_data["Label"], preds_proba)
return fpr, tpr, auc
split_ratios = [0.6, 0.7, 0.8]
# Train and evaluate for each split ratio
roc_data = [train_and_evaluate(split_ratio) for split_ratio in split_ratios]
# Plot separate ROC curves
fig, axes = plt.subplots(1, 3, figsize=(18, 4))
for i, (fpr, tpr, auc) in enumerate(roc_data):
axes[i].plot(fpr, tpr, label=f"AUC: {auc:.2f}")
axes[i].plot([0, 1], [0, 1], linestyle="--", label="Random Classifier")
axes[i].set_xlabel("False Positive Rate")
axes[i].set_ylabel("True Positive Rate")
axes[i].set_title(
f"ROC Curve (split ratio {int(split_ratios[i] * 100)}:{int((1 - split_ratios[i]) * 100)})"
)
axes[i].legend()
plt.show()
|
# # Imports
from fastai.imports import *
from fastai.tabular.all import *
from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from IPython.display import display
from sklearn import metrics
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
import math
# # Load the dataset and report basic info
dataset_path = "/kaggle/input/vehicle-dataset-from-cardekho/car data.csv"
df = pd.read_csv(dataset_path)
df.Car_Name.describe()
# sorted(df.Car_Name.unique())
df.head()
# # Feature Engineering
# Refer to https://docs.fast.ai/tutorial.tabular.html
splits = RandomSplitter(valid_pct=0.2)(range_of(df))
to = TabularPandas(
df,
procs=[Categorify],
cat_names=["Car_Name", "Fuel_Type", "Seller_Type", "Transmission"],
cont_names=["Year", "Present_Price", "Kms_Driven", "Owner"],
y_names="Selling_Price",
splits=splits,
)
to.xs.head()
to.ys.head().values.ravel()
# # Train RandomForestRegressor
X_train, y_train = to.train.xs, to.train.ys.values.ravel()
X_valid, y_valid = to.valid.xs, to.valid.ys.values.ravel()
m = RandomForestRegressor()
m.fit(X_train, y_train)
# # Score Data Sets
def mse(actual, expected):
return ((actual - expected) ** 2).mean()
def rmse(actual, expected):
return math.sqrt(mse(actual, expected))
predict_train = m.predict(X_train)
r2_train = m.score(X_train, y_train)
mse_train = mse(predict_train, y_train)
rmse_train = rmse(predict_train, y_train)
predict_valid = m.predict(X_valid)
r2_valid = m.score(X_valid, y_valid)
mse_valid = mse(predict_valid, y_valid)
rmse_valid = rmse(predict_valid, y_valid)
print(f"R^2 Train: {r2_train}")
print(f"R^2 Valid: {r2_valid}")
print(f"\nMSE Train: {mse_train}")
print(f"MSE Valid: {mse_valid}")
print(f"\nRMSE Train: {rmse_train}")
print(f"RMSE Valid: {rmse_valid}")
# # Visualization
plt.figure(figsize=(14, 6))
plt.title("Comparison of Training Data")
sns.lineplot(y_train, label="Actual Selling Price")
sns.lineplot(predict_train, label="Predicted Selling Price")
plt.figure(figsize=(14, 6))
plt.title("Comparison of Validation Data")
sns.lineplot(y_valid, label="Actual Selling Price")
sns.lineplot(predict_valid, label="Predicted Selling Price")
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# np.set_printoptions(threshold = np.inf)
# pd.options.display.max_rows = None
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor, LGBMClassifier
import lightgbm as lgb
from sklearn.model_selection import (
GridSearchCV,
KFold,
cross_val_score,
train_test_split,
)
from sklearn.metrics import (
mean_squared_error,
f1_score,
classification_report,
accuracy_score,
mean_absolute_error,
log_loss,
)
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
submission = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
train.shape, test.shape, submission.shape
train.head(3)
test.head(3)
submission.head(3)
train_X = train.drop("Survived", axis=1)
train_y = train["Survived"]
test_X = test
df = pd.concat([train_X, test_X])
df.shape
# df["Cabin"][df["Cabin"].notnull()]
df["len_Name"] = df["Name"].apply(len)
df["len_Ticket"] = df["Ticket"].apply(len)
df["Cabin"] = df["Cabin"].fillna("Z")
df["Cabin"] = df["Cabin"].apply(lambda x: x[0])
df["cabin_alphabet"] = df["Cabin"].apply(
lambda x: "ABCDEFGHIJKLMNOPQRSTUVWXYZ".index(x)
)
df.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1, inplace=True)
df["Sex"] = df["Sex"].map({"female": 0, "male": 1})
df["Embarked"] = df["Embarked"].map({"C": 2, "Q": 0, "S": 1})
df
df.info()
def fill_nan(columns, dtype=float):
"""
predict features with XGBoost and fill nan
dtype : type of predicted value (ex. float, int, ...)
"""
print(f"number of NaN : {df[columns].isnull().sum()}")
train_x = df.drop(columns, axis=1)[df[columns].notnull()].values
train_y = df[columns][df[columns].notnull()].values
test_x = df.drop(columns, axis=1)[df[columns].isnull()].values
print(f"shape : {train_x.shape}, {train_y.shape}, {test_x.shape}")
model = XGBRegressor()
model.fit(train_x, train_y)
pred = model.predict(test_x).astype(dtype)
    df.loc[df[columns].isnull(), columns] = pred  # use .loc to avoid chained assignment
print(f"predict : {pred}")
df[columns] = df[columns].astype(dtype)
fill_nan("Embarked", dtype=int)
fill_nan("Fare", dtype=float)
fill_nan("Age", dtype=int)
df["Age"][df["Age"] < 0] = 0 # 1つだけ負の年齢が出力された
df.info()
target = "Survived"
x_train_df = df.iloc[: len(train), :]
y_train_df = train[target]
x_test_df = df.iloc[len(train) :, :]
x_train = x_train_df.values
y_train = y_train_df.values
x_test = x_test_df.values
def validation(model):
score_log, score_acc = [], []
kf = KFold(n_splits=4, shuffle=True, random_state=71)
for tr_idx, va_idx in kf.split(x_train):
tr_x, va_x = x_train_df.iloc[tr_idx], x_train_df.iloc[va_idx]
tr_y, va_y = y_train_df.iloc[tr_idx], y_train_df.iloc[va_idx]
model.fit(tr_x, tr_y)
va_pred = model.predict_proba(va_x)[:, 1]
score_log += [log_loss(va_y, va_pred)]
score_acc += [accuracy_score(va_y, va_pred > 0.5)]
return round(np.mean(score_log), 4), round(np.mean(score_acc), 4)
logloss, accuracy = validation(LGBMClassifier(n_estimators=20, random_state=71))
logloss, accuracy
import optuna.integration.lightgbm as lgbopt
def params_tuning(NUM_BOOST_ROUND=30, TEST_SIZE=0.3, x_train=x_train):
opt_params = {
"objective": "regression",
"metric": "rmse",
"boosting_type": "gbdt",
"feature_pre_filter": False,
"verbosity": -1,
}
X_tra, X_tes, y_tra, y_tes = train_test_split(x_train, y_train, test_size=TEST_SIZE)
reg_train = lgb.Dataset(X_tra, y_tra)
reg_eval = lgb.Dataset(X_tes, y_tes, reference=reg_train)
opt = lgbopt.train(
opt_params,
reg_train,
valid_sets=reg_eval, # valid_sets=[reg_eval]
        verbose_eval=False,  # verbose_eval=100  # whether to print the training log
num_boost_round=NUM_BOOST_ROUND,
)
print(opt.params)
return opt.params
params_optuna = params_tuning(NUM_BOOST_ROUND=30, TEST_SIZE=0.3, x_train=x_train)
params_optuna
model = LGBMClassifier()
model.fit(x_train, y_train)
pred = model.predict(x_test)
pred
submission["Survived"] = pred
submission
submission.to_csv("submission.csv", index=False)
|
# Introduction
# Developing a GAN for generating images requires both a discriminator convolutional neural network model, which classifies whether a given image is real or generated, and a generator model, which uses transposed convolutional layers to transform an input into a full two-dimensional image of pixel values.
# 
# Tutorial link for "Detailed working on DCGAN network from scratch"
# https://www.kaggle.com/ashrafkhan94/tutorial-deep-convolutional-gans-on-mnist?scriptVersionId=55785402
# 1. Importing Libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Dense
from keras.layers import Conv2D, Conv2DTranspose
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LeakyReLU
from keras.layers import Reshape
from keras.utils.vis_utils import plot_model
# 2. Loading & Exploring CIFAR-10 Dataset
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print("Train", X_train.shape, y_train.shape)
print("Test", X_test.shape, y_test.shape)
plt.figure(figsize=(15, 7))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.axis("off")
plt.imshow(X_train[i])
plt.show()
# 3. Define the Discriminator Model
def define_discriminator(in_shape=(32, 32, 3)):
model = Sequential()
model.add(Conv2D(64, (3, 3), strides=(2, 2), padding="same", input_shape=in_shape))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation="sigmoid"))
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
return model
model = define_discriminator()
plot_model(
model, to_file="discriminator_plot.png", show_shapes=True, show_layer_names=True
)
# 4. Generate Samples
def load_real_samples():
(trainX, _), (_, _) = cifar10.load_data()
X = trainX.astype("float32")
X = (X - 127.5) / 127.5
return X
def generate_real_samples(dataset, n_samples):
ix = np.random.randint(0, dataset.shape[0], n_samples)
X = dataset[ix]
y = np.ones((n_samples, 1))
return X, y
def generate_fake_samples(n_samples):
X = np.random.rand(32 * 32 * 3 * n_samples)
# update to have the range [-1, 1]
X = -1 + X * 2
X = X.reshape((n_samples, 32, 32, 3))
y = np.zeros((n_samples, 1))
return X, y
# 5. Train the Discriminator
# First we will train the discriminator model to distinguish real from fake images by passing batches of real and fake images separately: y=1 for real images, y=0 for fake images.
#
def train_discriminator(model, dataset, n_epochs=40, n_batch=128):
half_batch = int(n_batch / 2)
for i in range(n_epochs):
X_real, y_real = generate_real_samples(dataset, half_batch)
model.train_on_batch(X_real, y_real)
X_fake, y_fake = generate_fake_samples(half_batch)
model.train_on_batch(X_fake, y_fake)
_, acc_real = model.evaluate(X_real, y_real, verbose=0)
_, acc_fake = model.evaluate(X_fake, y_fake, verbose=0)
print(">%d real=%.0f%% fake=%.0f%%" % (i + 1, acc_real * 100, acc_fake * 100))
model = define_discriminator()
dataset = load_real_samples()
train_discriminator(model, dataset)
# 6. Define and Use the Generator Model
#
def define_generator(latent_dim):
model = Sequential()
n_nodes = 256 * 4 * 4
model.add(Dense(n_nodes, input_dim=latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(Reshape((4, 4, 256)))
# upsample to 8x8
model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# upsample to 16x16
model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# upsample to 32x32
model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Conv2D(3, (4, 4), activation="tanh", padding="same"))
return model
latent_dim = 100
model = define_generator(latent_dim)
plot_model(model, to_file="generator_plot.png", show_shapes=True, show_layer_names=True)
# 7. Generate points in Latent space.
# We have to generate new random points in the latent space. We can achieve this by calling the randn() NumPy function for generating arrays of random numbers drawn from a standard Gaussian.
#
def generate_latent_points(latent_dim, n_samples):
x_input = np.random.randn(latent_dim * n_samples)
# reshape into a batch of inputs for the network
x_input = x_input.reshape(n_samples, latent_dim)
return x_input
def generate_fake_samples(generator, latent_dim, n_samples):
x_input = generate_latent_points(latent_dim, n_samples)
X = generator.predict(x_input)
y = np.zeros((n_samples, 1))
return X, y
latent_dim = 100
# define the discriminator model
model = define_generator(latent_dim)
# generate samples
n_samples = 25
X, _ = generate_fake_samples(model, latent_dim, n_samples)
X = (X + 1) / 2.0
plt.figure(figsize=(10, 5))
for i in range(n_samples):
plt.subplot(5, 5, i + 1)
plt.axis("off")
plt.imshow(X[i])
plt.show()
#
# Running the example generates 25 examples of fake CIFAR-10 images and visualizes them on a single plot of 5 by 5 images. As the model is not trained, the generated images are completely random pixel values in [-1, 1], rescaled to [0, 1]. As we might expect, the images look like a mess of gray.
#
# 8. Train Generator Model.
# define the combined generator and discriminator model, for updating the generator
def define_gan(generator, discriminator):
discriminator.trainable = False
model = Sequential()
model.add(generator)
model.add(discriminator)
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss="binary_crossentropy", optimizer=opt)
return model
latent_dim = 100
discriminator = define_discriminator()
generator = define_generator(latent_dim)
gan_model = define_gan(generator, discriminator)
# plot gan model
plot_model(gan_model, to_file="gan_plot.png", show_shapes=True, show_layer_names=True)
def save_plot(examples, epoch, n=10):
# plot images
plt.figure(figsize=(10, 5))
for i in range(n * n):
# define subplot
plt.subplot(n, n, 1 + i)
# turn off axis
plt.axis("off")
# plot raw pixel data
plt.imshow(examples[i])
# save plot to file
filename = "generated_plot_e%03d.png" % (epoch + 1)
plt.savefig(filename)
plt.close()
def summarize_performance(
epoch, generator, discriminator, dataset, latent_dim, n_samples=100
):
x_real, y_real = generate_real_samples(dataset, n_samples)
_, acc_real = discriminator.evaluate(x_real, y_real, verbose=0)
x_fake, y_fake = generate_fake_samples(generator, latent_dim, n_samples)
_, acc_fake = discriminator.evaluate(x_fake, y_fake, verbose=0)
print(">Accuracy real: %.0f%%, fake: %.0f%%" % (acc_real * 100, acc_fake * 100))
save_plot(x_fake, epoch)
# save plot to file
filename = "generator_model_%03d.h5" % (epoch + 1)
generator.save(filename)
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=150, n_batch=128):
bat_per_epo = int(dataset.shape[0] / n_batch)
half_batch = int(n_batch / 2)
for i in range(n_epochs):
g_losses, d_losses = list(), list()
for j in range(bat_per_epo):
X_real, y_real = generate_real_samples(dataset, half_batch)
X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
X, y = np.vstack((X_real, X_fake)), np.vstack((y_real, y_fake))
# Train & Update Discriminator weights
d_loss, _ = d_model.train_on_batch(X, y)
# prepare points in latent space as input for the generator
X_gan = generate_latent_points(latent_dim, n_batch)
# create inverted labels for the fake samples
y_gan = np.ones((n_batch, 1))
# update the generator via the discriminator's error
g_loss = gan_model.train_on_batch(X_gan, y_gan)
g_losses.append(g_loss)
d_losses.append(d_loss)
print(">%d, d=%.3f, g=%.3f" % (i + 1, np.mean(d_losses), np.mean(g_losses)))
# evaluate the model every n_eval epochs
if (i + 1) % 10 == 0:
summarize_performance(i, g_model, d_model, dataset, latent_dim)
latent_dim = 100
discriminator = define_discriminator()
generator = define_generator(latent_dim)
gan_model = define_gan(generator, discriminator)
dataset = load_real_samples()
train(generator, discriminator, gan_model, dataset, latent_dim)
# 9. Model Performance
# 1. After 10 Epochs
PATH = "/kaggle/working/"
img = plt.imread(PATH + "./generated_plot_e010.png")
_ = plt.figure(figsize=(17, 7))
_ = plt.axis("off")
_ = plt.imshow(img)
# 2. After 50 EPOCHS
img = plt.imread(PATH + "./generated_plot_e050.png")
_ = plt.figure(figsize=(17, 7))
_ = plt.axis("off")
_ = plt.imshow(img)
# 3. After 100 Epochs
img = plt.imread(PATH + "./generated_plot_e100.png")
_ = plt.figure(figsize=(17, 7))
_ = plt.axis("off")
_ = plt.imshow(img)
# 4. After 150 Epochs
img = plt.imread(PATH + "./generated_plot_e150.png")
_ = plt.figure(figsize=(17, 7))
_ = plt.axis("off")
_ = plt.imshow(img)
# 10. Use Final Generator Model
from keras.models import load_model
def generate_latent_points(latent_dim, n_samples):
x_input = np.random.randn(latent_dim * n_samples)
x_input = x_input.reshape(n_samples, latent_dim)
return x_input
def show_plot(examples, n):
# plot images
plt.figure(figsize=(20, 10))
for i in range(n * n):
# define subplot
plt.subplot(n, n, 1 + i)
# turn off axis
plt.axis("off")
# plot raw pixel data
plt.imshow(examples[i])
plt.show()
model = load_model("generator_model_150.h5")
latent_points = generate_latent_points(100, 100)
X = model.predict(latent_points)
# scale from [-1,1] to [0,1]
X = (X + 1) / 2.0
# plot the result
show_plot(X, 10)
|
# # Install and Import Modules
import pickle
import PIL
import math
import albumentations
import tensorflow as tf
import tensorflow_datasets as tfds
import codelab_utils.mpl_styles as mpl_styles
import numpy as np
from tensorflow.keras import layers, optimizers, metrics, losses, utils
from tensorflow.keras import applications as apps, callbacks as cbs
from tensorflow.keras.utils import plot_model
from matplotlib import pyplot as plt
from matplotlib import patches as patches
from itertools import accumulate
from functools import reduce
mpl_styles.set_default_styles()
plt.rcParams["font.family"] = "Poppins"
# Load wider_face dataset
train_ds, val_ds, test_ds = tfds.load(
"wider_face",
# split=['train[:80%]', 'train[80%:]', 'test'],
split=["train[:5%]", "validation[:5%]", "test[:5%]"],
data_dir="/kaggle/input/wider-face-tensorflow",
read_config=tfds.ReadConfig(try_autocache=False),
)
# train_ds, = tfds.load('wider_face', split=['train[:5]'], data_dir='/kaggle/input/wider-face-tensorflow')
IMG_SIZE = 256
def resize_fn(image_shape, bboxes):
# Resize ratios
H, W = image_shape
ratio_H, ratio_W = IMG_SIZE / H, IMG_SIZE / W
bboxes = bboxes * [ratio_H, ratio_W, ratio_H, ratio_W]
return bboxes.astype("float32")
def preprocess(item):
image, bboxes = item["image"], item["faces"]["bbox"]
# Resize image to IMG_SIZE
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
image = tf.cast(image, tf.float32) / 255.0
# Resize bounding boxes to IMG_SIZE
bboxes = tf.numpy_function(
func=resize_fn, inp=[image.shape[:-1], bboxes], Tout=tf.float32
)
bboxes.set_shape([None, 4])
# return image, resolve_bbox_as_grid(bboxes)
# return image, tf.RaggedTensor.from_tensor(tf.expand_dims(bboxes, axis=0))
return image, tf.RaggedTensor.from_tensor(bboxes)
# train_prep_ds = train_ds.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
train_prep_ds = train_ds.map(preprocess, num_parallel_calls=None)
val_prep_ds = val_ds.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
test_prep_ds = test_ds.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
bitr = iter(train_prep_ds.batch(2))
images, bboxes = next(bitr)
images.shape, bboxes.shape
itr = iter(train_prep_ds)
# next(itr)
def bbox_to_rect(bbox, image_shape):
image_h, image_w = image_shape
y_min, x_min, y_max, x_max = bbox * [image_h, image_w, image_h, image_w]
width, height = x_max - x_min, y_max - y_min
return [x_min, y_min, width, height]
def apply_bbox(axes, bbox, image_shape):
x_min, y_min, width, height = bbox_to_rect(bbox, image_shape)
rect_patch = patches.Rectangle([x_min, y_min], width, height, ec="red", fc="none")
axes.add_patch(rect_patch)
def show_input(axes, image, bboxes):
axes.imshow(image)
for bbox in bboxes:
apply_bbox(axes, bbox, image.shape[:-1])
def plot_grid(items, cols=5):
rows = (len(items) + cols - 1) // cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 10))
def axis_off(ax):
ax.set_axis_off()
return ax
raveled_axes = list(map(axis_off, axes.ravel()))
for index, item in enumerate(items):
axes = raveled_axes[index]
show_input(axes, item[0], item[1])
itr = iter(train_prep_ds)
items = [next(itr) for _ in range(20)]
plot_grid(items)
# # UNet Model
# ## Blocks
INITIAL_WIDTH = 64
DROPOUT_RATE = 0.6
DEBUG = False
# ### Attention
def attention_block(x, g, dims):
"""
x: Feature map from skip connection.
g: Feature map from the last layer. It has smaller spatial dimensions
"""
# Transform x with strided convolution to match gating dimensions
stride_x = x.shape[1] // g.shape[1]
# print('Initial Shapes: ', x.shape, g.shape, stride_x)
x_transform = layers.Conv2D(dims, 3, (stride_x, stride_x), padding="same")(x)
# Transform g with 1x1 convolution
g_transform = layers.Conv2D(dims, 1, padding="same")(g)
# print('x_transform: ', x_transform.shape, 'g_transform: ', g_transform.shape)
    # Combine transformed x and g and collapse them to a single-channel output
    # which will be used to scale the input x. Note that this implementation applies
    # a ReLU to the combination; a sigmoid would constrain the scaling factors to [0, 1).
x_g_combined = layers.Activation("relu")(x_transform + g_transform)
x_g_collapsed = layers.Conv2D(1, 1, padding="same", activation="relu")(x_g_combined)
# Match the computed weights to the input x.
attention_weights = layers.UpSampling2D((stride_x, stride_x))(x_g_collapsed)
attention_weights = tf.repeat(attention_weights, x.shape[-1], axis=-1)
# print('attention_weights: ', attention_weights.shape)
# Scale input x with attention
scaled_x = attention_weights * x
# Apply another convolution to compute the output
output = layers.Conv2D(x.shape[-1], 1, padding="same")(scaled_x)
output = layers.BatchNormalization()(output)
# print('attention_block: ', output.shape)
return output
# x = tf.random.normal((1, 32, 32, 64))
# g = tf.random.normal((1, 16, 16, 128))
# attention_block(x, g, 32)
# ### Encoder and Decoder
def encoder_block(input, width_multiplier, name="block"):
conv_1 = layers.Conv2D(
INITIAL_WIDTH * width_multiplier, (3, 3), activation="relu", padding="same"
)(input)
conv_2 = layers.Conv2D(
INITIAL_WIDTH * width_multiplier, (3, 3), activation="relu", padding="same"
)(conv_1)
normalization = layers.BatchNormalization()(conv_2)
pool = layers.MaxPooling2D((2, 2))(normalization)
dropout = layers.Dropout(DROPOUT_RATE)(pool)
if DEBUG:
print(name, input.shape, conv_1.shape, conv_2.shape, pool.shape)
return dropout
def central_crop(x, target_size):
current_size = x.shape[1]
extra_size = current_size - target_size
start = extra_size // 2
end = start + target_size
return x[:, start:end, start:end, :]
def decoder_block(input, skip_input, width_multiplier, name="block"):
# Apply attention to the skip input
attended_skip_input = attention_block(
skip_input, input, INITIAL_WIDTH * width_multiplier
)
conv_transpose = layers.Conv2DTranspose(
INITIAL_WIDTH * width_multiplier, (3, 3), strides=(2, 2), padding="same"
)(input)
cropped_skip_input = central_crop(attended_skip_input, conv_transpose.shape[1])
conv_input = layers.Concatenate()([conv_transpose, cropped_skip_input])
conv_1 = layers.Conv2D(
INITIAL_WIDTH * width_multiplier, (3, 3), activation="relu", padding="same"
)(conv_input)
conv_2 = layers.Conv2D(
INITIAL_WIDTH * width_multiplier, (3, 3), activation="relu", padding="same"
)(conv_1)
normalization = layers.BatchNormalization()(conv_2)
dropout = layers.Dropout(DROPOUT_RATE)(normalization)
# dropout = layers.Dropout(DROPOUT_RATE)(conv_2)
if DEBUG:
print(name, conv_input.shape, conv_1.shape, conv_2.shape)
return dropout
# ### Loss Function
SMOOTH_L1_LOSS = tf.keras.losses.Huber()
def resolve_bbox_as_grid(bboxes):
"""
bboxes: A list of boxes with shape (N_BOXES, 4)
The last dimension contains box coordinates arranged as [y_min, x_min, y_max, x_max]
Returns:
A tensor of shape (IMG_SIZE, IMG_SIZE, 2)
The last dimension contains the box dimensions arranged as (H, W)
"""
# Extract box properties.
[y_min, x_min, y_max, x_max] = tf.split(bboxes, bboxes.shape[-1], axis=-1)
# Compute box dimensions which includes their heights and widths
h, w = y_max - y_min, x_max - x_min
hw = tf.concat([h, w], axis=-1).to_tensor()
# Compute (x_min, y_min) for the boxes which
# represents their top-left corner.
x_indices = tf.cast(x_min * IMG_SIZE, tf.int64)
y_indices = tf.cast(y_min * IMG_SIZE, tf.int64)
indices = tf.concat([x_indices, y_indices], axis=-1).to_tensor()
# Compute true pixel boxes per input pixel.
hw_grid = tf.zeros((IMG_SIZE, IMG_SIZE, 2))
if tf.shape(bboxes)[0] != 0:
hw_grid = tf.tensor_scatter_nd_update(hw_grid, indices, hw)
return hw_grid
def compute_h_loss():
def fn(y_true, y_pred):
# Compute a (IMG_SIZE, IMG_SIZE, 2) grid for each input
# The last dimension contains [h, w] of the box associated with the grid element.
true_hw = tf.map_fn(
resolve_bbox_as_grid,
y_true,
parallel_iterations=True,
fn_output_signature=tf.float32,
)
true_h, _ = tf.split(true_hw, true_hw.shape[-1], axis=-1)
# Compute a mask for valid boxes using a non-zero height criteria.
# We could do the same with a non-zero width as well.
valid_bboxes = tf.cast(
tf.math.greater(true_h, tf.zeros_like(true_h)), tf.float32
)
# Extract height, width and confidence of the box per grid element
pred_h, _, _ = tf.split(y_pred, y_pred.shape[-1], axis=-1)
        # Compute the height loss.
loss = SMOOTH_L1_LOSS(true_h, pred_h * valid_bboxes)
return loss
fn.__name__ = "h_loss"
return fn
def compute_w_loss():
def fn(y_true, y_pred):
# Compute a (IMG_SIZE, IMG_SIZE, 2) grid for each input
# The last dimension contains [h, w] of the box associated with the grid element.
true_hw = tf.map_fn(
resolve_bbox_as_grid,
y_true,
parallel_iterations=True,
fn_output_signature=tf.float32,
)
true_h, true_w = tf.split(true_hw, true_hw.shape[-1], axis=-1)
# Compute a mask for valid boxes using a non-zero height criteria.
# We could do the same with a non-zero width as well.
valid_bboxes = tf.cast(
tf.math.greater(true_h, tf.zeros_like(true_h)), tf.float32
)
# Extract height, width and confidence of the box per grid element
_, pred_w, _ = tf.split(y_pred, y_pred.shape[-1], axis=-1)
# Compute the loss of Width.
loss = SMOOTH_L1_LOSS(true_w, pred_w * valid_bboxes)
return loss
fn.__name__ = "w_loss"
return fn
def compute_loss():
h_loss = compute_h_loss()
w_loss = compute_w_loss()
def fn(y_true, y_pred):
return h_loss(y_true, y_pred) + w_loss(y_true, y_pred)
# return h_loss(y_true, y_pred)
fn.__name__ = "loss"
return fn
# ### Metrics
class HLoss(tf.keras.metrics.Metric):
def __init__(self, name="h_loss", **kwargs):
super(HLoss, self).__init__(name=name, **kwargs)
self.loss_fn = compute_h_loss()
self.h_losses = self.add_weight(name="h_losses", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
loss = self.loss_fn(y_true, y_pred)
self.h_losses.assign_add(tf.reduce_sum(loss))
def result(self):
return self.h_losses
class WLoss(tf.keras.metrics.Metric):
def __init__(self, name="w_loss", **kwargs):
super(WLoss, self).__init__(name=name, **kwargs)
self.loss_fn = compute_w_loss()
self.w_losses = self.add_weight(name="w_losses", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
loss = self.loss_fn(y_true, y_pred)
self.w_losses.assign_add(tf.reduce_sum(loss))
def result(self):
return self.w_losses
# ### Model
N_ENCODERS = 2
LEARNING_RATE = 0.0003
STEPS_PER_EPOCH = 184
SMOOTH_L1_LOSS = tf.keras.losses.Huber()
def resize_block(input):
conv_transpose = layers.Conv2DTranspose(2 + 1, 3, strides=(2, 2), padding="same")(
input
)
normalization = layers.Normalization()(conv_transpose)
return normalization
class IoUMetric(metrics.BinaryIoU):
def __init__(self, name="iou", from_logits=False, **kwargs):
super().__init__(name=name, **kwargs)
self.transform = lambda x: tf.math.softmax(x) if from_logits else x
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.split(self.transform(y_pred), y_pred.shape[-1], axis=-1)[1]
super().update_state(y_true, y_pred)
def create_model():
inp = tf.keras.Input((IMG_SIZE, IMG_SIZE, 3))
encoder_blocks = list(
accumulate(
[inp] + list(range(1, N_ENCODERS + 1)),
lambda x, idx: encoder_block(x, idx, name="ENC_BLOCK_{}".format(idx)),
)
)
# Remove the initial input from the list of encoder blocks
encoder_blocks.pop(0)
# Create the mid block. It is kept separate from the encoder blocks
# because it doesn't have a corresponding decoder block.
mid_block = encoder_block(encoder_blocks[-1], N_ENCODERS + 1, name="MID_BLOCK")
decoder_blocks = list(
accumulate(
[mid_block] + list(enumerate(reversed(encoder_blocks))),
lambda x, item: decoder_block(
x, item[1], N_ENCODERS - item[0], "DEC_BLOCK_{}".format(item[0] + 1)
),
)
)
# Remove the mid block from the list of decoder blocks
decoder_blocks.pop(0)
output = resize_block(decoder_blocks[-1])
m = tf.keras.Model(inputs=inp, outputs=output)
lr_schedule = optimizers.schedules.InverseTimeDecay(
LEARNING_RATE,
decay_steps=STEPS_PER_EPOCH * 10,
decay_rate=0.75,
staircase=False,
)
optimizer = optimizers.Adam(lr_schedule)
loss = compute_loss()
# metrics = [compute_h_loss(), compute_w_loss()]
metrics = [HLoss(), WLoss()]
# metrics = []
m.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return m
model = create_model()
model.summary()
utils.plot_model(model, show_shapes=True)
# # Training
def train(model, tds, vds, epochs=100, callbacks=[]):
tds = tds.prefetch(buffer_size=tf.data.AUTOTUNE)
vds = vds.prefetch(buffer_size=tf.data.AUTOTUNE) if vds else None
# tds = tds.prefetch(buffer_size=tf.data.AUTOTUNE).take(1)
# vds = vds.prefetch(buffer_size=tf.data.AUTOTUNE).take(1) if vds else None
history = model.fit(tds, validation_data=vds, epochs=epochs, callbacks=callbacks)
return history
checkpoint_name_tmpl = "weights-epoch-{epoch:d}-val_loss-{val_loss:.4f}.h5"
# checkpoint_name_tmpl = 'weights-epoch-{epoch:d}-val_accuracy-{val_accuracy:.4f}.h5'
checkpoint = cbs.ModelCheckpoint(
checkpoint_name_tmpl, save_best_only=True, monitor="val_loss"
)
EPOCHS = 10
# EPOCHS = 2
# EPOCHS = 50
BATCH_SIZE = 16
callbacks = [checkpoint]
tds = (
train_prep_ds.cache().shuffle(1000, reshuffle_each_iteration=True).batch(BATCH_SIZE)
)
vds = val_prep_ds.batch(256).cache()
hist = train(model, tds, vds, epochs=EPOCHS, callbacks=callbacks)
with open("training.results.pkl", "wb") as f:
pickle.dump(hist.history, f)
# # Training Metrics
def plot_history(h):
fig, axes = plt.subplots(1, 1, figsize=(10, 5), facecolor="w", edgecolor="k")
axes.plot(h["loss"], label="Loss")
axes.plot(h["h_loss"], label="Height Loss")
axes.plot(h["w_loss"], label="Width Loss")
axes.plot(h["val_loss"], label="Validation Loss")
axes.plot(h["val_h_loss"], label="Height Validation Loss")
axes.plot(h["val_w_loss"], label="Width Validation Loss")
axes.set_xlabel("Epoch")
axes.set_ylabel("Loss")
axes.set_title("Losses")
fig.legend()
plot_history(hist.history)
hist.history
# # Evaluation
# model.evaluate(test_prep_ds.batch(256))
# batch = iter(test_prep_ds.shuffle(buffer_size=5).batch(10)).next()
# # masks = tf.expand_dims(tf.math.argmax(tf.math.softmax(model(batch[0]), axis=-1), axis=-1), axis=-1)
# # masks = to_binary(tf.math.sigmoid(model(batch[0])), 1)
# masks = tf.split(tf.math.softmax(model(batch[0])), 2, axis=-1)[1]
# for image, mask in zip(batch[0], masks):
# sample = {'Input Image': image, 'Input Mask': mask}
# plot_grid(sample, cols=2)
# masks.shape
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import networkx as nx  # for building and drawing the sudoku constraint graph
import matplotlib.pyplot as plt
from difflib import SequenceMatcher
import time
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# https://medium.com/code-science/sudoku-solver-graph-coloring-8f1b4df47072
sudokuList = pd.read_csv("../input/sudoku/sudoku.csv")
print(sudokuList)
# As we can see, sudoku.csv has 2 columns: puzzle and solution
sudoku = sudokuList.loc[0]
puzzle = sudoku["puzzle"]
solution = sudoku["solution"]
puzzle = list(puzzle)
print(puzzle)
puzzle = greedyApproach(puzzle)
print(puzzle)
puzzle = "".join(puzzle)
print(puzzle)
# puzzle = np.array(puzzle).reshape(9,9)
# print(type(puzzle))
# print((puzzle))
greedyScore = 0
greedyTime = 0
greedyCompleteMatch = 0
welshPowellScore = 0
welshPowellTime = 0
welshPowellCompleteMatch = 0
itrCount = 200000
for i in range(itrCount):
if i % (itrCount / 100) == 0:
print(i * 100 / itrCount, "%,", end=" ")
sudoku = sudokuList.loc[i]
puzzle = sudoku["puzzle"]
solution = sudoku["solution"]
puzzle = list(puzzle)
    # solve a fresh copy of the puzzle using greedy
    startTime = time.time()
    puzzleSol = greedyApproach(list(puzzle))
    greedyTime = greedyTime + (time.time() - startTime)
    puzzleSol = "".join(puzzleSol)
score = SequenceMatcher(None, puzzleSol, solution).ratio()
greedyScore = greedyScore + score
if score == 1:
greedyCompleteMatch = greedyCompleteMatch + 1
    # solve a fresh copy of the puzzle using welsh powell
    startTime = time.time()
    puzzleSol = welshPowell(list(puzzle))
    welshPowellTime = welshPowellTime + (time.time() - startTime)
    puzzleSol = "".join(puzzleSol)
score = SequenceMatcher(None, puzzleSol, solution).ratio()
welshPowellScore = welshPowellScore + score
if score == 1:
welshPowellCompleteMatch = welshPowellCompleteMatch + 1
greedyScore = greedyScore / itrCount
welshPowellScore = welshPowellScore / itrCount
print("100.0%")
print("Similarity Score for greedy algo: ", greedyScore)
print("Time taken by greedy: ", greedyTime)
print("Complete Match in greedy: ", greedyCompleteMatch)
print("Similarity Score for welshPowell: ", welshPowellScore)
print("Time taken by welshPowell: ", welshPowellTime)
print("Complete Match in welshPowell: ", welshPowellCompleteMatch)
def greedyApproach(puzzle):
    # a cell is treated as already colored (visited) if puzzle[i] != "0"
colors = {"1", "2", "3", "4", "5", "6", "7", "8", "9"}
for i in range(0, 81):
if puzzle[i] != "0":
continue
row = int(i / 9)
col = i % 9
for c in colors:
if isSafe(row, col, c, puzzle):
puzzle[i] = c
break
return puzzle
def welshPowell(puzzle):
    # the degree of each cell in the sudoku constraint graph is 20
    # (8 row-wise, 8 column-wise, and the remaining 4 elsewhere in its 3x3 block),
    # so sorting cells by degree (Welsh-Powell) reduces to plain numerical order here
    # (see the degree sanity check after the graph is built below)
colors = {"1", "2", "3", "4", "5", "6", "7", "8", "9"}
for c in colors:
for i in range(0, 81):
if puzzle[i] != "0":
continue
row = int(i / 9)
col = i % 9
if isSafe(row, col, c, puzzle):
puzzle[i] = c
break
return puzzle
def backTracking(puzzle):
    # recursive backtracking: fill the first empty cell, try each digit, undo on failure
    colors = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
    for i in range(0, 81):
        if puzzle[i] != "0":
            continue
        row = int(i / 9)
        col = i % 9
        for c in colors:
            if isSafe(row, col, c, puzzle):
                puzzle[i] = c
                if backTracking(puzzle) is not None:
                    return puzzle
                puzzle[i] = "0"
        # no digit fits in this empty cell -> backtrack
        return None
    # no empty cells left -> solved
    return puzzle
def isSafe(row, col, color, puzzle):
for i in range(0, 9):
index = row * 9 + i
if puzzle[index] == color:
return False
index = i * 9 + col
if puzzle[index] == color:
return False
index = ((int(row / 3) * 3) + int(i / 3)) * 9 + ((int(col / 3) * 3) + (i % 3))
if puzzle[index] == color:
return False
return True
print("Function ready")
x = "0"
x = chr(ord(x) + 1)
print(x)
nodes = np.arange(1, 82)
edges = []
# row wise connections (cells in the same row share consecutive node numbers)
k = 0
while k < 81:
for i in range(k + 1, k + 10):
for j in range(i + 1, k + 10):
edges.append(i)
edges.append(j)
k = k + 9
# column wise connections (cells in the same column differ by 9)
for k in range(0, 9):
for i in range(k + 1, k + 74, 9):
for j in range(i + 9, k + 74, 9):
edges.append(i)
edges.append(j)
# block wise
for i in range(1, 10):
for j in range(1, 10):
x = (i - 1) * 9 + j
block = (int((i - 1) / 3) * 3) + 1 + int((j - 1) / 3)
for r in range(1, 4):
for c in range(1, 4):
row = r + (int((block - 1) / 3) * 3)
col = c + ((int(block + 2) % 3) * 3)
y = (row - 1) * 9 + col
if i != row and j != col:
edges.append(x)
edges.append(y)
edges = np.array(edges).reshape(-1, 2)
print("Total nodes: ", len(nodes))
print("Total edges: ", len(edges))
graph = []
graph = nx.Graph()
graph.add_nodes_from(nodes) # adds nodes
graph.add_edges_from(edges)
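# Sanity check (sketch): every cell in the sudoku constraint graph should have degree 20
# (8 row neighbours + 8 column neighbours + 4 remaining block neighbours), matching the
# assumption made in welshPowell above.
degrees = [d for _, d in graph.degree()]
print("min/max cell degree:", min(degrees), max(degrees))  # expected: 20 20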
nx.draw(graph, with_labels=True, node_color="red")
plt.show()
sudoku = sudokuList.loc[0]
puzzle = sudoku["puzzle"]
solution = sudoku["solution"]
puzzle = list(puzzle)
print((puzzle))
puzzleGraph = graph
nx.draw(puzzleGraph, with_labels=True)
plt.show()
|
import os
import random
import pandas as pd
import numpy as np
import librosa
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_io as tfio
from keras.models import Sequential
from keras.layers import Dense
from tensorflow.keras.layers import Dropout
model_google = hub.load(
"https://kaggle.com/models/google/bird-vocalization-classifier/frameworks/tensorFlow2/variations/bird-vocalization-classifier/versions/2"
)
# set the number of files to select for each species
N = 200
augmented_path = "/kaggle/input/birdclef2023-augmented/5s_augmented_audio"
# create empty dataframes for training and validation sets
selected_files_df = pd.DataFrame(columns=["file_path", "species"])
counter = 0
# walk through all folders in 5s_audio_frames
for root, dirs, files in os.walk("/kaggle/input/birdclef-5s-frames/5s_audio_frames"):
print(counter)
# print('\r{}'.format(counter), end='')
counter += 1
# check if the current folder is a species folder
if len(files) > 0 and len(dirs) == 0:
# get the species name from the current folder
species = os.path.basename(root)
# check if the number of files for this species is greater than N
if len(files) > N:
augmented = False
# select N files randomly
selected_files = random.sample(files, N)
elif len(files) == N:
augmented = False
# select all files
selected_files = files
else:
augmented = True
species = os.path.basename(root)
files_2 = os.listdir(os.path.join(augmented_path, species))
selected_files = random.sample(files_2, N)
# split the selected files into training and validation sets
# random.shuffle(selected_files)
# train_files = selected_files[:int(0.8*len(selected_files))]
# valid_files = selected_files[int(0.8*len(selected_files)):]
# add the file paths to the dataframes
if augmented == False:
for file in selected_files:
file_path = os.path.join(root, file)
                new_row_df = pd.DataFrame([{"file_path": file_path, "species": species}])
                selected_files_df = pd.concat(
                    [selected_files_df, new_row_df], ignore_index=True
                )
else:
for file in selected_files:
file_path = os.path.join(augmented_path, species, file)
                new_row_df = pd.DataFrame([{"file_path": file_path, "species": species}])
                selected_files_df = pd.concat(
                    [selected_files_df, new_row_df], ignore_index=True
                )
len(selected_files_df)
pd.set_option("display.max_rows", 3000)
# new_row = pd.DataFrame(columns=[f"col_{i}" for i in range(1280)], index=[0])
# new_row.iloc[0,:] = np.zeros(1280)
# new_row2 = pd.DataFrame(columns=["pri_label","full_path"], index=[0])
# embed_vec = pd.concat([new_row, new_row2], axis=1)
# row_embed_vect = 0
# #genrate 5s files
# for index in selected_files_df.index:
# print(index)
# # print(train_df.loc[index]["file_path"])
# # print('\r{}'.format(index), end='')
# audio, sample_rate = librosa.load(selected_files_df.loc[index]["file_path"],sr=32000)
# logits, embeddings = model_google.infer_tf(audio[np.newaxis, :])
# numpy_array = np.array(embeddings)
# df_temp1 = pd.DataFrame(numpy_array)
# new_row.iloc[0] = df_temp1.iloc[0]
# new_row2.loc[0]["pri_label"] = selected_files_df.loc[index]["species"]
# new_row2.loc[0]["full_path"] = selected_files_df.loc[index]["file_path"]
# temp_emb = pd.concat([new_row, new_row2], axis=1)
# if (row_embed_vect == 0):
# embed_vec.loc[0] = temp_emb.loc[0]
#
# else:
# embed_vec = embed_vec.append( temp_emb)
# row_embed_vect = row_embed_vect+1
#
import concurrent.futures
new_row = pd.DataFrame(columns=[f"col_{i}" for i in range(1280)], index=[0])
new_row.iloc[0, :] = np.zeros(1280)
new_row2 = pd.DataFrame(columns=["pri_label", "full_path"], index=[0])
embed_vec = pd.concat([new_row, new_row2], axis=1)
def process_file(index):
    # load the 5-second clip and compute its embedding with the pretrained classifier
    audio, sample_rate = librosa.load(
        selected_files_df.loc[index]["file_path"], sr=32000
    )
    logits, embeddings = model_google.infer_tf(audio[np.newaxis, :])
    # build the row locally so concurrent workers do not overwrite the shared
    # new_row/new_row2 buffers defined above
    temp_emb = pd.DataFrame(
        np.array(embeddings)[:1], columns=[f"col_{i}" for i in range(1280)]
    )
    temp_emb["pri_label"] = selected_files_df.loc[index]["species"]
    temp_emb["full_path"] = selected_files_df.loc[index]["file_path"]
    return temp_emb
batch_size = 1000
batch_num = 1
with concurrent.futures.ThreadPoolExecutor() as executor:
results = [
executor.submit(process_file, index) for index in selected_files_df.index
]
for i, future in enumerate(concurrent.futures.as_completed(results)):
temp_emb = future.result()
        embed_vec = pd.concat([embed_vec, temp_emb], ignore_index=True)
if (i + 1) % batch_size == 0:
filename = f"embed_vec_batch_{batch_num}.csv"
embed_vec.to_csv(filename, index=False)
print(f"Saved {batch_size} rows to {filename}")
embed_vec = pd.concat([new_row, new_row2], axis=1)
batch_num += 1
if i % 100 == 0:
print(f"P {i+1}/{len(results)} f")
# print(get_ram_usage())
# print(get_cpu_usage())
# print(get_gpu_usage())
# print_top_5_ram_variables()
print("---------------------")
# print(f"P {i+1}/{len(results)} f")
# print('\r{}',f"Processed {i+1}/{len(results)} files", end='')
# print('\r{}'.format(index), end='')
# embed_vec.to_csv("embedding-features-birdCLEF2023.csv")
from sklearn.model_selection import train_test_split
X = embed_vec.drop(["pri_label", "full_path"], axis=1)
y = embed_vec["pri_label"]
# Split X and y into training and validation sets
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
train_y_np = train_y.values[:, np.newaxis]
val_y_np = val_y.values[:, np.newaxis]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
label_encoder = LabelEncoder()
one_hot_encoder = OneHotEncoder(sparse=False)
one_hot_encoder.fit(train_y_np.reshape(-1, 1))
train_y = one_hot_encoder.transform(train_y_np)
val_y = one_hot_encoder.transform(val_y_np)
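# A minimal sketch (toy label strings, illustrative only) of the one-hot encoding step
# above, mirroring the encoder configuration used for the species labels.
_toy_labels = np.array([["species_a"], ["species_b"], ["species_a"]])
print(OneHotEncoder(sparse=False).fit_transform(_toy_labels))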
train_X_tf = tf.convert_to_tensor(train_X.values, dtype=tf.float32)
val_X_tf = tf.convert_to_tensor(val_X.values, dtype=tf.float32)
print(train_y.shape)
print(val_y.shape)
train_y.shape[1]
# Define the input shape
input_shape = (train_X_tf.shape[1],)
model = Sequential()
model.add(Dense(2048, activation="relu", input_shape=input_shape))
model.add(Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization())
model.add(Dense(3000, activation="relu"))
model.add(Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization())
model.add(Dense(3000, activation="relu"))
model.add(Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization())
model.add(Dense(2000, activation="relu"))
model.add(Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization())
model.add(Dense(1000, activation="relu"))
model.add(Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization())
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization())
model.add(Dense(train_y.shape[1], activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(
train_X_tf, train_y, epochs=50, batch_size=20, validation_data=(val_X_tf, val_y)
)
model.save("pretrained_model.h5")
|
import copy
import torch
import math
import numpy as np
import torch.nn as nn
from typing import Optional, Any
import torch.nn.functional as F
from torch import Tensor
def _get_clones(mod, n):
return nn.ModuleList([copy.deepcopy(mod) for _ in range(n)])
def data_pre1(x):
"取出54个点"
col0 = [489, 490, 491, 494, 496, 497, 500, 501, 502, 503, 504, 505]
col1 = [i + 468 for i in range(21)] # left_hand
col2 = [j + 522 for j in range(21)] # right_hand
x_pose = x[:, col0, :2]
x_left = x[:, col1, :2]
x_right = x[:, col2, :2]
x_left = x_left - x_left[~torch.isnan(x_left)].mean(0, keepdim=True)
x_left = x_left / x_left[~torch.isnan(x_left)].std(0, keepdim=True)
x_right = x_right - x_right[~torch.isnan(x_right)].mean(0, keepdim=True)
x_right = x_right / x_right[~torch.isnan(x_right)].std(0, keepdim=True)
x_pose = x_pose - x_pose[~torch.isnan(x_pose)].mean(0, keepdim=True)
x_pose = x_pose / x_pose[~torch.isnan(x_pose)].std(0, keepdim=True)
x_out = torch.cat([x_pose, x_left, x_right], dim=1)
x_out = torch.where(torch.isnan(x_out), torch.full_like(x_out, 0.0), x_out)
return x_out
def data_pre2(x):
"取出54个点"
col0 = [489, 490, 491, 494, 496, 497, 500, 501, 502, 503, 504, 505]
col1 = [i + 468 for i in range(21)] # left_hand
col2 = [j + 522 for j in range(21)] # right_hand
x_pose = x[:, col0, :2]
x_left = x[:, col1, :2]
x_right = x[:, col2, :2]
x_left = torch.where(torch.isnan(x_left), torch.full_like(x_left, 0.0), x_left)
x_right = torch.where(torch.isnan(x_right), torch.full_like(x_right, 0.0), x_right)
x_left = x_left - x_left.mean(1, keepdim=True)
x_left = x_left / x_left.std(dim=1, keepdim=True)
x_right = x_right - x_right.mean(1, keepdim=True)
x_right = x_right / x_right.std(dim=1, keepdim=True)
x_out = torch.cat([x_pose, x_left, x_right], dim=1)
x_out = torch.where(torch.isnan(x_out), torch.full_like(x_out, 0.0), x_out)
return x_out
def data_pre3(x):
"取出54个点"
col0 = [489, 490, 491, 494, 496, 497, 500, 501, 502, 503, 504, 505]
col1 = [i + 468 for i in range(21)] # left_hand
col2 = [j + 522 for j in range(21)] # right_hand
x_pose = x[:, col0, :2]
x_left = x[:, col1, :2]
x_right = x[:, col2, :2]
x_out = torch.cat([x_pose, x_left, x_right], dim=1)
x_out = torch.where(torch.isnan(x_out), torch.full_like(x_out, 0.0), x_out)
return x_out
class MultiheadAttention(nn.Module):
def __init__(self, d_model, num_heads):
super(MultiheadAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
self.head_dim = d_model // num_heads
self.q_linear = nn.Linear(d_model, d_model, bias=True)
self.k_linear = nn.Linear(d_model, d_model, bias=True)
self.v_linear = nn.Linear(d_model, d_model, bias=True)
self.out_linear = nn.Linear(d_model, d_model, bias=True)
def forward(self, query, key, value, mask=None):
# x: [batch_size, seq_len, embed_dim]
q = (
self.q_linear(query)
.view(query.size(0), -1, self.num_heads, self.head_dim)
.transpose(1, 2)
) # [batch_size, num_heads, seq_len, head_dim]
k = (
self.k_linear(key)
.view(key.size(0), -1, self.num_heads, self.head_dim)
.transpose(1, 2)
) # [batch_size, num_heads, seq_len, head_dim]
v = (
self.v_linear(value)
.view(value.size(0), -1, self.num_heads, self.head_dim)
.transpose(1, 2)
) # [batch_size, num_heads, seq_len, head_dim]
        # compute scaled dot-product attention scores
scores = torch.matmul(q, k.transpose(-2, -1)) / (
self.head_dim**0.5
) # [batch_size, num_heads, seq_len, seq_len]
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1)
scores = scores.masked_fill(mask == 0, float("-inf"))
attn = torch.softmax(scores, dim=-1)
        # compute the attention-weighted sum of the values
attn_out = torch.matmul(attn, v) # [batch_size, num_heads, seq_len, head_dim]
attn_out = (
attn_out.transpose(1, 2)
.contiguous()
.view(query.size(0), -1, self.num_heads * self.head_dim)
) # [batch_size, seq_len, embed_dim]
        # output projection
attn_out = self.out_linear(attn_out) # [batch_size, seq_len, embed_dim]
return attn_out
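# A quick shape check (sketch, illustrative sizes only): the custom attention above maps
# (batch, seq_len, d_model) to (batch, seq_len, d_model).
_mha = MultiheadAttention(d_model=108, num_heads=9)
_x = torch.randn(1, 7, 108)
print(_mha(_x, _x, _x).shape)  # torch.Size([1, 7, 108])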
class TransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward, dropout):
super().__init__()
self.self_attn = MultiheadAttention(d_model, nhead)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
# self-attention
src2 = self.self_attn(src, src, src)
src = src + self.dropout(src2)
src = self.norm1(src)
# feedforward
src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
src = src + self.dropout(src2)
src = self.norm2(src)
return src
class TransformerEncoder(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward, dropout, num_layers):
super().__init__()
self.layers = nn.ModuleList(
[
TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)
for i in range(num_layers)
]
)
def forward(self, src, mask=None, src_key_padding_mask=None):
output = src
for layer in self.layers:
output = layer(output)
return output
class SPOTERTransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=1024, dropout=0.1):
super(SPOTERTransformerDecoderLayer, self).__init__()
self.multihead_attn = MultiheadAttention(d_model, nhead)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
def forward(self, tgt, memory):
tgt = tgt + self.dropout1(tgt)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
class TransformerDecoder(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward, dropout, num_layers):
super().__init__()
self.layers = nn.ModuleList(
[
SPOTERTransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout)
for i in range(num_layers)
]
)
def forward(self, tgt, memory):
output = tgt
for layer in self.layers:
output = layer(output, memory)
return output
class SPOTER1(nn.Module):
def __init__(
self,
num_classes,
hidden_dim=108,
nhead=9,
dim_feedforward=1024,
dropout=0.1,
num_layers=1,
):
super().__init__()
self.row_embed = nn.Parameter(torch.rand(50, hidden_dim))
self.pos = nn.Parameter(
torch.cat([self.row_embed[0].unsqueeze(0).repeat(1, 1, 1)], dim=-1)
.flatten(0, 1)
.unsqueeze(0)
)
self.class_query = nn.Parameter(torch.rand(1, hidden_dim))
self.Encoder = TransformerEncoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.Decoder = TransformerDecoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.linear_class = nn.Linear(hidden_dim, num_classes)
def forward(self, inputs):
inputs = data_pre1(inputs)
        h = torch.unsqueeze(
            inputs.flatten(start_dim=1), 0
        ).float()  # flatten each frame from (54, 2) to (108,), then add a batch dim: h has shape (1, T, 108)
        src = self.pos + h
        tgt = self.class_query.unsqueeze(0)
        memory = self.Encoder(src)
        out = self.Decoder(tgt, memory)
sign = self.linear_class(out)
sign = sign.reshape(-1)
sign = F.softmax(sign, dim=0)
return sign
class SPOTER2(nn.Module):
def __init__(
self,
num_classes,
hidden_dim=512,
nhead=4,
dim_feedforward=2048,
dropout=0.1,
num_layers=1,
):
super().__init__()
self.row_embed = nn.Parameter(torch.rand(50, hidden_dim))
self.pos = nn.Parameter(
torch.cat([self.row_embed[0].unsqueeze(0).repeat(1, 1, 1)], dim=-1)
.flatten(0, 1)
.unsqueeze(0)
)
self.class_query = nn.Parameter(torch.rand(1, hidden_dim))
self.embeding = nn.Linear(108, hidden_dim)
self.Encoder = TransformerEncoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.Decoder = TransformerDecoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.linear_class = nn.Linear(hidden_dim, num_classes)
def forward(self, inputs):
inputs = data_pre2(inputs)
        h = torch.unsqueeze(
            inputs.flatten(start_dim=1), 0
        ).float()  # flatten each frame from (54, 2) to (108,), then add a batch dim: h has shape (1, T, 108)
        h = self.embeding(h)
        src = self.pos + h
        tgt = self.class_query.unsqueeze(0)
        memory = self.Encoder(src)
        out = self.Decoder(tgt, memory)
sign = self.linear_class(out)
sign = sign.reshape(-1)
sign = F.softmax(sign, dim=0)
return sign
class SPOTER3(nn.Module):
def __init__(
self,
num_classes,
hidden_dim=108,
nhead=9,
dim_feedforward=2048,
dropout=0.1,
num_layers=6,
):
super().__init__()
self.row_embed = nn.Parameter(torch.rand(50, hidden_dim))
self.pos = nn.Parameter(
torch.cat([self.row_embed[0].unsqueeze(0).repeat(1, 1, 1)], dim=-1)
.flatten(0, 1)
.unsqueeze(0)
)
self.class_query = nn.Parameter(torch.rand(1, hidden_dim))
self.Encoder = TransformerEncoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.Decoder = TransformerDecoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.linear_class = nn.Linear(hidden_dim, num_classes)
def forward(self, inputs):
inputs = data_pre3(inputs)
        h = torch.unsqueeze(
            inputs.flatten(start_dim=1), 0
        ).float()  # flatten each frame from (54, 2) to (108,), then add a batch dim: h has shape (1, T, 108)
        src = self.pos + h
        tgt = self.class_query.unsqueeze(0)
        memory = self.Encoder(src)
        out = self.Decoder(tgt, memory)
sign = self.linear_class(out)
sign = sign.reshape(-1)
sign = F.softmax(sign, dim=0)
return sign
def data_pre6(x):
"""
    Extract 54 + 16*2 + 4 + 20 = 110 keypoints (pose, hands, eyes, nose, lips)
    return: data_out of shape (T, 110, 2)
"""
POSE = [489, 490, 491, 494, 496, 497, 500, 501, 502, 503, 504, 505] # pose 12
left_hand = [i + 468 for i in range(21)] # left_hand 21
right_hand = [j + 522 for j in range(21)] # right_hand 21
REYE = [
33,
7,
163,
144,
145,
153,
154,
155,
133,
246,
161,
160,
159,
158,
157,
173,
] # 16
LEYE = [
263,
249,
390,
373,
374,
380,
381,
382,
362,
466,
388,
387,
386,
385,
384,
398,
] # 16
NOSE = [1, 2, 98, 327] # 4
SLIP = [
78,
95,
88,
178,
87,
14,
317,
402,
318,
324,
308,
191,
80,
81,
82,
13,
312,
311,
310,
415,
] # 20
x_left = x[:, left_hand, :2]
x_right = x[:, right_hand, :2]
left_isnan = torch.isnan(x_left).any(dim=2)
right_isnan = torch.isnan(x_right).any(dim=2)
both_nan_frames = torch.nonzero(left_isnan & right_isnan).squeeze()
both_nan_frames_list = both_nan_frames[:, 0].tolist()
both_nan_frames_list = sorted(list(set(both_nan_frames_list)))
all_frames = list(range(len(x)))
    # compute the list of valid frame indices (frames where at least one hand is present)
valid_frames = list(set(all_frames) - set(both_nan_frames_list))
# print(valid_frames,len(valid_frames),len(both_nan_frames_list),len(x))
nan_frames_fre = len(both_nan_frames_list) / len(x)
if nan_frames_fre > 0.0 and nan_frames_fre < 0.8:
x = x[valid_frames]
x_left = x_left[valid_frames]
x_right = x_right[valid_frames]
x_pose = x[:, POSE, :2]
x_reye = x[:, REYE, :2]
x_leye = x[:, LEYE, :2]
x_nose = x[:, NOSE, :2]
x_slip = x[:, SLIP, :2]
x_left = x_left - x_left[~torch.isnan(x_left)].mean(0, keepdim=True)
x_left = x_left / x_left[~torch.isnan(x_left)].std(0, keepdim=True)
x_right = x_right - x_right[~torch.isnan(x_right)].mean(0, keepdim=True)
x_right = x_right / x_right[~torch.isnan(x_right)].std(0, keepdim=True)
x_out = torch.cat([x_pose, x_left, x_right], dim=1)
x_out = torch.where(torch.isnan(x_out), torch.full_like(x_out, 0.0), x_out)
x_reye = torch.where(torch.isnan(x_reye), torch.full_like(x_reye, 0.0), x_reye)
x_leye = torch.where(torch.isnan(x_leye), torch.full_like(x_leye, 0.0), x_leye)
x_nose = torch.where(torch.isnan(x_nose), torch.full_like(x_nose, 0.0), x_nose)
x_slip = torch.where(torch.isnan(x_slip), torch.full_like(x_slip, 0.0), x_slip)
data_out = torch.cat([x_out, x_reye, x_leye, x_nose, x_slip], dim=1) # (None,110,2)
data_out = torch.where(
torch.isnan(data_out), torch.full_like(data_out, 0.0), data_out
)
return data_out
class XEmbed(nn.Module):
def __init__(self, point_dim, embed_dim):
super().__init__()
self.v = nn.Sequential(
nn.Linear(point_dim, embed_dim * 2, bias=True),
nn.LayerNorm(embed_dim * 2),
nn.ReLU(inplace=True),
nn.Linear(embed_dim * 2, embed_dim, bias=True),
nn.LayerNorm(embed_dim),
nn.ReLU(inplace=True),
)
def forward(self, x):
v = self.v(x)
return v
class SPOTER6(nn.Module):
"""
Implementation of the SPOTER (Sign POse-based TransformER) architecture for sign language recognition from sequence
of skeletal data.
"""
def __init__(
self,
num_classes,
hidden_dim=220,
point_num=110,
nhead=4,
dim_feedforward=1024,
dropout=0.1,
num_layers=2,
):
super().__init__()
self.x_embed = XEmbed(point_num * 2, hidden_dim)
self.row_embed = nn.Parameter(torch.rand(50, hidden_dim))
self.pos = nn.Parameter(
torch.cat([self.row_embed[0].unsqueeze(0).repeat(1, 1, 1)], dim=-1)
.flatten(0, 1)
.unsqueeze(0)
)
self.class_query = nn.Parameter(torch.rand(1, hidden_dim))
self.Encoder = TransformerEncoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.Decoder = TransformerDecoder(
hidden_dim, nhead, dim_feedforward, dropout, num_layers
)
self.linear_class = nn.Linear(hidden_dim, num_classes)
def forward(self, inputs):
        # inputs: raw landmarks of shape (T, 543, 3); data_pre6 reduces them to (T', 110, 2)
        inputs = data_pre6(inputs)
        h = torch.unsqueeze(
            inputs.flatten(start_dim=1), 0
        ).float()  # flatten each frame from (110, 2) to (220,), then add a batch dim: h has shape (1, T', 220)
        h = self.x_embed(h)
        # after the embedding, h has shape (1, T', hidden_dim)
        src = self.pos + h
        tgt = self.class_query.unsqueeze(0)
        memory = self.Encoder(src)
        out = self.Decoder(tgt, memory)
# out = F.dropout(out,p=0.1,training=self.training)
sign = self.linear_class(out)
sign = sign.reshape(-1)
sign = F.softmax(sign, dim=0)
return sign
slr_model6 = SPOTER6(num_classes=250, hidden_dim=220, num_layers=2)
model6_pth = "/kaggle/input/train5-pth/train6_aug_best.pth"
slr_model6.load_state_dict(torch.load(model6_pth, map_location=torch.device("cpu")))
slr_model1 = SPOTER1(num_classes=250)
model_pth1 = "/kaggle/input/train5-pth/all_train5_best.pth"
slr_model1.load_state_dict(torch.load(model_pth1, map_location=torch.device("cpu")))
# slr_model.train(False)
# slr_model.eval()
slr_model2 = SPOTER2(num_classes=250, hidden_dim=512)
model_pth2 = "/kaggle/input/train5-pth/epoch_27_512dim.pth"
slr_model2.load_state_dict(torch.load(model_pth2, map_location=torch.device("cpu")))
slr_model3 = SPOTER3(num_classes=250, hidden_dim=108)
model_pth3 = "/kaggle/input/train3-pth/fold_0.pth"
slr_model3.load_state_dict(torch.load(model_pth3, map_location=torch.device("cpu")))
REYE = [33, 7, 163, 144, 145, 153, 154, 155, 133, 246, 161, 160, 159, 158, 157, 173]
LEYE = [
263,
249,
390,
373,
374,
380,
381,
382,
362,
466,
388,
387,
386,
385,
384,
398,
]
NOSE = [1, 2, 98, 327]
SLIP = [
78,
95,
88,
178,
87,
14,
317,
402,
318,
324,
308,
191,
80,
81,
82,
13,
312,
311,
310,
415,
]
SPOSE = (
np.array(
[
11,
13,
15,
12,
14,
16,
23,
24,
]
)
+ 489
).tolist()
def do_hflip_hand(lhand, rhand):
rhand[..., 0] = 1 - rhand[..., 0]
lhand[..., 0] = 1 - lhand[..., 0]
rhand, lhand = lhand, rhand
return lhand, rhand
def do_hflip_eye(leye, reye):
reye[..., 0] = 1 - reye[..., 0]
leye[..., 0] = 1 - leye[..., 0]
reye, leye = leye, reye
return leye, reye
def do_hflip_spose(spose):
spose[..., 0] = 1 - spose[..., 0]
spose = spose[:, [3, 4, 5, 0, 1, 2, 7, 6]]
return spose
def do_hflip_slip(slip):
slip[..., 0] = 1 - slip[..., 0]
slip = slip[
:, [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + [19, 18, 17, 16, 15, 14, 13, 12, 11]
]
return slip
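# A minimal sketch (random toy tensors) of the hand-flip augmentation above: the x
# coordinates are mirrored around 0.5 and the two hands are swapped.
_lhand, _rhand = torch.rand(1, 21, 2), torch.rand(1, 21, 2)
_lflip, _rflip = do_hflip_hand(_lhand.clone(), _rhand.clone())
print(torch.allclose(_lflip[..., 0], 1 - _rhand[..., 0]))  # True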
class XEmbed(nn.Module):
def __init__(self, point_dim, embed_dim):
super().__init__()
self.v = nn.Sequential(
nn.Linear(point_dim, embed_dim * 2, bias=True),
nn.LayerNorm(embed_dim * 2),
nn.ReLU(inplace=True),
nn.Linear(embed_dim * 2, embed_dim, bias=True),
nn.LayerNorm(embed_dim),
nn.ReLU(inplace=True),
)
def forward(self, x):
# B,L = x.shape
v = self.v(x)
return v
x_embed = XEmbed(212, 256)
x = torch.randn([1, 56, 212])
y = x_embed(x)
print(y.shape)
class Net3(nn.Module):
def __init__(self, model1, model2, model3):
super().__init__()
self.model1 = model1
self.model2 = model2
self.model3 = model3
def forward(self, x):
y1 = self.model1(x)
y2 = self.model2(x)
y3 = self.model3(x)
out = 0.3 * y1 + 0.2 * y2 + 0.5 * y3
return out
# slr_model = Net(slr_model1,slr_model2,slr_model3)
# slr_model.train(False)
class Net2(nn.Module):
def __init__(self, model1, model2):
super().__init__()
self.model1 = model1
self.model2 = model2
def forward(self, x):
y1 = self.model1(x)
y2 = self.model2(x)
out = 0.3 * y1 + 0.7 * y2
return out
slr_model = Net2(slr_model1, slr_model6)
# slr_model.train(False)
# Test the model's accuracy
import tqdm
import numpy as np
import time
import json
import pandas as pd
def load_relevant_data_subset(pq_path):
ROWS_PER_FRAME = 543 # number of landmarks per frame
data_columns = ["x", "y", "z"]
data = pd.read_parquet(pq_path, columns=data_columns)
n_frames = int(len(data) / ROWS_PER_FRAME)
data = data.values.reshape(n_frames, ROWS_PER_FRAME, len(data_columns))
return data.astype(np.float32)
def load_dataset(file_location):
sign_dic = {}
label_dic_path = "/kaggle/input/asl-signs/sign_to_prediction_index_map.json"
with open(label_dic_path, "r") as sign_f:
sign_dic = json.load(sign_f)
df = pd.read_csv(file_location, encoding="utf-8")
df_signs = df["sign"].to_list()
    df_labels = [sign_dic.get(i) for i in df_signs]  # map sign names to integer labels via the dictionary
df_paths = df["path"].to_list()
return df_paths, df_labels
data_file = "/kaggle/input/asl-signs/train.csv"
df_paths, df_labels = load_dataset(data_file)
sign_list = [0] * 250
def test(slr_model, df_path, de_label):
slr_model.train(False)
frames = load_relevant_data_subset("/kaggle/input/asl-signs/" + df_path)
inputs = torch.Tensor(frames)
# inputs = data_pre(inputs)
# print(inputs)
# print(inputs.shape)
output = slr_model(inputs)
# print(output)
sign = torch.argmax(output)
# print(output)
print("pre:{}\t true:{}".format(sign, de_label))
if int(sign) == int(de_label):
sign_list[de_label] = sign_list[de_label] + 1
# print(sign)
for i in range(100):
test(slr_model6, df_paths[i], df_labels[i])
print(sign_list)
print(sum(sign_list))
import pandas
import numpy as np
import torch
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
def plot_confusion_matrix(model, df_paths, df_labels, device, num_classes):
true_labels, predicted_labels = [], []
pred_correct, pred_all = 0, 0
    data_top1 = {i: [0, 0] for i in range(num_classes)}  # per-class counts of samples and correct predictions
model.eval()
with torch.no_grad():
for i in range(len(df_paths)):
df_path = df_paths[i]
labels = df_labels[i]
inputs = load_relevant_data_subset("/kaggle/input/asl-signs/" + df_path)
# print(inputs)
# print(labels)
inputs = torch.Tensor(inputs)
inputs = inputs.to(device)
outputs = model(inputs)
pred = int(torch.argmax(torch.nn.functional.softmax(outputs, dim=0)))
label = int(labels)
predicted_labels.append(pred)
true_labels.append(label)
if pred == label:
pred_correct += 1
# print('pre:{}\t true:{}'.format(pred,label))
pred_all += 1
cm_pt = confusion_matrix(true_labels, predicted_labels, labels=range(num_classes))
return cm_pt, pred_correct, pred_all
# if torch.cuda.is_available():
# device = torch.device("cuda")
# else:
# device = torch.device("cpu")
# slr_model1.train(False)
# slr_model1.to(device)
# num_classes = 250
# cm, pred_correct, pred_all = plot_confusion_matrix(slr_model1, df_paths, df_labels, device, num_classes)
# print(pred_correct, pred_all, pred_correct/pred_all)
# print(cm)
# plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
# plt.colorbar()
# tick_marks = np.arange(num_classes)
# # plt.xticks(tick_marks, range(num_classes))
# # plt.yticks(tick_marks, range(num_classes))
# plt.xlabel('Predicted Label')
# plt.ylabel('True Label')
# plt.show()
# plt.savefig("/kaggle/working/confusion_matrix.png")
# # **Convert the PyTorch model to ONNX**
input_onnx_file = "/kaggle/working/model.onnx"
def run_convert_onnx(slr_model):
torch.onnx.export(
slr_model,
# torch.jit.script(input_net),
# torch.jit.trace(input_net, torch.zeros(100,num_landmark,3)), # model being run
torch.randn((60, 543, 3)), # torch.randn((60,54,2))
input_onnx_file, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=12, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=["inputs"], # the model's input names
output_names=["outputs"], # the model's output names
dynamic_axes={
"inputs": {0: "length"},
#'output': {0: 'length'},
},
# verbose = True,
)
run_convert_onnx(slr_model)
print("model.onnx saved !!")
# # **Check that the model outputs match**
# load the exported ONNX model
import onnxruntime as ort
model_path = "/kaggle/working/model.onnx"
ort_session = ort.InferenceSession(model_path)
# define a test input
import numpy as np
input_tensor = torch.randn(60, 543, 3)
input_data = input_tensor.numpy()
# # check the ONNX output
# print(ort_session.get_inputs())
ort_inputs = {ort_session.get_inputs()[0].name: input_data}
ort_outputs = ort_session.run(None, ort_inputs)
# print(ort_outputs)
# check the PyTorch output
# slr_model.train(False)
# slr_model.eval()
# slr_output = slr_model(input_tensor)
# print(slr_output.shape)
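# A minimal numeric comparison (sketch, assuming the exported ensemble slr_model and the
# ONNX session created above): the two outputs should agree up to small floating point error.
slr_model.eval()
with torch.no_grad():
    _torch_out = slr_model(input_tensor).numpy()
print("max abs diff (torch vs onnx):", np.abs(_torch_out - ort_outputs[0]).max())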
# # **Convert the ONNX model to a TensorFlow SavedModel**
from onnx_tf.backend import prepare
import onnx
TF_PATH = "tf_model" # 保存tf模型的位置
ONNX_PATH = "/kaggle/working/model.onnx" # onnx模型的path
onnx_model = onnx.load(ONNX_PATH) # load onnx model
tf_rep = prepare(onnx_model) # creating TensorflowRep object
tf_rep.export_graph(TF_PATH)
print("tf.saved_model() passed !!")
# # **Test that the TF SavedModel is accurate**
import tensorflow as tf
mobilenet_save_path = "/kaggle/working/tf_model"
loaded = tf.saved_model.load(mobilenet_save_path)
# print(list(loaded.signatures.keys()))
infer = loaded.signatures["serving_default"]
# print(infer)
# check the TF output
x1 = tf.constant(input_tensor)
label = infer(x1)
# print(label['outputs'])
# # **Package the converted .pb model**
packagePath = "/kaggle/working/tf_model"
zipPath = "/kaggle/working/"
# # **Convert the TF SavedModel to a TFLite model**
import tensorflow as tf
# print(tf.__version__)
saved_model_dir = "/kaggle/working/tf_model" # path to the SavedModel directory
tflite_path = "/kaggle/working/model.tflite" # path to the tflite model
# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS, # enable TensorFlow ops.
]
tflite_model = converter.convert()
open(tflite_path, "wb").write(tflite_model)
# # **Test that the converted TFLite model is accurate**
import tqdm
import tflite_runtime.interpreter as tflite
model_path = "/kaggle/working/model.tflite"
interpreter = tflite.Interpreter(model_path)
found_signatures = list(interpreter.get_signature_list().keys())
prediction_fn = interpreter.get_signature_runner("serving_default")
# input_tensor = torch.randn(50,543,3)
# input_data = input_tensor.numpy()
# print(input_data)
output = prediction_fn(inputs=input_data)
sign = np.argmax(output["outputs"])
print(output["outputs"])
# # **submission**
ROWS_PER_FRAME = 543 # number of landmarks per frame
import pandas as pd
def load_relevant_data_subset(pq_path):
data_columns = ["x", "y", "z"]
data = pd.read_parquet(pq_path, columns=data_columns)
n_frames = int(len(data) / ROWS_PER_FRAME)
data = data.values.reshape(n_frames, ROWS_PER_FRAME, len(data_columns))
return data.astype(np.float32)
import tqdm
import tflite_runtime.interpreter as tflite
import time
t1 = time.time()
model_path = "/kaggle/working/model.tflite"
interpreter = tflite.Interpreter(model_path)
found_signatures = list(interpreter.get_signature_list().keys())
pq_path = "/kaggle/input/asl-signs/train_landmark_files/18796/1020380433.parquet"
prediction_fn = interpreter.get_signature_runner("serving_default")
frames = load_relevant_data_subset(pq_path)
# print(frames)
output = prediction_fn(inputs=frames)
# print(output)
sign = np.argmax(output["outputs"])
print(sign, output["outputs"].shape)
t2 = time.time()
print("{:.2f}ms".format((t2 - t1) * 1000))
a = 61 / 152
b = round(a, 3)
c = {1: b}
print(c)
import json
import time
def load_dataset(file_location):
sign_dic = {}
label_dic_path = "/kaggle/input/asl-signs/sign_to_prediction_index_map.json"
with open(label_dic_path, "r") as sign_f:
sign_dic = json.load(sign_f)
    # read all data paths and labels from the dataset and return them (loaded into memory)
    # load the dataset csv file
df = pd.read_csv(file_location, encoding="utf-8")
df_signs = df["sign"].to_list()
    df_labels = [sign_dic.get(i) for i in df_signs]  # map sign names to integer labels via the dictionary
df_paths = df["path"].to_list()
return df_paths, df_labels
data_file = "/kaggle/input/asl-signs/train.csv"
df_paths, df_labels = load_dataset(data_file)
# print(df_paths[:50])
sign_list = [0] * 250
sign_all = [0] * 250
def run_tflite(df_path, de_label):
df_path = "/kaggle/input/asl-signs/" + df_path
model_path = "/kaggle/working/model.tflite"
interpreter = tflite.Interpreter(model_path)
found_signatures = list(interpreter.get_signature_list().keys())
prediction_fn = interpreter.get_signature_runner("serving_default")
frames = load_relevant_data_subset(df_path)
# print(frames.shape)
output = prediction_fn(inputs=frames)
sign = np.argmax(output["outputs"])
# print(output)
print("pre:{}\t true:{}".format(sign, de_label))
if sign == de_label:
sign_list[de_label] += 1
sign_all[de_label] += 1
t1 = time.time()
video_sum = 100
for i in range(900, 1000):
run_tflite(df_paths[i], df_labels[i])
t2 = time.time()
print(sign_list)
print(sum(sign_list))
print(sum(sign_all))
print("cost {:.4f}".format(t2 - t1))
print("pre video {:.4f}".format((t2 - t1) / video_sum))
import shutil
print(f"submit ok")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
pd.set_option("display.max_columns", None)
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from pprint import pprint
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn.metrics import classification_report
train_data = pd.read_csv(
"/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv"
)
test_data = pd.read_csv(
"/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv"
)
X, Y = train_data.drop("Made_Purchase", axis=1), train_data["Made_Purchase"]
# # Exploratory Data Analysis
X.head()
test_data.head()
X.info()
X.describe()
# ### List all numerical and categorical feature
# Note :- Target label is Made_Purchase, So we remove it. It is a boolean label.
num_col = []
cat_col = []
for col in X.columns.values:
if X[col].dtype == "object":
cat_col.append(col)
else:
num_col.append(col)
print(num_col)
print(cat_col)
# ### Let's see all the values in categorical feature.
for col in cat_col:
print(col)
print(" ", X[col].unique())
for col in cat_col:
print(col)
print(" ", test_data[col].unique())
# ### Check if classes is balance or not.
Y.value_counts()
# ### Check for Null Values
X.isna().sum()
print("total Null Values :", X.isna().sum().sum())
print(
"Null values where Purchased is made :",
train_data[train_data["Made_Purchase"] == True].isna().sum().sum(),
)
print(
"Null values where Purchased is not made :",
train_data[train_data["Made_Purchase"] == False].isna().sum().sum(),
)
corr = X.corr()
plt.figure(figsize=(15, 12))
sns.heatmap(corr, annot=True)
X.hist(figsize=(24, 16))
# # Data Preprocessing
# ## Fill the missing Values
# Method :- Use KNNImputer for numerical features; one-hot encode categorical features and then apply KNNImputer to them
#
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
num_tf = Pipeline([("KNNimpute", KNNImputer(n_neighbors=5))])
ord_tf = Pipeline(
[("OneHotEncoder", OneHotEncoder(sparse=False)), ("KNNimpute", KNNImputer())]
)
# ord_tf = Pipeline([('SimpleImputer', SimpleImputer(strategy="most_frequent"))])
combine_tf = ColumnTransformer(
transformers=[("num_tf", num_tf, num_col), ("ord_tf", ord_tf, cat_col)]
)
complete_pipeline = Pipeline([("combine_tf", combine_tf)])
X_encode = complete_pipeline.fit_transform(X, Y)
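# A minimal sketch (toy array, illustrative only) of how the KNNImputer used above fills
# a missing value from the average of its nearest neighbours.
_toy = np.array([[1.0, 2.0], [np.nan, 4.0], [3.0, 6.0]])
print(KNNImputer(n_neighbors=2).fit_transform(_toy))  # the NaN becomes (1 + 3) / 2 = 2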
# ### Convert the categorical feature into numerical feature
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
Y_label = lb.fit_transform(Y)
# Feature Scaling on all the columns
feat_pipe = Pipeline([("MinMaxScaler", MinMaxScaler())])
X_scaled = feat_pipe.fit_transform(X_encode, Y_label)
# ### Remove the class imbalance by using SMOTE
from imblearn.over_sampling import SMOTE
def remove_imbalance(X, y):
smote = SMOTE()
    x_smote, y_smote = smote.fit_resample(X, y)
return x_smote, y_smote
X_bal, Y_bal = remove_imbalance(X_scaled, Y_label)
pd.Series(Y_bal.ravel()).value_counts()
# ## Model Building
from sklearn.model_selection import train_test_split
# # 10% for test, 20% for validation and 70% for training
# X_data,X_test,Y_data,Y_test = train_test_split(X_bal,Y_bal,test_size=0.1)
# X_train,X_val,Y_train,Y_val = train_test_split(X_data,Y_data,test_size=0.22)
X_train, X_test, Y_train, Y_test = train_test_split(X_bal, Y_bal, test_size=0.2)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import RandomizedSearchCV
ada_clf_est = AdaBoostClassifier()
param_grid = {
"n_estimators": range(100, 401, 25),
"learning_rate": [0.001, 0.01, 0.1, 0.2, 0.5, 0.75, 1],
}
ada_clf_random = RandomizedSearchCV(
estimator=ada_clf_est,
param_distributions=param_grid,
n_iter=10,
cv=3,
verbose=2,
random_state=42,
n_jobs=-1,
)
ada_clf_random.fit(X_train, Y_train)
ada_clf = ada_clf_random.best_estimator_
ada_clf.fit(X_train, Y_train)
Y_train_pred = ada_clf.predict(X_train)
print(classification_report(Y_train, Y_train_pred))
ada_clf_random.best_params_
Y_test_pred = ada_clf.predict(X_test)
print(classification_report(Y_test, Y_test_pred))
test_data_tf = complete_pipeline.transform(test_data)
test_data_scaled = feat_pipe.transform(test_data_tf)
y_pred = ada_clf.predict(test_data_scaled)
y_pred_csv = pd.DataFrame(
{"id": test_data.index, "Made_Purchase": lb.inverse_transform(y_pred)}
)
y_pred_csv.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_list_raw = [
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv1.csv"), 1),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv2.csv"), 2),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv3.csv"), 3),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv4.csv"), 4),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv5.csv"), 5),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv6.csv"), 6),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv7.csv"), 7),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv8.csv"), 8),
(pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv9.csv"), 9),
]
df_list_1st = [
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv1.csv"), 1),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv2.csv"), 2),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv3.csv"), 3),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv4.csv"), 4),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv5.csv"), 5),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv6.csv"), 6),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv7.csv"), 7),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv8.csv"), 8),
(pd.read_csv("/kaggle/input/m5-data/m5/submissions/m5_1st_eval_lv9.csv"), 9),
]
LEVEL_MAX = 9
LEVELS_KEYS = {
"Level1": [],
"Level2": ["state_id"],
"Level3": ["store_id"],
"Level4": ["cat_id"],
"Level5": ["dept_id"],
"Level6": ["state_id", "cat_id"],
"Level7": ["state_id", "dept_id"],
"Level8": ["store_id", "cat_id"],
"Level9": ["store_id", "dept_id"],
"Level10": ["item_id"],
"Level11": ["state_id", "item_id"],
"Level12": ["item_id", "store_id"],
}
HIERARCHY_LEVELS_SPEC = [
["country_id"], # Level1: Total
["country_id", "state_id"], # Level2: State
["country_id", "state_id", "store_id"], # Level3: Store
["country_id", "cat_id"], # Level4: Category
["country_id", "cat_id", "dept_id"], # Level5: Department (sub-category)
["country_id", "state_id", "cat_id"], # Level6: State-Category
["country_id", "state_id", "cat_id", "dept_id"], # Level 7: State-Department
["country_id", "state_id", "store_id", "cat_id"], # Level 8: Store-Category
[
"country_id",
"state_id",
"store_id",
"cat_id",
"dept_id",
], # Level 9: Store-Department
]
from hierarchicalforecast.utils import aggregate
def build_summing_matrix(Y_df, S_df):
"""
    Given sales at the lowest level used here (Level LEVEL_MAX, store-department),
    returns the summing matrix and level tags, which are
    required by hierarchicalforecast before running
    any reconciliation method.
    Y_df: Sales dataframe at the lowest level
    S_df: Categorical exogenous dataframe with the hierarchy key columns
    return:
        Sum_df, levels_tags
"""
lv_keys = LEVELS_KEYS[f"Level{LEVEL_MAX}"]
unused_lv_keys = ["item_id", "unique_id"]
Y_df_reduced = Y_df.groupby(lv_keys).head(2)
S_df_reduced = (
S_df.drop(unused_lv_keys, axis="columns")
.drop_duplicates()
.reset_index(drop=True)
)
S_df_reduced["country_id"] = "US" # to act as key of Level 1
df = Y_df_reduced.merge(S_df_reduced, how="inner", on=lv_keys)
# aggregate
_, Sum_df, levels_tags = aggregate(df, spec=HIERARCHY_LEVELS_SPEC)
return Sum_df, levels_tags
Y_df_lv9 = pd.read_csv("/kaggle/input/m5-data/m5/processed/sales_lv9.csv")
S_df = pd.read_csv("/kaggle/input/m5-data/m5/processed/categorical_exog.csv")
Sum_df, levels_tags = build_summing_matrix(Y_df_lv9, S_df)
tag_keys = list(levels_tags.keys())
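# A quick look (sketch) at the hierarchy tags returned by aggregate(): one entry per level
# in HIERARCHY_LEVELS_SPEC, mapping the level name to the array of its unique_ids.
for _k, _v in levels_tags.items():
    print(_k, len(_v))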
def create_Y_df(df_list):
"""
Given a list of (df,level) tuple,
returns the Y_df with unique_ids
formed for tags.
df_list: Tuple(df,level_num)
return:
Y_df
"""
Y_df = pd.DataFrame()
for df, level_num in df_list:
if level_num == 1:
df["unique_id"] = "US"
            Y_df = pd.concat([Y_df, df], ignore_index=True)
continue
if len(LEVELS_KEYS[f"Level{level_num}"]) == 1:
df["unique_id"] = df[LEVELS_KEYS[f"Level{level_num}"]]
else:
df["unique_id"] = (
df[LEVELS_KEYS[f"Level{level_num}"][0]]
+ "/"
+ df[LEVELS_KEYS[f"Level{level_num}"][1]]
)
df = df.drop(LEVELS_KEYS[f"Level{level_num}"], axis="columns")
id_list = df["unique_id"].unique()
tag_list = levels_tags[tag_keys[level_num - 1]]
mapping = dict(zip(id_list, tag_list))
df["unique_id"] = df["unique_id"].replace(mapping)
        Y_df = pd.concat([Y_df, df], ignore_index=True)
Y_df.ds = pd.to_datetime(Y_df.ds)
return Y_df
def create_train_test():
"""
Creates train and test df's
from raw data to use for
reconciliation & evaluation.
return:
Y_train_df, Y_test_df
"""
Y_df = create_Y_df(df_list_raw)
date_last = np.max(Y_df["ds"])
date_cutoff = date_last - pd.Timedelta(days=28)
Y_test_df = Y_df[Y_df["ds"] > date_cutoff]
Y_train_df = Y_df[Y_df["ds"] <= date_cutoff]
Y_test_df = Y_test_df.reset_index(drop=True)
Y_train_df = Y_train_df.reset_index(drop=True)
return Y_train_df, Y_test_df
Y_train_df, Y_test_df = create_train_test()
# base forecasts: the 1st-place evaluation submissions, shaped like Y_df via create_Y_df
Y_hat_df = create_Y_df(df_list_1st)
from hierarchicalforecast.core import HierarchicalReconciliation
from hierarchicalforecast.methods import BottomUp, TopDown, MiddleOut
reconcilers = [
BottomUp(),
TopDown(method="forecast_proportions"),
MiddleOut(
middle_level="country_id/state_id/store_id/cat_id",
top_down_method="forecast_proportions",
),
]
hrec = HierarchicalReconciliation(reconcilers=reconcilers)
Y_rec_df = hrec.reconcile(
Y_hat_df=Y_hat_df, Y_df=Y_train_df, S=Sum_df, tags=levels_tags
)
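# A quick inspection (sketch): reconcile() should return the base forecast columns plus one
# extra column per reconciler configured above (BottomUp, TopDown, MiddleOut variants).
print(Y_rec_df.columns.tolist())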
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from matplotlib.lines import Line2D
from warnings import filterwarnings
sns.set_style("whitegrid")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing dataset to the notebook
df = pd.read_csv(
"/kaggle/input/amazon-top-50-bestselling-books-2009-2019/bestsellers with categories.csv"
)
# # Data Preparation ------ Getting the basic information of the dataset and then performing analysis ----------
# --- Getting the details of dataset as dataframe ---
df
# --- Checking the type after converting it into data frame---
type(df)
# ---Retrieving the basic information of the dataset---
df.info()
# The dataset looks clean, as there are no null values.
# ---Overview of the numeric data of the dataset such as userrating , reviews , price and year---
df.describe()
# ---Listing the name of columns present in the dataset---
df.columns
# --- Determining the shape of the dataset ---
df.shape
# ### Representing a few sample rows of the dataset as a Python dictionary, to make retrieving data for a particular index easier
amazon_data_dict = {
"Name": [
"10-Day Green Smoothie Cleanse",
"11/22/63: A Novel",
"You Are a Badass: How to Stop Doubting Your Greatness and Start Living an Awesome Life",
"You Are a Badass: How to Stop Doubting Your Greatness and Start Living an Awesome Life",
],
"Author": ["JJ Smith", "Stephen King", "Jen Sincero", "Jen Sincero"],
"User Rating": [4.7, 4.6, 4.7, 4.7],
"Reviews": [17350, 2052, 14331, 14331],
"Price ": [8, 22, 8, 8],
"Year": [2016, 2011, 2018, 2019],
"Genre": ["Non Fiction", "Fiction", "Non Fiction", "Non Fiction"],
}
# --- Viewing the dataframe once again---
df
# ---List of authors in the dataset---
amazon_data_dict["Author"]
# ---List of the names of the books in the dataset---
df["Name"]
# ---Datatype of the dataframe ---
type(df["Name"])
# --- Review of the book at the 50th index---
df["Reviews"][50]
# ---User rating of the book at the 50th index---
df["User Rating"][50]
# --- List of genre of all the books ---
df.Genre
# --- List of the books with their genre ---
Genre_df = df[["Name", "Genre"]]
Genre_df
# ---Creating a copy of the original dataframe. The copy is completely separate from df, so changing values in one will not affect the other---
df_copy = df.copy()
# --- Accessing the information of first row of the data using loc method ---
df.loc[0]
# --- Retrieving the first 5 rows ---
df.head(5)
# ---Retrieving the last 5 rows---
df.tail(5)
# --- Retrieving 15 random rows from the data to draw some conclusions ---
df.sample(15)
# # Analyzing Data
# Q1. What are name of distinct books?
df["Name"].unique()
# Q2. Name of different authors?
df["Author"].unique()
# Q3. What is the total of the listed prices across all the bestseller entries?
total_profit = df.Price.sum()
print(total_profit, "$")
# Q4. What is the data type of price row ?
df.dtypes.Price
#
# Q5. Converting the data of Price from int to float
df.Price.astype("float64")
# # Querying and Sorting the rows
# Q6. Getting the list of books having rating less than 4.5
Average_rated = df["User Rating"] < 4.5
df[Average_rated]
# Q7. Getting the list of books with the rating of greater than 4.8 i.e the highest rated books
Highest_rated = df["User Rating"] > 4.8
df[Highest_rated]
# Q8.Ten most reviewed books ?
df.sort_values("Reviews", ascending=False).head(10)
# As we can see, there are multiple entries of the same book, so we will now get the list of most reviewed books by removing duplicates
df.drop_duplicates("Name").sort_values("Reviews", ascending=False).head(10)
# Q9. Getting the List of books by "George R. R. Martin"
Books_by_George_R_R_Martin = df["Author"] == "George R. R. Martin"
df[Books_by_George_R_R_Martin]
# Q10. Maximum value of each numeric column of the data
df.max(axis=0, skipna=True, numeric_only=True)
# Q11. What is the price of the most expensive book?
df["Price"].max()
# # Some visualizations for EDA
# 1. Plot showing the user rating distribution
plt.figure(figsize=(10, 5))
sns.countplot(x=df["User Rating"], palette="viridis_r")
plt.title("User Rating Distribution Plot", fontsize=16)
plt.show()
# 2. Simple plot of the review given to the books
df.Reviews.plot(title="Reviews", color="Teal")
# 3. Simple Year distribution plot
plt.figure(figsize=(6, 6))
sns.distplot(df["Year"], color="purple")
plt.title("Year Distribution Plot", fontsize=16)
plt.show()
filterwarnings("ignore")
# 4. Line plot of Price against Year
plt.plot(df["Year"], df["Price"])
# Q12. Pie chart of the Genre split (Fiction vs Non Fiction) among the top 10 most-reviewed books
pie_1 = (
df.drop_duplicates("Name")
.sort_values("Reviews", ascending=False)["Genre"]
.head(10)
.value_counts()
)
sns.set_palette("Set3_r")
plt.figure(figsize=(8, 8))
plt.pie(
pie_1,
explode=[0, 0.15],
labels=["Fiction", "Non Fiction"],
autopct="%.1f%%",
shadow=True,
startangle=20,
)
plt.title(
"Genre Pie Chart for the top 10 Bestselling Books on Amazon (2009-2019)",
fontdict={"size": 14},
y=0,
)
# Q13. Bar plot of average Reviews by Genre:
sns.barplot(data=df, x="Genre", y="Reviews")
# Q14. Joint scatter plot of Price vs Reviews, colored by Genre:
sns.jointplot(
x=df["Price"], y=df["Reviews"], alpha=1, kind="scatter", height=5, hue=df["Genre"]
)
plt.show()
# --- We'll try to find the most prolific author, based on the number of distinct books that appear in the Amazon bestseller list---
Counter(df.drop_duplicates("Name")["Author"]).most_common()[0]
# Q15. The books by Jeff Kinney that appear in the bestseller list
new_df = df.drop_duplicates("Name")
new_df[new_df["Author"] == "Jeff Kinney"]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import plotly.express as px
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/big-mart-sales/Train-Set.csv")
data.sample(20)
data.shape
data.info()
data.isnull().sum()
# ## Percentage for Missing Values
missing_percentage = data.isnull().sum() / len(data) * 100
plt.figure(figsize=(20, 5))
missing_percentage.plot(kind="bar", x="Column", y="Percentage", colormap="Set2")
plt.xlabel("Column")
plt.ylabel("Percentage")
plt.title("Percentage of missing values")
plt.show()
# *As we can see, only two columns have missing values, and both are below 30%*
# * So we will perform some operations to handle these missing values.
# *For 'Weight'*
data.Weight.mean()
data["Weight"].fillna(data["Weight"].mean(), inplace=True)
data.Weight.isnull().sum()
# *For 'OutletSize'*
data["OutletSize"].isnull().sum()
data["OutletSize"].mode()
data.OutletSize.value_counts()
from scipy.stats import mode
outletsize_mode = data.pivot_table(
values="OutletSize",
index="OutletType",
aggfunc=(lambda x: mode(x.dropna()).mode[0]),
)
outletsize_mode
missingvalues = data["OutletSize"].isnull()
missingvalues
data.loc[missingvalues, "OutletSize"] = data.loc[missingvalues, "OutletType"].apply(
lambda x: outletsize_mode.loc[x]
)
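# A pandas-only sketch of the same per-OutletType mode fill (shown on a copy so it
# does not touch the already-imputed `data`; `alt` is an illustrative name and the
# groupby + transform pattern avoids the scipy call):
alt = data.copy()
alt["OutletSize"] = alt.groupby("OutletType")["OutletSize"].transform(
    lambda s: s.fillna(s.mode()[0])
)
alt["OutletSize"].isnull().sum()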
data.isnull().sum()
# *Now we don't have any null values, so we can proceed for analysis*
# ### Univariate Analysis
# #### *Weight Column*
fig = px.histogram(data["Weight"])
fig.show()
# #### *Fat Content Column*
data["FatContent"].mask(data["FatContent"] == "LF", "Low Fat", inplace=True)
data["FatContent"].mask(data["FatContent"] == "low fat", "Low Fat", inplace=True)
data["FatContent"].mask(data["FatContent"] == "reg", "Regular", inplace=True)
# * The same categories appeared under different aliases ('LF', 'low fat', 'reg'), so they are consolidated above.
data["FatContent"].unique()
fig = px.histogram(data["FatContent"])
fig.show()
# #### *Product Type Column*
fig = px.histogram(data["ProductType"])
fig.show()
data.EstablishmentYear.unique()
data["EstablishmentYear"].value_counts()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Importing dependencies**
import re
from nltk.corpus import (
stopwords,
) # Natural language toolkit (stopwords are words that don't add much contextual value to our dataset)
from nltk.stem.porter import (
PorterStemmer,
) # Stemming takes a word and removes the prefix and suffix
from sklearn.feature_extraction.text import (
TfidfVectorizer,
) # used to convert text into feature vectors(numbers)
from sklearn.model_selection import (
train_test_split,
) # used to split our data into training and testing data
from sklearn.linear_model import LogisticRegression # importing classification model
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
classification_report,
) # Evaluation metric
import nltk
nltk.download("stopwords")
# **Data Preprocessing**
df = pd.read_csv("/kaggle/input/fakevsreal-news-dataset/train.csv")
df.head()
df.shape
df.isnull().sum()
df = df.fillna("")
df.isnull().sum()
# **Merging the author name and title**
df["content"] = df["author"] + " " + df["title"] # Concatenation
df["content"][0]
x = df.drop("label", axis=1)
y = df["label"]
# **Stemming**
# *Process of reducing a word to its root.
# Example: actor, actress, acting --> act*
ps = PorterStemmer()
print(stopwords.words("english"))
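# A quick illustration of what the stemmer does (the words below are illustrative,
# not taken from the dataset): all three should reduce to the same root.
for word in ["connected", "connecting", "connection"]:
    print(word, "->", ps.stem(word))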
def stemming(content):
    stemmed_content = re.sub(
        "[^a-zA-Z]", " ", content
    )  # ^ = negation: keep only alphabetic characters, replacing everything else with a space
    stemmed_content = stemmed_content.lower()  # converting all letters to lower case
    stemmed_content = (
        stemmed_content.split()
    )  # splitting the string into a list of words
    # removing stopwords/insignificant words and stemming the remaining words with PorterStemmer
    stemmed_content = [
        ps.stem(word)
        for word in stemmed_content
        if not word in stopwords.words("english")
    ]
    # joining the list of stemmed words back into a single string
    stemmed_content = " ".join(stemmed_content)
    # returning the cleaned content
    return stemmed_content
df["content"] = df["content"].apply(stemming)
df["content"][0]
x = df["content"].values
y = df["label"].values
print(x)
# **Converting textual data into numerical data**
vec = TfidfVectorizer()  # tf = term frequency / idf = inverse document frequency
vec.fit(x)
x = vec.transform(x)
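# The notebook stops after vectorizing; a minimal hedged sketch of how the imported
# train_test_split and LogisticRegression could be applied next (the test_size,
# stratify and random_state values here are illustrative, not the author's choices):
x_tr, x_te, y_tr, y_te = train_test_split(
    x, y, test_size=0.2, stratify=y, random_state=2
)
clf = LogisticRegression()
clf.fit(x_tr, y_tr)
print("test accuracy:", accuracy_score(y_te, clf.predict(x_te)))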
|
# In this notebook I parse sites with a pool of 40 worker processes
# Parsing 199683 sites took about 4.5 hours
import sys
import os
import warnings
os.environ["OPENBLAS_NUM_THREADS"] = "1"
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import time
import pyarrow as pa
import pyarrow.parquet as pq
import scipy
import implicit
import bisect
import sklearn.metrics as m
from catboost import CatBoostClassifier, CatBoostRegressor, Pool
from sklearn.model_selection import train_test_split
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
import implicit
# I get non-repetitive url_hosts
urls = pd.read_feather("/kaggle/input/mts-ml-cookies/dataset_full.feather")
urls = urls["url_host"].drop_duplicates()
urls
# Function for writing sites data to csv-file
import csv
def csv_writer(data):
with open("sites.csv", "a", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(data)
# ### Function for reading information from sites and writing it to file
# In this function I read the HTML page for each URL, parse the description, title and keywords from the HTML, and write them to a csv file. There are 5 URLs that crash the script, so I had to skip them:
# - sanstv.ru
# - t.lordfilms-filmy.online
# - z.lordfilms-film.online
# - n.lordfilm-smotret.one
# - z.lordfilmy-film.online
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
from urllib3.exceptions import LocationParseError
def parse_url(url):
url_data = np.empty([5], dtype=object)
url_data[0] = url[0]
url_data[1] = url[1]
try:
if url[1] == "sanstv.ru":
csv_writer(url_data)
del url_data
return
if url[1] == "t.lordfilms-filmy.online":
csv_writer(url_data)
del url_data
return
if url[1] == "z.lordfilms-film.online":
csv_writer(url_data)
del url_data
return
if url[1] == "n.lordfilm-smotret.one":
csv_writer(url_data)
del url_data
return
if url[1] == "z.lordfilmy-film.online":
csv_writer(url_data)
del url_data
return
html = requests.get(
f"http://{url[1]}",
timeout=6,
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
"Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7",
"Cookie": "vid=1b448c0013086e08644a; mkmgsgp=RQXPUS",
},
)
        # Due to encoding problems on some sites I force-decode the text as utf-8; badly decoded sites are re-parsed later with cp1251
html.encoding = "utf-8"
soup = BeautifulSoup(html.text, "lxml")
descr = soup.find("meta", attrs={"name": "Description", "content": True})
if descr is not None:
url_data[3] = descr["content"]
descr = soup.find("meta", attrs={"name": "description", "content": True})
if descr is not None:
url_data[3] = descr["content"]
del descr
title = soup.find("title")
if title is not None:
url_data[2] = title.text
del title
keywords = soup.find("meta", attrs={"name": "keywords", "content": True})
if keywords is not None:
url_data[4] = keywords["content"]
keywords = soup.find("meta", attrs={"name": "Keywords", "content": True})
if keywords is not None:
url_data[4] = keywords["content"]
del keywords
csv_writer(url_data)
del html
del soup
del url_data
except AttributeError:
csv_writer(url_data)
del url_data
except requests.exceptions.RequestException:
csv_writer(url_data)
del url_data
except LocationParseError:
csv_writer(url_data)
del url_data
i = 0
j = 5000
while j < 199684:
    # Parse each chunk of 5000 URLs with a pool of 40 worker processes
with Pool(40) as p:
p.map(parse_url, enumerate(urls[urls.index[range(i, j)]]))
i = j
print(i)
if j != 195000:
j += 5000
else:
j = 199683
site_data = pd.read_csv("/kaggle/working/sites.csv", header=None)
site_data.columns = ["position", "url", "title", "description", "keywords"]
site_data = site_data[site_data["title"].str.contains("[�]+", na=False)]  # keep only pages whose titles contain the replacement character, i.e. badly decoded ones
site_data
no_lang = site_data["url"]
no_lang
import csv
def csv_writer(data):
with open("sites-cp1251.csv", "a", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(data)
# ### Function for reading information from sites and writing it to file with cp1251 encoding
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
from urllib3.exceptions import LocationParseError
def parse_url(url):
url_data = np.empty([5], dtype=object)
url_data[0] = url[0]
url_data[1] = url[1]
try:
if url[1] == "sanstv.ru":
csv_writer(url_data)
del url_data
return
if url[1] == "t.lordfilms-filmy.online":
csv_writer(url_data)
del url_data
return
if url[1] == "z.lordfilms-film.online":
csv_writer(url_data)
del url_data
return
if url[1] == "n.lordfilm-smotret.one":
csv_writer(url_data)
del url_data
return
if url[1] == "z.lordfilmy-film.online":
csv_writer(url_data)
del url_data
return
html = requests.get(
f"http://{url[1]}",
timeout=6,
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
"Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7",
"Cookie": "vid=1b448c0013086e08644a; mkmgsgp=RQXPUS",
},
)
html.encoding = "cp1251"
soup = BeautifulSoup(html.text, "lxml")
descr = soup.find("meta", attrs={"name": "Description", "content": True})
if descr is not None:
url_data[3] = descr["content"]
descr = soup.find("meta", attrs={"name": "description", "content": True})
if descr is not None:
url_data[3] = descr["content"]
del descr
title = soup.find("title")
if title is not None:
url_data[2] = title.text
del title
keywords = soup.find("meta", attrs={"name": "keywords", "content": True})
if keywords is not None:
url_data[4] = keywords["content"]
keywords = soup.find("meta", attrs={"name": "Keywords", "content": True})
if keywords is not None:
url_data[4] = keywords["content"]
del keywords
csv_writer(url_data)
del html
del soup
del url_data
except AttributeError:
csv_writer(url_data)
del url_data
except requests.exceptions.RequestException:
csv_writer(url_data)
del url_data
except LocationParseError:
csv_writer(url_data)
del url_data
i = 0
j = 5000
while j < 14510:
with Pool(40) as p:
p.map(parse_url, enumerate(no_lang[no_lang.index[range(i, j)]]))
i = j
print(i)
if j != 10000:
j += 5000
else:
j = 14509
|
from pathlib import Path
import numpy as np
import pandas as pd
from datetime import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import optuna
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
tf.get_logger().setLevel("ERROR")
import warnings
warnings.filterwarnings("ignore")
tf.__version__, sklearn.__version__
# ## Setup data
input_dir = r"../input/playground-series-s3e12"
working_dir = Path(r"../working")
train = pd.read_csv(Path(input_dir, "train.csv"), header=0, index_col="id")
test = pd.read_csv(Path(input_dir, "test.csv"), header=0, index_col="id")
submission = pd.read_csv(
Path(input_dir, "sample_submission.csv"), header=0, index_col="id"
)
train.shape, test.shape, submission.shape
X = train.drop("target", axis=1)
y = train["target"].copy()
X.shape, y.shape
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.2, random_state=42, stratify=y
)
X_train.shape, X_valid.shape, y_train.shape, y_valid.shape
# standardization
scaler = StandardScaler()
scaler.fit(X_train)
X_train_std = scaler.transform(X_train)
X_valid_std = scaler.transform(X_valid)
X_train_std.mean(), X_train_std.std(), X_valid_std.mean(), X_valid_std.std()
# ## Define model
from functools import partial
def create_model(
    activation=None, dout=None, learning_rate=None, kernel_initializer=None, L2=None
):
    # Note: `dout` and `L2` are accepted but not applied anywhere in this architecture.
    dns = [128, 32, 8]  # the number of units in each layer
RegularizedDense = partial(
Dense, kernel_initializer=kernel_initializer, activation=activation
)
model = Sequential(
[
Dense(dns[0], input_shape=(6,)),
RegularizedDense(dns[1]),
RegularizedDense(dns[2]),
Dense(1, activation="linear"),
]
)
model.compile(
loss="mean_squared_error",
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
metrics=[tf.keras.metrics.MeanAbsoluteError()],
)
return model
# # Optimize parameters
# set early-stopping
callbacks = [
EarlyStopping(monitor="val_loss", patience=10, mode="auto", verbose=0),
ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=10),
]
# Define the function which returns the score to be minimized.
def objective(trial):
params = {
"activation": trial.suggest_categorical("activation", ["relu", "gelu", "selu"]),
"dout": trial.suggest_categorical("dout", [0.1, 0.2, 0.3, 0.4, 0.5]),
"kernel_initializer": trial.suggest_categorical(
"kernel_initializer", [None, "he_normal", "glorot_normal", "glorot_uniform"]
),
"learning_rate": trial.suggest_float("learning_rate", 0.001, 0.01),
}
model = create_model(
activation=params["activation"],
dout=params["dout"],
learning_rate=params["learning_rate"],
kernel_initializer=params["kernel_initializer"],
)
batch_size = 32 # default
epochs = 100
history = model.fit(
X_train_std,
y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=(X_valid_std, y_valid),
verbose=0,
)
y_valid_pred_proba = model.predict(X_valid_std, verbose=0)
    # clamp negative network outputs to a tiny positive value so that log_loss is defined
    y_valid_pred_proba = [x if x >= 0 else 1e-8 for x in y_valid_pred_proba.reshape(-1)]
logloss = metrics.log_loss(y_valid, y_valid_pred_proba)
return logloss
# Create optuna.study instance
study = optuna.create_study(sampler=optuna.samplers.RandomSampler(seed=42))
# Optimize with the above objective function
s_time = dt.now()
print("Start Time: {}".format(s_time))
study.optimize(objective, n_trials=30)
print("Training Computation Time: {}".format(dt.now() - s_time))
study.best_params
# # Re-train with the best parameters
params = study.best_params.copy()
model = create_model(**params)
model.summary()
# set early-stopping
callbacks = [
EarlyStopping(monitor="val_loss", patience=10, mode="auto", verbose=0),
ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=10),
]
s_time = dt.now()
print("Start Time: {}".format(s_time))
batch_size = 32 # default
epochs = 100
history = model.fit(
X_train_std,
y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=(X_valid_std, y_valid),
verbose=1,
)
print("Training Computation Time: {}".format(dt.now() - s_time))
pd.DataFrame(history.history).mean()
# convergence history
def draw_convergence_history(history):
"""draw convergence history"""
fig, ax = plt.subplots(1, 2, figsize=(8, 3), facecolor="w")
training_loss = history.history["loss"]
validation_loss = history.history["val_loss"]
training_mae = history.history["mean_absolute_error"]
validation_mae = history.history["val_mean_absolute_error"]
epoch_count = range(1, len(training_loss) + 1)
# plot loss history
ax[0].plot(epoch_count, training_loss, "rx--")
ax[0].plot(epoch_count, validation_loss, "bo-")
ax[0].set_title("convergence history of loss")
ax[0].legend(["Training Loss", "Validation Loss"], loc="upper right")
ax[0].set_xlabel("Epoch")
ax[0].set_ylabel("Loss")
ax[0].grid()
# plot mae history
ax[1].plot(epoch_count, training_mae, "rx--")
ax[1].plot(epoch_count, validation_mae, "bo-")
ax[1].set_title("convergence history of MAE")
ax[1].legend(["Training MAE", "Validation MAE"], loc="upper right")
ax[1].set_xlabel("Epoch")
ax[1].set_ylabel("MAE")
ax[1].grid()
plt.tight_layout()
draw_convergence_history(history)
# # Evaluate
# AUC
y_valid_pred_proba = model.predict(X_valid_std, verbose=0)
y_valid_pred = np.array([y_valid_pred_proba > 0.5]).astype("int").reshape(-1)
print(f"AUC= {metrics.roc_auc_score(y_valid, y_valid_pred_proba):.2f}")
# classification report
clf_report = metrics.classification_report(y_valid, y_valid_pred)
print(clf_report)
# confusion matrix
confusion_matrix = metrics.confusion_matrix(y_valid, y_valid_pred)
plt.figure(figsize=(3, 2), facecolor="w")
sns.heatmap(confusion_matrix, annot=True, cmap="Blues", fmt="d")
# Create submission file
# reuse the scaler fitted on the training split; refitting on the test set would apply a different scaling
test_std = scaler.transform(test)
submission["target"] = model.predict(test_std, verbose=0)
submission.to_csv(Path(working_dir, "submission.csv"), header=True, index=True)
submission[:5]
|
import numpy as np, pandas as pd, matplotlib.pyplot as plt, seaborn as sns
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVR
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# importing the datasets and merging them into one
test = pd.read_csv("/kaggle/input/loan-prediciton-classification/test_AbJTz2l.csv")
train = pd.read_csv("/kaggle/input/loan-prediciton-classification/train_v9rqX0R.csv")
data = pd.concat([train, test], axis=0, keys=["train", "test"])
data
print(data.info()) # identifying the columns and data types
# understanding the basic statistical values of numerical features to form questions
base_stat = data.describe()
base_stat
# Based on min, max, std and the mean, it seems like there **could be** outliers in 'Item_Outlet_Sales' and 'Item_Visibility' . Moreover, 'Item_Outlet_Sales' seems to have a significant number of NULL values followed by 'Item_Weight'.
print(
"The percentage of null values in ",
"%.2f" % (((data["Item_Outlet_Sales"].isnull().sum()) / 14204) * 100),
"%",
)
print(
"The count of null values in Item_Outlet_Sales is",
data["Item_Outlet_Sales"].isnull().sum(),
)
# calculating the null values for each feature
data.isnull().sum()
# **Features present in dataset (from the above dataframe info):**
# **1) Categorical :** 'Item_Identifier' , 'Item_Fat_Content' , 'Item_Type' , 'Outlet_Size' , 'Outlet_Location_Type' , 'Outlet_Type'
# **2) Numerical :** 'Item_Weight' , 'Item_Visibility' , 'Item_MRP' , 'Outlet_Establishment_Year' , 'Item_Outlet_Sales'
# checking just in case for any duplicates in the combined data
print(data.shape)
data.duplicated().sum()
# based on the previous output, there are no duplicates in the data
# separating the categorical features from the numerical features
data_cat = data.loc[
:,
[
"Item_Identifier",
"Outlet_Identifier",
"Item_Fat_Content",
"Item_Type",
"Outlet_Size",
"Outlet_Location_Type",
"Outlet_Type",
],
]
data_num = data.loc[
:,
[
"Item_Weight",
"Item_Visibility",
"Item_MRP",
"Outlet_Establishment_Year",
"Item_Outlet_Sales",
],
]
# plotting categorical features to understand their distribution
# we only plot the categorical features that are relevant to our objective
for i in data_cat.columns.values[1:]:
plt.figure(figsize=(11, 5), dpi=150, facecolor="lightgray", edgecolor="black")
plt.bar(
data_cat[i].dropna().unique(),
data_cat[i].dropna().value_counts().values,
width=0.3,
facecolor="brown",
edgecolor="black",
lw=2,
align="center",
)
plt.title(
label=i + " Distribution",
style="italic",
fontdict={"fontsize": 12, "fontweight": "bold"},
loc="left",
)
plt.xlabel(i)
plt.ylabel("Frequency")
plt.xticks(data_cat[i].dropna().unique(), rotation=50)
plt.show()
# Many inferences can be made.
# 1. **The frequency of sales for Supermarket Type 1 is significantly higher than the other types.**
# 2. **Among outlet location types, Tier 1 attracts the most consumers. However, Tier 3 has higher total sales across all outlets compared to Tier 1 and Tier 2.**
# 3. **Dairy items and soft drinks, along with 'Low Fat' items, sell more than the other categories in their respective features. Medium-sized outlets show the same pattern.**
# excluding null values since they contribute towards a significant portion of data
for i in data_num.columns.values:
plt.figure(figsize=(11, 5), dpi=200, facecolor="#e9e9e9")
plt.boxplot(
data_num[i].dropna(),
vert=True,
notch=True,
boxprops=dict(color="#1E3F66", lw=1.5),
flierprops=dict(color="black"),
)
plt.title(
label=i + " Box Plot",
style="italic",
fontdict={"fontsize": 12, "fontweight": "bold"},
loc="center",
)
plt.show()
# From above, there seem to be potential outliers in Item_Visibility and Item_Outlet_Sales. However, given the context, this could be natural variation in sales data, which is also heavily influenced by different outlet features. The same can be said about item visibility, since item types range from loose vegetables to fully enclosed/canned food.
# looking at potential outlier for comprehension purposes
print(
data[data["Item_Outlet_Sales"] > 10000]
.loc[
:,
[
"Item_Identifier",
"Item_Type",
"Item_Weight",
"Item_Fat_Content",
"Outlet_Type",
"Outlet_Size",
"Item_Outlet_Sales",
],
]
.dropna(how="all"),
"\n\n",
)
# looking at the other feature values for outliers, the natural variation holds
# however, we will look at the model accuracy to decide the removal of outliers
# plotting the distribution of numerical features
for i in data_num.columns.values:
plt.figure(figsize=(11, 5), dpi=200, facecolor="#e9e9e9")
plt.hist(
data_num[i].dropna(),
edgecolor="black",
facecolor="pink",
hatch=".",
align="left",
)
plt.title(label=i + " Distribution", fontdict=dict(fontsize=12), loc="center")
plt.show()
# 1. The distribution of Item_Weight is fairly uniform across different weights, so the number of consumers attracted by outlets is not majorly affected by item weight. Moreover, the column contains 2439 NULL values and has a very weak correlation with sales (shown later).
# > **So, Item_Weight won't be taken as a predictor variable.**
# 2. It also seems that products with high visibility sell to more customers, potentially increasing sales (explored later)
# 3. A high Item_MRP tends to attract fewer customers. Item_MRP also has a moderate correlation with sales (explored later)
# **Since 40% of the outlet sales feature is NULL, we will handle it in two ways, removing and filling separately, for model evaluation (the fill alternative is sketched after the numeric subset below)**
# creating a dataframe without NA values in outlet sales
data2 = data.copy()
data2 = data2[data2["Item_Outlet_Sales"].notna()]
data_cat2 = data2.loc[
:,
[
"Item_Identifier",
"Outlet_Identifier",
"Item_Fat_Content",
"Item_Type",
"Outlet_Size",
"Outlet_Location_Type",
"Outlet_Type",
],
]
data_num2 = data2.loc[
:,
[
"Item_Weight",
"Item_Visibility",
"Item_MRP",
"Outlet_Establishment_Year",
"Item_Outlet_Sales",
],
]
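# A hedged sketch of the "fill" alternative mentioned above (illustrative only: the
# grouping by Outlet_Type and the median statistic are assumptions, not the author's
# final choice, and `data_fill` is not used further in this notebook):
data_fill = data.copy()
data_fill["Item_Outlet_Sales"] = data_fill.groupby("Outlet_Type")[
    "Item_Outlet_Sales"
].transform(lambda s: s.fillna(s.median()))
data_fill["Item_Outlet_Sales"].isnull().sum()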
# exploring relationships between different outlet features
f = pd.pivot_table(
data,
index=["Outlet_Type"],
columns=["Outlet_Size"],
values="Item_Outlet_Sales",
aggfunc=np.sum,
).round(2)
f["Total"] = f.sum(axis=1)
f.loc[len(f)] = f.sum(axis=0)
f
# From the output above and the distributions given earlier, it seems that outlet type heavily influences the number of consumers and item sales across all outlets.
# It also looks like more items are sold at Supermarket Type 1. The same is concluded for medium-sized outlets, followed by small and then high. This also reinforces the fact that medium outlet size and Supermarket Type 1 attract more customers.
# > **So, Outlet_Type will be taken as a predictor variable.**
# outlet location type and outlet size
pd.pivot_table(
data,
index=["Outlet_Location_Type"],
columns=["Outlet_Size"],
values="Item_Outlet_Sales",
aggfunc=np.sum,
).round(2)
# relation between outlet location type and outlet type/size in terms of sales
pd.pivot_table(
data,
index=["Outlet_Location_Type", "Outlet_Type"],
columns=["Outlet_Size"],
values="Item_Outlet_Sales",
aggfunc=np.sum,
).round(2)
# Tier 3 outlet location produces more sales across all products and outlets. Outlet location type does seem to have an influence on item sales but including it raises the concern of overfitting the model.
# > **However, given the context of this dataset, outlet location type will be taken as a predictor variable.**
# understanding relation between item fat content and outlet size & type in terms of sales
a = pd.pivot_table(
data,
index=["Item_Fat_Content"],
columns=["Outlet_Size", "Outlet_Type"],
values="Item_Outlet_Sales",
aggfunc=np.sum,
).round(2)
a["Total"] = a.sum(axis=1)
a
# From the table above, the item fat content categories differ noticeably in total sales, even though the feature's correlation with outlet sales is very weak.
# > **However, given the context of this data and distribution of item_fat_content, it is taken as a predictor variable.**
# understanding relation between item visibility and item type in terms of sales
b = (
pd.pivot_table(
data,
index=["Item_Visibility"],
columns=["Item_Type"],
values="Item_Outlet_Sales",
aggfunc=np.sum,
)
.round(2)
.dropna(how="all")
.head(20)
)
b["Total"] = b.sum(axis=1)
b.loc[len(b)] = b.sum(axis=0)
b
# From the above table, item_visibility also has a significant influence on outlet sales. Moreover, item type plays a huge role as well. From the data, dairy items sell 19.5% more than canned produce, 90% more than breakfast items, and so on.
# > **So, Item_Visibility and Item_Type are taken as predictor variables**
#
# understanding relation between item visibility and item fat content in terms of sales
pd.pivot_table(
data,
columns=["Item_Visibility"],
index=["Item_Fat_Content"],
values="Item_Outlet_Sales",
aggfunc=np.sum,
).round(2)
# identifying correlation between numeric features
plt.figure(figsize=(10, 5), dpi=300)
cor = data.corr(method="pearson")
sns.heatmap(cor, cmap="BrBG", annot=True)
# Correlation between the numerical features is very weak apart from Item_MRP and Sales.
# > **Given the context of data and above pivot table, Item_MRP will be taken as a predictor variable.**
# normalizing the numerical features
data_norm = data2.copy()
for i in data_num2.columns.values:
data_norm[i] = (data_num2[i] - data_num2[i].min()) / (
data_num2[i].max() - data_num2[i].min()
)
plt.figure(figsize=(10, 5), dpi=300)
cor = data_norm.corr(method="pearson")
sns.heatmap(cor, cmap="BrBG", annot=True)
# converting Item_Type,Item_Fat_Content, Outlet_Location_Type and Outlet_Type
# to numerical variables using one-hot encoding to fit the model
oh_enc = pd.get_dummies(
data_norm,
columns=["Item_Type", "Item_Fat_Content", "Outlet_Location_Type", "Outlet_Type"],
)
oh_enc = oh_enc.droplevel(0)
oh_enc.columns.values
# splitting the data into test and train sets
x = pd.concat([oh_enc.iloc[:, [2, 3]], oh_enc.iloc[:, 8:]], axis=1)
y = oh_enc["Item_Outlet_Sales"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, shuffle=True, random_state=120
)
print(x_train.shape, y_train.shape)
print(x_train)
# using random forests to identify the importance of different encoded columns
# for dimensionality reduction
rf = RandomForestRegressor(n_estimators=150)
rf.fit(x_train, y_train)
print(rf.score(x_test, y_test) * 100, "%")
# plotting the weight of influence of different features
imp = rf.feature_importances_
print(len(imp))
r = 0
for i in range(6, 31, 6):
plt.figure(figsize=(15, 7), dpi=300)
plt.bar(x_train.columns.values[r:i], imp[r:i], facecolor="#005353")
plt.xticks(x_train.columns.values[r:i], rotation=55)
for l, w, text in zip(x_train.columns.values[r:i], imp[r:i], imp[r:i].round(4)):
plt.text(l, w + 0.0015, text, fontdict=dict(weight="bold"))
plt.title("Importance of Features", fontdict=dict(weight="bold"), loc="center")
plt.yticks(np.arange(0, 0.5, 0.1))
plt.show()
r = i
# fitting a linear Support Vector Regression model and validating using cross-validation
mod = SVR(kernel="linear")
# mod.fit(x_train,y_train)
cv_score = cross_val_score(mod, x_train, y_train, cv=6)
print(
"\033[1m",
"The accuracy of (using cross-validation)",
np.round(cv_score.mean(), 2) * 100,
"%",
)
# print('\033[1m','The accuracy of ',mod.score(x_test,y_test))
# **Since accuracy is low, we will remove features that have low influence**
# removing the features with less influence
# finding the range of importances or weights
print(abs(imp.mean() - np.std(imp)), imp.mean() + np.std(imp), "\n")
# so, predictor variables with weights below 0.046 are dropped
con = dict(zip(x_train.columns.values, imp))
for i in x_train.columns.values:
if con[i] < 0.046:
del con[i]
print(con, "\n\n")
x_train2 = x_train[con.keys()]
x_test2 = x_test[con.keys()]
x_train2
# running the model again to test accuracy
# fitting a linear Support Vector Regression model and validating using cross-validation
mod = SVR(kernel="linear")
mod.fit(x_train2, y_train)
cv_score = cross_val_score(mod, x_train2, y_train, cv=6)
print(
"\033[1m",
"The accuracy of (using cross-validation)",
np.round(cv_score.mean(), 2) * 100,
"%",
)
print("\033[1m", "The accuracy of ", mod.score(x_test2, y_test) * 100)
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import layers, Sequential, callbacks, models
data_dir = "../input/sport-celebrity-image-classification/Sports-celebrity images"
img_size = 256
batch_size = 32
df = tf.keras.preprocessing.image_dataset_from_directory(
directory=data_dir,
shuffle=True,
image_size=(img_size, img_size),
batch_size=batch_size,
)
# Load the images from the directory into a batched tf.data dataset.
classes = df.class_names # Classes of data
classes
# batch_size: the number of images in one batch
len(df)
# len(df) * batch_size = 14 * 32 -> 448 (roughly the number of files)
for image_batch, label_batch in df.take(1):
print(image_batch.shape)
print(label_batch.numpy())
plt.figure(figsize=(8, 8))
for image_batch, label_batch in df.take(1):
for i in range(12):
ax = plt.subplot(4, 3, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(classes[label_batch[i]])
plt.axis("off")
resize_and_rescale = tf.keras.Sequential(
[
layers.experimental.preprocessing.Resizing(img_size, img_size),
layers.experimental.preprocessing.Rescaling(1.0 / 255),
]
)
data_augmentation = tf.keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
layers.experimental.preprocessing.RandomRotation(0.2),
layers.experimental.preprocessing.RandomContrast(0.4),
layers.experimental.preprocessing.RandomZoom(0.35),
]
)
channels = 3
input_shape = (batch_size, img_size, img_size, channels)
model = models.Sequential(
[
resize_and_rescale,
data_augmentation,
layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Flatten(),
layers.Dense(64, activation="relu"),
layers.Dense(len(classes), activation="softmax"),
]
)
model.build(input_shape=input_shape)
model.summary()
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["accuracy"],
)
def train_test_split(
data,
train_split=0.7,
val_split=0.25,
test_split=0.05,
shuffle=True,
shuffle_size=10000,
):
data_size = len(data)
if shuffle:
data = data.shuffle(shuffle_size, seed=17)
train_size = int(train_split * data_size)
val_size = int(val_split * data_size)
train_data = data.take(train_size)
val_data = data.skip(train_size).take(val_size)
test_data = data.skip(train_size).skip(val_size)
return train_data, val_data, test_data
train_data, val_data, test_data = train_test_split(df)
print("Len of train:{}".format(len(train_data)))
print("Len of val:{}".format(len(val_data)))
print("Len of test:{}".format(len(test_data)))
train_data = train_data.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_data = val_data.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_data = test_data.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
history = model.fit(
train_data, epochs=100, batch_size=batch_size, verbose=1, validation_data=val_data
)
scores = model.evaluate(test_data)
history.params
# Accuracy Plot
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(
range(history.params["epochs"]),
history.history["accuracy"],
label="Training Accuracy",
)
plt.plot(
range(history.params["epochs"]),
history.history["val_accuracy"],
label="Validation Accuracy",
)
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(
range(history.params["epochs"]), history.history["loss"], label="Training Loss"
)
plt.plot(
range(history.params["epochs"]),
history.history["val_loss"],
label="Validation Loss",
)
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
def predicts(model, img):
    # use the `img` argument (a numpy array) instead of reaching into the outer loop
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    predicted_class = classes[np.argmax(predictions[0])]
    confidence = round(100 * (np.max(predictions[0])), 2)
    return predicted_class, confidence
plt.figure(figsize=(15, 15))
for images, labels in test_data.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
        predicted_class, confidence = predicts(model, images[i].numpy())
actual_class = classes[labels[i]]
plt.title(
f"Actual: {actual_class},\n Predicted: {predicted_class},\
\n Confidence: {confidence}%"
)
plt.axis("off")
|
import os
import cv2
import numpy as np
import pandas as pd
import SimpleITK as sitk
import matplotlib.pyplot as plt
import albumentations as A
from albumentations.augmentations.transforms import CLAHE
import matplotlib.image as mpimg
from skimage.io import imsave
import matplotlib.patches as patches
def load_image(image_path):
"""
Loads a PNG image from a given file path.
Args:
image_path (str): The file path of the image to load.
Returns:
numpy.ndarray: The loaded image as a NumPy array with pixel values in the [0, 255] range.
"""
# Load the image using Matplotlib
image = mpimg.imread(image_path)
# Normalize the array to the [0, 1] range
array_min = np.min(image)
array_max = np.max(image)
array = (image - array_min) / (array_max - array_min)
# Scale the array to the [0, 255] range
converted_image = (array * 255).astype(np.uint8)
return converted_image
def apply_clahe(image, clip_limit=8.0, tile_grid_size=(4, 4), p=1.0, alpha=0):
"""Applies CLAHE to a given image array using Albumentation library.
Args:
image (numpy.ndarray): The input image array.
clip_limit (float): Threshold for contrast limiting. Higher values result in more contrast.
tile_grid_size (tuple): Size of grid for histogram equalization. Higher values result in more smoothing.
alpha (int): Value added to the output image after CLAHE enhancement.
Returns:
numpy.ndarray: The image array with CLAHE applied.
"""
clahe_transform = CLAHE(
clip_limit=clip_limit, tile_grid_size=tile_grid_size, always_apply=True, p=p
)
augmented = clahe_transform(image=image)
final_img = augmented["image"] + alpha
return final_img
def put_bounding_box(image_location, loaded_image):
# Extract filename from image_location
full_filename = os.path.basename(image_location)
# filename = full_filename.split("_masked")[0] + os.path.splitext(full_filename)[1]
filename = full_filename
# Read the metadata CSV file into a pandas DataFrame
image_df = pd.read_csv("/kaggle/input/cxr-lung-dataset-png/metadata.csv")
# Extract the rows that correspond to the filename
rows = image_df.loc[image_df["img_name"] == filename]
# Create a figure and axes to display the image
fig, ax = plt.subplots()
plt.axis("off")
# Loop over the rows and add a bounding box for each one
for index, row in rows.iterrows():
# Extract the bounding box coordinates and dimensions from the row
x, y, height, width = row["x"], row["y"], row["height"], row["width"]
# Define the coordinates and dimensions of the bounding box
bbox = patches.Rectangle(
(x, y), width, height, linewidth=1, edgecolor="r", facecolor="none"
)
# Add the bounding box to the axes
ax.add_patch(bbox)
# Show the plot with all bounding boxes
# Display the image on the axes
plt.imshow(loaded_image, cmap="gray")
plt.show()
def gamma_correction(image, gamma=1.0):
"""Applies gamma correction to a given image using the specified gamma value.
Args:
image (numpy.ndarray): The input image to be corrected.
gamma (float): The gamma value to use for correction. Default is 1.0 (no correction applied).
Returns:
numpy.ndarray: The gamma-corrected image.
"""
# Ensure gamma is non-negative
if gamma < 0:
raise ValueError("Gamma value should be non-negative.")
# Normalize the image to the [0, 1] range
normalized_image = image.astype(np.float32) / 255.0
# Apply gamma correction
corrected_image = np.power(normalized_image, gamma)
# Scale the image to the [0, 255] range
corrected_image = (corrected_image * 255.0).clip(0, 255).astype(np.uint8)
return corrected_image
import itertools
image_id = "n0030"
# image_loc = "/kaggle/input/node21-masked-lungs/masked_images/"+image_id+"_masked.png"
image_loc = "/kaggle/input/cxr-lung-dataset-png/" + image_id + ".png"
image = load_image(image_loc)
def tune_clahe(
image,
clip_limit_range=(4.0, 8.0),
tile_grid_size_range=((4, 4), (8, 8), (16, 16)),
p_range=(0.2, 0.4, 0.8, 1.0),
alpha_range=(0, 20, 30),
):
for clip_limit, tile_grid_size, p, alpha in itertools.product(
clip_limit_range, tile_grid_size_range, p_range, alpha_range
):
clahe_image = apply_clahe(
image,
clip_limit=clip_limit,
tile_grid_size=tile_grid_size,
p=p,
alpha=alpha,
)
print(clip_limit, tile_grid_size, p, alpha)
put_bounding_box(image_loc, clahe_image)
tune_clahe(image)
def tune_gamma_correction(image, gamma_range=list(range(210, 310, 10))):
for gamma in [x / 100.0 for x in gamma_range]:
corrected_image = gamma_correction(image, gamma=gamma)
print(gamma)
put_bounding_box(image_loc, corrected_image)
tune_gamma_correction(image)
image_loc1 = "/kaggle/input/cxr-lung-dataset-png/" + image_id + ".png"
image1 = load_image(image_loc1)
put_bounding_box(image_loc1, image1)
def find_best_canny_threshold(image):
# Calculate the optimal threshold value using Otsu's method
threshold, _ = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Calculate the lower and upper thresholds for the Canny edge detection algorithm
lower_threshold = int(max(0, 0.7 * threshold))
upper_threshold = int(min(255, 1.3 * threshold))
# Return the lower and upper thresholds
return lower_threshold, upper_threshold
find_best_canny_threshold(image)
def apply_canny_edges(image, threshold1=100, threshold2=200):
# Apply Canny edge detection
edges = cv2.Canny(image, threshold1, threshold2)
# Return the resulting image
return edges
canny_image = apply_canny_edges(image, 100, 120)
put_bounding_box(image_loc, canny_image)
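# Hedged usage sketch tying the Otsu-based helper to the Canny step: the call above
# used manually chosen thresholds (100, 120), whereas here the automatically derived
# lower/upper thresholds are fed in instead.
lo_t, hi_t = find_best_canny_threshold(image)
otsu_canny = apply_canny_edges(image, lo_t, hi_t)
put_bounding_box(image_loc, otsu_canny)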
|
import numpy as np
import pandas as pd
data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
data
m, n = data.shape
data = np.array(data)
np.random.shuffle(data)
# *now we convert the data into array format since we will be implementing the neural network architecture without tensorflow/keras*
data
data.shape
m, n
# *Shuffling data serves the purpose of reducing variance and making sure that models remain general and overfit less.*
# We transpose the data so that each column is one training example, matching the matrix calculations below
batch = data[0:1000].T # transposing the data set for matrix calculations
batch
y_batch = batch[0] # first row is the target variable
x_batch = batch[1:n]
x_batch = x_batch / 255
data_train = data[1000:m].T
y_train = data_train[0]
x_train = data_train[1:n]
x_train = x_train / 255
x_train.shape
_, m_train = x_train.shape  # 784 pixel features (n) and 41000 training examples (m)
y_train  # 41000 target values, which matches the number of training examples
def init_params():
W1 = np.random.rand(10, 784) - 0.5
b1 = np.random.rand(10, 1) - 0.5
W2 = np.random.rand(10, 10) - 0.5
b2 = np.random.rand(10, 1) - 0.5
return W1, b1, W2, b2
def relu(z):
return np.maximum(z, 0)
def softmax(z):
exp = np.exp(z - np.max(z))
return exp / exp.sum(axis=0)
def forward_propagation(w1, b1, w2, b2, x):
z1 = w1.dot(x) + b1 # feed x (og data) into first layer
a1 = relu(z1) # activation function for first layer
z2 = w2.dot(a1) + b2 # passing the output og first layer into second layer
a2 = softmax(
z2
) # calculating probabilities of output of second layer using softmax
return z1, a1, z2, a2
# a = np.array([1, 0, 3])
# oh=pd.get_dummies(a)
# oh=np.array(oh)
# oh
def OHE(y):
one_hot_y = np.zeros((y.size, y.max() + 1))
one_hot_y[np.arange(y.size), y] = 1
    one_hot_y = one_hot_y.T  # transpose so that each column is one example (linear algebra convention: Z_i and Y share the same layout)
return one_hot_y
# y=np.array([1, 0, 3])
# OHE(y)
# a = np.array([1, 0, 3])
# zerosmat=np.zeros((y.size,y.max()+1))
# range=np.arange(y.size)
# oh=pd.get_dummies(a)
# oh=np.array(oh)
# range ,zerosmat ,y.max()+1
def relu_derivative(Z):
return Z > 0
def back_propgation(z1, a1, a2, w2, x, y):
OHE_y = OHE(y)
    dz2 = 2 * (a2 - OHE_y)  # shape (10, m)
    dw2 = (1 / m) * (dz2.dot(a1.T))  # shape (10, 10)
    db2 = 1 / m * np.sum(dz2)  # scalar
    dz1 = (w2.T).dot(dz2) * (relu_derivative(z1))  # shape (10, m)
    dw1 = (1 / m) * (dz1.dot(x.T))  # shape (10, 784)
    db1 = 1 / m * np.sum(dz1)  # scalar
return dw1, db1, dw2, db2
def update_parameters(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha):
W1 = W1 - alpha * dW1
b1 = b1 - alpha * db1
W2 = W2 - alpha * dW2
b2 = b2 - alpha * db2
return W1, b1, W2, b2
def get_predictions(A2):
return np.argmax(A2, 0)
def get_accuracy(predictions, Y):
print(predictions, Y)
return np.sum(predictions == Y) / Y.size
def gradient_descent(X, Y, alpha, iterations):
W1, b1, W2, b2 = init_params()
for i in range(iterations):
        Z1, A1, Z2, A2 = forward_propagation(W1, b1, W2, b2, X)
        dW1, db1, dW2, db2 = back_propgation(Z1, A1, A2, W2, X, Y)
W1, b1, W2, b2 = update_parameters(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha)
if i % 10 == 0:
print("Iteration: ", i)
predictions = get_predictions(A2)
print(get_accuracy(predictions, Y))
return W1, b1, W2, b2
W1, b1, W2, b2 = gradient_descent(x_train, y_train, 0.15, 600)
import matplotlib.pyplot as plt
def make_predictions(X, W1, b1, W2, b2):
_, _, _, A2 = forward_propagation(W1, b1, W2, b2, X)
predictions = get_predictions(A2)
return predictions
def test_prediction(index, W1, b1, W2, b2):
current_image = x_train[:, index, None]
prediction = make_predictions(x_train[:, index, None], W1, b1, W2, b2)
label = y_train[index]
print("Prediction: ", prediction)
print("Label: ", label)
current_image = current_image.reshape((28, 28)) * 255
plt.gray()
plt.imshow(current_image, interpolation="nearest")
plt.show()
test_prediction(0, W1, b1, W2, b2)
test_prediction(1, W1, b1, W2, b2)
test_prediction(2, W1, b1, W2, b2)
test_prediction(3, W1, b1, W2, b2)
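# The first 1000 shuffled rows were set aside above as a held-out batch (x_batch,
# y_batch) but never evaluated; a minimal hedged sketch of scoring the trained
# weights on it:
dev_predictions = make_predictions(x_batch, W1, b1, W2, b2)
print("dev accuracy:", get_accuracy(dev_predictions, y_batch))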
|
# # The Best Cities In The World For A Workation
# ---
# - author: โยษิตา ปึงศิริเจริญ
# - student id: 6341191826
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib qt
# %pylab qt
print(f"pandas version = {pd.__version__}")
print(f"numpy version = {np.__version__}")
print(f"seaborn version = {sns.__version__}")
pd.Timestamp.now()
df = pd.read_csv(
"https://raw.githubusercontent.com/YoBbell/BusInt2023Part1/main/best%20cities%20for%20a%20workation.csv"
)
df
# Explore the data
df.info()
# ---
# ### Transform the features onto the same scale using Yeo-Johnson
from sklearn import preprocessing
df.columns
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
# The Yeo-Johnson method is chosen because it supports negative and zero values, which makes it more flexible
pt = preprocessing.PowerTransformer(method="yeo-johnson", standardize=True)
mat = pt.fit_transform(df[cols])
mat[:5].round(4)
# Name the columns of the transformed data
tr_cols = [f"tr_{c}" for c in cols]
tr_cols
# Concatenate the transformed data into the same dataframe as df
ds = pd.concat([df, pd.DataFrame(mat, columns=tr_cols)], axis="columns")
ds
# Look at histograms of the data before standardizing/scaling
ds[cols].hist(figsize=(15, 10))
# Looking at the standardized data, the transformation works well on right-skewed columns, making them more normally distributed
ds[tr_cols].hist(figsize=(15, 10))
# #### Data after transformation
X = pd.DataFrame(mat, columns=cols)
X
# Look at the distribution of the data, pairing two factors at a time
sns.pairplot(X, plot_kws={"alpha": 0.4})
# ---
# ### K Means Clustering
from sklearn.cluster import KMeans
# ##### Find the optimal number of clusters using the elbow method
ssd = []
for k in range(2, 10):
m = KMeans(n_clusters=k)
m.fit(X)
ssd.append([k, m.inertia_])
ssd
# Convert to an array
xy = np.array(ssd)
print(xy)
# Build a dataframe
dd = pd.DataFrame(ssd, columns=["k", "ssd"])
dd
# Find the % change of SSD between each number of clusters
dd["pct_chg"] = dd["ssd"].pct_change() * 100
dd
# Plot showing the % change between each number of clusters
plt.plot(xy[:, 0], xy[:, 1], linestyle="--", marker="o")
for index, row in dd.iterrows():
plt.text(row["k"] + 0.02, row["ssd"] + 0.02, f'{row["pct_chg"]:.2f}', fontsize=10)
# **Choose the optimal number of clusters = 5, because after 5 clusters the inertia starts to level off: the curve drops steeply at first and then flattens noticeably from k = 5 onward (a silhouette cross-check is sketched below)**
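# A hedged cross-check of the elbow choice (not part of the original analysis): the
# silhouette score for a few values of k, where higher is better; the n_init and
# random_state settings below are illustrative.
from sklearn.metrics import silhouette_score
for k in range(2, 8):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(X)
    print(k, round(silhouette_score(X, labels), 3))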
# #### fit model
model = KMeans(n_clusters=5)
model
model.fit(X)
model.cluster_centers_.round(4)
# Find the smallest distance in each row to tell which cluster each point belongs to
model.transform(X)
# See which cluster each point has been labeled with
model.labels_
df["cluster"] = model.labels_
df.head()
# Count the frequency of each cluster; none of the clusters is too small, which is acceptable
sns.countplot(x="cluster", data=df)
# Pairwise scatter plots of all clusters, two factors at a time
sns.pairplot(df, vars=cols, hue="cluster", plot_kws={"alpha": 0.4})
# ---
# #### How to name the clusters, method 1: violin plot
fig, ax = plt.subplots(nrows=5, ncols=2, figsize=(20, 20))
ax = ax.ravel()
for i, col in enumerate(cols):
sns.violinplot(x="cluster", y=col, data=df, ax=ax[i])
# #### How to name the clusters, method 2: boxen plot
fig, ax = plt.subplots(nrows=5, ncols=2, figsize=(20, 20))
ax = ax.ravel()
for i, col in enumerate(cols):
sns.boxenplot(x="cluster", y=col, data=df, ax=ax[i])
dx = X.copy()  # copy so that adding the cluster column does not modify X
dx["cluster"] = model.labels_
dx
# Find the median of each cluster for every column
dx.groupby("cluster").median()
# Pull some rows from each cluster to inspect, to help name the clusters later
df.groupby("cluster").head(10).sort_values("cluster")
# #### How to name the clusters, method 3: heatmap
fig, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(
dx.groupby("cluster").median(),
cmap="Blues",
linewidths=1,
square=True,
annot=True,
fmt=".2f",
)
|
# ## The goal of this notebook
# Many people working with data add little text beyond the defaults when building plots.
# However, text is the easiest and most effective tool for conveying information that humans can understand.
# This notebook introduces how to use text effectively and neatly.
# ## Basic Setting for Explanation
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
print(f"numpy ver : {np.__version__}")
print(f"pandas ver : {pd.__version__}")
print(f"matplotlib ver : {mpl.__version__}")
plt.rcParams["figure.dpi"] = 150 # for high-resolution figure
data = pd.read_csv("/kaggle/input/netflix-shows/netflix_titles.csv")
data.sample(3)
# To create a bar plot from raw data, you first need to preprocess it into what you want to draw.
# In this notebook, I want to show you trends by year through a `Netflix Movie/TV Show per Years` chart.
cnt_tmp = (
data.groupby("type")["release_year"]
.value_counts()
.unstack()
.fillna(0)
.astype(int)
.T
)
movie_count, tvshow_count = cnt_tmp["Movie"], cnt_tmp["TV Show"]
# The groupby approach above is cleaner than computing each type separately:
# movie_count = data[data['type']=='Movie']['release_year'].value_counts().sort_index()
# tvshow_count = data[data['type']=='TV Show']['release_year'].value_counts().sort_index()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
np.random.seed(19680801)
X = np.linspace(0.5, 3.5, 100)
Y1 = 3 + np.cos(X)
Y2 = 1 + np.cos(1 + X / 0.75) / 2
Y3 = np.random.uniform(Y1, Y2, len(X))
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1, aspect=1)
def minor_tick(x, pos):
if not x % 1.0:
return ""
return f"{x:.2f}"
ax.xaxis.set_major_locator(MultipleLocator(1.000))
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_major_locator(MultipleLocator(1.000))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.xaxis.set_minor_formatter(minor_tick)
ax.set_xlim(0, 4)
ax.set_ylim(0, 4)
ax.tick_params(which="major", width=1.0)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", width=1.0, labelsize=10)
ax.tick_params(which="minor", length=5, labelsize=10, labelcolor="0.25")
ax.grid(linestyle="--", linewidth=0.5, color=".25", zorder=-10)
ax.plot(X, Y1, c=(0.25, 0.25, 1.00), lw=2, label="Blue signal", zorder=10)
ax.plot(X, Y2, c=(1.00, 0.25, 0.25), lw=2, label="Red signal")
ax.set_title("Anatomy of a figure (Text Ver.)", fontsize=20, verticalalignment="bottom")
ax.set_xlabel("X axis label")
ax.set_ylabel("Y axis label")
ax.legend()
def circle(x, y, radius=0.15):
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
circle = Circle(
(x, y),
radius,
clip_on=False,
zorder=10,
linewidth=1,
edgecolor="black",
facecolor=(0, 0, 0, 0.0125),
path_effects=[withStroke(linewidth=5, foreground="w")],
)
ax.add_artist(circle)
def text(x, y, text):
ax.text(
x,
y,
text,
backgroundcolor="white",
ha="center",
va="top",
weight="bold",
color="blue",
)
# Minor tick
circle(0.50, -0.10)
text(0.50, -0.32, "Minor tick label")
circle(-0.15, 3.00)
text(-0.15, 2.80, "Major tick label")
# X Label
circle(1.80, -0.27)
text(1.80, -0.45, "X axis label")
# Y Label
circle(-0.27, 1.80)
text(-0.27, 1.6, "Y axis label")
# Title
circle(1.60, 4.13)
text(1.60, 3.93, "Title")
# Legend
circle(3.70, 3.80)
text(3.70, 3.60, "Legend")
plt.show()
# In information visualization, text can be broadly classified as follows.
# - **Title**
# - Text that can describe the topic of the visualization
# - In general, there is only one title; when there is more than one subplot, we can distinguish the `subplot` titles from the `figure` title
# - **Label**
# - Provide information about the axis
# - `X-axis` and `Y-axis` in Cartesian coordinate system
# - In polar coordinates, the r and theta axes
# - **Tick(ticklabel)**
# - Provides scale information for the axis
# - **Legend**
# - Supplementary information used to classify two or more different data in a graph
# - **Text (Annotation)**
# - Added explanation for other visualizations
# - There is a method of adding text at a desired point or by using pointing
# In matplotlib, the following APIs are supported.
# |pyplot API|Objecte-oriented API|description|
# |-|-|-|
# |`suptitle`|`suptitle`|title of figure|
# |`title`|`set_title`|title of subplot `ax`|
# |`xlabel`|`set_xlabel`|x-axis label|
# |`ylabel`|`set_ylabel`|y-axis label|
# |`figtext`|`text`|figure text|
# |`text`|`text`|Axes text|
# |`annotate`|`annotate`|Axes annotation with arrow|
#
# Let's draw a basic plot with various text information added.
# Default Setting
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.bar(movie_count.index, movie_count, label="Movie") # Label for legend
ax.bar(tvshow_count.index, tvshow_count, bottom=movie_count, label="TV Show")
ax.set_title("Netflix Movie/TV Show per Years") # Title
ax.set_xlabel("Release Year") # Label
ax.set_ylabel("# of Movies/Tv Shows")
ax.legend() # Legend
plt.show()
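# The plot above uses title/label/legend; a minimal hedged sketch of the remaining
# APIs from the table (`suptitle`, `figtext`/`fig.text`, `text`, `annotate`), drawn
# on toy data with illustrative coordinates:
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot([0, 1, 2, 3], [1, 3, 2, 4], marker="o")
fig.suptitle("Figure-level title (suptitle)")
ax.set_title("Axes-level title")
fig.text(0.02, 0.02, "figure text via fig.text / plt.figtext", fontsize=8)
ax.text(0.5, 3.5, "free-form text via ax.text")
ax.annotate("peak", xy=(3, 4), xytext=(1.6, 3.7), arrowprops=dict(arrowstyle="->"))
plt.show()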
# ## Text Properties
#
# Default Setting for Text
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
ax.text(0.5, 0.5, s="Default")
# for remove spine and ticks and highlighting text
for spine in ["top", "bottom", "left", "right"]:
ax.spines[spine].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# ### Font Components
# Easy to Use Properties
# - `size` or `fontsize`
# - `xx-small`, `x-small`, `small`, `medium`, `large`, `x-large`, `xx-large`
# - `weight` or `fontweight`
# - `light`, `normal`, `medium`, `semibold`, `bold`, `heavy`, `black`
# - `family`
# - `serif`, `sans-serif`, `cursive`, `fantasy`, `monospace`
# - `style` or `fontstyle`
# - `normal`, `italic`, `oblique`
# ---
# - `variant`
# - `stretch`
# - `name` or `fontname`
# - `fontproperties`
#
# Size
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
for idx, size in enumerate(
["xx-small", "x-small", "small", "medium", "large", "x-large", "xx-large"], 1
):
ax.text(0.5, 1 - 0.12 * idx, size, size=size)
for spine in ["top", "bottom", "left", "right"]:
ax.spines[spine].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# Weight
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
for idx, weight in enumerate(
["light", "normal", "medium", "semibold", "bold", "heavy", "black"], 1
):
ax.text(0.5, 1 - 0.12 * idx, weight, weight=weight)
for spine in ["top", "bottom", "left", "right"]:
ax.spines[spine].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# Family
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
for idx, family in enumerate(
["serif", "sans-serif", "cursive", "fantasy", "monospace"], 1
):
ax.text(0.5, 1 - 0.15 * idx, family, family=family)
for spine in ["top", "bottom", "left", "right"]:
ax.spines[spine].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# Style
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
for idx, style in enumerate(["normal", "italic", "oblique"], 1):
ax.text(0.5, 1 - 0.15 * idx, style, style=style)
for spine in ["top", "bottom", "left", "right"]:
ax.spines[spine].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
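# The remaining properties from the list above (`variant`, `stretch`, `fontproperties`) can be
# set the same way; a short sketch follows (whether small-caps and stretch actually render
# differently depends on the fonts available in the environment).
from matplotlib.font_manager import FontProperties

fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
ax.text(0.5, 0.7, "small-caps variant", variant="small-caps")
ax.text(0.5, 0.5, "condensed stretch", stretch="condensed")
# FontProperties bundles several font settings into a single object
ax.text(0.5, 0.3, "FontProperties", fontproperties=FontProperties(family="monospace", weight="bold"))
for spine in ["top", "bottom", "left", "right"]:
    ax.spines[spine].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show()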
|
# ### Competition Link: https://www.kaggle.com/competitions/asl-signs
# ### CNN-LSTM EDA Link:https://www.kaggle.com/code/geyiming/cnn-lstm-eda
# ## Install
# Install
# ## Import
# Imports
# activate interactive mode of pd.dataframe
import pandas as pd
from itables import init_notebook_mode
init_notebook_mode(all_interactive=True, connected=True)
import os
import json
from tqdm import tqdm
import numpy as np
import itertools
import tensorflow as tf
# pytorch model
import torch
import torch.nn.functional as F
import torch.nn as nn
import seaborn as sns
import mediapipe as mp
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from matplotlib import animation
from pathlib import Path
import IPython
from IPython import display
from IPython.core.display import display, HTML, Javascript
from IPython.display import Markdown as md
from mediapipe.framework.formats import landmark_pb2
from sklearn.model_selection import train_test_split
# ## Configuration
# Config
class Config:
INPUT_ROOT = Path("/kaggle/input/asl-signs/")
    OUTPUT_ROOT = Path("/kaggle/working")
INDEX_MAP_FILE = INPUT_ROOT / "sign_to_prediction_index_map.json"
TRAN_FILE = INPUT_ROOT / "train.csv"
INDEX = "sequence_id"
ROW_ID = "row_id"
def read_index_map(file_path=Config.INDEX_MAP_FILE):
"""Reads the sign to predict as json file."""
with open(file_path, "r") as f:
result = json.load(f)
return result
LANDMARK_FILES_DIR = "/kaggle/input/asl-signs/train_landmark_files"
label_map = read_index_map()
train_df = pd.read_csv(
"/kaggle/input/gislr-extended-train-dataframe/extended_train.csv"
)
train_df["label"] = train_df["sign"].map(label_map)
# ## Helpers
# Helpers
ROWS_PER_FRAME = 543
def load_relevant_data_subset(pq_path):
data_columns = ["x", "y", "z"]
data = pd.read_parquet(pq_path, columns=data_columns)
n_frames = int(len(data) / ROWS_PER_FRAME)
data = data.values.reshape(n_frames, ROWS_PER_FRAME, len(data_columns))
return data.astype(np.float32)
# https://www.kaggle.com/code/ted0071/gislr-visualization
def read_index_map(file_path=Config.INDEX_MAP_FILE):
"""Reads the sign to predict as json file."""
with open(file_path, "r") as f:
result = json.load(f)
return result
def read_train(file_path=Config.TRAN_FILE):
"""Reads the train csv as pandas data frame."""
train_df = pd.read_csv(file_path).set_index(Config.INDEX)
train_df["label"] = train_df["sign"].map(read_index_map())
return train_df
def read_landmark_data_by_path(file_path, input_root=Config.INPUT_ROOT):
"""Reads landmak data by the given file path."""
data = pd.read_parquet(input_root / file_path)
return data.set_index(Config.ROW_ID)
def read_landmark_data_by_id(sequence_id, train_data):
"""Reads the landmark data by the given sequence id."""
file_path = train_data.loc[sequence_id]["path"]
return read_landmark_data_by_path(file_path)
# Helper Functions
# [C1] adjusted from Roland Abel: https://www.kaggle.com/code/ted0071/gislr-visualization
train_data = read_train()
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
mp_face_mesh = mp.solutions.face_mesh
mp_pose = mp.solutions.pose
# contour connections
CONTOURS = list(itertools.chain(*mp_face_mesh.FACEMESH_CONTOURS))
def create_blank_image(height, width):
return np.zeros((height, width, 3), np.uint8)
def get_landmarks_dataframe(data, frame_id, landmark_type):
"""Get a dataframe with the landmarks for the specified frame and landmark type."""
df = data.groupby(["frame", "type"]).get_group((frame_id, landmark_type)).copy()
if landmark_type == "face":
df.loc[~df["landmark_index"].isin(CONTOURS), "x"] = float(
"NaN"
) # -1*df[~df['landmark_index'].isin(CONTOURS)]['x'].values
return df
def get_landmark_list(df):
"""Get a list of normalized landmarks from the specified dataframe."""
landmarks = [
landmark_pb2.NormalizedLandmark(x=lm.x, y=lm.y, z=lm.z)
for idx, lm in df.iterrows()
]
landmark_list = landmark_pb2.NormalizedLandmarkList(landmark=landmarks)
return landmark_list
def draw_landmarks(
image,
landmark_list,
connection_type,
landmark_color,
connection_color,
thickness,
circle_radius,
):
"""Draw landmarks and connections on the specified image."""
mp_drawing.draw_landmarks(
image=image,
landmark_list=landmark_list,
connections=connection_type,
landmark_drawing_spec=mp_drawing.DrawingSpec(
color=landmark_color, thickness=thickness, circle_radius=circle_radius
),
connection_drawing_spec=mp_drawing.DrawingSpec(
color=connection_color, thickness=thickness, circle_radius=circle_radius
),
)
return image
# Clean representation for frame idx map inspired by Darien Schettler [C3]
IDX_MAP = {
"contours": list(set(CONTOURS)),
"left_hand": np.arange(468, 489).tolist(),
"upper_body": np.arange(489, 511).tolist(),
"right_hand": np.arange(522, 543).tolist(),
}
FIXED_FRAMES = 37 # based on the above observations
# ## Feature Processing
class FeaturePreprocess(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x_in):
n_frames = x_in.shape[0]
# Normalization to a common mean by Heng CK [C4]
x_in = x_in - x_in[~torch.isnan(x_in)].mean(0, keepdim=True)
x_in = x_in / x_in[~torch.isnan(x_in)].std(0, keepdim=True)
# Landmarks reduction
contours = x_in[:, IDX_MAP["contours"]]
lhand = x_in[:, IDX_MAP["left_hand"]]
pose = x_in[:, IDX_MAP["upper_body"]]
rhand = x_in[:, IDX_MAP["right_hand"]]
x_in = torch.cat([contours, lhand, pose, rhand], 1) # (n_frames, 192, 3)
# Replace nan with 0 before Interpolation
x_in[torch.isnan(x_in)] = 0
# Frames interpolation inspired by Robert Hatch [C2]
        # If n_frames < FIXED_FRAMES, use linear interpolation,
        # else, use nearest-neighbor interpolation
x_in = x_in.permute(2, 1, 0) # (3, 192, n_frames)
if n_frames < FIXED_FRAMES:
x_in = F.interpolate(x_in, size=(FIXED_FRAMES), mode="linear")
else:
x_in = F.interpolate(x_in, size=(FIXED_FRAMES), mode="nearest-exact")
return x_in.permute(2, 1, 0)
x_in = torch.tensor(load_relevant_data_subset(train_df.path[0]))
feature_preprocess = FeaturePreprocess()
feature_preprocess(x_in).shape, x_in[0]
# ## Save processing features
# adapted and adjusted from Robert Hatch [C2]
right_handed_signer = [
26734,
28656,
25571,
62590,
29302,
49445,
53618,
18796,
4718,
2044,
37779,
30680,
]
left_handed_signer = [
16069,
32319,
36257,
22343,
27610,
61333,
34503,
55372,
]
both_hands_signer = [37055]
messy = [29302]
def convert_row(row, right_handed=True):
x = torch.tensor(load_relevant_data_subset(row[1].path))
x = feature_preprocess(x).cpu().numpy()
return x, row[1].label
def convert_and_save_data(df):
total = df.shape[0]
npdata = np.zeros((total, 37, 192, 3))
nplabels = np.zeros(total)
for i, row in tqdm(enumerate(df.iterrows()), total=total):
(x, y) = convert_row(row)
npdata[i, :, :, :] = x
nplabels[i] = y
np.save("feature_data.npy", npdata)
np.save("feature_labels.npy", nplabels)
convert_and_save_data(train_df)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
df = pd.read_csv("../input/most-subscribed-1000-youtube-channels/topSubscribed.csv")
df.head()
df.info()
# Let's find how many rows and columns are there in our data
df.shape
# # Clean and Prepare Data
# Need to clean the "Subscribers", "Video Views" and "Video Count" columns to remove "," and change their type to int for further analysis
cols = ["Subscribers", "Video Views", "Video Count"]
for i in cols:
df[i] = df[i].str.replace(",", "")
df.head()
# changing the datatype to int64
for i in cols:
df[i] = df[i].astype("int64")
df.info()
df["Category"].unique()
# we have to replace the irregularities in the category column
df["Category"] = df["Category"].str.replace(
"https://us.youtubers.me/global/all/top-1000-most_subscribed-youtube-channels",
"other",
)
df["Category"] = df["Category"].str.capitalize()
df["Category"].unique()
df.info()
# Check null values
df.isnull().sum()
df.describe()
df = df[df["Video Views"] != 0]
df["Category"].value_counts()
df["Started"].value_counts()
# 1970 is an outlier and therefore we need to remove it from the dataframe
df.drop(df[df["Started"] == 1970].index, axis=0, inplace=True)
df["Started"].value_counts()
df.shape
# # Data Visualization
# # Number of Channels started over the years
# Need to group data as per the year when the channel started
var1 = df.groupby(["Started"])["Youtube Channel"].count()
var1 = var1.reset_index()
fig1 = px.bar(var1, x="Started", y="Youtube Channel", template="plotly_dark")
fig1.show()
# # Total Video Views and Total Subscribers by Category
var2 = (
df.groupby(["Category"])
.agg({"Video Views": "sum", "Subscribers": "sum"})
.sort_values(by=["Video Views"], ascending=False)
)
var2 = var2.reset_index()
fig2 = px.scatter(
var2,
x="Subscribers",
y="Video Views",
size="Video Views",
color="Category",
log_x=True,
size_max=40,
title="Video views and Subscribers By Category",
template="plotly_dark",
)
fig2.show()
# # Distribution of Subscribers in Each Category
px.box(
df,
y="Subscribers",
x="Category",
color="Category",
title="Subscribers distribution by category",
template="plotly_dark",
)
# # Distribution of Video Views in Each Category
px.box(
df,
y="Video Views",
x="Category",
color="Category",
title="Video Views distribution by category",
template="plotly_dark",
)
# # Video Views and Subscribers by Youtube Channel
fig3 = px.scatter(
df,
x="Subscribers",
y="Video Views",
size="Video Views",
color="Youtube Channel",
log_x=True,
size_max=60,
title="Video views and Subscribers By Youtube Channel",
template="plotly_dark",
)
fig3.show()
var4 = (
df.groupby(["Youtube Channel"])["Subscribers"]
.sum()
.sort_values(ascending=False)
.head(10)
)
var4 = var4.reset_index()
fig4 = px.pie(
var4,
values="Subscribers",
names="Youtube Channel",
title="Youtube Channels with Subscribers",
template="plotly_dark",
)
fig4.update_traces(textposition="inside", textinfo="percent+label")
fig4.update_layout(
title={
"text": "Top 10 Youtube Channels with Subscribers",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
}
)
fig4.show()
# # Top 10 Channels with maximum Video Views
var5 = (
df.groupby(["Youtube Channel"])["Video Views"]
.sum()
.sort_values(ascending=False)
.head(10)
)
var5 = var5.reset_index()
px.bar(
var5,
x="Youtube Channel",
y="Video Views",
template="plotly_dark",
)
# # Number of Youtube Channels in 20M Subscriber Bins
px.histogram(
    df,
    x="Subscribers",
    title="Number of Youtube Channels by Subscriber Count",
    nbins=20,
    template="plotly_dark",
)
# # Correlation Heatmap
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, center=True)
fig6 = px.scatter_matrix(df)
fig6.update_layout(
width=1000,
height=1000,
)
|
import os
import cv2
import gc
import numpy as np
import pandas as pd
import itertools
from tqdm.autonotebook import tqdm
import albumentations as A
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
import timm
from transformers import DistilBertModel, DistilBertConfig, DistilBertTokenizer
import os
import collections
import json
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_hub as hub
import tensorflow_text as text
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tqdm import tqdm
import pandas as pd
from transformers import DistilBertModel, DistilBertConfig, DistilBertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
# Suppressing tf.hub warnings
tf.get_logger().setLevel("ERROR")
df = pd.read_csv(
"../input/flickr-image-dataset/flickr30k_images/results.csv", delimiter="|"
)
df.columns = ["image", "caption_number", "caption"]
df["caption"] = df["caption"].str.lstrip()
df["caption_number"] = df["caption_number"].str.lstrip()
df.loc[19999, "caption_number"] = "4"
df.loc[19999, "caption"] = "A dog runs across the grass ."
# Every image has 5 captions, so assign the same id to each group of 5 consecutive rows
ids = [id_ for id_ in range(len(df) // 5) for i in range(5)]
df["id"] = ids
df.to_csv("captions.csv", index=False)
df.head()
class CFG:
debug = False
image_path = "../input/flickr-image-dataset/flickr30k_images/flickr30k_images"
captions_path = "."
batch_size = 32
num_workers = 4
head_lr = 1e-3
image_encoder_lr = 1e-4
text_encoder_lr = 1e-5
weight_decay = 1e-3
patience = 1
factor = 0.8
epochs = 2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "resnet50"
image_embedding = 2048
text_encoder_model = "distilbert-base-uncased"
text_embedding = 768
text_tokenizer = "distilbert-base-uncased"
max_length = 200
pretrained = True # for both image encoder and text encoder
trainable = True # for both image encoder and text encoder
temperature = 1.0
# image size
size = 224
# for projection head; used for both image and text encoders
num_projection_layers = 1
projection_dim = 256
dropout = 0.1
class AvgMeter:
def __init__(self, name="Metric"):
self.name = name
self.reset()
def reset(self):
self.avg, self.sum, self.count = [0] * 3
def update(self, val, count=1):
self.count += count
self.sum += val * count
self.avg = self.sum / self.count
def __repr__(self):
text = f"{self.name}: {self.avg:.4f}"
return text
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group["lr"]
class CLIPDataset(torch.utils.data.Dataset):
def __init__(self, image_filenames, captions, tokenizer, transforms):
"""
        image_filenames and captions must have the same length; so, if there are
        multiple captions for each image, the image_filenames must contain repeated
        file names
"""
self.image_filenames = image_filenames
self.captions = list(captions)
self.encoded_captions = tokenizer(
list(captions), padding=True, truncation=True, max_length=CFG.max_length
)
self.transforms = transforms
def __getitem__(self, idx):
item = {
key: torch.tensor(values[idx])
for key, values in self.encoded_captions.items()
}
image = cv2.imread(f"{CFG.image_path}/{self.image_filenames[idx]}")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = self.transforms(image=image)["image"]
item["image"] = torch.tensor(image).permute(2, 0, 1).float()
item["caption"] = self.captions[idx]
return item
def __len__(self):
return len(self.captions)
def get_transforms(mode="train"):
if mode == "train":
return A.Compose(
[
A.Resize(CFG.size, CFG.size, always_apply=True),
A.Normalize(max_pixel_value=255.0, always_apply=True),
]
)
else:
return A.Compose(
[
A.Resize(CFG.size, CFG.size, always_apply=True),
A.Normalize(max_pixel_value=255.0, always_apply=True),
]
)
def make_train_valid_dfs():
dataframe = pd.read_csv(f"{CFG.captions_path}/captions.csv")
max_id = dataframe["id"].max() + 1 if not CFG.debug else 100
image_ids = np.arange(0, max_id)
np.random.seed(42)
valid_ids = np.random.choice(
image_ids, size=int(0.2 * len(image_ids)), replace=False
)
train_ids = [id_ for id_ in image_ids if id_ not in valid_ids]
train_dataframe = dataframe[dataframe["id"].isin(train_ids)].reset_index(drop=True)
valid_dataframe = dataframe[dataframe["id"].isin(valid_ids)].reset_index(drop=True)
return train_dataframe, valid_dataframe
def build_loaders(dataframe, tokenizer, mode):
transforms = get_transforms(mode=mode)
dataset = CLIPDataset(
dataframe["image"].values,
dataframe["caption"].values,
tokenizer=tokenizer,
transforms=transforms,
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=CFG.batch_size,
num_workers=CFG.num_workers,
shuffle=True if mode == "train" else False,
)
return dataloader
train_df, valid_df = make_train_valid_dfs()
def create_vision_encoder(dropout_rate, trainable=False):
    # Load the pre-trained ResNet50 model to be used as the base encoder.
resnet = keras.applications.ResNet50(
include_top=False, weights="imagenet", pooling="avg"
)
# Set the trainability of the base encoder.
for layer in resnet.layers:
layer.trainable = trainable
# Receive the images as inputs.
inputs = layers.Input(shape=(224, 224, 3), name="image_input")
# Preprocess the input image.
resnet_input = tf.keras.applications.resnet.preprocess_input(inputs)
    # Generate the embeddings for the images using the ResNet50 model.
embeddings = resnet(resnet_input)
# Create the vision encoder model.
return keras.Model(inputs, embeddings, name="vision_encoder")
def create_text_encoder(
model_name=CFG.text_encoder_model,
pretrained=CFG.pretrained,
trainable=CFG.trainable,
):
if pretrained:
model = DistilBertModel.from_pretrained(model_name)
else:
model = DistilBertModel(config=DistilBertConfig())
for p in model.parameters():
p.requires_grad = trainable
return model
def project_embeddings(
embeddings,
num_projection_layers,
projection_dim=CFG.projection_dim,
dropout_rate=CFG.dropout,
):
    projected_embeddings = layers.Dense(units=projection_dim)(embeddings)
    for _ in range(num_projection_layers):
        x = tf.nn.gelu(projected_embeddings)
        x = layers.Dense(projection_dim)(x)
x = layers.Dropout(dropout_rate)(x)
x = layers.Add()([projected_embeddings, x])
projected_embeddings = layers.LayerNormalization()(x)
return projected_embeddings
|
# A language model is trained on large amounts of textual data to understand the patterns and structure of language. The primary goal of a language model is to predict the probability of the next word or sequence of words in a sentence given the previous words.
# Language models can be used for a variety of natural language processing (NLP) tasks, such as text classification, machine translation, text summarization, speech recognition, and sentiment analysis. There are many types of language models, ranging from simple n-gram models to more complex neural network-based models such as recurrent neural networks (RNNs) and transformers.
# The transformer architecture is currently mostly used for language models and can be divided into an encoder and/or decoder architecture depending on the specific task. In general, transformers are trained on a large quantity of unlabeled text using self-supervised learning. The training of a transformer model on a lot of data takes a lot of computational effort and the training of language models can get expensive very quickly. So, often the best way to have a task-specific transformer model is to use a pre-trained model from [Hugging Face](https://huggingface.co/) and fine-tune the model based on your data.
# Based on my work experience with invoices, fine-tuning a pre-existing model didn't work well. I got the best text-classification results after fine-tuning a French base-model on German invoices, but the overall F1-score still wasn't worth the effort. I assume that the content and structure of an invoice differ too much from the usual training data (e.g. no continuous text and many numbers). Additionally, the tokenizers of the pre-trained models are not optimized for invoices, so the context window of a transformer holds less text, which makes the training less effective.
# I worked on text classification of invoices for multiple clients. I trained a base-model on a few million invoices (mostly German and English) and fine-tuned the base model for each client with around 2000 - 50000 invoices and 70 - 2000 labels. Initially I used the Longformer architecture ([Beltagy et al. 2020](https://arxiv.org/pdf/2004.05150.pdf)), but because of a [bug](https://github.com/pytorch/pytorch/issues/94810) I couldn't deploy the models. Despite its limitations, I used the BERT architecture [Devlin et al. 2019](https://arxiv.org/pdf/1810.04805.pdf) instead.
# Obviously, I can't share anything from my work-related data, so I use a German corpus and draft the training of a base-model. Another tutorial can be found [on Hugging Face](https://huggingface.co/blog/how-to-train).
import warnings
warnings.filterwarnings("ignore")
import os
from pathlib import Path
import random
from datasets import load_dataset, Dataset
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
import torch
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
from transformers import DataCollatorForLanguageModeling
from transformers import BertConfig, BertForMaskedLM, PreTrainedTokenizerFast
from transformers import Trainer, TrainingArguments, EarlyStoppingCallback
from tokenizers import normalizers
from tokenizers.decoders import ByteLevel as ByteLevelDecoder
from tokenizers.normalizers import NFD, Lowercase, NFKC
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Whitespace, ByteLevel
from tokenizers import Tokenizer, models, trainers
from tqdm.auto import tqdm
os.environ["WANDB_DISABLED"] = str("true")
os.environ["TOKENIZERS_PARALLELISM"] = str("true")
# ## Split data
# Finding a good dataset is hard. I picked the [3 Million German Sentences](https://www.kaggle.com/datasets/rtatman/3-million-german-sentences) to showcase a short model training. Also, I don't expect any useful model here.
# Each sentence has an id and a timestamp. For training a model, this is not needed. Additionally, I use around 3000 sentences for evaluation during model training. The datasets would fit in memory, but to be able to scale, I will use a streaming approach.
def clean_text(examples):
return {"text": examples["text"].split("\t")[1].split(":")[-1].strip()}
dataset = load_dataset(
"text",
data_files="/kaggle/input/3-million-german-sentences/deu_news_2015_3M-sentences.txt",
split="train",
streaming=False,
)
dataset = dataset.map(clean_text)
dataset = dataset.train_test_split(test_size=0.01)
train_length = len(dataset["train"])
eval_length = len(dataset["test"])
# convenience beats best practice
df = pd.DataFrame({"text": dataset["train"]["text"]})
df.to_parquet("/kaggle/working/train_data.snap.parquet", compression="snappy")
df = pd.DataFrame({"text": dataset["test"]["text"]})
df.to_parquet("/kaggle/working/eval_data.snap.parquet", compression="snappy")
# del [dataset, df]
# Finally, let's "load" the data. Did I mention that I really like the [datasets library](https://huggingface.co/docs/datasets/index)?
train_dataset = load_dataset(
"parquet",
data_files="/kaggle/working/train_data.snap.parquet",
streaming=True,
split="train",
)
eval_dataset = load_dataset(
"parquet",
data_files="/kaggle/working/eval_data.snap.parquet",
streaming=True,
split="train",
)
# ## Tokenizer
# A tokenizer converts raw text into smaller units, such as words or subwords, that can be used for training machine learning models. The tokenizer takes as input a string of text and outputs a sequence of tokens, each of which represents a distinct unit of meaning. A subword tokenizer breaks down words into smaller subword units. This is useful for handling out-of-vocabulary (OOV) words, which are words that are not in the training data.
# I use the [Byte-Pair Encoding tokenizer](https://huggingface.co/course/chapter6/5?fw=pt), which is a data compression algorithm, where the most common pair of consecutive bytes of data is replaced with a byte that does not occur in that data ([Gage 1994](https://www.derczynski.com/papers/archive/BPE_Gage.pdf), [Sennrich et al. 2016](https://arxiv.org/pdf/1508.07909.pdf)).
# First, we define our BPE tokenizer. As a next step, we define the preprocessing steps for the incoming text data. Here, we apply only unicode normalization and lower-casing, and convert the input to a ByteLevel representation that is split on whitespace. As a last step, the decoder maps a tokenized input back to the original text.
# In German, lower-casing the text doesn't really make sense, but since the word corpus is small, we want to make it easier for the BPE tokenizer.
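# To make the BPE merge rule above concrete, here is a tiny, self-contained sketch of a single
# BPE training step on a toy corpus (plain Python, independent of the tokenizers library and
# purely illustrative): count the most frequent adjacent symbol pair and merge it into a new
# symbol; real BPE repeats this until the target vocabulary size is reached.
from collections import Counter

toy_corpus = {("l", "o", "w"): 5, ("l", "o", "w", "e", "r"): 2, ("n", "e", "w"): 3}

def most_frequent_pair(corpus):
    # Count how often each adjacent symbol pair occurs, weighted by word frequency
    pairs = Counter()
    for symbols, freq in corpus.items():
        for a, b in zip(symbols, symbols[1:]):
            pairs[(a, b)] += freq
    return pairs.most_common(1)[0][0]

def merge_pair(corpus, pair):
    # Replace every occurrence of the chosen pair with a single merged symbol
    merged = {}
    for symbols, freq in corpus.items():
        out, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == pair:
                out.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                out.append(symbols[i])
                i += 1
        merged[tuple(out)] = freq
    return merged

best_pair = most_frequent_pair(toy_corpus)  # ('l', 'o'), tied with ('o', 'w') at frequency 7
print(best_pair, merge_pair(toy_corpus, best_pair))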
tokenizer = Tokenizer(models.BPE())
tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase()])
# Our tokenizer also needs a pre-tokenizer responsible for converting the input to a ByteLevel representation.
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[ByteLevel(add_prefix_space=False), Whitespace()]
)
tokenizer.decoder = ByteLevelDecoder()
# Here is a small example of how the normalization and pre-tokenization work. Especially the special German letters can cause some problems.
print(tokenizer.normalizer.normalize_str("""Gute Laue ist wichtig. Immer. Äoüß"""))
print(
tokenizer.pre_tokenizer.pre_tokenize_str("""Gute Laue ist wichtig. Immer. Äoüß""")
)
# Next, we define the base vocabulary of the tokenizer. Here, we map the UTF-8 bytes to unicode strings, which will later be used to initialize the tokenizer. As you see, the German letters are now included.
byte_to_unicode_map = bytes_to_unicode()
unicode_to_byte_map = dict((v, k) for k, v in byte_to_unicode_map.items())
base_vocab = list(unicode_to_byte_map.keys())
print(f"Size of our base vocabulary: {len(base_vocab)}")
print(f"First element: `{base_vocab[0]}`, last element: `{base_vocab[-1]}`")
print(base_vocab[160])
print(base_vocab[177])
print(base_vocab[184])
# Here, we define the tokenization trainer. We set the size of the vocabulary and the special tokens we use.
vocab_size = 32768
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
show_progress=True,
initial_alphabet=base_vocab,
special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
# To avoid loading the dataset into memory, we stream the training data with the batch_iterator.
iter_dataset = iter(train_dataset)
def batch_iterator(batch_size=10):
for _ in tqdm(range(0, round(train_length, -1), batch_size)):
yield [next(iter_dataset)["text"] for _ in range(batch_size)]
# Finally, we can train our tokenizer.
tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
# Let's have some sanity checks. Since the vocabulary is quite special, I encode a sentence from the data.
# It looks okayish so far. The tokenizer recognizes common words and represents them with one token. Longer and uncommon words are represented with multiple tokens. Additionally, Ġ indicates that a whitespace appears before the word.
print(tokenizer.get_vocab_size())
output = tokenizer.encode(
"Die Authentifikation wird mit OpenLDAP erledigt, die Einrichtung oder Aktualisierung von Systemen mit Yum und Servern für DNS, DHCP und TFTP."
)
print(output.tokens)
print(output.ids)
# The dataset comes with a word list. Here it doesn't add much, but in case you train your model on very specific text, you can use such a list to evaluate the efficiency of the tokenizer on very specific words.
words = pd.read_csv(
"/kaggle/input/3-million-german-sentences/deu_news_2015_3M-words.csv", dtype=str
)
words.rename(columns={"!": "word", "53658": "count"}, inplace=True)
words = words[["word", "count"]]
print(words.head())
many_tokens = []
for word in tqdm(words["word"].tolist()):
if not isinstance(word, str):
continue
enc = tokenizer.encode(word)
if len(enc) > 2: # ignore cls and sep
many_tokens.append(word)
# The tokenizer behaves okayish. For 75% of the words in the text corpus, more than 2 tokens are used. It would be helpful to increase the vocabulary size and probably also the amount of training data.
print(f"{len(many_tokens)/words.shape[0]:.2f}")
# Anyway, let's save the tokenizer.
tokenizer.save(f"/kaggle/working/tokenizer.json")
# The transformers library cannot work with a raw tokenizers object directly, so we wrap it in the PreTrainedTokenizerFast class. Everything stays the same; we just need to register the special tokens again.
tk_tokenizer = Tokenizer.from_file(f"/kaggle/working/tokenizer.json")
tokenizer = PreTrainedTokenizerFast(tokenizer_object=tk_tokenizer)
tokenizer.add_special_tokens(
{
"pad_token": "[PAD]",
"unk_token": "[UNK]",
"sep_token": "[SEP]",
"cls_token": "[CLS]",
"bos_token": "[CLS]",
"eos_token": "[SEP]",
"mask_token": "[MASK]",
}
)
# ## Model Training
# We processed the data, set up data streaming, and trained a tokenizer. Now we can finally start with the model training. I follow the BERT architecture [Devlin et al. 2019](https://arxiv.org/pdf/1810.04805.pdf) and also use their initial setup and hyperparameters. The model is trained via masked language modelling, where 20 % of the tokens are selected for masking. Of those selected tokens, 80 % are replaced with the [MASK] token, 10 % are replaced with random tokens, and 10 % are left unchanged. [Hugging Face](https://github.com/huggingface/transformers/blob/main/src/transformers/data/data_collator.py#L607) provides an implementation for it.
# [Wettig et al. 2023](https://arxiv.org/pdf/2202.08005.pdf) scrutinized the impact of the MLM parameters on the model results.
# Training a masked language model will basically randomly mask tokens, which then the model will predict based on the context of the whole sentence.
print(f"Mask Token id: {tokenizer.mask_token_id}")
output = tokenizer.encode(
"Die Authentifikation wird mit OpenLDAP erledigt, die Einrichtung oder Aktualisierung von Systemen mit Yum und Servern für DNS, DHCP und TFTP."
)
masked = random.sample(range(0, 36), 7)
for mask in masked:
output[mask] = tokenizer.mask_token_id
print(f"Masked encoding: {tokenizer.decode(output)}")
# To setup the Bert Model, we use only two attention layers with four attention heads for each layer. Also I reduce the context size, since the text data contains only short sentences. This should speed up the training.
# To get an idea about the attention mechanism, please follow my short [blog post](https://steffenhaeussler.github.io/posts/attention_layer/).
use_mlm = True
mlm_probability = 0.2 # still keeping the 80 - 10 - 10 rule
max_length = 256 # 512
block_size = 256 # 512
max_position_embeddings = 256 # 512
hidden_size = 768
num_hidden_layers = 2 # 12
num_attention_heads = 4 # 12
intermediate_size = 3072
drop_out = 0.1
model_path = "/kaggle/working/model"
config = BertConfig(
# attention_window = [block_size]*num_attention_heads,
# mask_token_id = 4,
bos_token_id=1,
sep_token_id=2,
# pad_token_id = 3,
eos_token_id=2,
max_position_embeddings=max_position_embeddings,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act="gelu",
hidden_dropout_prob=drop_out,
attention_probs_dropout_prob=drop_out,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
vocab_size=vocab_size,
use_cache=True,
classifier_dropout=None,
onnx_export=False,
)
# As the next step, we need to encode our streaming data via the tokenizer. We set the max length of the tokenized output to 256. Depending on the text length, the encoding is padded up to this length or truncated.
def encode(examples, max_length, tokenizer):
return tokenizer.batch_encode_plus(
examples["text"],
padding=True,
truncation=True,
max_length=max_length,
# return_special_tokens_mask=True,
# return_tensors="pt"
)
train_dataset = train_dataset.map(
    lambda x: encode(x, max_length, tokenizer),
    batched=True,
)
eval_dataset = eval_dataset.map(
    lambda x: encode(x, max_length, tokenizer),
    batched=True,
)
train_dataset = train_dataset.with_format("torch")
eval_dataset = eval_dataset.with_format("torch")
# Here, we set up the data collator with the masked language modelling.
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=use_mlm, mlm_probability=mlm_probability
)
# The model will have around 40 million parameters. With torch 2, we would be able to compile the model, which will boost the training time.
model = BertForMaskedLM(config=config)
print(f"n of parameters: {model.num_parameters():_}")
# comp_model = torch.compile(model)
# print(f"n of parameters: {comp_model.num_parameters()}")
# ### Trainer config
# As a last step, we have to setup the model training. We use the AdamW optimizer [Loshchilov & Hutter 2019](https://arxiv.org/pdf/1711.05101.pdf).
# We train the model for only one epoch. The rest of the parameters follow the initial Bert paper.
# We have 86_000 training steps, so we will evaluate the model after 5000 steps.
learning_rate = 1e-4 # bert
weight_decay = 1e-2 # bert
lr_scheduler_type = "linear"
num_train_epochs = 1 # 5 but training set is small
train_batch_size = 32
eval_batch_size = 32
gradient_accumulation_steps = 2
eval_accumulation_steps = 2
warmup_steps = 1_000
adam_beta1 = 0.9 # bert
adam_beta2 = 0.999 # bert
adam_epsilon = 1e-8 # bert
max_grad_norm = 1.0 # bert
max_steps = train_length // train_batch_size # 1_000_000
print(max_steps)
training_args = TrainingArguments(
output_dir=model_path,
overwrite_output_dir=True,
learning_rate=learning_rate,
weight_decay=weight_decay,
lr_scheduler_type=lr_scheduler_type,
num_train_epochs=num_train_epochs,
adam_beta1=adam_beta1,
adam_beta2=adam_beta2,
adam_epsilon=adam_epsilon,
max_grad_norm=max_grad_norm,
evaluation_strategy="steps",
eval_steps=5_000,
max_steps=max_steps,
per_device_train_batch_size=train_batch_size, # depends on memory
per_device_eval_batch_size=eval_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
# eval_accumulation_steps=eval_accumulation_steps,
save_strategy="steps",
save_steps=5_000,
save_total_limit=3,
prediction_loss_only=False,
report_to="tensorboard",
log_level="warning",
logging_strategy="steps",
fp16=True,
fp16_full_eval=True,
load_best_model_at_end=True,
metric_for_best_model="loss",
greater_is_better=False,
push_to_hub=False,
dataloader_pin_memory=True,
)
# For fine-tuning, we would also compute classification metrics. For masked language modelling, those metrics don't make much sense; instead, we could think about ROUGE or perplexity metrics for model evaluation. But here I decide to stay with the loss.
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
accuracy = accuracy_score(y_true=labels, y_pred=predictions)
recall = recall_score(y_true=labels, y_pred=predictions, average="weighted")
precision = precision_score(y_true=labels, y_pred=predictions, average="weighted")
f1 = f1_score(y_true=labels, y_pred=predictions, average="weighted")
return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
early_stopping = EarlyStoppingCallback(
early_stopping_patience=3, early_stopping_threshold=0.02
)
callbacks = [early_stopping]
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
# compute_metrics=compute_metrics,
tokenizer=tokenizer,
callbacks=callbacks,
)
# Finally, everything is set up and we can train our model. Depending on the data, model and budget size, you can enjoy your holidays and hopefully the model is trained when you come back.
# As you see, the training and validation loss is quite high, so we don't expect the best model ever.
trainer.train()
# The evaluation via [perplexity](https://huggingface.co/spaces/evaluate-metric/perplexity) says the same. The perplexity is 22.92, which is too high. The lower the perplexity is, the better the model performs.
import math
eval_results = trainer.evaluate()
print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
# #### 🎉 Save final model (+ tokenizer + config) to disk
trainer.save_model(f"{model_path}/main/")
# And as a final test, let's predict the masked tokens from above. As you see, sometimes the model output makes sense and sometimes it doesn't. Overall, it is not as bad as expected.
from transformers import pipeline
original_text = "Die Authentifikation wird mit OpenLDAP erledigt, die Einrichtung oder Aktualisierung von Systemen mit Yum und Servern für DNS, DHCP und TFTP."
mask_filler = pipeline("fill-mask", f"{model_path}/main/")
mask_filler(tokenizer.decode(output), top_k=3)
# ## Fine-tuning
# I don't have a labelled dataset here, but in practice you can load the model and fine-tune it with the code above; you would mainly need to change:
# from transformers import BertForSequenceClassification, DataCollatorWithPadding
# data_collator = DataCollatorWithPadding(tokenizer=tokenizer, pad_to_multiple_of=block_size)
# model = BertForSequenceClassification.from_pretrained(model_path, num_labels=num_label)
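# A minimal, hypothetical sketch of that fine-tuning setup (not run here): it assumes a
# labelled, tokenized dataset with a "labels" column, and `num_labels` is a placeholder
# for the number of target classes in your data.
from transformers import BertForSequenceClassification, DataCollatorWithPadding

num_labels = 10  # assumption: replace with the number of classes in your labelled data
clf_model = BertForSequenceClassification.from_pretrained(
    f"{model_path}/main/", num_labels=num_labels
)
clf_collator = DataCollatorWithPadding(tokenizer=tokenizer, pad_to_multiple_of=block_size)
# labelled_train / labelled_eval are placeholders and not defined in this notebook:
# clf_trainer = Trainer(
#     model=clf_model,
#     args=TrainingArguments(output_dir=f"{model_path}/classifier/", num_train_epochs=3),
#     data_collator=clf_collator,
#     train_dataset=labelled_train,
#     eval_dataset=labelled_eval,
#     tokenizer=tokenizer,
# )
# clf_trainer.train()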
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm, metrics
import warnings
from matplotlib import pyplot
from sklearn.feature_selection import SelectPercentile, chi2
warnings.filterwarnings("ignore")
# Read the csv file and with the head() function, preview the first 5 rows
adult_df = pd.read_csv("/kaggle/input/adult-census-income/adult.csv")
adult_df.head()
# It is evident that the dataset contains 32561 rows. To prevent any influence on our final model, we should shuffle the data and reserve a holdout set that won't be used for analysis and modeling. This approach ensures that the model's outcome is not biased by prior exposure to the holdout set.
adult_df = adult_df.sample(frac=1, random_state=10)
holdout = adult_df[-3000:]
adult_df = adult_df[:-3000]
# The last 3000 rows of the shuffled dataset were removed to create the holdout set for testing purposes later. It's important to check the lengths of each dataframe to ensure their accuracy.
print(len(adult_df))
print(len(holdout))
# # Data pre-processing
# First replace any occurrence of the character '?' in the DataFrame with NaN. This is done because the character '?' is commonly used to represent missing values in the Adult dataset. Then we show the resulting output which indicates the number of missing values in each column of the DataFrame. By identifying the missing values, we can determine how to handle them before using the dataset for analysis or modeling.
adult_df = adult_df.replace("?", pd.np.nan)
adult_df.isnull().sum()
# There are 3 columns with missing values: "workclass", "occupation", and "native.country". We fill them with each column's mode using the fillna() method from the Pandas library.
adult_df["workclass"].fillna(value=adult_df["workclass"].mode()[0], inplace=True)
adult_df["occupation"].fillna(value=adult_df["occupation"].mode()[0], inplace=True)
adult_df["native.country"].fillna(
value=adult_df["native.country"].mode()[0], inplace=True
)
# ## Data Visualisation
# The EDA revealed the most significant observations in the data, which are presented here.
# The "education" variable appears to be strongly correlated with the "income" variable and is one of the primary variables that caught our attention. To explore this relationship further, we can generate a bar plot of the "income" variable grouped by the "education" variable.
sns.set(style="whitegrid", font_scale=1.2)
a4_dims = (15, 5)
fig, ax = pyplot.subplots(figsize=a4_dims)
custom_palette = ["darkorange", "darkblue"]
g = sns.countplot(y="education", hue="income", data=adult_df, palette=custom_palette)
# The data shows that as the level of education increases, the proportion of individuals with incomes above 50k also increases. However, this growth is minimal for the lower levels of education, from pre-school to 12th grade, suggesting that the relationship between education and income is not entirely linear. For individuals with a Master's or Doctorate degree, there are more people with incomes over 50k than under. Overall, these findings indicate a positive correlation between education level and income.
#
sns.set(style="whitegrid", font_scale=1.2)
# Draw a countplot of income grouped by marital status
a4_dims = (15, 5)
fig, ax = pyplot.subplots(figsize=a4_dims)
custom_palette = ["darkorange", "darkblue"]
g = sns.countplot(
y="marital.status", hue="income", data=adult_df, palette=custom_palette
)
# The majority of individuals in all statuses earn less than 50k, except for those in the Married-civ-spouse category where the number of people earning more than or equal to 50k is similar to the number of people earning less than 50k. This could be because as a spouse, a higher income may be necessary to support a family or children. Overall, there are noticeable differences between the income variable and the status variable, indicating a correlation between the two.
# Additionally, we will examine the relationships between the numerical variables and encode the income variable as a numerical one using one-hot encoding. However, we will only keep one of the newly created income variables as having both will not provide significant new information.
import seaborn as sns
adult_df = pd.get_dummies(adult_df, columns=["income"])
adult_df = adult_df.drop(["income_<=50K"], axis=1)
corr = adult_df.corr()
a4_dims = (10, 5)
fig, ax = pyplot.subplots(figsize=a4_dims)
sns.heatmap(corr, annot=True)
# # Feature Selection
# Firstly, we will convert our categorical variables to numerical ones using the method of one-hot encoding.
adult_df2 = adult_df.copy()
adult_df = pd.get_dummies(
adult_df,
columns=["occupation", "race", "workclass", "marital.status", "relationship"],
)
# We will examine the variables that are highly correlated as they may provide additional information.
adult_df.corr().unstack().sort_values().drop_duplicates()
# It is noteworthy that the marital.status variable appears to be highly correlated with the relationship variable. This is reasonable, as a marital status of "Married-civ-spouse" would clearly imply that the relationship variable is "husband." It also suggests that the relationship variable is largely accounted for by the marital.status variable, so retaining both would not add significant value to our model. Consequently, we will remove the relationship variable and only retain the marital.status variable. Another essential observation is that workclass and occupation have an almost perfect correlation with each other; later on, we will discard one of the two.
adult_df = adult_df2
adult_df = pd.get_dummies(
adult_df, columns=["occupation", "race", "workclass", "marital.status"]
)
# The previous observation indicated that the relationship between "education" and "income" may not be a linear one due to the minimal increase in the first few education levels. Therefore, we will manipulate the "education.num" variable, which is the numerical version of education. Let's examine the correlation between "income_>50k" and "education.num" and combine the first eight levels into a single group.
#
adult_df["income_>50K"].corr(adult_df["education.num"])
adult_df["new_educ_num"] = adult_df.apply(
lambda x: x["education.num"] - 8 if x["education.num"] >= 9 else 0, axis=1
)
# Now let's investigate the correlation between the new variable, which groups the first 8 education levels together, and the "income_>50K" variable.
adult_df["income_>50K"].corr(adult_df["new_educ_num"])
# To select the independent variables for our model, we'll use the Chi-square feature selection method which identifies variables that are highly dependent on the response variable. We will select the top 16% highest-scoring variables as our features for the model. It's important to note that this method only considers the statistical significance of the relationship between the variables and does not necessarily imply causation.
X = adult_df.drop(
[
"income_>50K",
"education",
"sex",
"native.country",
"education.num",
"relationship",
],
axis=1,
)
y = adult_df[["income_>50K"]]
test = SelectPercentile(score_func=chi2, percentile=16)
fit = test.fit(X, y)
X.columns[test.get_support()]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train_ = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test_ = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
original = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
train_.info()
train_.drop(columns=["id"], inplace=True)
train_df = pd.concat([train_, original])
train_df = train_df.drop_duplicates()
train_df.info()
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(20, 10))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f")
train_df.columns
def kde_df(data, grid, figsize=(20, 20)):
x, y = grid[0], grid[1]
fig, axes = plt.subplots(x, y, figsize=figsize)
for i, col in enumerate(data.columns):
ax = axes[i // y, i % y]
sns.kdeplot(data=data[col], ax=ax, fill=None)
ax.axvline(data[col].mean(), color="red")
fig.suptitle("Density function of each features", y=0.9, fontsize=20)
kde_df(train_df, (4, 2))
sns.pairplot(
data=train_df,
hue="target",
corner=True,
plot_kws={"s": 80, "edgecolor": "white", "linewidth": 2.5},
palette="viridis",
)
plt.show()
import pygam
from sklearn.model_selection import StratifiedKFold
from pygam import LogisticGAM, s
from sklearn.metrics import roc_auc_score
y = train_df["target"]
X = train_df.drop(columns=["target"])
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = []
for train_idx, val_idx in kf.split(X, y):
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]
model = LogisticGAM(
s(0, n_splines=5)
+ s(1, n_splines=5)
+ s(2, n_splines=5)
+ s(3, n_splines=5)
+ s(4, n_splines=5)
+ s(5, n_splines=5)
).fit(X_train, y_train)
y_pred = model.predict(X_val)
score = roc_auc_score(y_val, y_pred)
scores.append(score)
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import xgboost as xgb
y = train_df["target"]
X = train_df.drop(columns=["target"])
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = []
for train_idx, val_idx in kf.split(X, y):
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]
params = {
"objective": "binary:logistic",
"n_estimators": 635,
"learning_rate": 0.01055242568633828,
"max_depth": 3,
"colsample_bytree": 0.8515610712807532,
"alpha": 37.73505082661144,
"lambda": 21.407162084345476,
"booster": "gbtree",
"min_child_weight": 1,
}
model = xgb.XGBClassifier(**params).fit(X_train, y_train)
y_pred = model.predict_proba(X_val)
score = roc_auc_score(y_val, y_pred[:, 1])
scores.append(score)
np.array(scores).mean()
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsClassifier
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=1, random_state=10)
model = make_pipeline(
FunctionTransformer(lambda X: X * np.array([[0.01, 1]])), KNeighborsClassifier(62)
)
auc = cross_val_score(
model, train_df[["cond", "calc"]], train_df["target"], scoring="roc_auc", cv=cv
).mean()
print(f"AUC = {auc:.3f}")
np.array(scores).mean()
import xgboost as xgb
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
params = {
"objective": "binary:logistic",
"n_estimators": 635,
"learning_rate": 0.01055242568633828,
"max_depth": 3,
"colsample_bytree": 0.8515610712807532,
"alpha": 37.73505082661144,
"lambda": 21.407162084345476,
"booster": "gbtree",
"min_child_weight": 1,
}
model = xgb.XGBClassifier(**params)
model.fit(X_train, y_train)
def plot_fi(data, ax=None, title=None):
fi = pd.Series(data, index=X.columns).sort_values(ascending=True)
fi.plot(kind="barh", ax=ax)
plot_fi(model.feature_importances_)
r = permutation_importance(model, X_test, y_test, n_repeats=1, random_state=46)
plot_fi(
r["importances"].reshape(
6,
)
)
cols_to_drop = ["gravity", "cond", "urea", "osmo"]
y = train_df["target"]
X = train_df.drop(columns=cols_to_drop + ["target"])
test_df = test_.drop(columns=(cols_to_drop + ["id"]))
X = train_df.drop(["target"], axis=1)
Y = train_df["target"]
# Xgb model and optuna optimisation
import optuna
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
optuna.logging.set_verbosity(optuna.logging.WARNING)
def objective(trial):
params = {
"verbosity": 0,
"n_estimators": trial.suggest_int("n_estimators", 50, 1500),
"learning_rate": trial.suggest_float("learning_rate", 1e-7, 1e-1),
"max_depth": trial.suggest_int("max_depth", 3, 20),
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.1, 1.0),
"alpha": trial.suggest_float("alpha", 1e-5, 1e2),
"lambda": trial.suggest_float("lambda", 1e-5, 1e2),
"objective": "binary:logistic",
"eval_metric": "auc",
"booster": trial.suggest_categorical("booster", ["dart", "gbtree", "gblinear"]),
"min_child_weight": trial.suggest_int("min_child_weight", 0, 5),
"tree_method": "gpu_hist",
}
kf = RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42)
scores = []
for train_idx, val_idx in kf.split(X, Y):
X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
y_train, y_val = Y.iloc[train_idx], Y.iloc[val_idx]
d_train = xgb.DMatrix(X_train, label=y_train)
d_val = xgb.DMatrix(X_val, label=y_val)
evallist = [(d_val, "eval")]
xgb_model = xgb.train(
params,
d_train,
num_boost_round=100,
evals=evallist,
early_stopping_rounds=20,
verbose_eval=False,
)
y_pred = xgb_model.predict(d_val)
score = roc_auc_score(y_val, y_pred)
scores.append(score)
return np.mean(scores)
study = optuna.create_study(direction="maximize")
# Run the search; without this call the objective above is never used and
# study.best_params would raise (n_trials here is an assumption)
study.optimize(objective, n_trials=50)
params = pd.DataFrame(study.best_params, index=[0])
params.to_csv("xgb_bestparams.csv")
best_params_xgb = {
"n_estimators": 1419,
"learning_rate": 0.09261096117850519,
"max_depth": 11,
"colsample_bytree": 0.9756369619241885,
"alpha": 0.041667155431533875,
"lambda": 7.308005725380826,
"booster": "gbtree",
"min_child_weight": 3,
}
from sklearn.model_selection import RepeatedStratifiedKFold
import xgboost as xgb
from sklearn.metrics import roc_auc_score
kfold = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=42)
scores = []
# target_pred = np.zeros(len(test_df))
for train_idx, valid_idx in kfold.split(X, y):
# Split data into training and validation sets
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_valid, y_valid = X.iloc[valid_idx], y.iloc[valid_idx]
# Initialize and train the model
    model = xgb.XGBClassifier(**best_params_xgb)
model.fit(X_train, y_train)
y_pred = model.predict_proba(X_valid)[:, 1]
score = roc_auc_score(y_valid, y_pred)
scores.append(score)
# pred = model.predict_proba(test_df)[:, 1]
# target_pred += pred / 100
np.array(scores).mean()
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.preprocessing import MinMaxScaler
import warnings
warnings.filterwarnings("ignore")
# Define the hyperparameters grid
params = {
"penalty": ["l1", "l2", "elasticnet"],
"C": [0.001, 0.01, 0.1, 1, 10, 100],
"class_weight": [None, "balanced"],
"solver": ["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
"max_iter": [100, 400, 1000],
}
scaler = MinMaxScaler()
# Keep X as a DataFrame so the .iloc indexing in the GAM cross-validation below still works
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index)
# Create a logistic regression model
lr = LogisticRegression()
# Create a stratified k-fold cross-validator
skf = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
# Perform grid search cross-validation
grid = GridSearchCV(lr, params, cv=skf, scoring="roc_auc", verbose=1, n_jobs=-1)
grid.fit(X, y)
# Print the best hyperparameters and the corresponding score
print("Best score:", grid.best_score_)
print("Best hyperparameters:", grid.best_params_)
best_params_logit = {
"C": 1,
"class_weight": None,
"max_iter": 100,
"penalty": "l1",
"solver": "saga",
}
# GAM model
from pygam import LogisticGAM, s
from sklearn.metrics import roc_auc_score
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = []
for train_idx, valid_idx in kfold.split(X, y):
# Split data into training and validation sets
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_valid, y_valid = X.iloc[valid_idx], y.iloc[valid_idx]
model = LogisticGAM(s(0, n_splines=10) + s(1, n_splines=10)).fit(X_train, y_train)
y_pred = model.predict(X_valid)
score = roc_auc_score(y_valid, y_pred)
scores.append(score)
np.array(scores).mean()
target_pred
test_df.info()
model = xgb.XGBClassifier(**study.best_params)
model.fit(X, y)
model.predict(test_df)
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
submission["target"] = target_pred
submission.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import the libraries
import pandas as pd
import numpy as np
# read the dataset
data = pd.read_csv("/kaggle/input/sample-data/sample-data.csv")
data
d = np.array(data)[:, :-1]
print("The attributes are: ", d)
target = np.array(data)[:, -1]
print("The target is: ", target)
# Find-S algorithm: start from the first positive example and generalise it
# attribute-by-attribute over the remaining positive examples.
def train(c, t):
    # Initialise the specific hypothesis with the first positive ("yes") example
    for i, val in enumerate(t):
        if val == "yes":
            specific_hypothesis = c[i].copy()
            break
    # Generalise: any attribute that disagrees with a positive example becomes '?'
    for i, val in enumerate(c):
        if t[i] == "yes":
            for x in range(len(specific_hypothesis)):
                if val[x] != specific_hypothesis[x]:
                    specific_hypothesis[x] = "?"
    return specific_hypothesis
print("The final hypothesis is: ", train(d, target))
|
# # Amazon Customer Reviews
# - Learn more: https://www.linkedin.com/in/bhue/
# - Dataset: https://www.kaggle.com/datasets/vivekprajapati2048/amazon-customer-reviews
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# ## Show the Table name
# Connect to the SQLite database
path = "/kaggle/input/amazon-customer-reviews/database.sqlite"
conn = sqlite3.connect(path)
def get_table_names(connection):
cursor = connection.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
cursor.close()
return [table[0] for table in tables]
table_names = get_table_names(conn)
print("Tables in the database:", table_names)
# ## Do EDA
# Connect to the SQLite database and fetch the data
conn = sqlite3.connect(path)
query = "SELECT * FROM Reviews"
df = pd.read_sql_query(query, conn)
conn.close()
# Set seaborn style to minimal and pastel color palette
sns.set(style="whitegrid", palette="pastel")
# Visualization 1: Distribution of review scores
plt.figure(figsize=(8, 5))
ax = sns.countplot(x="Score", data=df)
plt.title("Distribution of Review Scores")
plt.xlabel("Score")
plt.ylabel("Count")
# Print the value of reviews distribution above the bars
for p in ax.patches:
ax.annotate(
f"{p.get_height()}",
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="baseline",
)
sns.despine()
plt.show()
# Visualization 2: Correlation between numeric columns
numeric_columns = ["HelpfulnessNumerator", "HelpfulnessDenominator", "Score", "Time"]
correlation_matrix = df[numeric_columns].corr()
# Use a custom colormap for the heatmap (pastel light orange to light blue, reversed)
cmap = sns.diverging_palette(20, 220, as_cmap=True)
plt.figure(figsize=(8, 5))
sns.heatmap(correlation_matrix, annot=True, cmap=cmap, cbar=False)
plt.title("Correlation Heatmap")
sns.despine()
plt.show()
# Display the correlation values as a table using Pandas formatting options
print("Correlation Values Table:")
print(correlation_matrix.round(4).to_string())
# ## Density Chart of Review Scores
# Create a density chart for the Score column
plt.figure(figsize=(10, 6))
sns.kdeplot(df["Score"], fill=True)
plt.title("Density Chart of Review Scores")
plt.xlabel("Score")
plt.ylabel("Density")
plt.show()
from IPython.display import display, HTML
# Display the summary of the DataFrame
print(df.info(verbose=True))
display(HTML(df.head().to_html(index=False)))
# Convert UNIX timestamp to datetime object
df["Time"] = pd.to_datetime(df["Time"], unit="s")
# Review count per year
df["Year"] = df["Time"].dt.year
plt.figure(figsize=(10, 6))
ax = sns.countplot(x="Year", data=df)
plt.title("Review Count Per Year")
plt.xlabel("Year")
plt.ylabel("Count")
# Print the value of reviews distribution above the bars
for p in ax.patches:
ax.annotate(
f"{p.get_height()}",
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="baseline",
)
sns.despine()
plt.show()
# # Business Questions
# ## What are the most popular products based on the number of reviews?
import matplotlib.pyplot as plt
import seaborn as sns
# Set the style and color palette for seaborn
sns.set(style="whitegrid")
sns.set_palette("pastel")
# Count the number of reviews for each product and sort by the top 20
popular_products = df["ProductId"].value_counts().head(20)
# Create a bar chart
plt.figure(figsize=(12, 6))
bar_plot = sns.barplot(x=popular_products.index, y=popular_products.values)
# Customize the chart
plt.title("Top 20 Most Popular Products Based on Number of Reviews")
plt.xlabel("Product ID")
plt.ylabel("Number of Reviews")
plt.xticks(rotation=90)
# Add the value above the top of each bar
for p in bar_plot.patches:
bar_plot.annotate(
format(p.get_height(), ".0f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="baseline",
fontsize=10,
color="black",
xytext=(0, 5),
textcoords="offset points",
)
# Show the chart
plt.show()
# ## What is the average score for each product and which products have the highest and lowest average scores?
# Calculate the average score for each product
average_scores = df.groupby("ProductId")["Score"].mean()
# Get the top 10 highest and lowest average scores
highest_average_scores = average_scores.nlargest(10)
lowest_average_scores = average_scores.nsmallest(10)
# Set up the subplots
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
fig.suptitle("Top 10 Highest and Lowest Average Scores by Product")
# Create the highest average scores bar chart
bar_plot1 = sns.barplot(
x=highest_average_scores.index,
y=highest_average_scores.values,
ax=ax1,
palette="pastel",
)
ax1.set_title("Top 10 Highest Average Scores")
ax1.set_xlabel("Product ID")
ax1.set_ylabel("Average Score")
ax1.set_xticklabels(ax1.get_xticklabels(), rotation=90)
# Add the value above the top of each bar
for p in bar_plot1.patches:
bar_plot1.annotate(
format(p.get_height(), ".2f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="baseline",
fontsize=10,
color="black",
xytext=(0, 5),
textcoords="offset points",
)
# Create the lowest average scores bar chart
bar_plot2 = sns.barplot(
x=lowest_average_scores.index,
y=lowest_average_scores.values,
ax=ax2,
palette="pastel",
)
ax2.set_title("Top 10 Lowest Average Scores")
ax2.set_xlabel("Product ID")
ax2.set_ylabel("Average Score")
ax2.set_xticklabels(ax2.get_xticklabels(), rotation=90)
# Add the value above the top of each bar
for p in bar_plot2.patches:
bar_plot2.annotate(
format(p.get_height(), ".2f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="baseline",
fontsize=10,
color="black",
xytext=(0, 5),
textcoords="offset points",
)
# Show the charts
plt.show()
# ## What is the distribution of review scores across all products?
plt.figure(figsize=(10, 6))
sns.set_palette("pastel")
sns.countplot(x="Score", data=df)
plt.title("Distribution of Review Scores Across All Products")
plt.xlabel("Score")
plt.ylabel("Number of Reviews")
# Add the value above the top of each bar
ax = plt.gca()
for p in ax.patches:
ax.annotate(
format(p.get_height(), ".0f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="baseline",
fontsize=10,
color="black",
xytext=(0, 5),
textcoords="offset points",
)
plt.show()
# ## How do review scores change over time for the most popular products?
# Identify the top 5 most reviewed products
top_5_products = df["ProductId"].value_counts().head(5).index.tolist()
# Filter the dataset to include only the top 5 most reviewed products
top_5_df = df[df["ProductId"].isin(top_5_products)]
# Calculate the average score for each product by year
top_5_yearly_avg = top_5_df.groupby(["Year", "ProductId"])["Score"].mean().reset_index()
# Create the pastel-colored line plot
plt.figure(figsize=(10, 6))
sns.set_palette("pastel")
sns.lineplot(x="Year", y="Score", hue="ProductId", data=top_5_yearly_avg, marker="o")
plt.title("Average Review Scores Over Time for the Top 5 Most Reviewed Products")
plt.xlabel("Year")
plt.ylabel("Average Score")
plt.legend(title="ProductId", title_fontsize="13", loc="upper right")
plt.xticks(rotation=45)
plt.show()
# ## Are there any seasonal trends in the volume or sentiment of reviews (using the "Month" column)?
# Extract the month from the "Time" column
df["Month"] = df["Time"].dt.month
# Calculate the total number of reviews and average score for each month
monthly_stats = df.groupby("Month").agg({"Score": ["count", "mean"]}).reset_index()
monthly_stats.columns = ["Month", "Total Reviews", "Average Score"]
# Create a pastel-colored line plot for the volume of reviews
plt.figure(figsize=(10, 6))
sns.set_palette("pastel")
sns.lineplot(x="Month", y="Total Reviews", data=monthly_stats, marker="o")
plt.title("Seasonal Trends in the Volume of Reviews")
plt.xlabel("Month")
plt.ylabel("Total Reviews")
plt.xticks(
range(1, 13),
[
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
)
plt.show()
# Create a pastel-colored line plot for the sentiment of reviews
plt.figure(figsize=(10, 6))
sns.set_palette("pastel")
sns.lineplot(x="Month", y="Average Score", data=monthly_stats, marker="o")
plt.title("Seasonal Trends in the Sentiment of Reviews")
plt.xlabel("Month")
plt.ylabel("Average Score")
plt.xticks(
range(1, 13),
[
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
)
plt.show()
# ## Are there any seasonal trends in the volume or sentiment of reviews every six months?
import matplotlib.dates as mdates
monthly_stats["Year-Month"] = df["Year"].astype(str) + "-" + df["Month"].astype(str)
# Convert the "Year-Month" column back to a datetime object for better tick formatting
monthly_stats["Year-Month"] = pd.to_datetime(monthly_stats["Year-Month"])
fig, axes = plt.subplots(1, 2, figsize=(20, 6))
sns.set_palette("pastel")
# Volume of Reviews
sns.lineplot(
ax=axes[0], x="Year-Month", y="Total Reviews", data=monthly_stats, marker="o"
)
axes[0].set_title("Trends in the Volume of Reviews")
axes[0].set_xlabel("Year-Month")
axes[0].set_ylabel("Total Reviews")
axes[0].xaxis.set_major_locator(mdates.MonthLocator(interval=6))
axes[0].xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m"))
plt.setp(axes[0].get_xticklabels(), rotation=45)
# Sentiment of Reviews
sns.lineplot(
ax=axes[1], x="Year-Month", y="Average Score", data=monthly_stats, marker="o"
)
axes[1].set_title("Trends in the Sentiment of Reviews")
axes[1].set_xlabel("Year-Month")
axes[1].set_ylabel("Average Score")
axes[1].xaxis.set_major_locator(mdates.MonthLocator(interval=6))
axes[1].xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m"))
plt.setp(axes[1].get_xticklabels(), rotation=45)
plt.tight_layout()
plt.show()
# ## How does the length of a review (in terms of words) correlate with the review score?
# Add a new column with the word count of each review
df["Word Count"] = df["Text"].apply(lambda x: len(x.split()))
# Create a scatterplot to visualize the relationship between word count and review score
plt.figure(figsize=(12, 6))
sns.set_palette("pastel")
sns.scatterplot(x="Word Count", y="Score", data=df, alpha=0.5)
plt.title("Word Count vs. Review Score")
plt.xlabel("Word Count")
plt.ylabel("Review Score")
plt.show()
# Group the data by 'Score' and get the summary statistics for the 'Word Count' column
score_word_count_summary = df.groupby("Score")["Word Count"].describe()
print(score_word_count_summary)
# ## Top 10 Positive and Negative Words
import nltk
from nltk.corpus import stopwords
from collections import Counter
import random
import string
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
# Add common and less informative words to the stopwords list
stop_words.update(["br", "like", "one"])
# Filter high-score (positive) and low-score (negative) reviews
positive_reviews = df[df["Score"] >= 4]["Text"].sample(frac=0.1, random_state=42)
negative_reviews = df[df["Score"] <= 2]["Text"].sample(frac=0.1, random_state=42)
# Preprocess the text and remove stopwords, punctuation, and non-alphabetic characters
positive_words = [
word.lower()
for review in positive_reviews
for word in nltk.word_tokenize(review)
if word.lower() not in stop_words
and word.lower() not in string.punctuation
and word.isalpha()
]
negative_words = [
word.lower()
for review in negative_reviews
for word in nltk.word_tokenize(review)
if word.lower() not in stop_words
and word.lower() not in string.punctuation
and word.isalpha()
]
# Count the occurrences of each word
positive_word_counts = Counter(positive_words)
negative_word_counts = Counter(negative_words)
import pandas as pd
# Create a DataFrame for the most common words in positive and negative reviews
common_words_df = pd.DataFrame(
{
"Positive Words": [
word for word, count in positive_word_counts.most_common(10)
],
"Positive Counts": [
count for word, count in positive_word_counts.most_common(10)
],
"Negative Words": [
word for word, count in negative_word_counts.most_common(10)
],
"Negative Counts": [
count for word, count in negative_word_counts.most_common(10)
],
}
)
common_words_df
from wordcloud import WordCloud
# Generate word clouds
positive_wordcloud = WordCloud(background_color="white", colormap="Greens")
negative_wordcloud = WordCloud(background_color="white", colormap="Reds")
positive_text = " ".join(common_words_df["Positive Words"])
negative_text = " ".join(common_words_df["Negative Words"])
positive_wordcloud.generate(positive_text)
negative_wordcloud.generate(negative_text)
# Plot the word clouds side by side
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
axes[0].imshow(positive_wordcloud, interpolation="bilinear")
axes[0].set_title("Positive Reviews")
axes[0].axis("off")
axes[1].imshow(negative_wordcloud, interpolation="bilinear")
axes[1].set_title("Negative Reviews")
axes[1].axis("off")
plt.show()
# ## Word Clouds of Unique Positive and Negative Words
from wordcloud import WordCloud
import matplotlib.pyplot as plt
positive_set = set(positive_words)
negative_set = set(negative_words)
unique_positive_words = list(positive_set - negative_set)
unique_negative_words = list(negative_set - positive_set)
def plot_wordcloud(words1, words2, title1, title2):
wordcloud1 = WordCloud(
width=800,
height=800,
background_color="white",
colormap="Greens",
min_font_size=10,
).generate(" ".join(words1))
wordcloud2 = WordCloud(
width=800,
height=800,
background_color="white",
colormap="Reds",
min_font_size=10,
).generate(" ".join(words2))
fig, axes = plt.subplots(1, 2, figsize=(16, 8), facecolor=None)
axes[0].imshow(wordcloud1)
axes[0].set_title(title1, fontsize=20)
axes[0].axis("off")
axes[1].imshow(wordcloud2)
axes[1].set_title(title2, fontsize=20)
axes[1].axis("off")
plt.tight_layout(pad=0)
plt.show()
plot_wordcloud(
unique_positive_words,
unique_negative_words,
"Word Cloud of Unique Positive Reviews",
"Word Cloud of Unique Negative Reviews",
)
# ## What is the relationship between HelpfulnessNumerator and HelpfulnessDenominator? Are there any patterns or trends in the helpfulness of reviews?
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# Create a new column for helpfulness ratio
df["HelpfulnessRatio"] = df["HelpfulnessNumerator"] / df["HelpfulnessDenominator"]
# Remove instances where HelpfulnessDenominator is 0 to avoid division by zero
df_filtered = df[df["HelpfulnessDenominator"] != 0]
# Calculate correlation coefficient
correlation = np.corrcoef(
df_filtered["HelpfulnessNumerator"], df_filtered["HelpfulnessDenominator"]
)[0, 1]
print(
f"Correlation between HelpfulnessNumerator and HelpfulnessDenominator: {correlation:.2f}"
)
# Scatterplot
plt.figure(figsize=(10, 6))
sns.scatterplot(
x="HelpfulnessNumerator", y="HelpfulnessDenominator", data=df_filtered, alpha=0.5
)
plt.title("Relationship between HelpfulnessNumerator and HelpfulnessDenominator")
plt.xlabel("HelpfulnessNumerator")
plt.ylabel("HelpfulnessDenominator")
plt.show()
# Helpfulness ratio distribution
plt.figure(figsize=(10, 6))
sns.histplot(df_filtered["HelpfulnessRatio"], bins=50, kde=True, color="skyblue")
plt.title("Distribution of Helpfulness Ratio")
plt.xlabel("Helpfulness Ratio")
plt.ylabel("Frequency")
plt.show()
# The correlation coefficient of 0.97 indicates a strong positive relationship between HelpfulnessNumerator and HelpfulnessDenominator. This means that as the number of helpful votes (HelpfulnessNumerator) increases, the total number of votes (HelpfulnessDenominator) also tends to increase.
# Keep in mind that the correlation coefficient only measures the linear relationship between the two variables and does not imply causation. However, it does suggest that reviews with more votes overall are likely to have more helpful votes.
# The code above creates a scatterplot of HelpfulnessNumerator against HelpfulnessDenominator and displays the distribution of the helpfulness ratio; the correlation coefficient gives an indication of how closely the two variables are related.
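# As a complement to the Pearson coefficient above, a rank-based (Spearman) correlation also captures monotonic but non-linear relationships. A minimal sketch, reusing the df_filtered frame defined above:
spearman_corr = df_filtered[["HelpfulnessNumerator", "HelpfulnessDenominator"]].corr(
    method="spearman"
)
print("Spearman (rank-based) correlation:")
print(spearman_corr.round(4).to_string())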
# ## Are there any recurring themes in the most helpful reviews (those with the highest HelpfulnessNumerator)?
import nltk
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
# Filter the top 100 most helpful reviews
top_helpful_reviews = df.nlargest(100, "HelpfulnessNumerator")["Text"]
# Preprocess the text and remove stopwords
processed_text = " ".join(
[
word.lower()
for review in top_helpful_reviews
for word in nltk.word_tokenize(review)
if word.lower() not in stop_words
]
)
# Generate a word cloud
wordcloud = WordCloud(
width=800, height=400, background_color="white", colormap="Pastel1"
).generate(processed_text)
# Display the word cloud
plt.figure(figsize=(16, 8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# ## Are there any specific users who consistently provide high-quality reviews (i.e., high helpfulness scores and/or high review scores)?
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Calculate average review score and helpfulness score for each user
average_scores = df.groupby("UserId").agg(
{"Score": "mean", "HelpfulnessNumerator": "mean"}
)
# Set a threshold for high-quality reviews
score_threshold = 4.5
helpfulness_threshold = 5
# Filter users who consistently provide high-quality reviews
high_quality_users = average_scores[
(average_scores["Score"] >= score_threshold)
& (average_scores["HelpfulnessNumerator"] >= helpfulness_threshold)
]
# Display the result as a readable table
print("Users who consistently provide high-quality reviews:")
display(high_quality_users.head(10))
# Set pastel color palette
sns.set_palette("pastel")
# Function to add value labels above bars
def add_value_labels(ax, spacing=5):
for rect in ax.patches:
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
label = "{:.2f}".format(y_value)
ax.annotate(
label,
(x_value, y_value),
textcoords="offset points",
xytext=(0, spacing),
ha="center",
va="bottom",
)
# Plot side-by-side bar charts for the top 10 users with the highest average review scores and helpfulness numerators
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
sns.barplot(
x=high_quality_users.head(10).index,
y="Score",
data=high_quality_users.head(10),
ax=ax1,
)
ax1.set_title("Top 10 Users by Average Review Score")
ax1.set_xlabel("User ID")
ax1.set_ylabel("Average Review Score")
ax1.tick_params(axis="x", rotation=45)
add_value_labels(ax1)
sns.barplot(
x=high_quality_users.head(10).index,
y="HelpfulnessNumerator",
data=high_quality_users.head(10),
ax=ax2,
)
ax2.set_title("Top 10 Users by Average Helpfulness Numerator")
ax2.set_xlabel("User ID")
ax2.set_ylabel("Average Helpfulness Numerator")
ax2.tick_params(axis="x", rotation=45)
add_value_labels(ax2)
plt.tight_layout()
plt.show()
# The output shows the UserId of the top 10 users who consistently provide high-quality reviews based on the average review score and helpfulness numerator. You can use this information to further explore the types of reviews these users provide or analyze their preferences to gain insights into what makes a review helpful or well-received.
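# As a follow-up, the reviews written by these users can be pulled directly from the dataframe. A small sketch, assuming df and high_quality_users from the cells above are still in memory:
top_user_ids = high_quality_users.head(10).index
top_user_reviews = df[df["UserId"].isin(top_user_ids)][
    ["UserId", "Score", "HelpfulnessNumerator", "Summary"]
]
print(f"Number of reviews written by these users: {len(top_user_reviews)}")
display(top_user_reviews.head(10))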
# ## What is the distribution of review counts per user? Are there any outliers or users who contribute a significantly larger number of reviews than others?
# Calculate the review counts per user
user_review_counts = (
df.groupby("UserId")["Id"]
.count()
.reset_index()
.rename(columns={"Id": "Review_Count"})
)
# Visualize the distribution using a histogram
plt.figure(figsize=(16, 6))
sns.set_palette("pastel")
sns.histplot(data=user_review_counts, x="Review_Count", bins=50, kde=True)
plt.xlabel("Review Count per User")
plt.ylabel("Frequency")
plt.title("Distribution of Review Counts per User")
plt.show()
# Visualize outliers using a box plot
plt.figure(figsize=(16, 6))
sns.set_palette("pastel")
sns.boxplot(x=user_review_counts["Review_Count"], color=sns.color_palette("pastel")[0])
plt.xlabel("Review Count per User")
plt.title("Box Plot of Review Counts per User")
plt.show()
# ## Can we identify any potential fake reviews or spam by analyzing patterns in the text, user, or other variables?
# - Users with an unusually high number of reviews, as it might indicate that they are posting fake reviews.
# - Reviews with very short text or very repetitive text, which could indicate spam or low-effort fake reviews.
# - Reviews with a high proportion of positive or negative words but an overall neutral score, or vice versa.
# - Users who consistently rate products from a specific brand highly or poorly, which might indicate bias or affiliation with the brand.
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
import pandas as pd
nltk.download("vader_lexicon")
sia = SentimentIntensityAnalyzer()
# Create a new DataFrame for this analysis
df_analysis = df.sample(frac=0.1, random_state=42).copy()
# Calculate the length of each review in words
df_analysis["Text_Length"] = df_analysis["Text"].apply(lambda x: len(x.split()))
# Define a function to count the proportion of positive and negative words in a review
def count_sentiment_words(text, sentiment):
tokens = nltk.word_tokenize(text.lower())
sentiment_words = [
word for word in tokens if sia.polarity_scores(word)[sentiment] > 0
]
return len(sentiment_words) / len(tokens)
# Calculate the proportion of positive and negative words in each review
df_analysis["Positive_Word_Proportion"] = df_analysis["Text"].apply(
lambda x: count_sentiment_words(x, "pos")
)
df_analysis["Negative_Word_Proportion"] = df_analysis["Text"].apply(
lambda x: count_sentiment_words(x, "neg")
)
# Filter potential fake reviews or spam based on text length and sentiment word proportions
suspect_reviews = df_analysis[
(df_analysis["Text_Length"] < 10)
| ((df_analysis["Positive_Word_Proportion"] > 0.5) & (df_analysis["Score"] < 3))
| ((df_analysis["Negative_Word_Proportion"] > 0.5) & (df_analysis["Score"] > 3))
]
# Display the suspect reviews
suspect_reviews.head()
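# The first bullet above (users with an unusually high number of reviews) can be checked with a simple count-based filter. A small sketch; the percentile threshold is an arbitrary illustrative choice:
review_counts = df["UserId"].value_counts()
prolific_users = review_counts[review_counts > review_counts.quantile(0.999)]
print(f"Users above the 99.9th percentile of review volume: {len(prolific_users)}")
print(prolific_users.head(10))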
sns.set_palette("pastel")
plt.figure(figsize=(10, 6))
# Scatter plot for high-score reviews
sns.scatterplot(
x="Positive_Word_Proportion",
y="Negative_Word_Proportion",
data=df_analysis[df_analysis["Score"] > 3],
alpha=0.5,
label="High-score Reviews",
)
# Scatter plot for low-score reviews
sns.scatterplot(
x="Positive_Word_Proportion",
y="Negative_Word_Proportion",
data=df_analysis[df_analysis["Score"] < 3],
alpha=0.5,
label="Low-score Reviews",
)
plt.title("Positive and Negative Word Proportions in Reviews")
plt.xlabel("Positive Word Proportion")
plt.ylabel("Negative Word Proportion")
plt.legend()
plt.show()
# ## Are there any correlations between the time it takes for a review to be considered helpful (difference between review time and current time) and the review's score or helpfulness?
import datetime
# Set reference date to September 2021
reference_date = datetime.datetime(2021, 9, 1)
# Calculate the time difference in days
df["Days_Since_Review"] = (
reference_date - pd.to_datetime(df["Time"], unit="s")
).dt.days
# Calculate the correlation between the time difference, review score, and helpfulness
correlation_matrix = df[
["Days_Since_Review", "Score", "HelpfulnessNumerator", "HelpfulnessDenominator"]
].corr()
import pandas as pd
formatted_correlation = correlation_matrix.reset_index().rename(
columns={"index": "Variables"}
)
formatted_correlation = formatted_correlation[
[
"Variables",
"Days_Since_Review",
"Score",
"HelpfulnessNumerator",
"HelpfulnessDenominator",
]
]
formatted_correlation["Variables"] = formatted_correlation["Variables"].replace(
{
"Days_Since_Review": "Days Since Review",
"HelpfulnessNumerator": "Helpfulness Numerator",
"HelpfulnessDenominator": "Helpfulness Denominator",
}
)
display(formatted_correlation)
sns.set_palette("pastel")
plt.figure(figsize=(10, 6))
sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm")
plt.title("Correlation Heatmap")
plt.show()
# ## What are the most common words or phrases used in review summaries for different score levels?
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, RegexpTokenizer
from collections import Counter
import pandas as pd
nltk.download("stopwords")
nltk.download("punkt")
stop_words = set(stopwords.words("english"))
tokenizer = RegexpTokenizer(r"\w+")
def preprocess_text(text):
tokens = tokenizer.tokenize(text.lower())
filtered_tokens = [word for word in tokens if word not in stop_words]
return filtered_tokens
# Group the dataset by Score and concatenate summaries
summary_by_score = (
df.groupby("Score")["Summary"].apply(lambda x: " ".join(x)).reset_index()
)
# Preprocess text and get most common words for each score level
summary_by_score["Tokenized_Summary"] = summary_by_score["Summary"].apply(
preprocess_text
)
summary_by_score["Most_Common_Words"] = summary_by_score["Tokenized_Summary"].apply(
lambda x: Counter(x).most_common(10)
)
# Display the most common words for each score level in a readable table
common_words_table = pd.DataFrame(summary_by_score[["Score", "Most_Common_Words"]])
common_words_table = common_words_table.explode("Most_Common_Words")
common_words_table[["Word", "Count"]] = pd.DataFrame(
common_words_table["Most_Common_Words"].tolist(), index=common_words_table.index
)
common_words_table = common_words_table.drop(columns=["Most_Common_Words"]).reset_index(
drop=True
)
display(common_words_table)
import seaborn as sns
import matplotlib.pyplot as plt
# Set the color palette
colors = sns.color_palette("pastel", 5)
# Create the bar plot
plt.figure(figsize=(12, 6))
sns.barplot(data=common_words_table, x="Word", y="Count", hue="Score", palette=colors)
# Add a title and labels
plt.title("Most Common Words in Review Summaries by Score", fontsize=16)
plt.xlabel("Words", fontsize=12)
plt.ylabel("Counts", fontsize=12)
plt.xticks(rotation=45)
# Show the values above the bars
for p in plt.gca().patches:
if not np.isnan(p.get_height()):
plt.gca().annotate(
f"{int(p.get_height())}",
(p.get_x() + p.get_width() / 2, p.get_height()),
ha="center",
va="baseline",
fontsize=5,
)
plt.show()
# ## Can we cluster products based on the similarity of their reviews or review scores?
from sklearn.feature_extraction.text import TfidfVectorizer
grouped_products = df.groupby("ProductId")["Text"].apply(" ".join).reset_index()
vectorizer = TfidfVectorizer(stop_words="english", max_features=1000)
X = vectorizer.fit_transform(grouped_products["Text"])
from sklearn.cluster import KMeans
num_clusters = 5 # Choose the number of clusters you want
kmeans = KMeans(n_clusters=num_clusters, random_state=42)
grouped_products["Cluster"] = kmeans.fit_predict(X)
cluster_counts = grouped_products["Cluster"].value_counts()
print(cluster_counts)
# - Cluster 0: 5,271 products
# - Cluster 1: 17,087 products
# - Cluster 2: 41,886 products
# - Cluster 3: 5,720 products
# - Cluster 4: 4,294 products
# - Keep in mind that these clusters were created based on the text similarity of the reviews, using the TF-IDF representation of the review text and the K-means clustering algorithm. These clusters group products with similar reviews; the sketch below inspects the top terms of each cluster.
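# To get a feel for what each cluster represents, the highest-weighted TF-IDF terms per K-means centroid can be inspected. A minimal sketch using the fitted vectorizer and kmeans objects from above:
terms = vectorizer.get_feature_names_out()
order_centroids = kmeans.cluster_centers_.argsort()[:, ::-1]
for cluster_id in range(num_clusters):
    top_terms = [terms[i] for i in order_centroids[cluster_id, :10]]
    print(f"Cluster {cluster_id}: {', '.join(top_terms)}")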
# ### Visualize clusters: Create visualizations to better understand the distribution of review scores, helpfulness, or other relevant features within each cluster.
# Merge the product clusters dataframe with the original dataframe
merged_df = df.merge(grouped_products, on="ProductId")
# Create a single figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(16, 6))
# Box plot for review scores by cluster
sns.boxplot(x="Cluster", y="Score", data=merged_df, palette="pastel", ax=axes[0])
axes[0].set_title("Review Scores Distribution by Cluster")
axes[0].set_xlabel("Cluster")
axes[0].set_ylabel("Review Score")
# Box plot for helpfulness by cluster
merged_df["Helpfulness_Ratio"] = (
merged_df["HelpfulnessNumerator"] / merged_df["HelpfulnessDenominator"]
)
merged_df["Helpfulness_Ratio"].fillna(0, inplace=True)
sns.boxplot(
x="Cluster", y="Helpfulness_Ratio", data=merged_df, palette="pastel", ax=axes[1]
)
axes[1].set_title("Helpfulness Distribution by Cluster")
axes[1].set_xlabel("Cluster")
axes[1].set_ylabel("Helpfulness Ratio")
# Display the combined figure
plt.tight_layout()
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
sample_df = df.sample(frac=0.5, random_state=42)
# Convert 'Summary' column into numerical features using TF-IDF
vectorizer = TfidfVectorizer(stop_words="english", max_features=500)
summary_tfidf = vectorizer.fit_transform(sample_df["Summary"])
# Convert the sparse matrix to a dense matrix and create a DataFrame
summary_tfidf_df = pd.DataFrame(
summary_tfidf.todense(), columns=vectorizer.get_feature_names_out()
)
# Add 'Score' column to the features
X = pd.concat([summary_tfidf_df, sample_df["Score"].reset_index(drop=True)], axis=1)
y = sample_df["HelpfulnessNumerator"].reset_index(drop=True)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
# Train a linear regression model
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions on the testing set
y_pred = model.predict(X_test)
# Evaluate the performance of the model
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print("Mean Squared Error:", mse)
print("R-squared:", r2)
# The results show that the model's performance is not very good. The Mean Squared Error (MSE) is 57.07, which means, on average, the model's predictions are off by a squared difference of 57.07. The R-squared value is 0.0143, which is very low, indicating that the model explains only 1.43% of the variance in the helpfulness numerator.
# These results suggest that predicting the helpfulness of a review based on its text and score using a linear regression model is not effective. It might be worth exploring other machine learning techniques or incorporating additional features to improve the model's performance.
import matplotlib.pyplot as plt
# Generate predictions
y_pred = model.predict(X_test)
# Create a scatter plot of actual vs. predicted helpfulness numerators
plt.scatter(y_test, y_pred, alpha=0.5)
plt.xlabel("Actual Helpfulness Numerator")
plt.ylabel("Predicted Helpfulness Numerator")
plt.title("Actual vs. Predicted Helpfulness Numerator")
# Add a reference line for perfect predictions
plt.plot([0, max(y_test)], [0, max(y_test)], color="red", linestyle="--", linewidth=1)
plt.show()
# Since the linear regression model has performed poorly on this dataset, visualizing the predictions with a prediction line might not provide meaningful insights.
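# As suggested above, one alternative worth trying is a tree-based model on the same TF-IDF + score features. A minimal sketch (untuned; HistGradientBoostingRegressor is just one possible choice):
from sklearn.ensemble import HistGradientBoostingRegressor

gbr = HistGradientBoostingRegressor(random_state=42)
gbr.fit(X_train, y_train)
gbr_pred = gbr.predict(X_test)
print("Gradient boosting MSE:", mean_squared_error(y_test, gbr_pred))
print("Gradient boosting R-squared:", r2_score(y_test, gbr_pred))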
# ## Try Collaborative Filtering (Surprise KNN)
import pandas as pd
from surprise import Reader, Dataset, KNNBasic, accuracy
from surprise.model_selection import train_test_split, cross_validate
# Create a smaller sample of the dataset
sample_df = df.sample(frac=0.1, random_state=42)
# Create a Surprise Reader object with the review score range
reader = Reader(rating_scale=(1, 5))
# Create a Surprise Dataset from the sample dataframe
data = Dataset.load_from_df(sample_df[["UserId", "ProductId", "Score"]], reader)
# Split the data into training and testing sets
trainset, testset = train_test_split(data, test_size=0.3, random_state=42)
# Train a KNNBasic model with user-based collaborative filtering
sim_options = {"name": "cosine", "user_based": True}
model = KNNBasic(sim_options=sim_options)
model.fit(trainset)
# Test the model on the test set
predictions = model.test(testset)
# Calculate the accuracy of the model
accuracy.rmse(predictions)
# The RMSE (Root Mean Squared Error) of the user-based collaborative filtering model is 1.3350. This value represents the average difference between the actual review scores and the predicted review scores. Lower values for RMSE indicate better model performance.
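# For a more robust estimate than a single train/test split, the cross_validate helper imported above can evaluate the model over several folds. A small sketch (3 folds, illustrative only):
cv_results = cross_validate(
    KNNBasic(sim_options=sim_options), data, measures=["RMSE", "MAE"], cv=3, verbose=True
)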
import matplotlib.pyplot as plt
# Extract the actual scores and predicted scores from the predictions
actual_scores = [pred.r_ui for pred in predictions]
predicted_scores = [pred.est for pred in predictions]
# Plot the distributions
plt.figure(figsize=(10, 5))
plt.hist(actual_scores, bins=5, alpha=0.5, label="Actual Scores", color="blue")
plt.hist(predicted_scores, bins=5, alpha=0.5, label="Predicted Scores", color="green")
plt.xlabel("Score")
plt.ylabel("Frequency")
plt.legend(loc="upper left")
plt.title("Distribution of Actual Scores vs. Predicted Scores")
plt.show()
# ## User behavior: Analyze user behavior by examining the distribution of reviews per user or the average scores given by each user.
import pandas as pd
# Group the data by UserId and calculate the number of reviews and average score for each user
user_behavior = df.groupby("UserId").agg({"Score": ["count", "mean"]})
# Reset the index and rename columns
user_behavior.columns = ["Number_of_Reviews", "Average_Score"]
user_behavior.reset_index(inplace=True)
# Sort the users by the number of reviews in descending order
user_behavior_sorted = user_behavior.sort_values(
by="Number_of_Reviews", ascending=False
)
# Display the top 10 users with the most reviews
print(user_behavior_sorted.head(10))
# You can also visualize the distribution of reviews per user using a histogram
user_behavior["Number_of_Reviews"].hist(bins=50, log=True)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Calculate the correlation matrix
correlation_matrix = df[["Word Count", "HelpfulnessRatio", "Score"]].corr()
# Print the correlation matrix
print(correlation_matrix)
# Visualize the correlation matrix using a heatmap
sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm")
plt.show()
# The correlation matrix you provided shows the following relationships between the variables:
# - Word Count and Score: The correlation coefficient is -0.077, which indicates a very weak negative linear relationship. This means that as the word count of a review increases, the score might slightly decrease, but the relationship is not strong.
# - Helpfulness Ratio and Score: The correlation coefficient is 0.377, which indicates a moderate positive linear relationship. This means that as the helpfulness ratio of a review increases, the score is also likely to increase. In other words, higher-rated reviews tend to be considered more helpful by users.
# - Word Count and Helpfulness Ratio: The correlation coefficient is 0.041, which indicates a very weak positive linear relationship. This means that there is almost no relationship between the word count and the helpfulness ratio of a review.
# - These findings suggest that the helpfulness ratio is somewhat related to the score of a review, while the word count has very little impact on both the helpfulness ratio and the score. However, it's important to remember that correlation does not imply causation, and further analysis might be needed to understand the nature of these relationships.
import matplotlib.pyplot as plt
# Create a scatter plot for Word Count vs. Score
plt.subplot(1, 2, 1)
plt.scatter(df["Word Count"], df["Score"], c="blue", alpha=0.5)
plt.xlabel("Word Count")
plt.ylabel("Score")
plt.title("Word Count vs. Score")
# Create a scatter plot for Helpfulness Ratio vs. Score
plt.subplot(1, 2, 2)
plt.scatter(df["HelpfulnessRatio"], df["Score"], c="green", alpha=0.5)
plt.xlabel("Helpfulness Ratio")
plt.ylabel("Score")
plt.title("Helpfulness Ratio vs. Score")
# Display the plots
plt.tight_layout()
plt.show()
# Group the data by year and month
reviews_by_month = (
df.groupby(["Year", "Month"]).agg({"Score": ["count", "mean"]}).reset_index()
)
reviews_by_month.columns = ["Year", "Month", "Review Count", "Average Score"]
# Create a datetime column for easier plotting
reviews_by_month["Date"] = pd.to_datetime(
reviews_by_month[["Year", "Month"]].assign(day=1)
)
# Plot the number of reviews over time
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(reviews_by_month["Date"], reviews_by_month["Review Count"], marker="o")
plt.xlabel("Time")
plt.ylabel("Number of Reviews")
plt.title("Number of Reviews Over Time")
# Plot the average scores over time
plt.subplot(1, 2, 2)
plt.plot(
reviews_by_month["Date"],
reviews_by_month["Average Score"],
marker="o",
color="orange",
)
plt.xlabel("Time")
plt.ylabel("Average Score")
plt.title("Average Score Over Time")
# Display the plots
plt.tight_layout()
plt.show()
# ## Overlap Between 5-Star Rating Users and Helpful Users
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
# Filter users with 5-star ratings
five_star_users = set(df[df["Score"] == 5]["UserId"].unique())
# Filter users with helpfulness ratio >= 0.5
helpful_users = set(df[df["HelpfulnessRatio"] >= 0.5]["UserId"].unique())
# Calculate intersection, difference and exclusive counts
intersection_count = len(five_star_users.intersection(helpful_users))
five_star_exclusive_count = len(five_star_users) - intersection_count
helpful_exclusive_count = len(helpful_users) - intersection_count
# Create the Venn diagram
plt.figure(figsize=(8, 8))
venn = venn2(
[five_star_users, helpful_users],
set_labels=("5-Star Rating Users", "Helpful Users"),
set_colors=("red", "green"),
alpha=0.5,
)
plt.title("Overlap Between 5-Star Rating Users and Helpful Users")
# Add counts to the diagram
venn.get_label_by_id("10").set_text(str(five_star_exclusive_count))
venn.get_label_by_id("01").set_text(str(helpful_exclusive_count))
venn.get_label_by_id("11").set_text(str(intersection_count))
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
# Convert 'Time' column to datetime format
df["Time"] = pd.to_datetime(df["Time"], unit="s")
# Create a 'Days_Since_Review' column
df["Days_Since_Review"] = (datetime.datetime.now() - df["Time"]).dt.days
# Remove rows with NaN in HelpfulnessRatio
df_filtered = df.dropna(subset=["HelpfulnessRatio"])
# Set pastel color palette
sns.set_palette("pastel")
# Create a bubble plot
plt.figure(figsize=(15, 10))
scatter = plt.scatter(
x=df_filtered["HelpfulnessRatio"],
y=df_filtered["Days_Since_Review"],
s=df_filtered["Word Count"] * 100 / df_filtered["Word Count"].max(),
alpha=0.5,
c=df_filtered["Score"],
cmap="viridis",
)
# Customize the plot
plt.title(
"Bubble Plot: HelpfulnessRatio vs. Days Since Review, Bubble Size - Word Count, Color - Review Score"
)
plt.xlabel("HelpfulnessRatio")
plt.ylabel("Days Since Review")
plt.xlim(0, 1)
# Add a colorbar
plt.colorbar(scatter, label="Review Score")
# Show the plot
plt.show()
# ## Correlogram
# Convert 'Time' column to datetime format
df["Time"] = pd.to_datetime(df["Time"], unit="s")
# Create a 'Days_Since_Review' column
df["Days_Since_Review"] = (datetime.datetime.now() - df["Time"]).dt.days
# Remove rows with NaN in HelpfulnessRatio
df_filtered = df.dropna(subset=["HelpfulnessRatio"])
# Select columns for the pairplot
columns = ["Score", "HelpfulnessRatio", "Word Count", "Days_Since_Review"]
df_pairplot = df_filtered[columns]
# Define pastel color palette
pastel_colors = sns.color_palette("pastel")
# Create the pairplot with pastel colors
sns.pairplot(
df_pairplot, diag_kind="kde", markers="o", corner=True, palette=pastel_colors
)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Compute average scores by month
average_scores_by_month = df.groupby("Month")["Score"].mean().reset_index()
# Set up the figure and polar axis
fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(polar=True))
theta = np.linspace(0, 2 * np.pi, len(average_scores_by_month) + 1)[
:-1
] # Angle for each month
# Create bars
bars = ax.bar(
theta, average_scores_by_month["Score"], width=0.8, color=plt.cm.tab20c.colors
)
# Set up the labels and ticks for each month
ax.set_xticks(theta)
ax.set_xticklabels(
["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
)
ax.set_yticklabels([]) # Remove radial ticks
# Display the value of the score on top of each bar
for i, bar in enumerate(bars):
score = average_scores_by_month["Score"].iloc[i]
ax.text(
bar.get_x() + bar.get_width() / 2,
bar.get_height() + 0.1,
f"{score:.2f}",
ha="center",
va="bottom",
)
# Add a title
ax.set_title("Average Review Score by Month", y=1.08)
plt.show()
# Find the top 10 ProductIds with the most reviews
top_10_product_ids = df["ProductId"].value_counts().head(10).index
# Filter the data to include only the top 10 ProductIds
top_10_products_df = df[df["ProductId"].isin(top_10_product_ids)]
# Create a violin plot for each of the top 10 ProductIds
plt.figure(figsize=(15, 8))
sns.violinplot(x="ProductId", y="Score", data=top_10_products_df, palette="pastel")
plt.title("Violin Plot of Review Scores for Top 10 ProductIds")
plt.xlabel("ProductId")
plt.ylabel("Review Score")
plt.show()
|
# * We used the official RSNA dataset, which stores the mammograms as DCM files.
# * These have to be converted to PNG.
# * The code below converts the train and test DCM sets into PNG sets.
# import libraries
import os
import cv2
import glob
import gdcm
import pydicom
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
import tensorflow as tf
import tensorflow.keras.backend as K
import matplotlib.image as mpimg
pd.options.display.max_columns = 50
# ### **Convert test data DCM to PNG**
# Save the processed image
def process(f, size=512, save_folder="", extension="png"):
patient = f.split("/")[-2]
image = f.split("/")[-1][:-4]
dicom = pydicom.dcmread(f)
img = dicom.pixel_array
img = (img - img.min()) / (img.max() - img.min())
if dicom.PhotometricInterpretation == "MONOCHROME1":
img = 1 - img
img = cv2.resize(img, (size, size))
cv2.imwrite(
save_folder + f"{patient}_{image}.{extension}", (img * 255).astype(np.uint8)
)
def create_pngs_from_dcms(dcm_path, SAVE_FOLDER, SIZE, EXTENSION):
train_images = glob.glob(dcm_path)
    print(f"...........{len(train_images)} images loaded")  # 54706 for the full train set
os.makedirs(SAVE_FOLDER, exist_ok=True)
    # the output folder is created under /kaggle/working if it does not already exist
print("...........New folder created")
_ = Parallel(n_jobs=4)(
delayed(process)(uid, size=SIZE, save_folder=SAVE_FOLDER, extension=EXTENSION)
for uid in tqdm(train_images)
)
print("............Finished")
dcm_path = "/kaggle/input/rsna-breast-cancer-detection/test_images/*/*.dcm"
SAVE_FOLDER = "test_output/"
SIZE = 512
EXTENSION = "png"
create_pngs_from_dcms(dcm_path, SAVE_FOLDER, SIZE, EXTENSION)
# ## **Visualise the data we are working with**
train_images = glob.glob("/kaggle/input/rsna-breast-cancer-512-pngs/*.png")
train_images[0]
for f in tqdm(train_images[-4:]):
img = cv2.imread(f)
plt.figure(figsize=(15, 15))
plt.imshow(img, cmap="gray")
plt.show()
# ## **Load train and test datasets**
train_df = pd.read_csv("/kaggle/input/rsna-breast-cancer-detection/train.csv")
test_df = pd.read_csv("/kaggle/input/rsna-breast-cancer-detection/test.csv")
train_df.head()
train_df.info()
# ## **Add image paths to train Dataframe**
train_images = glob.glob("/kaggle/input/rsna-breast-cancer-512-pngs/*.png")
for path in tqdm(train_images):
name = path.split("/")[-1]
chunks = name.split(".")[0]
patient_id = chunks.split("_")[0]
image_id = chunks.split("_")[1]
idx = (train_df["patient_id"] == int(patient_id)) & (
train_df["image_id"] == int(image_id)
)
train_df.loc[idx, "img_path"] = path
train_df[["patient_id", "image_id", "img_path"]].head()
# ## **Add image paths to test Dataframe**
test_images = glob.glob("/kaggle/working/test_output/*.png")
for path in tqdm(test_images):
name = path.split("/")[-1]
chunks = name.split(".")[0]
patient_id = chunks.split("_")[0]
image_id = chunks.split("_")[1]
idx = (test_df["patient_id"] == int(patient_id)) & (
test_df["image_id"] == int(image_id)
)
test_df.loc[idx, "img_path"] = path
test_df[["patient_id", "image_id", "img_path"]].head()
# # Exploration and Feature Engineering
# * 'site_id'
# * 'patient_id'
# * 'image_id'
# * 'laterality'
# * 'view' = the craniocaudal (CC) view and the mediolateral oblique (MLO) view.
# * 'age'
# * 'cancer'
# * 'biopsy'
# * 'invasive'
# * 'BIRADS'
# * 'implant'
# * 'density'
# * 'machine_id'
# * 'difficult_negative_case'
# * 'img_path'
train_df.columns
train_df.head()
drop_these = ["site_id", "view", "machine_id"]
train_df2 = train_df.drop(drop_these, axis=1)
train_df2.head()
cols = ["image_id", "age", "machine_id", "img_path"]
for i in list(train_df.drop(cols, axis=1).columns):
print(i)
print(train_df[i].value_counts())
print("----------------\n")
train_df = train_df.drop(labels=["site_id", "view"], axis=1)
val_df = val_df.drop(labels=["ID", "Disease_Risk", "ODPM", "HR"], axis=1)
test_df = test_df.drop(labels=["ID", "Disease_Risk", "ODPM", "HR"], axis=1)
Y_train = list(train_df.drop(["img_path"], axis=1).columns)
Y_val = list(val_df.drop(["img_path"], axis=1).columns)
Y_test = list(test_df.drop(["img_path"], axis=1).columns)
unq_disease = len(Y_train)
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255,
horizontal_flip=True,
vertical_flip=True,
rotation_range=90,
    brightness_range=[0.9, 1.1],  # keep brightness close to the original image
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
# The value for class_mode in flow_from_dataframe MUST be 'raw' if you are attempting to do multilabel classification.
train_gen = train_datagen.flow_from_dataframe(
train_df,
x_col="img_path",
y_col=Y_train,
target_size=(150, 150),
class_mode="raw",
batch_size=16,
shuffle=True,
)
test_gen = test_datagen.flow_from_dataframe(
test_df, x_col="img_path", y_col=Y_test, target_size=(150, 150), class_mode="raw"
)
|
# # CNNs - Advanced methods
# This notebook will cover advanced methods for regularizing CNN (and other) models, data augmentation and transfer learning.
# Before we start, let's copy some useful functions from the last notebook (of course, you can also simply continue to edit your previous notebook).
# Don't forget that it might be a good idea to turn on GPU acceleration.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from glob import glob
test_images = sorted(
[
fn.split("/")[-1]
for fn in glob(
"/kaggle/input/kit-predictive-data-analytics-2023-part-2/test/test/*.jpg"
)
]
)
train_df = pd.read_csv(
"/kaggle/input/kit-predictive-data-analytics-2023-part-2/train.csv"
)
train_dir = "/kaggle/input/kit-predictive-data-analytics-2023-part-2/train/train/"
test_dir = "/kaggle/input/kit-predictive-data-analytics-2023-part-2/test/test/"
classes = sorted(train_df["class"].unique())
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
validation_split=0.2, rescale=1.0 / 255
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
batch_size=32,
x_col="fn",
y_col="class",
target_size=(64, 64),
subset="training",
)
valid_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
batch_size=32,
x_col="fn",
y_col="class",
target_size=(64, 64),
subset="validation",
)
test_generator = test_datagen.flow_from_directory(
directory="/kaggle/input/kit-predictive-data-analytics-2023-part-2/",
classes=["test"],
batch_size=32,
target_size=(64, 64),
shuffle=False,
)
# ## Returning to our large CNN model
model = tf.keras.Sequential(
[
Input(shape=(64, 64, 3)),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Flatten(),
Dense(64, activation="relu"),
Dense(10, activation="softmax"),
]
)
model.summary()
model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# As noted in Notebook 4, we observe some overfitting. Just as we did there, let's again reduce the learning rate.
model.optimizer.lr = 1e-4
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# Next, we will take a look at some additional techniques that are sometimes helpful: L2 regularization (weight decay) and Dropout.
# ## Regularization #1: L2
# L2 regularization is a way to force the network to not focus too much on a few parameters. This helps distribute the weights more evenly and thereby reduces overfitting.
# We need to add L2 separately to all layers with a $\lambda$ parameter. This is another hyper-parameter that we need to tune.
l2_lambda = 2e-5
model = tf.keras.Sequential(
[
Input(shape=(64, 64, 3)),
Conv2D(
128,
kernel_size=(3, 3),
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(l2_lambda),
),
Conv2D(
128,
kernel_size=(3, 3),
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(l2_lambda),
),
MaxPooling2D(),
Conv2D(
128,
kernel_size=(3, 3),
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(l2_lambda),
),
Conv2D(
128,
kernel_size=(3, 3),
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(l2_lambda),
),
MaxPooling2D(),
Conv2D(
128,
kernel_size=(3, 3),
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(l2_lambda),
),
Conv2D(
128,
kernel_size=(3, 3),
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(l2_lambda),
),
MaxPooling2D(),
Flatten(),
Dense(64, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(1e-5)),
Dense(
10, activation="softmax", kernel_regularizer=tf.keras.regularizers.l2(1e-5)
),
]
)
model.summary()
model.compile(
tf.keras.optimizers.Adam(1e-3), "categorical_crossentropy", metrics=["accuracy"]
)
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# The regularization seems to largely avoid overfitting. Let's again decrease the learning rate and continue training.
model.optimizer.lr = 1e-4
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# We get a much better validation accuracy because we are overfitting less.
# ## Regularization #2: Dropout
# Another very popular way to prevent overfitting is using dropout. This is usually only done for fully connected layers but also works for convolutional layers. In dropout we need to set a ratio of neurons to be randomly dropped for each batch. Let's add dropout layers with a 25% dropout.
model = tf.keras.Sequential(
[
Input(shape=(64, 64, 3)),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Dropout(0.25),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Dropout(0.25),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Flatten(),
Dropout(0.25),
Dense(64, activation="relu"),
Dense(10, activation="softmax"),
]
)
model.summary()
model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# Dropout also works. It is also possible to combine L2 regularization and dropout.
# We could go ahead and again reduce the learning rate here, but let's skip that for now.
# A useful way of reducing the learning rate automatically when the validation loss stops decreasing is the `ReduceLROnPlateau` callback, see https://keras.io/api/callbacks/reduce_lr_on_plateau/ for details.
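# A minimal sketch of wiring up that callback (the factor and patience values below are illustrative, not tuned):
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor="val_loss", factor=0.1, patience=2, min_lr=1e-6
)
# h = model.fit(train_generator, epochs=10, validation_data=valid_generator, callbacks=[reduce_lr])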
# Recall that to create a submission, you can use the function we wrote in the previous notebook.
def create_submission(model, test_generator, classes, test_images):
probs = model.predict(test_generator)
preds = np.argmax(probs, 1)
pred_classes = [classes[i] for i in preds]
sub = pd.DataFrame({"fn": test_images, "class": pred_classes})
return sub
sub = create_submission(model, test_generator, classes, test_images)
# sub.to_csv('submission2.csv', index=False)
# ## Transfer learning
# Next, we will see how we can get even better scores in less than a minute.
# Let's take an already defined and trained model from Keras called EfficientNet. You can find out more about EfficientNet at https://arxiv.org/abs/1905.11946.
from tensorflow.keras.applications import EfficientNetB0
# This model we are using requires images to be of size 224, so we just upscale them by adjusting our generators. Note that we also do not rescale the inputs here since EfficientNet does its own normalization and expects inputs between 0 and 255.
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(validation_split=0.2)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator()
size = 224
train_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
x_col="fn",
y_col="class",
target_size=(size, size),
subset="training",
)
valid_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
x_col="fn",
y_col="class",
target_size=(size, size),
subset="validation",
)
test_generator = test_datagen.flow_from_directory(
directory="/kaggle/input/ml-unibern-2022-part2/",
classes=["test"],
batch_size=32,
target_size=(size, size),
shuffle=False,
)
X, y = train_generator[0]
X.shape, y.shape
# We are going to load the model that has already been trained and just add a few more layers to it. Then we train as usual.
inp = Input(shape=(224, 224, 3))
model = EfficientNetB0(
include_top=False, input_tensor=inp, weights="imagenet", classes=len(classes)
)
model.trainable = False
# Rebuild top
x = GlobalAveragePooling2D()(model.output)
x = BatchNormalization()(x)
top_dropout_rate = 0.2
x = Dropout(top_dropout_rate)(x)
out = Dense(len(classes), activation="softmax")(x)
model = tf.keras.Model(inp, out)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
tf.keras.utils.plot_model(model)
model.fit(train_generator, epochs=1, validation_data=valid_generator)
# After only one epoch we are already doing better than with our previous model.
model.fit(train_generator, epochs=5, validation_data=valid_generator)
# ## Data augmentation
# Let's see how we can do even better with one last and very powerful technique to prevent overfitting: data augmentation. This doesn't change anything about the network itself but rather modifies the training features.
# In the case of images, for example, the network should still come up with the same output regardless of whether the image is flipped or rotated a little. Let's see how we can do this.
# Here, we will pass the augmentation arguments to the ImageDataGenerator class.
def _create_generator(**kwargs):
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**kwargs)
size = 224
train_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
x_col="fn",
y_col="class",
target_size=(size, size),
subset="training",
shuffle=False,
)
return train_generator
np.random.seed(42)
def plot_augmentation(batch, sample, **kwargs):
train_generator = _create_generator(**kwargs)
imgs = []
for i in range(8):
imgs.append(train_generator[batch][0][sample].astype(int))
fig, axs = plt.subplots(2, 4, figsize=(15, 10))
for img, ax in zip(imgs, axs.flat):
ax.imshow(img)
plt.tight_layout()
# Without augmentation, the network will see exactly the same sample every epoch.
plot_augmentation(0, 3)
# Now we can turn on horizontal and vertical flipping. This will be randomly chosen every epoch.
plot_augmentation(0, 3, horizontal_flip=True, vertical_flip=True)
# We can also rotate the image within a range. Here we have to be careful though because this will introduce artifacts at the edges.
plot_augmentation(0, 3, rotation_range=45)
# Finally, we could also change the brightness.
plot_augmentation(0, 3, brightness_range=(0.5, 1.5))
# So which ones make sense?
# The image should still clearly be identifiable by a human. So use your intuition or just try it out. For example, vertical flipping makes sense for satellite images but not so much for pictures of cars.
# Check out all the possible augmentations in the ImageDataGenerator class: https://keras.io/api/preprocessing/image/.
# ### Training models with augmentation
# So let's now see what effect this has on our accuracy. For now we will choose flipping, and moderate rotation and brightness changes.
# However, one technical thing to be aware of is that using rotation from the ImageDataGenerator class is very slow because it's doing matrix multiplications on the CPU. If we wish to do so, we can add it into our model later (see below).
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
validation_split=0.2,
brightness_range=(0.8, 1.2),
# rotation_range=10,
horizontal_flip=True,
vertical_flip=True,
)
size = 224
train_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
x_col="fn",
y_col="class",
target_size=(size, size),
subset="training",
)
valid_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
x_col="fn",
y_col="class",
target_size=(size, size),
subset="validation",
)
test_generator = test_datagen.flow_from_directory(
directory="/kaggle/input/kit-predictive-data-analytics-2023-part-2/",
classes=["test"],
batch_size=32,
target_size=(size, size),
shuffle=False,
)
from tensorflow.keras.layers.experimental import preprocessing
X, y = train_generator[0]
X.shape
inp = Input(shape=(224, 224, 3))
x = inp
# x = preprocessing.RandomRotation(factor=0.05)(x) # turn on rotation
model = EfficientNetB0(
include_top=False, input_tensor=x, weights="imagenet", classes=len(classes)
)
model.trainable = False
# Rebuild top
x = GlobalAveragePooling2D()(model.output)
x = BatchNormalization()(x)
top_dropout_rate = 0.2
x = Dropout(top_dropout_rate)(x)
out = Dense(len(classes), activation="softmax")(x)
model = tf.keras.Model(inp, out)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_generator,
epochs=5,
validation_data=valid_generator,
workers=4,
use_multiprocessing=True,
)
# ### Transfer learning part 2: unfreezing the model
# Finally, we can try unfreezing (parts of) the backbone of the network, i.e. making those weights trainable as well. Because we don't want to change them too much, let's also lower the learning rate.
# We will also leave the BatchNormalization layers frozen. You can read more about batch normalization here: https://machinelearningmastery.com/batch-normalization-for-training-of-deep-neural-networks/.
def unfreeze_model(model):
# We unfreeze the top 20 layers while leaving BatchNorm layers frozen
for layer in model.layers[-20:]:
if not isinstance(layer, BatchNormalization):
layer.trainable = True
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
unfreeze_model(model)
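# (Added) a quick sanity check, not part of the original notebook: after unfreezing,
# the number of trainable parameters should be noticeably larger than for the frozen model.
n_trainable = int(np.sum([np.prod(v.shape) for v in model.trainable_variables]))
print("trainable parameters after unfreezing:", n_trainable)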
model.fit(
train_generator,
epochs=5,
validation_data=valid_generator,
workers=4,
use_multiprocessing=True,
)
def create_submission(model, test_generator, classes, test_images):
probs = model.predict(test_generator)
preds = np.argmax(probs, 1)
pred_classes = [classes[i] for i in preds]
sub = pd.DataFrame({"fn": test_images, "class": pred_classes})
return sub
sub = create_submission(model, test_generator, classes, test_images)
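# (Added) the original cell ends with the submission dataframe; presumably it would
# then be written out along these lines (the filename is an assumption):
# sub.to_csv("submission.csv", index=False)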
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random
from tqdm import tqdm
# ## Load dataset
# read in the dataset CSV files
mnist = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
mnist.shape
# 42,000 rows with 785 columns each (1 label column + 784 pixel values)
len(mnist)
# 42000 labels
X = mnist.iloc[:, 1:].to_numpy()
# convert to a NumPy array
y = mnist.iloc[:, 0:1].to_numpy()
X.shape
len(y)
# ## Splitting the dataset
X_train = X[:32000, :]
X_test = X[32000:, :]
y_train = y[:32000, :]
y_test = y[32000:, :]
# ## Reshape the data into 28x28 images
# At this point each row is still a flat vector of 784 pixel values.
X_train = X_train.reshape(-1, 28, 28, 1)
# 1st argument: the number of images, inferred automatically by NumPy via -1
# 2nd argument: the image height
# 3rd argument: the image width
# 4th argument: the number of color channels (1, since the images are grayscale)
X_test = X_test.reshape(-1, 28, 28, 1)
# Alternative reshape without an explicit channel dimension:
# X_train = X_train.reshape(-1, 28, 28)
# 1st argument: the number of images, inferred automatically by NumPy via -1
# 2nd argument: the image height
# 3rd argument: the image width
# X_test = X_test.reshape(-1, 28, 28)
# ## Data preprocessing
X_train = X_train / 255.0
X_test = X_test / 255.0
# check the format by displaying the first 25 images
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)  # 5 rows, 5 columns
plt.xticks([])
plt.yticks([])
plt.grid(False)
    plt.imshow(X_train[i, :, :, 0], cmap=plt.cm.binary)  # drop the channel axis for imshow
    plt.xlabel(y_train[i, 0])
plt.show()
# ## Neural Network
# **Simple neural net**
def build_fc_model():
fc_model = tf.keras.Sequential(
[
            tf.keras.layers.Flatten(),  # flatten the 28x28 input into a vector
tf.keras.layers.Dense(128, activation=tf.nn.relu), # 1st hidden layer
tf.keras.layers.Dense(10, activation=tf.nn.softmax),
]
)
return fc_model
model = build_fc_model()
# compile the model
model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-1),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
## Train the model
BATCH_SIZE = 64
EPOCHS = 10
model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)
## EVALUATE
test_loss, test_acc = model.evaluate(X_test, y_test)
print("Test accuracy", test_acc)
# ## CNN
def build_cnn_model():
cnn_model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(
filters=24, kernel_size=(3, 3), activation=tf.nn.relu
),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(
filters=36, kernel_size=(2, 2), activation=tf.nn.relu
),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax),
]
)
return cnn_model
cnn_model = build_cnn_model()
cnn_model.predict(X_train[[0]])
print(cnn_model.summary())
# train and test CNN Model
from keras.backend import sparse_categorical_crossentropy
cnn_model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-1),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
cnn_model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)
# Evaluate
test_loss, test_acc = cnn_model.evaluate(X_test, y_test)
print("Test accuracy", test_acc)
# make predictions
predictions = cnn_model.predict(X_test)
predictions[0]
prediction = np.argmax(predictions[0])
print(prediction)
print("label of this digit is :", y_test[0])
plt.imshow(X_test[0, :, :, 0], cmap=plt.cm.binary)
# ## Model 2.0
# Rebuild the CNN model
cnn_model = build_cnn_model()
loss_history = []
batch_size = 12
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2) # define our optimizer
if hasattr(tqdm, "_instances"):
tqdm._instances.clear() # clear if it exists
for idx in tqdm(range(0, X_train.shape[0], batch_size)):  # iterate over the training set in mini-batches
# First grab a batch of training data and convert the input images to tensors
(images, labels) = (
X_train[idx : idx + batch_size],
y_train[idx : idx + batch_size],
)
images = tf.convert_to_tensor(images, dtype=tf.float32)
# GradientTape to record differentiation operations
with tf.GradientTape() as tape:
        # feed the images into the model; note that cnn_model ends in a softmax, so
        # this tensor holds class probabilities (hence the loss's default
        # from_logits=False behaviour below is correct)
        logits = cnn_model(images)
        # compute the sparse categorical cross entropy loss against the integer labels
        loss_value = tf.keras.backend.sparse_categorical_crossentropy(labels, logits)
loss_history.append(
loss_value.numpy().mean()
) # append the loss to the loss_history record
    # Backpropagation: use the tape to compute the gradient of the loss with respect
    # to all trainable parameters of the CNN model
    grads = tape.gradient(loss_value, cnn_model.trainable_variables)
optimizer.apply_gradients(zip(grads, cnn_model.trainable_variables))
plt.plot(loss_history)
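# (Added) label the axes so the training curve is easier to read; loss_history holds
# one mean sparse categorical cross entropy value per mini-batch.
plt.xlabel("batch index")
plt.ylabel("sparse categorical cross entropy")
plt.show()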
# # Submissions
test = test.to_numpy()
test = test.reshape(-1, 28, 28, 1)
test = test / 255.0
outputs = cnn_model.predict(test)  # predict() batches the test images internally instead of one huge forward pass
preds = np.argmax(outputs, 1)
print(preds)
preds.shape
sample_sub = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
sample_sub.head()
sample_sub["Label"] = preds
submission = sample_sub
submission.head()
# save the df
submission.to_csv("submission.csv", index=False)
|
# # Evolution Strategies
from numpy import asarray
from numpy import exp
from numpy import sqrt
from numpy import cos
from numpy import e
from numpy import pi
from numpy import argsort
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed
def objective(v):
x, y = v
return (
-20.0 * exp(-0.2 * sqrt(0.5 * (x**2 + y**2)))
- exp(0.5 * (cos(2 * pi * x) + cos(2 * pi * y)))
+ e
+ 20
)
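# (Added note) this objective is the 2-D Ackley function, a standard multimodal
# benchmark whose global minimum is f(0, 0) = 0; a quick sanity check:
print("objective at the origin:", objective(asarray([0.0, 0.0])))  # ~0.0 up to float error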
def in_bounds(point, bounds):
# enumerate all dimensions of the point
for d in range(len(bounds)):
# check if out of bounds for this dimension
if point[d] < bounds[d, 0] or point[d] > bounds[d, 1]:
return False
return True
def es_comma(objective, bounds, n_iter, step_size, mu, lam):
best, best_eval = None, 1e10
# calculate the number of children per parent
n_children = int(lam / mu)
# initial population
population = list()
for _ in range(lam):
candidate = None
while candidate is None or not in_bounds(candidate, bounds):
candidate = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
population.append(candidate)
# perform the search
for epoch in range(n_iter):
# evaluate fitness for the population
scores = [objective(c) for c in population]
# rank scores in ascending order
ranks = argsort(argsort(scores))
# select the indexes for the top mu ranked solutions
selected = [i for i, _ in enumerate(ranks) if ranks[i] < mu]
# create children from parents
children = list()
for i in selected:
# check if this parent is the best solution ever seen
if scores[i] < best_eval:
best, best_eval = population[i], scores[i]
print("%d, Best: f(%s) = %.5f" % (epoch, best, best_eval))
# create children for parent
for _ in range(n_children):
child = None
while child is None or not in_bounds(child, bounds):
child = population[i] + randn(len(bounds)) * step_size
children.append(child)
# replace population with children
population = children
return [best, best_eval]
# seed the pseudorandom number generator
seed(1)
# define range for input
bounds = asarray([[-5.0, 5.0], [-5.0, 5.0]])
# define the total iterations
n_iter = 5000
# define the maximum step size
step_size = 0.15
# number of parents selected
mu = 20
# the number of children generated by parents
lam = 100
# perform the evolution strategy (mu, lambda) search
best, score = es_comma(objective, bounds, n_iter, step_size, mu, lam)
print("Done!")
print("f(%s) = %f" % (best, score))
|