import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.feature_selection import SelectFromModel
df = pd.read_csv("/kaggle/input/cleaning-after-corr/Cleaning_after_corr.csv")
label = df["Label"]
df.head()
df.drop(["Label", "Unnamed: 0"], inplace=True, axis="columns")
df.head()
sel_ = SelectFromModel(LogisticRegression(solver="saga", penalty="l2", C=1))
sel_.fit(df, label)
non_imp = sel_.get_support()
non_imp
non_imp_index = []
for i in range(len(non_imp)):
if non_imp[i] == False:
non_imp_index.append(i)
non_imp_index
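# Equivalent vectorized form (illustrative): np.where on the negated support mask yields the
# same indices of features that SelectFromModel did not select.
print(np.where(~non_imp)[0])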
df_final = df.drop(df.columns[non_imp_index], axis="columns")
df_final.head()
from sklearn.metrics import classification_report
class elm_model:
def __init__(self, hidden_units, activation_function, x, y, weight, bias):
self.hidden_units = hidden_units
self.activation_function = activation_function
self.x = x
self.y = y
# self.class_num = np.unique(self.y).shape[0]
# self.beta = np.zeros((self.hidden_units, self.class_num))
# self.label = pd.get_dummies(self.y)
# self.label_df = pd.DataFrame(self.label)
self.m, self.n = self.x.shape
self.mu, self.sigma = 0, 0.01
# self.weight = np.matrix(np.random.uniform(weight[0], weight[1], (self.hidden_units, self.n)))
# self.bias = np.matrix(np.random.uniform(bias[0], bias[1], (1, self.hidden_units)))
self.weight = np.matrix(
np.random.normal(self.mu, self.sigma, (self.hidden_units, self.n))
)
self.bias = np.matrix(
np.random.normal(self.mu, self.sigma, (1, self.hidden_units))
)
self.H = 0
self.beta = 0
def sigmoid(self, z):
return 1 / (1 + np.exp(-1 * z))
def swish(self, z):
return z / (1 + np.exp(-1 * z))
def bipolar_sigmoid(self, z):
return (1 - np.exp(-1 * z)) / (1 + np.exp(-1 * z))
def relu(self, z):
z[z < 0] = 0
return z
def linear(self, z):
return z
def arc_tan(self, z):
return np.arctan(z)
def train(self, x, y):
x = np.matrix(x)
y = pd.get_dummies(y)
y = np.matrix(y)
# print(y)
self.H = np.dot(x, self.weight.T) + self.bias
if self.activation_function == "sigmoid":
self.H = self.sigmoid(self.H)
elif self.activation_function == "swish":
self.H = self.swish(self.H)
elif self.activation_function == "arc_tan":
self.H = self.arc_tan(self.H)
elif self.activation_function == "bipolar_sigmoid":
self.H = self.bipolar_sigmoid(self.H)
elif self.activation_function == "relu":
self.H = self.relu(self.H)
elif self.activation_function == "linear":
self.H = self.linear(self.H)
# H_plus = np.linalg.inv(self.H.T * self.H) * self.H.T
H_plus = np.dot(np.linalg.inv(np.dot(self.H.T, self.H)), self.H.T)
self.beta = np.dot(H_plus, y)
return
    def predict(self, x):
        # column 0 of predict_proba is the probability of class 0:
        # predict 0 when it exceeds 0.5, otherwise predict 1
        y_pred = self.predict_proba(x)
        y_pred = y_pred.T[0]
        y_pred[y_pred > 0.5] = 0
        y_pred[y_pred != 0] = 1
        return y_pred
def softmax(self, x):
return np.exp(x) / np.sum(np.exp(x), axis=1)
def predict_proba(self, x):
x = np.matrix(x)
# H = (x * self.weight.T) + self.bias
self.H = np.dot(x, self.weight.T) + self.bias
if self.activation_function == "sigmoid":
self.H = self.sigmoid(self.H)
if self.activation_function == "swish":
self.H = self.swish(self.H)
if self.activation_function == "arc_tan":
self.H = self.arc_tan(self.H)
if self.activation_function == "bipolar_sigmoid":
self.H = self.bipolar_sigmoid(self.H)
if self.activation_function == "relu":
self.H = self.relu(self.H)
if self.activation_function == "linear":
self.H = self.linear(self.H)
# y_pred = H * self.beta
y_pred = np.dot(self.H, self.beta)
y_pred_T = y_pred.T
return np.array(self.softmax(y_pred).tolist())
def score(self, x, y):
y_pred = self.predict(x)
# print(y_pred)
# y_pred = y_pred.T.tolist()
print(classification_report(y, y_pred, digits=5))
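# Side note (illustrative sketch, not part of the pipeline): the ELM training step above solves
# beta = pinv(H) @ Y in closed form. np.linalg.pinv is a more numerically robust way to obtain
# the same pseudo-inverse than explicitly inverting H.T @ H as done in elm_model.train.
H_demo = np.random.normal(size=(8, 4))
Y_demo = np.random.normal(size=(8, 2))
beta_normal_eq = np.linalg.inv(H_demo.T @ H_demo) @ H_demo.T @ Y_demo
beta_pinv = np.linalg.pinv(H_demo) @ Y_demo
print(np.allclose(beta_normal_eq, beta_pinv))  # True (up to floating point error)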
from sklearn.model_selection import train_test_split
# np.set_printoptions(formatter={'float_kind':'{:f}'.format})
x_train, x_test, y_train, y_test = train_test_split(
df_final, label, test_size=0.2, random_state=42, stratify=label
)
weight = [-0.5, 0.5]
bias = [0, 1.0]
activation_fun = ["linear", "relu", "arc_tan", "sigmoid", "bipolar_sigmoid", "swish"]
# activation_fun = ['relu']
for activation in activation_fun:
print(activation)
elm = elm_model(70, activation, x_train, y_train, weight, bias)
elm.train(x_train, y_train)
# y_pred = elm.predict(x_test)
elm.score(x_test, y_test)
print()
elm = elm_model(70, "arc_tan", x_train, y_train, weight, bias)
elm.train(x_train, y_train)
from lime.lime_tabular import LimeTabularExplainer
class_names = ["0", "1"]
explainer = LimeTabularExplainer(
x_train.values,
feature_names=df_final.columns,
class_names=class_names,
mode="classification",
)
def cf(d):
    # helper originally written for LIME: returns the (n_samples, 2) class-probability array
    x = elm.predict_proba(d)
    return np.array(x.tolist())
print(x_test.iloc[2])
np.set_printoptions(formatter={"float_kind": "{:f}".format})
y_test.iloc[1000]
explaination = explainer.explain_instance(
x_test.iloc[100], elm.predict_proba, num_features=12
)
explaination.show_in_notebook(show_all=False)
# for i in activation_fun:
# model = elm(hidden_units=32, activation_function= i, random_type='normal', x=x_train, y=y_train, C=0.1, elm_type='clf')
# beta, train_accuracy, running_time = model.fit('solution1')
# prediction = model.predict(x_test)
# print(i)
# print('classifier test accuracy:', model.score(x_test, y_test))
# y_pred = prediction.tolist()
# abc = y_test.to_frame()
# y_true = abc['Label'].to_list()
# target_names = ['class 0', 'class 1']
# print(classification_report(y_true, y_pred, target_names=target_names, digits=4))
# print()
# print()
|
# # CNN using transfer learning in Keras
# ## Abstract
# What if we could detect cancer at an early stage? In this competition, the challenge is to develop a model that accurately identifies metastatic cancer in small image patches extracted from larger digital pathology scans. The required output is a probability for each patch, so this is a binary task scored on predicted probabilities rather than hard classifications.
# The goal of this project is to develop a deep learning model using transfer learning to classify the image patches as positive or negative for metastatic cancer. We use the pretrained ResNet152 model as a feature-extraction backbone and build a classifier on top of it with a few fully connected dense layers. The model is trained on a single GPU and evaluated using the area under the ROC curve as the primary metric. We also monitor accuracy and validation loss during training to ensure optimal performance. We use hyperparameter tuning to find an appropriate learning rate for our model. After obtaining the final model, we create the submission file to get the final score for this Kaggle competition.
# The link to the original competition is https://www.kaggle.com/competitions/histopathologic-cancer-detection/overview, where you can get the same dataset as used in this notebook.
# **keywords**: binary classification, Keras, CNN, transfer learning, ResNet152, image augmentation, hyperparameter tuning
# ### Import libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras import models, layers, mixed_precision
train_dir = "/kaggle/input/histopathologic-cancer-detection/train"
test_dir = "/kaggle/input/histopathologic-cancer-detection/test"
policy = mixed_precision.Policy("mixed_float16")
mixed_precision.set_global_policy(policy)
print("Compute dtype: %s" % policy.compute_dtype)
print("Variable dtype: %s" % policy.variable_dtype)
print("Traing Number: ", len(os.listdir(train_dir)))
print("Test Number: ", len(os.listdir(test_dir)))
# We load the training dataframe for later image loading. The training set has 2 classes: 0 for no cancer and 1 for at least one cancer cell in the patch. About 40% of the training images are cancer images.
df = pd.read_csv("/kaggle/input/histopathologic-cancer-detection/train_labels.csv")
print(
"Data's target distribution ((1) label num/ (1 + 0) label num): ",
len(df[df.label == 1]) / len(df),
)
df.head()
df.label = df.label.astype(str)
df.id = df.id + ".tif"
print(df.info())
df.head()
# ## EDA
# The plot shows some training images. There are no obvious features that we can spot by eye to classify which images indicate cancer and which do not.
w = 10
h = 10
fig = plt.figure(figsize=(15, 15))
columns = 10
rows = 5
for i in range(1, columns * rows + 1):
img = plt.imread(train_dir + "/" + df.iloc[i]["id"])
fig.add_subplot(rows, columns, i)
plt.axis("off")
plt.title(df.iloc[i]["label"])
plt.imshow(img)
plt.show()
# One image shape is `(width: 96, height: 96, color channel: 3)`.
plt.figure()
img = plt.imread(train_dir + "/" + df.iloc[0]["id"])
print("Image shape: ", img.shape)
print("Label: ", df.iloc[0]["label"])
plt.imshow(img)
plt.colorbar()
plt.grid(False)
plt.show()
# Below are the helper functions we will use for model building and evaluation.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def get_train_val_generator(train_datagen, df, sample_frac=1.0, bs=64):
df = df.sample(frac=sample_frac, random_state=42)
train_generator = train_datagen.flow_from_dataframe(
dataframe=df,
directory=train_dir,
x_col="id",
y_col="label",
subset="training",
target_size=(96, 96),
batch_size=bs,
class_mode="binary",
)
valid_generator = train_datagen.flow_from_dataframe(
dataframe=df,
directory=train_dir,
x_col="id",
y_col="label",
subset="validation",
target_size=(96, 96),
batch_size=bs,
shuffle=False,
class_mode="binary",
)
return train_generator, valid_generator
def get_model(pretrained_model, preprocess_input):
inputs = tf.keras.Input(shape=(96, 96, 3))
# For feature extraction using transfer learning
x = preprocess_input(inputs)
x = pretrained_model(x)
# For classifier
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(64, activation="relu")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(64, activation="relu")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(1)(x)
outputs = tf.keras.layers.Activation("sigmoid", dtype="float32")(x)
return tf.keras.Model(inputs, outputs)
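# Note: with the mixed_float16 policy set earlier, the final sigmoid activation above is kept in
# float32 (dtype="float32"), which the Keras mixed-precision guide recommends so that the output
# probabilities stay numerically stable.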
def fit_model(model, train_generator, valid_generator, epochs=5, callbacks=[]):
return model.fit(
train_generator,
steps_per_epoch=train_generator.n // train_generator.batch_size,
epochs=epochs,
validation_data=valid_generator,
validation_steps=valid_generator.n // valid_generator.batch_size,
use_multiprocessing=True,
workers=4,
callbacks=callbacks,
)
def plt_performance(train, valid, title):
plt.figure(figsize=(10, 10))
plt.subplot(2, 1, 1)
plt.plot(train, label="Training")
plt.plot(valid, label="Validation")
plt.legend(loc="upper left")
plt.ylim([min(plt.ylim()) - 0.1, max(plt.ylim()) + 0.1])
plt.title(title)
# ### Load sample training and validation set
# We take a 30% sample of all training data so that we can iterate on our experiments faster. The dataset is then split into two parts, training and validation. We use the validation set to monitor the model's performance while training.
train_datagen = ImageDataGenerator(validation_split=0.2)
train_generator, valid_generator = get_train_val_generator(
train_datagen, df, sample_frac=0.3
)
# ## Model Building and Evaluation
# ### Compare pretrained models
# The pretrained models will be used as feature-extraction layers. We compare each model's initial validation loss and conclude that `ResNet152` is the best choice for our base model. The EfficientNet model also looks promising; however, after some training, ResNet had the best performance among them all.
preprocess_mobile = tf.keras.applications.mobilenet_v2.preprocess_input
mobilenet_v2 = tf.keras.applications.MobileNetV2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_res = tf.keras.applications.resnet_v2.preprocess_input
resnet_v2 = tf.keras.applications.ResNet152V2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_incep = tf.keras.applications.inception_resnet_v2.preprocess_input
incep_v2 = tf.keras.applications.InceptionResNetV2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_dense = tf.keras.applications.densenet.preprocess_input
dense = tf.keras.applications.DenseNet201(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_eff = tf.keras.applications.efficientnet.preprocess_input
effnet_b2 = tf.keras.applications.EfficientNetB2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
models = [
(mobilenet_v2, preprocess_mobile),
(resnet_v2, preprocess_res),
(incep_v2, preprocess_incep),
(dense, preprocess_dense),
(effnet_b2, preprocess_eff),
]
for pretrained_model, preprocess in models:
model = get_model(pretrained_model, preprocess)
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
)
val_loss, val_acc = model.evaluate(valid_generator)
print("\nPretrained Model: ", pretrained_model.name)
print("Val Loss: ", val_loss)
print("Val Acc: ", val_acc)
# ## Model building and evaluation
# On top of the ResNet base we put two fully connected dense layers as the classifier. The loss is `BinaryCrossentropy`, since this is a binary task, with label smoothing set to `0.1`. The smoothing formula is `y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing`. The optimizer is `Adam` with a quick initial learning rate; we tune this LR value in a later part of this notebook. We make the top 10% of the ResNet layers trainable and keep the other layers frozen. This could increase the possibility of overfitting, so we need to watch for that. We see `Trainable params: 17,078,721` in the model's summary, which we find is a reasonable amount for this model and task. So we keep this setup and look at the model's structure.
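# As a quick sanity check of the label-smoothing formula quoted above, this is what it does to
# hard 0/1 labels (illustrative only, computed outside the training pipeline):
label_smoothing = 0.1
hard_labels = np.array([0.0, 1.0])
print(hard_labels * (1.0 - label_smoothing) + 0.5 * label_smoothing)  # [0.05 0.95]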
resnet_v2.trainable = True
print("Number of layers in the base net: ", len(resnet_v2.layers))
fine_tune_at = round(len(resnet_v2.layers) * 0.9)
print("Mobile model would be trainable from ", fine_tune_at)
for l in resnet_v2.layers[:fine_tune_at]:
l.trainable = False
base_lr = 3e-3
model = get_model(resnet_v2, preprocess_res)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=base_lr),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
print("Model trainable param number: ", len(model.trainable_variables))
model.summary()
# One important step when using transfer learning is the pretrained model's preprocessing function. In our model's case it is `tf.keras.applications.resnet_v2.preprocess_input`. The pretrained model was trained on data preprocessed this way, so we need to convert our data in the same way before feeding it into the model. Therefore, we apply the same preprocessing function to our input data to ensure that it matches the input format of the pretrained ResNet152 model.
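# Quick illustrative check (not part of the pipeline): resnet_v2.preprocess_input rescales raw
# pixel values from [0, 255] to roughly [-1, 1], which is the range the pretrained weights expect.
dummy_batch = np.random.randint(0, 256, size=(1, 96, 96, 3)).astype("float32")
print(preprocess_res(dummy_batch).min(), preprocess_res(dummy_batch).max())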
tf.keras.utils.plot_model(model, show_shapes=True)
# ### Model training
# We first run 3 epochs to see whether the model can learn from our dataset. The model reaches about `90`% accuracy on the validation set within the first 2 epochs. However, by epoch 3 the model starts to overfit and generalizes less well, with lower validation performance. This indicates that we need to deal with the overfitting. We added some mitigations, such as dropout and batch normalization, but the overfitting persists. Therefore, we decided to feed the model more randomized image data by applying image augmentation to improve its generalization.
# Training
decay_steps = 20
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(base_lr, decay_steps)
callbacks = [tf.keras.callbacks.LearningRateScheduler(lr_decayed_fn)]
history = fit_model(
model, train_generator, valid_generator, epochs=3, callbacks=callbacks
)
# Evaluating
plt_performance(
history.history["accuracy"], history.history["val_accuracy"], "Train/Valid Accuracy"
)
plt_performance(
history.history["loss"], history.history["val_loss"], "Train/Valid Loss"
)
# We apply some augmentation here at the stage of loading the image datasets. Random transformations effectively increase the training data and improve the model's validation performance. We did not try test-time augmentation (TTA), but it is also a valid way to ease overfitting.
train_datagen = ImageDataGenerator(
validation_split=0.2,
vertical_flip=True,
horizontal_flip=True,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.2,
)
train_aug_generator, valid_aug_generator = get_train_val_generator(
train_datagen, df, sample_frac=0.3
)
# After loading the augmented training dataset, we fit the model and look at its performance. We find that the augmentation improves the model's validation accuracy with less overfitting.
# Training
model = None
model = get_model(resnet_v2, preprocess_res)  # keep the ResNet backbone chosen above so the augmentation comparison uses the same model
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=base_lr),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
history = fit_model(
model, train_aug_generator, valid_aug_generator, epochs=3, callbacks=callbacks
)
# Evaluating
plt_performance(
history.history["accuracy"], history.history["val_accuracy"], "Train/Valid Accuracy"
)
plt_performance(
history.history["loss"], history.history["val_loss"], "Train/Valid Loss"
)
# ### Hyperparameter tuning for a better learning rate
# So far we have used a somewhat arbitrary learning rate, but it can likely be improved. We search for a better LR using `keras_tuner` and keep the best one.
x_train, y_train = train_aug_generator.next()
x_val, y_val = valid_aug_generator.next()
import keras_tuner as kt
class MyHyperModel(kt.HyperModel):
def build(self, hp):
model = get_model(resnet_v2, preprocess_res)
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=hp.Float(
"learning_rate", min_value=1e-4, max_value=1e-2, sampling="log"
)
),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
return model
tuner = kt.RandomSearch(MyHyperModel(), objective="val_loss", max_trials=10)
tuner.search(
x_train,
y_train,
validation_data=(x_val, y_val),
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=2)],
)
best_hps = tuner.get_best_hyperparameters(1)[0]
print("Best Learning Rate: ", best_hps.get("learning_rate"))
# ### Model training with the full training dataset
# We now test our final model on the test set. We first load the full training dataset and train our model with the optimized hyperparameters. The final model's accuracy is about `0.95` on the training set and `0.93` on the validation set.
train_full_generator, valid_full_generator = get_train_val_generator(
train_datagen, df, sample_frac=1
)
best_lr = 0.009
model = None
model = get_model(resnet_v2, preprocess_res)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=best_lr),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(best_lr, 25)
callbacks = [
tf.keras.callbacks.LearningRateScheduler(lr_decayed_fn),
tf.keras.callbacks.EarlyStopping(patience=2),
]
history = fit_model(
model, train_full_generator, valid_full_generator, epochs=12, callbacks=callbacks
)
# Evaluating
plt_performance(
history.history["accuracy"], history.history["val_accuracy"], "Train/Valid Accuracy"
)
plt_performance(
history.history["loss"], history.history["val_loss"], "Train/Valid Loss"
)
# ### Model inference on the test set
# The code below creates the submission file used to obtain the final score in the original Kaggle competition.
df_test = pd.read_csv(
"/kaggle/input/histopathologic-cancer-detection/sample_submission.csv"
)
df_test.id = df_test.id + ".tif"
test_generator = ImageDataGenerator().flow_from_dataframe(
dataframe=df_test,
directory=test_dir,
x_col="id",
y_col=None,
target_size=(96, 96),
batch_size=2,
shuffle=False,
class_mode=None,
)
test_generator.reset()
preds = model.predict(test_generator, verbose=1)
submission = pd.DataFrame()
submission["id"] = df_test["id"].apply(lambda x: x.split(".")[0])
submission["label"] = preds[:, 0]
submission.to_csv("submission.csv", index=False)
submission.head()
|
# # An exploration of the cities I would like to live
import numpy as np
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
# Import "quality of life" data and reformat country column by removing whtespace on lhs.
quality_of_life_filepath = "../input/city-quality-of-life-dataset/uaScoresDataFrame.csv"
quality_of_life_data = pd.read_csv(quality_of_life_filepath, index_col=0)
quality_of_life_data["UA_Country"] = quality_of_life_data["UA_Country"].str.lstrip()
quality_of_life_data.head()
# Plot of the "cost of living" for all the data split across the continents. All the columns in the "quality of life" data are range from 0.0 - 10.0 with the larger numbers indicating higher desirability. For cost of living, we can see that the cities in Europe, North America and Asia have a large range in the "cost of living" whereas, Oceania is mid-range and South America and Africa are on average cheaper.
sns.swarmplot(
x=quality_of_life_data["UA_Continent"], y=quality_of_life_data["Cost of Living"]
)
# I calculated a score "Score_1" by weighting the different columns from the "quality of life" data according to how important each variable is to me.
quality_of_life_data["Score_1"] = (
quality_of_life_data["Housing"]
+ quality_of_life_data["Cost of Living"]
+ (quality_of_life_data["Travel Connectivity"] * 0.9)
+ (quality_of_life_data["Safety"] * 0.9)
+ (quality_of_life_data["Healthcare"] * 0.8)
+ (quality_of_life_data["Education"] * 0.5)
+ (quality_of_life_data["Environmental Quality"] * 0.8)
+ (quality_of_life_data["Internet Access"] * 0.8)
+ (quality_of_life_data["Economy"] * 0.5)
+ (quality_of_life_data["Taxation"] * 0.5)
+ (quality_of_life_data["Leisure & Culture"] * 2)
+ (quality_of_life_data["Tolerance"] * 3)
+ (quality_of_life_data["Outdoors"] * 2)
)
# missing climate and public transport and salary and unesco
# score out of 100
# Plotting the distribution of scores across the continents, I can see that cities in Europe score higher on average than those on the other continents and are more numerous. This is lucky, as being European I have a preference to stay within Europe.
sns.swarmplot(x=quality_of_life_data["UA_Continent"], y=quality_of_life_data["Score_1"])
# I limit the "quality of life" data down to Europe and create a list "country" for all the countries that feature.
Europe_quality = quality_of_life_data.loc[quality_of_life_data.UA_Continent == "Europe"].copy()
Europe_quality
arr1 = Europe_quality.UA_Country.unique()
country = arr1.tolist()
Italy_quality = quality_of_life_data.loc[quality_of_life_data.UA_Country == "Italy"].copy()
UK_quality = Europe_quality.loc[Europe_quality.UA_Country == "United Kingdom"].copy()
Europe_quality_scored = Europe_quality.sort_values(by=["Score_1"], ascending=False)
Europe_quality_scored[0:25]
Italy_quality.sort_values(by=["Score_1"], ascending=False)
UK_quality.sort_values(by=["Score_1"], ascending=False)
sns.kdeplot(data=Italy_quality["Cost of Living"], shade=True, label="Italy")
sns.kdeplot(data=UK_quality["Cost of Living"], shade=True, label="UK")
plt.legend()
sns.kdeplot(data=Italy_quality["Housing"], shade=True, label="Italy")
sns.kdeplot(data=UK_quality["Housing"], shade=True, label="UK")
plt.legend()
sns.swarmplot(x=Italy_quality["UA_Name"], y=Italy_quality["Cost of Living"])
plt.figure(figsize=(15, 4))
sns.swarmplot(x=UK_quality["UA_Name"], y=UK_quality["Cost of Living"])
plt.figure(figsize=(15, 4))
sns.swarmplot(x=UK_quality["UA_Name"], y=UK_quality["Housing"])
sns.swarmplot(x=Italy_quality["UA_Name"], y=Italy_quality["Housing"])
sns.kdeplot(data=Italy_quality["Score"], shade=True, label="Italy")
sns.kdeplot(data=UK_quality["Score"], shade=True, label="UK")
plt.legend()
sns.swarmplot(x=Italy_quality["UA_Name"], y=Italy_quality["Score"])
plt.figure(figsize=(15, 4))
sns.swarmplot(x=UK_quality["UA_Name"], y=UK_quality["Score"])
# I would like to live in a reasonably warm climate, so I added a climate variable. Ideal summer temperature: an average of 20°C over July and August. Ideal winter temperature: an average of 5°C over January and February.
world_temp_filepath = "../input/world-average-temperature/Avg_World_Temp_2020.csv"
world_temp_data = pd.read_csv(world_temp_filepath, index_col=0)
world_temp_data
Europe_temp = world_temp_data.loc[world_temp_data.Continent == "Europe"].copy()
Europe_temp["summer"] = (Europe_temp.Jul + Europe_temp.Aug) / 2
Europe_temp["winter"] = (Europe_temp.Jan + Europe_temp.Feb) / 2
Europe_temp.sort_values(by=["summer"], ascending=False)
Europe_temp.loc[Europe_temp.Country == "Italy"]
surface_temp_country_filepath = "../input/climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByCountry.csv"
country_temp_data = pd.read_csv(surface_temp_country_filepath, index_col="dt")
country_temp_data
Italy_temps = country_temp_data.loc[country_temp_data.Country == "Italy"]
Italy_temps.tail(25)
# sns.lineplot(data=Italy_temps)
# Italy_quality = quality_of_life_data.loc[quality_of_life_data.UA_Country == ' Italy']
UK_temps = country_temp_data.loc[country_temp_data.Country == "United Kingdom"]
UK_temps.tail(25)
unesco_filepath = "../input/unesco-world-heritage-sites/whc-sites-2019.csv"
unesco_data = pd.read_csv(unesco_filepath, index_col="id_no")
Italy_unesco = unesco_data.loc[unesco_data.states_name_en == "Italy"]
Italy_unesco.describe()
# Italy_unesco_natural = Italy_unesco.loc[Italy_unesco.category == "Natural"]
# Italy_unesco_natural
# Split the country column so that when a UNESCO site is shared by multiple countries, each country gets its own entry.
unesco_data_split = unesco_data.assign(
states_name_en=unesco_data.states_name_en.str.split(",")
).explode("states_name_en")
unesco_europe_NA_split = unesco_data_split.loc[
unesco_data_split.region_en == "Europe and North America"
]
unesco_europe_NA_split = unesco_europe_NA_split.replace(
[
"United Kingdom of Great Britain and Northern Ireland",
"North Macedonia",
"Russian Federation",
"Republic of Moldova",
],
["United Kingdom", "Macedonia", "Russia", "Moldova"],
)
# count UNESCO sites per country (after splitting shared sites) and keep the counts as a dict
unesco_europe_counts = unesco_europe_NA_split.groupby("states_name_en").size()
unesco_europe_D = unesco_europe_counts.to_dict()
unesco_europe_D
# unesco_countries_europe.sort()
# unesco_europe_F = unesco_europe.to_frame()
# unesco_europe_F = unesco_europe_F.rename(columns={'states_name_en' : 'unesco'})
# unesco_europe_F = unesco_europe_F.rename(columns={'' : 'Country'})
# unesco_europe_F
# unesco_europe_F = unesco_europe_F.set_index()
arr2 = unesco_europe_NA_split.states_name_en.unique()
unesco_europe = arr2.tolist()
unesco_europe
# unesco_data_split
# arr1 = Europe_quality.UA_Country.unique()
# country = arr1.tolist()
unesco_europe_D["Italy"]
Europe_quality["unesco"] = Europe_quality["UA_Country"].map(unesco_europe_D)
Europe_quality
# Europe_quality['unesco'].unique()
Europe_quality["unesco"] = Europe_quality["unesco"].fillna(0.0)
Europe_quality[Europe_quality.unesco.isnull()]
Europe_quality
# Europe_quality['unesco'] = Europe_quality['UA_Country'].isin(unesco_europe)
# for ind in Europe_quality.index:
# if Europe_quality['unesco'] == True:
# Europe_quality['unesco'] = unesco_europe_D[Europe_quality['UA_Country']]
# else:
# Europe_quality['unesco'] = 0
Italy_quality["unesco"] = Italy_quality["UA_Country"].map(unesco_europe_D)
Italy_quality
# Loop equivalent of the map above: look up each country's UNESCO site count, defaulting to 0.
for ind in Europe_quality.index:
    Europe_quality.loc[ind, "unesco"] = unesco_europe_D.get(
        Europe_quality.loc[ind, "UA_Country"], 0
    )
Europe_quality
# [[i] for i in range(len(Europe_quality)])
Italy_unesco_split = unesco_data_split.loc[unesco_data_split.states_name_en == "Italy"]
Italy_unesco_split
UK_unesco = unesco_data.loc[
unesco_data.states_name_en == "United Kingdom of Great Britain and Northern Ireland"
]
UK_unesco
# UK_unesco_natural = UK_unesco.loc[UK_unesco.category == "Natural"]
# UK_unesco_natural
unesco_europe_NA = unesco_data.loc[unesco_data.region_en == "Europe and North America"]
# unesco_europe_NA
unesco_country = unesco_europe_NA.groupby(["states_name_en"]).size()
unesco_country
unesco_country.value_counts()
n_unesco_cntry = []
for c in country:
    # use the loop variable c; the original lambda tested the literal string "c"
    n_unesco_cntry.append(
        unesco_europe_NA_split.states_name_en.map(lambda state: c in state).sum()
    )
print(n_unesco_cntry)
# n_unesco_cntry
# country_counts = pd.Series([n_unesco_cntry],index = [country])
# country_counts
# n_unesco_Italy = unesco_europe_NA.states_name_en.map(lambda state: "Italy" in state).sum()
|
# ## Introduction
# ### In this notebook we perform land cover classification from satellite imagery using the [DeepGlobe Land Cover Classification Dataset](https://www.kaggle.com/balraj98/deepglobe-land-cover-classification-dataset) and the fastai DataBlock API
from fastai.vision.all import *
from fastai.data.all import *
from pathlib import Path
path = Path("../input/deepglobe-land-cover-classification-dataset/train")
path.ls()
df1 = pd.read_csv("../input/deepglobe-land-cover-classification-dataset/class_dict.csv")
codes = df1["name"]
codes = array(codes, dtype=str)
df1
df1["pixel_value"] = round(
df1["r"] * 299 / 1000 + df1["g"] * 587 / 1000 + df1["b"] * 114 / 1000, 0
).astype(int, copy=False)
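# (The 0.299/0.587/0.114 weights above are the standard RGB-to-grayscale luma coefficients; the
# mask images are opened as single-channel grayscale, so each class colour collapses to one of
# the integer pixel values listed in `vals` below.)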
df1.sort_values(by="pixel_value")
vals = [0, 29, 105, 150, 179, 226, 255]
p2d = dict()
for i, val in enumerate(vals):
p2d[val] = i
p2d
items = partial(get_files, extensions=".jpg")
def masks(o):
return path / f"{o.stem[:-4]}_mask.png"
def get_msk(fn, p2d):
    # open the grayscale mask and remap each pixel value to its class index
    mask = masks(fn)
    mask_img = PILMask.create(mask)
    mask_tensor = tensor(mask_img)
    for val, idx in p2d.items():
        mask_tensor[mask_tensor == val] = idx
    return mask_tensor
get_y = lambda o: get_msk(o, p2d)
dblock = DataBlock(
blocks=(ImageBlock, MaskBlock(codes=codes)),
get_items=items,
get_y=get_y,
splitter=RandomSplitter(valid_pct=0.1, seed=4),
item_tfms=[Resize(128)],
batch_tfms=[*aug_transforms(), Normalize.from_stats(*imagenet_stats)],
)
dls = dblock.dataloaders(path, bs=4)
dls.show_batch(max_n=4)
dblock.summary(path)
learn = unet_learner(dls, resnet34)
learn.lr_find()
learn.fit_one_cycle(2, lr_max=3.9e-4)
learn.show_results()
preds = learn.get_preds()
p = preds[0][0]
plt.imshow(p[0])
plt.imshow(p[1])
learn.save("base_model")
|
import pandas as pd
df = pd.read_csv(
"/kaggle/input/tweets-about-the-top-companies-from-2015-to-2020/Tweet.csv"
)
df["engagement_num"] = df["comment_num"] + df["retweet_num"] + df["like_num"]
df = df[df["engagement_num"] > 2] # select only tweets with the highest engagement
df = df[["body", "post_date"]].drop_duplicates()
df = df.sort_values(by="post_date", ascending=True) # sort values chronologically
print(df.shape)
df.sample(5).head()
tweets = "\n".join(df["body"].values)
print(len(tweets) / 1e6)
print(tweets[:250])
with open("tweets.txt", "w", encoding="utf-8") as f:
f.write(tweets)
del df
del tweets
import gc
gc.collect()
# Follow https://github.com/google/sentencepiece/issues/101#issuecomment-436256818
import sentencepiece as spm
spp = spm.SentencePieceProcessor()
# use Byte Pair Encoding instead of Unigram for faster processing
spm.SentencePieceTrainer.train(
input="tweets1.txt",
model_prefix="tweets",
vocab_size=30000,
model_type="bpe",
user_defined_symbols=["<n>"],
character_coverage=1.0,
minloglevel=1,
)
# makes segmenter instance and loads the model file (m.model)
sp = spm.SentencePieceProcessor()
sp.load("tweets.model")
# returns vocab size
print(sp.get_piece_size())
# # encode: text => id
# print(sp.encode_as_ids('''Jeff Bezos lost <n> $AMZN <n>Analyst Report on Top Stock Market'''))
# # decode: id => text
# print(sp.decode_ids([4, 1227, 1422, 1233, 4, 3, 6, 119, 4, 3, 1371, 1218, 45, 824, 31, 4, 778, 4, 603]))
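# A quick round-trip sanity check of the trained tokenizer (illustrative; the exact ids depend
# on the learned vocabulary):
sample = "Jeff Bezos lost <n> $AMZN"
ids = sp.encode_as_ids(sample)
print(ids)
print(sp.decode_ids(ids))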
import torch
import torch.nn as nn
from torch.nn import functional as F
torch.cuda.is_available()
# hyperparameters
batch_size = 128 # 16 # how many independent sequences will we process in parallel?
block_size = 128 # 32 # what is the maximum context length for predictions?
max_iters = 20000 # 100
eval_interval = 500
learning_rate = 2e-4 # 1e-3
device = "cuda" if torch.cuda.is_available() else "cpu"
eval_iters = 200
n_embd = 384 # #64
n_head = 16 # 4
n_layer = 4 # 4
dropout = 0.2 # 0.0
# ------------
torch.manual_seed(83)
with open("tweets1.txt", "r", encoding="utf-8") as f:
text = f.read()
# adapt for sentencepiece by changing \n to <n>
# text = text.replace('\n', '<n>')
# the vocabulary size comes from the trained sentencepiece model
vocab_size = sp.get_piece_size()
print(vocab_size)
# # create a mapping from characters to integers
# chars = sorted(list(set(sp.encode_as_ids(text))))
# stoi = { ch:i for i,ch in enumerate(chars) }
# itos = { i:ch for i,ch in enumerate(chars) }
# encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers
# decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
# Train and test splits
data = torch.tensor(list(sp.encode_as_ids(text)), dtype=torch.long)
n = int(0.9 * len(data)) # first 90% will be train, rest val
train_data = data[:n]
val_data = data[n:]
# data loading
def get_batch(split):
# generate a small batch of data of inputs x and targets y
data = train_data if split == "train" else val_data
ix = torch.randint(len(data) - block_size, (batch_size,))
x = torch.stack([data[i : i + block_size] for i in ix])
y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
x, y = x.to(device), y.to(device)
return x, y
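# x and y both have shape (batch_size, block_size); y is the same sequence offset by one token,
# so y[:, t] is the next-token target for the context x[:, : t + 1].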
@torch.no_grad()
def estimate_loss():
out = {}
model.eval()
for split in ["train", "val"]:
losses = torch.zeros(eval_iters)
for k in range(eval_iters):
X, Y = get_batch(split)
logits, loss = model(X, Y)
losses[k] = loss.item()
out[split] = losses.mean()
model.train()
return out
class Head(nn.Module):
"""one head of self-attention"""
def __init__(self, head_size):
super().__init__()
self.key = nn.Linear(n_embd, head_size, bias=False)
self.query = nn.Linear(n_embd, head_size, bias=False)
self.value = nn.Linear(n_embd, head_size, bias=False)
self.register_buffer("tril", torch.tril(torch.ones(block_size, block_size)))
self.dropout = nn.Dropout(dropout)
def forward(self, x):
B, T, C = x.shape
k = self.key(x) # (B,T,C)
q = self.query(x) # (B,T,C)
# compute attention scores ("affinities")
wei = q @ k.transpose(-2, -1) * C**-0.5 # (B, T, C) @ (B, C, T) -> (B, T, T)
wei = wei.masked_fill(self.tril[:T, :T] == 0, float("-inf")) # (B, T, T)
wei = F.softmax(wei, dim=-1) # (B, T, T)
wei = self.dropout(wei)
# perform the weighted aggregation of the values
v = self.value(x) # (B,T,C)
out = wei @ v # (B, T, T) @ (B, T, C) -> (B, T, C)
return out
class MultiHeadAttention(nn.Module):
"""multiple heads of self-attention in parallel"""
def __init__(self, num_heads, head_size):
super().__init__()
self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
self.proj = nn.Linear(n_embd, n_embd)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
out = torch.cat([h(x) for h in self.heads], dim=-1)
out = self.dropout(self.proj(out))
return out
class FeedFoward(nn.Module):
"""a simple linear layer followed by a non-linearity"""
def __init__(self, n_embd):
super().__init__()
self.net = nn.Sequential(
nn.Linear(n_embd, 4 * n_embd),
nn.ReLU(),
nn.Linear(4 * n_embd, n_embd),
nn.Dropout(dropout),
)
def forward(self, x):
return self.net(x)
class Block(nn.Module):
"""Transformer block: communication followed by computation"""
def __init__(self, n_embd, n_head):
# n_embd: embedding dimension, n_head: the number of heads we'd like
super().__init__()
head_size = n_embd // n_head
self.sa = MultiHeadAttention(n_head, head_size)
self.ffwd = FeedFoward(n_embd)
self.ln1 = nn.LayerNorm(n_embd)
self.ln2 = nn.LayerNorm(n_embd)
def forward(self, x):
x = x + self.sa(self.ln1(x))
x = x + self.ffwd(self.ln2(x))
return x
# decoder-only transformer language model (despite the BigramLanguageModel name, it is no longer a bigram model)
class BigramLanguageModel(nn.Module):
def __init__(self):
super().__init__()
# each token directly reads off the logits for the next token from a lookup table
self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
self.position_embedding_table = nn.Embedding(block_size, n_embd)
self.blocks = nn.Sequential(
*[Block(n_embd, n_head=n_head) for _ in range(n_layer)]
)
self.ln_f = nn.LayerNorm(n_embd) # final layer norm
self.lm_head = nn.Linear(n_embd, vocab_size)
def forward(self, idx, targets=None):
B, T = idx.shape
# idx and targets are both (B,T) tensor of integers
tok_emb = self.token_embedding_table(idx) # (B,T,C)
pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)
x = tok_emb + pos_emb # (B,T,C)
x = self.blocks(x) # (B,T,C)
x = self.ln_f(x) # (B,T,C)
logits = self.lm_head(x) # (B,T,vocab_size)
if targets is None:
loss = None
else:
B, T, C = logits.shape
logits = logits.view(B * T, C)
targets = targets.view(B * T)
loss = F.cross_entropy(logits, targets)
return logits, loss
def generate(self, idx, max_new_tokens):
# idx is (B, T) array of indices in the current context
for _ in range(max_new_tokens):
# crop idx to the last block_size tokens
idx_cond = idx[:, -block_size:]
# get the predictions
logits, loss = self(idx_cond)
# focus only on the last time step
logits = logits[:, -1, :] # becomes (B, C)
# apply softmax to get probabilities
probs = F.softmax(logits, dim=-1) # (B, C)
# sample from the distribution
idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
# append sampled index to the running sequence
idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
return idx
model = BigramLanguageModel()
m = model.to(device)
# print the number of parameters in the model
print(sum(p.numel() for p in m.parameters()) / 1e6, "M parameters")
# create a PyTorch optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
iter_list = []
train_loss_list = []
val_loss_list = []
for iter in range(max_iters):
# every once in a while evaluate the loss on train and val sets
if iter % eval_interval == 0 or iter == max_iters - 1:
losses = estimate_loss()
print(
f"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}"
)
iter_list.append(iter)
train_loss_list.append(losses["train"])
val_loss_list.append(losses["val"])
# sample a batch of data
xb, yb = get_batch("train")
# evaluate the loss
logits, loss = model(xb, yb)
optimizer.zero_grad(set_to_none=True)
loss.backward()
optimizer.step()
import matplotlib.pyplot as plt
plt.plot(iter_list, train_loss_list, label="Loss, train")
plt.plot(iter_list, val_loss_list, label="Loss, validation")
plt.legend()
plt.xlabel("Number of Iterations")
plt.ylabel("Loss")
plt.show()
# generate from the model
context = torch.zeros((1, 1), dtype=torch.long, device=device)
# decode with sentencepiece and map the <n> marker back to real newlines
gen_text = sp.decode_ids(m.generate(context, max_new_tokens=1500)[0].tolist())
print(gen_text.count("<n>"))
print(gen_text.replace("<n>", "\n"))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
# Let's import the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
# Let's load the data from the current directory, turn it into a dataframe and assign it to the variable df (pd.read_csv(...csv)).
# Let's display the first 5 observations of the dataframe.
df = pd.read_csv("../input/iris-flower-dataset/IRIS.csv")
df.head(5)
# Let's display how many features and how many observations the dataframe consists of.
# explain what features and observations are
df.shape
# Let's display the data types of the variables in the dataframe and its memory usage.
df.info()
# Let's group by our target variable variety and print the standard deviation values of only the petal.length variable (a sketch follows below).
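# Hedged sketch for the grouping step described above; note that in this particular CSV the
# columns are named 'species' and 'petal_length' rather than 'variety' and 'petal.length'.
df.groupby("species")["petal_length"].std()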
# Let's display the basic statistics for the numerical variables in the dataframe.
# Let's use the standard deviation and mean values to reason about how much variance each variable has.
df.describe().T
# Let's observe how many missing values each feature in the dataframe has.
# isna flags missing values and sum counts them
df.isna().sum()
# Let's draw a correlation matrix to show whether there is correlation between the numerical variables, and reason about the correlation coefficients.
# Between which two variables is the strongest positive relationship?
df.corr()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
pl_results_filepath = "/kaggle/input/english-premier-league-results/results.csv"
pl_results_data = pd.read_csv(pl_results_filepath, encoding="ISO-8859-1")
pl_results_data.dropna(subset=["HTR", "HTHG", "HTAG"], inplace=True)
pl_results_data.DateTime = pd.to_datetime(pl_results_data.DateTime)
pl_results_data["year"] = pl_results_data.DateTime.dt.year
pl_results_data["month"] = pl_results_data.DateTime.dt.month
dummy_teams = pd.get_dummies(pl_results_data[["HomeTeam", "AwayTeam"]])
pl_results_data = pd.concat([pl_results_data, dummy_teams], axis=1)
pl_results_data.drop(["HomeTeam", "AwayTeam"], axis=1, inplace=True)
pl_results_data.head()
y = pl_results_data.FTR
pl_results_features = ["HTHG", "HTAG", "year", "month"]
# pl_results_features = ['year', 'month']
X = pd.concat([pl_results_data[pl_results_features], dummy_teams], axis=1)
# X=pl_results_data[pl_results_features]
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
# Define model. Specify a number for random_state to ensure same results each run
pl_results_model = RandomForestClassifier(max_depth=35, random_state=1)
# pl_results_model = MLPClassifier(alpha=0.6, max_iter=5000, random_state=1)
# pl_results_model = AdaBoostClassifier(random_state=1)
# pl_results_model = KNeighborsClassifier(50)
# Fit model
pl_results_model.fit(train_X, train_y)
def train_model(max_depth):
pl_results_model = RandomForestClassifier(max_depth=max_depth)
pl_results_model.fit(train_X, train_y)
return pl_results_model
# ## Model Validation
from sklearn.metrics import classification_report, accuracy_score
val_predictions = pl_results_model.predict(val_X)
print(accuracy_score(val_y, val_predictions))
print(classification_report(val_y, val_predictions))
# ## Testing the model on single values
def predict_result(model, HomeTeam, AwayTeam, year, month, HTHG, HTAG):
    # start from a template row, zero out every team dummy, then switch on the requested fixture
    row = val_X.iloc[0].copy()
    team_cols = [
        c for c in row.index if c.startswith("HomeTeam_") or c.startswith("AwayTeam_")
    ]
    row[team_cols] = 0
    row[f"HomeTeam_{HomeTeam}"] = 1
    row[f"AwayTeam_{AwayTeam}"] = 1
    row["HTHG"] = HTHG
    row["HTAG"] = HTAG
    row["year"] = year
    row["month"] = month
    return model.predict([row])
print(predict_result(pl_results_model, "Chelsea", "Bolton", 2022, 5, 1, 7))
# ## Extending to return probabilities
number_of_iterations = 200
pretrained_models = []
for i in range(number_of_iterations):
pretrained_models.append(train_model(30))
import warnings
warnings.filterwarnings("ignore")
def predict_probabilities(
pretrained_models, HomeTeam, AwayTeam, year, month, HTHG, HTAG
):
number_of_iterations = len(pretrained_models)
predictions_tally = {"H": 0, "D": 0, "A": 0}
for model in pretrained_models:
result = predict_result(model, HomeTeam, AwayTeam, year, month, HTHG, HTAG)[0]
predictions_tally[result] += 1 / number_of_iterations
return predictions_tally
print(predict_probabilities(pretrained_models, "Tottenham", "Liverpool", 2005, 5, 2, 2))
# ## Testing on recent Premier League Games
print(predict_probabilities(pretrained_models, "Man United", "Everton", 2023, 4, 1, 0))
print(
predict_probabilities(
pretrained_models, "Aston Villa", "Nott'm Forest", 2023, 4, 0, 0
)
)
print(predict_probabilities(pretrained_models, "Brentford", "Newcastle", 2023, 4, 1, 0))
print(predict_probabilities(pretrained_models, "Fulham", "West Ham", 2023, 4, 0, 1))
print(
predict_probabilities(pretrained_models, "Leicester", "Bournemouth", 2023, 4, 0, 1)
)
print(predict_probabilities(pretrained_models, "Tottenham", "Brighton", 2023, 4, 1, 1))
print(predict_probabilities(pretrained_models, "Wolves", "Chelsea", 2023, 4, 1, 0))
print(
predict_probabilities(pretrained_models, "Southampton", "Man City", 2023, 4, 0, 1)
)
|
# # Introduction
# ## Background
# This notebook represents my first foray into Kaggle competitions. I am currently doing Jeremy Howard's Fast.ai course. In lesson 4, Jeremy strongly encouraged his students to do some Kaggle competitions. This is because Kaggle competitions contain real problems that real companies are willing to pay money to solve, so we know that we are working on a realistic problem. At the same time, the data has been sufficiently prepared, which reduces the operational complexity of collecting and cleaning data.
# ## Why a Getting Started Competition?
# It has been ages since I actively developed an ML workflow. In addition, my honest assessment of my ML skills is that I am at best slightly better than an amateur. Thus, I plan to use a Getting Started competition to get some momentum going and familiarise myself with Kaggle competitions.
# Thereafter, I will be joining another active Kaggle competition with prize money to further hone my skills. I will do this before starting on lesson 5 of fast.ai course.
# # Setup
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import and EDA
import os
iskaggle = os.environ.get("KAGGLE_KERNEL_RUN_TYPE", "")
from pathlib import Path
if iskaggle:
path = Path("../input/nlp-getting-started")
train_df = pd.read_csv(path / "train.csv")
train_df
train_df["keyword"].isna().sum()
train_df["location"].isna().sum()
# 'keyword' is mostly filled, while 'location' has NaN for about a third of the dataset.
# I'm currently not sure how I will handle all the NaN. Keep this in mind moving forward.
train_df["keyword"].nunique()
train_df["location"].nunique()
# Keywords have a low number of unique values, while location has a high number of unique values.
# Let's see if there is any relationship between a location and target.
# count the occurrences of each value in the 'location' column
location_counts = train_df["location"].value_counts()
# group by 'location' and sum by 'target'
sum_by_location = train_df.groupby("location")["target"].sum()
percentage_of_positive_target = sum_by_location / location_counts
# combine the two series into a single dataframe
result = pd.concat([location_counts, percentage_of_positive_target], axis=1)
# rename the columns
result.columns = ["count", "perc_positive"]
print(result)
result["perc_positive"].hist(bins=10)
# Seems like the results are extreme: for some locations the target is highly likely to be positive, while the opposite is true for some other locations.
print(
"Proportion of positive targets in train_df is:",
train_df["target"].sum() / len(train_df.index),
)
train_df.describe(include="object")
train_df.fillna("None", inplace=True)
train_df["input"] = "KEYWORD: " + train_df.keyword + "; TEXT: " + train_df["text"]
train_df.input
# # Tokenization
from datasets import Dataset, DatasetDict
train_ds = Dataset.from_pandas(train_df)
train_ds
# pick the same model as Jeremy Howard's intro guide
# we can select another model later on
model_nm = "microsoft/deberta-v3-small"
iteration_idx = 1
models = ["microsoft/deberta-v3-small", "cardiffnlp/twitter-roberta-base-sentiment"]
from transformers import AutoModelForSequenceClassification, AutoTokenizer
tokz = AutoTokenizer.from_pretrained(models[iteration_idx])
def tok_func(x):
return tokz(x["input"])
tok_train_ds = train_ds.map(tok_func, batched=True)
train_ds[0]
tok_train_ds[0]
tok_train_ds = tok_train_ds.rename_columns({"target": "labels"})
dds = tok_train_ds.train_test_split(0.25, seed=42)
# # Validation and test sets
# We want to create a good validation set from our `train.csv`.
# It is important to investigate whether there are `location`/`keyword` values in the test set that do not exist in the train set.
eval_df = pd.read_csv(path / "test.csv")
eval_df.fillna("None", inplace=True)
# Assuming the column containing keywords is called "keyword"
train_keywords = set(train_df["keyword"])
eval_keywords = set(eval_df["keyword"])
# Find the set of keywords in test.df that are not in train.df
missing_keywords = eval_keywords - train_keywords
if missing_keywords:
print("The following keywords are missing from train_df:")
# print(missing_keywords)
else:
print("All keywords in eval_df are present in train_df.")
# Assuming the column containing keywords is called "keyword"
train_location = set(train_df["location"])
eval_location = set(eval_df["location"])
# Find the set of keywords in test.df that are not in train.df
missing_location = eval_location - train_location
if missing_location:
    print("The following locations are missing from train_df:")
    # print(missing_location)
else:
    print("All locations in eval_df are present in train_df.")
len(missing_location)
# Find the set of keywords in train_df that are not in test_df
missing_location_from_eval = train_location - eval_location
if missing_location_from_eval:
    print("The following locations are missing from eval_df:")
    # print(missing_location_from_eval)
else:
    print("All locations in train_df are present in eval_df.")
len(missing_location_from_eval)
# Given the clear lack of overlap between locations in `train_df` and `test_df`, we will edit the code above to remove `location` as a feature from the train data.
# Once done, we will proceed with creating our validation set.
eval_df["input"] = "KEYWORD: " + eval_df.keyword + "; TEXT: " + eval_df["text"]
eval_ds = Dataset.from_pandas(eval_df).map(tok_func, batched=True)
# # Metrics
# We are using F1 score as our optimisation metric. F1 is the official metric that the competition is judging our results by.
import evaluate
f1_metric = evaluate.load("f1")
def f1_hf(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return f1_metric.compute(predictions=predictions, references=labels)
# # Train our model
from transformers import TrainingArguments, Trainer
bs = 128
epochs = 4
lr = 1e-5
args = TrainingArguments(
"outputs",
learning_rate=lr,
warmup_ratio=0.1,
lr_scheduler_type="cosine",
fp16=True,
evaluation_strategy="epoch",
per_device_train_batch_size=bs,
per_device_eval_batch_size=bs * 2,
num_train_epochs=epochs,
weight_decay=0.01,
report_to="none",
)
model = AutoModelForSequenceClassification.from_pretrained(
models[iteration_idx], num_labels=2, ignore_mismatched_sizes=True
)
trainer = Trainer(
model,
args,
train_dataset=dds["train"],
eval_dataset=dds["test"],
tokenizer=tokz,
compute_metrics=f1_hf,
)
trainer.train()
preds = trainer.predict(eval_ds).predictions
preds = np.argmax(preds, axis=1)
preds
import datasets
submission = datasets.Dataset.from_dict({"id": eval_ds["id"], "target": preds})
submission.to_csv("submission.csv", index=False)
|
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import numpy as np
import os
import tensorflow_hub as hub
import tensorflow as tf
import torchaudio
import torch
from torch.utils.data import DataLoader, Dataset
df = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
AUDIO_PATH = Path("/kaggle/input/birdclef-2023/train_audio")
model = hub.load(
"https://kaggle.com/models/google/bird-vocalization-classifier/frameworks/TensorFlow2/variations/bird-vocalization-classifier/versions/2"
)
model_labels_df = pd.read_csv(
hub.resolve(
"https://kaggle.com/models/google/bird-vocalization-classifier/frameworks/tensorFlow2/variations/bird-vocalization-classifier/versions/2"
)
+ "/assets/label.csv"
)
SAMPLE_RATE = 32000
WINDOW = 5 * SAMPLE_RATE
bc2023_labels = sorted(df.primary_label.unique())
label_to_index = {v: k for k, v in enumerate(bc2023_labels)}
model_labels = {v: k for k, v in enumerate(model_labels_df.ebird2021)}
model_bc2023_indexes = [
model_labels[label] if label in model_labels else -1 for label in bc2023_labels
]
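# model_bc2023_indexes maps each BirdCLEF-2023 species to its column in the pretrained
# classifier's output; -1 marks species the pretrained model does not cover, and that -1 index
# later picks up the extra -100 logit appended to each prediction for those species.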
# # Save embeddings and predictions for every non-overlapping 5-second audio chunk
# use a torch dataloader to decode audio in parallel on CPU while GPU is running
class AudioDataset(Dataset):
def __len__(self):
return len(df)
def __getitem__(self, i):
filename = df.filename[i]
# audio = torchaudio.load(AUDIO_PATH / filename)[0].numpy()[0]
audio = torchaudio.load(AUDIO_PATH / filename, 0, WINDOW)[0].numpy()[0]
return audio, filename
dataloader = DataLoader(AudioDataset(), batch_size=1, num_workers=os.cpu_count())
# embeddings are formatted like {"filename": np.array(nx1280)}
# (where n = the number of non overlapping 5 sec chunks in the audio)
all_embeddings = {}
# predictions are formatted like {"filename": np.array(nx264)}
all_predictions = {}
with tf.device("/gpu:0"):
for audio, filename in tqdm(dataloader):
audio = audio[0]
filename = filename[0]
file_embeddings = []
file_predictions = []
# for i in range(0, len(audio), WINDOW):
for i in range(0, 1):
clip = audio[i : i + WINDOW]
if len(clip) < WINDOW:
clip = np.concatenate([clip, np.zeros(WINDOW - len(clip))])
result = model.infer_tf(clip[None, :])
file_embeddings.append(result[1][0].numpy())
prediction = np.concatenate(
[result[0].numpy(), -100], axis=None
) # add -100 logit for unpredicted birds
file_predictions.append(prediction[model_bc2023_indexes])
all_embeddings[filename] = np.stack(file_embeddings)
all_predictions[filename] = np.stack(file_predictions)
torch.save(all_embeddings, "embeddings.pt")
torch.save(all_predictions, "predictions.pt")
# # Scores of predictions on the first 5 seconds of each recording
predicted_classes = torch.tensor([row[0].argmax() for row in all_predictions.values()])
actual_classes = torch.tensor([label_to_index[label] for label in df.primary_label])
correct = predicted_classes == actual_classes
accuracy = correct.float().mean()
accuracy
logits = torch.stack([torch.tensor(row[0]) for row in all_predictions.values()])
ce_loss = torch.nn.CrossEntropyLoss()(logits, actual_classes)
ce_loss
actual_probs = torch.eye(len(bc2023_labels))[actual_classes]
bce_loss = torch.nn.BCEWithLogitsLoss()(logits, actual_probs)
bce_loss
import pandas as pd
import sklearn.metrics
def padded_cmap(solution, submission, padding_factor=5):
# solution = solution.drop(['row_id'], axis=1, errors='ignore')
# submission = submission.drop(['row_id'], axis=1, errors='ignore')
new_rows = []
for i in range(padding_factor):
new_rows.append([1 for i in range(len(solution.columns))])
new_rows = pd.DataFrame(new_rows)
new_rows.columns = solution.columns
padded_solution = pd.concat([solution, new_rows]).reset_index(drop=True).copy()
padded_submission = pd.concat([submission, new_rows]).reset_index(drop=True).copy()
score = sklearn.metrics.average_precision_score(
padded_solution.values,
padded_submission.values,
average="macro",
)
return score
solution = pd.DataFrame(actual_probs.numpy(), columns=bc2023_labels)
padded_cmap(
solution=solution,
submission=pd.DataFrame(torch.softmax(logits, 1).numpy(), columns=bc2023_labels),
)
padded_cmap(
    solution=solution,
    submission=pd.DataFrame(torch.sigmoid(logits).numpy(), columns=bc2023_labels),
)
# # Analysis
df["correct"] = correct.bool().numpy()
import plotly.express as px
px.histogram(
df,
title="Distribution of accuracy",
x="primary_label",
color="correct",
).update_xaxes(categoryorder="total descending").show()
# # Failure examples
from IPython.display import Audio
import torchaudio
import matplotlib.pyplot as plt
compute_melspec = torchaudio.transforms.MelSpectrogram(
sample_rate=SAMPLE_RATE,
n_mels=128,
n_fft=2048,
hop_length=512,
f_min=0,
f_max=SAMPLE_RATE // 2,
)
power_to_db = torchaudio.transforms.AmplitudeToDB(
stype="power",
top_db=80.0,
)
def show_bird(index, start=0):
    # torchaudio.load takes (frame_offset, num_frames), so request WINDOW frames from `start`
    audio = torchaudio.load(AUDIO_PATH / df.filename[index], start, WINDOW)[0][0]
display(df.iloc[index])
display(Audio(audio, rate=SAMPLE_RATE))
plt.figure(figsize=(12, 2.5))
plt.subplot(121)
plt.plot(audio)
plt.gca().get_xaxis().set_visible(False)
plt.subplot(122)
plt.imshow(power_to_db(compute_melspec(audio)))
plt.show()
return
# filter out birds that the model doesn't predict
missing_birds = set(np.array(bc2023_labels)[np.array(model_bc2023_indexes) == -1])
probs = torch.sigmoid(logits)
for count, i in enumerate(
df[~df.correct & ~df.primary_label.isin(missing_birds)].sample(10).index
):
print("### EXAMPLE", count + 1, "###")
correct_class = df.primary_label[i]
predicted_class_index = label_to_index[correct_class]
predicted_prob_for_correct_label = probs[i, predicted_class_index]
rank = 1 + sorted(probs[i], reverse=True).index(predicted_prob_for_correct_label)
predicted_class_index = probs[i].argmax().item()
    predicted_class = bc2023_labels[predicted_class_index]
max_predicted_prob = probs[i][predicted_class_index]
print(
f"correct was {correct_class}, given prob {predicted_prob_for_correct_label} and ranked #{rank}"
)
print(f"predicted {predicted_clas}, with prob {max_predicted_prob}")
print()
show_bird(i)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
pd.set_option("display.max_colwidth", None)
filename = "SKumanev_ngkii.csv"
path = "/kaggle/input/zhukovsk/zhukov_sk_sentences_4.pkl"
import pickle5 as pickle
with open(path, "rb") as fh:
df = pickle.load(fh)
df = df[:350_000]
df.head()
X_train, X_test = train_test_split(df, test_size=0.2, random_state=0, shuffle=True)
X_train.shape, X_test.shape
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
tfIdfVectorizer = TfidfVectorizer(use_idf=True)
tfIdfVectorizer.fit(X_train.values)
vocabulary = {key: value + 1 for key, value in tfIdfVectorizer.vocabulary_.items()}
vocabulary["unknown"] = 0 # для неизвестных слов и маскирования
len(vocabulary)
X_train = X_train.str.split().apply(lambda x: [vocabulary[item] for item in x])
X_train
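# The train-only vocabulary will not contain every token of the held-out split; mapping
# unseen tokens to index 0 ("unknown") keeps the encoding total. A minimal sketch, assuming
# X_test still holds raw sentence strings (as X_train did before encoding); not used further below:
X_test_encoded = X_test.str.split().apply(
    lambda tokens: [vocabulary.get(token, 0) for token in tokens]
)
X_test_encoded.head()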
# EMBEDDING_SIZE = 10
class EntityEmbedding(torch.nn.Module):
EMBEDDING_SIZE = 10
N_features = 6
def __init__(self):
super().__init__()
self.embeddings = nn.Embedding(len(vocabulary), self.EMBEDDING_SIZE)
self.linear1 = nn.Linear(self.N_features * self.EMBEDDING_SIZE, 1000)
self.activation_function1 = nn.ReLU()
# out:
self.linear2 = nn.Linear(1000, 5000)
self.activation_function2 = nn.ReLU()
# out: one-hot x vocab_size
self.linear3 = nn.Linear(5000, len(vocabulary))
self.activation_function3 = nn.LogSoftmax(dim=-1)
def forward(self, inputs):
self.embed = self.embeddings(inputs)
out = self.linear1(torch.cat([t for t in self.embed]))
out = self.activation_function1(out)
out = self.linear2(out)
out = self.activation_function2(out)
out = self.linear3(out)
out = self.activation_function3(out)
return out
    def get_word_embedding(self, word):
word = torch.tensor([word_to_ix[word]])
return self.embeddings(word).view(1, -1)
# Masking random words
from random import randint
def make_masked_data(df):
data = []
for s in df:
mask_idx = randint(0, len(s) - 1)
target = s[mask_idx]
context = s[:]
context[mask_idx] = 0
# print(mask_idx, s, target, context)
data.append((context, target))
return data
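# A quick sanity check of the masking helper on a tiny hand-made input (illustrative only,
# not part of the real dataset):
_toy_sentences = [[5, 7, 9], [2, 4, 4, 8]]
# each element becomes (context with one random position zeroed out, the index that was masked)
print(make_masked_data(_toy_sentences))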
data = make_masked_data(X_train.copy().values)
model = EntityEmbedding()
model.to(device)
model
criterion = nn.NLLLoss()
# criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
# TRAINING
for epoch in range(10):
loss = 0
data = make_masked_data(X_train.copy().values)
print(data[0])
for context, target in tqdm(data):
log_probs = model(torch.tensor(context).to(device))
loss += criterion(log_probs, torch.tensor(target).to(device))
print(loss)
# optimize at the end of each epoch
print("=== Embeddings ===")
print(model.embeddings.weight)
print(model.embeddings.weight.grad)
print("=== Layer1 ===")
print(model.linear1.weight)
print(model.linear1.weight.grad)
print("=== Layer2 ===")
print(model.linear2.weight)
print(model.linear2.weight.grad)
print("=== Layer3 ===")
print(model.linear3.weight)
print(model.linear3.weight.grad)
print("=== LogProbs ===")
print(log_probs.max(), log_probs.min())
optimizer.zero_grad()
loss.backward()
optimizer.step()
from torch.utils.data import Dataset, DataLoader
class Build_Data(Dataset):
def __init__(self, data):
        # use the (context, target) pairs passed in instead of regenerating them here
self.x = torch.tensor([item[0] for item in data]).to(device)
self.y = torch.tensor([item[1] for item in data]).to(device)
self.len = self.x.shape[0]
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.len
dataset = Build_Data(data)
train_loader = DataLoader(dataset=dataset, batch_size=1000)
train_loader
class EntityEmbedding(torch.nn.Module):
EMBEDDING_SIZE = 10
N_features = 6
def __init__(self):
super().__init__()
self.embeddings = nn.Embedding(len(vocabulary), self.EMBEDDING_SIZE)
self.linear1 = nn.Linear(self.N_features * self.EMBEDDING_SIZE, 1000)
self.activation_function1 = nn.ReLU()
# out:
self.linear2 = nn.Linear(1000, 5000)
self.activation_function2 = nn.ReLU()
# out: one-hot x vocab_size
self.linear3 = nn.Linear(5000, len(vocabulary))
self.activation_function3 = nn.LogSoftmax(dim=-1)
def forward(self, inputs):
self.embed = self.embeddings(inputs)
# print(self.embed.shape)
# print(model.embed.view(-1, model.embed.shape[1] * model.embed.shape[2]).shape)
# out = self.linear1(torch.cat([t for t in self.embed]))
out = self.linear1(
            self.embed.view(-1, self.embed.shape[1] * self.embed.shape[2])
)
out = self.activation_function1(out)
out = self.linear2(out)
out = self.activation_function2(out)
out = self.linear3(out)
out = self.activation_function3(out)
return out
    def get_word_embedding(self, word):
word = torch.tensor([word_to_ix[word]])
return self.embeddings(word).view(1, -1)
model = EntityEmbedding()
model.to(device)
model
criterion = nn.NLLLoss()
# criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
history = []
# TRAINING
for epoch in range(50):
data = make_masked_data(X_train.copy().values)
dataset = Build_Data(data)
train_loader = DataLoader(dataset=dataset, batch_size=1000)
loss = 0
for x, y in tqdm(train_loader):
y_hat = model(x).to(device)
loss = criterion(y_hat, y).to(device)
loss.backward()
optimizer.step()
optimizer.zero_grad()
history.append(loss.item())
# print(f'loss: {loss.item()}')
# optimize at the end of each epoch
# print('=== Embeddings ===')
# print(model.embeddings.weight[0])
plt.plot(history) # 0.001, 1000
model.embeddings.weight.to_dense()
# torch.save(model, 'file_model')
model = torch.load("/kaggle/input/zhukovsk/file_model")
from sklearn.manifold import TSNE
import seaborn as sns
idx2word = {value: key for key, value in vocabulary.items()}
tsne = TSNE(n_components=2, verbose=1, random_state=123)
x = model.embeddings.weight.detach().cpu().numpy()
z = tsne.fit_transform(x)
res = pd.DataFrame()
res["name"] = pd.Series([idx2word[i] for i in range(len(vocabulary))]).rename("name")
res["class"] = res["name"].apply(lambda x: x.split("__")[0])
res["x1"] = z[:, 0]
res["y1"] = z[:, 1]
sns.scatterplot(
x="x1",
y="y1",
hue=res["class"].tolist(),
palette=sns.color_palette("hls", 6),
data=res,
).set(title="data T-SNE projection")
embeddings = model.embeddings
embeddings
class SequenceDataset(Dataset):
def __init__(self, data, sequence_length=5):
self.sequence_length = sequence_length
x = []
y = []
for i in range(self.sequence_length, len(data)):
x.append(
torch.stack(
[
                        embeddings(torch.tensor(sentence).to(device)).detach().flatten()
for sentence in data[i - self.sequence_length : i]
]
)
)
y.append(
[int(x in data[i]) for x in range(len(vocabulary))]
) # multi-hot encoding
self.x = torch.stack(x, dim=0).to(device)
self.y = torch.tensor(y).to(device)
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.x.shape[0]
class LSTMPrediction(torch.nn.Module):
def __init__(self):
super().__init__()
self.num_layers = 1
self.hidden_units = 800
self.lstm = nn.LSTM(
input_size=60,
hidden_size=self.hidden_units,
batch_first=True,
num_layers=self.num_layers,
)
self.linear = nn.Linear(800, len(vocabulary))
self.activation_function = nn.LogSoftmax(dim=-1)
def forward(self, inputs):
out, _ = self.lstm(inputs)
out = self.linear(out)
out = out[:, -1, :]
out = self.activation_function(out)
return out
model2 = LSTMPrediction()
model2.to(device)
model2
from torchinfo import summary
# summary(model2, input_size=(1000, 800, 60))
torch.autograd.set_detect_anomaly(False)
criterion = nn.BCEWithLogitsLoss() # nn.NLLLoss()
optimizer = torch.optim.SGD(model2.parameters(), lr=0.001)
history = []
# TRAINING
for epoch in tqdm(range(10)):
    dataset = SequenceDataset(X_train[:10_000].copy().to_list())  # otherwise it does not fit in RAM
train_loader = DataLoader(dataset=dataset, batch_size=1000)
loss = 0
for batchN, (x, y) in enumerate(train_loader):
print(f"batch: {batchN}")
y_hat = model2(x).to(device)
        loss = criterion(y_hat, y.float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
history.append(loss.item())
print(f"epoch {epoch} loss: {loss.item()}")
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv", index_col=0)
train.head()
train.shape
train_X = train.drop(["target"], axis=1)
train_y = train["target"]
# As train size is small, I use cross validation.
"""
from sklearn.model_selection import train_test_split
train_X,valid_X,train_y,valid_y=train_test_split(train_all_X,train_all_y,random_state=42)
"""
from xgboost import XGBClassifier
from sklearn.model_selection import RandomizedSearchCV
model = XGBClassifier(random_state=42)
cv_params = {
"learning_rate": [0.002, 0.005, 0.01, 0.02, 0.05, 0.1],
"max_depth": [2, 4, 6, 8, 10],
"colsample_bytree": [0.2, 0.4, 0.6, 0.8, 1],
"gamma": [0.002, 0.005, 0.01, 0.02, 0.05, 0.1],
"min_child_weight": [2, 4, 6, 8, 10],
}
randcv = RandomizedSearchCV(
model, cv_params, cv=5, scoring="roc_auc", n_iter=100, verbose=1
)
randcv.fit(train_X, train_y)
pd.DataFrame(randcv.cv_results_).sort_values(by="mean_test_score", ascending=False).T
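# The best configuration and its mean cross-validated AUC can also be read off directly:
print("best params:", randcv.best_params_)
print("best CV ROC AUC:", randcv.best_score_)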
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv", index_col=0)
best_estimator = randcv.best_estimator_
test_pred = best_estimator.predict_proba(test)[:, 1]
submission = pd.DataFrame({"id": test.index, "target": test_pred})
submission.set_index("id", inplace=True)
submission.to_csv("submission.csv")
submission.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing the data
# Define a 'private' indicator: a school is flagged as private when in-state tuition equals out-of-state tuition
train_data = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/train.csv")
train_data = train_data.drop("Unnamed: 0", axis=1)
train_data["private"] = np.where(
(train_data["Tuition_in_state"] == train_data["Tuition_out_state"]), 1, 0
)
train_data["tuition_mean"] = train_data[["Tuition_in_state", "Tuition_out_state"]].mean(
axis=1
)
train_data
# independent = train_data[train_data.columns[3:]]
# independent=train_data.drop('Tuition_in_state',axis = 1)
# independent=independent.drop('Tuition_out_state',axis = 1)
independent = train_data
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 18))
ax = sns.heatmap(independent.corr(), vmin=-1, vmax=1, annot=True)
# High collinearity: tuitions, SAT & ACT, parents' education
# "SAT_average" and "ACT_50thPercentile" have a high linear correlation.
# "Parents_college" and "Parents_highsch" also have a high negative linear correlation;
# the specific pairs are printed below for reference.
X = train_data[
[
"SAT_average",
"Pell_grant_rate",
"Faculty_salary",
"Parents_highsch",
"tuition_mean",
"pct_Asian",
"pct_Black",
"pct_White",
"Parents_middlesch",
"private",
"pct_Hispanic",
]
].values
y = train_data["Completion_rate"]
X, y
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import r2_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from sklearn.linear_model import Ridge
"""
def create_nn():
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(X.shape[1],)))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='linear'))
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss='mean_squared_error')
return model
"""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
def create_nn():
model = Sequential()
model.add(Dense(128, activation="relu", input_shape=(X.shape[1],)))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(64, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(32, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(16, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(1, activation="linear"))
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="mean_squared_error")
return model
knn = KNeighborsRegressor()
svm = SVR()
rf = RandomForestRegressor(
max_depth=30,
min_samples_leaf=1,
min_samples_split=2,
n_estimators=200,
random_state=32,
)
nn = KerasRegressor(build_fn=create_nn, epochs=100, batch_size=32, verbose=0)
xgb_model = xgb.XGBRegressor(
learning_rate=0.1,
max_depth=9,
min_child_weight=3,
n_estimators=200,
objective="reg:squarederror",
n_jobs=-1,
)
etr_model = ExtraTreesRegressor(max_depth=30, n_estimators=200, n_jobs=-1)
models = [knn, svm, rf, nn, xgb_model, etr_model]
"""
for train_index, val_index in kf.split(X):
X_train, X_val = X[train_index], X[val_index]
y_train, y_val = y[train_index], y[val_index]
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
for model in models:
model.fit(X_train, y_train)
predictions = []
for model in models:
y_pred = model.predict(X_val)
predictions.append(y_pred)
stacked_predictions = np.column_stack(predictions)
#meta_model = Ridge(alpha=1.0)
#meta_model.fit(stacked_predictions, y_val)
meta_model = LinearRegression()
meta_model.fit(stacked_predictions, y_val)
train_predictions = []
for model in models:
y_pred = model.predict(X_train)
train_predictions.append(y_pred)
stacked_train_predictions = np.column_stack(train_predictions)
y_val_pred = meta_model.predict(stacked_predictions)
r2 = r2_score(y_val, y_val_pred)
r2_scores.append(r2)
average_r2 = np.mean(r2_scores)
print(r2_scores)
average_r2
"""
r2_scores = []
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# X_train, X_val = X[train_index], X[val_index]
# y_train, y_val = y[train_index], y[val_index]
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
for model in models:
model.fit(X_train, y_train)
predictions = []
for model in models:
y_pred = model.predict(X_val)
predictions.append(y_pred)
stacked_predictions = np.column_stack(predictions)
# meta_model = Ridge(alpha=1.0)
# meta_model.fit(stacked_predictions, y_val)
meta_model = LinearRegression()
meta_model.fit(stacked_predictions, y_val)
train_predictions = []
for model in models:
y_pred = model.predict(X_train)
train_predictions.append(y_pred)
stacked_train_predictions = np.column_stack(train_predictions)
y_val_pred = meta_model.predict(stacked_predictions)
r2 = r2_score(y_val, y_val_pred)
r2_scores.append(r2)
average_r2 = np.mean(r2_scores)
print(r2_scores)
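# With the single hold-out split above there is only one score, so the average equals it;
# printing it keeps the output consistent with the commented-out k-fold version:
print("average R2:", average_r2)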
"""
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import ExtraTreesRegressor
# Define the hyperparameter grid
param_grid = {
'n_estimators': [100, 200], # Number of trees in the forest
'max_depth': [10, 20, 30], # Maximum depth of a tree
'min_samples_split': [2, 5, 10], # Minimum number of samples required to split an internal node
'min_samples_leaf': [1, 2, 4], # Minimum number of samples required to be at a leaf node
'max_features': ['auto', 'sqrt'], # Number of features to consider when looking for the best split
}
# Create an ExtraTreesRegressor model
etr_model = ExtraTreesRegressor()
# Set up GridSearchCV with cross-validation
grid_search = GridSearchCV(estimator=etr_model, param_grid=param_grid, cv=5, scoring='neg_mean_squared_error', verbose=1, n_jobs=-1)
# Fit the GridSearchCV object to the training data
grid_search.fit(X_train, y_train)
# Get the best ExtraTreesRegressor model found by GridSearchCV
best_etr_model = grid_search.best_estimator_
print(grid_search.best_params_)
"""
"""
import numpy as np
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
# Define the hyperparameter grid
param_grid = {
'n_estimators': [50, 100, 200], # Number of boosting rounds
'learning_rate': [0.01, 0.1, 0.2], # Step size shrinkage used in update
'max_depth': [3, 6, 9], # Maximum depth of a tree
'min_child_weight': [1, 3, 5], # Minimum sum of instance weight needed in a child
}
# Create an XGBRegressor model
xgb_model = XGBRegressor()
# Set up GridSearchCV with cross-validation
grid_search = GridSearchCV(estimator=xgb_model, param_grid=param_grid, cv=5, scoring='neg_mean_squared_error', verbose=1, n_jobs=-1)
# Fit the GridSearchCV object to the training data
grid_search.fit(X, y)
# Get the best XGBRegressor model found by GridSearchCV
best_xgb_model = grid_search.best_estimator_
print(grid_search.best_params_)
"""
"""
from sklearn.model_selection import GridSearchCV
# Set the hyperparameters to be tuned
param_grid = {
'n_estimators': [10, 50, 100, 200],
'max_depth': [None, 10, 20, 30],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4]
}
RandomForestRegressor(max_depth=30, n_estimators=200)
# Perform grid search for RandomForestRegressor
grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=5, scoring='r2', verbose=2, n_jobs=-1)
grid_search.fit(X, y)
# Update the RandomForestRegressor with the best parameters
best_rf = grid_search.best_estimator_
print(grid_search.best_params_)
"""
# ### Creating a simple regression model
test = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
test["tuition_mean"] = test[["Tuition_in_state", "Tuition_out_state"]].mean(axis=1)
test["private"] = np.where(
(test["Tuition_in_state"] == test["Tuition_out_state"]), 1, 0
)
x_pred = test[
[
"SAT_average",
"Pell_grant_rate",
"Faculty_salary",
"Parents_highsch",
"tuition_mean",
"pct_Asian",
"pct_Black",
"pct_White",
"Parents_middlesch",
"private",
"pct_Hispanic",
]
].values
x_pred = scaler.transform(x_pred)
test_predictions = []
for model in models:
y_pred = model.predict(x_pred)
test_predictions.append(y_pred)
stacked_test_predictions = np.column_stack(test_predictions)
y_test_pred = meta_model.predict(stacked_test_predictions)
print(y_test_pred)
submission = pd.DataFrame.from_dict({"Completion_rate": y_test_pred})
submission
submission.to_csv("submission.csv", index=True, index_label="id")
|
# Covid Cases and Deaths Worldwide
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## 1 | About Dataset
# Coronavirus disease (COVID-19) is an infectious disease caused by the SARS-CoV-2 virus.
# Most people infected with the virus will experience mild to moderate respiratory illness and recover without requiring special treatment. However, some will become seriously ill and require medical attention. Older people and those with underlying medical conditions like cardiovascular disease, diabetes, chronic respiratory disease, or cancer are more likely to develop serious illness. Anyone can get sick with COVID-19 and become seriously ill or die at any age.
# The best way to prevent and slow down transmission is to be well informed about the disease and how the virus spreads. Protect yourself and others from infection by staying at least 1 metre apart from others, wearing a properly fitted mask, and washing your hands or using an alcohol-based rub frequently. Get vaccinated when it’s your turn and follow local guidance.
# The virus can spread from an infected person’s mouth or nose in small liquid particles when they cough, sneeze, speak, sing or breathe. These particles range from larger respiratory droplets to smaller aerosols. It is important to practice respiratory etiquette, for example by coughing into a flexed elbow, and to stay home and self-isolate until you recover if you feel unwell.
# Where are cases still high?
# Daily global cases fell after a spike in the spring but are now rising again, with the emergence of the BA.4 and BA.5 subvariants of the Omicron variant.
# Studies suggest that Omicron - which quickly became dominant in numerous countries - is milder than the Delta variant, but far more contagious. The subvariants are even more contagious.
# ## 2 | Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# ## 3 | Dataset
# Download data
df = pd.read_csv("/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv")
df
# ## 4 | Inspecting Dataframe Structure
print(df.shape)
df.info()
# Turning object columns into float
columns = [
"Total Cases",
"Total Deaths",
"Total Recovered",
"Active Cases",
"Total Test",
"Population",
]
df[columns] = df[columns].apply(lambda x: x.str.replace(",", "").astype("float"))
df.info()
# ## 5 | EDA
# ### 5.1 | Total Deaths
# Where are the highest 'Total Deaths' located?
import plotly.express as px
total_death = pd.DataFrame(
df.groupby("Country")[["Country", "Total Deaths"]]
.mean()
.sort_values("Total Deaths", ascending=False)
.round(2)
.head(10)
)
fig = px.bar(
total_death,
x=total_death.index,
y="Total Deaths",
title="Total Deaths according to Country - Highest",
template="seaborn",
color=total_death.index,
text="Total Deaths",
)
fig.show()
total_death
# Pie chart for top 10 countries in Total Deaths
fig = plt.figure(figsize=(10, 7))
plt.pie(total_death["Total Deaths"], labels=total_death.index)
plt.show()
# Where are the lowest 'Total Deaths' located?
import plotly.express as px
total_death = pd.DataFrame(
df.groupby("Country")[["Country", "Total Deaths"]]
.mean()
.sort_values("Total Deaths", ascending=True)
.round(2)
.head(10)
)
fig = px.bar(
total_death,
x=total_death.index,
y="Total Deaths",
title="Total Deaths according to Country - Lowest",
template="seaborn",
color=total_death.index,
text="Total Deaths",
)
fig.show()
total_death
# Total Deaths per country
fig = px.choropleth(
df,
locations="Country",
locationmode="country names",
scope="world",
color="Total Deaths",
color_continuous_scale="Viridis_r",
)
fig.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0},
coloraxis_colorbar=dict(
title="Total Deaths",
ticks="outside",
tickvals=[0, 200000, 400000, 600000, 800000, 1000000],
dtick=5,
),
)
fig.show()
# Treemap
ax1 = px.treemap(df, path=["Country"], values="Total Deaths", title="Top Total Deaths")
ax1.show()
# ### 5.2 | Organizing the dataset
# Checking null values
print(df.isnull().sum())
# Checking null values in "Total Deaths" column
df[df["Total Deaths"].isna()]
#
# Insight: These are countries with small populations, so we leave them as they are.
# Checking null values in Population column
df[df["Population"].isna()]
#
# Insight: Only China has a large population, so let's add the population of this country.
# **China’s population fell in 2022 to 1.411 billion, down some 850,000 people from the previous year, China’s National Bureau of Statistics (NBS) announced.**
# Filling China Population
df.loc[90, "Population"] = 1411000000
df.loc[90]
# Selecting required columns.
df = df[["Country", "Total Cases", "Total Deaths", "Population"]]
df.head()
# Deleting records with null data.
df = df.dropna()
print(df.isnull().sum())
# Create new columns
df["% Cases"] = (df["Total Cases"] / df["Population"]) * 100
df["% Deaths"] = (df["Total Deaths"] / df["Population"]) * 100
df
# ### 5.3 | % Deaths
# Where are the highest '% Deaths' located?
import plotly.express as px
per_death = pd.DataFrame(
df.groupby("Country")[["Country", "% Deaths"]]
.mean()
.sort_values("% Deaths", ascending=False)
.round(2)
.head(10)
)
fig = px.bar(
per_death,
x=per_death.index,
y="% Deaths",
title="% Deaths according to Country - Highest",
template="seaborn",
color=per_death.index,
text="% Deaths",
)
fig.show()
per_death
# Where are the lowest '% Deaths' located?
import plotly.express as px
per_death = pd.DataFrame(
df.groupby("Country")[["Country", "% Deaths"]]
.mean()
.sort_values("% Deaths", ascending=True)
.round(5)
.head(10)
)
fig = px.bar(
per_death,
x=per_death.index,
y="% Deaths",
title="% Deaths according to Country - Lowest",
template="seaborn",
color=per_death.index,
text="% Deaths",
)
fig.show()
per_death
# '% Deaths' per country
fig = px.choropleth(
df,
locations="Country",
locationmode="country names",
scope="world",
color="% Deaths",
color_continuous_scale="Viridis_r",
)
fig.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0},
coloraxis_colorbar=dict(
title="% Deaths",
ticks="outside",
tickvals=[0.0, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70],
dtick=8,
),
)
fig.show()
# Treemap
ax2 = px.treemap(df, path=["Country"], values="% Deaths", title="Top % Deaths")
ax2.show()
# ### 5.4 | % Cases
# Where are the highest '% Cases' located?
per_cases = pd.DataFrame(
df.groupby("Country")[["Country", "% Cases"]]
.mean()
.sort_values("% Cases", ascending=False)
.round(2)
.head(10)
)
fig = px.bar(
per_cases,
x=per_cases.index,
y="% Cases",
title="% Cases according to Country - Highest",
template="seaborn",
color=per_cases.index,
text="% Cases",
)
fig.show()
per_cases
# Where are the lowest '% Cases' located?
per_cases = pd.DataFrame(
df.groupby("Country")[["Country", "% Cases"]]
.mean()
.sort_values("% Cases", ascending=True)
.round(2)
.head(10)
)
fig = px.bar(
per_cases,
x=per_cases.index,
y="% Cases",
title="% Cases according to Country - Lowest",
template="seaborn",
color=per_cases.index,
text="% Cases",
)
fig.show()
per_cases
# '% Cases' per country
fig = px.choropleth(
df,
locations="Country",
locationmode="country names",
scope="world",
color="% Cases",
color_continuous_scale="Viridis_r",
)
fig.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0},
coloraxis_colorbar=dict(
title="% Cases",
ticks="outside",
tickvals=[0, 10, 20, 30, 40, 50, 60, 70],
dtick=8,
),
)
fig.show()
# Treemap
ax3 = px.treemap(df, path=["Country"], values="% Cases", title="Top % Cases")
ax3.show()
# ### 5.5 | WordCloud
from wordcloud import WordCloud
world = df.Country[df.Country.index]
plt.figure(figsize=(15, 20))
wordcloud = WordCloud(min_font_size=3, max_words=2500, width=1200, height=800).generate(
" ".join(world)
)
plt.imshow(wordcloud, interpolation="bilinear")
plt.figure(figsize=(15, 20))
wordcloud = WordCloud(
min_font_size=3, max_words=2500, width=1200, height=800, background_color="white"
).generate(" ".join(world))
plt.imshow(wordcloud, interpolation="bilinear")
|
# # Expectation Maximization on Gaussian Mixture Models
# The idea of this notebook is to learn the EM (Expectation Maximization) algorithm by applying it to GMM.
# 
# ## Gaussian Mixture Model - Algorithm
# Assume that we know that our dataset consists of a mixture of `K` multivariate Gaussian distributions (a mixture of Gaussians). We want to estimate the mean and variance of each Gaussian. We could (in theory) do that using the `Maximum Likelihood (ML)` method since we have dataset samples:
# \begin{align}
# L(w) = \prod^{N}_{i=1} N_w(x_i) = \prod^{N}_{i=1} \sum^{K}_{j=1} p_w(z=j) \cdot N_w(x_i|z=j) \tag{1}
# \end{align}
# Here `L` is the likelihood function given the set of parameters `w` (means and stds), `N` is the number of samples, `K` is the number of clusters, and `z` is the cluster color (latent variable). Finally, $N_w(x_i)$ is the `pdf (Probability Density Function)` for sample $x_i$ given the parameters $w$, $p_w(z=j)$ is the probability of sample $x_i$ belonging to cluster $j$, and $N_w(x_i|z=j) = N_w(x_i|\mu_j, \Sigma_j)$ is the `pdf` for sample $x_i$ assuming that it belongs to cluster $j$ given the parameters $w$ (mean $\mu_j$ and covariance $\Sigma_j$). Note: the second equality holds because of the [law of total probability](https://en.wikipedia.org/wiki/Law_of_total_probability). We want to find the parameters $w$ that maximize the $L$ function. This is equivalent to finding the maximum of `log(L)` since `log` is an increasing function. Using the `log` simplifies the optimization function (the product becomes a sum).
# \begin{align}
# l(w) = log(L(w)) = \sum^{N}_{i=1} log(\sum^{K}_{j=1} p_w(z=j) \cdot p_w(x_i|z=j)) \tag{2}
# \end{align}
# In special case `K=1` for $(2)$ we have
# \begin{align}
# l(w) = \sum^{n}_{i=1} log(p_w(x_i)) \tag{3}
# \end{align}
# and this can easily be solved analytically by finding $w$ where $\frac{dl}{dw} = 0$, $\frac{d^{2}l}{dw^2} < 0$.
# Unfortunately, this problem is a lot harder in the general case since the `log` can't pass through the sum. If we set the derivatives of the log likelihood to zero, we no longer obtain a closed-form solution. If we replace $N_w(x_i|z=j) = N_w(x_i|\mu_j, \Sigma_j)$ in $l(w)$ we get:
# \begin{align}
# l(w) = \sum^{N}_{i=1} log(\sum^{K}_{j=1} p_w(z=j) \cdot N_w(x_i|\mu_j, \Sigma_j)) \tag{4}
# \end{align}
# It is important to see that we also need to estimate $p_w(z=j)$. Our intuition tells us that $p_w(z=j)$ should be roughly $\frac{1}{N} \sum^{N}_{i=1} p_w(z=j|x_i)$. This approximation makes sense since, if the distribution of $x$ (data) were known, then $p(z=j) = \int_x p(z=j,x) = \int_x p(z=j|x) \cdot p(x)$ ([marginal distribution](https://en.wikipedia.org/wiki/Marginal_distribution)). Later we will show that this approximation is valid. For the estimation we use $\pi_j \approx p_w(z = j)$ with a constraint which will be important later. We call this quantity the `prior` to aid intuition, although that terminology is not completely formal:
# \begin{align}
# \sum_{j} \pi_j = 1 \tag{5}.
# \end{align}
# Now the log likelihood function has next form:
# \begin{align}
# l(w) = \sum^{N}_{i=1} log(\sum^{K}_{j=1} \pi_j \cdot N_w(x_i|\mu_j, \Sigma_j)) \tag{6}
# \end{align}
# From this form we can express the derivative with respect to $\mu_j$ (the derivative with respect to $\Sigma_j$ is handled analogously):
# \begin{align}
# \frac{dl}{d\mu_j} = \sum^{n}_{i=1} \frac{\pi_j \cdot N_w(x_i|\mu_j, \Sigma_j)}{\sum^{K}_{t=1} \pi_t \cdot N_w(x_i|\mu_t, \Sigma_t)} \cdot \Sigma_j^{-1} \cdot (x_i - \mu_j) \tag{7}
# \end{align}
# Note: $\frac{dN_w(x_i|\mu_j, \Sigma_j)}{d\mu_j} = \Sigma_j^{-1} \cdot (x_i - \mu_j) \cdot N_w(x_i|\mu_j, \Sigma_j)$, which can easily be derived from the full multivariate Gaussian distribution formulation:
# \begin{align}
# N(x|\mu,\Sigma) = \frac{1}{(2 \cdot \pi)^{\frac{n}{2}} \cdot |\Sigma|^{\frac{1}{2}}} \cdot exp(-\frac{1}{2} \cdot (x - \mu)^{T} \Sigma^{-1} (x - \mu)) \tag{8}
# \end{align}
# where $n$ is the dimensionality.
# Also note that $\frac{\pi_j \cdot N_w(x_i|\mu_j, \Sigma_j)}{\sum^{K}_{t=1} \pi_t \cdot N_w(x_i|\mu_t, \Sigma_t)}$ in $(7)$ is actually the `posterior` $p(z=j|x_i)$ ([Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem)) if $\pi_j$ is a `prior`. We define $\gamma_{j,x_i} := p(z=j|x_i)$ for more compact formulas. By replacing $\gamma_{j,x_i}$ in $(7)$ we get:
# \begin{align}
# \frac{dl}{d\mu_j} = \sum^{n}_{i=1} \gamma_{j,x_i} \cdot \Sigma_j^{-1} \cdot (x_i - \mu_j) \tag{9}
# \end{align}
# Setting this derivative to zero we can get the optimal $\mu_j$ (the proof that this is a local maximum is skipped):
# \begin{align}
# \sum^{n}_{i=1} \gamma_{j,x_i} \cdot \Sigma_j^{-1} \cdot (x_i - \mu_j) = 0 \tag{10}
# \end{align}
# We multiply both sides by $\Sigma_j$, assuming it is not a singular matrix:
# \begin{align}
# \sum^{n}_{i=1} \gamma_{j,x_i} \cdot (x_i - \mu_j) = 0 \tag{11}
# \end{align}
# From here we express $\mu_j$:
# \begin{align}
# \mu_j = \frac{\sum^{n}_{i=1} \gamma_{j,x_i} \cdot x_i}{\sum^{n}_{t=1} \gamma_{j,x_t}} \tag{12}
# \end{align}
# We define $N_j = \sum^{n}_{i=1} \gamma_{j,x_i}$ as the effective number of points assigned to cluster $j$ - the sum over all points of the probability of belonging to cluster $j$. Thus:
# \begin{align}
# \mu_j = \frac{1}{N_j} \cdot \sum^{n}_{i=1} \gamma_{j,x_i} \cdot x_i \tag{13}
# \end{align}
# In similar way we get:
# \begin{align}
# \Sigma_j = \frac{1}{N_j} \cdot \sum^{n}_{i=1} \gamma_{j,x_i} \cdot (x_i - \mu_j)(x_i - \mu_j)^T \tag{14}
# \end{align}
# For $\pi_j$ we need to include constraint $(5)$. This can be achieved using [Lagrange multiplier](https://en.wikipedia.org/wiki/Lagrange_multiplier) and maximizing $G(\pi_1, \pi_2, ..., \lambda)$:
# \begin{align}
# G(\pi_1, \pi_2, ..., \lambda) = l(w) + \lambda \cdot (\sum_{j} \pi_j - 1) \tag{15}
# \end{align}
# Hence, we have:
# \begin{align}
# 0 = \frac{dG}{d\pi_j} = \sum^{N}_{i}\frac{N_w(x_i|\mu_j, \Sigma_j)}{\sum^{K}_{t=1} \pi_t \cdot N_w(x_i|\mu_t, \Sigma_t)} + \lambda \tag{16}
# \end{align}
# If we multiply both sides of $0 = \frac{dG}{d\pi_j}$ by $\pi_j$ and sum over all $j$ (using constraint $(5)$) we get:
# \begin{align}
# 0 = \sum^{N}_{i}\frac{\sum^{K}_{j=1} \pi_j \cdot N_w(x_i|\mu_j, \Sigma_j)}{\sum^{K}_{t=1} \pi_t \cdot N_w(x_i|\mu_t, \Sigma_t)} + \lambda \cdot \sum^{K}_{j=1} \pi_j = N + \lambda \tag{17}
# \end{align}
# so $\lambda = -N$.
# Hence, multiplying $(16)$ by $\pi_j$ and substituting $\lambda = -N$, we get:
# \begin{align}
# 0 = \sum^{N}_{i}\frac{\pi_j \cdot N_w(x_i|\mu_j, \Sigma_j)}{\sum^{K}_{t=1} \pi_t \cdot N_w(x_i|\mu_t, \Sigma_t)} - \pi_j \cdot N
# = \sum^{N}_{i} \gamma_{j,x_i} - \pi_j \cdot N \tag{18}
# \end{align}
# And finally:
# \begin{align}
# \pi_j = \frac{N_j}{N} \tag{19}
# \end{align}
# where $N_j := \sum^{N}_{i} \gamma_{j,x_i}$.
# Key formulas are $(12)$, $(14)$ and $(19)$:
# \begin{align}
# \mu_j = \frac{1}{N_j} \cdot \sum^{n}_{i=1} \gamma_{j,x_i} \cdot x_i
# \end{align}\begin{align}
# \Sigma_j = \frac{1}{N_j} \cdot \sum^{n}_{i=1} \gamma_{j,x_i} \cdot (x_i - \mu_j)(x_i - \mu_j)^T
# \end{align}\begin{align}
# \pi_j = \frac{N_j}{N}
# \end{align}
# Note that $(12)$ is slightly modified into a more compact format.
# The problem is that all our estimates depend on $\gamma_{j,x_i}$ (the posterior latent variable distribution), which itself requires the estimated parameters. Thus, this is not a closed-form solution. On the other hand, it gives us an iterative algorithm: start from some initial values for $\mu_j$, $\Sigma_j$ and $\pi_j$, calculate $\gamma_{j,x_i}$ (E step), and then use it to estimate new parameters and the likelihood (M step). This is the Expectation Maximization (EM) algorithm, which guarantees that the likelihood does not decrease at each step.
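# To make formulas (12), (14) and (19) concrete, here is a minimal numeric sketch of a single
# EM update for a 1-D, two-component mixture on a few toy samples (illustrative only; the
# general implementation follows below in the `GaussianMixtureModel` class):
import numpy as np
from scipy import stats

_x = np.array([-2.1, -1.9, -2.0, 3.0, 3.2, 2.8])  # toy samples from two clumps
_mu = np.array([-1.0, 1.0])                       # initial means
_var = np.array([1.0, 1.0])                       # initial variances
_pi = np.array([0.5, 0.5])                        # initial weights
# E step: responsibilities gamma[j, i] = pi_j * N(x_i | mu_j, var_j) / normalization
_lik = np.stack([stats.norm.pdf(_x, _mu[j], np.sqrt(_var[j])) for j in range(2)])
_gamma = (_pi[:, None] * _lik) / (_pi[:, None] * _lik).sum(axis=0, keepdims=True)
# M step: formulas (12), (14) and (19)
_N_j = _gamma.sum(axis=1)
_mu_new = (_gamma * _x).sum(axis=1) / _N_j
_var_new = (_gamma * (_x - _mu_new[:, None]) ** 2).sum(axis=1) / _N_j
_pi_new = _N_j / len(_x)
print(_mu_new, _var_new, _pi_new)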
# ## Expectation Maximization
# The Expectation Maximization algorithm is a generic optimization algorithm used to estimate model parameters that give a (local) maximum of the likelihood in cases when the data is not completely observed or has missing/latent values.
# We denote $X$ as set of all observed data, $Z$ as set of latent variables and $\Theta$ as set of all model parameters. Log likelihood function is given by:
# \begin{align}
# l(\Theta) = log(p(X|\Theta)) = log(\sum_Z p(X,Z|\Theta) )\tag{20}
# \end{align}
# This function is usually hard to optimize, even if the distribution $p(X,Z|\Theta)$ comes from the exponential family, since the logarithm can't pass through the sum. If we had full information about the latent variable $Z$, we could easily estimate the parameters $\Theta$ by optimizing the log likelihood of the complete data $X$ and $Z$: $l(\Theta) = log(p(X,Z|\Theta))$. Since instead of the complete data we only have knowledge of $Z$ through the posterior distribution $p(Z|X,\Theta)$ (we can estimate $Z$ if we have $X$ and $\Theta$), we replace the log likelihood with its expectation under the posterior distribution $p(Z|X,\Theta)$, evaluated using the current estimated parameters:
# \begin{align}
# Q(\Theta) = \sum_Z p(Z|X,\Theta) \cdot log(p(X,Z|\Theta))\tag{21}
# \end{align}
# The last part may not seem intuitive - why can we replace the log likelihood with the expectation of the log likelihood under the posterior distribution of $Z$? For a full explanation please refer to the well-known book `Pattern Recognition and Machine Learning` by Christopher M. Bishop, since it requires a few pages to explain fully. This section is just a brief sketch of the full algorithm.
# During the `E` step we evaluate the posterior distribution $p(Z|X,\Theta_{old})$, where the parameters $\Theta_{old}$ are held fixed. During the `M` step we estimate the parameters by optimizing the expected log likelihood $Q(\Theta, \Theta_{old}) = \sum_Z p(Z|X,\Theta_{old}) \cdot log(p(X,Z|\Theta))$, where the posterior distribution $p(Z|X,\Theta_{old})$ is held fixed. By repeating the `E` and `M` steps many times we converge to a local maximum (or a saddle point).
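# A brief sketch of why this scheme works (full details in Bishop): for any distribution $q(Z)$ the log likelihood decomposes as
# \begin{align}
# log(p(X|\Theta)) = \sum_Z q(Z) \cdot log(\frac{p(X,Z|\Theta)}{q(Z)}) + KL(q(Z) \| p(Z|X,\Theta))
# \end{align}
# The `E` step sets $q(Z) = p(Z|X,\Theta_{old})$, which makes the KL term zero, and the `M` step maximizes the first term over $\Theta$ with $q$ fixed; since $KL \geq 0$, the log likelihood cannot decrease between iterations.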
# ## Algorithm (EM for GMM)
# 0. Initialize model initial parameters $\mu_j$, $\Sigma_j$ and $\pi_j$ for every cluster $j$.
# 1. **Expectation:** Evaluate $\gamma_{j, x_i}$ for every cluster $j$ and data sample $x_i$.
# 2. **Maximization:** Estimate parameters $\mu_j$, $\Sigma_j$ and $\pi_j$ for every cluster based on $\gamma_{j, x_i}$.
# 3. Evaluate `log_likelihood` based on the model estimated parameters.
# ## Implementation
# In this section the GMM model is implemented based on the theory introduced above. The model is first tested on a 1d dataset and then on a 2d dataset.
from typing import List, Tuple, Optional, Dict, Any, Callable
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
np.random.seed(42)
# ### Dataset creation and visualization
# It will be useful to have support for custom dataset generation and visualization.
# #### Visualization support for 1D
# For visualization `matplotlib` library is used. For Mixture of Gaussian visualization it is important to have two components:
# - Visualization of Gaussian pdf (probability distribution function);
# - Visualization of data points (and their cluster color).
def plot_1d_gaussian_pdf(
mean: float,
std: float,
std_range: Tuple[int, int] = (-3, 3),
n_points: int = 100,
ax: Optional[plt.Axes] = None,
plot_params: Optional[Dict[str, Any]] = None,
) -> None:
"""
Plots 1d Gaussian. Supports matplotlib Axes.
Args:
mean: Mean (center)
std: Std
        std_range: [a, b] - plot from mean + a*std to mean + b*std
n_points: Number of points on plot (more points -> better precision)
ax: Plot on Axes
plot_params: Parameters for plot (e.g. color, label, line-width, ...)
"""
plot_params = plot_params if plot_params is not None else {}
a, b = std_range
# Sample points around mean uniformly and calculate pdf for them
x = np.linspace(mean + a * std, mean + b * std, n_points)
y = stats.norm.pdf(x, mean, std)
if ax is None:
plt.plot(x, y, **plot_params)
else:
ax.plot(x, y, **plot_params)
# Test distribution plotting
s_range = (-4, 4)
plot_1d_gaussian_pdf(
1, 2, plot_params={"color": "red", "label": "N(1, 2)"}, std_range=s_range
)
plot_1d_gaussian_pdf(
0, 1, plot_params={"color": "blue", "label": "N(0, 1)"}, std_range=s_range
)
plot_1d_gaussian_pdf(
3, 0.5, plot_params={"color": "green", "label": "N(3, 0.5)"}, std_range=s_range
)
plt.xlabel("value")
plt.ylabel("PDF")
plt.title("Visualizing multiple Normal PDFs")
plt.ylim((0, 1))
plt.legend()
plt.show()
def plot_1d_points(
points: np.ndarray,
height: float = 0.1,
ax: Optional[plt.Axes] = None,
plot_params: Optional[Dict[str, Any]] = None,
) -> None:
"""
Plots list of 1D points on a horizontal line.
Args:
points: Point positions
height: Default y-axis point position (height)
ax: Plot on Axes
plot_params: Parameters for plot (e.g. color, label, line-width, ...)
"""
plot_params = plot_params if plot_params is not None else {}
y = np.ones_like(points) * height
if ax is None:
plt.scatter(points, y, **plot_params)
else:
ax.scatter(points, y, **plot_params)
# Test plot colored points
plot_1d_points(
np.array([[0, 1, 2]]), plot_params={"color": "red", "s": 100, "label": "red"}
)
plot_1d_points(
np.array([[1.5, 3, 4]]), plot_params={"color": "blue", "s": 100, "label": "blue"}
)
plt.title("Visualizing points")
plt.xlabel("Positions")
plt.ylabel("Height")
plt.ylim((0, 1))
plt.legend()
plt.show()
# #### Dataset generation (and visualization)
# A synthetic dataset from a mixture of Gaussians can easily be created by sampling some number of points from each Gaussian (cluster).
# The dataset can be easily visualized using the previously implemented visualization support functions.
def create_1d_cluster_dataset(
n: int, sizes: List[int], means: List[float], stds: List[float]
) -> Tuple[np.ndarray, np.ndarray]:
"""
Creates dataset consisting of `n` Gaussian clusters with defined sizes, means and stds.
Args:
n: Number of clusters
sizes: Cluster sizes (list of sizes for each of n clusters)
means: Cluster means (list of means for each of n clusters)
stds: Cluster stds (list of stds for each of n clusters)
Returns:
        Samples, sample colors (clusters)
"""
assert n == len(sizes)
assert n == len(means)
assert n == len(stds)
positions = []
colors = []
for c_id, (c_size, c_mean, c_std) in enumerate(zip(sizes, means, stds)):
p = np.random.randn(c_size) * c_std + c_mean
c = np.ones(c_size) * c_id
positions.append(p)
colors.append(c)
positions = np.hstack(positions).reshape(-1, 1)
colors = np.hstack(colors)
return positions, colors
def plot_1d_clusters(
positions: np.ndarray,
colors: np.ndarray,
n_clusters: int,
means: List[float],
stds: List[float],
ax: Optional[plt.Axes] = None,
color_list: Optional[List[str]] = None,
points_height: float = 0.1,
) -> None:
"""
Visualizes 1d clusters and data points with their cluster classes (colors).
Args:
positions: List (array) of 1D points.
colors: List (array) of point cluster colors
n_clusters: Number of clusters
means: Cluster means
stds: Cluster stds
ax: matplotlib Axes
color_list: Specify manually list of colors to be used (with order)
points_height: points height
"""
assert n_clusters == len(means)
assert n_clusters == len(stds)
if color_list is None:
color_list = ["b", "g", "r", "c", "m", "y", "k", "w"]
assert n_clusters <= len(
color_list
), "Not enough colors. Please define color list!"
for cluster_id, (cluster_mean, cluster_std) in enumerate(zip(means, stds)):
ps = np.array([p for p, c in zip(positions, colors) if c == cluster_id])
cluster_color = color_list[cluster_id]
plot_1d_points(
ps,
ax=ax,
plot_params={"color": cluster_color, "label": f"{cluster_id}", "s": 50},
height=points_height,
)
plot_1d_gaussian_pdf(
cluster_mean,
cluster_std,
ax=ax,
plot_params={"color": cluster_color, "label": f"{cluster_id}"},
)
if ax is None:
plt.ylim((0, 1))
plt.legend()
plt.grid()
else:
ax.legend()
# noinspection PyTypeChecker
ax.set_ylim((0, 1))
ax.grid()
# Test dataset generation and visualization
ex_n = 3
ex_sizes = [10, 20, 30]
ex_means = [-1, 5, 15]
ex_stds = [1, 0.5, 2]
ex_pis = [1 / 6, 1 / 3, 1 / 2]
_, ex_ax = plt.subplots(figsize=(14, 5))
point_positions, point_colors = create_1d_cluster_dataset(
3, ex_sizes, ex_means, ex_stds
)
plot_1d_clusters(
point_positions,
point_colors,
n_clusters=ex_n,
means=ex_means,
stds=ex_stds,
ax=ex_ax,
points_height=0.05,
)
plt.show()
# ### GMM Implementation
# The EM algorithm expects some initial values for the parameters. We can use a simple algorithm (heuristic) to generate them. Later on we can check whether a better heuristic improves the algorithm's speed by lowering the number of iterations needed to converge (thanks to a better starting point).
# For the default heuristic we sample random points between the minimum and maximum values in the dataset; these random points are the initial cluster means. For the covariance matrix, an identity matrix is used for each cluster.
# One good alternative approach is to run K-Means for a few iterations and use the cluster centroids as the initial Gaussian cluster means.
GMMHeuristic = Callable[[np.ndarray, int], Tuple[np.ndarray, np.ndarray, np.ndarray]]
def uniform_space_points_heuristic(
points: np.ndarray, n_clusters: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Finds min and max for each point dimension and chooses cluster points uniformly.
    Covariance matrices are initialized to the identity matrix and the cluster weights to 1 / n_clusters.
Args:
points: Data points
n_clusters: Number of clusters
Returns:
        Means array, covariances array, weights array
"""
assert (
1 <= n_clusters <= points.shape[0]
), f"Invalid number of clusters! Got {n_clusters}."
assert len(points.shape) == 2, f"Expected 2D data but got {points.shape}"
dim = points.shape[1]
p_min = np.min(points, axis=0)
p_max = np.max(points, axis=0)
p_range = p_max - p_min
mus = (
p_min
+ np.random.uniform(low=0, high=1, size=n_clusters * dim).reshape(
n_clusters, dim
)
* p_range
)
sigmas = np.stack([np.eye(dim) for _ in range(n_clusters)])
pis = np.ones(n_clusters) / n_clusters
return mus, sigmas, pis
# Test implemented heuristic
n_axs_row = 3
n_axs_cols = 3
_, axs = plt.subplots(figsize=(12, 9), nrows=n_axs_row, ncols=n_axs_cols)
for i in range(n_axs_row):
for j in range(n_axs_cols):
index = i * n_axs_cols + j + 1
h_mus, h_sigma, _ = uniform_space_points_heuristic(point_positions, 3)
ax = axs[i][j]
for m, s, color, name in zip(
h_mus, h_sigma, ["red", "blue", "green"], ["A", "B", "C"]
):
s = s.flatten() # shape: (1, 1) -> shape (1,)
plot_1d_gaussian_pdf(
m, s, plot_params={"color": color, "label": name}, ax=ax
)
ax.legend(loc="upper right")
ax.set_xlabel("Position")
ax.set_ylabel("PDF")
ax.set_title(f"Test {index}")
plt.show()
# The implemented heuristic is not ideal since it does not take the dataset distribution into consideration. It might be necessary to rerun clustering multiple times with different starting parameters.
# ### GMM model implementation
# For the GMM model we first need to implement some key helper functions. Afterwards we can implement the core component of the GMM model - the EM algorithm.
# We need to be able to calculate the likelihood of each point with respect to each cluster - $N_w(x_i|\mu_j, \Sigma_j)$. Using that we can easily calculate the log likelihood (the optimization objective) - $\sum^{N}_{i=1} log(\sum^{K}_{j=1} \pi_j \cdot N_w(x_i|\mu_j, \Sigma_j))$.
# Having the likelihood of each point with respect to each cluster also allows us to do intuitive classification: "a point's cluster should be the cluster from which it is most likely to come" - $\arg \max_j N_w(x_i|\mu_j, \Sigma_j)$.
#
def calculate_point_likelihoods(
X: np.ndarray, mus: np.ndarray, sigmas: np.ndarray
) -> np.ndarray:
"""
Calculates likelihood of each point with respect to each cluster.
Args:
X: Data points
mus: Cluster means
sigmas: Cluster covariances
Returns:
Array of shape (n, c) where n is number of data points and c is number of clusters.
"""
likelihoods = []
for mu, sigma in zip(mus, sigmas):
likelihood = stats.multivariate_normal.pdf(X, mu, sigma)
likelihoods.append(likelihood)
likelihoods = np.vstack(likelihoods).T
return likelihoods
def calculate_log_likelihood(
X: np.ndarray, mus: np.ndarray, sigmas: np.ndarray, pis: np.ndarray
) -> float:
"""
Calculates log likelihood - optimization objective.
    Formula: sum[i=1..n] log(sum[j=1..c] pis[j] * N(X[i]; mus[j], sigmas[j]))
Args:
X: Data points
mus: Cluster means
sigmas: Cluster covariances
pis: Cluster weights
Returns:
Log likelihood
"""
likelihoods = calculate_point_likelihoods(X, mus, sigmas)
normalization = np.sum(likelihoods * pis, axis=1) # shape=(n,)
return float(np.log(normalization).sum())
def classify_points_by_gaussians(
X: np.ndarray, mus: np.ndarray, sigmas: np.ndarray
) -> np.ndarray:
"""
    Classify points by the highest likelihood given a set of Gaussian clusters.
Args:
X: Data points
mus: Cluster means
sigmas: Cluster covariances
Returns:
Point classes (colors)
"""
return np.argmax(calculate_point_likelihoods(X, mus, sigmas), axis=1)
# Test classification function
h_mus, h_sigma, _ = uniform_space_points_heuristic(point_positions, 3)
estimated_colors = classify_points_by_gaussians(point_positions, h_mus, h_sigma)
plot_1d_clusters(
point_positions,
estimated_colors,
n_clusters=3,
means=h_mus,
stds=h_sigma.flatten(),
points_height=0.05,
)
plt.show()
# #### Algorithm reminder
# 0. Initialize model initial parameters $\mu_j$, $\Sigma_j$ and $\pi_j$ for every cluster $j$.
# 1. **Expectation:** Evaluate $\gamma_{j, x_i}$ for every cluster $j$ and data sample $x_i$.
# 2. **Maximization:** Estimate parameters $\mu_j$, $\Sigma_j$ and $\pi_j$ for every cluster based on $\gamma_{j, x_i}$.
# 3. Evaluate `log_likelihood` based on the model estimated parameters.
# The implementation is slightly different from the algorithm: the `log_likelihood` is evaluated at step 1 (the expectation step) of the next iteration, since some intermediate results are shared with evaluating $\gamma_{j, x_i}$ for the clusters.
class GaussianMixtureModel:
"""
Mixture of Gaussians generative model.
"""
def __init__(
self,
n_clusters: int,
max_iters: int = 10,
heuristic: Optional[GMMHeuristic] = None,
):
"""
Args:
n_clusters: Number of clusters
max_iters: Maximum number of iterations
heuristic: Initial state heuristic
"""
self._n_clusters = n_clusters
self._heuristic = (
heuristic if heuristic is not None else uniform_space_points_heuristic
)
# Stopping criteria
self._max_iters = max_iters
# State
self._mus = None
self._sigmas = None
self._pis = None
@property
def clusters(self) -> int:
"""
Get number of clusters.
Returns:
Number of clusters
"""
return self._n_clusters
@property
def mus(self) -> np.ndarray:
"""
Get cluster means.
Returns:
Cluster means.
"""
return self._mus
@property
def sigmas(self) -> np.ndarray:
"""
Get cluster covariances
Returns:
Cluster covariances
"""
return self._sigmas
@property
def pis(self) -> np.ndarray:
"""
Get cluster weights.
Returns:
Cluster weights
"""
return self._pis
def _e_step(
self, X: np.ndarray, mus: np.ndarray, sigmas: np.ndarray, pis: np.ndarray
) -> Tuple[np.ndarray, float]:
"""
Performs expectation step - estimate gamma and likelihood.
Args:
X: Data points
mus: Previous cluster means
sigmas: Previous cluster covariances
pis: Previous cluster weight coefficients
Returns:
Gammas, Log likelihood
"""
n = X.shape[0]
likelihoods = calculate_point_likelihoods(
X, mus, sigmas
) # shape=(n, n_clusters)
normalization = np.sum(likelihoods * pis, axis=1) # shape=(n,)
log_likelihood = float(np.log(normalization).sum())
# Calculate Gammas
gamma = np.zeros(shape=(self._n_clusters, n), dtype=np.float32)
for c_j in range(self._n_clusters):
likelihood = likelihoods[:, c_j]
pi_j = pis[c_j]
for d_i in range(X.shape[0]):
gamma[c_j, d_i] = pi_j * likelihood[d_i] / normalization[d_i]
return gamma, log_likelihood
def _m_step(
self, X: np.ndarray, gammas: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Performs maximization step - estimate parameters
Args:
X: Data points
gammas: Gamma values
Returns:
Cluster means, covariances, weights
"""
n, dim = X.shape
mus, sigmas, pis = [], [], []
for c_j in range(self._n_clusters):
n_j = gammas[c_j, :].sum()
mu_j = sum([gammas[c_j, d_i] * X[d_i] for d_i in range(n)]) / n_j
offsets_j = (X - mu_j).reshape(
n, dim, 1
) # converting array to Nx1 vector for matrix multiplication
sigma_j = (
sum(
[
gammas[c_j, d_i] * offsets_j[d_i] @ offsets_j[d_i].T
for d_i in range(n)
]
)
/ n_j
)
pi_j = n_j / n
mus.append(mu_j)
sigmas.append(sigma_j)
pis.append(pi_j)
mus = np.stack(mus)
sigmas = np.stack(sigmas)
pis = np.stack(pis)
return mus, sigmas, pis
def fit(self, X: np.ndarray) -> Dict[str, Any]:
"""
Performs EM algorithm to fit GMM to the data.
Args:
X: 2D Data
Returns:
State for each EM step.
"""
mus, sigmas, pis = self._heuristic(X, self._n_clusters)
history = {
"mu": [mus],
"sigma": [sigmas],
"pi": [pis],
"log_likelihood": [], # Note: log_likelihood is evaluated using old parameters (one step lag)
}
for iteration in range(self._max_iters):
gammas, log_likelihood = self._e_step(X, mus, sigmas, pis)
mus, sigmas, pis = self._m_step(X, gammas)
history["mu"].append(mus)
history["sigma"].append(sigmas)
history["pi"].append(pis)
history["log_likelihood"].append(log_likelihood)
# Postprocessing - normalizing weights in order for them to sum up to 1
pis /= pis.sum()
self._mus = mus
self._sigmas = sigmas
self._pis = pis
history["log_likelihood"].append(self.log_likelihood(X))
return history
def predict(self, X: np.ndarray) -> np.ndarray:
"""
        Classifies data points by cluster class.
Point class is chosen by the highest likelihood with respect to some cluster.
Args:
X: Data points
Returns:
Classes (colors)
"""
return classify_points_by_gaussians(X, self._mus, self._sigmas)
def log_likelihood(self, X: np.ndarray) -> float:
"""
Calculates log likelihood for given data points.
Args:
X: Data points
Returns:
Log likelihood
"""
return calculate_log_likelihood(X, self._mus, self._sigmas, self._pis)
def sample(self, n: int, cluster: int = -1) -> np.ndarray:
"""
Generate new samples. Since GMM is a generative probabilistic model it can generate new samples.
Args:
n: Number of samples to generate
            cluster: Generate samples from a specific cluster (optional)
Returns:
Generated samples.
"""
result = []
for _ in range(n):
chosen_cluster = cluster
if chosen_cluster == -1: # Cluster not chosen (generate random)
chosen_cluster = np.random.choice(self._n_clusters, p=self.pis)
new_sample = np.random.multivariate_normal(
self._mus[chosen_cluster], self._sigmas[chosen_cluster]
)
result.append(new_sample)
return np.stack(result)
# #### Testing model on generated synthetic dataset
# If we run this algorithm multiple times we will get different results. This is because EM can get stuck in a local maximum; the outcome depends heavily on the initial parameters.
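# Since EM is sensitive to initialization, a common remedy is random restarts: fit several
# models from different random starting points and keep the one with the highest log likelihood.
# A minimal sketch using the classes defined above (illustrative; degenerate initializations
# that cause numerical issues are not handled, and the helper is not used further below):
def fit_with_restarts(
    data: np.ndarray, n_clusters: int, n_restarts: int = 5, max_iters: int = 12
) -> GaussianMixtureModel:
    best_model, best_ll = None, -np.inf
    for _ in range(n_restarts):
        candidate = GaussianMixtureModel(n_clusters=n_clusters, max_iters=max_iters)
        candidate.fit(data)
        ll = candidate.log_likelihood(data)
        if ll > best_ll:
            best_model, best_ll = candidate, ll
    return best_model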
N_ITERS = 12
def run_gmm_experiment(model: GaussianMixtureModel, data: np.ndarray) -> None:
"""
Simulates model training for each step.
Args:
model: GMM model
        data: data points
"""
n_iters = N_ITERS
history = model.fit(data)
print("Log likelihood updates:", history["log_likelihood"])
print("Cluster weights:", model.pis)
iter_cols = 4
iter_rows = 3
assert iter_cols * iter_rows == n_iters
_, axs = plt.subplots(figsize=(14, 10), nrows=iter_rows, ncols=iter_cols)
for i in range(iter_rows):
for j in range(iter_cols):
h_index = i * iter_cols + j
ax = axs[i][j]
mus = history["mu"][h_index]
sigmas = history["sigma"][h_index].reshape(-1)
log_likelihood = history["log_likelihood"][h_index]
# Plot
estimated_colors = classify_points_by_gaussians(data, mus, sigmas)
plot_1d_clusters(
data,
estimated_colors,
n_clusters=model.clusters,
means=mus,
stds=sigmas,
ax=ax,
points_height=0.05,
)
ax.set_xlabel("X")
ax.set_ylabel("Probability")
ax.set_title(f"[{h_index}] Log likelihood: {log_likelihood:.2f}")
plt.tight_layout()
plt.show()
_, ex_ax = plt.subplots(figsize=(14, 5))
estimated_colors = model.predict(data)
plot_1d_clusters(
data,
estimated_colors,
n_clusters=ex_n,
means=model.mus,
stds=model.sigmas.reshape(-1),
ax=ex_ax,
points_height=0.05,
)
plt.xlabel("X")
plt.ylabel("Density")
plt.title(f"Log likelihood: {model.log_likelihood(data):.2f}")
plt.show()
run_gmm_experiment(
model=GaussianMixtureModel(n_clusters=3, max_iters=N_ITERS), data=point_positions
)
# It seems that the EM algorithm converged around step 6, but there was not much improvement after the first step (step 0 comes from the heuristic).
# #### Comparison to the ground truth
# Based on the `log likelihood` and a visual comparison we can see that the results are worse than the ground truth. We shall see if using K-Means for parameter initialization improves the results.
_, ex_ax = plt.subplots(figsize=(14, 5))
log_likelihood = calculate_log_likelihood(
point_positions, mus=ex_means, sigmas=ex_stds, pis=ex_pis
)
plot_1d_clusters(
point_positions,
point_colors,
n_clusters=ex_n,
means=ex_means,
stds=ex_stds,
ax=ex_ax,
points_height=0.05,
)
ex_ax.set_xlabel("X")
ex_ax.set_ylabel("Density")
ex_ax.set_title(f"Log likelihood: {log_likelihood:.2f}")
plt.show()
def create_kmeans_heuristic(max_iter: int) -> GMMHeuristic:
"""
    Wrapper for the `scikit-learn` KMeans algorithm, used as a GMM initialization heuristic.
    Cluster means: taken from the KMeans cluster centroids.
    Cluster covariances: set to `v*I` where v is the cluster variance and I is the identity matrix.
    Cluster weights: set to `1 / K` where K is the number of clusters.
    Args:
        max_iter: Maximum number of KMeans iterations.
    Returns:
        Heuristic function that produces the starting parameters.
"""
def kmeans(
points: np.ndarray, n_clusters: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
dim = points.shape[1]
kmeans_model = KMeans(n_clusters=n_clusters, max_iter=max_iter)
kmeans_model.fit(points)
mus = kmeans_model.cluster_centers_
groups = kmeans_model.predict(points)
Xg = np.hstack([points, groups.reshape(-1, 1)])
sigmas = []
for grp in range(n_clusters):
Xg_cluster = Xg[Xg[:, -1] == grp]
var = Xg_cluster.var()
sigmas.append(var * np.eye(dim))
sigmas = np.stack(sigmas)
pis = np.ones(n_clusters) / n_clusters
return mus, sigmas, pis
return kmeans
model = GaussianMixtureModel(
n_clusters=3, max_iters=N_ITERS, heuristic=create_kmeans_heuristic(5)
)
run_gmm_experiment(model=model, data=point_positions)
# The new results are a lot better. With better initial parameters the model found a better local maximum. Based on the `log likelihood` it can be seen that the model is a bit overfitted.
# #### Comparison to the `scikit-learn` implementation.
# The `scikit-learn` implementation gives similar results to the implemented model.
gm = GaussianMixture(n_components=3).fit(point_positions)
skl_colors = gm.predict(point_positions).reshape(-1)
skl_means = gm.means_.reshape(-1)
skl_covariances = gm.covariances_.reshape(-1)
skl_weight = gm.weights_
_, ex_ax = plt.subplots(figsize=(14, 5))
log_likelihood = calculate_log_likelihood(
point_positions, mus=skl_means, sigmas=skl_covariances, pis=skl_weight
)
plot_1d_clusters(
point_positions,
skl_colors,
n_clusters=ex_n,
means=skl_means,
stds=skl_covariances,
ax=ex_ax,
points_height=0.05,
)
ex_ax.set_xlabel("X")
ex_ax.set_ylabel("Probability")
ex_ax.set_title(f"Log likelihood: {log_likelihood:.2f}")
plt.show()
# #### Generating new samples
# Since the Gaussian mixture model is a generative probabilistic model, we can generate new samples from a random or a specific cluster (a short example of the latter follows the plot below).
gen_points = model.sample(1000)
gen_colors = model.predict(gen_points)
_, ex_ax = plt.subplots(figsize=(14, 5))
plot_1d_clusters(
gen_points,
gen_colors,
n_clusters=model.clusters,
means=model.mus,
stds=model.sigmas.reshape(-1),
ax=ex_ax,
points_height=0.05,
)
plt.xlabel("X")
plt.ylabel("Density")
plt.title(f"Log likelihood: {model.log_likelihood(point_positions):.2f}")
plt.show()
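# A short follow-up to the note above (a sketch): the `cluster` argument of `sample` draws
# all samples from a single mixture component instead of picking components at random.
cluster_samples = model.sample(200, cluster=0)
print(
    "Samples from cluster 0 - mean: %.2f, std: %.2f"
    % (cluster_samples.mean(), cluster_samples.std())
)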
# ### Synthetic 2D dataset
# Same as in the 1D case, but the data is 2D. Working with 2D datasets requires specific support:
# - Creating 2D dataset;
# - Visualizing 2D cluster points;
# - Visualizing 2D cluster pdf contours (distributions).
def create_cluster_dataset(
n: int, sizes: np.ndarray, means: np.ndarray, covs: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Creates dataset consisting of `n` Gaussian clusters with defined sizes, means and covariance matrices.
Args:
n: Number of clusters
sizes: Cluster sizes
means: Cluster means
covs: Cluster covariance matrices
Returns:
        Samples, Sample colors (clusters)
"""
assert n == len(sizes)
assert n == len(means)
assert n == len(covs)
positions = []
colors = []
for c_id, (c_size, c_mean, c_cov) in enumerate(zip(sizes, means, covs)):
p = np.random.multivariate_normal(c_mean, c_cov, c_size)
c = np.ones(c_size) * c_id
positions.append(p)
colors.append(c)
positions = np.vstack(positions)
colors = np.hstack(colors)
return positions, colors
def plot_clusters_data(
X: np.ndarray,
colors: np.ndarray,
color_list: List[str],
ax: Optional[plt.Axes] = None,
) -> None:
"""
Visualizes data by coloring scattered points by their true or predicted class color.
Args:
X: Data points
colors: Colors for data points
color_list: List of colors
ax: matplotlib Axes
"""
assert X.shape[0] == colors.shape[0]
unique_colors = np.unique(colors)
Xc = np.hstack([X, colors.reshape(-1, 1)])
for c in unique_colors:
Xc_cluster = Xc[Xc[:, 2] == c]
if ax is None:
plt.scatter(
Xc_cluster[:, 0],
Xc_cluster[:, 1],
color=color_list[int(c)],
s=30,
label=f"{c}",
)
else:
ax.scatter(
Xc_cluster[:, 0],
Xc_cluster[:, 1],
color=color_list[int(c)],
s=30,
label=f"{c}",
)
if ax is None:
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Cluster/Classes visualization")
else:
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_title("Cluster/Classes visualization")
def plot_cluster_contours(
mus: np.ndarray,
sigmas: np.ndarray,
radius_coef: int = 4,
n_points: int = 30,
ax: Optional[plt.Axes] = None,
) -> None:
"""
Plots contours of 2d Gaussians in order to visualize cluster distributions.
Args:
mus: Cluster means
sigmas: Cluster sigmas
radius_coef: Radius coef (contour completeness)
n_points: Number of points to sample to plot the grid (contour quality)
ax: matplotlib Ax
"""
for c, (mu, sigma) in enumerate(zip(mus, sigmas)):
# Create grid of points around cluster
# And calculate density for each point
# Plot contour based on point coordinates and its density
radius_x = radius_coef * sigma[0, :].max()
radius_y = radius_coef * sigma[1, :].max()
x = np.linspace(mu[0] - radius_x, mu[0] + radius_x, n_points)
y = np.linspace(mu[1] - radius_y, mu[1] + radius_y, n_points)
grid = np.dstack(np.meshgrid(x, y))
density = stats.multivariate_normal.pdf(grid, mu, sigma)
if ax is None:
plt.contour(x, y, density, cmap="YlOrRd")
else:
ax.contour(x, y, density, cmap="YlOrRd")
N_CLUSTERS = 5
SIZES = np.array([10, 30, 40, 60, 100])
MEANS = np.array([[-12, -12], [0, 0], [8, 8], [-3, 5], [7, -10]])
COVS = np.array(
[
[[1, 5], [5, 1]],
[[1, 2], [2, 1]],
[[1, 1.3], [1.3, 1]],
[[1, 2], [2, 1]],
[[0.3, 1], [1, 0.3]],
]
)
# Test dataset creation
COLOR_LIST = ["r", "b", "g", "y", "c"]
positions_2d, colors_2d = create_cluster_dataset(N_CLUSTERS, SIZES, MEANS, COVS)
plot_clusters_data(positions_2d, colors_2d, color_list=COLOR_LIST)
plt.show()
def run_gmm_experiment_2d(
model: GaussianMixtureModel, data: np.ndarray, n_iters: int, color_list: List[str]
) -> None:
"""
Simulates model training for each step. Like `run_gmm_experiment` but transformed for 2D data.
Args:
model: GMM model
data: Data points
n_iters: Training iterations
color_list: Color list
"""
history = model.fit(data)
print("Log likelihood updates:", history["log_likelihood"])
print("Cluster weights:", model.pis)
iter_cols = 4
iter_rows = 2
assert iter_cols * iter_rows == n_iters
_, axs = plt.subplots(figsize=(14, 7), nrows=iter_rows, ncols=iter_cols)
for i in range(iter_rows):
for j in range(iter_cols):
h_index = i * iter_cols + j
ax = axs[i][j]
mus = history["mu"][h_index]
sigmas = history["sigma"][h_index]
log_likelihood = history["log_likelihood"][h_index]
# Plot
estimated_colors = classify_points_by_gaussians(data, mus, sigmas)
plot_clusters_data(
positions_2d, estimated_colors, color_list=color_list, ax=ax
)
plot_cluster_contours(mus, sigmas, radius_coef=2, ax=ax)
ax.set_xlabel("X")
ax.set_ylabel("Probability")
ax.set_title(f"[{h_index}] Log likelihood: {log_likelihood:.2f}")
plt.tight_layout()
plt.show()
experiment_2d_iterations = 7
run_gmm_experiment_2d(
model=GaussianMixtureModel(
n_clusters=5,
heuristic=create_kmeans_heuristic(10),
max_iters=experiment_2d_iterations,
),
data=positions_2d,
n_iters=experiment_2d_iterations + 1,
color_list=COLOR_LIST,
)
model = GaussianMixtureModel(
n_clusters=N_CLUSTERS, heuristic=create_kmeans_heuristic(5)
)
_ = model.fit(positions_2d)
y_pred = model.predict(positions_2d)
plot_clusters_data(positions_2d, y_pred, color_list=COLOR_LIST)
plot_cluster_contours(model.mus, model.sigmas, radius_coef=2)
plt.show()
# ### The Iris dataset
# Testing the model on another toy flower dataset that has 3 classes. We shall see if the unsupervised model is able to find those three classes (a quantitative check follows the plots below).
# The Iris dataset has 150 samples and every sample has 4 features:
# - `sepal length (cm)`
# - `sepal width (cm)`
# - `petal length (cm)`
# - `petal width (cm)`
# Only `sepal length (cm)` and `petal length (cm)` will be used for easier visualization.
from sklearn.datasets import load_iris
data = load_iris()
X = data["data"][:, [0, 2]] #
y = data["target"]
N_CLASSES = 3
COLOR_LIST = ["red", "green", "blue"]
print("Dataset shape:", X.shape, y.shape)
plot_clusters_data(X, y, color_list=COLOR_LIST)
plt.show()
model = GaussianMixtureModel(n_clusters=N_CLASSES, heuristic=create_kmeans_heuristic(5))
_ = model.fit(X)
y_pred = model.predict(X)
plot_clusters_data(X, y_pred, color_list=COLOR_LIST)
plot_cluster_contours(model.mus, model.sigmas)
plt.show()
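# A quick quantitative check of the statement above (a sketch): the adjusted Rand index
# compares the discovered clusters with the true Iris classes; values near 1 mean the
# clustering recovered the classes well, values near 0 mean chance-level agreement.
from sklearn.metrics import adjusted_rand_score

print("Adjusted Rand index vs. true classes:", round(adjusted_rand_score(y, y_pred), 3))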
|
# # Advanced topics in computer science
# #### sport car data
# #### The dataset from https://www.kaggle.com/datasets/rkiattisak/sports-car-prices-dataset
# #### The purpose of this assignment is to learn how to analyse data using data science.
# ##### student : maryam ahmed alsalti
# ##### ID : 201916078
# #### Importing libraries
#
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# ### Reading the file
import pandas as pd
df = pd.read_csv("/kaggle/input/sssssssssssssss/Sport car price.csv")
df.head()
df.info()
df.tail()
# # Data cleansing and improvement (making the information more useful)
df1 = df.copy()
# ### Checking the number of rows before changing anything
df.info()
def change_to_numeric(x, df1):
temp = pd.get_dummies(df1[x])
df1 = pd.concat([df1, temp], axis=1)
df1.drop([x], axis=1, inplace=True)
return df1
df2 = change_to_numeric("Car Make", df1)
df2.head()
# ### Checking number of rows in the new dataframe
#
df2.info()
df.isna().sum()
df = df.dropna()
df.isna().sum()
df = pd.read_csv("/kaggle/input/sssssssssssssss/Sport car price.csv")
df.tail()
df.fillna(10)
# #### Checking the values for car make
df["Car Make"].unique()
# ## Arrange the car model categories as columns rather than rows (one-hot encoding)
df3 = df2.copy()
def change_to_numeric(x, df3):
temp = pd.get_dummies(df3[x])
df3 = pd.concat([df3, temp], axis=1)
df3.drop([x], axis=1, inplace=True)
return df3
df4 = change_to_numeric("Car Model", df3)
df4.head()
# ### Checking the number of rows in the new dataframe again
df4.info()
gen = df["Car Make"].mode()[0]
gen
# ## Checking if there's an empty value in dataframe
df.isna().sum()
# ## Checking if there's any duplicated value
df4.duplicated().sum()
# ## Checking the values of the Year column
df4["Year"].unique()
df["Engine Size (L)"].unique()
df["Engine Size (L)"].isna().sum()
x = df["Engine Size (L)"].mode()[0]
print(x)
df["Engine Size (L)"].fillna(x, inplace=True)
df.isna().sum()
df["Torque (lb-ft)"].unique()
df["Torque (lb-ft)"].fillna(0, inplace=True)
df.isna().sum()
df.describe().T.style.background_gradient(cmap="magma")
df.isna().sum()
temp = df[df["Price (in USD)"].isna()]
temp
df.head()
df.isna().sum()
df1 = df[df["Car Model"] == "Year"].copy()
df1["Car Model"].unique()
df.isna().sum()
df["Car Model"].fillna(1, inplace=True)
df
df.head()
df["Total"] = df["Price (in USD)"]
df.head()
am = df["Total"].sum()
am
mx = df["Total"].max()
mx
df[df["Total"] == mx]
df.isna().sum()
df.head(5)
# ### Understand the data again
df4.describe().T.style.background_gradient(cmap="magma")
# # Finding the correlation between data:
df4.corr()
df.head()
# ## Rename Car Make to Make, Car Model to Model, Price (in USD) to Price, and Torque (lb-ft) to Torque.
#
df.rename(columns={"Car Make": "Make", "Car Model": "Model"}, inplace=True)
df.rename(columns={"Price (in USD)": "Price", "Torque (lb-ft)": "Torque"}, inplace=True)
df.head()
# ## Presenting the relation in bar charts
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(20, 4))
plt.subplot(1, 2, 1)
plt.title("Car Year")
sns.histplot(df.Year)
plt.subplot(1, 2, 2)
plt.title("Year ")
sns.boxplot(y=df.Year)
plt.show()
plt.figure(figsize=(25, 6))
plt.subplot(1, 3, 1)
plt1 = df.Make.value_counts().plot(kind="bar")
plt.title("Make")
plt1.set(xlabel="Make", ylabel="Frequency")
plt.subplot(1, 3, 2)
plt1 = df.Year.value_counts().plot(kind="bar")
plt.title("Year")
plt1.set(xlabel=" Year", ylabel="Frequency")
plt.subplot(1, 3, 3)
plt1 = df.Torque.value_counts().plot(kind="bar")
plt.title("Torque")
plt1.set(xlabel="Torque", ylabel="Frequency")
plt.show()
df1 = df.copy()
df1 = df1.drop(["Total"], axis=1)
df1.head()
df.rename(columns={"Car Make": "Make", "Engine Size (L)": "Size"}, inplace=True)
df.rename(columns={"Car Make": "Make", "Car Model": "Model"}, inplace=True)
df.rename(columns={"Price (in USD)": "Price", "Torque (lb-ft)": "Torque"}, inplace=True)
df.head()
categ = df.select_dtypes(exclude="number")
categ.head()
df1 = df.copy()
df1 = df1.drop(["Make", "Model", "Horsepower"], axis=1)
df1.head
df["Size"] = df["Size"].replace("200,000", "200,000 - 200,000")
df["Size"] = df["Size"].replace("0", "0 - 0")
f = list(df["Size"].unique())
f
for x in f:
    print(x)
    # Map electric engine entries to 0 and convert the remaining entries to floats
    if "Electric Motor" in x:
        x1 = float(x.replace("Electric Motor", "0"))
    elif "Electric" in x:
        x1 = float(x.replace("Electric", "0"))
    else:
        x1 = float(x)
print(x, x1)
df["Size"] = df["Size"].replace(x, x1)
df.head(200)
# # Machine learning
# ## Importing liberary for machine learning
from sklearn import linear_model
df.tail()
# ## Going to predict the Value
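# A minimal sketch of this step, assuming `allfeatures` was meant to hold the numeric
# predictor columns (the column names below come from the renamed dataframe and Price is the
# target; Price is stored as text with commas in the CSV, so the columns are converted to
# numbers first and rows that cannot be converted are dropped).
allfeatures = ["Year", "Size", "Torque"]
for col in allfeatures + ["Price"]:
    df[col] = pd.to_numeric(
        df[col].astype(str).str.replace(",", "", regex=False), errors="coerce"
    )
model_df = df.dropna(subset=allfeatures + ["Price"])
reg = linear_model.LinearRegression()
reg.fit(model_df[allfeatures], model_df["Price"])
print("Training R^2:", reg.score(model_df[allfeatures], model_df["Price"]))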
print(allfeatures)
print(len(allfeatures))
|
# KFold Sampling/Cross-Validation in reference to train_test_split
# The basic train_test_split sampling is simple and intuitive. With train_test_split, you separate the data into train and test splits, you fit the model on the train split, and then use the fitted model on the test split. For the train_test_split, the test split is not being used in fitting the model.
# |------------train----------------|-------test-------|
# KFold is a sampling technique useful for situations where there is limited data. In these situations we would like to be able to use all of the limited data in model fitting. The KFold approach is to separate the data into n folds or splits. Below is an example for n=5:
# |--Fold 1--|--Fold 2--|--Fold 3--|--Fold 4--|--Fold 5--|
# Then use one of the folds as a test split and the remaining folds as train splits. Fit the model on the combined train splits and validate on the test split. This fitted model would be Model 1. Repeat this step by switching the split that is designated as the test split:
# Model 1:|--train--|--train--|--train--|--train--|--test--|
# Model 2:|--train--|--train--|--train--|--test--|--train--|
# Model 3:|--train--|--train--|--test--|--train--|--train--|
# Model 4:|--train--|--test--|--train--|--train--|--train--|
# Model 5:|--test--|--train--|--train--|--train--|--train--|
# The validation metrics for the 5 models are then averaged to come up with a final validation metric for the data.
# The basic train-test split can be viewed as a special case of KFold with n=2, where only one of the two folds is ever used for validation.
# What KFold does not do is provide a final fitted model the way train_test_split does. In general, we will not use any of the 5 interim models as the final model; instead, we fit the model on all of the data to obtain the final model.
# By using KFold, we avoid permanently setting aside a part of the data for validation only - every observation is used for both training and validation across the folds.
# To obtain the best model in combination with KFold sampling we can use grid search techniques (a short sketch follows the reference below).
# Reference:
# https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation
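# A minimal sketch of the ideas above (assuming scikit-learn is available, with a Ridge model
# on synthetic data as a stand-in): `cross_val_score` scores the model on each of 5 folds, and
# `GridSearchCV` combines the same KFold splitting with a hyperparameter search before
# refitting the best model on all of the data.
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV

X_demo, y_demo = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
cv = KFold(n_splits=5, shuffle=True, random_state=0)
fold_scores = cross_val_score(Ridge(alpha=1.0), X_demo, y_demo, cv=cv)  # one R^2 score per fold
print("Mean CV R^2:", fold_scores.mean())
grid = GridSearchCV(Ridge(), param_grid={"alpha": [0.1, 1.0, 10.0]}, cv=cv)
grid.fit(X_demo, y_demo)  # best_estimator_ is refit on all of the data
print("Best alpha:", grid.best_params_)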
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option(
"display.float_format", lambda x: "%.3f" % x
) # format floats in dataframes
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Prepare Data for Processing
# Load Data into Pandas DataFrame
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
# Preview the data
def describe_all(df):
pd.set_option("display.max_rows", df.shape[1]) # display all rows of data
print(f"(rows, columns){df.shape} Datatype Counts:{dict(df.dtypes.value_counts())}")
desc = df.describe(include="all").transpose()
desc["Datatype"] = df.dtypes
desc["%missing"] = df.isnull().sum().values * 100 / len(df)
desc["Value0"] = df.head().transpose()[0]
desc["Value1"] = df.head().transpose()[1]
desc["Value2"] = df.head().transpose()[2]
desc["Value3"] = df.head().transpose()[3]
desc["Value4"] = df.head().transpose()[4]
return desc
describe_all(train)
# # Process Data for Modeling
missing_val_count_by_column = train.isnull().sum()
print(missing_val_count_by_column[missing_val_count_by_column > 0])
# Find most important numerical features relative to target
corr = train.corr()
corr.sort_values(["SalePrice"], ascending=False, inplace=True)
print(corr.SalePrice)
# Separate numerical features (minus the target) and categorical features for processing
categorical_features = train.select_dtypes(include=["object", "bool"]).columns
numerical_features = train.select_dtypes(include=["int64", "float64"]).columns
numerical_features = numerical_features.drop("SalePrice")
print("Numerical features : " + str(len(numerical_features)))
print("Categorical features : " + str(len(categorical_features)))
train_num = train[numerical_features]
train_cat = train[categorical_features]
# Handle missing values for numerical features by using median as replacement
print(
"NAs for numerical features in train : "
+ str(train[numerical_features].isnull().values.sum())
)
train[numerical_features] = train[numerical_features].fillna(
train[numerical_features].median()
)
print(
"Remaining NAs for numerical features in train : "
+ str(train[numerical_features].isnull().values.sum())
)
# Use OrdinalEncoder, since one-hot encoding can result in different column counts between the training and test sets (see the note below)
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder()
train[categorical_features] = encoder.fit_transform(train[categorical_features])
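# A hedged aside on the design choice above (a small self-contained sketch): creating the
# OrdinalEncoder with handle_unknown="use_encoded_value" would allow the fitted encoder to be
# reused on the test set with `transform` (instead of a second fit), keeping the
# category-to-integer mapping identical between train and test.
demo_encoder = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
demo_train = pd.DataFrame({"color": ["red", "blue", "red"]})
demo_test = pd.DataFrame({"color": ["blue", "green"]})  # "green" was never seen during fit
demo_encoder.fit(demo_train)
print(demo_encoder.transform(demo_test))  # the unseen category is encoded as -1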
# Handle missing values for categorical features
print(
"NAs for categorical features in train : "
+ str(train[categorical_features].isnull().values.sum())
)
train[categorical_features] = train[categorical_features].fillna(method="ffill")
train[categorical_features] = train[categorical_features].fillna(0)
print(
"Remaining NAs for categorical features in train : "
+ str(train[categorical_features].isnull().values.sum())
)
# # Analyze Model
# Define features
X = train.drop("SalePrice", axis=1)
# print("New number of features : " + str(X.shape[1]))
# Define the Label for Model Fitting
y = train.SalePrice
# print(y)
# References:
# * https://www.kaggle.com/learn/intro-to-deep-learning
# * deep-learning-with-python-second-edition
# For regression, we are looking to output a number rather than a probability, so the sigmoid activation function is not needed.
# 
# TensorFlow Model
from tensorflow import keras
from tensorflow.keras import layers
# Input shape
input_shape = [X.shape[1]]
def build_model():
model = keras.Sequential(
[
layers.BatchNormalization(input_shape=input_shape),
layers.Dense(64, activation="relu"),
layers.Dropout(0.05),
layers.Dense(64, activation="relu"),
layers.Dropout(0.05),
layers.Dense(64, activation="relu"),
layers.Dense(1),
]
)
model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
return model
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
# Partition the dataset into train and validation sets using KFold
kf = KFold(n_splits=5, shuffle=True, random_state=0)
models = dict()
num_epochs = 400
all_mae_histories = []
for i, (train_index, validation_index) in enumerate(kf.split(train)):
print(f"Fold {i}:")
# print(f" Train: index={train_index}")
# print(f" Test: index={test_index}")
# Separate data based on split index
Xy_train = train.iloc[train_index] # use iloc to select DataFrame rows by position
Xy_validation = train.iloc[validation_index]
# Specify features(X) and label(y)
X_train = Xy_train.drop("SalePrice", axis=1)
X_validation = Xy_validation.drop("SalePrice", axis=1)
y_train = Xy_train.SalePrice
y_validation = Xy_validation.SalePrice
print("X_train : " + str(X_train.shape))
print("X_validation : " + str(X_validation.shape))
print("y_train : " + str(y_train.shape))
print("y_validation : " + str(y_validation.shape))
# Fit model
model = build_model()
history = model.fit(
X_train,
y_train,
validation_data=(X_validation, y_validation),
epochs=num_epochs,
batch_size=16,
verbose=0,
)
mae_history = history.history["val_mae"]
all_mae_histories.append(mae_history)
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)
]
plt.plot(range(1, len(average_mae_history[50:]) + 1), average_mae_history[50:])
plt.xlabel("Epochs")
plt.ylabel("Validation MAE")
plt.show()
# Based on the KFold analysis the model has converged by around 200 epochs
# Build Final Model using all Data
model = build_model()
model.fit(X, y, epochs=200, batch_size=16, verbose=0)
# # Share Submission
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
describe_all(test)
missing_val_count_by_column = test.isnull().sum()
print(missing_val_count_by_column[missing_val_count_by_column > 0])
# Separate numerical features and categorical features
test_num = test[numerical_features]
test_cat = test[categorical_features]
# Handle missing values for numerical features by using median as replacement
print(
"NAs for numerical features in test : "
+ str(test[numerical_features].isnull().values.sum())
)
test[numerical_features] = test[numerical_features].fillna(
test[numerical_features].median()
)
print(
"Remaining NAs for numerical features in test : "
+ str(test[numerical_features].isnull().values.sum())
)
# Use OrdinalEncoder (note: fit_transform here re-fits the encoder on the test data, so the
# integer codes are not guaranteed to match the training mapping; an encoder created with
# handle_unknown="use_encoded_value" could instead be reused with transform)
test[categorical_features] = encoder.fit_transform(test[categorical_features])
# Handle missing values for categorical features
print(
"NAs for categorical features in test : "
+ str(test[categorical_features].isnull().values.sum())
)
test[categorical_features] = test[categorical_features].fillna(method="ffill")
test[categorical_features] = test[categorical_features].fillna(0)
print(
"Remaining NAs for categorical features in test : "
+ str(test[categorical_features].isnull().values.sum())
)
# Join categorical and numerical features
# test = pd.concat([test_num, test_cat], axis = 1)
X_test = test
print("New number of features : " + str(X_test.shape[1]))
# Predict
y_test_pred = model.predict(X_test)
print(y_test_pred.transpose()[0])
# Output
output = pd.DataFrame({"Id": test.Id, "SalePrice": y_test_pred.transpose()[0]})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
|
# This model is based on the FastAI models and coursework produced by Jeremy Howard, Rachel Thomas and the community
# I have used model details from this course and modified them for this classifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# I manually "Added Data" that is under my kaggle profile including "2023 Global Potato Chips"
# and images from the "Canadian Potato Chips October 2022 Testing"
# I am responsible for taking all these pictures and have uploaded the images. Anyone can use them.
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import Fastcore library
# Import the FastAI vision library and use the Path method to read the files that were imported (Can-Potato-chips-10-2022)
from fastcore.all import *
from fastai.vision.all import *
# Verify if any images failed
path = Path("/kaggle/input/2023-global-potato-chips-training-images-ii")
failed = verify_images(get_image_files(path))
failed.map(Path.unlink)
len(failed)
# load the data blocks with the images input
dls = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=[Resize(192, method="squish")],
).dataloaders(path)
# show 40 images
dls.show_batch(max_n=40)
# run learner using resnet18 with 3 epochs
learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(3)
# import widgets to learn
from fastai.vision.widgets import *
cleaner = ImageClassifierCleaner(learn)
cleaner
# Build a classifier function
categories = learn.dls.vocab
def classify_image(img):
pred, idx, probs = learn.predict(img)
return dict(zip(categories, map(float, probs)))
# export the model
learn.export("global_model_april14.pkl")
# Let's see how well the model classifies a test image
classify_image("/kaggle/input/test-data/red1.JPG")
# This is in fact Red-Coast!
im = Image.open("/kaggle/input/test-data/red1.JPG")
im.to_thumb(256, 256)
# Let's see how well the model classifies a test image
classify_image("/kaggle/input/test-data/red2.JPG")
# This is Red-Coast from a distance. The model needs some work here.
im = Image.open("/kaggle/input/test-data/red2.JPG")
im.to_thumb(256, 256)
# Let's see how well the model classifies a test image
classify_image("/kaggle/input/test-data/dutch1.JPG")
# This is in fact Old Dutch so the model is correct!
im = Image.open("/kaggle/input/test-data/dutch1.JPG")
im.to_thumb(256, 256)
# Let's see how well the model classifies a test image
classify_image("/kaggle/input/test-data/syal1.JPG")
# This is in fact OD-LS so the model is correct!
im = Image.open("/kaggle/input/test-data/syal1.JPG")
im.to_thumb(256, 256)
# define the test image directory
test_dir = "/kaggle/input/test-data"
# create a list of all image files in the test directory
test_files = get_image_files(test_dir)
# create an empty list to store the results
results = []
# loop through all the test images
for img_path in test_files:
# open the image and classify it using your model
img = PILImage.create(img_path)
    probs_by_category = classify_image(img)  # dict mapping each category name to its probability
    pred, pred_idx, probs = learn.predict(img)
    # append the file name, predicted category and the per-category probabilities to the results list
    results.append({"file": img_path.name, "prediction": str(pred), **probs_by_category})
# convert the list of results to a pandas dataframe
test_df = pd.DataFrame(results)
# export the dataframe to an Excel file
test_df.to_excel("results_w_cat_april14.xlsx", index=False)
|
import pandas as pd
import numpy as np
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Context
# * Cardiovascular diseases (CVDs) are the number 1 cause of death globally, taking an estimated 17.9 million lives each year, which accounts for 31% of all deaths worldwide. Four out of 5 CVD deaths are due to heart attacks and strokes, and one-third of these deaths occur prematurely in people under 70 years of age. Heart failure is a common event caused by CVDs, and this dataset contains 11 features that can be used to predict possible heart disease.
# * People with cardiovascular disease or who are at high cardiovascular risk (due to the presence of one or more risk factors such as hypertension, diabetes, hyperlipidaemia or already established disease) need early detection and management wherein a machine learning model can be of great help.
# # Data input
df = pd.read_csv("/kaggle/input/heart-failure-prediction/heart.csv")
df.head() # data view
# # Attribute Information
# * Age: age of the patient [years]
# * Sex: sex of the patient [M: Male, F: Female]
# * ChestPainType: chest pain type [TA: Typical Angina, ATA: Atypical Angina, NAP: Non-Anginal Pain, ASY: Asymptomatic]
# * RestingBP: resting blood pressure [mm Hg]
# * Cholesterol: serum cholesterol [mg/dl]
# * FastingBS: fasting blood sugar [1: if FastingBS > 120 mg/dl, 0: otherwise]
# * RestingECG: resting electrocardiogram results [Normal: Normal, ST: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV), LVH: showing probable or definite left ventricular hypertrophy by Estes' criteria]
# * MaxHR: maximum heart rate achieved [Numeric value between 60 and 202]
# * ExerciseAngina: exercise-induced angina [Y: Yes, N: No]
# * Oldpeak: oldpeak = ST [Numeric value measured in depression]
# * ST_Slope: the slope of the peak exercise ST segment [Up: upsloping, Flat: flat, Down: downsloping]
# * HeartDisease: output class [1: heart disease, 0: Normal]
df.shape # the shape of data is 918 rows and 12 columns
df.dtypes # the type of data
df.nunique() # every columns unique value number
df.ChestPainType.unique() # view unique value in ChestPainType
df.isnull().sum() # judge null value
# change to categorical number
# Sex
df.Sex.replace({"M": 1, "F": 0}, inplace=True)
# ChestPainType
df.ChestPainType.replace({"TA": 0, "ATA": 1, "NAP": 2, "ASY": 3}, inplace=True)
# RestingECG
df.RestingECG.replace({"Normal": 0, "ST": 1, "LVH": 2}, inplace=True)
# ExerciseAngina
df.ExerciseAngina.replace({"N": 0, "Y": 1}, inplace=True)
# ST_Slope
df.ST_Slope.replace({"Up": 0, "Flat": 1, "Down": 2}, inplace=True)
df.head()
# define categorical and continuous feature
cat_col = [
"Sex",
"ChestPainType",
"RestingECG",
"ExerciseAngina",
"ST_Slope",
] # categorical features
con_col = [
"Age",
"RestingBP",
"Cholesterol",
"FastingBS",
"MaxHR",
"Oldpeak",
] # continuous features
round(df[con_col].describe(), 2) # two decimal
# # Visualization
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
sns.set(style="whitegrid")
config = {
"font.family": "serif",
"font.size": 10.5,
"mathtext.fontset": "stix",
"font.serif": ["Times New Roman"],
}
rcParams.update(config)
fig, ax = plt.subplots(3, 4, figsize=(16, 12))
# Age the relationship between Age and HeartDisease
sns.kdeplot(ax=ax[0][0], data=df, x="Age", hue="HeartDisease", fill=True, linewidth=0)
ax[0][0].grid(False)
ax[0][0].set_ylabel("")
sns.move_legend(ax[0][0], loc="upper left") # move legend location
# Sex
sns.countplot(ax=ax[0][1], data=df, x="Sex", hue="HeartDisease")
ax[0][1].grid(False)
ax[0][1].set_xticklabels(["female", "male"])
ax[0][1].set_ylabel("")
# ChestPainType
sns.stripplot(ax=ax[0][2], data=df, x="ChestPainType", y="Age", hue="HeartDisease")
ax[0][2].set_xticklabels(["TA", "ATA", "NAP", "ASY"])
ax[0][2].set_ylabel("")
ax[0][2].grid(False)
# RestingBP
sns.boxenplot(ax=ax[0][3], data=df, y="RestingBP", x="HeartDisease")
ax[0][3].set_ylabel("")
ax[0][3].set_xlabel("RestingBP")
ax[0][3].grid(False)
# Cholesterol
sns.kdeplot(
ax=ax[1][0], data=df, x="Cholesterol", hue="HeartDisease", fill=True, linewidth=0
)
ax[1][0].grid(False)
ax[1][0].set_ylabel("")
# FastingBS fasting blood sugar
sns.countplot(ax=ax[1][1], data=df, x="FastingBS", hue="HeartDisease")
ax[1][1].set_xticklabels(["otherwise", ">120mg/dl"])
ax[1][1].grid(False)
ax[1][1].set_ylabel("")
# RestingECG
sns.countplot(ax=ax[1][2], data=df, x="RestingECG", hue="HeartDisease")
ax[1][2].set_xticklabels(["Normal", "ST", "LVH"])
ax[1][2].grid(False)
ax[1][2].set_ylabel("")
# MaxHR
sns.violinplot(ax=ax[1][3], data=df, y="MaxHR", x="HeartDisease")
ax[1][3].grid(False)
ax[1][3].set_ylabel("")
# ExerciseAngina exercise-induced angina
sns.countplot(ax=ax[2][0], data=df, x="ExerciseAngina", hue="HeartDisease")
ax[2][0].grid(False)
ax[2][0].set_ylabel("")
# Oldpeak
sns.boxplot(ax=ax[2][1], data=df, x="HeartDisease", y="Oldpeak")
ax[2][1].grid(False)
ax[2][1].set_ylabel("")
ax[2][1].set_xlabel("Oldpeak")
# ST_Slope
sns.countplot(
ax=ax[2][2],
data=df,
x="ST_Slope",
hue="HeartDisease",
)
ax[2][2].set_xticklabels(["upsloping", "flat", "downsloping"])
ax[2][2].grid(False)
ax[2][2].set_ylabel("")
# HeartDisease
sns.countplot(
ax=ax[2][3],
data=df,
x="HeartDisease",
)
for x, y in enumerate(df.HeartDisease.value_counts()[::-1]):
# print(x,y)
ax[2][3].text(x - 0.05, y + 3, "%s" % y)
ax[2][3].grid(False)
ax[2][3].set_ylabel("")
plt.show()
# # Conclusions of EDA
# 1. People around 58 years old are more likely to have heart disease
# 2. Males are more likely to have heart disease than females
# 3. The chest pain type of patients is most often asymptomatic
# 4. The resting blood pressure of some heart disease patients has anomalous values, such as zero (see the quick check after this list)
# 5. The serum cholesterol of heart disease patients is lower than that of healthy people
# 6. Among people with fasting blood sugar > 120 mg/dl, heart disease patients outnumber healthy people; among those below 120 mg/dl, healthy people are more numerous
# 7. Among people with an ST-T wave abnormality, heart disease patients are roughly double the number of healthy people
# 8. The maximum heart rate of patients is lower
# 9. Patients are more likely to have exercise-induced angina
# 10. The oldpeak of patients is higher
# 11. People with a flat or downsloping ST slope are more likely to have heart disease
# 12. There are 508 patients, which is more than the number of healthy people
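# A quick check of points 4 and 5 above (a sketch): count the physiologically implausible
# zero values in the raw columns.
print("rows with RestingBP == 0:", (df["RestingBP"] == 0).sum())
print("rows with Cholesterol == 0:", (df["Cholesterol"] == 0).sum())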
# # Prediction
# Scaling
from sklearn.preprocessing import RobustScaler
# Train Test Split
from sklearn.model_selection import train_test_split
# models
# svc
from sklearn.svm import SVC
# logistic regression
from sklearn.linear_model import LogisticRegression
# random forest
from sklearn.ensemble import RandomForestClassifier
# decision tree
from sklearn.tree import DecisionTreeClassifier
# boosting
from sklearn.ensemble import GradientBoostingClassifier
# xgboost
import xgboost
# metrics
from sklearn.metrics import accuracy_score, classification_report, roc_curve
# cross-validation
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
# one-hot encoding
df = pd.get_dummies(df, columns=cat_col, drop_first=True)
df
# define the features and target
y = df["HeartDisease"]
X = df.drop("HeartDisease", axis=1)
# data scaler
scaler = RobustScaler()
X[con_col] = scaler.fit_transform(X[con_col])
X.head()
# define the train data and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
print("X_train shape is", X_train.shape)
print("X_test shape is", X_test.shape)
print("y_train shape is", y_train.shape)
print("y_test shape is", y_test.shape)
# ### support vector classifier
svc = SVC(kernel="linear", C=1, random_state=1, probability=True)
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
print("the score of svc is", accuracy_score(y_pred, y_test))
# roc curve
# calculating the probabilities
y_pred_prob = svc.predict_proba(X_test)[:, 1]
# computing the ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
# plotting the curve
plt.plot([0, 1], [0, 1], "k--")
plt.plot(fpr, tpr, label="support vector classifier")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("support vector classifier ROC Curve")
plt.show()
# ### logistic regression
logreg = LogisticRegression(random_state=1)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print("the score of logistic regression is", accuracy_score(y_pred, y_test))
# ### random forest
rf = RandomForestClassifier(random_state=1)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
print("the score of random forest is", accuracy_score(y_pred, y_test))
# ### decision tree
dt = DecisionTreeClassifier(random_state=1)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
print("the score of decision tree is", accuracy_score(y_pred, y_test))
# ### gradient boosting
gb = GradientBoostingClassifier(random_state=1)
gb.fit(X_train, y_train)
y_pred = gb.predict(X_test)
print("the score of gradient boosting is", accuracy_score(y_pred, y_test))
# ### xgboosting
xgb = xgboost.XGBClassifier(random_state=1)
xgb.fit(X_train, y_train)
y_pred = xgb.predict(X_test)
print("the score of xgboosting is", accuracy_score(y_pred, y_test))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
gender_submission = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
# Survived: survival => 0 = No, 1 = Yes
# Pclass: ticket class => 1 = 1st, 2 = 2nd, 3 = 3rd
# Sex: sex
# Age: age
# SibSp: number of siblings / spouses aboard
# Parch: number of parents / children aboard
# Ticket: ticket number
# Fare: passenger fare
# Cabin: cabin number
# Embarked: port of embarkation => C = Cherbourg, Q = Queenstown, S = Southampton
train
# 0: Dead, 1: Survived
# SibSp = Sibling + Spouse
# Parch = Parent + Child
train.info()
test
import matplotlib.pyplot as plt
# Pclass
plt.subplot(1, 3, 1)
train["Survived"][train["Pclass"] == 1].value_counts().sort_values().plot.bar(
figsize=(10, 3)
)
plt.xlabel("Pclass : 1")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 2)
train["Survived"][train["Pclass"] == 2].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("Pclass : 2")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 3)
train["Survived"][train["Pclass"] == 3].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("Pclass : 3")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.show()
# Pclass Survived(%) : 1st > 2nd > 3rd
# Sex
plt.subplot(1, 2, 1)
train["Survived"][train["Sex"] == "male"].value_counts().sort_values().plot.bar(
figsize=(7, 3)
)
plt.xlabel("Male")
plt.xticks([0, 1], ["Survived", "Dead"])
plt.subplot(1, 2, 2)
train["Survived"][train["Sex"] == "female"].value_counts().plot.bar(figsize=(7, 3))
plt.xlabel("Female")
plt.xticks([0, 1], ["Survived", "Dead"])
plt.show()
# Survived(%) : female > male
# Age
plt.hist(
train[train["Survived"] == 0]["Age"], bins=20, alpha=0.5, color="red", label="Dead"
)
plt.hist(
train[train["Survived"] == 1]["Age"],
bins=20,
alpha=0.5,
color="blue",
label="Survived",
)
plt.title("Survived")
plt.legend()
plt.xlabel("Age")
plt.show()
# Survived : Young > Old
# SibSp
plt.subplot(1, 3, 1)
train["Survived"][train["SibSp"] == 0].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("SibSp = 0")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 2)
train["Survived"][
(train["SibSp"] == 1) | (train["SibSp"] == 2)
].value_counts().sort_values().plot.bar(figsize=(10, 3))
plt.xlabel("SibSp >= 1")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 3)
train["Survived"][train["SibSp"] >= 3].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("SibSp >= 3")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.show()
# Survived(%) : (1, 2) > (0) > (3 or more)
# Parch
plt.subplot(1, 3, 1)
train["Survived"][train["Parch"] == 0].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("Parch = 0")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 2)
train["Survived"][
(train["Parch"] == 1) | (train["Parch"] == 2)
].value_counts().sort_values().plot.bar(figsize=(10, 3))
plt.xlabel("Parch >= 1")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 3)
train["Survived"][train["Parch"] >= 3].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("Parch >= 3")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.show()
# Survived(%) : (1, 2) > (0) > (3 or more)
# Judging from SibSp and Parch, passengers travelling in smaller groups had a higher survival rate (a family-size breakdown follows below)
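# A small illustration of the point above (a sketch, not used in the final model): combining
# SibSp and Parch into a family-size value makes the pattern explicit.
family_size = train["SibSp"] + train["Parch"] + 1
print(train.groupby(family_size)["Survived"].mean().round(2))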
max(train["Fare"].value_counts())
# Fare
# Add fare brackets as a new categorical feature
train["Fare_cat"] = pd.cut(
train["Fare"], bins=[0.0, 9.0, 18.0, 27.0, 36.0, np.inf], labels=[1, 2, 3, 4, 5]
)
plt.hist(
train[train["Survived"] == 0]["Fare_cat"],
bins=5,
alpha=0.5,
color="red",
label="Dead",
)
plt.hist(
train[train["Survived"] == 1]["Fare_cat"],
bins=5,
alpha=0.5,
color="blue",
label="Survived",
)
plt.title("Survived")
plt.legend()
plt.xlabel("Fare")
plt.show()
# Passengers who paid a higher fare had a higher survival rate
train.drop(["Fare_cat"], axis=1, inplace=True)
train["Embarked"].value_counts()
# Embarked
plt.subplot(1, 3, 1)
train["Survived"][train["Embarked"] == "S"].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("S")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 2)
train["Survived"][train["Embarked"] == "C"].value_counts().sort_values().plot.bar(
figsize=(10, 3)
)
plt.xlabel("C")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.subplot(1, 3, 3)
train["Survived"][train["Embarked"] == "Q"].value_counts().plot.bar(figsize=(10, 3))
plt.xlabel("Q")
plt.xticks([0, 1], ["Dead", "Survived"])
plt.show()
# Survived(%) : C > Q > S
# "C" 장소에서 탑승한 사람은 생존 확률이 높음
# Drop Columns : Name, Ticket, Cabin
train.drop(["Name", "Ticket", "Cabin"], axis=1, inplace=True)
train.head()
test.drop(["Name", "Ticket", "Cabin"], axis=1, inplace=True)
test.head()
X_train = train.drop("Survived", axis=1)
Y_train = train["Survived"]
train.isnull().sum()
test.isnull().sum()
test["Fare"].fillna(test["Fare"].dropna().median(), inplace=True)
test.isnull().sum()
from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
min_max_scaler = MinMaxScaler(feature_range=(-1, 1))
num_pipeline = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())
cat_pipeline = make_pipeline(
SimpleImputer(strategy="most_frequent"), OneHotEncoder(handle_unknown="ignore")
)
transformer = ColumnTransformer(
[
("num", num_pipeline, ["Age"]),
("min-max", min_max_scaler, ["Fare"]),
("cat", cat_pipeline, ["Sex", "Embarked"]),
]
)
from sklearn.tree import DecisionTreeRegressor
tree_reg = make_pipeline(transformer, DecisionTreeRegressor(random_state=42))
tree_reg.fit(X_train, Y_train)
from sklearn.metrics import mean_squared_error
train_predictions = tree_reg.predict(X_train)
tree_rmse = mean_squared_error(Y_train, train_predictions, squared=False)
tree_rmse
final_predictions = tree_reg.predict(test)
output = pd.DataFrame(
{"PassengerId": test["PassengerId"], "Survived": final_predictions}
)
output.to_csv("submission.csv", index=False)
output
|
#
# # About Data
# Bob started his own mobile company. He doesn't know how to estimate the price of mobile phones. To solve this problem he collects sales data of mobile phones from various companies. He wants to find some relation between the features of a mobile phone and its price. In this case we need to predict a price range indicating how high the price is.
# # Table of Contents
#
# * [Step 1 | Import Libraries](#import)
# * [Step 2 | Read Dataset](#read)
# * [Step 3 | Train Dataset](#train)
# - [Step 3.1 | Dataset Basic Information](#basic)
# - [Step 3.2 | Description of the data](#description)
# # Step 1 | Import Libraries
import pandas as pd
import missingno as msno
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
#
# # Step 2 | Read Dataset
test = pd.read_csv("/kaggle/input/mobile-price-classification/test.csv")
train = pd.read_csv("/kaggle/input/mobile-price-classification/train.csv")
#
# # Step 3 | Train Dataset
train.head()
#
# | | **features** | **Information about features** |
# | :--- | :--- | :--- |
# | **1** | battery_power | Total energy a battery can store in one time measured in (mAh) |
# | **2** | blue | Has bluetooth or not |
# | **3** | clock_speed | Speed at which microprocessor executes instructions |
# | **4** | dual_sim | Has dual sim support or not |
# | **5** | fc | Front camera (Megapixels) |
# | **6** | four_g | Has 4G or not |
# | **7** | int_memory | Internal memory in (Gigabytes) |
# | **8** | m_dep | Mobile depth in (Cm) |
# | **9** | mobile_wt | Weight of mobile phone |
# | **10** | pc | Primary camera (Megapixels) |
# | **11** | px_height | Pixel resolution height |
# | **12** | px_width | Pixel resolution width |
# | **13** | ram | Random access memory in (Megabytes) |
# | **14** | sc_h | Screen height of mobile in (Cm) |
# | **15** | sc_w | Screen width of mobile in (Cm) |
# | **16** | talk_time | Longest time that a single battery charge will last when you are constantly talking on the phone |
# | **17** | three_g | Has 3G or not |
# | **18** | touch_screen | Has touch screen or not |
# | **19** | wifi | Has wifi or not |
# | **20** | n_cores | Number of cores of processor |
# | **21** | **price_range** | This is the Target variable with value of 0: (Low Cost), 1: (Medium Cost), 2: (High Cost), and 3: (Very High Cost) |
# #### Step 3.1 | Dataset Basic Information
train.info()
#
# * The train dataset contains 21 features and 2000 entries.
# * We have 21 variables including 20 independent variables and 1 dependent variable: price_range.
# * The features clock_speed and m_dep have a float64 type. The rest of the features have an int64 type.
# * There is no missing value in the dataset.
# * We have 8 categorical variables: n_cores , price_range, blue, dual_sim, four_g, three_g, touch_screen, wifi
# * We have 13 numeric variables: battery_power, clock_speed, fc, int_memory, m_dep, mobile_wt, pc, px_height, px_width, ram, talk_time, sc_h, sc_w
# #### Step 3.2 | Description of the data
train.describe()
#
#
# #### 1.3 Check NULL values
train.isnull().sum()
#
# #### 1.4 Missing Data Visualizations
#
msno.bar(train, color="orange")
#
# There are no missing data in the train dataset.
#
# #### 1.5 Duplicated Data
train.duplicated().sum()
#
# There aren't any duplicated data in the train dataset.
#
# #### 1.6 Correlation
train.corr()
fig = plt.gcf()
fig.set_size_inches(18, 12)
plt.title("Correlation Between The Features")
a = sns.heatmap(train.corr(), annot=True, fmt=".2f", linewidths=0.2)
a.set_xticklabels(a.get_xticklabels(), rotation=90)
a.set_yticklabels(a.get_yticklabels(), rotation=30)
plt.show()
plt.figure(figsize=(12, 7), dpi=100)
heatmap = sns.heatmap(
train.corr()[["price_range"]].sort_values(by="price_range", ascending=False),
vmin=-1,
vmax=1,
annot=True,
)
heatmap.set_title(
"Features Correlating with Price Range", fontdict={"fontsize": 12}, pad=18
)
#
# There is a strong correlation between ram and price_range. We can see a moderate correlation between 4G and 3G, fc and pc, px_height and px_width, sc_h and sc_w.
#
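# A quick numeric check of the statement above (a sketch): rank the features by the absolute
# value of their correlation with price_range.
print(train.corr()["price_range"].abs().sort_values(ascending=False).head(6))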
# #### 1.7 Visualization of Categorical Features
labels = ["Yes", "No"]
values = train["blue"].value_counts().values
fig1, ax1 = plt.subplots()
colors = ["orange", "gold"]
plt.title("Bluetooth")
ax1.pie(
values, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90, colors=colors
)
plt.show()
labels = ["Yes", "No"]
values = train["dual_sim"].value_counts().values
fig1, ax1 = plt.subplots()
colors = ["orange", "gold"]
plt.title("Dual SIM")
ax1.pie(
values, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90, colors=colors
)
plt.show()
labels = ["4G-supported", "Not supported"]
values = train["four_g"].value_counts().values
fig1, ax1 = plt.subplots()
colors = ["orange", "gold"]
plt.title("4G")
ax1.pie(
values, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90, colors=colors
)
plt.show()
labels = ["3G-supported", "Not supported"]
values = train["three_g"].value_counts().values
fig1, ax1 = plt.subplots()
colors = ["orange", "gold"]
plt.title("3G")
ax1.pie(
values, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90, colors=colors
)
plt.show()
labels = ["Yes", "No"]
values = train["touch_screen"].value_counts().values
fig1, ax1 = plt.subplots()
colors = ["orange", "gold"]
plt.title("Touch screen")
ax1.pie(
values, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90, colors=colors
)
plt.show()
labels = ["Yes", "No"]
values = train["wifi"].value_counts().values
fig1, ax1 = plt.subplots()
colors = ["orange", "gold"]
plt.title("WIFI")
ax1.pie(
values, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90, colors=colors
)
plt.show()
#
# #### 1.7 Visualization of Numerical Features
fig, axis = plt.subplots(7, 2, figsize=(20, 25))
sns.histplot(x=train["battery_power"], ax=axis[0, 0], color="orange")
sns.histplot(x=train["clock_speed"], ax=axis[0, 1], color="yellow")
sns.histplot(x=train["fc"], ax=axis[1, 0], color="gold")
sns.histplot(x=train["int_memory"], ax=axis[1, 1], color="orange")
sns.histplot(x=train["m_dep"], ax=axis[2, 0], color="yellow")
sns.histplot(x=train["mobile_wt"], ax=axis[2, 1], color="gold")
sns.histplot(x=train["pc"], ax=axis[3, 0], color="orange")
sns.histplot(x=train["px_height"], ax=axis[3, 1], color="yellow")
sns.histplot(x=train["px_width"], ax=axis[4, 0], color="gold")
sns.histplot(x=train["ram"], ax=axis[4, 1], color="orange")
sns.histplot(x=train["sc_h"], ax=axis[5, 0], color="yellow")
sns.histplot(x=train["sc_w"], ax=axis[5, 1], color="gold")
sns.histplot(x=train["talk_time"], ax=axis[6, 0], color="orange")
sns.histplot(x=train["price_range"], ax=axis[6, 1], color="yellow")
#
# #### 1.8 Box Plots
features = [
"battery_power",
"clock_speed",
"fc",
"int_memory",
"m_dep",
"mobile_wt",
"n_cores",
"px_height",
"px_width",
"ram",
"sc_h",
"talk_time",
]
def create_boxplot(data, x, y):
fig = px.box(data, x=x, y=y, color=x, title=f"Box Plots\n{x} vs {y}")
fig.show()
for feature in features:
create_boxplot(data=train, y=feature, x="price_range")
#
# #### 1.9 Scatter Matrix
features_sm = [
"battery_power",
"clock_speed",
"fc",
"int_memory",
"m_dep",
"mobile_wt",
"n_cores",
"px_height",
"px_width",
"ram",
"sc_h",
"talk_time",
]
scatter_matrix(train[features_sm], figsize=(30, 30))
plt.show()
#
# Each scatter plot in the matrix shows the correlation between a pair of numerical features. The main diagonal contains the histogram of each attribute.
#
# #### 2.0 Density Plots
features_dp = [
"battery_power",
"clock_speed",
"fc",
"int_memory",
"m_dep",
"mobile_wt",
"n_cores",
"px_height",
"px_width",
"ram",
"sc_h",
"sc_w",
"talk_time",
]
def displot(data, x):
sns.displot(
data=data,
x=x,
hue="price_range",
kde=True,
color="orange",
bins=40,
height=8,
col="price_range",
)
    plt.show()
for feature in features_dp:
displot(data=train, x=feature)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv(
"/kaggle/input/exploring-wealth-forbes-richest-people-dataset/forbes_richman.csv",
encoding="ISO-8859-1",
)
df
df1 = df.head(20)
df1
df2 = df1.drop(["Rank", "Age", "Country", "Source", "Industry"], axis=1)
df2
df2["Net Worth"] = pd.to_numeric(
df2["Net Worth"].str.replace("$", " ").str.replace("B", " ")
)
df2["Net Worth"] = df2["Net Worth"].astype(float)
df2
fig, ax1 = plt.subplots(figsize=(25, 10))
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.plot(
df2["Name"].tail(20),
df2["Net Worth"].tail(20),
color="#cddafd",
linewidth=3,
alpha=1,
marker="*",
markersize=20,
mec="k",
linestyle=(0, (1, 10)),
)
ax1.set_xticklabels(df["Name"], rotation=45, ha="right")
ax1.set_title("Top 20 Rich people in world", fontsize=20, fontweight="bold")
ax1.set_xlabel("Name", fontsize=20, fontweight="bold")
ax1.set_ylabel("Net worth", fontsize=20, fontweight="bold")
fig, ax1 = plt.subplots(figsize=(25, 10))
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.plot(
df2["Name"].tail(20),
df2["Net Worth"].tail(20),
color="#cddafd",
linewidth=3,
alpha=0.7,
marker="*",
markersize=15,
mec="k",
linestyle=(0, (1, 10)),
)
ax1.fill_between(
df2["Name"], df2["Net Worth"], color="#3a0ca3", alpha=0.1, edgecolor="black"
)
ax1.set_title("Top 20 Rich people in world", fontsize=20, fontweight="bold")
ax1.set_xlabel("Name", fontsize=20, fontweight="bold")
ax1.set_ylabel("Net worth", fontsize=20, fontweight="bold")
ax1.tick_params(axis="both", labelsize=20)
ax1.set_xticklabels(df["Name"], rotation=45, ha="right")
fig, ax1 = plt.subplots(figsize=(25, 15))
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.plot(
df2["Name"].tail(20),
df2["Net Worth"].tail(20),
color="black",
linewidth=3,
alpha=0.7,
marker="*",
markersize=20,
mec="k",
linestyle=(0, (1, 10)),
)
ax1.fill_between(df2["Name"], df2["Net Worth"], color="#3a0ca3", alpha=0.1)
for i, temp in enumerate(df2["Net Worth"].tail(20)):
ax1.text(
df2["Name"][i],
temp + 1,
str(temp) + "B",
ha="center",
va="bottom",
fontsize=20,
fontweight="bold",
)
ax1.set_title("Top 20 Rich people in world", fontsize=20, fontweight="bold")
ax1.set_xlabel("Name", fontsize=20, fontweight="bold")
ax1.set_ylabel("Net worth", fontsize=20, fontweight="bold")
ax1.set_xticks(df2["Name"])
ax1.tick_params(axis="both", labelsize=20)
ax1.set_xticklabels(df2["Name"], rotation=45, ha="right", color="#480ca8")
# Set y-axis ticks from 0 to 300 with a step of 50
ax1.set_yticks(range(0, 301, 50))
ax1.set_ylim([0, 300])
fig.patch.set_facecolor("#ffbe0b")
plt.show()
fig, ax1 = plt.subplots(figsize=(25, 15))
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.plot(
df2["Name"].tail(20),
df2["Net Worth"].tail(20),
color="black",
linewidth=3,
alpha=0.7,
marker="*",
markersize=20,
mec="k",
linestyle=(0, (1, 10)),
)
ax1.fill_between(df2["Name"], df2["Net Worth"], color="#3a0ca3", alpha=0.1)
for i, temp in enumerate(df2["Net Worth"].tail(20)):
ax1.text(
df2["Name"][i],
temp + 1,
str(temp) + "B",
ha="center",
va="bottom",
fontsize=20,
fontweight="bold",
)
ax1.set_title("Top 20 Rich people in world", fontsize=20, fontweight="bold")
ax1.set_xlabel("Name", fontsize=20, fontweight="bold")
ax1.set_ylabel("Net worth", fontsize=20, fontweight="bold")
ax1.set_xticks(df2["Name"])
ax1.tick_params(axis="both", labelsize=20)
ax1.set_xticklabels(df2["Name"], rotation=45, ha="right", color="#480ca8")
# Set y-axis ticks from 0 to 300 with a step of 50
ax1.set_yticks(range(0, 301, 50))
ax1.set_ylim([0, 300])
ax1.grid(True)
fig.patch.set_facecolor("#ffbe0b")
plt.show()
fig, ax1 = plt.subplots(figsize=(25, 15))
color1 = [
"#f72585",
"#b5179e",
"#7209b7",
"#560bad",
"#480ca8",
"#3a0ca3",
"#3f37c9",
"#4361ee",
"#4895ef",
"#4cc9f0",
"#0077b6",
"#00b4d8",
"#90e0ef",
"#64dfdf",
"#56cfe1",
"#64dfdf",
"#80ffdb",
"#d9ed92",
"#b5e48c",
"#b5e48c",
]
# Plot the bar chart
ax1.bar(
df2["Name"].tail(20),
df2["Net Worth"].tail(20),
color=color1,
alpha=0.7,
edgecolor="black",
)
# Display the net worth values on top of the bars
for i, temp in enumerate(df2["Net Worth"].tail(20)):
ax1.text(
df2["Name"][i],
temp + 1,
str(temp) + "B",
ha="center",
va="bottom",
rotation=45,
fontsize=20,
fontweight="bold",
)
ax1.set_title("Top 20 Rich people in world", fontsize=20, fontweight="bold")
ax1.set_xlabel("Name", fontsize=20, fontweight="bold")
ax1.set_ylabel("Net worth", fontsize=20, fontweight="bold")
ax1.set_xticks(df2["Name"].tail(20))
ax1.tick_params(axis="both", labelsize=20)
ax1.set_xticklabels(df2["Name"].tail(20), rotation=45, ha="right", color="#480ca8")
# Set y-axis ticks from 0 to 300 with a step of 50
ax1.set_yticks(range(0, 301, 50))
ax1.set_ylim([0, 300])
fig.patch.set_facecolor("#ffafcc")
plt.show()
df
df3 = df.drop(["Rank", "Name", "Age", "Country", "Source"], axis=1)
df3
df3["Net Worth"] = pd.to_numeric(
df3["Net Worth"].str.replace("$", " ").str.replace("B", " ")
)
df3
df3
df4 = df3.groupby("Industry").sum().reset_index()
df4
df4 = df4.sort_values(by="Net Worth", ascending=False).reset_index()
df4
fig, ax1 = plt.subplots(figsize=(25, 25))
color1 = [
"#EA698B",
"#D55D92",
"#C05299",
"#AC46A1",
"#973AA8",
"#822FAF",
"#6D23B6",
"#6411AD",
"#6411AD",
"#571089",
"#571089",
"#47126B",
"#461873",
"#8013bd",
"#8b26c3",
"#8c07dd",
"#f7ebfd",
]
# Plot the bar chart
ax1.bar(
df4["Industry"].tail(20),
df4["Net Worth"].tail(20),
color=color1,
alpha=0.7,
edgecolor="#ffff00",
)
# Display the net worth values on top of the bars
for i, temp in enumerate(df4["Net Worth"].tail(20)):
ax1.text(
df4["Industry"][i],
temp + 1,
str(temp) + "B",
ha="center",
va="bottom",
rotation=45,
fontsize=15,
fontweight="bold",
)
ax1.set_title("Top Rich industries in the world", fontsize=20, fontweight="bold")
ax1.set_xlabel("Industry", fontsize=20, fontweight="bold")
ax1.set_ylabel("Net worth", fontsize=20, fontweight="bold")
ax1.set_xticks(df4["Industry"])
ax1.tick_params(axis="both", labelsize=20)
ax1.set_xticklabels(
df4["Industry"], rotation=45, ha="right", color="#fcd72c", fontweight="bold"
)
# Set y-axis ticks from 0 to 300 with a step of 50
# ax1.set_yticks(range(0, 301, 50))
# ax1.set_ylim([0, 300])
fig.patch.set_facecolor("#696eff")
plt.show()
|
import numpy as np
import pandas as pd
import glob
from PIL import Image
import matplotlib.pyplot as plt
train_df_path = "/kaggle/input/vesuvius-challenge-ink-detection/train"
test_df_path = "/kaggle/input/vesuvius-challenge-ink-detection/test"
img_path = train_df_path + "/1/surface_volume"
f_p = glob.glob(img_path + "/*.tif")
f_p[0]
im = np.array(Image.open(f_p[0]))
im = im / im.max()
im = im.astype(np.float32)
ax = plt.imshow(im, cmap="gray")
def plot_image_hist(image):
pixels = image.ravel() # flatten
nonzero_pixels = pixels[
np.nonzero(pixels)
    ]  # keep only the non-zero pixels, since those are what matter here
plt.hist(nonzero_pixels, 100000)
plt.show()
plot_image_hist(im) # to find which shades are the most frequent
# seems most frequent shades are between 0.2 & 0.4, closer to 0.4
flat_im = im.flatten()
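# Hedged follow-up sketch: mask the pixels in the 0.2-0.4 intensity band highlighted
# by the histogram above, just to see where those dominant shades sit in the slice.
band_mask = (im >= 0.2) & (im <= 0.4)
plt.figure()
plt.imshow(band_mask, cmap="gray")
plt.title("Pixels with intensity in [0.2, 0.4]")
plt.show()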
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import classification_report
# custom function to plot
def boxplot_data(X, Y):
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(10, 6))
axs = axs.flatten()
for i, col in enumerate(X.columns):
sns.boxplot(x=X[col], ax=axs[i], y=Y, orient="h")
# Set labels and title
axs[i].set_ylabel("{}".format(col))
axs[i].set_xlabel("Target")
plt.subplots_adjust(wspace=0.5)
data = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
# First look into the data
display(data.head(5))
display(data.info())
display(data.describe())
# Extract X and y
X = data.drop(
["target", "id"], axis=1
) # drop id since it is not a real feature and also split the target to y
y = data["target"]
boxplot_data(X, y)
# KDE plot using seaborn
sns.pairplot(data, hue="target", diag_kind="kde", height=1.5)
# Feature Engineering
# Custom function for feature engineering step
def create_new_features(X):
new_columns = [
"calcium_to_urea_ratio",
"ion_product",
"electrolyte_balance",
"osmolality_to_sg_ratio",
"osmo_density",
]
X_new = pd.DataFrame(columns=new_columns)
X_new[new_columns[0]] = X["calc"] / X["urea"]
X_new[new_columns[1]] = X["calc"] * X["urea"]
X_new[new_columns[2]] = X["cond"] / (-(10 ** X["ph"]))
X_new[new_columns[3]] = X["osmo"] / X["gravity"]
X_new[new_columns[4]] = X["osmo"] * X["gravity"]
X_combined = pd.concat([X, X_new], axis=1)
X_combined = X_combined.assign(Mean=X_combined.mean(axis=1))
X_combined = X_combined.assign(Std=X_combined.std(axis=1))
X_combined = X_combined.assign(Min=X_combined.min(axis=1))
X_combined = X_combined.assign(Max=X_combined.max(axis=1))
return X_combined
# Do some feature engineering
X_combined = create_new_features(X)
# Split training and test data
X_train, X_test, y_train, y_test = train_test_split(
X_combined, y, test_size=0.2, random_state=42
)
# Using RandomForestClassifier, chose the best one via GridSearch
rfc = RandomForestClassifier(random_state=42)
param_grid = {
"n_estimators": [10, 50, 100, 150],
"max_depth": [None, 10, 20],
"min_samples_split": [2, 5, 10, 15, 30],
"min_samples_leaf": [1, 2, 4, 6],
}
grid_search = GridSearchCV(
estimator=rfc, param_grid=param_grid, scoring="roc_auc", cv=5, n_jobs=-1
)
# Fit the GridSearchCV object to the data
grid_search.fit(X_train, y_train)
best_rfc = grid_search.best_estimator_
print("Best parameters found: ", grid_search.best_params_)
print("Best score found: ", grid_search.best_score_)
best_rfc.fit(X_train, y_train)
# Evaluate the model based on test data
y_pred = best_rfc.predict(X_test)
print(classification_report(y_test, y_pred))
# Prepare submission
X_subm_test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
patient_id = X_subm_test["id"]
X_subm_test.drop(["id"], axis=1, inplace=True)
X_combined_test = create_new_features(X_subm_test)
y_subm_pred = best_rfc.predict(X_combined_test)
submit = pd.DataFrame({"id": patient_id, "target": y_subm_pred})
submit.to_csv("result.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import cv2
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
path = "/kaggle/input/aptos2019-blindness-detection/"
df = pd.read_csv(path + "train.csv")
df.head()
df["diagnosis"].hist()
df["diagnosis"].value_counts()
# The goal here is to collect the image file names in train_images into a list; without it we cannot read the images.
files = os.listdir(path + "train_images")
files
img_list = []
from tqdm import tqdm_notebook as tqdm
for i in tqdm(files):
image = cv2.imread(path + "train_images/" + i)
image = cv2.resize(image, (400, 400))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
kopya = image.copy()
kopya = cv2.cvtColor(kopya, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(kopya, (5, 5), 0)
thresh = cv2.threshold(blur, 10, 255, cv2.THRESH_BINARY)[1]
kontur = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
kontur = kontur[0][0]
kontur = kontur[:, 0, :]
x1 = tuple(kontur[kontur[:, 0].argmin()])[0]
y1 = tuple(kontur[kontur[:, 1].argmin()])[1]
x2 = tuple(kontur[kontur[:, 0].argmax()])[0]
y2 = tuple(kontur[kontur[:, 1].argmax()])[1]
x = int(x2 - x1) * 4 // 50
y = int(y2 - y1) * 5 // 50
kopya2 = image.copy()
if x2 - x1 > 100 and y2 - y1 > 100:
kopya2 = kopya2[y1 + y : y2 - y, x1 + x : x2 - x]
kopya2 = cv2.resize(kopya2, (400, 400))
lab = cv2.cvtColor(kopya2, cv2.COLOR_RGB2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=((8, 8)))
cl = clahe.apply(l)
limg = cv2.merge((cl, a, b))
son = cv2.cvtColor(limg, cv2.COLOR_LAB2RGB)
med_son = cv2.medianBlur(son, 3)
arka_plan = cv2.medianBlur(son, 37)
maske = cv2.addWeighted(med_son, 1, arka_plan, -1, 255)
son_img = cv2.bitwise_and(maske, med_son)
img_list.append(son_img)
plt.imshow(img_list[6])
fig = plt.figure(figsize=(20, 12))
for i in range(12):
img = img_list[i]
fig.add_subplot(3, 4, i + 1)
plt.imshow(img)
plt.tight_layout()
# Align the labels with the order in which the images were read (os.listdir order),
# so that x_train[i] and y_train[i] refer to the same image
ordered_diagnosis = df.set_index("id_code").loc[[f.split(".")[0] for f in files], "diagnosis"]
y_train = pd.get_dummies(ordered_diagnosis).values
y_train[1]
# Build cumulative (ordinal) targets: column i is 1 when the diagnosis is >= i
y_train_son = y_train.astype("uint8")
for i in range(3, -1, -1):
    y_train_son[:, i] = np.logical_or(y_train[:, i], y_train_son[:, i + 1])
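# Quick sanity check (illustrative): a sample with diagnosis d should get an ordinal
# target whose first d + 1 entries are 1, e.g. diagnosis 2 -> [1, 1, 1, 0, 0]
print(y_train_son[:3])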
x_train = np.array(img_list)
x_train.shape
y_train_son.shape
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from efficientnet.keras import EfficientNetB5
from keras.models import Sequential
from keras import layers
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train_son, test_size=0.15, random_state=2019, shuffle=True
)
x_train.shape, x_val.shape, y_train.shape, y_val.shape
datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
data_generator = datagen.flow(x_train, y_train, batch_size=2, seed=2020)
model = Sequential()
model.add(
EfficientNetB5(weights="imagenet", include_top=False, input_shape=(400, 400, 3))
)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(5, activation="sigmoid"))
model.compile(
loss="binary_crossentropy", optimizer=Adam(lr=0.00005), metrics=["accuracy"]
)
lr = ReduceLROnPlateau(
monitor="val_loss", patience=3, verbose=1, mode="auto", factor=0.25, min_lr=0.000001
)
history = model.fit_generator(
data_generator,
steps_per_epoch=1000,
epochs=1,
validation_data=(x_val, y_val),
callbacks=[lr],
)
|
import pandas as pd
df = pd.read_csv("../input/london-housing-dataset/London Housing Data.csv")
# Explore data
df.head()
# df.columns
df.shape
df.dtypes
df.describe()
df.info()
# Clean Data
df.area.unique()
# Check duplicates
df[df.duplicated()]
# Fill null values in no_of_crimes with 0
df.no_of_crimes.fillna(0, inplace=True)
df.isnull().sum()
# Fill null values in houses_sold by mean
df.houses_sold
df.houses_sold = df.houses_sold.fillna(df.houses_sold.mean())
df.isnull().sum()
# Convert datatypes into suitable one for analysis
df.no_of_crimes = df.no_of_crimes.astype("int64")
df.date = pd.to_datetime(df.date)
df.date
df.dtypes
df["month"] = pd.DatetimeIndex(df["date"]).month
df["year"] = pd.DatetimeIndex(df["date"]).year
df.month
df.columns
df.head(4)
df = df[
["year", "month", "area", "average_price", "code", "houses_sold", "no_of_crimes"]
]
df.columns
df.no_of_crimes
# Show all the records where 'No. of Crimes' is 0. And, how many such records are there?
df[df.no_of_crimes == 0]
len(df[df.no_of_crimes == 0])
df.no_of_crimes.value_counts()
# What is the maximum & minimum 'average_price' per year in England?
temp = df[df.area == "england"]
temp.groupby("year")["average_price"].max()
temp.groupby("year")["average_price"].min()
# What is the Maximum & Minimum No. of Crimes recorded per area?
df.groupby("area")["no_of_crimes"].max()
# [df.no_of_crimes.max()]
df.groupby("area")["no_of_crimes"].max()
# Show the total count of records of each area, where average price is less than 100000
temp = df[df.average_price < 100000]
temp.area.value_counts()
|
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)).astype("float32")
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1)).astype("float32")
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape=(28, 28, 1), activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(15, (3, 3), activation="relu"))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(50, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Large CNN Error: %.2f%%" % (100 - scores[1] * 100))
|
# # **Predicting Cancer Data With Logistic Regression**
# ## 🏆 Accuracy: 96.50%
# - **We will apply the Logistic Regression algorithm to a data set with 570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant.**
# - **The cancer data we have contains 2 types of cancer 1. benign cancer (B) and 2. malignant cancer (M).**
# #### 🔗 Github Links: https://github.com/Prometheussx/Cancer_Data_Classification
# ## Content:
# * [**1.** Library and Input File](#1)
# * [**2.** Data Loading and Editing](#2)
# * [**3.** Normalization ](#3)
# * [**4.** Train Test Split](#4)
# * [**5.** Initialize Weights and Bias](#5)
# * [**6.** Sigmoid Function](#6)
# * [**7.** Forward Backward Propagation](#7)
# * [**8.** Updating(Learning) Parameters](#8)
# * [**9.** Prediction](#9)
# * [**10.** Logistic Regression Algorithm](#10)
# * [**11.** Model Result](#11)
# ## Computation graph of logistic regression
# **To explain step by step**
# * Parameters are weight and bias.
# * Weights: coefficients of each feature
# * Bias: intercept
# * z = w^T x + b, i.e. z = b + x1*w1 + x2*w2 + ... + x30*w30
# * Predict Data = y_head
# * y_head = sigmoid(z)
# * The sigmoid function reduces the obtained prediction data, i.e. z's, to a value between 0 and 1
# 
# # 1. Library and Input File
# Library
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # Graphic Draw
# Input File
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
#
# # 2.Data Loading and Editing
# Data Loading
data = pd.read_csv("/kaggle/input/cancer-data/Cancer_Data.csv")
print(data.info())
# * ## Why are we editing data ?
# * ### why delete "id" and "unnamed: 32"?
# * **"Untitled: 32" and "id" values are redundant for the data set.**
# * **Can manipulate the model we will train.**
# * **A struct consisting entirely of nulls such as "Unnamed: 32" may create an error margin for us in the future.**
# * ### why do we make "M" and "B" in "diagnosis" 0 and 1 ?
# * **Objects are not used in modeling, so we convert them to numeric type.**
# * **Since we will normalize all of the data, it is cleaner for the class labels to be 0 and 1.**
# Data Editing
data.drop(["Unnamed: 32", "id"], axis=1, inplace=True)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
print(data.info())
# Split the data by x and y
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis=1)
print("X Data graph")
pd.DataFrame(x_data)
print("Y Data Graph")
pd.DataFrame(y)
#
# # 3. Normalization
# * **Normalization rescales very large and very small values into the range 0 to 1, so no single feature can dominate the prediction model and all features end up on a comparable scale.**
# 
x = (x_data - np.min(x_data)) / (
np.max(x_data) - np.min(x_data)
).values # Normalization Formula
pd.DataFrame(x)
#
# # 4 . Train Test Split
# * **80% Train Data (x_train, y_train)**
#
# * **20% Test Data (x_test, y_test)**
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
# ## Why did we take the transpose of the matrices?
# * **The matrix operations used later require compatible shapes (n x m times m x n), so we transpose some of the matrices here to put them in the right form**
# Matrix transpose
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
#
# # 5. Initialize Weights and Bias
# * **We use the common defaults of 0.01 for the weights and 0.0 for the bias. To get one weight per feature, np.full((dimension, 1), 0.01) builds a column vector of 0.01s with one entry per feature.**
# # 6. Sigmoid Function
# * **Ensures that the prediction values are between 0 and 1**;
# * **It gives probabilistic result**
# * **It is derivative so we can use it in gradient descent algorithm.**
# 
def initialize_weights_and_bias(dimension):
w = np.full((dimension, 1), 0.01)
b = 0.0
return w, b
def sigmoid(z):
y_head = 1 / (1 + np.exp(-z))
return y_head
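# Tiny illustration (hypothetical inputs): the sigmoid squashes any real z into (0, 1)
print(sigmoid(np.array([-5.0, 0.0, 5.0])))  # approximately [0.0067, 0.5, 0.9933]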
#
# # 7. Forward Backward Propagation
# * ## Forward
# * **The forward pass runs logistic regression forwards: it computes the predictions and the resulting cost.**
# * ## Backward
# * **The backward pass uses the derivative (the slope of the tangents to the cost curve) to find the average change needed in the weights and bias; the closer the cost is to 0, the better the weights and bias are.**
#
# 
#
# * ## Loss
# * **The loss is the error of a single prediction**
#
# 
def forward_bacward_propagation(w, b, x_train, y_train):
# FORWARD
z = np.dot(w.T, x_train) + b
y_head = sigmoid(z)
loss = -y_train * np.log(y_head) - (1 - y_train) * np.log(1 - y_head)
cost = (np.sum(loss)) / x_train.shape[1]
# BACKWARD
derivative_weight = (np.dot(x_train, ((y_head - y_train).T))) / x_train.shape[1]
derivative_bias = np.sum(y_head - y_train) / x_train.shape[1]
gradients = {
"derivative_weight": derivative_weight,
"derivative_bias": derivative_bias,
}
return cost, gradients
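# Toy illustration (hypothetical numbers): one forward/backward pass on 3 samples with
# 2 features, just to confirm the shapes and that the cost is a finite scalar.
toy_w, toy_b = initialize_weights_and_bias(2)
toy_x = np.array([[0.1, 0.5, 0.9], [0.2, 0.4, 0.8]])  # shape (2, 3): features x samples
toy_y = np.array([0, 1, 1])
toy_cost, toy_grads = forward_bacward_propagation(toy_w, toy_b, toy_x, toy_y)
print(toy_cost, toy_grads["derivative_weight"].shape)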
#
# # 8. Updating(Learning) Parameters
# * **We update by taking the derivatives of weight and bias in accordance with the cost values, so we can get new results every time.**
def update(
w, b, x_train, y_train, learning_rate, number_of_iteration
):  # number_of_iteration sets how many update passes to run; chosen by trial and error
cost_list = []
cost_list2 = []
index = []
# updating(Learning) parameters is number_of_iteration times
for i in range(number_of_iteration):
# make forward and backward propagation and find cost and gradients
cost, gradients = forward_bacward_propagation(w, b, x_train, y_train)
cost_list.append(cost)
# lets update
w = w - learning_rate * gradients["derivative_weight"]
b = b - learning_rate * gradients["derivative_bias"]
if i % 10 == 0:
cost_list2.append(cost)
index.append(i)
print("Cost after iteration %i: %f" % (i, cost))
    # we update (learn) the parameters: weights and bias
parameters = {"weight": w, "bias": b}
plt.plot(index, cost_list2)
plt.xticks(index, rotation="vertical")
plt.xlabel("Number of İteration")
plt.ylabel("Cost")
plt.show()
return parameters, gradients, cost_list
#
# # 9. Prediction
# * **Classifies all z-values obtained one by one according to whether they are less than 0.5**
# * **if z is bigger than 0.5 our prediction is sign one (y_head = 1)**
# * **if z is smaller than 0.5 our prediction is sign zero (y_head = 0)**
#
def predict(w, b, x_test):
# x_test is a input for forward propagation
z = sigmoid(np.dot(w.T, x_test) + b)
Y_prediction = np.zeros((1, x_test.shape[1]))
# if z is bigger than 0.5 our prediction is sign one (y_head = 1)
    # if z is smaller than 0.5 our prediction is sign zero (y_head = 0)
for i in range(z.shape[1]):
if z[0, i] <= 0.5:
Y_prediction[0, i] = 0
else:
Y_prediction[0, i] = 1
return Y_prediction
#
# # 10. Logistic Regression Algorithm
# * **In this study, we build the classifier ourselves, without using any model library.**
# * **Print train/test Errors**
# * **If the truth is 1 and the prediction is 0, the absolute error is 1; its mean contribution is 100%, and 100 - 100 = 0% accuracy for that sample, i.e. the prediction is wrong**
# * **If both the truth and the prediction are 0, the absolute error is 0; its contribution is 0%, and 100 - 0 = 100% accuracy for that sample, i.e. the prediction is correct**
def logistic_regression(
x_train, y_train, x_test, y_test, learning_rate, num_iterations
):
# initialize
dimension = x_train.shape[0] # that is 30
w, b = initialize_weights_and_bias(dimension)
# do not change learning rate
parameters, gradients, cost_list = update(
w, b, x_train, y_train, learning_rate, num_iterations
)
y_prediction_test = predict(
parameters["weight"], parameters["bias"], x_test
) # predict
# Print train/test Errors
print(
"test accuracy: {} %".format(
100 - np.mean(np.abs(y_prediction_test - y_test)) * 100
)
)
logistic_regression(
x_train, y_train, x_test, y_test, learning_rate=1, num_iterations=300
)
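# Optional cross-check (assumes scikit-learn is available): score the same split with
# sklearn's LogisticRegression, for comparison with the from-scratch implementation.
from sklearn.linear_model import LogisticRegression as SkLogisticRegression
sk_lr = SkLogisticRegression(max_iter=1000)
sk_lr.fit(x_train.T, y_train.T)
print("sklearn test accuracy:", sk_lr.score(x_test.T, y_test.T))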
|
# # State farm distracted driver detection
# Importing modules to use
import os.path as osp
from glob import glob
import random
import time
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
import albumentations as A
from albumentations.pytorch import ToTensorV2
def fix_seed(seed):
# random
random.seed(seed)
# Numpy
np.random.seed(seed)
# Pytorch
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
# Fixed
SEED = 42
fix_seed(SEED)
# Define description for each class
activity_map = {
"c0": "Safe driving",
"c1": "Texting - right",
"c2": "Talking on the phone - right",
"c3": "Texting - left",
"c4": "Talking on the phone - left",
"c5": "Operating the radio",
"c6": "Drinking",
"c7": "Reaching behind",
"c8": "Hair and makeup",
"c9": "Talking to passenger",
}
# Define path
data_dir = "../input/state-farm-distracted-driver-detection"
csv_file_path = osp.join(data_dir, "driver_imgs_list.csv")
df = pd.read_csv(csv_file_path)
# read csv file
df.head(5)  # show the first 5 lines
# # EDA
by_drivers = df.groupby("subject") # Groupby driver
unique_drivers = by_drivers.groups.keys() # List of driver names
# Number of drivers in dataset
print("unique drivers: ", len(unique_drivers))
# Average number of images per driver
print("mean of images: ", round(df.groupby("subject").count()["classname"].mean()))
train_file_num = len(
glob(osp.join(data_dir, "imgs/train/*/*.jpg"))
) # Number of training data
test_file_num = len(glob(osp.join(data_dir, "imgs/test/*.jpg"))) # Number of test data
category_num = len(df["classname"].unique()) # Number of categories
print("train_file_num: ", train_file_num)
print("test_file_num: ", test_file_num)
print("category_num: ", category_num)
# Number of images per class
px.histogram(
df, x="classname", color="classname", title="Number of images by categories "
)
drivers_id = pd.DataFrame((df["subject"].value_counts()).reset_index())
drivers_id.columns = ["driver_id", "Counts"]
px.histogram(
drivers_id,
x="driver_id",
y="Counts",
color="driver_id",
title="Number of images by subjects ",
)
# Histogram of number of images per driver
# do it yourself
px.histogram(df, x="subject", color="subject", title="Number of images by subjects")
# Draw data for each class
# do it yourself
plt.figure(figsize=(12, 20))
for i, (key, value) in enumerate(activity_map.items()):
image_dir = osp.join(data_dir, "imgs/train", key, "*.jpg")
image_path = glob(image_dir)[0]
image = cv2.imread(image_path)[:, :, (2, 1, 0)]
plt.subplot(5, 2, i + 1)
plt.imshow(image)
plt.title(value)
# # Preprocessing
# Add file path column
# Implement by yourself
df["file_path"] = df.apply(
lambda x: osp.join(data_dir, "imgs/train", x.classname, x.img), axis=1
)
# Add column converting correct answer label to numbers
# do it yourself
df["class_num"] = df["classname"].map(lambda x: int(x[1]))
df.head(5)
# # Creation of dataset
class DataTransform:
"""Image and annotation preprocessing classes. It behaves differently during training and testing.
Attributes
---------
input_size : int
The size of the resized image.
color_mean: (R,G,B)
Average value for each color channel.
color_std :(R,G,B)
Standard deviation for each color channel.
"""
def __init__(self, input_size, color_mean, color_std):
self.data_transform = {
# Implement train by myself
"train": A.Compose(
[
A.HorizontalFlip(p=0.5),
A.Rotate(-10, 10),
                    A.Resize(input_size, input_size),  # resize to (input_size, input_size)
                    A.Normalize(color_mean, color_std),  # standardize the colour channels
                    ToTensorV2(),  # convert to a tensor
]
),
"val": A.Compose(
[
                    A.Resize(input_size, input_size),  # resize to (input_size, input_size)
                    A.Normalize(color_mean, color_std),  # standardize the colour channels
                    ToTensorV2(),  # convert to a tensor
]
),
}
def __call__(self, phase, image):
"""
Parameters
----------
phase : 'train' or 'val'
            Specifies which preprocessing mode to use.
"""
transformed = self.data_transform[phase](image=image)
return transformed["image"]
# Import the modules used
import os.path as osp
from glob import glob
import random
import time
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
import albumentations as A
from albumentations.pytorch import ToTensorV2
class Dataset(data.Dataset):
"""
Attributes
----------
df : DataFrame
        DataFrame with the class_num and file_path columns
    phase : 'train' or 'val'
        Selects training or validation mode.
    transform : object
        Instance of the preprocessing class
"""
def __init__(self, df, phase, transform):
self.df = df
self.phase = phase
self.transform = transform
    def __len__(self):
        """Return the number of images."""
        return len(self.df)
    def __getitem__(self, index):
        """Return the preprocessed image as a Tensor together with its label."""
image = self.pull_item(index)
return image, self.df.iloc[index]["class_num"]
def pull_item(self, index):
"""画像のTensor形式のデータを取得する"""
# 自分で実装
# 1. 画像読み込み
image_path = self.df.iloc[index]["file_path"]
image = cv2.imread(image_path)[:, :, (2, 1, 0)]
# 2. 前処理を実施
return self.transform(self.phase, image)
# Quick check
# mean and standard deviation of the (RGB) colour channels
color_mean = (0.485, 0.456, 0.406)
color_std = (0.229, 0.224, 0.225)
input_size = 256
# Split the data
df_train, df_val = train_test_split(df, stratify=df["subject"], random_state=SEED)
# implemented by hand
# Create the datasets
train_dataset = Dataset(
df_train,
phase="train",
transform=DataTransform(
input_size=input_size, color_mean=color_mean, color_std=color_std
),
)
val_dataset = Dataset(
df_val,
phase="val",
transform=DataTransform(
input_size=input_size, color_mean=color_mean, color_std=color_std
),
)
# implemented by hand
# Example: retrieve one sample
image, label = train_dataset[0]
plt.imshow(image.permute(1, 2, 0))
plt.title(label)
plt.show()
# # Creating the DataLoaders
# Create the data loaders
batch_size = 64
# implemented by hand
train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
# Collect them in a dictionary
dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
# implemented by hand
# check that it works
batch_iterator = iter(dataloaders_dict["val"])  # convert the dataloader to an iterator
images, labels = next(batch_iterator)  # take the first batch
print(images.size())  # torch.Size([64, 3, 256, 256])
print(labels.size())  # torch.Size([64])
# # Building the model
from efficientnet_pytorch import EfficientNet
model = EfficientNet.from_pretrained("efficientnet-b0", num_classes=10)
# implemented by hand (practice)
# class Model(nn.Module):
# def __init__(self, num_classes=10):
# super(Model, self).__init__()
# self.net = nn.Sequential(
# nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=2, padding=1),
# nn.BatchNorm2d(num_features=64),
# nn.ReLU(),
# nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3, stride=2, padding=1),
# nn.BatchNorm2d(num_features=16),
# nn.ReLU(),
# nn.Flatten(),
# nn.Linear(in_features=65536, out_features=num_classes)
# )
# def forward(self, x):
# output = self.net(x)
# return output
#
# model = Model(num_classes=10)
# # Training
# implemented by hand (practice)
# train_dataset = Dataset(df_train.iloc[:1000], phase="train", transform=DataTransform(
# input_size=input_size, color_mean=color_mean, color_std=color_std))
# val_dataset = Dataset(df_val.iloc[:1000], phase="val", transform=DataTransform(
# input_size=input_size, color_mean=color_mean, color_std=color_std))
# train_dataloader = data.DataLoader(
# train_dataset, batch_size=batch_size, shuffle=True)
# val_dataloader = data.DataLoader(
# val_dataset, batch_size=batch_size, shuffle=False)
# # Collect them in a dictionary
# dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# optimizer = optim.Adam(model.parameters(), lr=1e-3)
# criterion = nn.CrossEntropyLoss()
# model = model.to(device)
# for epoch in range(5):
# print(f'epoch: {epoch+1}')
# print('-'*50)
# # train
# model.train()
# train_loss = 0
# for images, labels in train_dataloader:
# images = images.to(device)
# labels = labels.to(device)
# optimizer.zero_grad()
# outputs = model(images)
# loss = criterion(outputs, labels)
# loss.backward()
# optimizer.step()
# train_loss += loss.item() / len(images)
# print(f'epoch train loss: {train_loss: .4f}')
# # valid
# model.eval()
# valid_loss = 0
# preds = []
# trues = []
# for images, labels in val_dataloader:
# images = images.to(device)
# labels = labels.to(device)
# with torch.no_grad():
# outputs = model(images)
# loss = criterion(outputs, labels)
# valid_loss = loss.item() / len(images)
# trues += list(labels.cpu().numpy())
# preds += list(outputs.argmax(axis=1).cpu().numpy())
# accuracy = accuracy_score(trues, preds)
# print(f'epoch valid loss: {valid_loss: .4f} accuracy: {accuracy: .4f}\n')
# Save a checkpoint
def save_checkpoint(model, optimizer, scheduler, epoch, path):
torch.save(
{
"epoch": epoch,
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
path,
)
# Load a checkpoint
def load_checkpoint(model, optimizer, scheduler, path):
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
# Function that trains the model
def train_model(
model,
dataloaders_dict,
criterion,
scheduler,
optimizer,
device,
num_epochs,
save_path,
):
    # move the network to the GPU
model.to(device)
best_val_loss = float("inf")
best_preds = None
    # loop over the epochs
    for epoch in range(num_epochs):
        # record the start time
        t_epoch_start = time.time()
        epoch_train_loss = 0.0  # running training loss for this epoch
        epoch_val_loss = 0.0  # running validation loss for this epoch
preds = []
trues = []
print("-------------")
print(f"Epoch {epoch+1}/{num_epochs}")
print("-------------")
        # training and validation loop for this epoch
for phase in ["train", "val"]:
if phase == "train":
model.train() # モデルを訓練モードに
else:
model.eval() # モデルを検証モードに
print("-------------")
            # iterate over the batches
for i, (images, labels) in enumerate(dataloaders_dict[phase]):
                # send the data to the GPU if available
                images = images.to(device)
                labels = labels.to(device)
                # forward pass
with torch.set_grad_enabled(phase == "train"):
outputs = model(images)
loss = criterion(outputs, labels)
                    # backpropagate during training
if phase == "train":
                        loss.backward()  # compute the gradients
                        optimizer.step()
                        optimizer.zero_grad()  # reset the gradients
epoch_train_loss += loss.item() / len(
dataloaders_dict[phase].dataset
)
                    # validation phase
else:
preds += [outputs.detach().cpu().softmax(dim=1).numpy()]
trues += [labels.detach().cpu()]
epoch_val_loss += loss.item() / len(
dataloaders_dict[phase].dataset
)
                # print intermediate progress
if i % 10 == 0:
print(
f"[{phase}][{i+1}/{len(dataloaders_dict[phase])}] loss: {loss.item()/images.size(0): .4f}"
)
if phase == "train":
scheduler.step() # 最適化schedulerの更新
        # loss and accuracy for each phase of this epoch
t_epoch_finish = time.time()
print("-------------")
print(
f"epoch {epoch+1} epoch_train_Loss:{epoch_train_loss:.4f} epoch_val_loss:{epoch_val_loss:.4f} time: {t_epoch_finish - t_epoch_start:.4f} sec."
)
print(
f"epoch_val_acc: {accuracy_score(np.concatenate(trues), np.concatenate(preds).argmax(axis=1))}"
)
        # save the model from the epoch with the lowest validation loss
if best_val_loss > epoch_val_loss:
best_preds = np.concatenate(preds)
best_val_loss = epoch_val_loss
save_checkpoint(model, optimizer, scheduler, epoch, save_path)
print("save model")
return best_val_loss, best_preds
# Function that trains a single fold
def run_one_fold(df_train, df_val, fold, device):
    # create the datasets
train_dataset = Dataset(
df_train,
phase="train",
transform=DataTransform(
input_size=args.input_size,
color_mean=args.color_mean,
color_std=args.color_std,
),
)
val_dataset = Dataset(
df_val,
phase="val",
transform=DataTransform(
input_size=args.input_size,
color_mean=args.color_mean,
color_std=args.color_std,
),
)
    # create the data loaders
train_dataloader = data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True
)
val_dataloader = data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False
)
    # collect them in a dictionary
    dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
    # define the model
    model = EfficientNet.from_pretrained(args.model_name, num_classes=args.num_classes)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)  # optimizer
    criterion = nn.CrossEntropyLoss()  # loss function
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.gamma)  # LR scheduler
save_path = f"{args.model_name}_fold_{fold}.pth"
best_val_loss, best_preds = train_model(
model,
dataloaders_dict,
criterion,
scheduler,
optimizer,
device,
num_epochs=args.epochs,
save_path=save_path,
)
return best_val_loss, best_preds
# Function that runs the k-fold training
def run_k_fold(df):
    # check whether a GPU is available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device in use:", device)
    # stratified k-fold cross-validation
skf = StratifiedKFold(n_splits=args.folds, shuffle=True, random_state=SEED)
oof = pd.DataFrame(index=df.index)
for fold, (train_index, val_index) in enumerate(skf.split(df, df["subject"])):
print(f"\n\nFOLD: {fold}")
print("-" * 50)
df_train, df_val = df.loc[train_index], df.loc[val_index]
best_val_loss, best_preds = run_one_fold(df_train, df_val, fold, device)
oof.loc[val_index, activity_map.keys()] = best_preds
return oof
# Training parameters
class args:
model_name = "efficientnet-b3"
color_mean = (0.485, 0.456, 0.406)
color_std = (0.229, 0.224, 0.225)
input_size = 256
num_classes = 10
batch_size = 64
epochs = 10
folds = 5
lr = 1e-3
gamma = 0.98
debug = True
train = False
if args.debug:
df_train = df.iloc[:1000]
else:
df_train = df.copy()
if args.train:
oof = run_k_fold(df_train)
accuracy = accuracy_score(df_train["class_num"], oof.values.argmax(axis=1))
print(f"\n\naccuracy: {accuracy}")
# # Predicting on the test data
# implemented by hand
# Function that runs inference on the test data
def inference(model, dataloader, device):
model.to(device)
model.eval()
preds = []
for i, (images, labels) in enumerate(dataloader):
images = images.to(device)
with torch.no_grad():
outputs = model(images)
preds += [outputs.detach().cpu().softmax(dim=1).numpy()]
if i % 10 == 0:
print(f"[test][{i+1}/{len(dataloader)}]")
preds = np.concatenate(preds)
return preds
# Run inference with each of the k fold models and ensemble the results
def inference_k_fold(df_test):
test_dataset = Dataset(
df_test,
phase="val",
transform=DataTransform(
input_size=args.input_size,
color_mean=args.color_mean,
color_std=args.color_std,
),
)
test_dataloader = data.DataLoader(
test_dataset, batch_size=args.batch_size, shuffle=False
)
model = EfficientNet.from_pretrained(args.model_name, num_classes=args.num_classes)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for fold in range(args.folds):
print(f"\n\nFOLD: {fold}")
print("-" * 50)
model.load_state_dict(torch.load(f"{args.model_name}_fold_{fold}.pth")["model"])
df_test.loc[:, activity_map.keys()] += (
inference(model, test_dataloader, device) / args.folds
)
if args.debug:
results = pd.read_csv(
"../input/statefarmdistracteddriverdetectionpretrain/result.csv"
)
results.to_csv("result.csv", index=False)
else:
    # load the test data
    df_test = pd.read_csv(osp.join(data_dir, "sample_submission.csv"))
    # preprocessing
df_test["file_path"] = df_test.apply(
lambda row: osp.join(data_dir, f"imgs/test/{row.img}"), axis=1
)
df_test["class_num"] = 0
df_test.loc[:, activity_map.keys()] = 0
    # average the k folds' predictions and store them in results
inference_k_fold(df_test)
results = df_test.drop(["file_path", "class_num"], axis=1)
results.iloc[:, 1:] = results.iloc[:, 1:].clip(0, 1)
results.to_csv("result.csv", index=False)
|
# Competition description
# Twitter has become an important communication channel in times of emergency. The ubiquitousness of smartphones enables people to announce an emergency they’re observing in real-time. Because of this, more agencies are interested in programmatically monitoring Twitter (i.e. disaster relief organizations and news agencies).
# But, it’s not always clear whether a person’s words are actually announcing a disaster.
# Acknowledgments
# This dataset was created by the company figure-eight and originally shared on their ‘Data For Everyone’ website here.
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import re
import string
# Load csv files
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Read files
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
train
test
submission
# Analyse train
sns.displot(train["target"], kde=True)
train["target"].value_counts()
num_classes = train["target"].nunique()
print(num_classes)
# Split dataset
X = train["text"]
y = train["target"]
X_test = test["text"]
# Import functions from sklearn library
from sklearn.model_selection import train_test_split
# Splitting the data into training and testing sets
X_train, X_val, y_train, y_val = train_test_split(
    X, y, test_size=0.1, random_state=42, stratify=y
)
X_train.shape, X_val.shape, y_train.shape, y_val.shape, X_test.shape
# Tensorflow
import tensorflow as tf
print(tf.__version__)
import tensorflow_hub as hub
import tensorflow_text as text
from tensorflow import keras
from keras.callbacks import ModelCheckpoint, EarlyStopping
bert_preprocess = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
)
bert_encoder = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4"
)
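# Quick hedged sketch: inspect the tensors the preprocessing layer produces for a sample
# string (the exact keys and shapes depend on the TF Hub preprocessing model loaded above).
sample_text = tf.constant(["example tweet text"])
print({k: v.shape for k, v in bert_preprocess(sample_text).items()})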
# initialising bert layers
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name="text")
preprocessed_text = bert_preprocess(text_input)
outputs = bert_encoder(preprocessed_text)
# initialising neural network layers
l = tf.keras.layers.Dropout(0.1, name="dropout")(outputs["pooled_output"])
l = tf.keras.layers.Dense(1, activation="sigmoid", name="output")(l)
# create model
model = tf.keras.Model(inputs=[text_input], outputs=[l])
model.summary()
# compile the model
METRICS = [
tf.keras.metrics.BinaryAccuracy(name="accuracy"),
tf.keras.metrics.Precision(name="precision"),
tf.keras.metrics.Recall(name="recall"),
]
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=METRICS)
keras.utils.plot_model(model, "sentiment_classifier.png")
# fit the model
early_stopping = EarlyStopping(monitor="val_loss", mode="min", patience=25, verbose=1)
mc = ModelCheckpoint(
"best_model.tf", monitor="val_loss", mode="min", save_best_only=True
)
model.fit(
X_train,
y_train,
epochs=15,
validation_data=(X_val, y_val),
verbose=1,
callbacks=[early_stopping, mc],
)
results = model.evaluate(X_val, y_val, batch_size=128)
model.save("tweet_model")
# evaluate the model
predictions = model.predict(X_test)
predictions = predictions.flatten()
predictions
cutoff = (predictions.max() - predictions.min()) / 2
predictions = np.where(predictions > 0.5, 1, 0)
predictions
sns.displot(predictions, kde=True)
count_pred = np.unique(predictions, return_counts=True)
count_pred
# Prepare submission
submission["target"] = predictions
submission.to_csv("submission.csv", index=False) # writing data to a CSV file
submission = pd.read_csv("submission.csv")
submission
|
# # **Lab 5 - Corner detection and SIFT**
# ## Corner Detection
# ### The goal of this exercise is to implement a Harris Corner Detection algorithm from its definition.
# ### 1. Open an image and compute its derivatives
# ### 2. Compute the Harris score
# ### 3. Apply a suitable threshold
# ### 4. Refine the result with Non-Maxima Suppression
# ## SIFT
# ### In the second part, SIFT is used to perform keypoint detection, description and matching.
# ## Import libraries
import os
import numpy as np
import matplotlib
import skimage
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
from pathlib import Path
from skimage import io
# ## Open an image
# ### Open an image, convert it to float and normalize it to a valid range, e.g. (0, 1).
# ### If the image is RGB, convert it to grayscale.
img_dir = Path("/kaggle/input/immagini-esercitazione-1")
img_name = Path("flamingos.jpg")
img_path = img_dir / img_name
I_original = io.imread(img_path) / 255.0
# RGB2GRAY
I = skimage.color.rgb2gray(I_original)
plt.figure()
plt.imshow(I, cmap="gray")
# ## Ex 5.1 Harris Corner Detector
# ### 1. Compute the matrix M
# ### 2. Find the points whose window gives a high corner response (f > threshold)
# ### 3. Keep the local maxima (e.g. by running a non-maxima suppression)
# ## Build M
# ### $M = \sum_{x}
# w(x)
# \begin{bmatrix}
# I_{x}^2 & I_{x}I_{y}\\
# I_{x}I_{y} & I_{y}^2
# \end{bmatrix} $
# ### where the subscript denotes the partial derivative along rows or columns. To avoid computing the summation explicitly, the formula above can be rewritten as
# ### $M = M(\sigma_I, \sigma_D) =
# g(\sigma_I) *
# \begin{bmatrix}
# I_{x}^2(\sigma_D) & I_{x}I_{y}(\sigma_D)\\
# I_{x}I_{y}(\sigma_D) & I_{y}^2(\sigma_D)
# \end{bmatrix} $
# ### where
# ### 1. $\sigma_D$ is the amount of smoothing used to compute the derivatives
# ### 2. $\sigma_I$ defines the actual observation window. Changing this value controls the scale of the corners being detected
# Compute the image derivatives using any derivative kernel
# e.g. Sobel
sobel_x = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
sobel_y = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
I = skimage.filters.gaussian(I, sigma=3)
Ix = ndi.convolve(I, sobel_x)
Iy = ndi.convolve(I, sobel_y)
Ixx = Ix**2
Iyy = Iy**2
Ixy = Ix * Iy
# ### At this point the derivative products can be filtered with a Gaussian filter characterized by
# ### $\sigma = \sigma_I$
# #### Tip: you can use the skimage.filters.gaussian() function
# Choose a suitable sigma for the Gaussian filter
sigma_i = 3
Ixx = skimage.filters.gaussian(Ixx, sigma=sigma_i)
Iyy = skimage.filters.gaussian(Iyy, sigma=sigma_i)
Ixy = skimage.filters.gaussian(Ixy, sigma=sigma_i)
M = np.array([[Ixx, Ixy], [Ixy, Iyy]])
# ### Compute R
# ## $R = \det(M) - \alpha \,\text{trace}(M)^{2}$
# Compute the Harris score matrix
# choose a suitable value for alpha (usually between 0.04 and 0.06)
alfa = 0.04
det_M = Ixx * Iyy - Ixy**2
trace_M = Ixx + Iyy
R = det_M - alfa * trace_M**2
# Choose a percentage for the thresholding, e.g. 0.01 which corresponds to 1%
threshold_percentage = 0.01
threshold = threshold_percentage * R.max()
# ## Select the points with R > threshold
# ### Find the points whose score is above a threshold value
# Assign a colour to the points above the threshold, e.g. red is [1.0, 0, 0]
img_keys = np.copy(I_original)
img_keys[R > threshold] = [0, 0, 1.0]
# show the detected corners
plt.figure()
plt.imshow(img_keys)
# ## Compare with the built-in functions
# ### Use the functions provided by the skimage.feature library and compare the results
from skimage.feature import corner_harris, corner_subpix, corner_peaks
# Compute the Harris score matrix
# using the built-in function corner_harris
R2 = corner_harris(I, method="k", k=alfa, eps=1e-06, sigma=3)
# Apply the thresholding
threshold2 = threshold_percentage * R2.max()
img_keys2 = np.copy(I_original)
img_keys2[R2 > threshold2] = [0, 0, 1.0]
plt.figure()
plt.subplot(121)
plt.imshow(img_keys)
plt.title("Harris detector from scratch")
plt.subplot(122)
plt.imshow(img_keys2)
plt.title("built-in Harris Detector")
# ### Non-Maxima Suppression
# If we want to apply non-maxima suppression to obtain a single point
# for each corner, we can use the corner_peaks function on the Harris
# score matrix
# Compute the Harris score matrix
R_h = R
coords = corner_peaks(R_h, min_distance=5, threshold_rel=0.2)
# The corner_peaks function returns the coordinates of the points
# corresponding to the corners
fig, ax = plt.subplots()
ax.imshow(I_original, cmap=plt.cm.gray)
ax.plot(
coords[:, 1], coords[:, 0], color="cyan", marker="o", linestyle="None", markersize=6
)
plt.show()
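# A from-scratch alternative sketch of the non-maxima suppression step (assumes the R and
# threshold computed above): keep only the pixels that are the local maximum of R within
# a small neighbourhood and that also exceed the threshold.
local_max = ndi.maximum_filter(R, size=11)
nms_mask = (R == local_max) & (R > threshold)
nms_coords = np.argwhere(nms_mask)
fig, ax = plt.subplots()
ax.imshow(I_original, cmap=plt.cm.gray)
ax.plot(nms_coords[:, 1], nms_coords[:, 0], "r.", markersize=6)
plt.show()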
# # SIFT feature detector and descriptor extractor
# ### Write code that uses the SIFT tool provided by the skimage library.
from skimage import transform
from skimage.feature import match_descriptors, plot_matches, SIFT
# Load a grayscale image and normalize it, e.g. blox.jpg
img = io.imread(img_path) / 255.0
gray1 = skimage.color.rgb2gray(img)
# ## Generate transformed images
# ### Try to create 2 transformed versions of the original image.
# ### Some suggestions are:
# ### 1. Rotation by 90 / 180 degrees
# ### 2. Translate the image by some offset
# ### 3. Change the aspect ratio
# ### Tip: you can use the skimage.transform library to apply several kinds of transformations
# The AffineTransform function creates a transformation matrix
# covering every kind of affine transformation: scaling, rotations and translations.
# To apply it, use the warp function.
# N.B. the scale argument must be a tuple, as must translation, while rotation is an angle
# in degrees
tform2 = transform.AffineTransform(scale=1.5, rotation=0, translation=(-300.0, -200.0))
gray2 = transform.warp(gray1, tform2)
tform3 = transform.AffineTransform(scale=0.5, rotation=0, translation=(200.0, 300.0))
gray3 = transform.warp(gray1, tform3)
plt.figure(figsize=(15, 8))
plt.subplot(131)
plt.imshow(gray1, cmap="gray")
plt.subplot(132)
plt.imshow(gray2, cmap="gray")
plt.subplot(133)
plt.imshow(gray3, cmap="gray")
# ## SIFT implementation
# ### Create a SIFT object
# To apply the SIFT algorithm to the image, instantiate
# an object of the SIFT class
sift = SIFT()
# ## Keypoints and descriptors
# ### Find the keypoints and descriptors of the original image.
# To extract and describe the image keypoints, use the detect_and_extract
# method of the SIFT class. The keypoints and descriptors are stored
# in the keypoints and descriptors attributes.
sift.detect_and_extract(gray1)
keypoints1 = sift.keypoints
descriptors1 = sift.descriptors
# ### Find the keypoints and descriptors of the 2 transformed versions of the original image.
sift.detect_and_extract(gray2)
keypoints2 = sift.keypoints
descriptors2 = sift.descriptors
sift.detect_and_extract(gray3)
keypoints3 = sift.keypoints
descriptors3 = sift.descriptors
# ## Do they match?
# ### Find which keypoints match between:
# ### 1. The original image and the first transformation
# ### 2. The original image and the second transformation
# ### Tip: use the match_descriptors() function imported from skimage.feature
# The match_descriptors function takes the two descriptor sets and a max_ratio value,
# which is the maximum ratio between the distances to the closest and second-closest
# descriptor in the second set of descriptors. This threshold is useful for
# filtering out ambiguous matches between the two descriptor sets.
# The choice of this value depends on the statistics of the chosen descriptor.
# Choose a suitable max_ratio
max_ratio = 0.5
matches12 = match_descriptors(
descriptors1, descriptors2, max_ratio=max_ratio, cross_check=True
)
matches13 = match_descriptors(
descriptors1, descriptors3, max_ratio=max_ratio, cross_check=True
)
matches12 = matches12[:10]
matches13 = matches13[:10]
# ## Visualize the matches
# ### At this point you can display the matches computed above using the plot_matches() function imported from skimage.feature
# ### Tip: for better visibility, try what happens when you set
# ### only_matches = True
# ### inside the function.
# N.B. if there are too many matches, show only a subset
# for a better visualization
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(11, 8))
plt.gray()
plot_matches(ax[0, 0], gray1, gray2, keypoints1, keypoints2, matches12)
ax[0, 0].axis("off")
ax[0, 0].set_title("Original Image vs. Flipped Image\n" "(all keypoints and matches)")
plot_matches(ax[1, 0], gray1, gray3, keypoints1, keypoints3, matches13)
ax[1, 0].axis("off")
ax[1, 0].set_title(
"Original Image vs. Transformed Image\n" "(all keypoints and matches)"
)
plot_matches(
ax[0, 1], gray1, gray2, keypoints1, keypoints2, matches12, only_matches=True
)
ax[0, 1].axis("off")
ax[0, 1].set_title(
"Original Image vs. Flipped Image\n" "(subset of matches for visibility)"
)
plot_matches(
ax[1, 1], gray1, gray3, keypoints1, keypoints3, matches13, only_matches=True
)
ax[1, 1].axis("off")
ax[1, 1].set_title(
"Original Image vs. Transformed Image\n" "(subset of matches for visibility)"
)
plt.tight_layout()
plt.show()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.preprocessing import MinMaxScaler
import warnings
warnings.filterwarnings("ignore")
# load data
data_train = pd.read_csv(
"/Users/wangxinjie/Documents/564/564 project/archive1/Training Data.csv"
)
data_test = pd.read_csv(
"/Users/wangxinjie/Documents/564/564 project/archive1/Test Data.csv"
)
data_train.head(5)
# data_test.head(5)
# # Preprocessing
#
# NA check
data_train.isnull().sum()
data_train.describe()
data_train.info()
# type
print(data_train.dtypes)
# drop 'Id'
df = data_train.copy()
df = df.drop(columns="Id")
# encode categorical data
en = LabelEncoder()
catCols = [
"Married/Single",
"House_Ownership",
"Car_Ownership",
"Profession",
"CITY",
"STATE",
]
for cols in catCols:
df[cols] = en.fit_transform(df[cols])
df.head(5)
sns.distplot(a=df["Age"])
defaulted = df.Risk_Flag.sum() # len(data.Risk_Flag[data.Risk_Flag == 1])
total = len(df.Risk_Flag)
rate_of_default = defaulted / total
print(
f"The rate of 'defaulted-on-loan' is {rate_of_default * 100 }% and total number of defaulter are",
defaulted,
)
f, ax = plt.subplots(1, 2, figsize=(18, 8))
df.Risk_Flag.value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.f%%", shadow=True, ax=ax[0]
)
ax[0].set_title("Paid vs Defaulted")
ax[0].set_ylabel("")
sns.countplot("Risk_Flag", data=df, ax=ax[1])
ax[1].set_title("Paid vs Defaulted")
plt.show()
# # Split data
y = df["Risk_Flag"]
X = df.drop("Risk_Flag", axis=1)
y.head()
X.head()
Y = pd.DataFrame(y)
X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=0.8, test_size=0.2)
# # 1 logistic regression
#
# logistic regression
logreg = LogisticRegression(random_state=42)
# Fit model to training data
logreg.fit(X_train, y_train)
# Predict
log_pred = logreg.predict(X_test)
# Evaluate performance of logistic regression model
print("Logistic Regression:")
print("Accuracy:", accuracy_score(y_test, log_pred))
print("Confusion matrix:\n", confusion_matrix(y_test, log_pred))
# plot
fig, ax = plt.subplots(figsize=(6, 4))
plot_confusion_matrix(logreg, X_test, y_test, ax=ax)
plt.show()
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, precision_recall_curve
print(classification_report(y_test, log_pred))
def plot_clf_metrics(clf, model_name, X_test, y_true):
# predict probabilities of positive class
y_pred_prob = clf.predict_proba(X_test)[:, 1]
# calculate ROC curve and AUC
fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)
roc_auc = auc(fpr, tpr)
# calculate precision-recall curve and AUC
precision, recall, _ = precision_recall_curve(y_true, y_pred_prob)
pr_auc = auc(recall, precision)
# plot ROC curve
plt.figure(figsize=(6, 4))
plt.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], "k--")
plt.xlim([-0.02, 1.02])
plt.ylim([-0.02, 1.02])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver Operating Characteristic - " + model_name)
plt.legend(loc="lower right")
plt.show()
# plot precision-recall curve
plt.figure(figsize=(6, 4))
plt.plot(recall, precision, label="Precision-Recall curve (area = %0.2f)" % pr_auc)
plt.xlim([-0.02, 1.02])
plt.ylim([-0.02, 1.02])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision-Recall Curve - " + model_name)
plt.legend(loc="lower right")
plt.show()
# call modified plotting function for logistic regression model
plot_clf_metrics(logreg, "Logistic Regression", X_test, y_test)
# # 2 KNN
# KNN
knn = KNeighborsClassifier(n_neighbors=5)
# Fit model
knn.fit(X_train, y_train)
# Predict
knn_pred = knn.predict(X_test)
# Evaluate performance of KNN model
print("KNN:")
print("Accuracy:", accuracy_score(y_test, knn_pred))
print("Confusion matrix:\n", confusion_matrix(y_test, knn_pred))
# plot
fig, ax = plt.subplots(figsize=(6, 4))
plot_confusion_matrix(knn, X_test, y_test, ax=ax)
plt.show()
print(classification_report(y_test, knn_pred))
from sklearn.metrics import roc_curve, auc, precision_recall_curve
def plot_clf_metrics(clf, model_name, X_test, y_true):
# predict probabilities of positive class
y_pred_prob = clf.predict_proba(X_test)[:, 1]
# calculate ROC curve and AUC
fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)
roc_auc = auc(fpr, tpr)
# calculate precision-recall curve and AUC
precision, recall, _ = precision_recall_curve(y_true, y_pred_prob)
pr_auc = auc(recall, precision)
# plot ROC curve
plt.figure(figsize=(6, 4))
plt.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], "k--")
plt.xlim([-0.02, 1.02])
plt.ylim([-0.02, 1.02])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver Operating Characteristic - " + model_name)
plt.legend(loc="lower right")
plt.show()
# plot precision-recall curve
plt.figure(figsize=(6, 4))
plt.plot(recall, precision, label="Precision-Recall curve (area = %0.2f)" % pr_auc)
plt.xlim([-0.02, 1.02])
plt.ylim([-0.02, 1.02])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision-Recall Curve - " + model_name)
plt.legend(loc="lower right")
plt.show()
# call modified plotting function for KNN model
plot_clf_metrics(knn, "KNN", X_test, y_test)
# # 3 Decision Tree Classifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
dt_pred = dt.predict(X_test)
# Evaluate performance of Decision Tree model
print("Decision Tree:")
print("Accuracy:", accuracy_score(y_test, dt_pred))
print("Confusion matrix:\n", confusion_matrix(y_test, dt_pred))
# plot
fig, ax = plt.subplots(figsize=(6, 4))
plot_confusion_matrix(dt, X_test, y_test, ax=ax)
plt.show()
# # 4 Random Forest Classifier
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
# Evaluate performance of Random Forest model
print("Random Forest:")
print("Accuracy:", accuracy_score(y_test, rf_pred))
print("Confusion matrix:\n", confusion_matrix(y_test, rf_pred))
# plot
fig, ax = plt.subplots(figsize=(6, 4))
plot_confusion_matrix(rf, X_test, y_test, ax=ax)
plt.show()
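# Optionally, the same ROC and precision-recall diagnostics can be produced for the
# tree-based models using the plot_clf_metrics helper defined above.
plot_clf_metrics(dt, "Decision Tree", X_test, y_test)
plot_clf_metrics(rf, "Random Forest", X_test, y_test)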
# # Test Prediction
#
df_test = data_test.copy()
# Apply the same preprocessing steps to test data
df_test = df_test.drop(columns="ID")
en = LabelEncoder()
catCols = [
"Married/Single",
"House_Ownership",
"Car_Ownership",
"Profession",
"CITY",
"STATE",
]
for cols in catCols:
df_test[cols] = en.fit_transform(df_test[cols])
df_test.head(5)
# logistic
# Predict on test data
log_pred = logreg.predict(df_test)
# Print the predictions
print("Predictions for test data 1og:")
print(log_pred)
# Convert test_pred to a DataFrame
log_pred = pd.DataFrame(log_pred, columns=["Risk_Flag"])
log_pred.head(5)
# log_pred.to_csv('Test_Prediction_log.csv',index=False)
# knn
# Predict on test data
knn_pred = knn.predict(df_test)
# Print the predictions
print("Predictions for test data knn:")
print(knn_pred)
# Convert test_pred to a DataFrame
knn_pred = pd.DataFrame(knn_pred, columns=["Risk_Flag"])
knn_pred.head(5)
# knn_pred.to_csv('Test_Prediction_knn.csv',index=False)
|
# # US Tornado Length Top 40
# https://www.noaa.gov/
import os
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from matplotlib import animation, rc
rc("animation", html="jshtml")
import folium
from folium import plugins
from folium.features import PolyLine
data = pd.read_csv(
"/kaggle/input/us-tornado-dataset-1950-2021/us_tornado_dataset_1950_2021.csv"
)
display(data[0:3])
print(data.columns.tolist())
print(len(data))
data = data.sort_values("len", ascending=False)
data = data[["date", "slat", "slon", "elat", "elon", "len"]]
data2 = data[0:40].reset_index(drop=True)
display(data2)
data3 = data2[["date", "len"]].copy()
data3["date"] = data3["date"].apply(lambda x: "Date " + x)
fig = px.bar(data3, x="date", y="len", title="Tornado Length Ranking")
fig.show(renderer="iframe")
# # Historical Top 40 Longest Tornadoes, Start to End
# green marker: start point
# red marker: end point
m = folium.Map(location=[42, -90], tiles="Stamen Terrain", zoom_start=5.0, min_zoom=2.0)
for i in range(40):  # plot all 40 tracks listed above
folium.Marker(location=data.iloc[i, 1:3], icon=folium.Icon(color="green")).add_to(
m
) # start point
folium.Marker(location=data.iloc[i, 3:5], icon=folium.Icon(color="red")).add_to(
m
) # end point
points = [data.iloc[i, 1:3], data.iloc[i, 3:5]]
line = PolyLine(locations=points, color="black", weight=3).add_to(m)
folium.PolyLine(locations=[points[0], points[1]], color="red", weight=3).add_to(m)
m
# # 207 Tornadoes in the Outbreak on '2011-04-27'
# https://www.washingtonpost.com/weather/2021/04/26/tornado-super-outbreak-april-2011/
data4 = data[data["date"] == "2011-04-27"]
data4 = data4.reset_index(drop=True)
data4["index"] = data4.index.tolist()
display(data4)
fig = px.bar(
    data4, x="index", y="len", title="Lengths of the 207 Tornadoes that Occurred on 2011-04-27"
)
fig.show(renderer="iframe")
m = folium.Map(location=[34, -88], tiles="Stamen Terrain", zoom_start=5.0, min_zoom=2.0)
for i in range(len(data4)):
folium.Marker(location=data4.iloc[i, 1:3], icon=folium.Icon(color="green")).add_to(
m
) # start point
folium.Marker(location=data4.iloc[i, 3:5], icon=folium.Icon(color="red")).add_to(
m
) # end point
points = [data4.iloc[i, 1:3], data4.iloc[i, 3:5]]
line = PolyLine(locations=points, color="black", weight=3).add_to(m)
folium.PolyLine(locations=[points[0], points[1]], color="red", weight=3).add_to(m)
m
|
import pandas as pd
df = pd.read_csv("/kaggle/input/countries-gdp-2012-to-2021/gdp.csv")
df
print(df.describe())
print(df.duplicated().sum())
import numpy as np
# Replace ".." with NaN values
df = df.replace("..", np.nan)
# Drop rows with NaN values in any column
df = df.dropna()
# Check number of rows in dataframe
print(len(df))
# Parse the GDP values: strings ending in "trillion" are converted to their numeric
# value, everything else is cast to float, then each year column is cast to int.
year_cols = ["2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021"]
for col in year_cols:
    df[col] = df[col].apply(
        lambda x: float(x[:-9]) * 1000000000000
        if isinstance(x, str) and x.endswith("trillion")
        else float(x)
    )
    df[col] = df[col].astype(int)
print(df.dtypes)
df
import matplotlib.pyplot as plt
sums = df[
["2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021"]
].sum()
plt.figure(figsize=(8, 8))
plt.pie(sums.values, labels=sums.index, autopct="%1.1f%%", startangle=90)
plt.axis("equal")
plt.title("Total GDP by Year (2012-2021)")
plt.show()
corr_matrix = df[
["2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021"]
].corr()
print(corr_matrix)
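# A quick visual check of the correlation matrix above; this is a small sketch using the
# matplotlib import already present (seaborn is not imported in this notebook).
plt.figure(figsize=(8, 6))
plt.imshow(corr_matrix, cmap="coolwarm", vmin=-1, vmax=1)
plt.colorbar(label="correlation")
plt.xticks(range(len(corr_matrix.columns)), corr_matrix.columns, rotation=90)
plt.yticks(range(len(corr_matrix.columns)), corr_matrix.columns)
plt.title("Year-to-year GDP correlation")
plt.show()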
# df = df.set_index('Country Name ')
df.head()
# Note: the label-based selection below assumes the country-name column has been set as
# the DataFrame index (see the commented set_index call above).
subset_df = df.loc[["Afghanistan", "Albania"], :]
# Create a pie chart for the specific rows
subset_df.plot(kind="pie", y="2019", legend=False)
# Set the title of the plot
plt.title("Pie Chart of Rows 1 and 2 in 2019")
# Show the plot
plt.show()
# beginner
|
# 
# image from [link](https://www.travelpayouts.com/blog/a-b-and-split-tests/)
# ## AB Testing
# It is one of the most commonly used tests in the field of data science
# * Let A represent a feature or group.
# * Let B represent another feature or group.
# The topic of interest is whether there is a difference between A and B.
# Hypothesis testing is a statistical method used to test a belief or claim. In AB-test group comparisons, its main purpose is to show whether an observed difference between the groups could plausibly have arisen by chance.
# The two-sample proportion test used in this project compares the proportions of two groups.
# Hypothesis:
# * H0: p1 = p2
# * H1: p1 != p2
# Depending on the resulting p-value:
# * If the p-value < 0.05, H0 is rejected,
# so there is a significant difference between the groups in terms of the proportions.
# * If the p-value > 0.05, H0 cannot be rejected,
# so there isn't a significant difference between the groups in terms of the proportions.
# ### Data Details:
# Index: Row index
# user id: User ID (unique)
# test group: If "ad" the person saw the advertisement, if "psa" they only saw the public service announcement
# converted: If a person bought the product then True, else is False
# total ads: Amount of ads seen by person
# most ads day: Day that the person saw the biggest amount of ads
# most ads hour: Hour of day that the person saw the biggest amount of ads
#
import numpy as np
import pandas as pd
from statsmodels.stats.proportion import proportions_ztest
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", 500)
pd.set_option("display.float_format", lambda x: "%.5f" % x)
df = pd.read_csv("/kaggle/input/marketing-ab-testing/marketing_AB.csv")
# We are trying to understand the data.
def check_df(dataframe, head=7):
print("################### Shape ####################")
print(dataframe.shape)
print("#################### Info #####################")
print(dataframe.info())
print("################### Nunique ###################")
print(dataframe.nunique())
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("################## Quantiles #################")
print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
print("#################### Head ####################")
print(dataframe.head(head))
check_df(df)
# Data Preparation
# We are deleting the variable that does not carry any information.
df.drop("Unnamed: 0", inplace=True, axis=1)
# We convert the true/false values to 1 and 0.
df["converted"] = np.where(df["converted"] == False, 0, 1)
df.head()
# We are looking at the mean purchase values of those who saw the advertisement and those who didn't.
df.groupby("test group")["converted"].mean()
# We are summing the purchase values separately for those who saw the ad and those who didn't see the ad.
# We assign these to new variables.
ad_converted_count = df.loc[df["test group"] == "ad", "converted"].sum()
psa_converted_count = df.loc[df["test group"] == "psa", "converted"].sum()
# We are calculating the p-value to determine the effect of seeing the advertisement
# on purchases, comparing those who saw it with those who didn't.
test_stat, pvalue = proportions_ztest(
count=[ad_converted_count, psa_converted_count],
nobs=[
df.loc[df["test group"] == "ad", "converted"].shape[0],
df.loc[df["test group"] == "psa", "converted"].shape[0],
],
)
# count = success count
# nobs = the total number of observations
# Thus, we obtain the ratio.
print("Test Stat = %.4f, p-value = %.4f" % (test_stat, pvalue))
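# As a follow-up sketch (an addition for illustration, using only quantities already
# computed above): a 95% confidence interval for the difference in conversion rates
# between the "ad" and "psa" groups, based on the normal approximation.
n_ad = df.loc[df["test group"] == "ad", "converted"].shape[0]
n_psa = df.loc[df["test group"] == "psa", "converted"].shape[0]
p_ad = ad_converted_count / n_ad
p_psa = psa_converted_count / n_psa
diff = p_ad - p_psa
se = np.sqrt(p_ad * (1 - p_ad) / n_ad + p_psa * (1 - p_psa) / n_psa)
print("Difference = %.5f, 95%% CI = [%.5f, %.5f]" % (diff, diff - 1.96 * se, diff + 1.96 * se))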
|
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Bidirectional
from timeit import default_timer as timer
data = pd.read_csv(
"../input/my-moods/daylio_export_2021_03_03.csv", index_col="full_date"
)
mood_data = data["mood"]
mood_data = mood_data.sort_index()
enc = LabelEncoder()
mood_data = enc.fit_transform(mood_data)
mood_data = pd.DataFrame(mood_data)
# scratch calculations for the split sizes used below: 40% of the 428 records is ~171,
# and -85 - 171 = -256 is the training cut-off
428 * 0.4
-85 - 171
mood_train = mood_data.iloc[0:-256]
mood_val = mood_data.iloc[171:-85]
mood_test = mood_data.iloc[-85:]
print(mood_train.shape)
print(mood_val.shape)
print(mood_test.shape)
rnn_mood_train = np.array(mood_train)
rnn_mood_train = rnn_mood_train.reshape(mood_train.shape[0], 1)
rnn_mood_val = np.array(mood_val)
rnn_mood_val = rnn_mood_val.reshape(mood_val.shape[0], 1)
Scaler = MinMaxScaler()
rnn_mood_train_scaled = Scaler.fit_transform(rnn_mood_train)
rnn_mood_val_scaled = Scaler.transform(rnn_mood_val)
# Create our data RNN np arrays
RNN_X_train = []
RNN_y_train = []
RNN_X_val = []
RNN_y_val = []
n_past = 60
n_future = 1
# TODO: also train a model that forecasts for 7 days
# Build sliding windows over the scaled training series
for i in range(0, len(rnn_mood_train_scaled) - n_past - n_future + 1):
    RNN_X_train.append(rnn_mood_train_scaled[i : i + n_past, 0])
    RNN_y_train.append(rnn_mood_train_scaled[i + n_past : i + n_past + n_future, 0])
# Build sliding windows over the scaled validation series
for i in range(0, len(rnn_mood_val_scaled) - n_past - n_future + 1):
    RNN_X_val.append(rnn_mood_val_scaled[i : i + n_past, 0])
    RNN_y_val.append(rnn_mood_val_scaled[i + n_past : i + n_past + n_future, 0])
RNN_X_train, RNN_y_train = np.array(RNN_X_train), np.array(RNN_y_train)
RNN_X_val, RNN_y_val = np.array(RNN_X_val), np.array(RNN_y_val)
# Reshape to the Keras 3D tensor input shape
RNN_X_train = np.reshape(RNN_X_train, (RNN_X_train.shape[0], RNN_X_train.shape[1], 1))
RNN_X_val = np.reshape(RNN_X_val, (RNN_X_val.shape[0], RNN_X_val.shape[1], 1))
print(RNN_X_train.shape)
print(RNN_X_val.shape)
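# As flagged in the TODO above, a hedged sketch of the windowing for a 7-day forecast
# horizon: only the target slice length changes, and the output layer would need
# Dense(units=7). The *_7 names below are new and used only for this illustration.
n_future_7 = 7
RNN_X_train_7, RNN_y_train_7 = [], []
for i in range(0, len(rnn_mood_train_scaled) - n_past - n_future_7 + 1):
    RNN_X_train_7.append(rnn_mood_train_scaled[i : i + n_past, 0])
    RNN_y_train_7.append(rnn_mood_train_scaled[i + n_past : i + n_past + n_future_7, 0])
RNN_X_train_7 = np.reshape(np.array(RNN_X_train_7), (-1, n_past, 1))
RNN_y_train_7 = np.array(RNN_y_train_7)  # shape: (samples, 7)
print(RNN_X_train_7.shape, RNN_y_train_7.shape)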
# Initialise the RNN
regressor = Sequential()
# Add the first LSTM layer
regressor.add(
Bidirectional(
LSTM(units=1000, return_sequences=True, input_shape=(RNN_X_train.shape[1], 1))
)
)
# regressor.add(Dropout(0.2)) # 20% of the neurons will be ignored, for regularisation
# Add more LSTM layers
regressor.add(LSTM(units=1000, return_sequences=True))
# regressor.add(Dropout(0.2))
#
regressor.add(LSTM(units=1000, return_sequences=True))
# regressor.add(Dropout(0.2))
# Add the last LSTM layer
regressor.add(LSTM(units=1000, return_sequences=False))
# regressor.add(Dropout(0.2))
# Add the output layer
regressor.add(Dense(units=n_future, activation="linear"))
# Compile & Train
start = timer()
regressor.compile(optimizer="adam", loss="mean_squared_error")
RNN_hist = regressor.fit(
RNN_X_train,
RNN_y_train,
epochs=25,
batch_size=32,
validation_data=(RNN_X_val, RNN_y_val),
)
runtime = timer() - start
print("Model Training took {0:0.0f} seconds, with GPU".format(runtime))
graph_data = pd.DataFrame(
[np.array(RNN_hist.history["loss"]), np.array(RNN_hist.history["val_loss"])]
)
graph_data = graph_data.T
graph_data.columns = ["Loss", "Validation Loss"]
sns.lineplot(data=graph_data)
dataset_total = pd.concat((mood_train, mood_test), axis=0)
inputs = dataset_total[
len(dataset_total) - len(mood_test) - n_past - n_future + 1 :
].values
inputs = inputs.reshape(-1, 1)
inputs = Scaler.transform(inputs)  # only transform, to prevent data leakage
RNN_X_test = []
for i in range(
0, len(inputs) - n_past - n_future + 1
): # we have to start our test data at 60
RNN_X_test.append(inputs[i : i + n_past, 0])
RNN_X_test = np.array(RNN_X_test)
RNN_X_test.shape
# Move test set to 3D structure, as expected by the RNN
RNN_X_test = np.reshape(RNN_X_test, (RNN_X_test.shape[0], RNN_X_test.shape[1], 1))
# Predict
RNN_pred = regressor.predict(RNN_X_test)
RNN_pred = Scaler.inverse_transform(RNN_pred)
RNN_pred = pd.DataFrame(RNN_pred)[0]
RNN_pred.index = mood_test.index
RNN_pred_data = pd.merge(
pd.DataFrame(mood_test), RNN_pred, left_index=True, right_index=True
)
RNN_pred_data.columns = ["Actual", "Predicted by RNN"]
plt.figure(figsize=(18, 6))
sns.lineplot(data=RNN_pred_data, palette="Blues")
errors = {
"MAE": metrics.mean_absolute_error(mood_test, RNN_pred),
"RMSE": metrics.mean_squared_error(mood_test, RNN_pred, squared=False),
}
errors = pd.DataFrame(errors, index=["RNN"])
errors = np.round(errors, 3)
errors
|
# format function
a = " we are learning {}"
c = input("enter topic")
b = a.format(c)
b
s = "{1} is a {0} company"
p = s.format("google", "tech")
p
# q = input("enter company")
# r = input("big or small")
# d = s.format(0 = q,1 = r)
# d
# s
s = "{company_name} is a {company_type} company"
t = input("enter company_name")
q = input("Enter company type")
p = s.format(company_name=t, company_type=q)
# p = s.format(company_name = 'Google', company_type = 'Tech')
p
d = {"apple": 23455, "Google": 454546, "Facebook": 3543646, "Netflix": 435346}
for i in d:
print("{:<10} - {:>10}".format(i, d[i]))
a = "the profit generated by Apple is {:,}"
b = a.format(6346346)
b
# categories of error (short examples below):
# - Syntax error
# - Runtime error: IndexError, KeyError, ModuleNotFoundError, TypeError, NameError, ZeroDivisionError, ValueError
# - Logical error
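# Short illustrations of the three categories above (hypothetical snippets, kept as
# comments so the notebook still runs):
# Syntax error - the parser rejects the code before it runs:
#   print("hello"          # missing closing parenthesis
# Runtime error - the code is valid but fails while executing:
#   [1, 2, 3][10]          # IndexError
#   10 / 0                 # ZeroDivisionError
# Logical error - the code runs but gives the wrong answer:
#   average = (4 + 6) / 3  # should divide by 2; no exception is raised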
# **Exception Handling**
try:
a = int(input("enter the first number"))
b = int(input("Enter the second number"))
c = (a + b) / 2
print(c)
except:
    print("This is the exception handling code; the values entered are incorrect")
p = int(input("enter the p number"))
q = int(input("enter the q number"))
r = p + q
print("value of r is", r)
# try:
# a = int(input("enter the first number"))
# b = int(input("Enter the second number"))
# c = (a+b)/2
# print(c)
# except Exception as e:
# print("This is exceptional handling code and values entered is incorrect")
# print(e)
#
# p = int(input("enter the p number"))
# q = int(input("enter the q number"))
# r = p+q
# print("value of r is", r)
try:
a = int(input("enter the first number"))
b = int(input("Enter the second number"))
c = a / b
print(c)
except Exception as e:
    print("This is the exception handling code; the values entered are incorrect")
print(e)
p = int(input("enter the p number"))
q = int(input("enter the q number"))
r = p + q
print("value of r is", r)
try:
a = int(input("enter the first number"))
b = int(input("Enter the second number"))
c = a / b
print(c)
except ZeroDivisionError:
    print("This is exception handling; the denominator cannot be zero")
except NameError:
    print("We need to initialize the variable first")
else: # this runs only when the try block raised no error
    print("good to go")
finally: # this block is always executed
    print("this is the final block")
print(
    "move to next set of code"
) # this will not run if an error occurs and is not handled
|
# # What is Feature Selection:
# Feature selection is a technique used in machine learning and data mining to identify the most relevant and informative features (also known as variables or attributes) for a given task or problem. The process involves selecting a subset of the available features that are most important or influential in explaining the target variable, while discarding the rest.
# The main goal of feature selection is to improve the performance of a model by reducing the number of features, which can help to reduce the risk of overfitting, increase the model's interpretability, and reduce the computational cost of training and testing.
# Let's say you have a dataset of customer information for a marketing campaign, and it includes features such as age, gender, income, occupation, education level, marital status, and location.
# However, not all of these features may be relevant or useful in predicting whether a customer will respond positively to the marketing campaign. Feature selection helps to identify the most important features and exclude irrelevant or redundant ones.
# One approach to feature selection is to use statistical methods such as correlation analysis or mutual information to measure the relationship between each feature and the target variable (e.g., whether the customer responds to the campaign).
# For instance, let's say the correlation analysis reveals that age, income, and occupation have the strongest correlations with the target variable, while gender and location have weak correlations. In this case, you may decide to only use age, income, and occupation as features in your machine learning model, while excluding gender and location.
# By selecting the most relevant features, you can improve the accuracy and interpretability of your model, reduce overfitting, and make it more efficient to train and deploy.
# # Data Content:
# The breast-cancer-wisconsin dataset is a well-known benchmark dataset in machine learning that contains information about breast cancer tumors. It was originally collected by Dr. William H. Wolberg at the University of Wisconsin Hospital, Madison, in the early 1990s.
# The dataset contains 569 instances, each representing a tumor sample. There are 30 features, which are numerical measures of the characteristics of the cell nuclei present in the sample, including mean radius, mean texture, mean perimeter, mean area, mean smoothness, mean compactness, mean concavity, mean concave points, mean symmetry, mean fractal dimension, and their standard errors, and worst values.
# The target variable is the diagnosis, which can be either benign (non-cancerous) or malignant (cancerous), indicated by the values 0 and 1, respectively.
# # Read and analyze the data:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
confusion_matrix,
ConfusionMatrixDisplay,
)
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegression
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.filterwarnings("ignore")
path = "/kaggle/input/breast-cancer-wisconsin-data/data.csv"
data = pd.read_csv(path)
data.head()
data.isna().sum()
# We can notice that we have a column with 569 missing values (as many as the number of instances in the dataset), and an id column which wouldn't really contribute to our classification problem.
#
y = data["diagnosis"]
X = data.drop(["id", "Unnamed: 32", "diagnosis"], axis=1)
y.replace(["M", "B"], [1, 0], inplace=True)
X.info()
X.describe()
# The first thing we notice is that the feature value ranges are not the same, which implies that we might need to scale our variables; otherwise the features with larger values might dominate the model. For that we are going to use standard scaling, also known as Z-score normalization, a common technique used in machine learning to transform numerical features so that they have a mean of 0 and a standard deviation of 1.
X = (X - X.mean()) / X.std()
y_count = y.value_counts()
y_count.plot(kind="bar")
# # Data visualization:
# We are going to visualize the features in the data to get a better sense of their distribution and the range of their values; for that we are going to use violin plots.
# A violin plot is a type of data visualization that is particularly useful for comparing the distribution of data between different groups or categories.
# One of the key benefits of a violin plot is that it combines information about the distribution of the data (similar to a box plot) with information about the density of the data (similar to a kernel density plot). This can help to provide a more complete picture of the data and highlight any differences or similarities between groups.
# Another advantage of a violin plot is that it can display multiple groups or categories side by side, allowing for easy visual comparison. This can be particularly useful when trying to identify patterns or trends in the data.
# We are going to visualize the features ten by ten for clarity.
data = pd.concat([y, X.iloc[:, 0:10]], axis=1)
data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value")
plt.figure(figsize=(15, 10))
sns.violinplot(data=data, x="features", y="value", hue="diagnosis", split=True)
plt.xticks(rotation=90)
plt.show()
# We can notice that for variables such as radius_mean and concavity_mean the values are easily separable, which suggests that these features have some predictive power for classification.
data = pd.concat([y, X.iloc[:, 10:20]], axis=1)
data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value")
plt.figure(figsize=(15, 10))
sns.violinplot(data=data, x="features", y="value", hue="diagnosis", split=True)
plt.xticks(rotation=90)
plt.show()
data = pd.concat([y, X.iloc[:, 20:30]], axis=1)
data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value")
plt.figure(figsize=(15, 10))
sns.violinplot(data=data, x="features", y="value", hue="diagnosis", split=True)
plt.xticks(rotation=90)
plt.show()
# # Feature Selection:
# ## Feature Selection using correlation:
#
# concat the target and the features
data = pd.concat([y, X], axis=1)
# compute the correlation matrix
corr = data.corr()
plt.figure(figsize=(18, 18))
# plot the correlation as a heatmap
sns.heatmap(corr, annot=True, linewidths=0.5, fmt=".1f")
# select the upper bounds of the matrix
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
# select the columns that have a correlation bigger than a certain threshhold with any other column
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
# drop the columns
X_transformed = X.drop(to_drop, axis=1)
corr.drop(to_drop, inplace=True)
# print the 10 features that are the most correlated with the target
corr["diagnosis"].nlargest(11)
cols = corr["diagnosis"].nlargest(11).index[1:]
# Select the 10 features that are the most correlated with the target
df = X_transformed[cols]
# Split data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
df, y, test_size=0.2, random_state=42
)
# ### Logistic Regression using statsmodels:
# Statsmodels is a powerful tool that finds great success when our focus is statistical modeling and hypothesis testing.
# Statsmodels has a strong emphasis on hypothesis testing and provides a range of statistical tests, such as t-tests, F-tests, and Wald tests, that can be used to test hypotheses about model parameters. This makes it a good choice for settings where hypothesis testing is a primary goal.
# Statsmodels also provides detailed output that is designed to be easily interpretable by researchers and analysts. For example, summary tables for regression models provide information such as the coefficients, standard errors, p-values, and confidence intervals, along with various goodness-of-fit measures.
# All of these make Statsmodels a good choice when we want our model to be as interpretable as possible and thoroughly tested.
log_reg = sm.Logit(y_train, X_train).fit()
log_reg.summary()
# The output of summary() provides a detailed summary of the model including the following:
# A table that includes:
# * The name, coefficient, standard error, z-value, and p-value for each variable in the model. The coefficient represents the change in the log odds of the outcome for a one-unit change in the predictor variable, holding all other variables constant. The standard error is the standard deviation of the sampling distribution of the coefficient estimate, and the z-value is the coefficient divided by its standard error. The p-value is the probability of observing a z-value as extreme or more extreme than the observed value, assuming the null hypothesis that the true coefficient is zero.
# * The log-likelihood of the model, which is a measure of how well the model fits the data.
# * The pseudo R-squared measures of the model, which are measures of how well the model explains the variability in the data.
# In addition to that, we can access the AIC (Akaike information criterion) and BIC (Bayesian information criterion), which are measures of the quality of the model; lower values imply better fitted models.
print(log_reg.aic, log_reg.bic)
# In the context of regression analysis, the Wald test is commonly used to test the significance of individual predictor variables in a linear regression model, logistic regression model, or other types of generalized linear models.
# The Wald test is based on the Wald statistic, which is calculated as the square of the difference between the estimated value of a parameter and the hypothesized value, divided by the estimated variance of the parameter. The resulting test statistic is compared to a chi-squared distribution with degrees of freedom equal to the number of parameters being tested.
# $$W = \frac{(\hat{b} - b)^2}{var(\hat{b})}$$
# In the case of testing the significance of individual predictor variables in a regression model, the null hypothesis typically states that the coefficient (or slope) of the predictor variable is equal to zero. This means that the predictor variable has no effect on the outcome variable, or in other words, the predictor variable is not a significant predictor of the outcome variable.
print(log_reg.wald_test(X_train))
y_pred = log_reg.predict(X_test)
y_pred = np.where(y_pred > 0.5, 1, 0)
print(accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
ConfusionMatrixDisplay(cm).plot()
# ## Feature Selection using SelectKbest method:
# The SelectKBest method is a feature selection technique in machine learning that selects the top k most informative features from a dataset. The SelectKBest method works by first evaluating the statistical relationship between each feature and the target variable using a statistical test, such as chi-squared, f-regression, or mutual information. Then, it selects the k features with the highest scores from the test.
from sklearn.feature_selection import SelectKBest, f_classif
# Since our features are numeric, SelectKBest is going to use the ANOVA F-test (f_classif) to select the best features
reg = SelectKBest(f_classif, k=10).fit(X, y)
# transform the data; drop columns
X_transformed = reg.transform(X)
X_new = X[[val for i, val in enumerate(X.columns) if reg.get_support()[i]]]
X_train, X_test, y_train, y_test = train_test_split(
X_new, y, random_state=42, test_size=0.2
)
log_reg = sm.Logit(y_train, X_train).fit()
y_pred = log_reg.predict(X_test)
y_pred = np.where(y_pred > 0.5, 1, 0)
print(accuracy_score(y_test, y_pred))
# compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
# plot confusion matrix
ConfusionMatrixDisplay(cm).plot()
# ## Feature Selection using recursive feature elimination:
# In RFE, a machine learning model is trained on the full set of features, and the importance of each feature is then ranked based on its contribution to the model's accuracy. The feature with the lowest ranking is then removed from the dataset, and the process is repeated iteratively until a desired number of features is reached or the model's performance deteriorates.
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.2
)
from sklearn.feature_selection import RFE
rfe = RFE(estimator=LogisticRegression(), n_features_to_select=10, step=1)
rfe = rfe.fit(X_train, y_train)
print("Chosen best 10 feature by rfe:", X_train.columns[rfe.support_])
# ## Feature Selection using Recursive feature elimination with cross validation
# Recursive Feature Elimination with Cross-Validation is an extension of the Recursive Feature Elimination (RFE) algorithm, which adds a cross-validation step to the feature selection process.
# In RFECV, the dataset is first split into training and testing sets, and then the RFE algorithm is applied to the training set to determine the optimal subset of features. However, instead of using a fixed number of features, RFECV uses cross-validation to estimate the optimal number of features to select. This is done by iteratively training the model on subsets of the features and evaluating its performance using cross-validation. The number of features selected is then chosen based on the optimal cross-validation score.
from sklearn.feature_selection import RFECV
rfecv = RFECV(estimator=LogisticRegression(), cv=5, scoring="accuracy", step=1)
rfecv = rfecv.fit(X_train, y_train)
print("Optimal number of features :", rfecv.n_features_)
print("Best features :", X_train.columns[rfecv.support_])
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score of number of selected features")
# Note: on newer scikit-learn releases grid_scores_ has been removed; rfecv.cv_results_["mean_test_score"] gives the per-subset mean CV score instead.
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_.mean(axis=1))
plt.show()
# # Model Selection using cross validation:
# Now that we have selected our features, next step is to try out different models and select the best performing one.
df = X[X.columns[rfecv.support_]]
models = [
LogisticRegression(),
RandomForestClassifier(),
CatBoostClassifier(silent=True),
LGBMClassifier(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
SVC(),
LinearDiscriminantAnalysis(),
]
d = {}
d["Model"] = [
"Logistic Regression",
"RandomForest",
"CatBoost",
"LightGBM",
"GradientBoosting",
"AdaBoost",
"SVM",
"LDA",
]
d["accuracy"] = []
d["precision"] = []
d["recall"] = []
d["f1_score"] = []
d["auc"] = []
for i, model in enumerate(d["Model"]):
y_pred = cross_val_predict(models[i], df, y)
    # sklearn metrics expect the true labels first, then the predictions
    d["accuracy"].append(accuracy_score(y, y_pred))
    d["precision"].append(precision_score(y, y_pred))
    d["recall"].append(recall_score(y, y_pred))
    d["f1_score"].append(f1_score(y, y_pred))
    d["auc"].append(roc_auc_score(y, y_pred))
models_summary = pd.DataFrame(d)
models_summary.sort_values(["accuracy"], ascending=False)
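# A short follow-up sketch (an illustration, not part of the original comparison): refit
# the top-ranked model on a held-out split of the selected features and report the usual
# classification metrics. classification_report is imported here just for this sketch.
from sklearn.metrics import classification_report
best_name = models_summary.sort_values("accuracy", ascending=False)["Model"].iloc[0]
best_model = models[d["Model"].index(best_name)]
X_tr, X_te, y_tr, y_te = train_test_split(df, y, test_size=0.2, random_state=42)
best_model.fit(X_tr, y_tr)
print(best_name)
print(classification_report(y_te, best_model.predict(X_te)))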
|
# # For Illustration | Originally Used Locally in a Jupyter Notebook
import shutil
import os
import glob
import cv2
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import mahotas
import numpy as np
import pandas as pd
dir_oversampling = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\Datasets\\Conjunto1\\simpsons_enriquecido_manualmente - oversampling"
dir_undersampling = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\Datasets\\Conjunto1\\simpsons_enriquecido_manualmente - undersampling"
dir_meansampling = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\Datasets\\Conjunto1\\simpsons_enriquecido_manualmente - meansampling"
dir_unbalanced = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\Datasets\\Conjunto1\\simpsons_enriquecido_manualmente - unbalanced"
dir_teste = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\teste"
def gerar_dataframe_dir(dir):
    # diretorios = os.listdir(dir_original) # list only the files from the path.
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
df = pd.DataFrame()
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list only the files from the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".jpg")]
for x in range(len(arquivos)):
serie = pd.Series([diretorios[i], arquivos[x]])
df = df.append(serie, ignore_index=True)
            print("\r", "Appending:", diretorios[i], "\\", arquivos[x], end="")
df.columns = ["Classe", "Imagem"]
return df
df = gerar_dataframe_dir(dir_meansampling)
classes = df["Classe"].unique()
classes.shape
df.groupby(["Classe"]).size().sort_values(ascending=False)
# df.groupby(['Classe']).size().mean()
df.groupby(["Classe"]).size().sort_values(ascending=False)
## converting .png to .jpg
def png_to_jpg(dir):
    # diretorios = os.listdir(dir_original) # list only the files from the path.
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list only the files from the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".png")]
for x in range(len(arquivos)):
# Load .png image
image = cv2.imread(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
# Save .jpg image
cv2.imwrite(
dir + "\\" + diretorios[i] + "\\png_to_jpg_" + str(x) + ".jpg",
image,
[int(cv2.IMWRITE_JPEG_QUALITY), 100],
)
os.remove(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - unbalanced" # source folder
png_to_jpg(dir_teste)
png_to_jpg(dir_unbalanced)
png_to_jpg(dir_oversampling)
png_to_jpg(dir_undersampling)
png_to_jpg(dir_meansampling)
## converting .bmp to .jpg
def bmp_to_jpg(dir):
    # diretorios = os.listdir(dir_original) # list only the files from the path.
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list only the files from the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".bmp")]
for x in range(len(arquivos)):
# Load .bmp image
image = cv2.imread(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
# Save .jpg image
cv2.imwrite(
dir + "\\" + diretorios[i] + "\\bmp_to_jpg_" + str(x) + ".jpg",
image,
[int(cv2.IMWRITE_JPEG_QUALITY), 100],
)
os.remove(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - unbalanced" # source folder
bmp_to_jpg(dir_teste)
bmp_to_jpg(dir_unbalanced)
bmp_to_jpg(dir_oversampling)
bmp_to_jpg(dir_undersampling)
bmp_to_jpg(dir_meansampling)
## converting .gif to .jpg
import imageio
def gif_to_jpg(dir):
    # diretorios = os.listdir(dir_original) # list only the files from the path.
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list only the files from the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".gif")]
for x in range(len(arquivos)):
# Load .gif image
image = imageio.imread(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
# image = cv2.imread(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
# Save .jpg image
cv2.imwrite(
dir + "\\" + diretorios[i] + "\\gif_to_jpg_" + str(x) + ".jpg",
image,
[int(cv2.IMWRITE_JPEG_QUALITY), 100],
)
os.remove(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - unbalanced" # source folder
gif_to_jpg(dir_teste)
gif_to_jpg(dir_unbalanced)
gif_to_jpg(dir_oversampling)
gif_to_jpg(dir_undersampling)
gif_to_jpg(dir_meansampling)
## converting .gif to .jpg even when the file extension says .jpg
import imageio
import imghdr
def gif_to_jpg(dir):
    # diretorios = os.listdir(dir_original) # list only the files from the path.
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list only the files from the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".jpg")]
for x in range(len(arquivos)):
# Load and test .jpg image
if (imghdr.what(dir + "\\" + diretorios[i] + "\\" + arquivos[x])) == "gif":
image = imageio.imread(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
# image = cv2.imread(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
# Save .jpg image
cv2.imwrite(
dir + "\\" + diretorios[i] + "\\gif_to_jpg_" + str(x) + ".jpg",
image,
[int(cv2.IMWRITE_JPEG_QUALITY), 100],
)
os.remove(dir + "\\" + diretorios[i] + "\\" + arquivos[x])
                print(
                    "The file - "
                    + dir
                    + "\\"
                    + diretorios[i]
                    + "\\"
                    + arquivos[x]
                    + " - was actually a GIF."
                )
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - unbalanced" # source folder
# gif_to_jpg(dir_teste)
# gif_to_jpg(dir_unbalanced)
# gif_to_jpg(dir_oversampling)
# gif_to_jpg(dir_undersampling)
gif_to_jpg(dir_meansampling)
## renaming .jpeg to .jpg
import imageio
def jpeg_to_jpg(dir):
    # diretorios = os.listdir(dir_original) # list only the files from the path.
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list only the files from the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".jpeg")]
for x in range(len(arquivos)):
antes = dir + "\\" + diretorios[i] + "\\" + arquivos[x]
nome = arquivos[x].split(".")
depois = dir + "\\" + diretorios[i] + "\\" + nome[0] + "_jpeg_to_jpg.jpg"
os.rename(antes, depois)
jpeg_to_jpg(dir_teste)
jpeg_to_jpg(dir_unbalanced)
jpeg_to_jpg(dir_oversampling)
jpeg_to_jpg(dir_undersampling)
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode="nearest",
)
def gerar_amostra_minima(dir, min):
    diretorios = [
        filename
        for filename in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, filename))
    ]
    for i in range(len(diretorios)):
        arquivos = os.listdir(
            dir + "\\" + diretorios[i]
        ) # list only the files from the path.
        arquivos = [arq for arq in arquivos if arq.lower().endswith(".jpg")]
        qtd_gerar = min - len(arquivos)
        if qtd_gerar < 1:
            print(
                "The class ", diretorios[i], "already has the minimum number of images."
            )
continue
else:
for x in range(len(arquivos)):
                ### Generating new images from the original image
                img_caminho = dir + "\\" + diretorios[i] + "\\" + arquivos[x]
                print("Generating images from", img_caminho)
img = load_img(img_caminho) # this is a PIL image
ximg = img_to_array(
img
) # this is a Numpy array with shape (3, 150, 150)
ximg = ximg.reshape(
(1,) + ximg.shape
) # this is a Numpy array with shape (1, 3, 150, 150)
# the .flow() command below generates batches of randomly transformed images
# and saves the results to the `preview/` directory
qtd = 0
for batch in datagen.flow(
ximg,
batch_size=1,
save_to_dir=(dir + "\\" + diretorios[i]),
save_prefix="GEN-",
save_format="jpg",
):
qtd += 1
if qtd >= (qtd_gerar / len(arquivos)):
break # otherwise the generator would loop indefinitely
gerar_amostra_minima(dir_train, 2000)
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import warnings
warnings.filterwarnings("ignore")
from scipy.stats import shapiro
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import FunctionTransformer
from scipy.stats import chi2_contingency
import statsmodels.api as sm
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import (
GridSearchCV,
StratifiedKFold,
cross_val_score,
KFold,
)
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer, roc_auc_score
df_ts = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
df_tr = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
# # Background:
# Literature review shows that top features for ML stone prediction include saturations of uric acid, calcium phosphate, and urinary ammonium. LR has been used in other models to predict stone type classification with success. Osmolality was found to be a predictor in a pediatric population: lower osmolality was consistent with "stone forming" children (2-3000 mOsm/kg H2O). For adults, there is some evidence that >501 and >577 mmol/kg are associated with risk of calcium stone formation. pH has been identified as a good predictor but not the best. One study found specific gravity to be a "good" predictor using null hypothesis testing. Urea and Calcium may be good predictors for specific stone type prediction, but may help with binary classification as well.
# Abraham A, Kavoussi NL, Sui W, Bejan C, Capra JA, Hsi R. Machine Learning Prediction of Kidney Stone Composition Using Electronic Health Record-Derived Features. J Endourol. 2022 Feb;36(2):243-250. doi: 10.1089/end.2021.0211. PMID: 34314237; PMCID: PMC8861926.
# Kavoussi NL, Floyd C, Abraham A, Sui W, Bejan C, Capra JA, Hsi R. Machine Learning Models to Predict 24 Hour Urinary Abnormalities for Kidney Stone Disease. Urology. 2022 Nov;169:52-57. doi: 10.1016/j.urology.2022.07.008. Epub 2022 Jul 16. PMID: 35853510.
# Moreira DM, Friedlander JI, Hartman C, Elsamra SE, Smith AD, Okeke Z. Using 24-hour urinalysis to predict stone type. J Urol. 2013 Dec;190(6):2106-11. doi: 10.1016/j.juro.2013.05.115. Epub 2013 Jun 11. PMID: 23764079.
# Porowski T, Kirejczyk JK, Mrozek P, Protas P, Kozerska A, Łabieniec Ł, Szymański K, Wasilewska A. Upper metastable limit osmolality of urine as a predictor of kidney stone formation in children. Urolithiasis. 2019 Apr;47(2):155-163. doi: 10.1007/s00240-018-1041-2. Epub 2018 Jan 22. PMID: 29356875; PMCID: PMC6420897.
# Silverio AA, Chung WY, Cheng C, Wang HL, Kung CM, Chen J, Tsai VF. The potential of at-home prediction of the formation of urolithiasis by simple multi-frequency electrical conductivity of the urine and the comparison of its performance with urine ion-related indices, color and specific gravity. Urolithiasis. 2016 Apr;44(2):127-34. doi: 10.1007/s00240-015-0812-2. Epub 2015 Aug 13. PMID: 26271351.
# Torricelli FC, De S, Liu X, Calle J, Gebreselassie S, Monga M. Can 24-hour urine stone risk profiles predict urinary stone composition? J Endourol. 2014 Jun;28(6):735-8. doi: 10.1089/end.2013.0769. Epub 2014 Feb 14. PMID: 24460026.
# # EDA
# 1. Univaraiate Analysis (centrality, dispersion, distribution)
# 2. Bivariate Analysis (correlation, covariance)
# 3. Multivariate Analysis (correlations matrix, covariance matrix)
# ## Univariate Analysis
print(df_ts.info())
# # Test Dataset Info
# ## 276 urine specimens
# * id = unique ID
# * gravity = specific gravity, the density of the urine relative to water
# * ph = fixed int, pH, the negative logarithm of the hydrogen ion
# * osmo = osmolarity (mOsm), a unit used in biology and medicine but not in physical chemistry. Osmolarity is proportional to the concentration of molecules in solution
# * cond = conductivity (mMho milliMho). One Mho is one reciprocal Ohm. Conductivity is proportional to the concentration of charged ions in solution
# * urea = urea concentration per millimoles per liter
# * calc = calcium concentration in millimoles liter
# ## Null
# * No null values
print(df_tr.info())
# # Train Dataset Info
# ## 414 urine specimens
# * id = unique ID
# * gravity = specific gravity, the density of the urine relative to water
# * ph = fixed int, pH, the negative logarithm of the hydrogen ion
# * osmo = osmolarity (mOsm), a unit used in biology and medicine but not in physical chemistry. Osmolarity is proportional to the concentration of molecules in solution
# * cond = conductivity (mMho milliMho). One Mho is one reciprocal Ohm. Conductivity is proportional to the concentration of charged ions in solution
# * urea = urea concentration per millimoles per liter
# * calc = calcium concentration in millimoles liter
# * target = binary, 0 no kidney stone, 1 kidney stone
# ## Null
# * No null values
# List features and target variables.
features = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
target = ["target"]
# ## Centrality and Dispersion
df_tr[features].describe().T.style.bar(subset=["mean", "std", "min", "max"])
df_ts[features].describe().T.style.bar(subset=["mean", "std", "min", "max"])
# ## Shape of Distribution
plt.style.use("dark_background")
fig, ax = plt.subplots(2, 3, figsize=(20, 10))
for i, feature in enumerate(features):
sns.distplot(df_tr[feature], ax=ax[i // 3, i % 3])
plt.show()
# **Distribution Analysis**
# - gravity has a narrow dispersion, somewhat bimodal, not normal
# - ph has larger dispersion within ph scale, peaked distribution at 5.5, right tail
# - osmo wide dispersion, nearly bimodal, large std
# - cond wide dispersion, peak at ~25, left tail
# - urea wide dispersion, bimodal in presentation, right tail
# - calc skewed towards ~1, right tail, large std
# Are the feature distributions normal? Graphical representation does not appear normally distributed.
# Therefore, test for normality with Shapiro-Wilk test.
for feature in features:
stat, p = shapiro(df_tr[feature])
print("Statistics=%.3f, p=%.3f" % (stat, p))
if p > 0.05:
print("Training Sample looks Gaussian (fail to reject H0)")
else:
print("Training Sample does not look Gaussian (reject H0)")
print("")
for feature in features:
stat, p = shapiro(df_ts[feature])
print("Statistics=%.3f, p=%.3f" % (stat, p))
if p > 0.05:
print("Test Sample looks Gaussian (fail to reject H0)")
else:
print("Test Sample does not look Gaussian (reject H0)")
print("")
# # Univariate Analysis Complete
# * All features will need to be normalized
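# A minimal sketch of the normalization mentioned above, using the StandardScaler that is
# already imported: fit on the train set only and apply to the test set to avoid leakage.
# These scaled frames are illustrative and are not used by the models fitted later on.
scaler = StandardScaler()
df_tr_scaled = pd.DataFrame(
    scaler.fit_transform(df_tr[features]), columns=features, index=df_tr.index
)
df_ts_scaled = pd.DataFrame(
    scaler.transform(df_ts[features]), columns=features, index=df_ts.index
)
df_tr_scaled.describe().T[["mean", "std"]].head()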
# # Bivariate Analysis and Multivariate Analysis
# 1. Correlation
# 2. Covariance
display(df_tr[features].corr().style.background_gradient(cmap="coolwarm"))
display(df_tr[features].cov().style.background_gradient(cmap="coolwarm"))
# Perform multivariate analysis on correlation matrix of features in train set
corr = df_tr[features].corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(
corr,
mask=mask,
cmap=cmap,
vmax=1,
center=0,
square=True,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
)
cov = df_tr[features].cov()
mask = np.triu(np.ones_like(cov, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(
cov,
mask=mask,
cmap=cmap,
vmax=0,
center=0,
square=True,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
)
# # EDA Results
# - Train and test data contain similar values, adequate for training and testing with ML methods
# - Data will need to be normalized prior to predictive modeling for some methods
# - High correlation (with all features) and covariance (with pH) within osmo variable. Consider isolating osmo as univariate predictor or removing from feature set and test for predictive change.
# - Due to lit review revealing osmo as a good predictor, this warrants including in the data set.
# - The impact to the models may be irrelevant for prediction purposes but warrants consideration
# # Analysis of target variable
df_tr[target].value_counts()
fig, ax = plt.subplots(2, 3, figsize=(20, 10))
for i, feature in enumerate(features):
sns.boxplot(x=df_tr["target"], y=df_tr[feature], ax=ax[i // 3, i % 3])
plt.show()
df_tr[features + ["target"]].corr().style.background_gradient(cmap="coolwarm")
# # Chi-Square Analysis for model inclusion
signif_level_1 = 0.05
signif_level_2 = 0.20
result_table = []
for target_col in ["target"]:
for columns in features:
crosstab = pd.crosstab(df_tr[columns], df_tr["target"])
chi2, p, dof, ex = chi2_contingency(crosstab)
if p <= signif_level_1:
result_table.append([columns, target_col, chi2, p, dof])
elif p <= signif_level_2:
result_table.append([columns, target_col, chi2, p, dof])
if not result_table:
print("No p-value less than or equal to 0.20 in the data")
else:
result_table = pd.DataFrame(
result_table,
columns=[
"Column Name",
"Target",
"Chi-Square",
"P-Value",
"Degrees of Freedom",
],
)
display(result_table)
# # Correlation and Chi-Square Analysis
# - It appears that calc has a higher correlation with the target than the other variables
#
# create X and y variables
X_train = df_tr[["gravity", "osmo", "cond", "urea", "calc"]]
y_train = df_tr["target"]
X_test = df_ts.drop(columns=["id"])
X_train = sm.add_constant(X_train)
model = sm.OLS(y_train, X_train).fit()
predictions = model.predict(X_train)
model.summary()
# # LR model summary
# - Calc is the only significant predictor in the LR model. Investigate further with RFE.
#
linear_model = LinearRegression()
kf = KFold(n_splits=10, shuffle=True)
rfe_cv = RFECV(estimator=linear_model, step=1, cv=kf, scoring="neg_mean_squared_error")
rfe_cv.fit(X_train, y_train)
# Print the optimal number of features
print("Optimal number of features: ", rfe_cv.n_features_)
# Print the selected features
selected_features = [
feature for feature, support in zip(X_train.columns, rfe_cv.support_) if support
]
print("Selected features: ", selected_features)
X_train_selected = X_train[["gravity", "calc"]]
X_test_selected = X_test[["gravity", "calc"]]
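# Since the target is binary, a hedged alternative sketch (illustration only, not the
# selection actually used below): run RFECV with the already-imported LogisticRegression
# as the estimator and ROC AUC as the scoring metric, after dropping the constant column
# that was added for statsmodels.
X_train_no_const = X_train.drop(columns=["const"])
rfe_logit = RFECV(
    estimator=LogisticRegression(max_iter=1000),
    step=1,
    cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
    scoring="roc_auc",
)
rfe_logit.fit(X_train_no_const, y_train)
print("Logistic RFECV selected:", list(X_train_no_const.columns[rfe_logit.support_]))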
# classifiers = [
# ('Logistic Regression', LogisticRegression(random_state=42), {'C': [0.001, 0.01, 0.1, 1, 10]}),
# ('SVM', SVC(kernel='rbf', probability=True, random_state=42), {'C': [0.1, 1, 10], 'gamma': [0.1, 1, 10]}),
# ('Random Forest', RandomForestClassifier(random_state=42), {'n_estimators': [100, 200], 'max_depth': [3, 5, 7]})
# ]
# fold_range = range(10, 21)
# results = pd.DataFrame(columns=['Classifier', 'Fold', 'Best Params', 'Mean AUC'])
# for name, classifier, param_grid in classifiers:
# for fold in fold_range:
# print(f'Testing {name} with {fold} folds...')
# skf = StratifiedKFold(n_splits=fold, shuffle=True, random_state=42)
# grid_search = GridSearchCV(classifier, param_grid, scoring='roc_auc', cv=skf, n_jobs=-1)
# grid_search.fit(X_train, y_train)
# results = results.append({
# 'Classifier': name,
# 'Fold': fold,
# 'Best Params': grid_search.best_params_,
# 'Mean AUC': grid_search.best_score_
# }, ignore_index=True)
# results.sort_values(by='Mean AUC', ascending=False, inplace=True)
# print(results)
best_classifier = RandomForestClassifier(max_depth=5, n_estimators=100, random_state=42)
# Fit and predict on matching feature sets: drop the constant column that was added for
# statsmodels, and select the same columns from the test frame (which still contains ph).
rf_features = ["gravity", "osmo", "cond", "urea", "calc"]
best_classifier.fit(X_train[rf_features], y_train)
y_pred_proba = best_classifier.predict_proba(X_test[rf_features])[:, 1]
df_ts["target"] = y_pred_proba
result = df_ts[["id", "target"]]
result
result.to_csv("submission.csv", index=False)
|
import sys
sys.path.append("../input/sentence-transformers")
import transformers
import sentence_transformers
print("Transformers:", transformers.__version__)
print("Sentence Transformers Version", sentence_transformers.__version__)
import warnings
from transformers import logging
warnings.filterwarnings("ignore")
logging.set_verbosity_error()
import random, os
import numpy as np
import torch
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed_everything(42)
import datasets
import pandas as pd
from ast import literal_eval
datasets.disable_caching()
data_dir = "/kaggle/input/vispamdataset-v2/preprocessed/"
def load_data(data_dir):
train_df = pd.read_csv(
data_dir + "train.csv", converters={"categories": literal_eval}
)
dev_df = pd.read_csv(data_dir + "dev.csv", converters={"categories": literal_eval})
test_df = pd.read_csv(
data_dir + "test.csv", converters={"categories": literal_eval}
)
train_dataset = datasets.Dataset.from_dict(train_df)
dev_dataset = datasets.Dataset.from_dict(dev_df)
test_dataset = datasets.Dataset.from_dict(test_df)
dataset_dict = datasets.DatasetDict(
{"train": train_dataset, "dev": dev_dataset, "test": test_dataset}
)
return dataset_dict
vispam_datasets = load_data(data_dir)
# # Task 1
import os
import sys
import math
from torch.utils.data import DataLoader
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
model_name = "NlpHUST/vibert4news-base-cased"
model_save_path = f'output/training_nli_{model_name.replace("/", "-")}-task-1'
word_embedding_model = models.Transformer(model_name)
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
train_samples = []
for comment, description, label in zip(
vispam_datasets["train"]["clean_comment"],
vispam_datasets["train"]["clean_description"],
vispam_datasets["train"]["label"],
):
train_samples.append(InputExample(texts=[comment, description], label=label))
train_batch_size = 8
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=2,
)
dev_samples = []
for comment, description, label in zip(
vispam_datasets["dev"]["clean_comment"],
vispam_datasets["dev"]["clean_description"],
vispam_datasets["dev"]["label"],
):
dev_samples.append(InputExample(texts=[comment, description], label=label))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="vispam-dev-task-1"
)
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)
print(f"[Task 1] - Start training with {num_epochs} epochs...")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
test_samples = []
for comment, description, label in zip(
vispam_datasets["test"]["clean_comment"],
vispam_datasets["test"]["clean_description"],
vispam_datasets["test"]["label"],
):
test_samples.append(InputExample(texts=[comment, description], label=label))
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="vispam-test-task-1"
)
test_evaluator(model, output_path=model_save_path)
# # Task 2
import gc
import torch
torch.cuda.empty_cache()
gc.collect()
import os
import sys
import math
from torch.utils.data import DataLoader
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
model_name = "NlpHUST/vibert4news-base-cased"
model_save_path = f'output/training_nli_{model_name.replace("/", "-")}-task-2'
word_embedding_model = models.Transformer(model_name)
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
train_samples = []
for comment, description, label in zip(
vispam_datasets["train"]["clean_comment"],
vispam_datasets["train"]["clean_description"],
vispam_datasets["train"]["spam_label"],
):
train_samples.append(InputExample(texts=[comment, description], label=label))
train_batch_size = 8
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=4,
)
dev_samples = []
for comment, description, label in zip(
vispam_datasets["dev"]["clean_comment"],
vispam_datasets["dev"]["clean_description"],
vispam_datasets["dev"]["spam_label"],
):
dev_samples.append(InputExample(texts=[comment, description], label=label))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="vispam-dev-task-2"
)
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)
print(f"[Task 2] - Start training with {num_epochs} epochs...")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
test_samples = []
for comment, description, label in zip(
vispam_datasets["test"]["clean_comment"],
vispam_datasets["test"]["clean_description"],
vispam_datasets["test"]["spam_label"],
):
test_samples.append(InputExample(texts=[comment, description], label=label))
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="vispam-test-task-2"
)
test_evaluator(model, output_path=model_save_path)
|
# # Import Packages
# Data Handling
import numpy as np
import pandas as pd
# Model Selection
from sklearn.model_selection import train_test_split
# Preprocessing
## Missing Values
from sklearn.impute import SimpleImputer
## Feature Scaling
from sklearn.preprocessing import StandardScaler
## Categorical Encoding
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
# Performance - Classification
from sklearn.metrics import classification_report, cohen_kappa_score
# Export
# # Import Data
train = pd.read_csv("/kaggle/input/DontGetKicked/training.csv")
test = pd.read_csv("/kaggle/input/DontGetKicked/test.csv")
# # Check
train.info()
test.info()
# # Numerical and Categorical Feature Names
numerical_features = (
train.drop(["RefId", "IsBadBuy"], axis=1)
.select_dtypes(include="number")
.columns.tolist()
)
categorical_features = (
train.drop(["RefId", "IsBadBuy"], axis=1)
.select_dtypes(exclude="number")
.columns.tolist()
)
numerical_features
categorical_features
# # Partition Data
y = train["IsBadBuy"]
X = train.drop(["RefId", "IsBadBuy"], axis=1)
# train test split
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=42, stratify=y
)
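# The preprocessing imports above (SimpleImputer, StandardScaler, OneHotEncoder) are not wired together yet. Below is a minimal sketch of how they could be combined with a `ColumnTransformer`; the imputation strategies and encoder settings are illustrative assumptions, not a final preprocessing choice.
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline

# numeric columns: fill missing values with the median, then standardize
numeric_pipe = Pipeline(
    [("impute", SimpleImputer(strategy="median")), ("scale", StandardScaler())]
)
# categorical columns: fill missing values with the most frequent level, then one-hot encode
categorical_pipe = Pipeline(
    [
        ("impute", SimpleImputer(strategy="most_frequent")),
        ("encode", OneHotEncoder(handle_unknown="ignore")),
    ]
)
preprocess = ColumnTransformer(
    [
        ("num", numeric_pipe, numerical_features),
        ("cat", categorical_pipe, categorical_features),
    ]
)
# fit on the training split only, then apply the same transformation to the validation split
X_train_prep = preprocess.fit_transform(X_train)
X_val_prep = preprocess.transform(X_val)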
|
# # 1. Introduction
# Name: Tomasz Abels and Jack Chen
# Username: JackChenXJ
# Score:
# Leaderboard rank:
# # 2. Data
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
print(os.listdir("../input/LANL-Earthquake-Prediction"))
train = pd.read_csv(
"../input/LANL-Earthquake-Prediction/train.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
train_acoustic_data_small = train["acoustic_data"].values[:200000000:100]
train_time_to_failure_small = train["time_to_failure"].values[:200000000:100]
print(train_acoustic_data_small.shape)
print(train_time_to_failure_small.shape)
fig, ax1 = plt.subplots(figsize=(16, 8))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(train_acoustic_data_small, color="b")
ax1.set_ylabel("acoustic_data", color="b")
plt.legend(["acoustic_data"])
ax2 = ax1.twinx()
plt.plot(train_time_to_failure_small, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
# ### 2.1 Dataset
# In this section, we load and explore the dataset.
# ### 2.1.1 Train-test split
# In the cell below, we split the train data into a train set and a test set. Set a value for `test_size` yourself and argue why the test fraction cannot be too small or too large. You can also use k-fold cross validation.
# Secondly, we have set the `random_state` to 102. Can you think of a reason why we set a `random_state` at all?
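# A minimal sketch of such a split on the downsampled arrays above (the 20% test fraction is an assumption; `random_state=102` is kept so the split is reproducible, which is the usual reason for fixing it). Note that for time-ordered data a chronological split or cross-validation over contiguous segments may be preferable.
from sklearn.model_selection import train_test_split

# reshape the 1-D acoustic signal into a single-feature matrix for scikit-learn
X_small = train_acoustic_data_small.reshape(-1, 1)
y_small = train_time_to_failure_small
X_train, X_test, y_train, y_test = train_test_split(
    X_small, y_small, test_size=0.2, random_state=102
)
print(X_train.shape, X_test.shape)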
# ### 2.2 Data Exploration
# Explore the features and target variables of the dataset. Think about making some scatter plots, box plots, histograms or printing the data, but feel free to choose any method that suits you.
# What do you think is the right performance
# metric to use for this dataset? Clearly explain which performance metric you
# choose and why.
# Algorithmic bias can be a real problem in Machine Learning. So based on this,
# should we use the Race and the Sex features in our machine learning algorithm? Explain what you believe.
# ### 2.3 Data Preparation
# This dataset hasn't been cleaned yet: some attributes (features) are in numerical format and some are in categorical format, and there are missing values as well. However, all of Scikit-learn's implementations of these algorithms expect numerical features. Check which features are categorical and use a method to transform them into numerical values. For the numerical data, handle the missing values and normalize the data.
# Note that you are only allowed to use the training data for preprocessing, but you then need to apply the same transformations to the test data too.
# You can use [pipelining](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) to help with the preprocessing.
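# As a sketch of such a pipeline (an illustration, not the notebook's final preprocessing): the transformers are fitted on the training data only and then applied, unchanged, to the test data. The imputation and scaling choices here are assumptions; a categorical-encoding step would be added for non-numeric features.
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler

preprocessing = Pipeline(
    [
        ("impute", SimpleImputer(strategy="median")),  # handle missing values
        ("scale", StandardScaler()),  # normalize numerical features
    ]
)
# fit on the training data only, then reuse the fitted pipeline on the test data
X_train_prepared = preprocessing.fit_transform(X_train)
X_test_prepared = preprocessing.transform(X_test)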
# ## 3. Training and Results
# Briefly introduce the classification algorithms you choose.
# Present your final confusion matrices (2 by 2) and balanced accuracies for both test and training data for all classifiers. Analyse the performance on test and training in terms of bias and variance. Give one advantage and one drawback of the method you use.
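# As a small illustrative sketch of these metrics on toy labels (the arrays below are made up, not model output): the confusion matrix is 2 by 2 for a binary problem, and balanced accuracy is the mean of per-class recall, which is robust to class imbalance.
from sklearn.metrics import confusion_matrix, balanced_accuracy_score

y_true_demo = [0, 0, 1, 1, 1, 0]
y_pred_demo = [0, 1, 1, 1, 0, 0]
print(confusion_matrix(y_true_demo, y_pred_demo))  # rows = true class, columns = predicted class
print(balanced_accuracy_score(y_true_demo, y_pred_demo))  # mean of per-class recall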
# ## 4. Discussion and Conclusion
# Discuss all the choices you made during the process and your final conclusions. Highlight the strong points of your approach, discuss its shortcomings and suggest some future approaches that may improve it. Please be self-critical here. The assignment is not about achieving state-of-the-art performance, but about showing that you have learned the concepts covered during the course.
# my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predicted_data})
# you could use any filename. We choose submission here
# my_submission.to_csv('submission.csv', index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.io
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
mats15s1 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S01/Train/trainData.mat")
mats15s1.items()
mat2s15s1 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S01/Test/testData.mat")
mat2s15s1.items()
X_trains15s1 = mats15s1["trainData"].transpose((2, 1, 0))
X_tests15s1 = mat2s15s1["testData"].transpose((2, 1, 0))
Y_trains15s1 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S01/Train/trainTargets.txt", dtype=int
)
Y_tests15s1 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S01/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s1 = X_trains15s1[:, 50:190, 0:8]
Y_trains15s1 = Y_trains15s1[0:1600]
X_tests15s1 = X_tests15s1[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s1.shape)
print(Y_trains15s1.shape)
print(X_tests15s1.shape)
print(Y_tests15s1.shape)
Y_trains15s1
mats15s2 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S02/Train/trainData.mat")
mats15s2.items()
mat2s15s2 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S02/Test/testData.mat")
mat2s15s2.items()
X_trains15s2 = mats15s2["trainData"].transpose((2, 1, 0))
X_tests15s2 = mat2s15s2["testData"].transpose((2, 1, 0))
Y_trains15s2 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S02/Train/trainTargets.txt", dtype=int
)
Y_tests15s2 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S02/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s2 = X_trains15s2[:, 50:190, 0:8]
Y_trains15s2 = Y_trains15s2[0:1600]
X_tests15s2 = X_tests15s2[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s2.shape)
print(Y_trains15s2.shape)
print(X_tests15s2.shape)
print(Y_tests15s2.shape)
mats15s3 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S03/Train/trainData.mat")
mats15s3.items()
mat2s15s3 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S03/Test/testData.mat")
mat2s15s3.items()
X_trains15s3 = mats15s3["trainData"].transpose((2, 1, 0))
X_tests15s3 = mat2s15s3["testData"].transpose((2, 1, 0))
Y_trains15s3 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S03/Train/trainTargets.txt", dtype=int
)
Y_tests15s3 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S03/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s3 = X_trains15s3[:, 50:190, 0:8]
Y_trains15s3 = Y_trains15s3[0:1600]
X_tests15s3 = X_tests15s3[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s3.shape)
print(Y_trains15s3.shape)
print(X_tests15s3.shape)
print(Y_tests15s3.shape)
mats15s4 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S04/Train/trainData.mat")
mats15s4.items()
mat2s15s4 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S04/Test/testData.mat")
mat2s15s4.items()
X_trains15s4 = mats15s4["trainData"].transpose((2, 1, 0))
X_tests15s4 = mat2s15s4["testData"].transpose((2, 1, 0))
Y_trains15s4 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S04/Train/trainTargets.txt", dtype=int
)
Y_tests15s4 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S04/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s4 = X_trains15s4[:, 50:190, 0:8]
Y_trains15s4 = Y_trains15s4[0:1600]
X_tests15s4 = X_tests15s4[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s4.shape)
print(Y_trains15s4.shape)
print(X_tests15s4.shape)
print(Y_tests15s4.shape)
mats15s5 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S05/Train/trainData.mat")
mats15s5.items()
mat2s15s5 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S05/Test/testData.mat")
mat2s15s5.items()
X_trains15s5 = mats15s5["trainData"].transpose((2, 1, 0))
X_tests15s5 = mat2s15s5["testData"].transpose((2, 1, 0))
Y_trains15s5 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S05/Train/trainTargets.txt", dtype=int
)
Y_tests15s5 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S05/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s5 = X_trains15s5[:, 50:190, 0:8]
Y_trains15s5 = Y_trains15s5[0:1600]
X_tests15s5 = X_tests15s5[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s5.shape)
print(Y_trains15s5.shape)
print(X_tests15s5.shape)
print(Y_tests15s5.shape)
mats15s6 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S06/Train/trainData.mat")
mats15s6.items()
mat2s15s6 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S06/Test/testData.mat")
mat2s15s6.items()
X_trains15s6 = mats15s6["trainData"].transpose((2, 1, 0))
X_tests15s6 = mat2s15s6["testData"].transpose((2, 1, 0))
Y_trains15s6 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S06/Train/trainTargets.txt", dtype=int
)
Y_tests15s6 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S06/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s6 = X_trains15s6[:, 50:190, 0:8]
Y_trains15s6 = Y_trains15s6[0:1600]
X_tests15s6 = X_tests15s6[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s6.shape)
print(Y_trains15s6.shape)
print(X_tests15s6.shape)
print(Y_tests15s6.shape)
mats15s7 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S07/Train/trainData.mat")
mats15s7.items()
mat2s15s7 = scipy.io.loadmat("../input/bciaut-p300/data/SBJ15/S07/Test/testData.mat")
mat2s15s7.items()
X_trains15s7 = mats15s7["trainData"].transpose((2, 1, 0))
X_tests15s7 = mat2s15s7["testData"].transpose((2, 1, 0))
Y_trains15s7 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S07/Train/trainTargets.txt", dtype=int
)
Y_tests15s7 = np.loadtxt(
"../input/bciaut-p300/data/SBJ15/S07/Test/testTargets.txt", dtype=int
)
##print(File_data)
##X_train.shape
X_trains15s7 = X_trains15s7[:, 50:190, 0:8]
Y_trains15s7 = Y_trains15s7[0:1600]
X_tests15s7 = X_tests15s7[:, 50:190, 0:8]
# Y_train = Y_train[0:1600]
print(X_trains15s7.shape)
print(Y_trains15s7.shape)
print(X_tests15s7.shape)
print(Y_tests15s7.shape)
Y_trains15s1.shape
# ##df.info()
X_trains15s1 = np.reshape(
X_trains15s1, (X_trains15s1.shape[0], X_trains15s1.shape[1] * X_trains15s1.shape[2])
)
##X_train = X_train.transpose()
X_tests15s1 = np.reshape(
X_tests15s1, (X_tests15s1.shape[0], X_tests15s1.shape[1] * X_tests15s1.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s1.shape)
print(Y_tests15s1.shape)
print(X_trains15s1.shape)
print(Y_trains15s1.shape)
X_trains15s2 = np.reshape(
X_trains15s2, (X_trains15s2.shape[0], X_trains15s2.shape[1] * X_trains15s2.shape[2])
)
##X_train = X_train.transpose()
X_tests15s2 = np.reshape(
X_tests15s2, (X_tests15s2.shape[0], X_tests15s2.shape[1] * X_tests15s2.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s2.shape)
print(Y_tests15s2.shape)
print(X_trains15s2.shape)
print(Y_trains15s2.shape)
X_trains15s3 = np.reshape(
X_trains15s3, (X_trains15s3.shape[0], X_trains15s3.shape[1] * X_trains15s3.shape[2])
)
##X_train = X_train.transpose()
X_tests15s3 = np.reshape(
X_tests15s3, (X_tests15s3.shape[0], X_tests15s3.shape[1] * X_tests15s3.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s3.shape)
print(Y_tests15s3.shape)
print(X_trains15s3.shape)
print(Y_trains15s3.shape)
X_trains15s4 = np.reshape(
X_trains15s4, (X_trains15s4.shape[0], X_trains15s4.shape[1] * X_trains15s4.shape[2])
)
##X_train = X_train.transpose()
X_tests15s4 = np.reshape(
X_tests15s4, (X_tests15s4.shape[0], X_tests15s4.shape[1] * X_tests15s4.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s4.shape)
print(Y_tests15s4.shape)
print(X_trains15s4.shape)
print(Y_trains15s4.shape)
X_trains15s5 = np.reshape(
X_trains15s5, (X_trains15s5.shape[0], X_trains15s5.shape[1] * X_trains15s5.shape[2])
)
##X_train = X_train.transpose()
X_tests15s5 = np.reshape(
X_tests15s5, (X_tests15s5.shape[0], X_tests15s5.shape[1] * X_tests15s5.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s5.shape)
print(Y_tests15s5.shape)
print(X_trains15s5.shape)
print(Y_trains15s5.shape)
X_trains15s6 = np.reshape(
X_trains15s6, (X_trains15s6.shape[0], X_trains15s6.shape[1] * X_trains15s6.shape[2])
)
##X_train = X_train.transpose()
X_tests15s6 = np.reshape(
X_tests15s6, (X_tests15s6.shape[0], X_tests15s6.shape[1] * X_tests15s6.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s6.shape)
print(Y_tests15s6.shape)
print(X_trains15s6.shape)
print(Y_trains15s6.shape)
X_trains15s7 = np.reshape(
X_trains15s7, (X_trains15s7.shape[0], X_trains15s7.shape[1] * X_trains15s7.shape[2])
)
##X_train = X_train.transpose()
X_tests15s7 = np.reshape(
X_tests15s7, (X_tests15s7.shape[0], X_tests15s7.shape[1] * X_tests15s7.shape[2])
)
##X_test = X_test.transpose()
print(X_tests15s7.shape)
print(Y_tests15s7.shape)
print(X_trains15s7.shape)
print(Y_trains15s7.shape)
# #Y_test.shape
import seaborn as sns
from imblearn.over_sampling import SMOTE
smt = SMOTE()
X_trains15s1, Y_trains15s1 = smt.fit_resample(X_trains15s1, Y_trains15s1)
# sns.countplot(x='label', data=df)
print(X_trains15s1.shape)
print(Y_trains15s1.shape)
X_trains15s2, Y_trains15s2 = smt.fit_resample(X_trains15s2, Y_trains15s2)
# sns.countplot(x='label', data=df)
print(X_trains15s2.shape)
print(Y_trains15s2.shape)
X_trains15s3, Y_trains15s3 = smt.fit_resample(X_trains15s3, Y_trains15s3)
# sns.countplot(x='label', data=df)
print(X_trains15s3.shape)
print(Y_trains15s3.shape)
X_trains15s4, Y_trains15s4 = smt.fit_resample(X_trains15s4, Y_trains15s4)
# sns.countplot(x='label', data=df)
print(X_trains15s4.shape)
print(Y_trains15s4.shape)
X_trains15s5, Y_trains15s5 = smt.fit_resample(X_trains15s5, Y_trains15s5)
# sns.countplot(x='label', data=df)
print(X_trains15s5.shape)
print(Y_trains15s5.shape)
X_trains15s6, Y_trains15s6 = smt.fit_resample(X_trains15s6, Y_trains15s6)
# sns.countplot(x='label', data=df)
print(X_trains15s6.shape)
print(Y_trains15s6.shape)
X_trains15s7, Y_trains15s7 = smt.fit_resample(X_trains15s7, Y_trains15s7)
# sns.countplot(x='label', data=df)
print(X_trains15s7.shape)
print(Y_trains15s7.shape)
mapping = {0: [0, 1], 1: [1, 0]}
# Use the mapping dictionary to convert the input array
Y_trains15s1 = np.array([mapping[x] for x in Y_trains15s1])
Y_trains15s2 = np.array([mapping[x] for x in Y_trains15s2])
Y_trains15s3 = np.array([mapping[x] for x in Y_trains15s3])
Y_trains15s4 = np.array([mapping[x] for x in Y_trains15s4])
Y_trains15s5 = np.array([mapping[x] for x in Y_trains15s5])
Y_trains15s6 = np.array([mapping[x] for x in Y_trains15s6])
Y_trains15s7 = np.array([mapping[x] for x in Y_trains15s7])
mapping = {0: [0, 1], 1: [1, 0]}
# Use the mapping dictionary to convert the input array
Y_tests15s1 = np.array([mapping[x] for x in Y_tests15s1])
Y_tests15s2 = np.array([mapping[x] for x in Y_tests15s2])
Y_tests15s3 = np.array([mapping[x] for x in Y_tests15s3])
Y_tests15s4 = np.array([mapping[x] for x in Y_tests15s4])
Y_tests15s5 = np.array([mapping[x] for x in Y_tests15s5])
Y_tests15s6 = np.array([mapping[x] for x in Y_tests15s6])
Y_tests15s7 = np.array([mapping[x] for x in Y_tests15s7])
Y_trains15s1.shape
Y_tests15s2.shape
X_trains15s1 = np.reshape(X_trains15s1, (-1, 140, 8, 1))
print(X_trains15s1.shape)
# Y_trains15s1 = np.reshape(Y_trains15s1,(-1,1,1))
print(Y_trains15s1.shape)
X_tests15s1 = np.reshape(X_tests15s1, (-1, 140, 8, 1))
print(X_tests15s1.shape)
# Y_tests15s1 = np.reshape(Y_tests15s1,(-1,1,1))
print(Y_tests15s1.shape)
X_trains15s2 = np.reshape(X_trains15s2, (-1, 140, 8, 1))
print(X_trains15s2.shape)
# Y_trains15s2 = np.reshape(Y_trains15s2,(-1,1,1))
print(Y_trains15s2.shape)
X_tests15s2 = np.reshape(X_tests15s2, (-1, 140, 8, 1))
print(X_tests15s2.shape)
# Y_tests15s2 = np.reshape(Y_tests15s2,(-1,1,1))
print(Y_tests15s2.shape)
X_trains15s3 = np.reshape(X_trains15s3, (-1, 140, 8, 1))
print(X_trains15s3.shape)
# Y_trains15s3 = np.reshape(Y_trains15s3,(-1,1,1))
print(Y_trains15s3.shape)
X_tests15s3 = np.reshape(X_tests15s3, (-1, 140, 8, 1))
print(X_tests15s3.shape)
# Y_tests15s3 = np.reshape(Y_tests15s3,(-1,1,1))
print(Y_tests15s3.shape)
X_trains15s4 = np.reshape(X_trains15s4, (-1, 140, 8, 1))
print(X_trains15s4.shape)
# Y_trains15s4 = np.reshape(Y_trains15s4,(-1,1,1))
print(Y_trains15s4.shape)
X_tests15s4 = np.reshape(X_tests15s4, (-1, 140, 8, 1))
print(X_tests15s4.shape)
# Y_tests15s4 = np.reshape(Y_tests15s4,(-1,1,1))
print(Y_tests15s4.shape)
X_trains15s5 = np.reshape(X_trains15s5, (-1, 140, 8, 1))
print(X_trains15s5.shape)
# Y_trains15s5 = np.reshape(Y_trains15s5,(-1,1,1))
print(Y_trains15s5.shape)
X_tests15s5 = np.reshape(X_tests15s5, (-1, 140, 8, 1))
print(X_tests15s5.shape)
# Y_tests15s5 = np.reshape(Y_tests15s5,(-1,1,1))
print(Y_tests15s5.shape)
X_trains15s6 = np.reshape(X_trains15s6, (-1, 140, 8, 1))
print(X_trains15s6.shape)
# Y_trains15s6 = np.reshape(Y_trains15s6,(-1,1,1))
print(Y_trains15s6.shape)
X_tests15s6 = np.reshape(X_tests15s6, (-1, 140, 8, 1))
print(X_tests15s6.shape)
# Y_tests15s6 = np.reshape(Y_tests15s6,(-1,1,1))
print(Y_tests15s6.shape)
X_trains15s7 = np.reshape(X_trains15s7, (-1, 140, 8, 1))
print(X_trains15s7.shape)
# Y_trains15s7 = np.reshape(Y_trains15s7,(-1,1,1))
print(Y_trains15s7.shape)
X_tests15s7 = np.reshape(X_tests15s7, (-1, 140, 8, 1))
print(X_tests15s7.shape)
# Y_tests15s7 = np.reshape(Y_tests15s7,(-1,1,1))
print(Y_tests15s7.shape)
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
X_trains15s1, X_vals15s1, Y_trains15s1, Y_vals15s1 = train_test_split(
X_trains15s1, Y_trains15s1, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
X_trains15s2, X_vals15s2, Y_trains15s2, Y_vals15s2 = train_test_split(
X_trains15s2, Y_trains15s2, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
X_trains15s3, X_vals15s3, Y_trains15s3, Y_vals15s3 = train_test_split(
X_trains15s3, Y_trains15s3, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
X_trains15s4, X_vals15s4, Y_trains15s4, Y_vals15s4 = train_test_split(
X_trains15s4, Y_trains15s4, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
X_trains15s5, X_vals15s5, Y_trains15s5, Y_vals15s5 = train_test_split(
X_trains15s5, Y_trains15s5, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
X_trains15s6, X_vals15s6, Y_trains15s6, Y_vals15s6 = train_test_split(
X_trains15s6, Y_trains15s6, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
X_trains15s7, X_vals15s7, Y_trains15s7, Y_vals15s7 = train_test_split(
X_trains15s7, Y_trains15s7, test_size=0.2, random_state=8
) # 0.25 x 0.8 = 0.2
print("X_train shape: {}".format(X_trains15s1.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s1.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s1.shape))
print("y val shape: {}".format(Y_vals15s1.shape))
print("X_train shape: {}".format(X_trains15s2.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s2.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s2.shape))
print("y val shape: {}".format(Y_vals15s2.shape))
print("X_train shape: {}".format(X_trains15s3.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s3.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s3.shape))
print("y val shape: {}".format(Y_vals15s3.shape))
print("X_train shape: {}".format(X_trains15s4.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s4.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s4.shape))
print("y val shape: {}".format(Y_vals15s4.shape))
print("X_train shape: {}".format(X_trains15s5.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s5.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s5.shape))
print("y val shape: {}".format(Y_vals15s5.shape))
print("X_train shape: {}".format(X_trains15s6.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s6.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s6.shape))
print("y val shape: {}".format(Y_vals15s6.shape))
print("X_train shape: {}".format(X_trains15s7.shape))
##print("X_test shape: {}".format(X_test.shape))
print("y_train shape: {}".format(Y_trains15s7.shape))
##print("y_test shape: {}".format(y_test.shape))
print("X_val shape: {}".format(X_vals15s7.shape))
print("y val shape: {}".format(Y_vals15s7.shape))
import tensorflow as tf
model2 = tf.keras.Sequential()
model2.add(
tf.keras.layers.Conv2D(
32,
(3, 3),
padding="same",
input_shape=X_trains15s1.shape[1:],
activation="relu",
)
)
model2.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model2.add(tf.keras.layers.BatchNormalization())
model2.add(tf.keras.layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model2.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model2.add(tf.keras.layers.BatchNormalization())
model2.add(tf.keras.layers.Conv2D(128, (3, 3), padding="same", activation="relu"))
model2.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model2.add(tf.keras.layers.BatchNormalization())
model2.add(tf.keras.layers.Conv2D(256, (3, 3), padding="same", activation="relu"))
# model2.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model2.add(tf.keras.layers.BatchNormalization())
model2.add(tf.keras.layers.Conv2D(512, (3, 3), padding="same", activation="relu"))
# model2.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model2.add(tf.keras.layers.BatchNormalization())
# flatten the feature maps and use a 2-unit softmax output so the model matches the one-hot labels used later
model2.add(tf.keras.layers.Flatten())
model2.add(tf.keras.layers.Dense(100, activation="relu"))
model2.add(tf.keras.layers.Dense(2, activation="softmax"))
model2.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
X_trains15s1.shape[1:]
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization, SeparableConv2D
from keras.layers import DepthwiseConv2D
from keras.layers import Activation, MaxPooling2D, AveragePooling2D
from keras.layers import Dropout, Flatten, Dense, ELU
from keras import backend as K
# Define the CNN model
model = Sequential()
# Conv2D layer with 8 filters, a (1, 65) kernel, 'same' padding, and linear activation
model.add(
Conv2D(
8,
(1, 65),
padding="same",
activation="linear",
input_shape=X_trains15s1.shape[1:],
)
)
# BatchNormalization
model.add(BatchNormalization())
# DepthwiseConv2D layer with a (1, 8) kernel, depth multiplier 2, 'valid' padding, and linear activation
model.add(
DepthwiseConv2D((1, 8), depth_multiplier=2, padding="valid", activation="linear")
)
# BatchNormalization
model.add(BatchNormalization())
# ELU activation
model.add(Activation("elu"))
# AveragePooling2D layer with a (4, 1) pool size
model.add(AveragePooling2D((4, 1)))
# Dropout layer with p=0.25
model.add(Dropout(0.25))
# SeparableConv2D layer with 16 filters, a (17, 1) kernel, 'same' padding, and linear activation
model.add(SeparableConv2D(16, (17, 1), padding="same", activation="linear"))
# BatchNormalization
model.add(BatchNormalization())
# ELU activation
model.add(Activation("elu"))
# AveragePooling2D layer with an (8, 1) pool size
model.add(AveragePooling2D((8, 1)))
# Dropout layer with p=0.25
model.add(Dropout(0.25))
# Flatten
model.add(Flatten())
# Fully connected layer with N=2 and linear activation
model.add(Dense(2, activation="linear"))
# Softmax activation
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", metrics=["acc"], optimizer="adam")
model.summary()
model2.summary()
# X_tests15s1 = np.reshape(X_tests15s1,(-1,150,8))
BATCH_SIZE = 40
##EPOCHS = 1
print(X_trains15s1.shape)
print(Y_trains15s1.shape)
print(X_tests15s1.shape)
print(Y_tests15s1.shape)
print("for subject 7 session 1")
history1 = model.fit(
X_trains15s1,
Y_trains15s1,
batch_size=40,
epochs=50,
validation_data=(X_tests15s1, Y_tests15s1),
)
##verbose=1)
print("for subject 7 session 2")
history2 = model.fit(
X_trains15s2,
Y_trains15s2,
batch_size=40,
epochs=50,
validation_data=(X_tests15s2, Y_tests15s2),
)
print("for subject 7 session 3")
history3 = model.fit(
X_trains15s3,
Y_trains15s3,
batch_size=40,
epochs=50,
validation_data=(X_tests15s3, Y_tests15s3),
)
print("for subject 7 session 4")
history4 = model.fit(
X_trains15s4,
Y_trains15s4,
batch_size=40,
epochs=50,
validation_data=(X_tests15s4, Y_tests15s4),
)
print("for subject 7 session 5")
history5 = model.fit(
X_trains15s5,
Y_trains15s5,
batch_size=40,
epochs=50,
validation_data=(X_tests15s5, Y_tests15s5),
)
print("for subject 7 session 6")
history6 = model.fit(
X_trains15s6,
Y_trains15s6,
batch_size=40,
epochs=50,
validation_data=(X_tests15s6, Y_tests15s6),
)
print("for subject 7 session 7")
history7 = model.fit(
X_trains15s7,
Y_trains15s7,
batch_size=40,
epochs=50,
validation_data=(X_tests15s7, Y_tests15s7),
)
##history = model.fit(X_train, Y_train, validation_split=0.2, epochs=20, batch_size=40)
##callbacks=callbacks_list,
from matplotlib import pyplot as plt
loss = history1.history["loss"]
val_loss = history1.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
loss = history2.history["loss"]
val_loss = history2.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
loss = history3.history["loss"]
val_loss = history3.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
loss = history4.history["loss"]
val_loss = history4.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
loss = history5.history["loss"]
val_loss = history5.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
loss = history6.history["loss"]
val_loss = history6.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
loss = history7.history["loss"]
val_loss = history7.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
##plt.plot(history.history['accuracy'])
##plt.plot(history.history['val_accuracy'],'bo')
print("Evaluate on test data")
results = model2.evaluate(X_tests15s1, Y_tests15s1, batch_size=128)
print("test loss, test acc:", results)
print("Evaluate on test data")
results = model2.evaluate(X_tests15s2, Y_tests15s2, batch_size=128)
print("test loss, test acc:", results)
print("Evaluate on test data")
results = model2.evaluate(X_tests15s3, Y_tests15s3, batch_size=128)
print("test loss, test acc:", results)
print("Evaluate on test data")
results = model2.evaluate(X_tests15s4, Y_tests15s4, batch_size=128)
print("test loss, test acc:", results)
print("Evaluate on test data")
results = model2.evaluate(X_tests15s5, Y_tests15s5, batch_size=128)
print("test loss, test acc:", results)
print("Evaluate on test data")
results = model2.evaluate(X_tests15s6, Y_tests15s6, batch_size=128)
print("test loss, test acc:", results)
print("Evaluate on test data")
results = model2.evaluate(X_tests15s7, Y_tests15s7, batch_size=128)
print("test loss, test acc:", results)
|
import cv2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
# # Goal of this competition
# We have to detect the InChI from the [Structural formula](https://en.wikipedia.org/wiki/Structural_formula).
# # Why do we check the 3D model?
# - Even if we are not familiar with chemical notation, it is easy to visualize molecules.
# Some elements may be omitted in the skeletal formula, so the molecule you imagine may not match the actual one. For example, hydrogen bonded to carbon may be omitted, but you can see it in the 3D model (the 3D models you can check show the full structure).
# - Develop an intuition for molecular structures.
# This is also very important in machine learning. For example, if you do data augmentation and invert the data blindly, you may end up with a different molecule. Please check the [Enantiomer](https://en.wikipedia.org/wiki/Enantiomer) article.
# # About Structural formulas
# Molecules are made up of atoms bonded together. A structural formula describes the bonds between atoms.
# There are many kinds of atoms. Knowing which elements (kinds of atoms) are present is necessary for understanding the structural formula. Check the [Periodic table](https://en.wikipedia.org/wiki/Periodic_table) for the types of atoms.
# From https://www.kaggle.com/ihelon/molecular-translation-exploratory-data-analysis
def convert_image_id_2_path(image_id: str) -> str:
return "../input/bms-molecular-translation/train/{}/{}/{}/{}.png".format(
image_id[0], image_id[1], image_id[2], image_id
)
def visualize_train_image(image_id, label):
plt.figure(figsize=(10, 8))
image = cv2.imread(convert_image_id_2_path(image_id))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.title(f"{label}", fontsize=14)
plt.axis("off")
plt.show()
train_labels = pd.read_csv("../input/bms-molecular-translation/train_labels.csv")
index = 1
molecule_image_id = train_labels["image_id"].iloc[index]
molecule_InChI = train_labels["InChI"].iloc[index]
visualize_train_image(molecule_image_id, molecule_InChI)
# The images given as data are called skeletal formulas. A skeletal formula represents a molecule with several kinds of lines and element symbols.
# Carbon is not written as a letter. For example, a line that has no letter at its end is a carbon-carbon bond. All other elements are written as letters. Solid (black) wedge triangles indicate bonds extending toward the viewer, and hashed or outlined triangles indicate bonds extending away from the viewer.
# There are several types of bonds: single bonds for single lines, double bonds for double lines, and triple bonds for triple lines.
# # Check 3D molecules on PubChem website
# We can check the chemical & Structual data by [PubChem](https://pubchem.ncbi.nlm.nih.gov). First, we get InChI expression from label. Let's take a look at the 3D model of the very first data (above structural formula) of the training data.
# Note
# The PubChem images shown below were obtained on 3/3/2021 from the [official PubChem website](https://pubchem.ncbi.nlm.nih.gov).
# ## 0. Get InChI for molecule
# Get the InChI of the molecule we want to check as follows.
train_labels = pd.read_csv("../input/bms-molecular-translation/train_labels.csv")
index = 1
molecule_image_id = train_labels["image_id"].iloc[index]
molecule_InChI = train_labels["InChI"].iloc[index]
print(molecule_InChI)
# Let's go to the PubChem site and search for the molecule.
# ## 1. Enter InChI to search box. And search.
# ## 2. Select molecule
# ## 3. Check molecule models
# It may be a little hard to tell from the image, but you can check the 3D model in addition to the structural formula. Please click on the link below to check the results.
# [PubChem CID: 124916588](https://pubchem.ncbi.nlm.nih.gov/compound/124916588)
# # Check 3D models on kaggle notebook
# We can also check the conformation in a Kaggle notebook with py3Dmol.
import py3Dmol
# To query a molecule's information, we need its PubChem CID. We can get it as follows.
#
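# The CID used below was looked up manually on the PubChem website. As a sketch, the lookup could also be attempted programmatically through PubChem's PUG REST service; the endpoint and response handling below are assumptions about that API and require network access, so the call is left commented out.
import requests


def inchi_to_cid(inchi: str) -> str:
    # assumed PUG REST endpoint; the InChI is sent as POST data because it contains special characters
    url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchi/cids/TXT"
    response = requests.post(url, data={"inchi": inchi})
    response.raise_for_status()
    return response.text.strip()


# print(inchi_to_cid(molecule_InChI))  # uncomment to try the lookup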
# Enter CID here!!!
cid_for_query = "cid:124916588"
view = py3Dmol.view(width=680, height=300, query=cid_for_query, linked=False)
view.setStyle({"stick": {}})
view.setBackgroundColor("#f9f4fb")
view.show()
# It is also possible to view different types of 3D models.
view = py3Dmol.view(
width=1000, height=500, query=cid_for_query, viewergrid=(1, 3), linked=False
)
view.setStyle({"line": {"linewidth": 1}}, viewer=(0, 0))
view.setStyle({"stick": {}}, viewer=(0, 1))
view.setStyle({"sphere": {}}, viewer=(0, 2))
view.setBackgroundColor("#ebf4fb", viewer=(0, 0))
view.setBackgroundColor("#f9f4fb", viewer=(0, 1))
view.setBackgroundColor("#e1e1e1", viewer=(0, 2))
view.show()
|
# 
# Even though I lived through the Survivor hype of the 2000's, where every summer camp logo resembled Survivor's, I never understood it. I was one of the few that wasn't tuned in to see whose name was written down every week. Twenty years later, after revamping post-COVID, Survivor returned and my partner and I became hooked. I immediately gravitated to the host extraordinaire Jeff Probst and my fave player that season, Mafia Pastor Shan. After the twists and turns of Season 41, we had to stream as many previous seasons as possible. Now that we're familiar with Boston Rob, Sandra, Tony, and all the other outlandish personalities on the show, I had to review and visualize some data! ...So here we are with this amazing dataset! Let's see what stats and conclusions can be uncovered!
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # plot, chart
import seaborn as sns # additional plot
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# save csv to database
df_seasons = pd.read_csv("/kaggle/input/survivor-cbs-dataset/season_table.csv")
df_contestants = pd.read_csv("/kaggle/input/survivor-cbs-dataset/contestant_table.csv")
df_tribes = pd.read_csv("/kaggle/input/survivor-cbs-dataset/tribe_table.csv")
# creating a function to print info about the databases since there are a couple
def info_printr(database):
size = database.shape
cols = database.columns
print("There are {} rows and {} columns in the database.".format(size[0], size[1]))
for col in cols:
print(col)
info_printr(df_contestants)
info_printr(df_seasons)
info_printr(df_tribes)
# Ok, that is a lot of information! Let's just start with some simple charts before we start merging dataframes and getting fancy.
# # **CONTESTANTS**
# We often debate game strategy when watching Survivor, asking whether staying under the radar is more effective than orchestrating blindsides, or whether the focus should be on idols and challenge wins. The information offered by this dataset leads to the question: how many votes against them do winners have? So let's see!
# create winner database
df_winners = df_contestants.copy().loc[(df_contestants["finish"] == 1)]
# add column that includes contestant names and their respective seasons to help with 2-time winners
df_winners["name_season"] = (
df_winners["contestant_name"] + ", S" + df_winners["num_season"].astype("str")
)
# set up plot size
plt.figure(figsize=(16, 10))
# plot data as bar chart
ax1 = sns.barplot(data=df_winners, x="name_season", y="votes_against", errorbar=None)
# add and customize labels/titles
ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45, ha="right")
ax1.set_title("Sum of Votes [Against] Accumulated by Winners")
ax1.set_xlabel("Contestants (by Season)")
ax1.set_ylabel("Total Votes Against")
# add totals to bars
for i in ax1.containers:
ax1.bar_label(
i,
)
# Well it looks like flying under the radar IS a pretty successful strategy! Definitely important to remember when we start looking into the tribe and season data. Taking a little break from the winners for a second, I've always been curious about the diversity of the show, especially as a viewer that caught the wave late.
# # SEASONS
df_seasons.head()
df_ethnicity = df_seasons[
[
"num_season",
"num_contestants",
"african_american",
"asian_american",
"latin_american",
"poc",
]
]
focus = ["african_american", "asian_american", "latin_american"]
df_melt = pd.melt(df_ethnicity, id_vars="num_season", value_vars=focus)
plt.figure(figsize=(14, 12))
plt.grid(visible=True)
sns.lineplot(
data=df_melt,
x="num_season",
y="value",
hue="variable",
style="variable",
dashes=True,
markers=True,
)
plt.xticks(range(0, df_melt["num_season"].max() + 5, 5))
plt.xlabel("Seasons")
plt.ylabel("Number of Contestants")
plt.title(
"African-, Asian-, and Latin-American Survivor Contestants Through The Seasons"
)
plt.legend(
title="Ethnicity",
labels=["African American", "", "Asian American", "", "Latin American"],
)
# Now to break it out for a better look at each of the groups of focus.
fig = plt.figure(figsize=(14, 10), constrained_layout=True)
fig.set_constrained_layout_pads(h_pad=0.2, wspace=0.25)
plt.suptitle("Survivor Contestants Through The Years")
ax1 = plt.subplot(3, 2, 1)
ax2 = plt.subplot(3, 2, 3)
ax3 = plt.subplot(3, 2, 5)
ax4 = plt.subplot(1, 2, 2)
axs = [ax1, ax2, ax3, ax4]
for ax in axs:
ax.grid()
ax.set_xlabel("Survivor Season")
ax.set_xticks(range(0, 50, 5))
sns.lineplot(
data=df_ethnicity,
x="num_season",
y="african_american",
color="blue",
marker="o",
ax=ax1,
)
ax1.set_ylabel("African-American Contestants")
sns.lineplot(
data=df_ethnicity,
x="num_season",
y="asian_american",
color="green",
marker="o",
ax=ax2,
)
ax2.set_ylabel("Asian-American Contestants")
sns.lineplot(
data=df_ethnicity,
x="num_season",
y="latin_american",
color="orange",
marker="o",
ax=ax3,
)
ax3.set_ylabel("Latin-American Contestants")
sns.lineplot(
data=df_ethnicity, x="num_season", y="poc", color="purple", marker="o", ax=ax4
)
ax4.set_ylabel("Groups Combined")
# Cool! We kind of have a picture of how the number of POC contestants has changed throughout the seasons. Now let's see how that number looks in comparison to the total contestant count!
fig, ax = plt.subplots(figsize=(16, 20))
df_ethnicity.plot(
kind="barh",
x="num_season",
y=["african_american", "asian_american", "latin_american"],
stacked=True,
color=["royalblue", "limegreen", "darkorange"],
position=1,
width=0.4,
ax=ax,
)
df_ethnicity.plot(
kind="barh",
x="num_season",
y="num_contestants",
position=0,
width=0.4,
color="silver",
ax=ax,
)
plt.grid(axis="x")
plt.xticks(range(0, 22, 2))
plt.title("Survivor Contestants")
plt.xlabel("Count of Contestants")
plt.ylabel("Seasons")
plt.legend(
["African American", "Asian American", "Latin American", "Total Contestants"],
loc="upper right",
bbox_to_anchor=(1.12, 1),
)
df_ethnicity["afam_percent"] = (
df_ethnicity["african_american"] / df_ethnicity["num_contestants"]
)
df_ethnicity["asam_percent"] = (
df_ethnicity["asian_american"] / df_ethnicity["num_contestants"]
)
df_ethnicity["latam_percent"] = (
df_ethnicity["latin_american"] / df_ethnicity["num_contestants"]
)
df_ethnicity["poc_percent"] = df_ethnicity["poc"] / df_ethnicity["num_contestants"]
df_ethnicity["non_poc"] = df_ethnicity["num_contestants"] - df_ethnicity["poc"]
df_ethnicity["non_percent"] = df_ethnicity["non_poc"] / df_ethnicity["num_contestants"]
percents = ["afam_percent", "asam_percent", "latam_percent"]
df_melt2 = pd.melt(df_ethnicity, id_vars="num_season", value_vars=percents)
fig, ax = plt.subplots(figsize=(12, 12))
sns.lineplot(
df_melt2, x="num_season", y="value", hue="variable", style="variable", markers=True
)
ax.grid()
ax.set(title="Survivor Contestant Percentages", xlabel="Season", ylabel="Percentage")
ax.set_xticks(range(0, 50, 5))
ax.set_yticklabels(["{:.0%}".format(x) for x in ax.get_yticks()])
ax.legend(
title="Ethnicities",
labels=["African American", "", "Asian American", "", "Latin American"],
loc="upper right",
bbox_to_anchor=(1.2, 1),
)
# This could just be me, but I prefer seeing this information in a pie chart... just easier for me to visualize. I don't think anyone is interested in scrolling through forty-something pie charts, making this the perfect time to focus on one individual season at a time. I also really love interactive code, so we'll see if that can be incorporated as well!
def askNum():
userNum = int(input("Enter the season number of interest (1-43):"))
return userNum
def askPlay():
userPlay = input("Press any key other than 0 to keep playing!")
return userPlay
def plotPie(userNum):
if not 1 <= userNum <= 43:
userNum = np.random.randint(1, 44)
print("A random number has been selected due to invalid user input.")
df_pie = df_ethnicity.loc[df_ethnicity["num_season"] == userNum]
df_pie = df_pie[
["num_season", "afam_percent", "asam_percent", "latam_percent", "non_percent"]
]
labls = ["African American", "Asian American", "Latin American", "Other"]
pie_data = pd.melt(df_pie, id_vars="num_season")
plt.pie(
x=pie_data["value"], autopct=lambda x: "{:.1f}%".format(x) if x > 5 else None
)
plt.title("Survivor Contestants by Ethnicity - Season {}".format(userNum))
plt.legend(labls, loc="upper right", bbox_to_anchor=(1.5, 1.0))
plt.show()
def letsPlay():
play = 1
while play != 0:
# ask for season
user = askNum()
# plot respective chart
plotPie(user)
# ask to keep playing
if askPlay() == "0":
play = 0
letsPlay()
|
# ## Iris dataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from sklearn.datasets import load_iris
iris = load_iris()
features = pd.DataFrame(
data=iris.data, columns=["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"]
)
target = pd.DataFrame(data=iris.target, columns=["Species"])
data = pd.concat([features, target], axis=1)
data.head(5) # first five rows of our dataset
data.info() # we have no null values in the dataset
iris.target_names
plt.style.use("ggplot")
plt.title("Species Distribution")
sns.countplot(x="Species", data=data)
plt.show()
plt.figure(figsize=(10, 7))
plt.title("Species distribution according to sepal dimensions")
sns.scatterplot(
data=data, x="SepalWidth", y="SepalLength", hue="Species", palette="deep"
)
plt.show()
plt.figure(figsize=(10, 7))
plt.title("Species distribution according to petal dimensions")
sns.scatterplot(
data=data, x="PetalWidth", y="PetalLength", hue="Species", palette="deep"
)
plt.show()
fig, axs = plt.subplots(2, 2, figsize=(15, 10))
sns.boxplot(x="Species", y="SepalWidth", data=data, ax=axs[0, 0])
sns.boxplot(x="Species", y="PetalWidth", data=data, ax=axs[0, 1])
sns.boxplot(x="Species", y="SepalLength", data=data, ax=axs[1, 0])
sns.boxplot(x="Species", y="PetalLength", data=data, ax=axs[1, 1])
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, target, random_state=0)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def print_confusion_matrix(confusion_matrix):
print(
"\n\n",
pd.DataFrame(
confusion_matrix,
columns=["Predicted setosa", "Predicted versicolor", "Predicted virginica"],
index=["True setosa", "True versicolor", "True virginica"],
),
)
def Model(model, X_train, X_test, y_train, y_test):
model = model
model.fit(X_train, y_train)
print("Model accuracy on test set: ", model.score(X_test, y_test))
pred = model.predict(X_test)
print_confusion_matrix(confusion_matrix(y_test, pred))
print(
"\n\n",
classification_report(
y_test, pred, target_names=["setosa", "versicolor", "virginica"]
),
)
from sklearn.linear_model import LogisticRegression
print("Logistic Regression model\n\n")
Model(LogisticRegression(), X_train, X_test, y_train, y_test)
print("Softmax Regression model\n\n")
Model(
LogisticRegression(multi_class="multinomial", solver="lbfgs", C=10),
X_train,
X_test,
y_train,
y_test,
)
from sklearn.tree import DecisionTreeClassifier
print("Decision Tree model\n\n")
Model(DecisionTreeClassifier(max_depth=4), X_train, X_test, y_train, y_test)
from sklearn.ensemble import GradientBoostingClassifier
print("Gradient Boosting Classifier model\n\n")
Model(GradientBoostingClassifier(), X_train, X_test, y_train, y_test)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/laptop-price/laptop_price.csv", encoding="ISO-8859-1")
df.head()
df.shape
df.info()
df.describe(include="all")
df.isnull().sum()
df[df.duplicated()]
df["Ram"] = df["Ram"].str.replace("GB", "")
df["Ram"] = df["Ram"].astype(int)
df.head(2)
df["Weight"] = df["Weight"].str.replace("kg", "")
df["Weight"] = df["Weight"].astype(float)
df.head(2)
df.corr()
df = df.drop(["laptop_ID", "Inches"], axis="columns")
df.head(2)
com_stat = df["Company"].value_counts()
com_stat
df.Company = df.Company.apply(lambda x: "other" if x in com_stat[com_stat <= 10] else x)
df.Company.value_counts()
df.TypeName.value_counts()
df.ScreenResolution.value_counts()
df["resolution"] = df["ScreenResolution"].str.extract("(\d+x\d+)")
df.head(2)
df["Touchscreen"] = df["ScreenResolution"].apply(
lambda x: 1 if "Touchscreen" in x else 0
)
df["IPS Panel"] = df["ScreenResolution"].apply(lambda x: 1 if "IPS" in x else 0)
df.head(2)
df = df.drop("ScreenResolution", axis="columns")
df.Cpu.value_counts()
df["CpuType"] = df.Cpu.apply(lambda x: " ".join(x.split()[0:3]))
df["CpuType"].value_counts()
def set_processor(name):
if name == "Intel Core i7" or name == "Intel Core i5" or name == "Intel Core i3":
return name
elif name.split()[0] == "AMD":
return "AMD"
else:
return "other"
df["CpuType"] = df["CpuType"].apply(set_processor)
df.CpuType.value_counts()
df = df.drop("Cpu", axis="columns")
df.head(2)
df.Memory.value_counts()
df1 = df.copy()
df1["Memory"] = df1["Memory"].astype(str).replace("\.0", "", regex=True)
df1["Memory"] = df1["Memory"].str.replace("GB", "")
df1["Memory"] = df1["Memory"].str.replace("TB", "000")
split_memory = df1["Memory"].str.split("+", n=1, expand=True)
df1["memory1"] = split_memory[0]
df1["memory1"] = df1["memory1"].str.strip()
df1["memory2"] = split_memory[1]
df1["HDD1"] = df1["memory1"].apply(lambda x: 1 if "HDD" in x else 0)
df1["SSD1"] = df1["memory1"].apply(lambda x: 1 if "SSD" in x else 0)
df1["Hybrid1"] = df1["memory1"].apply(lambda x: 1 if "Hybrid" in x else 0)
df1["Flash_Storage1"] = df1["memory1"].apply(lambda x: 1 if "Flash Storage" in x else 0)
df1["memory1"] = df1["memory1"].str.replace(r"\D", "")
df1["memory2"].fillna("0", inplace=True)
df1["HDD2"] = df1["memory2"].apply(lambda x: 1 if "HDD" in x else 0)
df1["SSD2"] = df1["memory2"].apply(lambda x: 1 if "SSD" in x else 0)
df1["Hybrid2"] = df1["memory2"].apply(lambda x: 1 if "Hybrid" in x else 0)
df1["Flash_Storage2"] = df1["memory2"].apply(lambda x: 1 if "Flash Storage" in x else 0)
df1["memory2"] = df1["memory2"].str.replace(r"\D", "")
df1["memory1"] = df1["memory1"].astype(int)
df1["memory2"] = df1["memory2"].astype(int)
df1["HDD"] = df1["memory1"] * df1["HDD1"] + df1["memory2"] * df1["HDD2"]
df1["SSD"] = df1["memory1"] * df1["SSD1"] + df1["memory2"] * df1["SSD2"]
df1["Hybrid"] = df1["memory1"] * df1["Hybrid1"] + df1["memory2"] * df1["Hybrid2"]
df1["Flash_Storage"] = (
df1["memory1"] * df1["Flash_Storage1"] + df1["memory2"] * df1["Flash_Storage2"]
)
df1.drop(
columns=[
"memory1",
"memory2",
"HDD1",
"SSD1",
"Hybrid1",
"Flash_Storage1",
"HDD2",
"SSD2",
"Hybrid2",
"Flash_Storage2",
],
inplace=True,
)
df1.sample(2)
df1["gpu"] = df1.Gpu.apply(lambda x: x.split()[0])
df1.sample(2)
df1.OpSys.value_counts()
def set_os(os):
if os == "Windows 10" or os == "Windows 7" or os == "Windows 10 S":
return "Windows"
elif os == "Linux":
return "Linux"
elif os == "macOS" or os == "Mac OS X":
return "macOS"
else:
return "other"
df1.OpSys = df1.OpSys.apply(set_os)
df1.head(2)
df1 = df1.drop(["Memory", "Gpu", "Product"], axis="columns")
df1.head()
df1.isnull().sum()
df2 = df1.copy()
df2 = pd.get_dummies(df2)
X = df2.drop("Price_euros", axis="columns")
y = df2.Price_euros
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=10
)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
lr.score(X_test, y_test)
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
cross_val_score(LinearRegression(), X, y, cv=cv)
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
def find_best_model_using_gridsearchcv(X, y):
algos = {
"linear_regression": {
"model": LinearRegression(),
"params": {"normalize": [True, False]},
},
"lasso": {
"model": Lasso(),
"params": {"alpha": [1, 2], "selection": ["random", "cyclic"]},
},
"decision_tree": {
"model": DecisionTreeRegressor(),
"params": {
"criterion": ["mse", "friedman_mse"],
"splitter": ["best", "random"],
},
},
}
scores = []
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
for algo_name, config in algos.items():
gs = GridSearchCV(
config["model"], config["params"], cv=cv, return_train_score=False
)
gs.fit(X, y)
scores.append(
{
"model": algo_name,
"best_score": gs.best_score_,
"best_params": gs.best_params_,
}
)
return pd.DataFrame(scores, columns=["model", "best_score", "best_params"])
find_best_model_using_gridsearchcv(X, y)
import pickle
with open("laptop_price_prediction_model.pickle", "wb") as f:
pickle.dump(lr, f)
|
# # **Electricity Consumption Using Time Series Analysis**
# Time series analysis is a statistical method that analyses past data over a given duration of time in order to forecast the future. It comprises an ordered sequence of data points at equally spaced intervals. To understand time series data and its analysis, consider the example of airline passenger data, which records the count of passengers over a period of time.
# 
# Here the **Objective** is to build a model to forecast electricity power consumption (the Value column). The data consists of a date/time column and the consumption value. The goal is to predict electricity consumption for the next 6 years, i.e. until 2024.
# **Time Series:**
# A time series is a series of observations taken at particular time intervals (usually equal intervals). Analysis of the series helps us predict future values based on previously observed values. In a time series, we have only 2 variables: time and the variable we want to forecast.
# **Why & where Time Series is used?**
# Time series data can be analysed in order to extract meaningful statistics and other characteristics. It's used in at least the following 4 scenarios:
# 1. Business Forecasting
# 2. Understanding past behavior
# 3. Plan the future
# 4. Evaluate current accomplishment
# **Importance of Time Series Analysis:**
# A large amount of time series data is generated in a variety of fields, and hence the study of time series analysis has a lot of applications. Let us try to understand the importance of time series analysis in different areas.
# 1. Economics
# 2. Finance
# 3. Healthcare
# 4. Environmental Science
# 5. Sales Forecasting
# 6. Weather forecasting
# 7. Earthquake prediction
# 8. Astronomy
# 9. Signal processing
# **Loading the basic libraries**
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima.model import ARIMA  # statsmodels.tsa.arima_model was removed in newer statsmodels
# **Loading Electric Production data set**
elecom = pd.read_csv("../input/electric-production/Electric_Production.csv")
# **Let's check first 5 and last 5 records of data set**
elecom.head(5)
elecom.tail(5)
elecom.shape
elecom.info()
# **There are 397 records and 2 columns in the dataset. There are no null records present. But look at the DATE column: we need to convert it to the datetime datatype.**
from datetime import datetime
elecom["DATE"] = pd.to_datetime(elecom["DATE"], infer_datetime_format=True)
elecom.info()
# **Now, we need to set the DATE column as the index.**
elecomind = elecom.set_index("DATE", inplace=False)
elecomind.head()
# **Let's plot the data**
plt.figure(figsize=(10, 5))
plt.xlabel("Date")
plt.ylabel("Electric Power Consumption")
plt.plot(elecomind)
# **From the above plot, we can see that there is a trend component in the series. Hence, we now check the stationarity of the data.**
# **Let's make one function that checks stationarity by plotting the rolling statistics and running the ADF (Augmented Dickey-Fuller) test. Because we will need to repeat these steps many times, having a function will be very handy.**
def test_stationarity(timeseries):
# Determine rolling statistics
movingAverage = timeseries.rolling(window=12).mean()
movingSTD = timeseries.rolling(window=12).std()
# Plot rolling statistics
plt.figure(figsize=(10, 5))
plt.plot(timeseries, color="blue", label="Original")
plt.plot(movingAverage, color="red", label="Rolling Mean")
plt.plot(movingSTD, color="black", label="Rolling Std")
plt.legend(loc="best")
plt.title("Rolling Mean & Standard Deviation")
plt.show(block=False)
# Perform Dickey–Fuller test:
print("Results of Dickey Fuller Test:")
elecom_test = adfuller(timeseries["Value"], autolag="AIC")
dfoutput = pd.Series(
elecom_test[0:4],
index=[
"Test Statistic",
"p-value",
"#Lags Used",
"Number of Observations Used",
],
)
for key, value in elecom_test[4].items():
dfoutput["Critical Value (%s)" % key] = value
print(dfoutput)
# **Let's determine & plot rolling statistics.**
test_stationarity(elecomind)
# **From the above plot, we can see that the rolling mean itself has a trend component, even though the rolling standard deviation is fairly constant over time.**
# **For a time series to be stationary, both the rolling mean and the rolling standard deviation should remain fairly constant with respect to time.**
# **Both curves need to be roughly parallel to the X-axis, which is not the case here.**
# **We have also conducted the ADF (Augmented Dickey–Fuller) test, whose null hypothesis is that the time series is non-stationary.**
# For the series to be stationary, the ADF test should show:
# 1. a low p-value, so that the null hypothesis of non-stationarity can be rejected
# 2. a test statistic that is below (more negative than) the critical values at the 1%, 5% and 10% levels
# From the above ADF test result, the p-value (around 0.18) is large and the test statistic lies above the critical values. Hence we can safely say that, at the moment, our time series is **NOT STATIONARY**
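# As an illustrative addition (not part of the original notebook), the same decision rule can be read
# off programmatically: reject the null hypothesis of non-stationarity only when the p-value is below
# the chosen significance level (0.05 is assumed here).
adf_stat, adf_pvalue = adfuller(elecomind["Value"], autolag="AIC")[:2]
if adf_pvalue < 0.05:
    print(f"p-value = {adf_pvalue:.4f} -> reject H0: the series looks stationary")
else:
    print(f"p-value = {adf_pvalue:.4f} -> fail to reject H0: the series is non-stationary")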
# ### **Data Transformation To Achieve Stationarity**
# Now, we will have to perform some data transformation to achieve Stationarity. We can perform any of the transformations like taking log scale, square, square root, cube, cube root, time shift, exponential decay, etc.
# Let's perform Log Transformation.
# Basically we need to remove the trend component.
elecom_log = np.log(elecomind)
plt.figure(figsize=(10, 5))
plt.xlabel("Date")
plt.ylabel("Electric Power Consumption")
plt.plot(elecom_log)
# **Here we compute the rolling statistics directly (instead of using the helper function) because we will need the rolling mean on its own for the next computation.**
rollmean_log = elecom_log.rolling(window=12).mean()
rollstd_log = elecom_log.rolling(window=12).std()
plt.figure(figsize=(10, 5))
plt.plot(elecom_log, color="blue", label="Original")
plt.plot(rollmean_log, color="red", label="Rolling Mean")
plt.plot(rollstd_log, color="black", label="Rolling Std")
plt.legend(loc="best")
plt.title("Rolling Mean & Standard Deviation (Logarithmic Scale)")
# From the above graph we can say that we have slightly improved on the previous result and are heading in the right direction.
# Both the log-scaled series and its rolling mean (moving average) still contain the trend component, so subtracting one from the other should remove that trend.
# **Result = log-scale series - log-scale rolling mean -> this can be our de-trended curve.**
elecom_new = elecom_log - rollmean_log
elecom_new.head()
elecom_new.dropna(inplace=True)
elecom_new.head()
# **Let's determine & plot rolling statistics.**
test_stationarity(elecom_new)
# **The above plot confirms that subtracting two related series with similar trend components indeed removes the trend and makes the dataset stationary.**
# The ADF test results also confirm that the transformed series is now **STATIONARY**.
# ### **Time Shift Transformation**
elecom_log_diff = elecom_log - elecom_log.shift()
plt.figure(figsize=(10, 5))
plt.plot(elecom_log_diff)
elecom_log_diff.dropna(inplace=True)
plt.figure(figsize=(10, 5))
plt.plot(elecom_log_diff)
# **Let's determine & plot rolling statistics.**
test_stationarity(elecom_log_diff)
# From the above plot, this is visually the best result so far: the differenced series, its rolling mean and its rolling standard deviation are all essentially flat, i.e. stationary.
# **Let us now decompose the log-scale series into its three components using a library function. Once the components are separated, we can set aside trend and seasonality and inspect the nature of the residual part.**
decomposition = seasonal_decompose(elecom_log)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
plt.figure(figsize=(10, 5))
plt.subplot(411)
plt.plot(elecom_log, label="Original")
plt.legend(loc="best")
plt.subplot(412)
plt.plot(trend, label="Trend")
plt.legend(loc="best")
plt.subplot(413)
plt.plot(seasonal, label="Seasonality")
plt.legend(loc="best")
plt.subplot(414)
plt.plot(residual, label="Residuals")
plt.legend(loc="best")
plt.tight_layout()
# **Some observations consist only of trend and seasonality; for those there is no residual component and the value is NaN, so we drop such rows.**
elecom_decompose = residual
elecom_decompose.dropna(inplace=True)
rollmean_decompose = elecom_decompose.rolling(window=12).mean()
rollstd_decompose = elecom_decompose.rolling(window=12).std()
plt.figure(figsize=(10, 5))
plt.plot(elecom_decompose, color="blue", label="Original")
plt.plot(rollmean_decompose, color="red", label="Rolling Mean")
plt.plot(rollstd_decompose, color="black", label="Rolling Std")
plt.legend(loc="best")
plt.title("Rolling Mean & Standard Deviation")
# ### **Plotting ACF & PACF**
lag_acf = acf(elecom_log_diff, nlags=20)
lag_pacf = pacf(elecom_log_diff, nlags=20, method="ols")
# Plot ACF:
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0, linestyle="--", color="gray")
plt.axhline(y=-1.96 / np.sqrt(len(elecom_log_diff)), linestyle="--", color="gray")
plt.axhline(y=1.96 / np.sqrt(len(elecom_log_diff)), linestyle="--", color="gray")
plt.title("Autocorrelation Function")
# Plot PACF
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0, linestyle="--", color="gray")
plt.axhline(y=-1.96 / np.sqrt(len(elecom_log_diff)), linestyle="--", color="gray")
plt.axhline(y=1.96 / np.sqrt(len(elecom_log_diff)), linestyle="--", color="gray")
plt.title("Partial Autocorrelation Function")
plt.tight_layout()
# From the ACF graph, the curve first crosses the y=0 line around lag 2-3, so from theory we take q = 3. From the PACF graph, the curve also first crosses y=0 around lag 2-3, so we take p = 3.
# (In both graphs the cut-off happens close to 3, hence p and q are set to 3.)
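# A small helper (added here for illustration, not part of the original notebook) that picks the first
# lag at which the correlation falls inside the 95% confidence band, as a rough starting point for q
# (from the ACF) and p (from the PACF).
conf_bound = 1.96 / np.sqrt(len(elecom_log_diff))
def first_lag_inside_band(values, bound=conf_bound):
    for lag in range(1, len(values)):
        if abs(values[lag]) < bound:
            return lag
    return None
print("suggested q (from ACF):", first_lag_inside_band(lag_acf))
print("suggested p (from PACF):", first_lag_inside_band(lag_pacf))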
# **ARIMA is AR + I + MA.** Before fitting the full ARIMA model, let us check the results of the individual AR and MA models. Each fit reports an RSS value; a lower RSS indicates a better model.
# ### **AR Model**
# Making order = (3,1,0)
model1 = ARIMA(elecom_log, order=(3, 1, 0))
results_AR = model1.fit(disp=-1)
plt.figure(figsize=(10, 5))
plt.plot(elecom_log_diff)
plt.plot(results_AR.fittedvalues, color="red")
plt.title("RSS: %.4f" % sum((results_AR.fittedvalues - elecom_log_diff["Value"]) ** 2))
print("Plotting AR model")
# ### **MA Model**
# Making order = (0,1,3)
model2 = ARIMA(elecom_log, order=(0, 1, 3))
plt.figure(figsize=(10, 5))
results_MA = model2.fit(disp=-1)
plt.plot(elecom_log_diff)
plt.plot(results_MA.fittedvalues, color="red")
plt.title("RSS: %.4f" % sum((results_MA.fittedvalues - elecom_log_diff["Value"]) ** 2))
print("Plotting MA model")
# ### **AR+I+MA = ARIMA Model**
# Making order = (3,1,3)
model = ARIMA(elecom_log, order=(3, 1, 3))
plt.figure(figsize=(10, 5))
results_ARIMA = model.fit(disp=-1)
plt.plot(elecom_log_diff)
plt.plot(results_ARIMA.fittedvalues, color="red")
plt.title(
"RSS: %.4f" % sum((results_ARIMA.fittedvalues - elecom_log_diff["Value"]) ** 2)
)
print("Plotting ARIMA model")
# **RSS values:** AR model - 0.8695, MA model - 1.2793,
# ARIMA model - 0.5227
# By combining AR and MA into ARIMA, the RSS drops to 0.5227, lower than either individual model, indicating that ARIMA is better than its component models.
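# The RSS comparison can also be automated; the loop below is an optional sketch (an addition to the
# original notebook) that refits a few candidate (p, d, q) orders and reports the RSS of each one,
# skipping any order that fails to converge.
for order in [(3, 1, 0), (0, 1, 3), (3, 1, 3)]:
    try:
        fit = ARIMA(elecom_log, order=order).fit(disp=-1)
        rss = sum((fit.fittedvalues - elecom_log_diff["Value"]) ** 2)
        print("ARIMA%s RSS: %.4f" % (str(order), rss))
    except Exception as err:
        print("ARIMA%s failed to fit: %s" % (str(order), err))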
# With the ARIMA model built, we now generate predictions. Before plotting them, we need to convert the predictions back to the original scale, because the model was fitted on log-transformed data.
# ### **Prediction & Reverse Transformation**
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
predictions_ARIMA_diff.head()
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
predictions_ARIMA_diff_cumsum.head()
predictions_ARIMA_log = pd.Series(elecom_log["Value"].iloc[0], index=elecom_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(
predictions_ARIMA_diff_cumsum, fill_value=0
)
predictions_ARIMA_log.head()
# ### **Inverse of log is exp**
predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.figure(figsize=(10, 5))
plt.plot(elecomind)
plt.plot(predictions_ARIMA)
# **From the above plot, the fitted values are very close to the actual time series values, indicating a fairly accurate model.**
elecom_log.head()
elecom_log.shape
# **We have 396 monthly data points (about 33 years of data). Now we can forecast an additional 6 years (6 x 12 = 72 data points).**
# **396+72 = 468 records/data points**
results_ARIMA.plot_predict(1, 468)
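# **Optional sketch (an addition, not from the original notebook): pull the 72-step forecast as numbers and map it back to the original scale with np.exp. With this older ARIMA API, forecast() is assumed to return a (forecast, stderr, conf_int) tuple on the log scale used for fitting, and the index is assumed to be monthly ("MS" frequency).**
fc_log, fc_stderr, fc_conf = results_ARIMA.forecast(steps=72)
future_index = pd.date_range(start=elecom_log.index[-1], periods=73, freq="MS")[1:]
forecast_original_scale = pd.Series(np.exp(fc_log), index=future_index)
print(forecast_original_scale.head())
plt.figure(figsize=(10, 5))
plt.plot(elecomind, label="Observed")
plt.plot(forecast_original_scale, label="Forecast (back-transformed)")
plt.legend(loc="best")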
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.preprocessing import PolynomialFeatures
import os
print(os.listdir("../input/insurance"))
data = pd.read_csv("../input/insurance/insurance.csv")
data.head()
# # 1 - Interactions between variables
# # 1) age - children
inter = smf.ols(formula="charges ~ age*children", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "age:children" coefficient is 0.497, so there is no significant interaction.
# # The F-statistic is 45.48 and its p-value is 5.56e-28, so a combination of interactions in the model is significant.
# # 2) age - bmi
inter = smf.ols(formula="charges ~ age*bmi", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "age:bmi" coefficient is 0.524, so there is no significant interaction.
# # The F-statistic is 59.18 and its p-value is 6.45e-36, so a combination of interactions in the model is significant.
# # 3) age - smoker
inter = smf.ols(formula="charges ~ age*smoker", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "age:smoker[T.yes]" coefficient is 0.222, so there is no significant interaction.
# # The F-statistic is 1153. and its p-value is 0.00, so a combination of interactions in the model is significant.
# # 4) sex - smoker (significant)
inter = smf.ols(formula="charges ~ sex*smoker", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "sex[T.male]:smoker[T.yes]" coefficient is 0.003, so there IS significant interaction.
# # The F-statistic is 732.6. and its p-value is 2.15e-281, so a combination of interactions in the model is significant.
# # 5) sex - bmi (considerable)
inter = smf.ols(formula="charges ~ sex*bmi", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "sex[T.male]:bmi" coefficient is 0.091, so there IS considerable interaction.
# # The F-statistic is 20.32 and its p-value is 7.03e-13, so a combination of interactions in the model is significant.
# # 6) bmi - smoker (significant)
inter = smf.ols(formula="charges ~ bmi*smoker", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "bmi:smoker[T.yes]" coefficient is 0.000, so there IS significant interaction.
# # The F-statistic is 1277. and its p-value is 0.00, so a combination of interactions in the model is significant.
# # 7) smoker - region (significant)
inter = smf.ols(formula="charges ~ smoker*region", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "smoker[T.yes]:region[T.southwest]" coefficient is 0.011 and "smoker[T.yes]:region[T.southeast]" coefficient is 0.000, so there IS significant interaction.
# # The F-statistic is 320.1 and its p-value is 6.37e-280, so a combination of interactions in the model is significant.
# # 8) region - children - sex (significant)
inter = smf.ols(formula="charges ~ region*children*sex", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "region[T.northwest]:children:sex[T.male] " coefficient is 0.004, so there IS significant interaction.
# # The F-statistic is 2.278 and its p-value is 0.003, so a combination of interactions in the model is significant.
# # 9) age - smoker - bmi (average)
inter = smf.ols(formula="charges ~ age*smoker*bmi", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "age:smoker[T.yes]:bmi" coefficient is 0.094, so there IS considerable interaction.
# # The F-statistic is 974.3 and its p-value is 0.00, so a combination of interactions in the model is significant.
# # 10) age - smoker - region (considerable)
inter = smf.ols(formula="charges ~ age*smoker*region", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "age:smoker[T.yes]:region[T.southeast]" coefficient is 0.089, so there IS considerable interaction.
# # The F-statistic is 238.4 and its p-value is 0.00, so a combination of interactions in the model is significant.
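# # The pairwise checks above can also be scripted. The cell below is an illustrative addition (not part of the original analysis): it fits every two-way interaction model and collects the p-values of the interaction terms, so candidate interactions can be ranked in a single table.
from itertools import combinations
interaction_pvalues = []
for var1, var2 in combinations(["age", "bmi", "children", "sex", "smoker", "region"], 2):
    fit = smf.ols(formula=f"charges ~ {var1}*{var2}", data=data).fit()
    # keep only the p-values of the interaction terms (their names contain a ':')
    for term, pval in fit.pvalues.items():
        if ":" in term:
            interaction_pvalues.append({"interaction": term, "p_value": pval})
pvalue_table = pd.DataFrame(interaction_pvalues).sort_values("p_value")
print(pvalue_table.head(10))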
# # 2 - Numerical Transformations
# # 1) age - logarithmic (significant)
inter = smf.ols(formula="charges ~ np.log(age)", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "np.log(age)" coefficient is 0.000, so the transformation is significant.
# # 2) children - numeric, order 2 (significant)
inter = smf.ols(formula="charges ~ children + I(children*children)", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "I(children * children)" coefficient is 0.022, so the transformation is significant.
# # 3) bmi - logarithmic (significant)
inter = smf.ols(formula="charges ~ np.log(bmi)", data=data).fit()
print(inter.summary())
# # As we can see, the p-value for "np.log(bmi)" coefficient is 0.000, so the transformation is significant.
# # 3 - Forward Model Selection
data = pd.get_dummies(data, drop_first=True)
data.head()
from mlxtend.feature_selection import SequentialFeatureSelector as sfs
from sklearn.linear_model import LinearRegression
inputDF = data[
[
"age",
"sex_male",
"bmi",
"children",
"smoker_yes",
"region_northwest",
"region_southeast",
"region_southwest",
]
].copy()  # work on a copy so the derived columns added below do not trigger SettingWithCopyWarning
inputDF["sex:smoker"] = inputDF["sex_male"] * inputDF["smoker_yes"]
inputDF["sex:bmi"] = inputDF["sex_male"] * inputDF["bmi"]
inputDF["bmi:smoker"] = inputDF["bmi"] * inputDF["smoker_yes"]
inputDF["smoker:region"] = inputDF["smoker_yes"] * inputDF["region_southeast"]
# inputDF["smoker:region2"] = inputDF["smoker_yes"] * inputDF["region_southwest"]
inputDF["region:children:sex"] = (
inputDF["region_northwest"] * inputDF["children"] * inputDF["sex_male"]
)
inputDF["age:smoker:bmi"] = inputDF["age"] * inputDF["smoker_yes"] * inputDF["bmi"]
inputDF["age:smoker:region"] = (
inputDF["age"] * inputDF["smoker_yes"] * inputDF["region_southeast"]
)
inputDF["age_log"] = np.log(inputDF["age"])
inputDF["bmi_log"] = np.log(inputDF["bmi"])
inputDF["children^2"] = inputDF["children"] ** 2
outputDF = data[["charges"]]
model = sfs(
LinearRegression(),
k_features=5,
forward=True,
verbose=2,
cv=5,
n_jobs=-1,
scoring="r2",
)
model.fit(inputDF, outputDF)
model.k_feature_idx_
model.k_feature_names_
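# # A brief illustrative addition (not in the original notebook): mlxtend records the cross-validated score at every step of the forward search, which can be inspected as a dataframe.
sfs_steps = pd.DataFrame.from_dict(model.get_metric_dict()).T
print(sfs_steps[["feature_names", "avg_score"]])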
# # 4 - Backward Model Selection
backwardModel = sfs(
LinearRegression(),
k_features=5,
forward=False,
verbose=2,
cv=5,
n_jobs=-1,
scoring="r2",
)
backwardModel.fit(inputDF, outputDF)
backwardModel.k_feature_idx_
backwardModel.k_feature_names_
# # 5 - Cross-Validation
# # 1) Forward & Backward Selection Model (same set of predictors generated from 3) and 4))
# # LOOCV:
from sklearn import metrics
from sklearn.model_selection import (
KFold,
cross_val_score,
cross_val_predict,
LeaveOneOut,
)
inputDF_LOOCV = inputDF[["age", "children", "smoker_yes", "bmi:smoker", "age_log"]]
outputDF = data[["charges"]]
model = LinearRegression()
loocv = LeaveOneOut()
rmse = np.sqrt(
-cross_val_score(
        model, inputDF_LOOCV, outputDF, scoring="neg_mean_squared_error", cv=loocv
)
)
print(rmse.mean())
# # 5-fold CV:
kf = KFold(5, shuffle=True, random_state=42)  # pass the splitter itself so shuffle/random_state take effect
rmse = np.sqrt(
    -cross_val_score(
        model, inputDF_LOOCV, outputDF, scoring="neg_mean_squared_error", cv=kf
    )
)
print(rmse.mean())
# # 10-fold CV:
kf = KFold(10, shuffle=True, random_state=42)
rmse = np.sqrt(
    -cross_val_score(
        model, inputDF_LOOCV, outputDF, scoring="neg_mean_squared_error", cv=kf
    )
)
print(rmse.mean())
# # 2) Backward Selection Model
# # LOOCV:
from sklearn import metrics
from sklearn.model_selection import (
KFold,
cross_val_score,
cross_val_predict,
LeaveOneOut,
)
inputDF = data[["age", "bmi", "children", "smoker_yes", "region_southeast"]]
outputDF = data[["charges"]]
model = LinearRegression()
loocv = LeaveOneOut()
rmse = np.sqrt(
-cross_val_score(
model, inputDF, outputDF, scoring="neg_mean_squared_error", cv=loocv
)
)
print(rmse.mean())
# # 5-fold CV:
kf = KFold(5, shuffle=True, random_state=42)  # pass the splitter itself so shuffle/random_state take effect
rmse = np.sqrt(
-cross_val_score(model, inputDF, outputDF, scoring="neg_mean_squared_error", cv=kf)
)
print(rmse.mean())
# # 10-fold CV:
kf = KFold(10, shuffle=True, random_state=42)
rmse = np.sqrt(
-cross_val_score(model, inputDF, outputDF, scoring="neg_mean_squared_error", cv=kf)
)
print(rmse.mean())
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
from tensorflow.keras.applications import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping
import os
import tensorflow as tf
import datetime
import splitfolders
from keras.models import load_model
from distutils.dir_util import copy_tree
import glob
import matplotlib.pyplot as plt
# combine the train and validation folders in order to create test, train, validation folders
fromDirectory = "../input/cats-and-dogs-filtered/cats_and_dogs_filtered/cats_and_dogs_filtered/train"
toDirectory = "dataset"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "../input/cats-and-dogs-filtered/cats_and_dogs_filtered/cats_and_dogs_filtered/validation"
toDirectory = "dataset"
copy_tree(fromDirectory, toDirectory)
# split the combined folder into test, train, validation folders
splitfolders.ratio(
"./dataset",
output="splitted_Dataset",
seed=1337,
ratio=(0.7, 0.15, 0.15),
group_prefix=None,
)
# build the train generator to load and augment the images
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode="nearest",
preprocessing_function=preprocess_input,
)
train_generator = train_datagen.flow_from_directory(
"./splitted_Dataset/train",
target_size=(150, 150),
color_mode="rgb",
batch_size=16,
class_mode="categorical",
)
# build the validation generator
val_datagen = ImageDataGenerator(
rescale=1.0 / 255, preprocessing_function=preprocess_input
)
val_generator = val_datagen.flow_from_directory(
"./splitted_Dataset/val",
target_size=(150, 150),
color_mode="rgb",
batch_size=16,
class_mode="categorical",
)
# using transfer learning on the VGG16 network with imagenet weights
base_model = VGG16(weights="imagenet", include_top=False, input_shape=(150, 150, 3))
# calculate the step per epoch size
step_size_train = train_generator.n // train_generator.batch_size
print("step size train:", step_size_train)
step_size_val = val_generator.n // val_generator.batch_size
print("step size test:", step_size_val)
VGG16_self = Sequential()
VGG16_self.add(base_model)
VGG16_self.add(Flatten())
VGG16_self.add(Dense(64, activation="relu"))
VGG16_self.add(Dense(2, activation="softmax"))
# freeze the base model so that only the newly added layers are trained
base_model.trainable = False
VGG16_self.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
# callbacks for saving the best model and for early stopping
callbacks = [
ModelCheckpoint(
str(datetime.datetime.now()) + "_vgg16.h5",
monitor="val_accuracy",
verbose=1,
save_best_only=True,
save_weights_only=False,
mode="auto",
save_freq="epoch",
),
EarlyStopping(
monitor="val_accuracy", min_delta=0, patience=3, verbose=1, mode="auto"
),
]
# train the model
history = VGG16_self.fit(
train_generator,
steps_per_epoch=step_size_train,
epochs=10,
validation_data=val_generator,
validation_steps=step_size_val,
callbacks=callbacks,
)
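# optional fine-tuning sketch (an addition, not part of the original notebook): once the new head has
# converged, the last convolutional block of VGG16 can be unfrozen and trained with a very low learning
# rate; whether this helps depends on the dataset, so treat it as an experiment
base_model.trainable = True
for layer in base_model.layers:
    # keep everything except the final block (block5) frozen
    layer.trainable = layer.name.startswith("block5")
VGG16_self.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
    metrics=["accuracy"],
)
history_finetune = VGG16_self.fit(
    train_generator,
    steps_per_epoch=step_size_train,
    epochs=3,
    validation_data=val_generator,
    validation_steps=step_size_val,
)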
# load the best model
model = load_model(glob.glob("./*.h5")[0])
# build the test generator
test_datagen = ImageDataGenerator(
rescale=1.0 / 255, preprocessing_function=preprocess_input
)
test_generator = test_datagen.flow_from_directory(
"./splitted_Dataset/test",
target_size=(150, 150),
color_mode="rgb",
batch_size=1,
class_mode="categorical",
)
# evaluate the model on unseen data
scoreSeg = model.evaluate(test_generator)
print("Model Test accuracy: ", scoreSeg[1])
print("Model Test Loss: ", scoreSeg[0])
hist = history.history
acc = hist["accuracy"]
val_acc = hist["val_accuracy"]
epoch = range(len(acc))
loss = hist["loss"]
val_loss = hist["val_loss"]
f, ax = plt.subplots(1, 2, figsize=(16, 8))
ax[0].plot(epoch, acc, "b", label="Training Accuracy")
ax[0].plot(epoch, val_acc, "r", label="Validation Accuracy")
ax[0].legend()
ax[1].plot(epoch, loss, "b", label="Training Loss")
ax[1].plot(epoch, val_loss, "r", label="Validation Loss")
ax[1].legend()
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Load train dataset and make a copy of it
df_train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
# Load test dataset
df_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
id = df_test["Id"]
df_train.drop(["Id"], axis=1, inplace=True)
df_test.drop(["Id"], axis=1, inplace=True)
df_train.shape
df_test.shape
df_train["PoolQC"].unique()
df_train["Alley"].unique()
df_train["MiscFeature"].unique()
# calculate total number of null values in training data
null_train = df_train.isnull().sum().sum()
print(null_train)
# calculate total number of null values in test data
null_test = df_test.isnull().sum().sum()
print(null_test)
# function to HANDLE the missing data in a dataframe
def missing(df):
    # drop these columns because they have many null values or mostly identical values
df = df.drop(["Utilities", "PoolQC", "MiscFeature", "Alley"], axis=1)
# Null value likely means No Fence so fill as "None"
df["Fence"] = df["Fence"].fillna("None")
# Null value likely means No Fireplace so fill as "None"
df["FireplaceQu"] = df["FireplaceQu"].fillna("None")
# Lot frontage is the feet of street connected to property, which is likely similar to the neighbourhood houses, so fill Median value
df["LotFrontage"] = df["LotFrontage"].fillna(df["LotFrontage"].median())
# Null value likely means typical(Typ)
df["Functional"] = df["Functional"].fillna("Typ")
# Only one null value so fill as the most frequent value(mode)
df["KitchenQual"] = df["KitchenQual"].fillna(df["KitchenQual"].mode()[0])
# Only one null value so fill as the most frequent value(mode)
df["Electrical"] = df["Electrical"].fillna(df["Electrical"].mode()[0])
# Very few null value so fill with the most frequent value(mode)
df["SaleType"] = df["SaleType"].fillna(df["SaleType"].mode()[0])
# Null value likely means no masonry veneer
df["MasVnrType"] = df["MasVnrType"].fillna(
"None"
) # so fill as "None" (since categorical feature)
    df["MasVnrArea"] = df["MasVnrArea"].fillna(0)  # so fill as 0
# Only one null value so fill as the most frequent value(mode)
df["Exterior1st"] = df["Exterior1st"].fillna(df["Exterior1st"].mode()[0])
df["Exterior2nd"] = df["Exterior2nd"].fillna(df["Exterior2nd"].mode()[0])
# MSZoning is general zoning classification,Very few null value so fill with the most frequent value(mode)
df["MSZoning"] = df["MSZoning"].fillna(df["MSZoning"].mode()[0])
# Null value likely means no Identified type of dwelling so fill as "None"
df["MSSubClass"] = df["MSSubClass"].fillna("None")
# Null value likely means No Garage, so fill as "None" (since these are categorical features)
for col in ("GarageType", "GarageFinish", "GarageQual", "GarageCond"):
df[col] = df[col].fillna("None")
# Null value likely means No Garage and no cars in garage, so fill as 0
for col in ("GarageYrBlt", "GarageArea", "GarageCars"):
df[col] = df[col].fillna(0)
# Null value likely means No Basement, so fill as 0
for col in (
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
):
df[col] = df[col].fillna(0)
# Null value likely means No Basement, so fill as "None" (since these are categorical features)
for col in ("BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"):
df[col] = df[col].fillna("None")
return df
def add_new_cols(df):
# Areas
df["TotalArea"] = df["GrLivArea"] + df["TotalBsmtSF"]
df["TotalBaths"] = (
df["FullBath"]
+ df["BsmtFullBath"]
+ 0.5 * (df["HalfBath"] + df["BsmtHalfBath"])
)
df["TotalPorch"] = (
df["OpenPorchSF"] + df["EnclosedPorch"] + df["3SsnPorch"] + df["ScreenPorch"]
)
# Logical features
df["Pool"] = df["PoolArea"].apply(lambda x: 1 if x > 0 else 0)
df["2ndFloor"] = df["2ndFlrSF"].apply(lambda x: 1 if x > 0 else 0)
df["Garage"] = df["GarageCars"].apply(lambda x: 1 if x > 0 else 0)
df["Bsmt"] = df["TotalBsmtSF"].apply(lambda x: 1 if x > 0 else 0)
df["Fireplace"] = df["Fireplaces"].apply(lambda x: 1 if x > 0 else 0)
df["Porch"] = df["TotalPorch"].apply(lambda x: 1 if x > 0 else 0)
return df
df_train.shape, df_test.shape
df_train = missing(df_train)
df_test = missing(df_test)
# add the new columns
df_train = add_new_cols(df_train)
df_test = add_new_cols(df_test)
# save the 'SalePrice'column as train_label
train_label = df_train["SalePrice"].reset_index(drop=True)
# # drop 'SalePrice' column from df_train
df_train = df_train.drop(["SalePrice"], axis=1)
# # now df_train contains all training features
# categorical values to numerical values
df_train = pd.get_dummies(df_train, drop_first=True)
df_test = pd.get_dummies(df_test, drop_first=True)
print(f"Train shape: {df_train.shape}")
print(f"Test shape: {df_test.shape}")
df_train, df_test = df_train.align(df_test, join="inner", axis=1)
df_train.shape, df_test.shape
X_test = df_test # testing features
df_train["SalePrice"] = train_label
from sklearn.model_selection import train_test_split
train_set, valid_set = train_test_split(df_train, train_size=0.7, shuffle=False)
X_train = train_set.drop(["SalePrice"], axis=1) # training features
y_train = train_set["SalePrice"].copy() # training label
X_valid = valid_set.drop(["SalePrice"], axis=1) # testing features
y_valid = valid_set["SalePrice"].copy() # testing label
print("X_train", X_train.shape)
print("y_train", y_train.shape)
print("X_valid", X_valid.shape)
print("y_valid", y_valid.shape)
# prepare configuration for cross validation test harness
seed = 7
# prepare models
models = []
from sklearn.linear_model import LinearRegression
models.append(("LinearR", LinearRegression()))
from sklearn.linear_model import Ridge
models.append(("Ridge", Ridge(alpha=0.1)))
from sklearn.linear_model import Lasso
models.append(("Lasso", Lasso(alpha=0.1)))
from sklearn.tree import DecisionTreeRegressor
models.append(("DecisionTree", DecisionTreeRegressor()))
from sklearn.ensemble import RandomForestRegressor
models.append(("RandomForest", RandomForestRegressor()))
from sklearn.neighbors import KNeighborsRegressor
models.append(("KNN", KNeighborsRegressor()))
from sklearn.svm import SVR
models.append(("SVR", SVR()))
from catboost import CatBoostRegressor
models.append(("CatBoost", CatBoostRegressor(verbose=500)))
import xgboost
models.append(("xgboost", xgboost.XGBRegressor()))
from sklearn.ensemble import AdaBoostRegressor
models.append(("AdaBoost", AdaBoostRegressor()))
# evaluate each model in turn
from sklearn.metrics import mean_absolute_error
from sklearn import model_selection
results = []
names = []
scoring = "neg_mean_absolute_error"
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed, shuffle=True)
cv_results = model_selection.cross_val_score(
model, X_train, y_train, cv=kfold, scoring=scoring
)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
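# optional visual comparison of the cross-validation results collected above (an added sketch)
plt.figure(figsize=(12, 5))
plt.boxplot(results, labels=names)
plt.title("Algorithm Comparison (negative MAE)")
plt.xticks(rotation=45)
plt.show()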
model_cat = CatBoostRegressor(verbose=500)
model_cat.fit(X_train, y_train)
# get predictions
pred_opt = model_cat.predict(X_valid)
mae = mean_absolute_error(y_valid, pred_opt)
print("MAE : ", mae)
import optuna
from sklearn.metrics import mean_squared_error
def objective(trial):
param = {}
param["learning_rate"] = trial.suggest_float("learning_rate", 0.001, 0.02)
param["depth"] = trial.suggest_int("depth", 9, 15)
param["l2_leaf_reg"] = trial.suggest_float("l2_leaf_reg", 1.0, 5.5)
param["min_child_samples"] = trial.suggest_categorical(
"min_child_samples", [1, 4, 8, 16, 32]
)
param["grow_policy"] = "Depthwise"
param["iterations"] = 1000
param["use_best_model"] = True
param["eval_metric"] = "RMSE"
param["od_type"] = "iter"
param["od_wait"] = 20
param["random_state"] = RANDOM_SEED
param["logging_level"] = "Silent"
regressor = CatBoostRegressor(**param)
regressor.fit(
X_train.copy(),
y_train.copy(),
eval_set=[(X_valid.copy(), y_valid.copy())],
early_stopping_rounds=EARLY_STOPPING_ROUND,
)
loss = mean_squared_error(y_valid, regressor.predict(X_valid.copy()))
return loss
RANDOM_SEED = 1
EARLY_STOPPING_ROUND = 100
study = optuna.create_study(study_name=f"catboost-seed{RANDOM_SEED}")
study.optimize(objective, n_trials=20, n_jobs=-1, timeout=24000)
study.best_params
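# optional inspection of the search (an added sketch): best objective value and the best few trials
print("Best validation MSE found:", study.best_value)
trials_df = study.trials_dataframe().sort_values("value")
print(trials_df[["number", "value", "params_learning_rate", "params_depth"]].head())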
optimized_regressor = CatBoostRegressor(
learning_rate=study.best_params["learning_rate"],
depth=study.best_params["depth"],
l2_leaf_reg=study.best_params["l2_leaf_reg"],
min_child_samples=study.best_params["min_child_samples"],
grow_policy="Depthwise",
iterations=10000,
    use_best_model=False,  # no eval_set is passed in the final fit below, so best-model selection cannot be used
eval_metric="RMSE",
od_type="iter",
od_wait=20,
random_state=RANDOM_SEED,
logging_level="Silent",
)
df_train = df_train.drop(["SalePrice"], axis=1)
# fit on the full training data; without an eval_set, early stopping does not apply here
optimized_regressor.fit(df_train, train_label)
# get predictions
prediction = optimized_regressor.predict(df_test)
output = pd.DataFrame({"Id": id, "SalePrice": prediction})
output.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import sys
sys.path.append("..")
from PIL import Image
from torchvision import transforms as tfs
from torchvision.transforms import (
Compose,
RandomResizedCrop,
RandomHorizontalFlip,
RandomVerticalFlip,
RandomRotation,
RandomCrop,
ToTensor,
ToPILImage,
CenterCrop,
Resize,
Grayscale,
ColorJitter,
)
import os
def calculate_valid_crop_size(crop_size, blocksize):
return crop_size - (crop_size % blocksize)
path = r"../input/bsds500-v2/BSDS500_v2/BSDS500/BSDS500/train_data"
count = 0
for filename in os.listdir(path):
    # read in one image
# im = Image.open('../input/bsds500-v2/BSDS500_v2/BSDS500/BSDS500/train_data/100007.jpg')
im = Image.open(os.path.join(path, filename))
im_aug = tfs.Compose(
[
# tfs.RandomCrop(calculate_valid_crop_size(96,32)),
tfs.RandomHorizontalFlip(p=0.5),
tfs.RandomVerticalFlip(p=0.5),
tfs.RandomRotation((90, 270)),
tfs.ColorJitter(0.5, 0.5, 0.5),
# tfs.Grayscale(),
# tfs.ToTensor(),
# torchvision.transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)),
]
)
import matplotlib.pyplot as plt
nrows = 5
ncols = 5
figsize = (8, 8)
_, figs = plt.subplots(nrows, ncols, figsize=figsize)
for i in range(nrows):
for j in range(ncols):
new_im = im_aug(im)
count = count + 1
# figs[i][j].imshow(new_im)
# figs[i][j].axes.get_xaxis().set_visible(False)
# figs[i][j].axes.get_yaxis().set_visible(False)
save_dir = "./BSDS500aug_10k/"
if not os.path.exists(save_dir):
os.makedirs(save_dir)
new_im.save(os.path.join(save_dir, str(count) + "_" + filename))
print(
str(count) + "--" + os.path.join(save_dir, str(count) + "_" + filename)
)
# plt.show()
import zipfile
from pathlib import Path
img_root = Path("./BSDS500aug_10k/")
with zipfile.ZipFile("bsds_imgs10k.zip", "w") as z:
for img_name in img_root.iterdir():
z.write(img_name)
print(img_name)
|
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from tqdm import tqdm
DATADIR = "../input/emotion/images/train"
CATEGORIES = ["angry", "disgust", "fear", "happy", "neutral", "sad", "surprise"]
for category in CATEGORIES:  # iterate over the emotion categories
    path = os.path.join(DATADIR, category)  # create the path to each emotion folder
    for img in os.listdir(path):  # iterate over each image in the category
img_array = cv2.imread(
os.path.join(path, img), cv2.IMREAD_GRAYSCALE
) # convert to array
plt.imshow(img_array, cmap="gray") # graph it
plt.show() # display!
break # we just want one for now so break
break # ...and one more!
print(img_array)
print(img_array.shape)
IMG_SIZE = 50
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap="gray")
plt.show()
training_data = []
def create_training_data():
    for category in CATEGORIES:  # iterate over the emotion categories
        path = os.path.join(DATADIR, category)  # create the path to each emotion folder
        class_num = CATEGORIES.index(category)  # class index (0-6) for this emotion
        for img in tqdm(os.listdir(path)):  # iterate over each image in the category
try:
img_array = cv2.imread(
os.path.join(path, img), cv2.IMREAD_GRAYSCALE
) # convert to array
new_array = cv2.resize(
img_array, (IMG_SIZE, IMG_SIZE)
) # resize to normalize data size
training_data.append(
[new_array, class_num]
) # add this to our training_data
except Exception as e: # in the interest in keeping the output clean...
pass
# except OSError as e:
# print("OSErrroBad img most likely", e, os.path.join(path,img))
# except Exception as e:
# print("general exception", e, os.path.join(path,img))
create_training_data()
print(len(training_data))
import random
random.shuffle(training_data)
for sample in training_data[:10]:
print(sample[1])
X = []
y = []
for features, label in training_data:
X.append(features)
y.append(label)
print(X[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
import pickle
pickle_out = open("X.pickle", "wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle", "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle", "rb")
y = pickle.load(pickle_in)
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import pickle
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle", "rb")
y = pickle.load(pickle_in)
X = X / 255.0
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
# 7 emotion classes, so use a softmax output with a sparse categorical loss
model.add(Dense(len(CATEGORIES)))
model.add(Activation("softmax"))
model.compile(
    loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(X, np.array(y), batch_size=32, epochs=3, validation_split=0.1)
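# a small added sketch (not in the original notebook): predict the emotion of a single image and map
# the class index back to its label; it assumes the 7-class softmax head defined above
sample = X[:1]  # first (already scaled) image, shape (1, IMG_SIZE, IMG_SIZE, 1)
probabilities = model.predict(sample)
predicted_class = int(np.argmax(probabilities, axis=1)[0])
print("Predicted emotion:", CATEGORIES[predicted_class])
print("Predicted probability:", probabilities[0][predicted_class])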
|
import csv
import re # preprocess
tokenedPantun = []
with open(
"/kaggle/input/pembangkitan-otomatis-sampiran-pantun/sampiran.csv"
) as csvfile:
reader = csv.reader(csvfile, quoting=csv.QUOTE_NONE)
for row in reader: # each row is a list
tokenedPantun.append(row)
words = []
for pantun in tokenedPantun:
for word in pantun:
words.append(word)
vocab_size = len(set(words))
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(vocab_size))
filekataHubung = open(
"/kaggle/input/pembangkitan-otomatis-sampiran-pantun/kataHubung.txt", "r"
)  # list of connective words (kata hubung), used later to filter rhyme candidates
kataHubung = filekataHubung.read()
kataHubung = kataHubung.split("\n")
kataHubung[:10]
# # Word2Vec
# word embedding stage
import gensim
from gensim.models import Word2Vec
from time import time  # for timing the training
import numpy as np
import tensorflow.keras.utils as ku  # to convert the targets to categorical
from keras.utils import pad_sequences
# use a word2vec model that was pre-trained on pantun and poetry data
w2v_model_sg = gensim.models.KeyedVectors.load_word2vec_format(
"/kaggle/input/pembangkitan-otomatis-sampiran-pantun/modelW2V.bin", binary=True
)
pretrained_weights = w2v_model_sg.vectors
vocab_size, emdedding_size = pretrained_weights.shape
print("Result embedding shape:", pretrained_weights.shape)
# # Preparing the Data
# The data will be split into predictors (X) and targets/labels (y):
# 1. convert every word to an index
# 2. reverse the index order, because generation starts from the last word
# 3. build n-gram sequences
# 4. split into predictors (X) and labels (y)
def word2idx(word):
    if word in w2v_model_sg.key_to_index:  # guard against out-of-vocabulary words
return w2v_model_sg.key_to_index[word]
def idx2word(idx):
return w2v_model_sg.index_to_key[idx]
# convert each word into its numeric index
token_list = []
for pantun in tokenedPantun:
idxOfSentence = []
for word in pantun:
idxWord = word2idx(word)
idxOfSentence.append(idxWord)
token_list.append(idxOfSentence)
print(token_list[503])
len(token_list)
# reverse each sentence, because generation runs from the last word backwards
def reverse(lst):
return [ele for ele in reversed(lst)]
for i in range(len(token_list)):
token_list[i] = reverse(token_list[i])
print(token_list[503])
# n-gram
input_sequences = []
for line in token_list:
for i in range(1, len(line)):
n_gram_sequence = line[: i + 1]
input_sequences.append(n_gram_sequence)
print(input_sequences[:3])
def generate_padded_sequences(input_sequences):
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(
pad_sequences(input_sequences, maxlen=max_sequence_len, padding="pre")
)
predictors, label = input_sequences[:, :-1], input_sequences[:, -1]
label = ku.to_categorical(label, num_classes=vocab_size)
return predictors, label, max_sequence_len
predictors, label, max_sequence_len = generate_padded_sequences(input_sequences)
print(
predictors[0].shape
)  # after padding, every sequence has the maximum length in the dataset (the longest sampiran)
print(label[0].shape)  # number of unique words
print(label.shape)  # final number of samples
print(max_sequence_len)
# # LSTM
from tensorflow import keras
import tensorflow as tf
model = tf.keras.models.load_model(
"/kaggle/input/pembangkitan-otomatis-sampiran-pantun/modelSampiran2-balik.h5"
)
# # Automatic Generation of New Pantun Sampiran
def sentence2idx(sentence):
token_list = []
idxOfSentence = []
    # tokenization
token_list = [word.lower() for line in [sentence] for word in line.split()]
for word in token_list:
if word in w2v_model_sg.key_to_index:
            idxWord = word2idx(word)  # convert the word to its index
idxOfSentence.append(idxWord)
return idxOfSentence
# ## find words whose ending matches the last word of each line of the pantun body
def cariRimaIsi(isiPantun):
isiPantun = isiPantun.lower()
isiPantun = isiPantun.split("<akhirbaris>")
kataTerakhir = []
for i in range(len(isiPantun)):
baris = isiPantun[i].split()
kataTerakhir.append(baris[-1])
    kataTerakhir = kataTerakhir[:2]  # so the <akhirbaris> token is not included
    # for ng-/ny- endings keep three letters, e.g. menganga -> nga rather than ga
    akhiran = []
for kata in kataTerakhir:
if kata[-3:-1] == "ng" or kata[-3:-1] == "ny":
akhiran.append(kata[-3:])
else:
akhiran.append(kata[-2:])
return kataTerakhir, akhiran
kataAkhir, rima = cariRimaIsi(
"<isipantun> Sampah di buang pada tempatnya <akhirBaris> Biar sekolah menjadi asri <akhirBaris> <akhirPantun>\n"
)
print(kataAkhir, rima)
vocab = set(words)
# find words in the vocabulary with the same ending and collect them into a list
def cariKataBerima(kataTerakhir, akhiran):
rimaSama = []
for word in vocab:
if word.endswith(akhiran):
if word not in kataHubung:
rimaSama.append(word)
    if len(rimaSama) == 0:  # if no word shares the ending, fall back to the input word
rimaSama.append(kataTerakhir)
return rimaSama
# choose the last word of the second line by its similarity to the last word of the first line,
# so that the first and second lines share a topic and are logically related
def KataKedua(kataPertama, listBerima2):
fixx = []
kataSerupa = []
    # take the 300 words most similar to the last word of the pantun body
    similiar = w2v_model_sg.most_similar(positive=kataPertama, topn=300)
for wordValue in similiar:
wordValue = list(wordValue)
kataSerupa.append(wordValue[0])
# rima = kataPertama[-2:]
# a = cariKataBerima(kataPertama, rima)
# aConc = np.concatenate((a[0],a[1]))
for kata in kataSerupa:
if kata in listBerima2:
fixx.append(kata)
# if len(fixx) == 0:
# fixx.append(kataTerakhir)
return fixx
kataTerakhir = ["bersyukur", "makmur"]
a = cariKataBerima(kataTerakhir[1], kataTerakhir[1][-2:])
print(KataKedua("kabur", a))
# # Generate the pantun sampiran
# 1. find the most similar words and put them in an array
# 2. match them against the rhyming-word list
# 3. if there is no match (or the rhyming list has only one entry), use that word as is
# 4. pick a random word and try to generate from it
import random
# remove the extra tokens
charReplace = {
"<mulaipantun>": "",
"<sampiranpantun>": "",
"<akhirbaris>": "",
"<akhirpantun>": "",
"<isipantun>": "",
}
def hapusToken(teks):
for i in range(len(teks)):
for key, value in charReplace.items():
teks[i] = teks[i].replace(key, value)
return teks
def generateSatuBaris(kataFix):
sampiranBaris = str(kataFix) + " "
for _ in range(10):
token_list = sentence2idx(sampiranBaris)
token_list = pad_sequences(
[token_list], maxlen=max_sequence_len - 1, padding="pre"
)
predicted = np.argmax(model.predict(token_list), axis=-1)
kata_sampiran = ""
kata_sampiran = idx2word(predicted.item(0))
sampiranBaris += kata_sampiran + " "
cek = sampiranBaris.lower()
        if cek.count("<akhirbaris>") == 1:  # stop once one full line has been generated
break
sampiranBaris = sampiranBaris.split()
if sampiranBaris.count("<akhirbaris>") == 1:
sampiranBaris.remove("<akhirbaris>")
sampiranBaris = hapusToken(sampiranBaris)
    baris = " ".join(sampiranBaris[::-1])  # restore front-to-back word order
baris = baris.capitalize()
return baris
def PreProcessIsi(isiPantun):
isiPantun = isiPantun.lower()
isiPantun = isiPantun.split("<akhirbaris>")
# isiPantun = isiPantun.split("\n")
for i in range(len(isiPantun)):
isiPantun[i] = isiPantun[i].split()
isiPantun[i] = hapusToken(isiPantun[i])
baris3 = " ".join(isiPantun[0])
baris3 = baris3.capitalize()
baris4 = " ".join(isiPantun[1])
baris4 = baris4.capitalize()
isiPantun = baris3 + "\n" + baris4
return isiPantun
def pembangkitan_otomatis(isiPantun, model):
    isiPantun = re.sub(r"[^a-z^A-Z^0-9\n^>< ]", " ", isiPantun)  # remove punctuation
kataTerakhir, rima = cariRimaIsi(isiPantun)
listBerima1 = cariKataBerima(kataTerakhir[0], rima[0])
listBerima2 = cariKataBerima(kataTerakhir[1], rima[1])
    # the last word of the first line is chosen at random to get varied sampiran
kataFix1 = random.choice(listBerima1)
    # pick a last word for the second line that is related to the last word of the first line
if len(KataKedua(kataFix1, listBerima2)) == 0:
kataFix2 = random.choice(listBerima2)
else:
kataFix2 = random.choice(KataKedua(kataFix1, listBerima2))
sampiranBaris1 = generateSatuBaris(kataFix1)
sampiranBaris2 = generateSatuBaris(kataFix2)
    # if a generated line contains only two words, regenerate it with generateSatuBaris
if len(sampiranBaris1.split()) <= 2:
sampiranBaris1 = generateSatuBaris(random.choice(listBerima1))
if len(sampiranBaris2.split()) <= 2:
sampiranBaris2 = generateSatuBaris(random.choice(listBerima2))
sampiranFix = sampiranBaris1 + "\n" + sampiranBaris2
    # pre-process the pantun body
isiPantun = PreProcessIsi(isiPantun)
# return len(sampiranBaris1.split())
return sampiranFix + "\n" + isiPantun
tf.keras.utils.disable_interactive_logging()
baris1 = input("masukkan baris pertama isi pantun : ")
baris2 = input("masukkan baris kedua isi pantun : ")
isi = baris1 + " <akhirbaris> " + baris2 + " <akhirbaris> <akhirpantun>"
n = input("mau buat berapa variasi pantun?")
print("\n")
for i in range(int(n)):
print(pembangkitan_otomatis(isi, model))
print("------------------------\n")
|
# # Overview
# >
# >
# What we did
#
# EDA with count plots, box plots, violin plots and correlation heat maps
# We defined an ensemble of XGBoost, LightGBM, CatBoost and several other classifiers and optimized the blending weights with Optuna.
#
# Conclusion
#
# The ensemble accuracy score with stratified K-fold (k=15) is 0.87420 ± 0.02648, and the model weights are as follows:
#
# Xgboost: 0.45660 ± 0.34364
# LightGBM: 0.41482 ± 0.30189
# Catboost: 0.53435 ± 0.29584
# LogisticRegression: 0.47058 ± 0.30719
# RandomForest: 0.24521 ± 0.29629
# HistGradientBoosting: 0.41429 ± 0.31196
# SVC: 0.28773 ± 0.22673
# KNeighbors: 0.30743 ± 0.29921
#
# Since the target classes are relatively evenly distributed, we believe the ensemble is effective.
# While XGBoost and CatBoost rank sex (gender) as the most important feature, LightGBM shows a completely different trend.
# Beginners are encouraged to refer to the code for basic EDA flow and ensemble methods that will lead to higher scores.
#
# # Table Of Contents
# * [Setup](#setup)
# * [Data Exploration](#eda)
# * [Correlations](#correlations)
# * [Continuous Data](#continuousdata)
# * [True/False Data](#tfdata)
# * [Ordinal Data](#ordinaldata)
# * [Survived Dependency On Features](#surviveddependency)
# * [Feature Engineering](#featureengineering)
# * [Pre Processing](#preprocess)
# * [Ensemble Model (Xgb, LGB, CAT)](#model)
# * [Define Model](#DefineModel)
# * [Optimizer](#Optimizer)
# * [Model Training](#modeltraining)
# * [Model Evaluation](#modelevaluation)
# * [Feature Importances](#FeatureImportances)
# * [Submission](#submission)
# # Setup
# ### Imports
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display, Markdown
from functools import partial
from copy import deepcopy
import gc
# Import libraries for gradient boosting
import optuna
from sklearn.base import BaseEstimator, TransformerMixin
import xgboost as xgb
import lightgbm as lgb
from sklearn.ensemble import (
RandomForestClassifier,
HistGradientBoostingClassifier,
GradientBoostingClassifier,
)
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.svm import NuSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from catboost import CatBoost, CatBoostRegressor, CatBoostClassifier
from catboost import Pool
from category_encoders import (
OneHotEncoder,
OrdinalEncoder,
CountEncoder,
CatBoostEncoder,
)
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold, KFold
# Suppress warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# ### Read In Data
filepath = "/kaggle/input/titanic"
df_sub, df_train, df_test = [
pd.read_csv(x, index_col=[0])
for x in [
os.path.join(filepath, "gender_submission.csv"),
os.path.join(filepath, "train.csv"),
os.path.join(filepath, "test.csv"),
]
]
df = df_train.copy()
# ### Helper Methods
def set_frame_style(df, caption=""):
"""Helper function to set dataframe presentation style."""
return (
df.style.background_gradient(cmap="Greens")
.set_caption(caption)
.set_table_styles(
[
{
"selector": "caption",
"props": [
("color", "darkgreen"),
("font-size", "18px"),
("font-weight", "bold"),
],
}
]
)
)
#
# # Data Exploration
# Don't want to see the useless id column here
cols = df.columns.to_list()
display(set_frame_style(df[cols].head(), "First 5 Rows Of Data"))
display(set_frame_style(df[cols].describe(), "Summary Statistics"))
display(
set_frame_style(
df[cols]
.nunique()
.to_frame()
.rename({0: "Unique Value Count"}, axis=1)
.transpose(),
"Unique Value Counts In Each Column",
)
)
display(
set_frame_style(df[cols].isna().any().to_frame().transpose(), "Columns With Nan")
)
# >
# >
# Binary (0/1) features: Survived, Sex
# Ordinal features: Pclass, SibSp, Parch, Embarked
# Continuous features: Age, Fare
# Age, Cabin and Embarked contain NaNs
# ## **Correlations**
plt.figure(figsize=(12, 6))
ax = sns.heatmap(
df.corr(),
linecolor="green",
linewidth="0.6",
cmap="vlag",
annot=True,
fmt=".2f",
vmin=-1,
vmax=1,
)
ax.set_title("Correlation Heat Map", weight="bold")
plt.tight_layout()
plt.show()
# >
# >
# Survived is not strongly correlated to any feature
# There is also not much correlation between the explanatory variables, but Fare and Pclass are slightly correlated.
# ## **Box plot (Survived, Pclass, Age, Fare)**
continuous_cols = ["Survived", "Pclass", "Age", "Fare"]
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(12, 5))
row = 0
col = 0
crs = ["red", "blue", "green", "orange"]
for i, cc in enumerate(continuous_cols):
ax = sns.boxplot(data=df, y=cc, ax=axs[row, col], color=crs[i])
ax.yaxis.grid()
ax.set_ylabel(cc, weight="bold")
ax = sns.violinplot(data=df, y=cc, ax=axs[row, col + 1], color=crs[i])
ax.yaxis.grid()
ax.set_ylabel(cc, weight="bold")
if col + 1 == 3:
col = 0
row += 1
else:
col += 2
fig.suptitle("Continuous Data Distributions", weight="bold")
fig.tight_layout()
plt.show()
# >
# >
# Note that Fare contains several values that lie far outside the typical range (outliers).
# ## **True/False Data**
# Want to get counts for True/False features
# Start by melting a sub-dataframe for those features
dff = pd.melt(df, value_vars=["Survived", "Sex"])
plt.figure(figsize=(7, 5))
cmap = sns.color_palette("Set2")
ax = sns.countplot(data=dff, x="variable", hue="value", palette=cmap)
ax.yaxis.grid()
ax.set_ylabel("Count", weight="bold")
ax.set_xlabel("Variable", weight="bold")
ax.set_title("True/False Data Counts", weight="bold")
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()
# >
# >
# The target variable (Survived) is not heavily imbalanced.
# The survived/not-survived counts and the male/female counts are of a similar order.
# ## **Ordinal Data**
fig, axs = plt.subplots(nrows=1, ncols=4, figsize=(12, 4))
crs = ["red", "blue", "green", "orange", "purple"]
for i, cc in enumerate(["Pclass", "Parch", "SibSp", "Embarked"]):
ax = sns.countplot(data=df, x=cc, ax=axs[i], color=crs[i])
ax.yaxis.grid()
ax.set_xlabel(cc, weight="bold")
ax.set_ylabel("Count", weight="bold")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
fig.suptitle("Ordinal Data Counts", weight="bold")
fig.tight_layout()
plt.show()
# Plot this apart from the other ordinal features because it has a lot more categories
plt.figure(figsize=(20, 3))
ax = sns.countplot(data=df, x="Cabin", color="purple")
ax.yaxis.grid()
ax.set_title("Cabin", weight="bold")
ax.set_xlabel("Cabin", weight="bold")
ax.set_ylabel("Count", weight="bold")
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
fig.tight_layout()
plt.show()
#
# ## **Survived Dependency On Features**
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(12, 4))
crs = ["red", "blue", "green"]
for i, cc in enumerate(["Pclass", "Age", "Fare"]):
ax = sns.scatterplot(data=df, y=cc, x="Survived", ax=axs[i], color=crs[i])
ax.yaxis.grid()
ax.xaxis.grid()
ax.set_xlabel(cc, weight="bold")
ax.set_ylabel("Survived", weight="bold")
ax.set_title("Survived Vs " + cc, weight="bold")
fig.tight_layout()
plt.show()
# >
# >
# No clear trend between Survived and any of these continuous numeric features
# # Feature Engineering
# >
# >
# Since there are object (string) columns, we change their type and apply an encoder.
# No additional features are created, and NaN values are left as they are.
# ## **Pre-Processing**
df_train, df_test = [
pd.read_csv(x, index_col=[0])
for x in [os.path.join(filepath, "train.csv"), os.path.join(filepath, "test.csv")]
]
target_col = "Survived"
categorical_columns = [
"Pclass",
"Sex",
"SibSp",
"Parch",
]
col_types = df_train[categorical_columns].dtypes
object_columns = [col for col, col_type in col_types.items() if "object" == col_type]
## Label Encoder
# encoder = OrdinalEncoder(cols=categorical_columns)
# train_encoder = encoder.fit_transform(df_train[categorical_columns]).astype(int)
# test_encoder = encoder.transform(df_test[categorical_columns]).astype(int)
## OneHot Encoder
encoder = OneHotEncoder(cols=object_columns)
train_encoder = (
encoder.fit_transform(df_train[categorical_columns]).astype(int).add_suffix("_ohe")
)
test_encoder = (
encoder.transform(df_test[categorical_columns]).astype(int).add_suffix("_ohe")
)
# Concatenate train and original dataframes, and prepare train and test sets
drop_columns = ["Name", "Ticket", "Cabin", "Embarked"]
X_train = df_train.drop([target_col] + drop_columns, axis=1)
y_train = df_train[f"{target_col}"]
X_test = df_test.drop(drop_columns, axis=1)
# Apply Encoder
## Label Encoder
# X_train[object_columns] = train_encoder[object_columns]
# X_test[object_columns] = test_encoder[object_columns]
## OneHot Encoder
X_train = pd.concat([X_train, train_encoder], axis=1)
X_test = pd.concat([X_test, test_encoder], axis=1)
X_train.drop(categorical_columns, axis=1, inplace=True)
X_test.drop(categorical_columns, axis=1, inplace=True)
# StandardScaler
sc = StandardScaler()
X_train[["Age", "Fare"]] = sc.fit_transform(X_train[["Age", "Fare"]])
X_test[["Age", "Fare"]] = sc.transform(X_test[["Age", "Fare"]])
# Fillna
X_train_fillna = X_train.copy()
X_test_fillna = X_test.copy()
X_train_fillna["Age"] = X_train["Age"].fillna(29)
X_train_fillna["Fare"] = X_train["Fare"].fillna(25)
X_test_fillna["Age"] = X_test["Age"].fillna(29)
X_test_fillna["Fare"] = X_test["Fare"].fillna(25)
# Reset index
X_train.reset_index(drop=True, inplace=True)
X_test.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True)
print(f"X_train shape :{X_train.shape} , y_train shape :{y_train.shape}")
print(f"X_test shape :{X_test.shape}")
# Delete the train and test dataframes to free up memory
del df_train, df_test, df
#
# # Ensemble Model (Xgb, LGB, CAT)
# ## **Define Model**
class Classifier:
def __init__(self, n_estimators=100, device="cpu", random_state=0):
self.n_estimators = n_estimators
self.device = device
self.random_state = random_state
self.models = self._define_model()
self.len_models = len(self.models)
def _define_model(self):
xgb_params = {
"n_estimators": self.n_estimators,
"learning_rate": 0.05,
"max_depth": 7,
"subsample": 1.0,
"colsample_bytree": 1.0,
"n_jobs": -1,
"eval_metric": "logloss",
"objective": "binary:logistic",
"verbosity": 0,
"random_state": self.random_state,
}
if self.device == "gpu":
xgb_params["tree_method"] = "gpu_hist"
xgb_params["predictor"] = "gpu_predictor"
lgb_params = {
"n_estimators": self.n_estimators,
"max_depth": 7,
"learning_rate": 0.05,
"subsample": 0.20,
"colsample_bytree": 0.56,
"reg_alpha": 0.25,
"reg_lambda": 5e-08,
"objective": "binary",
"metric": "binary_error",
"boosting_type": "gbdt",
"device": self.device,
"random_state": self.random_state,
}
cb_params = {
"iterations": self.n_estimators,
"depth": 7,
"learning_rate": 0.1,
"l2_leaf_reg": 0.7,
"random_strength": 0.2,
"max_bin": 200,
"od_wait": 65,
"one_hot_max_size": 70,
"grow_policy": "Depthwise",
"bootstrap_type": "Bayesian",
"od_type": "Iter",
"eval_metric": "Logloss",
"loss_function": "Logloss",
"task_type": self.device.upper(),
"random_state": self.random_state,
}
models = {
"xgb": xgb.XGBClassifier(**xgb_params),
"lgb": lgb.LGBMClassifier(**lgb_params),
"cat": CatBoostClassifier(**cb_params),
"lr": LogisticRegression(max_iter=500, random_state=self.random_state),
"rf": RandomForestClassifier(
n_estimators=1000, random_state=self.random_state
),
"hgb": HistGradientBoostingClassifier(
max_iter=2000, random_state=self.random_state
),
"gbdt": GradientBoostingClassifier(
n_estimators=1000, random_state=self.random_state
),
"svc": SVC(gamma="auto", probability=True),
"knn": KNeighborsClassifier(n_neighbors=5),
"mlp": MLPClassifier(random_state=self.random_state, max_iter=1000),
"brf": BalancedRandomForestClassifier(
n_estimators=1000, n_jobs=-1, random_state=self.random_state
),
"gpc": GaussianProcessClassifier(
1.0 * RBF(1.0), random_state=self.random_state
),
}
return models
#
# ## **Optimizer**
class OptunaWeights:
def __init__(self, random_state):
self.study = None
self.weights = None
self.random_state = random_state
def _objective(self, trial, y_true, y_preds):
# Define the weights for the predictions from each model
weights = [trial.suggest_float(f"weight{n}", 0, 1) for n in range(len(y_preds))]
# Calculate the weighted prediction
weighted_pred = np.average(np.array(y_preds).T, axis=1, weights=weights)
# Calculate the AUC score for the weighted prediction
score = roc_auc_score(y_true, weighted_pred)
return score
def fit(self, y_true, y_preds, n_trials=1000):
optuna.logging.set_verbosity(optuna.logging.ERROR)
sampler = optuna.samplers.CmaEsSampler(seed=self.random_state)
self.study = optuna.create_study(
sampler=sampler, study_name="OptunaWeights", direction="maximize"
)
objective_partial = partial(self._objective, y_true=y_true, y_preds=y_preds)
self.study.optimize(objective_partial, n_trials=n_trials)
self.weights = [
self.study.best_params[f"weight{n}"] for n in range(len(y_preds))
]
def predict(self, y_preds):
assert (
self.weights is not None
), "OptunaWeights error, must be fitted before predict"
weighted_pred = np.average(np.array(y_preds).T, axis=1, weights=self.weights)
return weighted_pred
def fit_predict(self, y_true, y_preds, n_trials=1000):
self.fit(y_true, y_preds, n_trials=n_trials)
return self.predict(y_preds)
    def get_weights(self):
        return self.weights
class ThresholdOptimizer:
def __init__(self, y_true, y_preds, random_state):
self.y_true = y_true
self.y_preds = y_preds
self.random_state = random_state
self.sampler = optuna.samplers.CmaEsSampler(seed=self.random_state)
self.study = optuna.create_study(
sampler=self.sampler, study_name="OptunaWeights", direction="maximize"
)
self.objective_partial = partial(
self._objective, y_true=y_true, y_preds=y_preds
)
def _objective(self, trial, y_true, y_preds):
threshold = trial.suggest_float("threshold", 0, 1)
score = accuracy_score(y_true, np.where(y_preds > threshold, 1, 0))
return score
def run_optimization(self, n_trials=1000):
optuna.logging.set_verbosity(optuna.logging.ERROR)
self.study.optimize(self.objective_partial, n_trials=n_trials)
def get_best_threshold(self):
return self.study.best_params["threshold"]
#
# ## **Model Training**
n_splits = 10
random_state = 42
n_estimators = 9999 # 9999
early_stopping_rounds = 100
verbose = False
device = "cpu"
# Initialize an array for storing test predictions
test_predss = np.zeros(X_test.shape[0])
test_predss_onehot = []
ensemble_score = []
weights = []
trained_models = dict(
zip(Classifier().models.keys(), [[] for _ in range(Classifier().len_models)])
)
kf = StratifiedKFold(n_splits=n_splits, random_state=random_state, shuffle=True)
for i, (train_index, val_index) in enumerate(kf.split(X_train, y_train)):
X_train_, X_val = X_train.iloc[train_index], X_train.iloc[val_index]
y_train_, y_val = y_train.iloc[train_index], y_train.iloc[val_index]
    # Get a set of classifier models
classifier = Classifier(n_estimators, device, random_state)
models = classifier.models
# Initialize lists to store oof and test predictions for each base model
oof_preds = []
test_preds = []
# Loop over each base model and fit it to the training data, evaluate on validation data, and store predictions
for name, model in models.items():
if name == "cat_label":
train_pool = Pool(X_train_, y_train_, cat_features=categorical_columns)
test_pool = Pool(X_val, y_val, cat_features=categorical_columns)
model.fit(
train_pool,
eval_set=[test_pool],
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
)
elif name == "lgb_label":
model.fit(
X_train_,
y_train_,
eval_set=[(X_val, y_val)],
categorical_feature=categorical_columns,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
)
elif name in ["xgb", "lgb", "cat"]:
model.fit(
X_train_,
y_train_,
eval_set=[(X_val, y_val)],
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
)
else:
model.fit(X_train_fillna.iloc[train_index], y_train_)
if name in ["xgb", "lgb", "cat", "cat_label", "lgb_label"]:
test_pred = model.predict_proba(X_test)[:, 1]
y_val_pred = model.predict_proba(X_val)[:, 1]
else:
test_pred = model.predict_proba(X_test_fillna)[:, 1]
y_val_pred = model.predict_proba(X_train_fillna.iloc[val_index])[:, 1]
score = roc_auc_score(y_val, y_val_pred)
print(f"{name} model [FOLD-{i}] AUC: {score:.5f}")
oof_preds.append(y_val_pred)
test_preds.append(test_pred)
trained_models[f"{name}"].append(deepcopy(model))
# Use Optuna to find the best ensemble weights
optweights = OptunaWeights(random_state=random_state)
y_val_pred = optweights.fit_predict(y_val.values, oof_preds)
# Use Optuna to find the best threshold
opt = ThresholdOptimizer(y_val, y_val_pred, random_state)
opt.run_optimization(n_trials=2000)
best_threshold = opt.get_best_threshold()
score = accuracy_score(y_val, np.where(y_val_pred > best_threshold, 1, 0))
print(f"Ensemble [FOLD-{i}] Accuracy score {score:.5f}")
ensemble_score.append(score)
weights.append(optweights.weights)
test_predss += optweights.predict(test_preds) / n_splits
test_predss_onehot.append(
np.where(optweights.predict(test_preds) > best_threshold, 1, 0)
)
gc.collect()
#
# ## **Model Evaluation**
# Calculate the mean accuracy score of the ensemble
mean_score = np.mean(ensemble_score)
std_score = np.std(ensemble_score)
print(f"Ensemble Accuracy score {mean_score:.5f} ± {std_score:.5f}")
# Print the mean and standard deviation of the ensemble weights for each model
print("--- Model Weights ---")
mean_weights = np.mean(weights, axis=0)
std_weights = np.std(weights, axis=0)
for name, mean_weight, std_weight in zip(models.keys(), mean_weights, std_weights):
print(f"{name} {mean_weight:.5f} ± {std_weight:.5f}")
#
# ## **Feature Importances**
def visualize_importance(models, feature_cols, title, top=8):
importances = []
feature_importance = pd.DataFrame()
for i, model in enumerate(models):
_df = pd.DataFrame()
_df["importance"] = model.feature_importances_
_df["feature"] = pd.Series(feature_cols)
_df["fold"] = i
_df = _df.sort_values("importance", ascending=False)
_df = _df.head(top)
feature_importance = pd.concat(
[feature_importance, _df], axis=0, ignore_index=True
)
feature_importance = feature_importance.sort_values("importance", ascending=False)
# display(feature_importance.groupby(["feature"]).mean().reset_index().drop('fold', axis=1))
plt.figure(figsize=(12, 4))
sns.barplot(
x="importance",
y="feature",
data=feature_importance,
color="skyblue",
errorbar="sd",
)
plt.xlabel("Importance", fontsize=14)
plt.ylabel("Feature", fontsize=14)
plt.title(f"{title} Feature Importance [Top {top}]", fontsize=18)
plt.grid(True, axis="x")
plt.show()
for name, models in trained_models.items():
if name in ["cat", "xgb"]:
visualize_importance(models, list(X_train.columns), name)
#
# # Submission
# df_sub[f'{target_col}'] = np.where(test_predss > 0.5, 1, 0)
df_sub[f"{target_col}"] = np.median(test_predss_onehot, axis=0).astype(int)
df_sub.to_csv("submission.csv")
df_sub.head(5)
sns.histplot(test_predss)
|
# # Case study guidelines
# ## Scenario
# You are a junior data analyst working in the marketing analyst team at Cyclistic, a bike-share company in Chicago. The director of marketing believes the company’s future success depends on maximizing the number of annual memberships. Therefore,
# your team wants to understand how casual riders and annual members use Cyclistic bikes differently. From these insights, your team will design a new marketing strategy to convert casual riders into annual members. But first, Cyclistic executives
# must approve your recommendations, so they must be backed up with compelling data insights and professional data visualizations.
# ## Characters and teams
# * **Cyclistic:** A bike-share program that features more than 5,800 bicycles and 600 docking stations. Cyclistic sets itself apart by also offering reclining bikes, hand tricycles, and cargo bikes, making bike-share more inclusive to people with disabilities and riders who can’t use a standard two-wheeled bike. The majority of riders opt for traditional bikes; about 8% of riders use the assistive options. Cyclistic users are more likely to ride for leisure, but about 30% use them to commute to work each day.
# * **Lily Moreno:** The director of marketing and your manager. Moreno is responsible for the development of campaigns and initiatives to promote the bike-share program. These may include email, social media, and other channels.
# * **Cyclistic marketing analytics team:** A team of data analysts who are responsible for collecting, analyzing, and reporting data that helps guide Cyclistic marketing strategy. You joined this team six months ago and have been busy learning about Cyclistic’s mission and business goals — as well as how you, as a junior data analyst, can help Cyclistic achieve them.
# * **Cyclistic executive team:** The notoriously detail-oriented executive team will decide whether to approve the recommended marketing program.
# ## About the company
# In 2016, Cyclistic launched a successful bike-share offering. Since then, the program has grown to a fleet of 5,824 bicycles that are geotracked and locked into a network of 692 stations across Chicago. The bikes can be unlocked from one station and
# returned to any other station in the system anytime.
# Until now, Cyclistic’s marketing strategy relied on building general awareness and appealing to broad consumer segments. One approach that helped make these things possible was the flexibility of its pricing plans: single-ride passes, full-day passes, and annual memberships. Customers who purchase single-ride or full-day passes are referred to as casual riders. Customers who purchase annual memberships are Cyclistic members.
# Cyclistic’s finance analysts have concluded that annual members are much more profitable than casual riders. Although the pricing flexibility helps Cyclistic attract more customers, Moreno believes that maximizing the number of annual members will be key to future growth. Rather than creating a marketing campaign that targets all-new customers, Moreno believes there is a very good chance to convert casual riders into members. She notes that casual riders are already aware of the Cyclistic
# program and have chosen Cyclistic for their mobility needs.
# Moreno has set a clear goal: Design marketing strategies aimed at converting casual riders into annual members. In order to do that, however, the marketing analyst team needs to better understand how annual members and casual riders differ, why casual riders would buy a membership, and how digital media could affect their marketing tactics. Moreno and her team are interested in analyzing the Cyclistic historical bike trip data to identify trends.
# Three questions will guide the future marketing program:
# 1. How do annual members and casual riders use Cyclistic bikes differently?
# 2. Why would casual riders buy Cyclistic annual memberships?
# 3. How can Cyclistic use digital media to influence casual riders to become members?
# Moreno has assigned you the first question to answer: How do annual members and casual riders use Cyclistic bikes differently?
# You will produce a report with the following deliverables:
# 1. A clear statement of the business task
# 2. A description of all data sources used
# 3. Documentation of any cleaning or manipulation of data
# 4. A summary of your analysis
# 5. Supporting visualizations and key findings
# 6. Your top three recommendations based on your analysis
# ## Source of data:
# [Data provided by Motivate International Inc.](https://divvy-tripdata.s3.amazonaws.com/index.html) under [this license](https://ride.divvybikes.com/data-license-agreement). The context above was taken from the [Google Data Analytics Specialization course](https://www.coursera.org/google-certificates/data-analytics-certificate) and is a fictional scenario for learning purposes.
# ## ✅ My outputs
# [Dashboard with key indicators](https://)
# [Presentation w/ story telling and appendix](https://)
# # 1. Ask
# **Guiding questions**
# * What is the problem you are trying to solve?
# * How can your insights drive business decisions?
# **Key tasks**
# 1. Identify the business task
# 2. Consider key stakeholders
# **Deliverable**
# A clear statement of the business task
# ## Business task
# Define trip behaviour for casual and member riders and establish their differences. The goal is to understand if members and casual riders have a different motive to use Cyclistic's bikes.
# # 2. Prepare
# *Plan the project, gather the necessary data and ensure it ROCCCs (reliable, original, comprehensive, current, and cited)*
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
paths = [
"/kaggle/input/cyclistic-2022/202201-divvy-tripdata/202201-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202202-divvy-tripdata/202202-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202203-divvy-tripdata/202203-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202204-divvy-tripdata/202204-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202205-divvy-tripdata/202205-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202206-divvy-tripdata/202206-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202207-divvy-tripdata/202207-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202208-divvy-tripdata/202208-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202209-divvy-tripdata/202209-divvy-publictripdata.csv",
"/kaggle/input/cyclistic-2022/202210-divvy-tripdata/202210-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202211-divvy-tripdata/202211-divvy-tripdata.csv",
"/kaggle/input/cyclistic-2022/202212-divvy-tripdata/202212-divvy-tripdata.csv",
]
# we need a numerical index for later in data cleaning.
# so we won't assign the ride_id as the index col.
datasetTrips = pd.DataFrame()
for path in paths:
    datasetTrips = pd.concat([datasetTrips, pd.read_csv(path)])
datasetTrips.head()
# # 3. Process
# *Clean the data and ensure its integrity for the analysis process*
# Data cleaning steps:
# 1. Check data for null values
# 2. Fill the null values w/ data if possible
# 3. Decide whether or not to remove rows w/ null values
# 4. Check string values (like station names) for misspelled entries
# 5. Remove duplicate rows
# 6. Create a column which identifies the weekday of the trip
# 7. Create a column which identifies the hour of the trip
# 8. Create a column w/ trip duration
# 9. Create a column w/ trip distance (point-to-point, despite real distance being the ideal, we won't focus on that right now)
datasetTrips.info(show_counts=True)
# ## Evaluating options to fill NaN values
# The number of null values for `start_station_name` and `start_station_id` is the same, and the same holds for the columns describing the end station of each trip. We could fill the data if only one of the two were missing in a row, but if both are missing we can't use this information to fill the gaps.
# We'll check a sample of the dataset to see if both are missing for a few rows. If so, this suggests that using the `name` or `id` values of the stations is not a viable option for filling most of the NaN values.
datasetTrips.loc[datasetTrips.start_station_name.isna()][
["ride_id", "start_station_name", "start_station_id"]
].head(20)
datasetTrips.loc[datasetTrips.end_station_name.isna()][
["ride_id", "end_station_name", "end_station_id"]
].head(20)
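# A complementary check (a small sketch, not part of the original cleaning plan): count how many
# rows are missing both start-station fields versus exactly one of them, using the columns shown above.
both_missing = (
    datasetTrips.start_station_name.isna() & datasetTrips.start_station_id.isna()
).sum()
one_missing = (
    datasetTrips.start_station_name.isna() ^ datasetTrips.start_station_id.isna()
).sum()
print(f"Rows missing both name and id: {both_missing}")
print(f"Rows missing exactly one of them: {one_missing}")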
# The dataframes above confirm that using the station's name and id isn't a viable option. We may try to use latitude and longitude to fill the NaN values, provided each station maps to a single latitude/longitude pair.
datasetTrips[
[
"start_station_id",
"start_station_name",
"start_lat",
"start_lng",
"end_station_name",
"end_station_id",
"end_lat",
"end_lng",
]
].nunique()
# Neither ids nor latitude/longitude pairs have a 1:1 relationship with station names, so we can't fill these values while ensuring the reliability of the data.
# Therefore, we won't use the stations in our analysis, to avoid possible bias from the lack of proper data to correctly identify the stations for a sizeable share of the trips.
# Since we won't be doing a station-based analysis, checking the station names for misspellings becomes unnecessary, so we will skip step 4 of the data cleaning process.
# ## Dropping duplicate rows
datasetTrips = datasetTrips.drop_duplicates()
datasetTrips.info(show_counts=True)
# ## Creating weekday and hour columns in the dataset
# We will want to see the patterns of usage by casual riders and members throughout the days of the week and by hour.
datasetTrips["startDate"] = pd.to_datetime(
datasetTrips["started_at"], format="%Y-%m-%d %H:%M:%S"
)
datasetTrips["endDate"] = pd.to_datetime(
datasetTrips["ended_at"], format="%Y-%m-%d %H:%M:%S"
)
# monday is 0, sunday = 6
datasetTrips["weekday"] = datasetTrips.startDate.dt.dayofweek
datasetTrips["hour"] = datasetTrips.startDate.dt.hour
# ## Creating duration and distance columns in the dataset
datasetTrips["durationSeconds"] = (
(datasetTrips.endDate - datasetTrips.startDate).dt.total_seconds().abs()
)
# Calculating the great-circle distance between stations using the spherical law of cosines (angles in radians):
# arccos(sin(lat_start) * sin(lat_end) + cos(lat_start) * cos(lat_end) * cos(lng_end - lng_start)) * earth_radius
rad = np.pi / 180
sinStartLat = np.sin(rad * datasetTrips.start_lat)
sinEndLat = np.sin(rad * datasetTrips.end_lat)
cosStartLat = np.cos(rad * datasetTrips.start_lat)
cosEndLat = np.cos(rad * datasetTrips.end_lat)
cosDeltaLng = np.cos(rad * (datasetTrips.end_lng - datasetTrips.start_lng))
earthRadius = 6371 # unit = km
datasetTrips["distanceKm"] = (
np.arccos((sinStartLat * sinEndLat) + cosStartLat * cosEndLat * cosDeltaLng).abs()
* earthRadius
)
datasetTrips.head()
# ### Filtering unwanted columns to reduce memory usage during the analysis phase
datasetTripsClean = datasetTrips[
[
"ride_id",
"member_casual",
"rideable_type",
"startDate",
"endDate",
"durationSeconds",
"distanceKm",
"weekday",
"hour",
]
]
datasetTripsClean.head()
# # 4. Analysis
# 1. Check the descriptive statistics for overall usage
# 2. Check the descriptive statistics comparing members and casual riders
# 3. Check the descriptive statistics for members and casual riders comparing rideable types
# 4. Check the usage of the service by day of week and hour of day
datasetTripsClean.groupby("member_casual")[["durationSeconds", "distanceKm"]].describe()
datasetTripsClean.groupby(["member_casual", "rideable_type"])[
["durationSeconds", "distanceKm"]
].describe()
# ### Members usage info
# * 51.1% of members' rentals are classic bikes, and the remaining 48.9% are electric bikes;
# * Members' average rental duration is approx. 13 minutes, with 75% of the rentals lasting up to 15 minutes;
# * The average duration of a classic bike rental is 13m54s
# * The average duration of an electric bike rental is 11m28s (2m26s less than classic)
# * The average distance between stations for members is 2.11 km, with the 75th percentile at 2.73 km
# * For classic bikes, the average distance is 1.96 km and the 75th percentile is at 2.49 km
# * For electric bikes, the average distance is 2.26 km and the 75th percentile is at 2.99 km
# ### Casual riders usage info
# * 54% of the trips by casual riders were with electric bikes, 38.4% with classic bikes, and 7.6% with docked bikes;
# * Casual riders' average rental duration is approx. 29m08s, with the 75th percentile at 24m06s
# * The average duration with a classic bike is 28m45s, with the 75th percentile at 26m50s
# * The average duration with a docked bike is 2h02m43s, with the 75th percentile at 55m14s
# * The average duration with an electric bike is 16m10s, with the 75th percentile at 19m20s
# * Casual riders' average distance between stations is 2.18 km, with the 75th percentile at 2.86 km
# * The average distance with a classic bike is 2.09 km, with the 75th percentile at 2.72 km
# * The average distance with a docked bike is 2.18 km, with the 75th percentile at 2.94 km
# * The average distance with an electric bike is 2.25 km, with the 75th percentile at 2.96 km
#
# ### Overall comparisons
# * Members were responsible for 59% of the overall trips in 2022;
# * Members had shorter and faster trips compared with casual riders;
# * Casual riders' trip duration with docked bikes suggests they may use the service mainly for leisure;
# * The data suggests that members may have used the service in 2022 for day-to-day activities (like commuting to work or routine errands);
# * We need to dig deeper into day-of-week and hour-of-day usage to validate this hypothesis;
# * The short sketch below shows how the shares quoted in these bullets can be reproduced from the cleaned dataset.
#
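# A short sketch of how the shares quoted above can be reproduced: the overall
# member vs. casual split and the rideable-type mix within each group.
print(datasetTripsClean["member_casual"].value_counts(normalize=True))
print(
    datasetTripsClean.groupby("member_casual")["rideable_type"].value_counts(
        normalize=True
    )
)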
casualPivot = datasetTripsClean.loc[datasetTrips.member_casual == "casual"][
["weekday", "hour", "ride_id"]
]
casualHeatmapData = casualPivot.pivot_table(
index="hour", columns="weekday", values="ride_id", aggfunc=np.count_nonzero
)
casualHeatmapData = casualHeatmapData.rename(
columns={
0: "monday",
1: "tuesday",
2: "wedsneday",
3: "thursday",
4: "friday",
5: "saturday",
6: "sunday",
}
)
memberPivot = datasetTripsClean.loc[datasetTrips.member_casual == "member"][
["weekday", "hour", "ride_id"]
]
memberHeatmapData = memberPivot.pivot_table(
index="hour", columns="weekday", values="ride_id", aggfunc=np.count_nonzero
)
memberHeatmapData = memberHeatmapData.rename(
columns={
0: "monday",
1: "tuesday",
2: "wedsneday",
3: "thursday",
4: "friday",
5: "saturday",
6: "sunday",
}
)
plt.figure(figsize=(12, 8))
plt.title(
"Number of bike rentals by casual riders by day and hour \n",
fontdict={"fontsize": 20, "fontweight": "bold"},
)
casualHeatmap = sns.heatmap(
data=casualHeatmapData, cbar=True, annot=True, fmt="g", cmap="OrRd"
)
casualHeatmap.xaxis.tick_top()
plt.figure(figsize=(12, 8))
plt.title(
"Number of bike rentals by members by day and hour \n",
fontdict={"fontsize": 20, "fontweight": "bold"},
)
memberHeatmap = sns.heatmap(
data=memberHeatmapData, cbar=True, annot=True, fmt="g", cmap="OrRd"
)
memberHeatmap.xaxis.tick_top()
# ## Analysis of the heatmap:
# * Casual riders focus their usage on leisure during Saturdays and Sundays from 10AM until 7PM;
# * Casual riders probably also use the service to return from work, given the high volume of usage on workdays between 4PM and 7PM;
# * Members use the service mainly on workdays, with high volumes during the early hours (6AM to 8AM) and a peak during the return trip (4PM to 7PM);
# * Members also use the service for leisure, especially on Saturdays.
# # 5. Share
# * Create an output csv file to work in Tableau and create a dashboard.
tableauColumns = [
"ride_id",
"member_casual",
"rideable_type",
"start_lat",
"start_lng",
"end_lat",
"end_lng",
"startDate",
"endDate",
"weekday",
"hour",
"durationSeconds",
"distanceKm",
]
output = datasetTrips[tableauColumns]
output.to_csv("datasetTrips.csv")
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
from sklearn import linear_model as lm
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import (
KFold,
cross_val_score,
cross_val_predict,
LeaveOneOut,
)
import statsmodels.api as sma
import statsmodels.formula.api as sm
import statsmodels.sandbox.tools.cross_val as cross_val
from regressors import stats
from mlxtend.feature_selection import SequentialFeatureSelector as sfs
# ## 1 - Interactions between variables
# Explore at least 5 interactions - at least one should be a three-way interaction. Determine and identify your discoveries for each interaction. Is that interaction significant? Is a combination of interactions in the model significant?
df = pd.read_csv(
"/kaggle/input/california-housing-prices-data-extra-features/California_Houses.csv"
)
df.head()
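# A minimal sketch of how one interaction could be examined with the formula API already
# imported above (statsmodels.formula.api as sm). The column names used here
# (Median_House_Value, Median_Income, Median_Age, Population) are assumptions about this
# dataset - check df.columns and adjust them as needed. The '*' operator expands to the main
# effects plus all interaction terms, so the three-way formula below also includes every
# lower-order term; the p-values of the ':' terms in the summary indicate whether each
# interaction is significant.
interaction_model = sm.ols(
    "Median_House_Value ~ Median_Income * Median_Age * Population", data=df
).fit()
print(interaction_model.summary())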
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
# Loading datasets and quick look
phone_features = pd.read_csv("../input/mobile-price/train.csv", sep=";")
phone_features_test = pd.read_csv("../input/mobile-price/test.csv", sep=";")
phone_features.describe()
phone_features.info()
# As the info() output above shows, there are no null values
# Now we can check for highly correlated features and remove them if they appear. Highly correlated features hinder proper model building:
# when present, their effects become hard to separate, which can distort the learned dependencies. As the heatmap below shows, there is no such problem here - most features are not correlated (a short sketch after the heatmap shows how such features could be dropped automatically).
correlation = phone_features.corr()
heatmap_ax = sns.heatmap(correlation, vmin=-1, vmax=1, center=0)
plt.show()
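# A small sketch (not part of the original workflow) of how highly correlated features could be
# dropped automatically: flag any column whose absolute correlation with an earlier column
# exceeds a threshold (0.9 here is an arbitrary choice).
import numpy as np

upper_triangle = correlation.abs().where(
    np.triu(np.ones(correlation.shape, dtype=bool), k=1)
)
to_drop = [col for col in upper_triangle.columns if (upper_triangle[col] > 0.9).any()]
print("Columns that would be dropped at |corr| > 0.9:", to_drop)  # expected: none for this data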
PF = phone_features.drop(columns="price_range")
PF_test = phone_features_test.drop(columns="id")
# Division between training data and testing data
X_train, X_test, y_train, y_test = train_test_split(
PF, phone_features["price_range"], test_size=0.3, random_state=0
)
# MODELS - comparison and selection
# 1: random forest
random_forest = RandomForestClassifier()
random_forest.fit(X_train, y_train)
prediction1 = random_forest.predict(X_test)
print("Random forest:")
print(classification_report(y_test, prediction1))
# 2: logistic regression
lr = LogisticRegression(multi_class="ovr", solver="liblinear")
lr.fit(X_train, y_train)
prediction2 = lr.predict(X_test)
print("Logistic regression:")
print(classification_report(y_test, prediction2))
# 3: naive bayes
Naive_bayes = GaussianNB()
Naive_bayes.fit(X_train, y_train)
prediction3 = Naive_bayes.predict(X_test)
print("Naive Bayes:")
print(classification_report(y_test, prediction3))
# 4: support vector machine
Support_vector_machine = SVC()
Support_vector_machine.fit(X_train, y_train)
prediction4 = Support_vector_machine.predict(X_test)
print("Support vector machine:")
print(classification_report(y_test, prediction4))
# Choosing 'n' for nearest neighbors
f1_storage = []
for i in range(1, 10):
knn = KNeighborsClassifier(n_neighbors=i)
knn = knn.fit(X_train, y_train)
knn_predict = knn.predict(X_test)
print(i, f1_score(y_test, knn_predict, average="weighted"))
f1_storage.append(f1_score(y_test, knn_predict, average="weighted"))
numbers = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
plt.bar(numbers, f1_storage)
plt.ylim(0.75, 1)
for i in range(len(f1_storage)):
plt.text(
i,
f1_storage[i],
str(round(f1_storage[i] * 100, 3)) + "%",
size=8,
ha="center",
va="bottom",
)
plt.show()
# As we can see above, the best 'n' value is 7.
# 5: KNN
knn = KNeighborsClassifier(n_neighbors=7)
knn = knn.fit(X_train, y_train)
prediction5 = knn.predict(X_test)
print("KNN:")
print(classification_report(y_test, prediction5))
Random_forest_results = f1_score(y_test, prediction1, average="weighted")
Logistic_regression_results = f1_score(y_test, prediction2, average="weighted")
Naive_Bayes_results = f1_score(y_test, prediction3, average="weighted")
Support_vector_machine_results = f1_score(y_test, prediction4, average="weighted")
KNN_results = f1_score(y_test, prediction5, average="weighted")
models = [
Random_forest_results,
Logistic_regression_results,
Naive_Bayes_results,
Support_vector_machine_results,
KNN_results,
]
model_names = ["RF", "LR", "NB", "SVM", "KNN"]
plt.bar(model_names, models)
plt.ylim(0.5, 1)
for i in range(len(models)):
plt.text(
i,
models[i],
str(round(models[i] * 100)) + "%",
size=8,
ha="center",
va="bottom",
)
# Finally, we can see that the Support Vector Machine gives the best results and is the best model for predicting the price range.
# The predictions for the test set are as follows:
print(Support_vector_machine.predict(PF_test))
|
# # Installing pytest
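# The heading above refers to installation; a minimal way to install pytest from inside a
# notebook session (a sketch - it assumes pip is available in the environment; in Kaggle
# notebooks pytest is usually preinstalled, so this step can be skipped).
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "pytest"])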
class employee:
def __init__(self, age, salary):
self.first = None
self.last = None
self.email = None
self.age = age
self.salary = salary
def validate_age(self):
if self.age <= 20:
raise ValueError("Age can not be less than equal to 20")
return True
def get_years_retirement(self):
return 60 - self.age
@property
def fullname(self):
return "{} {}".format(self.first, self.last)
@fullname.setter
def fullname(self, fullname):
first, last = fullname.split(" ")
self.first = first
self.last = last
self.email = first + "." + last + "@gmail.com"
def __str__(self):
return "{0}-{1}-{2}-{3}-{4}".format(
self.first, self.last, self.email, self.age, self.salary
)
emp = employee(15, 55)
emp.fullname = "swapnil patil"
print(emp)
emp.validate_age()
emp.get_years_retirement()
# ## We have written a class with two methods: one validates the age and the other returns the remaining years until retirement
# ## Let's write pytest tests for these two methods
import pytest
from pytest import mark
class employee:
def __init__(self, age, salary):
self.first = None
self.last = None
self.email = None
self.age = age
self.salary = salary
def validate_age(self):
if self.age <= 20:
raise ValueError("Age can not be less than equal to 20")
return True
def get_years_retirement(self):
return 60 - self.age
@property
def fullname(self):
return "{} {}".format(self.first, self.last)
@fullname.setter
def fullname(self, fullname):
first, last = fullname.split(" ")
self.first = first
self.last = last
self.email = first + "." + last + "@gmail.com"
def __str__(self):
return "{0}-{1}-{2}-{3}-{4}".format(
self.first, self.last, self.email, self.age, self.salary
)
class TestEmployee:
fixture_count = 0
@pytest.fixture(scope="class")
def dummy_emp_obj(self, request):
TestEmployee.fixture_count += 1
print("Creating Fixture for {} time".format(TestEmployee.fixture_count))
obj = employee(50, 50)
yield obj
print("------------Tearing down fixture------------")
obj = None
@mark.prod
def test_check_email(self, dummy_emp_obj):
# emp_obj = employee(30,50)
dummy_emp_obj.fullname = "swapnil patil"
assert dummy_emp_obj.email == "[email protected]"
@mark.prod
def test_validate_age(self):
emp_obj = employee(30, 50)
assert emp_obj.validate_age() == True
@mark.smoke
def test_validate_age_exception(self):
with pytest.raises(ValueError, match="Age can not be less than equal to 20"):
emp_obj = employee(19, 50)
emp_obj.validate_age()
@mark.prod
def test_check_retirement_years(self, dummy_emp_obj):
# emp_obj = employee(50,50)
# assert emp_obj.get_years_retirement() == 10
assert dummy_emp_obj.get_years_retirement() == 10
# # Writing first pytest
#
# def test_validate_age(self):
#
# emp_obj = employee(30,50)
#
# assert emp_obj.validate_age() == True
#
#
# # Check against exceptions
# ## with pytest.raises(ValueError, match='Age can not be less than equal to 20'):
# ## Here we create a context with pytest.raises and expect the code inside it to raise a ValueError whose message exactly matches 'Age can not be less than equal to 20'
# def test_validate_age_exception(self):
#
# with pytest.raises(ValueError, match='Age can not be less than equal to 20'):
#
# emp_obj = employee(19,50)
# emp_obj.validate_age()
#
# # Mark : Test categorization
# ## If we want to execute only the smoke tests and not all the tests, we mark each such test with @mark.smoke
# ## Then, to run only those tests, execute pytest -m smoke -v
# # Mark with filters
# ## Markers can also be applied at the class level
# ## With -m we can pass not just a marker name but also a search expression
# ## For example, pytest -m "not prod" executes all the test cases not marked as prod
# ## Combinations such as "smoke and prod" or "smoke or prod" can also be applied
# # Fixture
# ## We could see that an employee object had to be created in every test function
# ## A fixture gives us the ability to create a setup function (similar to setUp in unittest)
#
# @pytest.fixture(scope='function')
# def dummy_emp_obj(self):
# obj = employee(50,50)
# return obj
# ## We create a fixture with @pytest.fixture(scope='function')
# ## To use the fixture, we pass its name as a parameter: def test_check_retirement_years(self, dummy_emp_obj):
# ## This way we don't have to create an employee object in every test function - the fixture instantiates the object and each test simply receives it
# # Fixture Scope
# ## @pytest.fixture(scope='function') defines function scope for the fixture, which means the fixture is instantiated for every test
# ## In the results above, the fixture was instantiated 2 times
# ## Let's set the scope to class and check whether the fixture is executed for each call
# ## This time the fixture was instantiated only once, because the scope was 'class'
# ## @pytest.fixture(scope='class')
# # Fixture Tear Down
# ## Basically we want to clean up once the fixture's job is done, for example closing a database connection or an opened file
#
# @pytest.fixture(scope='class')
# def dummy_emp_obj(self, request):
#
# TestEmployee.fixture_count += 1
# print("Creating Fixture for {} time".format(TestEmployee.fixture_count))
# obj = employee(50,50)
# yield obj
#
# print("------------Tearing down fixture------------")
# obj = None
#
#
#
# ## As shown, once the fixture obj is instantiated, the yield obj statement returns obj to the caller
# ## Statements after yield execute once all usage of the fixture is completed
# ## As shown here, 'Tearing down fixture' is printed at the end
# # Parameterizing Test Case
# ## Instead of writing separate test cases, we can use @pytest.mark.parametrize("a,b,c",[(1,2,3),('a','b','ab')],ids=['nums_test','strings_test'])
# ## @pytest.mark.parametrize : marks the test as parameterized
# ## ("a, b, c") : the parameter names used for parameterizing
# ## The test case is then written as usual, taking the parameter names as arguments
import pytest
from pytest import mark
def add(a, b):
return a + b
class Test_parameterize:
def test_add(self):
assert add(10, 30) == 40
assert add(20, 30) == 50
@pytest.mark.parametrize(
"a,b,c", [(1, 2, 3), ("a", "b", "ab")], ids=["nums_test", "strings_test"]
)
def test_add_param(self, a, b, c):
assert add(a, b) == c
@pytest.mark.parametrize(
"emp_name,emp_age",
[("swapnil-1", 34), ("swapnil-2", 24), ("swapnil-3", 44)],
ids=["employee-1", "employee-2", "employee-3"],
)
    def test_add_param_employee(self, emp_name, emp_age):
print("Employee Name : {0} Age : {1}".format(emp_name, emp_age))
# # Parameterized Fixture
# ## Here we have a user class with name and age
# ## The class constructor expects name and age parameters
# ## In the fixture we pass params as [("swapnil-1", 25), ("swapnil-2", 35)]
# ## In the fixture body, while instantiating the user class,
# ## user(request.param[0], request.param[1]) is used: request.param[0] is passed for name and request.param[1] for age
import pytest
from pytest import mark
def add(a, b):
return a + b
class user:
def __init__(self, name, age):
self.name = name
self.age = age
class Test_parameterize:
@pytest.fixture(scope="class", params=[("swapnil-1", 25), ("swapnil-2", 35)])
def dummy_usr_obj(self, request):
obj = user(request.param[0], request.param[1])
yield obj
obj = None
def test_user(self, dummy_usr_obj):
print(
"--user name : {0} user age {1} ".format(
dummy_usr_obj.name, dummy_usr_obj.age
)
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Introduction
# ### Welcome to the "Plant Pathology 2020 - FGVC7" competition! In this competition, contestants are challenged to diagnose plant diseases solely based on leaf images. The categories include "healthy", "scab", "rust", and "multiple diseases". Solving this problem is important because diagnosing plant diseases early can save tonnes of agricultural produce every year. This will benefit not only the general population by reducing hunger, but also the farmers by ensuring they get the harvest they deserve.
#
# ### In this notebook we will visualize the data with Matplotlib and Plotly and then demonstrate some important image processing and augmentation techniques using OpenCV. Finally, I will show how different pretrained Keras models, such as DenseNet and EfficientNet, can be used to solve the problem.
# # EDA
# ## Installing and importing necessary libraries
import os
import gc
import re
import cv2
import math
import numpy as np
import scipy as sp
import pandas as pd
import tensorflow as tf
from IPython.display import SVG
import efficientnet.tfkeras as efn
from tensorflow.keras.utils import plot_model
import tensorflow.keras.layers as L
from tensorflow.keras.utils import model_to_dot
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from kaggle_datasets import KaggleDatasets
from tensorflow.keras.applications import DenseNet121
import seaborn as sns
from tqdm import tqdm
import matplotlib.cm as cm
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
tqdm.pandas()
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
np.random.seed(0)
tf.random.set_seed(0)
import warnings
warnings.filterwarnings("ignore")
# ## Load the data and define hyperparameters
EPOCHS = 20
SAMPLE_LEN = 100
IMAGE_PATH = "../input/plant-pathology-2020-fgvc7/images/"
TEST_PATH = "../input/plant-pathology-2020-fgvc7/test.csv"
TRAIN_PATH = "../input/plant-pathology-2020-fgvc7/train.csv"
SUB_PATH = "../input/plant-pathology-2020-fgvc7/sample_submission.csv"
sub = pd.read_csv(SUB_PATH)
test_data = pd.read_csv(TEST_PATH)
train_data = pd.read_csv(TRAIN_PATH)
train_data.head()
test_data.head()
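# The introduction above mentions pretrained Keras models such as DenseNet; the function below is
# a minimal sketch (not this notebook's actual training pipeline) of how DenseNet121, already
# imported above, could be wired up for the four target classes. The 512x512 input size is an
# assumption - adjust it to the image size actually used.
def build_densenet_sketch(input_shape=(512, 512, 3), n_classes=4):
    base = DenseNet121(weights="imagenet", include_top=False, input_shape=input_shape)
    x = L.GlobalAveragePooling2D()(base.output)
    outputs = L.Dense(n_classes, activation="softmax")(x)
    model = Model(inputs=base.input, outputs=outputs)
    model.compile(
        optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
    )
    return model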
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import pandas library to read csv file data
import pandas as pd
df = pd.read_csv("/kaggle/input/london-weather/london_weather.csv")
print(df.head())
# import library
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
# check data has 7671 rows and 10 columns
df.shape
# set column date as index column
df2 = pd.read_csv(
"/kaggle/input/london-weather/london_weather.csv",
index_col="date",
parse_dates=True,
)
df2.head()
# check which rows have null values
print(df.isnull().sum())
# drop rows with null values
df2 = df2.dropna()
# after dropping null rows, the data has 6201 rows and 9 columns left
df2.shape
df2["mean_temp"].plot(figsize=(12, 5))
df["date"] = pd.to_datetime(df["date"])
df
# df.index = pd.DatetimeIndex(df.index).to_period('M')
df.set_index("date", inplace=True)
df
df["date"] = pd.to_datetime(df["date"])
# Set the date column as the index of the DataFrame and set the frequency to daily
df.set_index("date", inplace=True)
df.index.freq = "D"
print(df.isnull().sum())
df = df.dropna(subset=["date"])
print(df[df.duplicated(["date"])])
df = df.drop_duplicates(["date"])
df.set_index("date", inplace=True)
df.index.freq = "D"
print(df.head())
temp_df = df[["mean_temp"]]
train_size = int(len(temp_df) * 0.9)
train_data = temp_df.iloc[:train_size]
test_data = temp_df.iloc[train_size:]
plt.plot(train_data)
plt.plot(test_data)
plt.legend(["Train", "Test"])
plt.show()
model = ARIMA(train_data, order=(1, 1, 1))
model_fit = model.fit()
# forecast() returns the predicted values for the requested number of steps
predictions = model_fit.forecast(steps=len(test_data))
plt.plot(test_data)
plt.plot(predictions, color="red")
plt.legend(["Actual", "Predicted"])
plt.show()
mae = np.mean(np.abs(np.asarray(predictions) - test_data["mean_temp"].values))
print("MAE:", mae)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from bs4 import BeautifulSoup
import re
import datetime
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
webpage = "/kaggle/input/beurstalk-webpage/BeursTalk - Account pagina.html"
with open(webpage, "r") as f:
contents = f.read()
soup = BeautifulSoup(contents, "html.parser")
print(len(soup.find_all("tr")))
table_headers = [
"instap",
"koers",
"rendement",
"datum",
"aandeel_naam",
"ticker",
"persoon",
]
def get_date(text):
    date = re.match(r"(\d{1,2}-\d{1,2}-\d{4})", text)
return date.group()
regex_ticker = "([A-Z]{1,6}.[A-Z]{0,5}$)"
list_rows = len(
soup.find("ul", class_="list-group").find_all("li", class_="list-group-item")
)
table = []
for i in range(0, list_rows - 1):
instap = (
soup.find("ul", class_="list-group")
.find_all("li", class_="list-group-item")[i]
.find_all("div")[0]
.find("div", class_="")
.text.replace("Instap: ", "")
.strip()
.replace(",", ".")
)
koers = (
soup.find("ul", class_="list-group")
.find_all("li", class_="list-group-item")[i]
.find_all("div")[0]
.find_all("div", class_=["text-success", "text-warning"])[0]
.text.replace("EOD: ", "")
.replace(",", ".")
.strip()
)
rendement = (
soup.find("ul", class_="list-group")
.find_all("li", class_="list-group-item")[i]
.find_all("div")[0]
.find_all("div", class_=["text-success", "text-warning"])[1]
.text.replace("Rendement: ", "")
)
datum = get_date(
soup.find("ul", class_="list-group")
.find_all("li", class_="list-group-item")[i]
.find_all("div", class_="date text-muted")[0]
.text
)
    naam = (
soup.find("ul", class_="list-group")
.find_all("li", class_="list-group-item")[i]
.find("div", class_="title")
.text.strip()
)
ticker = re.search(regex_ticker, naam).group()
persoon = (
soup.find("ul", class_="list-group")
.find_all("li", class_="list-group-item")[i]
.find_all("div", class_="indiceLabel")[0]
.text
)
table.append([instap, koers, rendement, datum, naam, ticker, persoon])
df = pd.DataFrame(table, columns=table_headers)
df
|
# # Homework 04
# **Kelsey Puetz**
# # Preparing the Environment
# Use the cell below to import NumPy, Pandas, and the Pyplot library of Matplotlib, using the standard aliases.
# Also import the following tools from Scikit-Learn: `LogisticRegresion`, `DecisionTreeClassifier`, `RandomForestClassifier`, `KNeighborsClassifier`, `GridSearchCV`, `cross_val_score`, `cross_val_predict`, `classification_report`, and `confusion_matrix`.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score, cross_val_predict
from sklearn.metrics import classification_report, confusion_matrix
# Run the cell below to suppress scientific notation for NumPy arrays.
np.set_printoptions(suppress=True)
# # Part 1: Load and Explore Data
# In this assignment, you will be working with the MNIST dataset. This dataset consists of several thousand black-and-white images of handwritten digits from 0 - 9. Your goal will be to create a model capable of identifying the digit displayed in an image based on the values of pixels within the image.
# You can learn more about this dataset here: https://www.kaggle.com/c/digit-recognizer
# The images in the dataset are 28x28 black-and-white images. As such, each image contains 784 pixel values. These pixel values are represented as integers ranging from 0 to 255. Rather than working with image files, you will be provided with CSV files that contain the pixel values. Each row in the CSV files will contain the pixel values for a single image.
# **1A.** Use the cell below to load the dataset `digit-recognizer/train.csv` into a DataFrame. Shuffle the DataFrame (setting `random_state=1`), print its shape, and then display the head of the DataFrame.
# 1A
MNIST = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
MNIST = MNIST.sample(frac=1, random_state=1)
print(MNIST.shape)
MNIST.head()
# This dataset is relatively large. For the sake of reducing runtime of this notebook, we will use only 10,000 observations when performing hyperparameter tuning and model selection.
# **1B**. Create a DataFrame named `df_sample` that contains 10,000 rows sampled at random from the DataFrame created in the previous cell. Set `random_state=1` when performing the sampling. Print the shape of the new DataFrame.
# 1B
df_sample = MNIST.sample(10000, random_state=1)
print(df_sample.shape)
# **1C.** Create feature and label arrays named `X_sample` and `y_sample`. Use only the 10,000 observations from the sample. Print the shape of both arrays.
# 1C
X_sample = df_sample.iloc[:, 1:785].values
y_sample = df_sample.label
print(X_sample.shape)
print(y_sample.shape)
# **1D.** Complete the code below to create a 2x4 grid of subplots displaying the first 8 digits in the sample, and to also print the labels for these observations. Note that the rows need to be reshaped into 28x28 arrays in order to be displayed as images.
# 1D
for i in range(8):
plt.subplot(2, 4, i + 1)
digit = X_sample[i, :].reshape(28, 28)
plt.imshow(digit, cmap="Greys")
plt.axis("off")
plt.show()
print(y_sample[0:8])
# **1E.** Some of our algorithms will have better performance if we scale the pixel values. Since we know the pixel values all range from 0 to 255, we will simply divide the feature array by 255 to get a scaled feature array with values between 0 and 1. Perform this task in the next cell and name the scaled feature array `X_scaled`.
# 1E
X_scaled = X_sample / 255
# # Part 2: Model Selection
# We will now perform model selection and hyperparameter tuning. For this assignment, you will consider logistic regression, decision tree, random forest, and KNN models.
# **IMPORTANT NOTE:** For the sake of time, we will be using **5-fold** cross-validation throughout Part 2, rather than 10-fold cross-validation.
# ### Logistic Regression
# **2A.** In the cell below, create a logistic regression model, setting `penalty='none'`, `solver='saga'`, and `tol=0.01`. The last two parameters are set to help with convergence. Estimate the model's out-of-sample accuracy using **5-fold** cross validation. Print the result rounded to 4 decimal places in a message of the following form:
# CV Score: ____
# You might need to adjust the max_iter parameter of your model to avoid convergence warnings. Note that you are not asked to actually fit a model on the full dataset at this point.
# 2A
lr_model = LogisticRegression(penalty="none", solver="saga", tol=0.01)
cv_score = cross_val_score(lr_model, X_scaled, y_sample, cv=5)
print(f"CV Score: {cv_score.mean().round(4)}")
# ### K-Nearest Neighbors
# **2B.** Use `GridSearchCV` to perform hyperparameter tuning for KNN models, according to the following specifications:
# * Tune over the `n_neighbors` parameter, considering at least 8 values for this parameter.
# * Adjust the list of hyperparameter values as needed in order to find the best possible value.
# * Name your `GridSearchCV` object `knn_grid` to be consistent with code provided in the next step.
# * Use the scaled sample data.
# * Use **5-fold** cross_validation.
# * Use the `best_estimator_` attribute to extract the best model found.
# * Summarize your results by printing messages of the type shown below, with numerical values rounded to four decimal places.
# ```
# Best Parameters: ____
# Best CV Score: ____
# Training Acc: ____
# ```
# 2B
knn_clf = KNeighborsClassifier()
knn_parameters = {"n_neighbors": [2, 4, 8, 12, 16, 20, 24, 30]}
knn_grid = GridSearchCV(knn_clf, knn_parameters, cv=5, refit=True, n_jobs=-1)
knn_grid.fit(X_scaled, y_sample)
knn_mod = knn_grid.best_estimator_
print(f"Best Parameters: {knn_grid.best_params_}")
print(f"Best CV Score: {knn_grid.best_score_:.4f}")
print(f"Training Acc: {knn_grid.score(X_scaled, y_sample):.4f}")
# Run the cell below to visualize your grid search results. Based on these results, you might want to adjust the hyperparameter values considered above. Your plot should provide strong support for the claim that you have found the best hyperparameter value.
knn_summary = pd.DataFrame(knn_grid.cv_results_["params"])
knn_summary["cv_score"] = knn_grid.cv_results_["mean_test_score"]
plt.plot(knn_summary.n_neighbors, knn_summary.cv_score)
plt.xlabel("n_neighbors")
plt.ylabel("CV Score")
plt.xticks(knn_summary.n_neighbors)
plt.grid()
plt.show()
# ### Decision Trees
# **2C.** Use `GridSearchCV` to perform hyperparameter tuning for decision tree models, according to the following specifications:
# * Set `random_state=1` for your model.
# * Tune over the `max_depth` and `min_samples_leaf` parameters.
# * Considering at least 6 values for `max_depth` and at least 4 values for `min_samples_leaf`.
# * Adjust the list of hyperparameter values as needed in order to find a parameter set that is at least close to the best possible.
# * Name your `GridSearchCV` object `dt_grid` to be consistent with code provided in the next step.
# * Use the scaled sample data.
# * Use **5-fold** cross_validation.
# * Use the `best_estimator_` attribute to extract the best model found.
# * Summarize your results by printing messages of the type shown below, with numerical values rounded to four decimal places.
# ```
# Best Parameters: ____
# Best CV Score: ____
# Training Acc: ____
# ```
# 2C
dt_clf = DecisionTreeClassifier(random_state=1)
dt_parameters = {
"max_depth": [2, 4, 6, 8, 10, 12, 16, 20, 24, 32],
"min_samples_leaf": [2, 4, 8, 16, 32],
}
dt_grid = GridSearchCV(dt_clf, dt_parameters, cv=5, refit=True, n_jobs=-1)
dt_grid.fit(X_scaled, y_sample)
dt_mod = dt_grid.best_estimator_
print(f"Best Parameters: {dt_grid.best_params_}")
print(f"Best CV Score: {dt_grid.best_score_:.4f}")
print(f"Training Acc: {dt_grid.score(X_scaled, y_sample):.4f}")
# Run the cell below to visualize your grid search results. Based on these results, you might want to adjust the hyperparameter values considered above. Your plot should provide strong support for the claim that you have found the best set of hyperparameter values (or at least something close to it).
dt_summary = pd.DataFrame(dt_grid.cv_results_["params"])
dt_summary["cv_score"] = dt_grid.cv_results_["mean_test_score"]
for ms in dt_parameters["min_samples_leaf"]:
temp = dt_summary.query(f"min_samples_leaf == {ms}")
plt.plot(temp.max_depth, temp.cv_score, label=ms)
plt.xlabel("Maximum Depth")
plt.ylabel("CV Score")
plt.legend(title="Min Samples", bbox_to_anchor=[1, 1])
plt.xticks(dt_parameters["max_depth"])
plt.grid()
plt.show()
# ### Random Forests
# **2D.** Use `GridSearchCV` to perform hyperparameter tuning for random forest models, according to the following specifications:
# * Set `random_state=1` for your model.
# * Use 100 trees in your forest.
# * Tune over the `max_depth` and `min_samples_leaf` parameters.
# * Considering at least 6 values for `max_depth` and at least 3 values for `min_samples_leaf`.
# * Adjust the list of hyperparameter values as needed in order to find a parameter set that is at least close to the best possible.
# * Name your `GridSearchCV` object `rf_grid` to be consistent with code provided in the next step.
# * Use the scaled sample data.
# * Use **5-fold** cross_validation.
# * Use the `best_estimator_` attribute to extract the best model found.
# * Summarize your results by printing messages of the type shown below, with numerical values rounded to four decimal places.
# ```
# Best Parameters: ____
# Best CV Score: ____
# Training Acc: ____
# 2D
rf_clf = RandomForestClassifier(n_estimators=100, random_state=1)
rf_parameters = {
"max_depth": [2, 4, 6, 8, 12, 16, 20, 24, 30],
"min_samples_leaf": [1, 2, 4, 6, 8],
}
rf_grid = GridSearchCV(rf_clf, rf_parameters, cv=5, refit=True, n_jobs=-1)
rf_grid.fit(X_scaled, y_sample)
rf_mod = rf_grid.best_estimator_
print(f"Best Parameters: {rf_grid.best_params_}")
print(f"Best CV Score: {rf_grid.best_score_:.4f}")
print(f"Training Acc: {rf_grid.score(X_scaled, y_sample):.4f}")
rf_summary = pd.DataFrame(rf_grid.cv_results_["params"])
rf_summary["cv_score"] = rf_grid.cv_results_["mean_test_score"]
for ms in rf_parameters["min_samples_leaf"]:
temp = rf_summary.query(f"min_samples_leaf == {ms}")
plt.plot(temp.max_depth, temp.cv_score, label=ms)
plt.xlabel("Maximum Depth")
plt.ylabel("CV Score")
plt.legend(title="Min Samples", bbox_to_anchor=[1, 1])
plt.xticks(rf_parameters["max_depth"])
plt.grid()
plt.show()
# # Part 3: Final Model
# In this part, you will identify, train, and evaluate your final model. For the sake of time, we only used 10,000 observations for hyperparameter tuning. However, in order to improve our model, we will train our final model on the entire set of 42,000 observations.
# **3A.** In the cell below, create feature and label arrays using all of the observations from the original DataFrame. Scale the feature array by dividing the pixel values by 255. Then print the shape of the scaled feature array and the label array.
# 3A
X = MNIST.iloc[:, 1:785].values
y = MNIST.label
X_scaled = X / 255
print(X_scaled.shape)
print(y.shape)
# Review the cross-validation scores for the models considered in Part 2 and identify the model with the highest CV score. If your best-performing model is a KNN model, and it is only slightly better than another model, then select your second-best model instead. KNN models are problematic with this dataset, for reasons discussed in class.
# **3B.** After identifying your final model, train a new version of this model using the best set of hyperparameter values found previously. If creating a decision tree or random forest model, set `random_state=1`. Then fit the model to the full dataset. Next, evaluate the final model by performing **10-fold** cross-validation. Print the result rounded to 4 decimal places in a message of the following form:
# CV Score: ____
#
# **Note:** You are asked to use 10-fold CV here.
# 3B
final_model = RandomForestClassifier(
n_estimators=100, random_state=1, max_depth=16, min_samples_leaf=1
)
final_model.fit(X_scaled, y)
score = cross_val_score(final_model, X_scaled, y, cv=10)
print(f"CV Score: {score.mean().round(4)}")
# **3C.** Use 10-fold cross-validation prediction with your final model to generate out-of-sample predictions for the dataset. Use these predictions to generate a confusion matrix. Display the matrix as a DataFrame.
# 3C
cv_pred = cross_val_predict(final_model, X_scaled, y, cv=10)
cm = confusion_matrix(y, cv_pred)
pd.DataFrame(cm)
# **3D.** Edit this markdown cell to complete the statements below, replacing the underscores with the correct values.
# * The 1st most common misclassification made by the model was to classify the digit 4 as the digit 9.
# * The 2nd most common misclassification made by the model was to classify the digit 3 as the digit 5.
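# One way to double-check these statements (a small sketch, not part of the original assignment; it assumes the confusion matrix `cm` from 3C is still in memory): zero out the diagonal and look at the largest remaining counts.
import numpy as np  # likely already imported earlier in the notebook; repeated so this cell stands alone
cm_offdiag = np.array(cm, copy=True)
np.fill_diagonal(cm_offdiag, 0)  # ignore correct predictions
flat_order = np.argsort(cm_offdiag, axis=None)[::-1]  # largest misclassification counts first
for flat_idx in flat_order[:2]:
    true_lbl, pred_lbl = np.unravel_index(flat_idx, cm_offdiag.shape)
    print(f"digit {true_lbl} classified as {pred_lbl}: {cm_offdiag[true_lbl, pred_lbl]} times")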
# **3E.** Use the out-of-sample predictions to create and print a classification report.
# 3E
print(classification_report(y, cv_pred))
# # Part 4: New Observations
# The csv file `digit-recognizer/test.csv` contains pixel values for 28,000 unlabeled images.
# **4A.** Load this csv file into a DataFrame. Then use the `sample()` method to select 16 rows at random, setting `random_state=4`. Create a feature array by extracting the pixel values and dividing them by 255. Print the shape of the new feature array.
# 4A
new_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
new_data = new_data.sample(16, random_state=4)
X_new = new_data.values
X_new_scaled = X_new / 255
print(X_new_scaled.shape)
# **4B**. Adapt the code from Part 1D to create a 4x4 grid of subplots displaying the 16 images in the sample of new observations.
# 4B
for i in range(16):
plt.subplot(4, 4, i + 1)
digit = X_new_scaled[i, :].reshape(28, 28)
plt.imshow(digit, cmap="Greys")
plt.axis("off")
plt.show()
# **4C.** Generate class predictions for the 16 digits shown above. Print the predictions. To better compare the predictions with the digits, reshape the prediction array to a 4x4 array and then print this reshaped array.
# 4C
y_new_pred = final_model.predict(X_new_scaled)
print(y_new_pred.reshape(4, 4))
# **4D.** For which, if any, of the 16 new observations do you suspect that the model generated an incorrect prediction? Provide your answer in this mark-down cell by listing the indices (0 - 15) for the misclassified observations.
# Answer: 6
# **4E.** Use your final model to generate class probability estimates for the 16 new observations. Print the array of probability estimates rounded to 2 decimal places.
# 4E
y_new_prob = final_model.predict_proba(X_new_scaled)
print(y_new_prob.round(2))
# **4F.** Use the `max()` method with `axis=1` to determine the largest value in each row of the probability estimate array. Print these values rounded to 2 decimal places.
# 4F
max_prob = y_new_prob.max(axis=1)  # largest probability estimate in each row
print(max_prob.round(2))
# **4G.** Identify the three observations for which the model is the least confident in its prediction. Provide the indices (0-15) for these observations below, in increasing order of confidence.
# Answer: 8, 6, 10
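# These indices can be recovered programmatically (a small sketch using the `max_prob` array computed in 4F, not part of the original answer): sorting the row-wise maxima in increasing order puts the least-confident predictions first.
import numpy as np  # likely already imported earlier in the notebook
least_confident = np.argsort(max_prob)[:3]  # positions ordered from least to most confident
print("Least confident observations (increasing confidence):", least_confident)
print("Their top probabilities:", max_prob[least_confident].round(2))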
# ## HTML Export
# Follow these instructions to create an HTML render of your notebook:
# 1. Replace the text `"Username"` in the cell below with your Maryville username.
# 2. Replace 408 with 508, if appropriate.
# 3. Save and Commit your notebook.
# 4. When the commit is completed, open the new version in the viewer.
# 5. Navigate to the "Data" tab in the view.
# 6. Click the "Download" button next to your HTML file.
# 7. This might open the HTML file in your browser. If so, then save that file to your local machine.
# 8. Upload the HTML export to Canvas.
|
#
# # Import Statements
import pandas as pd
import matplotlib.pyplot as plt
# # Data Exploration
# How many different colours does the LEGO company produce?
colors = pd.read_csv("/kaggle/input/lego-database/colors.csv")
colors.head()
colors["name"].nunique()
colors.groupby("is_trans").count()
colors.is_trans.value_counts()
# ### Understanding LEGO Themes vs. LEGO Sets
# Walk into a LEGO store and you will see their products organised by theme. Their themes include Star Wars, Batman, Harry Potter and many more.
# A lego **set** is a particular box of LEGO or product. Therefore, a single theme typically has many different sets.
# The sets.csv data contains a list of sets over the years and the number of parts that each of these sets contained.
sets = pd.read_csv("/kaggle/input/lego-database/sets.csv")
sets.head()
sets.tail()
# In which year were the first LEGO sets released and what were these sets called?
sets.sort_values("year").head()
# How many different sets did LEGO sell in their first year? How many types of LEGO products were on offer in the year the company started?
sets[sets["year"] == 1949]
# Find the top 5 LEGO sets with the most number of parts.
sets.sort_values("num_parts", ascending=False).head()
# Use .groupby() and .count() to show the number of LEGO sets released year-on-year. How does the number of sets released in 1955 compare to the number released in 2019?
sets_by_year = sets.groupby("year").count()
sets_by_year["set_num"].head()
sets_by_year["set_num"].tail()
# Show the number of LEGO releases on a line chart using Matplotlib.
# Note that the .csv file is from late 2020, so to plot the full calendar years, you will have to exclude some data from your chart. Can you use the slicing techniques covered in Day 21 to avoid plotting the last two years? The same syntax will work on Pandas DataFrames.
plt.plot(sets_by_year.index, sets_by_year.set_num)
plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
# ### Aggregate Data with the Python .agg() Function
# Let's work out the number of different themes shipped by year. This means we have to count the number of unique theme_ids per calendar year.
themes_by_year = sets.groupby("year").agg({"theme_id": pd.Series.nunique})
themes_by_year.rename(columns={"theme_id": "nr_themes"}, inplace=True)
themes_by_year.head()
themes_by_year.tail()
plt.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2])
# ### Line Charts with Two Separate Axes
# This looks terrible
plt.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2])
plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
ax1 = plt.gca() # get the axis
ax2 = ax1.twinx() # create another axis that shares the same x-axis
ax1.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
ax2.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2])
ax1 = plt.gca()
ax2 = ax1.twinx()
# Add styling
ax1.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2], color="g")
ax2.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2], "b")
ax1.set_xlabel("Year")
ax1.set_ylabel("Number of Sets", color="green")
ax2.set_ylabel("Number of Themes", color="blue")
parts_per_set = sets.groupby("year").agg({"num_parts": pd.Series.mean})
parts_per_set.head()
parts_per_set.tail()
# ### Scatter Plots in Matplotlib
plt.scatter(parts_per_set.index[:-2], parts_per_set.num_parts[:-2])
# ### Number of Sets per LEGO Theme
# LEGO has licensed many hit franchises, from Harry Potter to Marvel Super Heroes and many others. But which theme has the largest number of individual sets? Is it one of LEGO's own themes like Ninjago or Technic, or is it a third-party theme? Let's analyse LEGO's product lines!
set_theme_count = sets["theme_id"].value_counts()
set_theme_count[:5]
#
# ### Database Schemas, Foreign Keys and Merging DataFrames
# The themes.csv file has the actual theme names. The sets.csv file has theme_ids which link to the id column in themes.csv.
themes = pd.read_csv("/kaggle/input/lego-database/themes.csv") # has the theme names!
themes.head()
themes[themes.name == "Star Wars"]
sets[sets.theme_id == 18]
sets[sets.theme_id == 209]
# ### Merging (i.e., Combining) DataFrames based on a Key
#
set_theme_count = pd.DataFrame(
{"id": set_theme_count.index, "set_count": set_theme_count.values}
)
set_theme_count.head()
merged_df = pd.merge(set_theme_count, themes, on="id")
merged_df[:3]
# Basic, but unreadable
plt.bar(merged_df.name[:10], merged_df.set_count[:10])
plt.figure(figsize=(14, 8))
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.ylabel("Nr of Sets", fontsize=14)
plt.xlabel("Theme Name", fontsize=14)
plt.bar(merged_df.name[:10], merged_df.set_count[:10])
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import the dataset and its structure
print(os.listdir("../input"))
df = pd.read_csv("/kaggle/input/heart-disease-uci/heart.csv")
df.head()
# display each data type of the features
print(df.dtypes)
print(df.shape)
# I can see that there are some variables that are not stored correctly. Therefore, I will change them to the following types.
# * ratio: age, chol, thalach
# * interval: trestbps, oldpeak
# * ordinal: cp, fbs, slope, ca
# * categorical: sex,restecg, exang, thal, target
# I will change the ordinal and categorical data to object type.
# devide the dataset by types
cate_df = df[["cp", "fbs", "slope", "ca", "sex", "restecg", "exang", "thal", "target"]]
num_df = df[["age", "chol", "thalach", "trestbps", "oldpeak"]]
for column in cate_df:
cate_df[column] = cate_df[column].astype("category", copy=False)
print(cate_df.dtypes)
print(num_df.dtypes)
# display missing values
print("Number of missing values by column")
print(df.isnull().sum())
# We can see that there are no missing values, but the categorical variables are viewed as numeric. This needs to be changed.
# bar plots of categorical variables
import seaborn as sns
import matplotlib.pyplot as plt
fig, axes = plt.subplots(3, 3, figsize=(15, 15))
fig.suptitle("Bar plots of categorical and ordinal variables")
#'cp','fbs','slope','ca','sex','restecg','exang','thal','target'
sns.countplot(ax=axes[0, 0], data=cate_df, x="cp", palette="YlGnBu")
sns.countplot(ax=axes[0, 1], data=cate_df, x="fbs", palette="YlGnBu")
sns.countplot(ax=axes[0, 2], data=cate_df, x="slope", palette="YlGnBu")
sns.countplot(ax=axes[1, 0], data=cate_df, x="ca", palette="YlGnBu")
sns.countplot(ax=axes[1, 1], data=cate_df, x="sex", palette="YlGnBu")
sns.countplot(ax=axes[1, 2], data=cate_df, x="restecg", palette="YlGnBu")
sns.countplot(ax=axes[2, 0], data=cate_df, x="exang", palette="YlGnBu")
sns.countplot(ax=axes[2, 1], data=cate_df, x="thal", palette="YlGnBu")
sns.countplot(ax=axes[2, 2], data=cate_df, x="target", palette="YlGnBu")
# Histograms of numeric variables
fig, axes = plt.subplots(3, 2, figsize=(15, 15))
#'age','chol','thalach','trestbps','oldpeak'
fig.suptitle("Bar plots of ratio and interval variables")
sns.histplot(ax=axes[0, 0], data=num_df, x="age")
sns.histplot(ax=axes[0, 1], data=num_df, x="chol")
sns.histplot(ax=axes[1, 0], data=num_df, x="thalach")
sns.histplot(ax=axes[1, 1], data=num_df, x="trestbps")
sns.histplot(ax=axes[2, 0], data=num_df, x="oldpeak")
# There appear to be outliers in oldpeak and chol. To verify this, I use boxplots.
#
fig, axes = plt.subplots(1, 2)
sns.boxplot(ax=axes[0], y=num_df["chol"])
sns.boxplot(ax=axes[1], y=num_df["oldpeak"])
# I decide to drop the one outlier from chol and two outliers from oldpeak.
index = list(num_df["chol"]).index(max(num_df["chol"]))
num_df = num_df.drop([index], axis=0)
cate_df = cate_df.drop([index], axis=0)
index = list(num_df["oldpeak"]).index(max(num_df["oldpeak"]))
num_df = num_df.drop([index], axis=0)
cate_df = cate_df.drop([index], axis=0)
index = list(num_df["oldpeak"]).index(max(num_df["oldpeak"]))
num_df = num_df.drop([index], axis=0)
cate_df = cate_df.drop([index], axis=0)
# scale the numeric variables
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
a = scaler.fit_transform(num_df)
# reconstruct the dataframe, keeping the original row labels so it aligns with cate_df
a = pd.DataFrame(a, columns=num_df.columns, index=num_df.index)
df_prepared = a.copy()
for column in cate_df:
df_prepared[column] = cate_df[column]
# Fit models to the dataset with 5-folds cross validation
# Logistic regression
precision = []
models = [
"Logistic regression",
"Decision Tree",
"Random Forest",
"KNN",
"Naive Bayes",
"SVC",
"VotingClassifier",
]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
log_reg_model = LogisticRegression()
x = df_prepared.iloc[:, 0:13].dropna()
y = df_prepared.iloc[:, 13].dropna()
scores = cross_val_score(log_reg_model, x, y, cv=5, scoring="precision")
print(scores.mean())
precision.append(scores.mean())
# Decision Tree
from sklearn import tree
deci_tree_model = tree.DecisionTreeClassifier(max_depth=3)
scores = cross_val_score(deci_tree_model, x, y, cv=5, scoring="precision")
print(scores.mean())
precision.append(scores.mean())
# Random Forest
from sklearn.ensemble import RandomForestClassifier
forest_reg = RandomForestClassifier(n_estimators=500)
scores = cross_val_score(forest_reg, x, y, cv=5, scoring="precision")
print(scores.mean())
precision.append(scores.mean())
# KNN
from sklearn.neighbors import KNeighborsClassifier
l = []
for i in range(1, 10):
knn = KNeighborsClassifier(n_neighbors=i)
scores = cross_val_score(knn, x, y, cv=5, scoring="precision")
l.append(scores.mean())
print(max(l))
precision.append(max(l))
# Naive Bayes
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
scores = cross_val_score(nb, x, y, cv=5, scoring="precision")
print(scores.mean())
precision.append(scores.mean())
# SVC
from sklearn.svm import SVC
svc = SVC(gamma="auto", kernel="linear")
scores = cross_val_score(svc, x, y, cv=5, scoring="precision")
print(scores.mean())
precision.append(scores.mean())
# Ensemble
from sklearn.ensemble import VotingClassifier
log_reg_model = LogisticRegression()
deci_tree_model = tree.DecisionTreeClassifier(max_depth=3)
forest_reg = RandomForestClassifier(n_estimators=500)
knn = KNeighborsClassifier(n_neighbors=3)
nb = GaussianNB()
svc = SVC(gamma="auto", kernel="linear")
vote = VotingClassifier(
estimators=[
("log_reg", log_reg_model),
("deci_tree", deci_tree_model),
("ran_forest", forest_reg),
("knn", knn),
("nb", nb),
("svc", svc),
],
voting="hard",
)
scores = cross_val_score(vote, x, y, cv=5, scoring="precision")
print(scores.mean())
precision.append(scores.mean())
# Display the performance of each model
plot = sns.barplot(y=models, x=precision, palette="YlGnBu")
plot.set_title("Performance by Models")
plot.set_xlabel("precision")
plot.set_ylabel("model")
|
# # EMNIST for ABCD
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import matplotlib.pyplot as plt # plotting library
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import Adam, RMSprop
from keras import backend as K
from subprocess import check_output
from keras.preprocessing.image import ImageDataGenerator
# # TO DO: Using this block of code as a reference, read the images in the ABCD directory as the dataset
K.clear_session()
n_classes = 4
img_width, img_height = 64, 64
train_data_dir = "/kaggle/input/myabcd"
# validation_data_dir = 'test_mini'
# nb_train_samples = 2250 #75750
# nb_validation_samples = 750 #25250
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
validation_split=0.1,
horizontal_flip=True,
)
# test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir, target_size=(img_height, img_width), class_mode="categorical"
)
# validation_generator = test_datagen.flow_from_directory(
# validation_data_dir,
# target_size=(img_height, img_width),
# batch_size=batch_size,
# class_mode='categorical')
# inception = InceptionV3(weights='imagenet', include_top=False)
# x = inception.output
# x = GlobalAveragePooling2D()(x)
# x = Dense(128,activation='relu')(x)
# x = Dropout(0.2)(x)
# predictions = Dense(3,kernel_regularizer=regularizers.l2(0.005), activation='softmax')(x)
# model = Model(inputs=inception.input, outputs=predictions)
# model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# checkpointer = ModelCheckpoint(filepath='best_model_3class.hdf5', verbose=1, save_best_only=True)
# csv_logger = CSVLogger('history_3class.log')
# history = model.fit_generator(train_generator,
# steps_per_epoch = nb_train_samples // batch_size,
# validation_data=validation_generator,
# validation_steps=nb_validation_samples // batch_size,
# epochs=30,
# verbose=1,
# callbacks=[csv_logger, checkpointer])
# model.save('model_trained_3class.hdf5')
# # 4. Data visualization
# [Back to Table of Contents](#0.1)
# - The following code will help to sample the 25 random MNIST digits and visualize them.
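# NOTE: the cells that originally loaded MNIST into `x_train` / `y_train` are not included in this script. A minimal sketch, assuming the standard keras.datasets loader, so the cells below can run:
from keras.datasets import mnist
# load the MNIST train/test split as 28x28 grayscale images with integer labels
(x_train, y_train), (x_test, y_test) = mnist.load_data()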
# sample 25 mnist digits from train dataset
indexes = np.random.randint(0, x_train.shape[0], size=25)
images = x_train[indexes]
labels = y_train[indexes]
# plot the 25 mnist digits
plt.figure(figsize=(5, 5))
for i in range(len(indexes)):
plt.subplot(5, 5, i + 1)
image = images[i]
plt.imshow(image, cmap="gray")
plt.axis("off")
plt.show()
plt.savefig("mnist-samples.png")
plt.close("all")
# # 5. Designing model architecture using Keras
# [Back to Table of Contents](#0.1)
# - The MLP model discussed above can be used for MNIST digit classification.
# - Because every unit (perceptron) in one layer is connected to every unit in the next, the MLP model is a fully connected network.
# - The following code shows how to design the MLP model architecture using Keras.
# - The first step in designing the model architecture is to import the Keras layers. This can be done as follows:
# ## 5.1 Import Keras layers
print(image.shape[0])
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from tensorflow.keras.utils import to_categorical, plot_model
# ## 5.2 Compute the number of labels
# - Now, the data must be in the correct shape and format.
# - After loading the MNIST dataset, the number of labels is computed as:
# compute the number of labels
num_labels = len(np.unique(y_train))
# ## 5.3 One-Hot Encoding
# - At this point, the labels are in digits format, 0 to 9.
# - This sparse scalar representation of labels is not suitable for the neural network prediction layer that outputs probabilities per class.
# - A more suitable format is called a one-hot vector, a 10-dim vector with all elements 0, except for the index of the digit class.
# - For example, if the label is 2, the equivalent one-hot vector is [0,0,1,0,0,0,0,0,0,0]. The first label has index 0.
# - The following lines convert each label into a one-hot vector:
# convert to one-hot vector
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# ## 5.4 Data Preprocessing
# - In deep learning, data is stored in tensors. The term tensor applies to a scalar (0D tensor), vector (1D tensor), matrix (2D tensor), and a multi-dimensional tensor.
# - The code that follows computes the image dimensions and the input_size of the first Dense layer, and scales each pixel value from the 0-255 range to the 0.0-1.0 range. Although raw pixel values can be used directly, it is better to normalize the input data to avoid large gradient values that could make training difficult.
# - The output of the network is also normalized. After training, there is an option to put everything back to the integer pixel values by multiplying the output tensor by 255.
# - The proposed model is based on MLP layers. Therefore, the input is expected to be a 1D tensor. So, x_train and x_test are reshaped to [60000, 28 * 28] and [10000, 28 * 28], respectively.
# image dimensions (assumed square)
image_size = x_train.shape[1]
input_size = image_size * image_size
input_size
# resize and normalize
x_train = np.reshape(x_train, [-1, input_size])
x_train = x_train.astype("float32") / 255
x_test = np.reshape(x_test, [-1, input_size])
x_test = x_test.astype("float32") / 255
# ## 5.5 Setting network parameters
# - Now, we will set the network parameters as follows:
# network parameters
batch_size = 128
hidden_units = 256
dropout = 0.45
# - The **batch_size** argument indicates the number of data that we will use for each update of the model parameters.
# - **Hidden_units** shows the number of hidden units.
# - **Dropout** is the dropout rate (more on this in section 7 - **Overfitting and Regularization**).
# ## 5.6 Designing the model architecture
# - The next step is to design the model architecture. The proposed model is made of three MLP layers.
# - In Keras, an MLP layer is referred to as Dense, which stands for the densely connected layer.
# - Both the first and second MLP layers are identical in nature with 256 units each, followed by relu activation and dropout.
# - 256 units are chosen since 128, 512 and 1,024 units have lower performance metrics. At 128 units, the network converges quickly, but has a lower test accuracy. Increasing the number of units to 512 or 1,024 does not improve the test accuracy significantly.
# - The main data structure in Keras is the Sequential class, which allows the creation of a basic neural network.
# - The Sequential class of the Keras library is a wrapper for the sequential neural network model that Keras offers and can be created in the following way:
# `from keras.models import Sequential`
# `model = Sequential()`
# - The model in Keras is considered as a sequence of layers and each of them gradually “distills” the input data to obtain the desired output.
# - In Keras, we can add the required types of layers through the **add()** method.
# model is a 3-layer MLP with ReLU and dropout after each layer
model = Sequential()
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(hidden_units))
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(num_labels))
model.add(Activation("softmax"))
# - Since a Dense layer is a linear operation, a sequence of Dense layers can only approximate a linear function.
# - The problem is that the MNIST digit classification is inherently a non-linear process. Inserting a relu activation between Dense layers will enable MLPs to model non-linear mappings.
# - relu or Rectified Linear Unit (ReLU) is a simple non-linear function. It allows positive inputs to pass through unchanged while clamping everything else to zero.
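# A tiny illustration of relu (a numpy sketch, not part of the model code): positive values pass through unchanged, negatives are clamped to zero.
import numpy as np
z = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
print(np.maximum(z, 0.0))  # negatives become 0, positives are unchanged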
# ## 5.7 View model summary
# - Keras library provides us **summary()** method to check the model description.
model.summary()
# - The above listing shows the model summary of the proposed network. It requires a total of 269,322 parameters.
# - This is substantial considering that we have a simple task of classifying MNIST digits. So, MLPs are not parameter efficient.
# - The total number of parameters required can be computed as follows:
# - From input to Dense layer: 784 × 256 + 256 = 200,960.
#
# - From first Dense to second Dense: 256 × 256 + 256 = 65,792.
#
# - From second Dense to the output layer: 10 × 256 + 10 = 2,570.
#
# - The total is 200,960 + 65,792 + 2,570 = 269,322.
# - Another way of verifying the network is by calling the **plot_model()** method as follows:
plot_model(model, to_file="mlp-mnist.png", show_shapes=True)
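# As a quick cross-check of the parameter arithmetic above (a sketch using the hyperparameters already defined in this notebook):
dense1 = input_size * hidden_units + hidden_units  # 784*256 + 256 = 200,960
dense2 = hidden_units * hidden_units + hidden_units  # 256*256 + 256 = 65,792
output = hidden_units * num_labels + num_labels  # 256*10 + 10 = 2,570
print(dense1 + dense2 + output)  # 269,322 -- matches model.summary()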
# # 6. Implement MLP model using Keras
# [Back to Table of Contents](#0.1)
# - The implementation of the MLP model in Keras comprises three steps:
# - Compiling the model with the compile() method.
#
# - Training the model with fit() method.
#
# - Evaluating the model performance with evaluate() method.
#
#
# - For detailed discussion on implementation, please refer to my previous kernel [Comprehensive Guide to ANN with Keras](https://www.kaggle.com/prashant111/comprehensive-guide-to-ann-with-keras)
# ## 6.1 Compile the model with compile() method
# - Compilation of model can be done as follows:
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# ### Loss function (categorical_crossentropy)
# - How far the predicted tensor is from the one-hot ground truth vector is called **loss**.
# - In this example, we use **categorical_crossentropy** as the loss function. It is the negative of the sum of the product of the target and the logarithm of the prediction.
# - There are other loss functions in Keras, such as mean_absolute_error and binary_crossentropy. The choice of the loss function is not arbitrary but should be a criterion that the model is learning.
# - For classification by category, categorical_crossentropy or mean_squared_error is a good choice after the softmax activation layer. The binary_crossentropy loss function is normally used after the sigmoid activation layer while mean_squared_error is an option for tanh output.
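# A small numeric sketch (using numpy, not Keras) of categorical cross-entropy for a single sample: the loss is the negative log-probability that the model assigns to the true class.
import numpy as np
y_true_onehot = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])  # one-hot target for digit 2
y_prob_example = np.array([0.05, 0.05, 0.7, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02])  # illustrative predicted probabilities
loss = -np.sum(y_true_onehot * np.log(y_prob_example))
print(loss)  # -log(0.7) ≈ 0.357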
# ### Optimization (optimizer adam)
# - With optimization, the objective is to minimize the loss function. The idea is that if the loss is reduced to an acceptable level, the model has indirectly learned the function mapping input to output.
# - In Keras, there are several choices for optimizers. The most commonly used optimizers are Stochastic Gradient Descent (SGD), Adaptive Moments (Adam) and Root Mean Squared Propagation (RMSprop).
# - Each optimizer features tunable parameters like learning rate, momentum, and decay.
# - Adam and RMSprop are variations of SGD with adaptive learning rates. In the proposed classifier network, Adam is used since it has the highest test accuracy.
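# If the optimizer hyperparameters need to be tuned, the string "adam" can be replaced by an optimizer object. A sketch, equivalent to the compile call above (the learning rate shown is the Keras default, used here only for illustration):
from tensorflow.keras.optimizers import Adam  # already imported near the top of this notebook
model.compile(
    loss="categorical_crossentropy",
    optimizer=Adam(learning_rate=0.001),  # lower this if training is unstable
    metrics=["accuracy"],
)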
# ### Metrics (accuracy)
# - Performance metrics are used to determine if a model has learned the underlying data distribution. The default metric in Keras is loss.
# - During training, validation, and testing, other metrics such as **accuracy** can also be included.
# - **Accuracy** is the percent, or fraction, of correct predictions based on ground truth.
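# Accuracy can also be computed by hand from class indices (a tiny numpy sketch with made-up values, not the notebook's test data): it is simply the fraction of predictions that match the ground truth.
import numpy as np
y_true_cls = np.array([7, 2, 1])  # ground-truth class indices
y_pred_cls = np.array([7, 2, 3])  # predicted class indices; 2 of 3 are correct
print(np.mean(y_pred_cls == y_true_cls))  # 0.666... = fraction of correct predictions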
# ## 6.2 Train the model with fit() method
model.fit(x_train, y_train, epochs=20, batch_size=batch_size)
# ## 6.3 Evaluating model performance with evaluate() method
loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print("\nTest accuracy: %.1f%%" % (100.0 * acc))
path = "/kaggle/input/testdata1/three.jpg"
img = cv2.imread(path)
img
# Predict the result with the model and print it
path = "/kaggle/input/testdata1/three.jpg"
img = cv2.imread(path)
plt.imshow(img, cmap=plt.cm.binary)
plt.show()
img = np.array(img).astype(np.float32)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img.reshape(-1, 28 * 28)
y_pred = model.predict(img)
# print('predicted number:',np.argmax(y_pred, axis=1)[0])
y_pred
# # 7. Overfitting and Regularization
# [Back to Table of Contents](#0.1)
# - A neural network has the property to memorize the characteristics of training data. This is called **overfitting**.
# - In such a case, the network fails to generalize when subject to the test data.
# - To avoid this tendency, the model uses a regularizing layer or function. A commonly used regularizing layer is referred to as a **Dropout layer**.
# - Given a dropout rate (dropout=0.45), the **Dropout layer** randomly removes the fraction of units from participating in the next layer. For example, if the first layer has 256 units, after dropout=0.45 is applied, only (1 - 0.45) * 256 units = 140 units from layer 1 participate in layer 2.
# - The Dropout layer makes neural networks robust to unforeseen input data because the network is trained to predict correctly, even if some units are missing.
# - The dropout is not used in the output layer and it is only active during training. Moreover, dropout is not present during prediction.
# - There are other regularizers that can be used besides dropout, such as l1 or l2. In Keras, the bias, weight and activation output can be regularized per layer.
# - l1 and l2 favor smaller parameter values by adding a penalty function. Both l1 and l2 enforce the penalty using a fraction of the sum of absolute (l1) or square (l2) of parameter values.
# - So, the penalty function forces the optimizer to find parameter values that are small. Neural networks with small parameter values are more insensitive to the presence of noise from within the input data.
# - So, the l2 weight regularizer with fraction=0.001 can be implemented as:
from keras.regularizers import l2
model.add(Dense(hidden_units, kernel_regularizer=l2(0.001), input_dim=input_size))
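# To make the dropout description above concrete, a small numpy sketch (not part of the Keras model) of what a dropout mask with rate 0.45 does to a layer of 256 activations during training:
import numpy as np
rng = np.random.default_rng(0)
activations = np.ones(256)
keep_prob = 1 - 0.45
mask = rng.random(256) < keep_prob  # roughly 55% of units survive
dropped = np.where(mask, activations / keep_prob, 0.0)  # inverted dropout rescales the survivors
print(mask.sum())  # roughly (1 - 0.45) * 256 ≈ 140 units participate in the next layer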
|
# ## Exploratory Data Analysis | Reinforcing the Skills
# Let's import the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
# Let's load our data from the current directory, turn it into a data frame and assign it to the variable df. (pd.read_csv(...csv))
# Read the CSV file
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
# Let's display the first 5 observations of the data frame.
# head() displays the first 5 observations by default, but the number of observations shown can be set by the value passed in the parentheses.
df.head()
# Let's display how many features and how many observations the data frame consists of.
# Using len on the columns of the data set assigned to df prints the number of features.
print("Kaç öznitelik var:", len(df.columns))
# Using len directly on the data set shows how many observations (rows) it consists of.
print("Kaç gözlem var:", len(df))
# Let's display the data types of the variables in the data frame and the memory usage.
# info() shows the types of the features and the memory they occupy.
df.info()
# Let's display the basic statistics for the numerical variables in the data frame.
# Reasoning from the standard deviation and mean values, let's form an idea of how much variance each variable has.
# We use describe() as below to display the basic statistics and draw conclusions from the mean values.
df.describe()
# Let's observe how many missing values each feature in the data frame has.
# To see how many missing values each feature has, we print isnull() together with sum().
df.isnull().sum()
# Let's draw a correlation matrix to show whether there is correlation between the numerical variables, and reason about the correlation coefficients.
# Which two variables have the strongest positive relationship?
# We compute the correlation matrix between the numerical variables in our data frame.
corr_matrix = df.corr(numeric_only=True)
# Mask the diagonal (which is always 1) and find the highest off-diagonal correlation coefficient and its position.
off_diag = corr_matrix.where(~np.eye(len(corr_matrix), dtype=bool))
max_corr = off_diag.max().max()
max_corr_pos = off_diag[off_diag == max_corr].dropna(how="all")
print(max_corr_pos)
# Let's draw a heat map to read the correlation coefficients more easily.
# We draw a heat map so the correlation matrix is easier to read.
sns.heatmap(corr_matrix, cmap="coolwarm", annot=True)
# Let's display the unique values of our data frame's target variable "variety".
# We display the unique values in the species column as an array.
unique_varieties = df["species"].unique()
print(unique_varieties)
# Let's display how many unique values the target variable "variety" contains.
# We print how many unique values there are in the species column.
num_unique_varieties = df["species"].nunique()
print(num_unique_varieties)
# We see that the sepal.width and sepal.length variables in the data frame are continuous. To visualize these two continuous variables, let's first use a scatterplot.
sns.scatterplot(data=df, x="sepal_width", y="sepal_length")
# To examine the same two variables from a different angle together with their frequencies, let's visualize them with a jointplot.
sns.jointplot(data=df, x="sepal_width", y="sepal_length")
# Let's visualize the same two variables with a scatterplot again, but this time break them down by the target variable using the "variety" (hue) parameter.
# Can the sepal variables separate the 3 different colors into clusters? Let's think about how distinguishable they are.
sns.scatterplot(data=df, x="sepal_width", y="sepal_length", hue="species")
# Let's use the value_counts() function to check how evenly our data frame is distributed.
print(df["species"].value_counts())
# Draw a violin plot to examine the distribution of the sepal.width variable.
# What does this distribution tell us; can we say it is a normal distribution?
sns.kdeplot(data=df, x="sepal_width")
# To understand it better, let's draw a distplot of sepal.width.
sns.histplot(df["sepal_width"])
# Let's visualize three violin plots of the sepal.length distribution, one for each of the three flower species, in a single line.
sns.violinplot(x="species", y="sepal_length", data=df)
# How many observations of each flower species does our data frame contain?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually we pass the variety (species) parameter to sns.countplot().
sns.countplot(x="species", data=df)
# Let's visualize the sepal.length and sepal.width variables with sns.jointplot, and examine the distribution and the regions where its frequency is high.
sns.jointplot(x="sepal_length", y="sepal_width", data=df)
# Let's add the kind = "kde" parameter to the previous visualization. This turns the dotted representation of the distribution into a density-oriented visualization.
sns.jointplot(x="sepal_length", y="sepal_width", data=df, kind="kde")
# Let's plot the distributions of the petal.length and petal.width variables with a scatterplot.
sns.scatterplot(x="petal_length", y="petal_width", data=df)
# Let's add a 3rd dimension to the same visualization with the hue = "variety" parameter.
sns.scatterplot(data=df, x="petal_length", y="petal_width", hue="species")
# Let's implement the sns.lmplot() visualization with the petal.length and petal.width variables. Let's answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(data=df, x="petal_length", y="petal_width")
# To reinforce the answer to this question, let's print the correlation coefficient between the two variables.
df[["petal_length", "petal_width"]].corr()
df[["petal_length", "petal_width"]].corr().iloc[0, 1]
# Let's create a new total length feature by adding Petal Length and Sepal Length.
df["total_length"] = df["petal_length"] + df["sepal_length"]
print(df["total_length"])
# Let's print the mean value of total.length.
df["total_length"] = df["petal_length"] + df["sepal_length"]
mean_total_length = df["total_length"].mean()
print(mean_total_length)
# Let's print the standard deviation of total.length.
print(df["total_length"].std())
# Let's print the maximum value of sepal.length.
max_sepal_length = df["sepal_length"].max()
print(max_sepal_length)
# Let's print the observations whose sepal.length is greater than 5.5 and whose species is setosa.
setosa_df = df[df["species"] == "Setosa"]
setosa_df = setosa_df[setosa_df["sepal_length"] > 5.5]
print(setosa_df)
# For the observations whose petal.length is less than 5 and whose species is virginica, let's print only the sepal.length and sepal.width variables and their values.
subset_df = df[(df["petal_length"] < 5) & (df["species"] == "Virginica")][
["sepal_length", "sepal_width"]
]
print(subset_df)
# Let's group by our target variable variety and display the mean of our variable values.
df.groupby("species").mean()
# Let's group by our target variable variety and print the standard deviation values of only the petal.length variable.
# group by species
grouped = df.groupby("species")
# print the standard deviation of only the petal_length variable
print(grouped["petal_length"].std())
|
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import (
train_test_split,
cross_validate,
KFold,
GridSearchCV,
)
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from sklearn.impute import SimpleImputer
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
# ## Creating datasets with Pandas
ROOT_PATH = "/kaggle/input/house-prices-advanced-regression-techniques"
train = pd.read_csv(ROOT_PATH + "/train.csv")
test = pd.read_csv(ROOT_PATH + "/test.csv")
sample = pd.read_csv(ROOT_PATH + "/sample_submission.csv")
# ## Creating a summary function
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("train", train)
# ## Filling the NA values with mean of the column
train["LotFrontage"].fillna((train["LotFrontage"].mean()), inplace=True)
summary("test", test)
# ## Checking the distribution of the Saleprice in train dataset
sns.distplot(train["SalePrice"])
plt.show()
# ## Validating the Saleprice wrt GrLivArea,TotalBsmtSF and YearBuilt
sns.scatterplot(x="GrLivArea", y="SalePrice", data=train)
plt.show()
sns.scatterplot(x="TotalBsmtSF", y="SalePrice", data=train)
plt.show()
sns.scatterplot(x="YearBuilt", y="SalePrice", data=train)
plt.xticks(rotation=90)
plt.show()
# ## SalePrice Vs OverallQual, GrLivArea and GarageCars
sns.pairplot(train, vars=["SalePrice", "OverallQual", "GrLivArea", "GarageCars"])
plt.show()
test["LotFrontage"].fillna((test["LotFrontage"].mean()), inplace=True)
pd.set_option("display.max_columns", None)
train.head(5)
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
train_nums = train.select_dtypes(include=numerics)
# ## Distribution of all the numerical columns in train dataset
plt.figure(figsize=(24, 20))
for i in range(1, len(train_nums.columns)):
plt.subplot(7, 7, i)
sns.kdeplot(x=train[train_nums.columns[i]], label="Continuous Columns")
plt.tight_layout()
features = train.columns
# ## Modelling :)
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso
model_dict = {
"Light GBM": LGBMRegressor(),
"XG Boost": XGBRegressor(),
"Random Forest": RandomForestRegressor(),
"Linear": LinearRegression(),
"Decision Tree": DecisionTreeRegressor(),
"Logistic Reg": LogisticRegression(),
"Ridge": Ridge(),
"Lasso": Lasso(),
}
y = train["SalePrice"]
train.drop(columns=["SalePrice"], inplace=True)
# One-hot encode the categorical columns of train and test and align the dummy columns
train_cats = train.select_dtypes(include="object")
test_cats = test.select_dtypes(include="object")
encoded_train, encoded_test = pd.get_dummies(train_cats).align(
    pd.get_dummies(test_cats), join="left", axis=1, fill_value=0
)
train = pd.concat([train.drop(columns=train_cats.columns), encoded_train], axis=1)
test = pd.concat([test.drop(columns=test_cats.columns), encoded_test], axis=1)
# ## Converting the categorical columns to arrays
my_imputer = SimpleImputer()
data_with_imputed_values = my_imputer.fit_transform(train)
data_with_imputed_values_test = my_imputer.fit_transform(test)
X = data_with_imputed_values
X_validation = data_with_imputed_values_test
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=52
)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
def evaluation(model_str, y_pred, y_pred_train):
results = {
"model": model_str,
"rmseval": mean_squared_error(y_test, y_pred, squared=False),
"rmse_train": mean_squared_error(y_train, y_pred_train, squared=False),
}
return results
# ## Model fitting the evaluation
result_list = []
for model in model_dict:
model_dict[model].fit(X_train, y_train)
y_pred = model_dict[model].predict(X_test)
y_pred_train = model_dict[model].predict(X_train)
result = evaluation(model, y_pred, y_pred_train)
result_list.append(result)
df_eval = pd.DataFrame(result_list)
df_eval
# ## Checking the best model from the above list
RF = RandomForestRegressor()
RF.fit(X_train, y_train)
RF.predict(X_test)
score = RF.score(X_test, y_test)
print(score)
y_pred = RF.predict(X_validation)
# ## Creating the submission file
output = pd.DataFrame({"Id": test["Id"], "SalePrice": y_pred})
output.to_csv("submission.csv", index=False)
|
# # Session 8
# To begin, each of you should create a copy of this document and add its link to the Resources document
# (https://docs.google.com/document/d/142Mt-C7qPsjvIZyx7Cy65lNphn1_67fUfarLaGwYSRk/edit?usp=sharing)
# so that I have access to it.
# Import modules
import numpy as np
import matplotlib.pyplot as plt
import astropy.constants as const
import pandas as pd
import batman
from shutil import copyfile
import ttvfast
# ## Exercises with batman!
# ### Let's first discover the orbital parameters relevant for transits
## to be modified by you if you have a different dataset name:
votre_dataset = "batman-ttv-utils"
copyfile(
src="/kaggle/input/" + votre_dataset + "/batman-ttv-utils/batmanutils.py",
dst="../working/batmanutils.py",
)
copyfile(
src="/kaggle/input/"
+ votre_dataset
+ "/batman-ttv-utils/ttvutils_CSMB_20230202.py",
dst="../working/utils.py",
)
# import our "custom" functions
import batmanutils as but
import utils as ut
data_visit1 = pd.read_csv(
"/kaggle/input/" + votre_dataset + "/batman-ttv-utils/HST_Visit1.csv"
)
# # O-C diagrams
# ### Read the data
#
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pandas as pd
from copy import deepcopy
import astropy.io.ascii as aioascii
import sys
from astropy import constants as const
from math import atan2
import ttvfast
from ttvfast.models import Planet
from shutil import copyfile
import matplotlib as mpl
path_input_ttvs = (
"/kaggle/input/" + votre_dataset + "/batman-ttv-utils/"
) ### to be modified
# Planet b
b_epochs, b_times, b_unc = ut.read_tt_file(
path_input_ttvs + "JontofHutter2015_Planet1.txt"
)
# Planet c
c_epochs, c_times, c_unc = ut.read_tt_file(
path_input_ttvs + "JontofHutter2015_Planet2.txt"
)
# Planet d
d_epochs, d_times, d_unc = ut.read_tt_file(
path_input_ttvs + "JontofHutter2015_Planet3.txt"
)
d_unc_low = deepcopy(d_unc)
d_unc_upp = deepcopy(d_unc)
t_times = aioascii.read(
path_input_ttvs + "transit_times_HST_Spitzer_newAnalysis_20220621.csv"
)
d_epochs = np.append(d_epochs, np.array(t_times["epoch"]))
d_times = np.append(d_times, np.array(t_times["Tc"]))
d_unc_low = np.append(d_unc_low, np.array(t_times["-1sigma"]))
d_unc_upp = np.append(d_unc_upp, np.array(t_times["+1sigma"]))
# ### Make a first plot of the TTVs
#
# unc stands for uncertainties
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
# ### Add your measured TTV (session 7) to the plot!
#
quelle_epoch = 89  # epoch of the Hubble transit
quel_t0 = 2457012.75  ## change to your value of t0
# add it to the plot:
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
axd.scatter(
[quelle_epoch],
[(quel_t0 - fitd(quelle_epoch)) * 60 * 24],
color="r",
marker="o",
label="Valeur trouvée",
)
axd.legend(loc=1)
# # New: a 3-planet model!
# choose masses for Kepler-138 b, c, d
# for now, test values.
masses_planetes = dict()  # in Earth masses
masses_planetes["b"] = 0.16833438481969000
masses_planetes["c"] = 5.8881981521201800
masses_planetes["d"] = 2.22178661763819
masses_planetes["e"] = None
# Reproduce the plot with the model.
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
axd.scatter(
[quelle_epoch],
[(quel_t0 - fitd(quelle_epoch)) * 60 * 24],
color="r",
marker="o",
label="Valeur trouvée",
)
# create the model (prediction for these masses)
(b_epochs_pred, c_epochs_pred, d_epochs_pred), (ttv_b, ttv_c, ttv_d) = ut.calc_ttvs(
(fitb, fitc, fitd), npla=3, mass_in_earth_masses=masses_planetes
)
color = "C4"
axb.plot(b_epochs_pred, ttv_b, color=color)
axc.plot(c_epochs_pred, ttv_c, color=color)
axd.plot(d_epochs_pred, ttv_d, color=color, label="Prédiction")
axd.legend(loc=1)
# ### Choose your own values for the masses and look at the result!
#
# choose masses for Kepler-138 b, c, d
# MODIFY THE VALUES!!
masses_planetes = dict()  # in Earth masses
masses_planetes["b"] = 0.16833438481969000  ## MODIFY
masses_planetes["c"] = 5.8881981521201800  ## MODIFY
masses_planetes["d"] = 2.22178661763819  ## MODIFY
masses_planetes["e"] = None
# Reproduce the plot with the model.
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
axd.scatter(
[quelle_epoch],
[(quel_t0 - fitd(quelle_epoch)) * 60 * 24],
color="r",
marker="o",
label="Mon point",
)
# create the model (prediction for these masses)
(b_epochs_pred, c_epochs_pred, d_epochs_pred), (ttv_b, ttv_c, ttv_d) = ut.calc_ttvs(
(fitb, fitc, fitd), npla=3, mass_in_earth_masses=masses_planetes
)
color = "C4"
axb.plot(b_epochs_pred, ttv_b, color=color)
axc.plot(c_epochs_pred, ttv_c, color=color)
axd.plot(d_epochs_pred, ttv_d, color=color)
axd.legend()
# ### Let's try changing the mass of planet d to reproduce the observations!
# change only the value of the mass of planet d (further down)
masses_planetes = dict()  # in Earth masses
masses_planetes["b"] = 0.16833438481969000
masses_planetes["c"] = 5.8881981521201800
masses_planetes["d"] = 2.22178661763819
masses_planetes["e"] = None
# Reproduce the plot with the model.
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
axd.scatter(
[quelle_epoch],
[(quel_t0 - fitd(quelle_epoch)) * 60 * 24],
color="r",
marker="o",
label="Mon point",
)
# create the model (prediction for these masses)
# create several masses for planet d
### HERE:
masses_d = np.linspace(0.8, 5.5, 5)  ## your turn!!
# colours
cmap = mpl.cm.viridis
for i in range(len(masses_d)):
masses_planetes["d"] = masses_d[i]
(b_epochs_pred, c_epochs_pred, d_epochs_pred), (ttv_b, ttv_c, ttv_d) = ut.calc_ttvs(
(fitb, fitc, fitd), npla=3, mass_in_earth_masses=masses_planetes
)
color = cmap((i + 1) / len(masses_d))
axb.plot(b_epochs_pred, ttv_b, color=color)
axc.plot(c_epochs_pred, ttv_c, color=color)
axd.plot(d_epochs_pred, ttv_d, color=color)
axd.legend()
# ### Now, a try with 4 planets!
masses_planetes = dict()  # in Earth masses
masses_planetes["b"] = 0.07935407520189360
masses_planetes["c"] = 2.49385250008207
masses_planetes["d"] = 2.698468008325090
masses_planetes["e"] = 0.8
npla = 4
# Reproduce the plot with the model.
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
axd.scatter(
[quelle_epoch],
[(quel_t0 - fitd(quelle_epoch)) * 60 * 24],
color="r",
marker="o",
label="Mon point",
)
# create the model (prediction for these masses)
(b_epochs_pred, c_epochs_pred, d_epochs_pred), (ttv_b, ttv_c, ttv_d) = ut.calc_ttvs(
(fitb, fitc, fitd), npla=npla, mass_in_earth_masses=masses_planetes
)
color = "C4"
axb.plot(b_epochs_pred, ttv_b, color=color)
axc.plot(c_epochs_pred, ttv_c, color=color)
axd.plot(d_epochs_pred, ttv_d, color=color)
axd.legend()
# ### Let's try changing the mass of planet e!
masses_planetes = dict()  # in Earth masses
masses_planetes["b"] = 0.07935407520189360
masses_planetes["c"] = 2.49385250008207
masses_planetes["d"] = 2.698468008325090
masses_planetes["e"] = 0.8
npla = 4
# Reproduce the plot with the model.
fig, (axb, axc, axd), (fitb, fitc, fitd) = ut.plot_observed_ttvs(
b_times,
b_epochs,
b_unc,
c_times,
c_epochs,
c_unc,
d_times,
d_epochs,
np.array([d_unc_low, d_unc_upp]),
)
axd.scatter(
[quelle_epoch],
[(quel_t0 - fitd(quelle_epoch)) * 60 * 24],
color="r",
marker="o",
label="Mon point",
)
# create the model (prediction for these masses)
# create several masses for planet e
### HERE:
masses_e = np.linspace(0.8, 1.5, 5)  ## your turn!!
# colours
cmap = mpl.cm.viridis
for i in range(len(masses_e)):
masses_planetes["e"] = masses_e[i]
(b_epochs_pred, c_epochs_pred, d_epochs_pred), (ttv_b, ttv_c, ttv_d) = ut.calc_ttvs(
(fitb, fitc, fitd), npla=npla, mass_in_earth_masses=masses_planetes
)
color = cmap((i + 1) / len(masses_e))
axb.plot(b_epochs_pred, ttv_b, color=color)
axc.plot(c_epochs_pred, ttv_c, color=color)
axd.plot(d_epochs_pred, ttv_d, color=color)
axd.legend()
|
from scipy import stats
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pylab import *
pd.set_option("display.max_columns", 100)
import warnings
warnings.filterwarnings("ignore")
dx = 0.01
X = np.arange(-2, 2, dx)  # 400 values
Y = exp(-(X**2))
# Normalize the data to a proper PDF
Y /= (dx * Y).sum()
# Compute the CDF and return the probability value
CY = np.cumsum(Y * dx)
# Plot the probability density function(blue) and the cumulative density function (red dotted)
plot(X, Y)
plot(X, CY, "r--")
show()
## cdf function for x=0 gives us back y=0.5
print(stats.norm.cdf(np.array([1, -1.0, 0, 1, 3, 4, -2, 6])))
np.random.seed(282629734) # set-up the seed
# Generate 1000 Student’s T continuous random variables.
x = stats.t.rvs(10, size=1000)
print(x.min()) # equivalent to np.min(x)
print(x.max()) # equivalent to np.max(x)
print(x.mean()) # equivalent to np.mean(x)
print(x.var()) # equivalent to np.var(x))
stats.describe(x)
x = np.arange(0, 3 * np.pi, 0.1) # numpy array with 95 values
y = -x
plt.plot(x, y)
plt.show()
y = -(x**2)
plt.plot(x, y)
plt.show()
y_sin = np.sin(x)
y_cos = np.cos(x)
plt.plot(x, y_sin)
plt.plot(x, y_cos)
plt.xlabel("x axis label")
plt.ylabel("y axis label")
plt.title("Sine and Cosine")
plt.legend(["Sine", "Cosine"])
plt.show()
y_sin = np.sin(x)
y_cos = np.cos(x)
plt.subplot(2, 1, 1)
plt.plot(x, y_sin)
plt.title("Sine")
plt.subplot(2, 1, 2)
plt.plot(x, y_cos)
plt.title("Cosine")
plt.show()
df = pd.read_csv("../input/data-for-datavis/insurance.csv")
df
df.groupby(["smoker"]).size()
plt.figure(figsize=(12, 6))
sns.swarmplot(x="sex", y="charges", data=df)
plt.show()
sns.boxplot(data=df.loc[:, ["age", "bmi"]])
plt.show()
sns.boxplot(data=df.loc[df["sex"] == "female", ["age", "bmi"]])
df.head()
sns.distplot(df.age) # Histogram
plt.show()
sns.distplot(df.bmi) # Bell shape
plt.show()
sns.distplot(df.charges) # Histogram
plt.show()
print("chargese mean: ", df.charges.mean())
print("chargese median: ", df.charges.median())
tips_data = sns.load_dataset("tips")
tips_data
tips_data.describe()
# The average total bill that people paid is $19.78.
# Does the size of the party impact the size of the tip, or the size of the bill?
tips_data.total_bill.median() # 50%
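# One quick way to explore the question above (a small sketch, not part of the original analysis): average tip and total bill per party size.
print(tips_data.groupby("size")[["tip", "total_bill"]].mean())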
sns.set_style("darkgrid")
plt.figure(figsize=(10, 6))
sns.distplot(tips_data.total_bill, kde=True).set_title("Histogram of total_bill")
plt.show() # unimodal and right skewed shape
sns.distplot(tips_data.tip)
plt.show()
sns.distplot(tips_data["total_bill"], kde=False)
sns.distplot(tips_data["tip"], kde=False).set_title(
"Histogram of Both Tip Size and Total Bill"
)
plt.show()
sns.boxplot(tips_data["total_bill"])
plt.show()
sns.boxplot(tips_data["tip"]).set_title("Box plot of the Tip")
plt.show()
sns.boxplot(tips_data["total_bill"])
sns.boxplot(tips_data["tip"]).set_title("Box plot of the Total Bill and Tips")
plt.show()
sns.boxplot(x=tips_data["tip"], y=tips_data["smoker"]) # y is the group by
plt.show()
sns.boxplot(x=tips_data["tip"], y=tips_data["time"])
g = sns.FacetGrid(tips_data, row="time")
g = g.map(plt.hist, "tip")
plt.show()
# Create a boxplot and histogram of the tips grouped by the day
sns.boxplot(x=tips_data["tip"], y=tips_data["day"])
g = sns.FacetGrid(tips_data, row="day") # groupby: Day
g = g.map(plt.hist, "tip")
plt.show()
# # Univariate data analyses - NHANES case study
da = pd.read_csv("../input/nhanes-2015-2016/NHANES.csv")
da  # the focus here is only on the 'DMDEDUC2' column
# # ---------------------------------
# # Relabel education variable to have informative character labels.
# The table down shows the data file have DMDEDUC=4, which indicates that the person has completed some college,
# but has not graduated with a four-year degree.
da.DMDEDUC2.value_counts() # the "frequency distribution" of the variable.
da.DMDEDUC2.isnull().sum()
# Relabel the variable to have informative character labels
da["DMDEDUC2x"] = da.DMDEDUC2.replace(
{
1: "<9",
2: "9-11",
3: "HS/GED",
4: "Some college/AA",
5: "College",
7: "Refused",
9: "Don't know",
}
)
da.DMDEDUC2x.value_counts()
da["RIAGENDRx"] = da.RIAGENDR.replace({1: "Male", 2: "Female"})
# It is more relevant to consider the proportion of the sample with each of the possible category values,
# rather than the number of people in each category
x = da.DMDEDUC2x.value_counts()
x / x.sum()
# Explicitly drop the missing cases using the dropna method before generating the summaries.
da.BMXWT.dropna().describe()
da.BMXWT.dropna()
x = (
da.BMXWT.dropna()
) # Extract all non-missing values of BMXWT into a variable called 'x'
print("mean:", x.mean()) # Pandas method
print(np.mean(x)) # Numpy function
print(x.median())
print(np.percentile(x, 50)) # 50th percentile, same as the median
print(np.percentile(x, 75)) # 75th percentile
print(x.quantile(0.75)) # Pandas method for quantiles, equivalent to 75th percentile
# A person is considered to have pre-hypertension when their systolic blood pressure (BPXSY2) is between 120
# and 139, or their diastolic blood pressure (BPXDI2) is between 80 and 89.
np.mean((da.BPXSY1 >= 120) & (da.BPXSY2 <= 139))
np.mean((da.BPXSY1 >= 120) & (da.BPXSY2 <= 139))
# the proportion of NHANES subjects who are pre-hypertensive based on either systolic or diastolic blood pressure.
a = (da.BPXSY1 >= 120) & (da.BPXSY2 <= 139)
b = (da.BPXDI1 >= 80) & (da.BPXDI2 <= 89)
print(np.mean(a | b))
# We can calculate the extent to which white coat anxiety is present in the NHANES data by looking a the mean
# difference between the first two systolic or diastolic blood pressure measurements.
print(np.mean(da.BPXSY1 - da.BPXSY2))
print(np.mean(da.BPXDI1 - da.BPXDI2))
# the body weight (kg) shown as a histogram
sns.distplot(da.BMXWT.dropna())
sns.distplot(da.BPXSY1.dropna())
bp = sns.boxplot(data=da.loc[:, ["BPXSY1", "BPXSY2", "BPXDI1", "BPXDI2"]])
_ = bp.set_ylabel("Blood pressure in mm/Hg")
da["agegrp"] = pd.cut(
da.RIDAGEYR, [18, 30, 40, 50, 60, 70, 80]
) # Create age strata based on these cut points
plt.figure(figsize=(12, 5))
sns.boxplot(x="agegrp", y="BPXSY1", data=da)
# Make boxplot of BPXSY1 stratified by age group
plt.figure(figsize=(12, 5))
sns.boxplot(x="agegrp", y="BPXSY1", hue="RIAGENDRx", data=da)
# #### We see from the figure below that within each gender, older people tend to have higher blood pressure than younger people. However within an age band, the relationship between gender and systolic blood pressure is somewhat complex -- in younger people, men have substantially higher blood pressures than women of the same age. However for people older than 50, this relationship becomes much weaker, and among people older than 70 it appears to reverse. It is also notable that the variation of these distributions, reflected in the height of each box in the boxplot, increases with age.
plt.figure(figsize=(12, 5))
sns.boxplot(x="RIAGENDRx", y="BPXSY1", hue="agegrp", data=da)
# #### Stratification can also be useful when working with categorical variables. Below we look at the frequency distribution of educational attainment ("DMDEDUC2") within 10-year age bands. While "some college" is the most common response in all age bands, up to around age 60 the second most common response is "college" (i.e. the person graduated from college with a four-year degree). However for people over 50, there are as many or more people with only high school or general equivalency diplomas (HS/GED) than there are college graduates.
da.groupby("agegrp")["DMDEDUC2x"].value_counts()
# To stratify jointly by age and gender to explore how educational attainment varies by both of these factors simultaneously
dx = da.loc[
~da.DMDEDUC2x.isin(["Don't know", "Missing"]), :
] # Eliminate rare/missing values
dx = dx.groupby(["agegrp", "RIAGENDRx"])["DMDEDUC2x"]
dx = dx.value_counts()
dx = dx.unstack() # Restructure the results from 'long' to 'wide'
dx = dx.apply(
lambda x: x / x.sum(), axis=1
) # Normalize within each stratum to get proportions
print(dx.to_string(float_format="%.3f")) # Limit display to 3 decimal places
# # ------------------------------
# # Relabel Marital status variable to have informative character labels.
da["DMDMARTLx"] = da.DMDMARTL.replace(
{
1: "Married",
2: "Widowed",
3: "Divorced",
4: "Separated",
5: "Never married",
6: "Living with partner",
77: "Refused",
}
)
da.DMDMARTLx.value_counts() # the distribution of marital status between women and men, for people of all ages.
# the distribution of marital status for Men in all ages.
da.loc[da["RIAGENDRx"] == "Male"].DMDMARTLx.value_counts()
da.loc[da["RIAGENDRx"] == "Female"].RIDAGEYR.describe() # female ages
da.loc[da["RIAGENDRx"] == "Female"].DMDMARTLx.value_counts().sum() # total women
# the distribution of marital status for women in all ages.
da.loc[da["RIAGENDRx"] == "Female"].DMDMARTLx.value_counts()
# There are more widowed women than widowed men, which suggests that women tend to live longer than men.
da_Fem = da.loc[da["RIAGENDRx"] == "Female"]
# the female marital status between 30-40 years old
da_ladies = da.loc[(da["RIDAGEYR"] >= 30) & (da["RIDAGEYR"] <= 40)]
da_ladies.loc[da_ladies["RIAGENDRx"] == "Female"].DMDMARTLx.value_counts()
da_young = da.loc[(da["RIDAGEYR"] < 30)]
da_young.loc[da_young["RIAGENDRx"] == "Female"].DMDMARTLx.value_counts()
da_Mrs = da.loc[(da["RIDAGEYR"] > 40)]
da_Mrs.loc[da_Mrs["RIAGENDRx"] == "Female"].DMDMARTLx.value_counts()
da_Mrs.loc[da_Mrs["RIAGENDRx"] == "Female"].RIAGENDRx.value_counts()
da.loc[da["RIAGENDRx"] == "Male"].RIDAGEYR.describe() # Male ages destibution
da_Men = da.loc[(da["RIDAGEYR"] >= 30) & (da["RIDAGEYR"] <= 40)]
da_Men.loc[da_Men["RIAGENDRx"] == "Male"].DMDMARTLx.value_counts()
da_youngM = da.loc[(da["RIDAGEYR"] < 30)]
da_youngM.loc[da_youngM["RIAGENDRx"] == "Male"].DMDMARTLx.value_counts()  # marital status for men under 30
da_Senior = da.loc[(da["RIDAGEYR"] > 40)]
da_Senior.loc[da_Senior["RIAGENDRx"] == "Male"].DMDMARTLx.value_counts()  # marital status for men over 40
da_Fem["Fagegrp"] = pd.cut(da_Fem.RIDAGEYR, [18, 30, 40, 50, 60, 70, 80])
da_Men["Fagegrp"] = pd.cut(da_Men.RIDAGEYR, [18, 30, 40, 50, 60, 70, 80])
# The proportion of each marital status within each age group
Fem_prop = da_Fem.groupby("Fagegrp")["DMDMARTLx"].value_counts()
Fem_prop / Fem_prop.groupby(level=0).sum()  # normalize within each age group
# # ------------------------------------
# # multivariate data select with conditions
df = pd.read_csv("../input/nhanes-2015-2016/NHANES.csv")
df
columns = df.columns
columns
keep = [column for column in columns if "BMX" in column]
keep
df[keep]
# 2 conditions
waist_median = pd.Series.median(df["BMXWAIST"])
condition1 = df["BMXWAIST"] > waist_median
condition2 = df["BMXLEG"] < 32
df[condition1 & condition2]
df.loc[condition1 & condition2, :]
df_small = df.head(5)
df_small
df_small.index = ["a", "b", "c", "d", "f"] # change the index
df_small.loc[["a", "c"], :]
df_small["BMXBMI"]
df_small["BMXBMI"] = range(5)
df_small.BMXBMI
df_small[df_small.BMXBMI > 2].index
df_small.loc[df_small.BMXBMI > 2, "BMXBMI"] = [10] * 2
df_small
df_small.loc[df_small["BMXBMI"] == 10]
r = 1
mean = [15, 5]
cov = [[1, r], [r, 1]]
x, y = np.random.multivariate_normal(mean, cov, 400).T
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.hist(x=x, bins=15)
plt.title("X")
plt.subplot(1, 2, 2)
plt.hist(x=y, bins=15)
plt.title("y")
plt.show()
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 2)
plt.scatter(x=x, y=y)
plt.title("Joint distribution of X and Y")
plt.subplot(2, 2, 4)
plt.hist(x=x, bins=15)
plt.title("Marginal distribution of X")
plt.subplot(2, 2, 1)
plt.hist(x=y, orientation="horizontal", bins=15)
plt.title("Marginal distrubution of Y")
plt.show()
# # ----------------------------------------
# # Unit testing
df = pd.read_csv("../input/nhanes-2015-2016/NHANES.csv")
df.index = range(1, df.shape[0] + 1)
df.head()
# # Find the mean of the first 100 rows of 'BPXSY1' when 'RIDAGEYR' > 60
# One way: filter for age greater than 60, then take only the first 100 rows of blood pressure
pd.Series.mean(df[df.RIDAGEYR > 60].loc[0:100, "BPXSY1"])
# Note that the result is not correct! We should use iloc instead of loc
# ## The difference between loc & iloc
df[df.RIDAGEYR > 60].loc[0:5, :]
df[df.RIDAGEYR > 60].iloc[0:5, :]
# It is better to use iloc when choosing specific rows or columns
# Applying the correct method to the original question about BPXSY1
print(pd.Series.mean(df[df.RIDAGEYR > 60].iloc[0:100, 16]))
# Another way to reference the BPXSY1 variable
print(pd.Series.mean(df[df.RIDAGEYR > 60].iloc[0:100, df.columns.get_loc("BPXSY1")]))
# # ----------------------------------------
# # Analysis of Multivariate data
# > Below we make a scatterplot of arm length against leg length. Arm length (BMXARML) is plotted on the vertical axis and leg length (BMXLEG) is plotted on the horizontal axis. We see a positive dependence between the two measures -- people with longer arms tend to have longer legs, and vice-versa. However it is far from a perfect relationship.
da = pd.read_csv("../input/nhanes-2015-2016/NHANES.csv")
sns.regplot(
x="BMXLEG", y="BMXARML", data=da, fit_reg=False, scatter_kws={"alpha": 0.2}
).set_title("The arm length vs leg length")
# We see a positive dependence between the two measures, but it is far from a perfect relationship:
# people with longer arms tend to have longer legs, and vice-versa. We use "alpha" to make the points semi-transparent.
# >Another way to avoid overplotting is to make a plot of the "density" of points.
# We will use jointplot, which also shows the
# Pearson correlation coefficient; it ranges from -1 to 1, and 0.62 indicates a fairly strong positive dependence.
sns.jointplot(x="BMXLEG", y="BMXARML", kind="kde", data=da).annotate(stats.pearsonr)
# Note that systolic and diastolic blood pressure (the max and min blood pressure between two consecutive heart beats)
# are more weakly correlated than arm and leg length, with a correlation coefficient of 0.32, which means that some
# people have unusually high systolic blood pressure but average diastolic blood pressure, and vice-versa.
sns.jointplot(x="BPXSY1", y="BPXDI1", kind="kde", data=da).annotate(stats.pearsonr)
# Next we look at two repeated measures of systolic blood pressure, taken a few minutes apart on the
# same person. These values are very highly correlated, with a correlation coefficient of around 0.96.
jp = sns.jointplot(x="BPXSY1", y="BPXSY2", kind="kde", data=da).annotate(stats.pearsonr)
# >It is usually productive to explore the data more deeply by stratifying on relevant factors such as gender
# again the relation between leg & arm length, stratifying first by gender, then by gender and ethnicity.
da["RIAGENDRx"] = da.RIAGENDR.replace({1: "Male", 2: "Female"})
sns.FacetGrid(da, col="RIAGENDRx").map(
plt.scatter, "BMXLEG", "BMXARML", alpha=0.4
).add_legend()
# The plot indicates that men tend to have somewhat longer arms and legs than women. In addition, the
# correlation between arm length and leg length appears to be somewhat weaker in women than in men.
# >We can check this by calculating the correlation coefficient separately within each gender. The 'corr' method of a dataframe
# calculates the correlation coefficients for every pair of variables in the dataframe; it returns
# a "correlation matrix" that is symmetric around its diagonal of 1s.
print(da.loc[da.RIAGENDRx == "Female", ["BMXLEG", "BMXARML"]].dropna().corr())
print(da.loc[da.RIAGENDRx == "Male", ["BMXLEG", "BMXARML"]].dropna().corr())
# the correlation between leg length and arm length in men is 0.50, while in women the correlation is 0.43.
# >We now stratify the data by two factors, gender and ethnicity, giving 2x5 = 10 strata. The correlation between arm length and leg length is stronger in group 5, especially for men. This is not surprising, as greater heterogeneity can allow correlations to emerge that are indiscernible in more homogeneous data.
# map(a function, a list to iterate through)
_ = (
sns.FacetGrid(da, col="RIDRETH1", row="RIAGENDRx")
.map(plt.scatter, "BMXLEG", "BMXARML", alpha=0.5)
.add_legend()
)
# since there are 2 gender strata and 5 ethnicity strata the result would be 2x5 = 10 strata.
# the relationship between arm length and leg length within genders is roughly similar across the ethnic groups.
# # Categorical Bivariate data using "Crosstab"
da.DMDEDUC2.value_counts()
da["DMDEDUC2x"] = da.DMDEDUC2.replace(
{
1: "<9",
2: "9-11",
3: "HS/GED",
4: "Some college/AA",
5: "College",
7: "Refused",
9: "Don't know",
}
)
da["DMDMARTLx"] = da.DMDMARTL.replace(
{
1: "Married",
2: "Widowed",
3: "Divorced",
4: "Separated",
5: "Never married",
6: "Living w/partner",
77: "Refused",
}
)
db = da.loc[(da.DMDEDUC2x != "Don't know") & (da.DMDMARTLx != "Refused"), :]
# create a new data set that omits people who responded "Don't know" or who refused to answer these questions.
x = pd.crosstab(db.DMDEDUC2x, db.DMDMARTLx)  # count the frequency (index=education, columns=marital status)
x
# pd.crosstab(index=new_index (rows), columns=your_pivoted_columns, values=your_new_values, aggfunc=aggregation_function)
res_names = ["pc", "ll", "pc", "ts", "fc"]
purchase_type = ["food", "food", "food", "drink", "drink"]
price = [12, 25, 32, 10, 15]
pd.crosstab(
index=[res_names], columns=[purchase_type]
) # counted the frequency at which the intersection happened
pd.crosstab(index=[res_names], columns=[purchase_type], values=price, aggfunc=sum)
# Normalize within each row (axis=1): the proportions of marital status within each education level (row) sum to 1
x.apply(lambda z: z / z.sum(), axis=1)
# Normalize within each column (axis=0): the proportions of education level within each marital status (column) sum to 1
x.apply(
lambda z: z / z.sum(), axis=0
) # We see here that the plurality of divorced people have some
# college but have not graduated from college, while the plurality of married people are college graduates.
# look at the proportion of people in each marital status category, for each combination of the gender
# and education variables. The following line does these steps, reading the code from left to right:
# 1 Group the data by every combination of gender, education, and marital status
# 2 Count the number of people in each cell using the 'size' method
# 3 Pivot the marital status results into the columns (using unstack)
# 4 Fill any empty cells with 0
# 5 Normalize the data by row
db.groupby(["RIAGENDRx", "DMDEDUC2x", "DMDMARTLx"]).size().unstack().fillna(0).apply(
lambda x: x / x.sum(), axis=1
)
# One factor behind the greater number of women who are divorced & widowed could be that women live longer than men.
# To minimize the impact of this factor, we are going to focus on the marriage rate, which is
# a widely-studied variable in social science research.
# There are a number of intriguing results here. For example, the marriage rate seems to drop as college-educated
# people get older (e.g. 71% of college-educated women between 40 and 50 are married, but only 65% of college-
# educated women between 50 and 60 are married; an even larger drop occurs for men). However, in people with
# a HS/GED level of education, the marriage rate is higher for older people (although it is lower compared to
# the college-educated sample). There are a number of possible explanations for this, for example, that remarriage
# after divorce is less common among college graduates.
dx = db.loc[(db.RIDAGEYR >= 40) & (db.RIDAGEYR < 50)]
a = (
dx.groupby(["RIAGENDRx", "DMDEDUC2x", "DMDMARTLx"])
.size()
.unstack()
.fillna(0)
.apply(lambda x: x / x.sum(), axis=1)
)
dx = db.loc[(db.RIDAGEYR >= 50) & (db.RIDAGEYR < 60)]
b = (
dx.groupby(["RIAGENDRx", "DMDEDUC2x", "DMDMARTLx"])
.size()
.unstack()
.fillna(0)
.apply(lambda x: x / x.sum(), axis=1)
)
print(a.loc[:, ["Married"]].unstack())
print("")
print(b.loc[:, ["Married"]].unstack())
# > We conclude this section by noting that marital status is associated with many factors, including gender and eduational status, but also varies strongly by age and birth cohort. For example, it is unlikely for young people to be widowed, and it is less likely for older people to be "never married", since a person can transition from "never married" into one of the other categories, but can never move back. Below we will consider the role of age in more detail, and later in the course we will revisit these questions using more sophisticated analytic methods that can account for all of these factors simultaneously. However, since NHANES is a cross-sectional study, there are certain important questions that it cannot be used to answer. For example, while we know each person's current marital status, we do not know their full marital history (e.g. how many times and at what ages they were married or divorced).
#
# Another situation that commonly arises is analyzing bivariate data consisting of one quantitative and one categorical variable
# (here, age and marital status). Notice that widowed people tend to be older, and never-married people tend to be younger.
plt.figure(figsize=(12, 4))
_ = sns.boxplot(db.DMDMARTLx, db.RIDAGEYR)
# When we have enough data,"violinplot" gives a bit more insight into the shapes of the distributions than boxplot
plt.figure(figsize=(12, 4))
a = sns.violinplot(da.DMDMARTLx, da.RIDAGEYR)
# We can see quite clearly that the distributions with low mean (living with partner, never married) are strongly
# right-skewed, while the distribution with high mean (widowed) is strongly left-skewed.
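# To put a number on the skew described above, a minimal sketch using pandas' built-in
# skew() per marital-status group (positive values = right-skewed, negative = left-skewed):
print(da.groupby("DMDMARTLx")["RIDAGEYR"].skew())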
# ## Question 1:
# >Make a scatterplot showing the relationship between the first and second measurements of diastolic blood pressure (BPXDI1 and BPXDI2). Also obtain the 4x4 matrix of correlation coefficients among the first two systolic and the first two diastolic blood pressure measures.
_ = sns.regplot(
x="BPXDI1", y="BPXDI2", data=da, fit_reg=False, scatter_kws={"alpha": 0.2}
).set_title("The first and second measurements of diastolic blood pressure")
print(da.loc[:, ["BPXDI1", "BPXDI2"]].dropna().corr())
print(da.loc[:, ["BPXSY1", "BPXSY2"]].dropna().corr())
# NOTE the correlation for the systolic is larger than the diastolic
# ## Question 2:
# >Use "violin plots" to compare the distributions of ages within groups defined by gender and educational attainment.
plt.figure(figsize=(14, 6))
sns.violinplot(x="DMDEDUC2x", y="RIDAGEYR", hue="RIAGENDRx", data=da)
# NOTE that people with <9 years of education have an average age of around 60
# ## Question 3:
# >Use violin plots to compare the distributions of BMI within a series of 10-year age bands. Also stratify these plots by gender
da["agegrp"] = pd.cut(da.RIDAGEYR, [18, 30, 40, 50, 60, 70, 80])
plt.figure(figsize=(14, 8))
sns.violinplot(x="agegrp", y="BMXBMI", hue="RIAGENDRx", data=da)
# NOTE that almost all age groups have a similar median BMI, and the distributions are all right-skewed
# ## Question 4:
# >Construct a frequency table for the joint distribution of ethnicity groups (RIDRETH1) and health-insurance status (HIQ210). Normalize the results so that the values within each ethnic group are proportions that sum to 1.
x = pd.crosstab(da.RIDRETH1, da.HIQ210)
x.apply(lambda z: z / z.sum(), axis=1)
# NOTE: The ethnic group 3 has the highest rate of being uninsured in the past year
# ### --------------------------------------------------------------------------
# ### --------------------------------------------------------------------------
# # Sampling from Biased Population
# hypothetical simulation for non-normal and normal sampling distribution
mean_uofm = 155  # mean weight of UofM students
sd_uofm = 5  # std
mean_gym = 185  # mean weight of gym goers
sd_gym = 5
gymperc = 0.3  # 30 percent of the population visits the gym
totalPopSize = 40000
# Create two subgroups, [in order to sample from the normal distribution we use normal function ]
uofm_students = np.random.normal(mean_uofm, sd_uofm, int(totalPopSize * (1 - gymperc)))
students_at_gym = np.random.normal(mean_gym, sd_gym, int(totalPopSize * (gymperc)))
# Create the population from the subgroups
population = np.append(uofm_students, students_at_gym)
# Set up the figure for plotting
plt.figure(figsize=(10, 12))
# Plot the UofM students only
plt.subplot(3, 1, 1)
sns.distplot(uofm_students)
plt.title("UofM Students Only")
plt.xlim([140, 200])
# Plot the Gym Goers only
plt.subplot(3, 1, 2)
sns.distplot(students_at_gym)
plt.title("Gym Goers Only")
plt.xlim([140, 200])
# Plot both groups together
plt.subplot(3, 1, 3)
sns.distplot(population)
plt.title("Full Population of UofM Students")
plt.axvline(x=np.mean(population), color="r")
plt.xlim([140, 200])
plt.show()
# Simulation parameters
numberSamps = 5000
sampSize = 50
# Get the sampling distribution of the mean from only the gym
mean_distribution = np.empty(numberSamps)
for i in range(numberSamps):
random_students = np.random.choice(
population, sampSize
) # pick 50 random students from the population
mean_distribution[i] = np.mean(random_students) # assign in mean dis vector
# Plot the population and the biased sampling distribution
plt.figure(figsize=(10, 8))
# Plotting the population again
plt.subplot(2, 1, 1)
sns.distplot(population)
plt.title("Full Population of UofM Students")
plt.axvline(x=np.mean(population))
plt.xlim([140, 200])
# Plotting the sampling distribution
plt.subplot(2, 1, 2)
sns.distplot(mean_distribution)
plt.title("Sampling Distribution of the Mean Weight of All UofM Students")
plt.axvline(x=np.mean(population))
plt.axvline(x=np.mean(mean_distribution), color="black")
plt.xlim([140, 200])
plt.show()
# ### What happens if I only go to the gym to get the weight of individuals, and I don't sample randomly from all students at the University of Michigan?
#
# Simulation parameters
numberSamps = 5000
sampSize = 3
# Get the sampling distribution of the mean from only the gym
mean_distribution = np.empty(numberSamps)
for i in range(numberSamps):
random_students = np.random.choice(students_at_gym, sampSize)
mean_distribution[i] = np.mean(random_students)
# Plot the population and the biased sampling distribution
plt.figure(figsize=(10, 8))
# Plotting the population again
plt.subplot(2, 1, 1)
sns.distplot(population)
plt.title("Full Population of UofM Students")
plt.axvline(x=np.mean(population))
plt.xlim([140, 200])
# Plotting the sampling distribution
plt.subplot(2, 1, 2)
sns.distplot(mean_distribution)
plt.title("Sampling Distribution of the Mean Weight of Gym Goers")
plt.axvline(x=np.mean(population))
plt.axvline(x=np.mean(students_at_gym), color="black")
plt.xlim([140, 200])
plt.show()
# we can replicate the output of a random number generator simply by knowing which seed was used
import random
random.seed(1234)
random.random()
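# A minimal check of the claim above: re-seeding with the same value reproduces
# the exact same sequence of draws.
random.seed(1234)
first_run = [random.random() for _ in range(3)]
random.seed(1234)
second_run = [random.random() for _ in range(3)]
print(first_run == second_run)  # True: identical sequences from the same seed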
# ## Empirical Rule and Distribution
from IPython.display import Image
path = "../input/three-sigma-rule/three_sigma_rule.png"
Image(filename=path, width=400, height=400)
# the 68-95-99.7 rule
random.seed(1738)
mu = 7 # the average college student within the population will have about 7 hours of sleep a night
sigma = 1.7  # the average distance from this mean is about 1.7
Observations = [
random.normalvariate(mu, sigma) for _ in range(100000)
] # create bell curve
sns.distplot(Observations)
plt.axvline(np.mean(Observations) + np.std(Observations), color="g")
plt.axvline(np.mean(Observations) - np.std(Observations), color="g")
plt.axvline(np.mean(Observations) + (np.std(Observations) * 2), color="y")
plt.axvline(np.mean(Observations) - (np.std(Observations) * 2), color="y")
pd.Series(Observations).describe() # summary statistics
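# A rough empirical check of the 68-95-99.7 rule on these simulated observations
# (a sketch; the exact fractions will vary slightly between runs):
obs = np.array(Observations)
m, s = obs.mean(), obs.std()
for k in (1, 2, 3):
    frac = np.mean((obs > m - k * s) & (obs < m + k * s))
    print("within %d SD: %.3f" % (k, frac))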
SampleA = random.sample(
Observations, 100
)  # pulling 100 values from our population with equal probability
SampleB = random.sample(Observations, 100)
SampleC = random.sample(Observations, 100)
fig, ax = plt.subplots()  # create a figure and an axis to overlay the three sample distributions
sns.distplot(SampleA, ax=ax)
sns.distplot(SampleB, ax=ax)
sns.distplot(SampleC, ax=ax)
# the distributions do vary, even though they all follow the same trend with a mean around 7
from statsmodels.distributions.empirical_distribution import ECDF
ecdf = ECDF(Observations)  # calculate the empirical cumulative distribution function (ECDF)
plt.plot(ecdf.x, ecdf.y)
plt.axhline(y=0.025, color="y", linestyle="-")
plt.axvline(
x=np.mean(Observations) - (2 * np.std(Observations)), color="y", linestyle="-"
)
plt.axhline(y=0.975, color="y", linestyle="-")
plt.axvline(
x=np.mean(Observations) + (2 * np.std(Observations)), color="y", linestyle="-"
)
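# Cross-check (a sketch): the 2.5th and 97.5th empirical percentiles should land close
# to mean +/- 2 standard deviations for this roughly normal sample.
lo, hi = np.percentile(Observations, [2.5, 97.5])
print("empirical central 95%% interval: (%.2f, %.2f)" % (lo, hi))
print(
    "mean +/- 2 SD:                   (%.2f, %.2f)"
    % (
        np.mean(Observations) - 2 * np.std(Observations),
        np.mean(Observations) + 2 * np.std(Observations),
    )
)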
m = 100 # Subsample size
sbp_diff = [] # Storage for our subsample mean differences
for i in range(1000):
dx = da.sample(2 * m) # We need two subsamples of size m
dx1 = dx.iloc[0:m, :] # First subsample
dx2 = dx.iloc[m:, :] # Second subsample
sbp_diff.append(
dx1.BPXSY1.mean() - dx2.BPXSY1.mean()
) # The difference of mean BPXSY1 values
sns.distplot(sbp_diff)
pd.Series(sbp_diff).describe()
m = 400 # Change the sample size, everything else below is unchanged from the cells above
sbp_diff = []
for i in range(1000):
dx = da.sample(2 * m)
dx1 = dx.iloc[0:m, :]
dx2 = dx.iloc[m:, :]
sbp_diff.append(dx1.BPXSY1.mean() - dx2.BPXSY1.mean())
sns.distplot(sbp_diff)
pd.Series(sbp_diff).describe()
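# Sanity check (a sketch): the spread of these simulated differences should be close to the
# theoretical standard error of a difference of two independent sample means,
# sqrt(2) * sigma / sqrt(m), using the observed SD of BPXSY1 as sigma.
sigma_hat = da.BPXSY1.std()
print("theoretical SE: %.3f" % (np.sqrt(2) * sigma_hat / np.sqrt(m)))
print("simulated SD:   %.3f" % np.std(sbp_diff))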
# Question: generate 3 normal random variables with mean 100 and standard deviation 1
np.random.seed(123)
mu, sigma = 100, 1
sample = np.random.normal(mu, sigma, 1000)
sample
sns.distplot(sample)
# Question: Generating random samples from a population lies at the heart of statistics.
# In the code block below, draw a sample of size 10 from a set containing the integers 1 through 100.
np.random.seed(123)
population = np.arange(1, 101) # create population list of number 1-100
Sample = np.random.choice(population, 10)
Sample
|
import numpy as np
import pandas as pd
import tensorflow as tf
import os
import io
import nltk
import json
import string
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import (
Input,
Embedding,
LSTM,
Dense,
GlobalAveragePooling1D,
Flatten,
Dropout,
GRU,
)
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.layers import Conv1D, MaxPool1D
data = pd.read_json("/kaggle/input/chatbots-intent-recognition-dataset/Intent.json")
data
def json_data(data):
tags = []
patterns = []
responses = {}
for intent in data["intents"]:
responses[intent["intent"]] = intent["responses"]
for lines in intent["text"]:
patterns.append(lines)
tags.append(intent["intent"])
return [tags, patterns, responses]
[tags, patterns, responses] = json_data(data)
df = pd.DataFrame({"patterns": patterns, "tags": tags})
df = df.sample(frac=1)
df.head()
import re
r = re.compile(r"[^\w\s]+")
df["patterns"] = [r.sub("", s) for s in df["patterns"].tolist()]
df.head()
tokenizer = Tokenizer(num_words=1000)
tokenizer.fit_on_texts(df["patterns"])
train = tokenizer.texts_to_sequences(df["patterns"])
features = pad_sequences(train)
le = LabelEncoder()
labels = le.fit_transform(df["tags"])
vocab = len(tokenizer.word_index)
output_len = le.classes_.shape[0]
model = Sequential()
model.add(Input(shape=(features.shape[1],)))
model.add(Embedding(vocab + 1, 100))
model.add(
Conv1D(
filters=32,
kernel_size=5,
activation="relu",
kernel_initializer=tf.keras.initializers.GlorotNormal(),
bias_regularizer=tf.keras.regularizers.L2(0.0001),
kernel_regularizer=tf.keras.regularizers.L2(0.0001),
activity_regularizer=tf.keras.regularizers.L2(0.0001),
)
)
model.add(Dropout(0.3))
model.add(LSTM(32, dropout=0.3, return_sequences=True))
model.add(LSTM(16, dropout=0.3, return_sequences=False))
model.add(
Dense(128, activation="relu", activity_regularizer=tf.keras.regularizers.L2(0.0001))
)
model.add(Dropout(0.6))
model.add(
Dense(
output_len,
activation="softmax",
activity_regularizer=tf.keras.regularizers.L2(0.0001),
)
)
# inspect the model's layer stack
model.layers
glove_dir = "glove.6B.100d.txt"
embeddings_index = {}
file_ = open(glove_dir)
for line in file_:
arr = line.split()
single_word = arr[0]
w = np.asarray(arr[1:], dtype="float32")
embeddings_index[single_word] = w
file_.close()
print("Found %s word vectors." % len(embeddings_index))
max_words = vocab + 1
word_index = tokenizer.word_index
embedding_matrix = np.zeros((max_words, 100), dtype="float32")
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
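# A quick sanity check (a sketch): how much of the tokenizer's vocabulary is actually
# covered by the pre-trained GloVe vectors loaded above.
covered = sum(1 for w in word_index if w in embeddings_index)
print("GloVe coverage: %d / %d vocabulary words" % (covered, len(word_index)))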
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
from keras.callbacks import TensorBoard, EarlyStopping
earlyStopping = EarlyStopping(
monitor="loss", patience=400, mode="min", restore_best_weights=True
)
history_training = model.fit(
features, labels, epochs=1000, batch_size=64, callbacks=[earlyStopping]
)
import random
def generate_answer(query):
texts = []
pred_input = query
pred_input = [
letters.lower() for letters in pred_input if letters not in string.punctuation
]
pred_input = "".join(pred_input)
texts.append(pred_input)
pred_input = tokenizer.texts_to_sequences(texts)
pred_input = np.array(pred_input).reshape(-1)
pred_input = pad_sequences([pred_input], features.shape[1])
output = model.predict(pred_input)
output = output.argmax()
response_tag = le.inverse_transform([output])[0]
return random.choice(responses[response_tag])
print('Bot: Hello! I am here to chat. if you need to go just say "bye" :)')
person = "anything"
bye = ["bye", "stop", "leave", "goodbye", "good bye", "see you later", "exit", "quit"]
while person not in bye:
print("you:", end=" ")
person = input()
res_tag = generate_answer(person)
print("Bot: " + res_tag)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# importing the classes I need for different models
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
from sklearn.model_selection import train_test_split
y_train = train.label
X_train = train.loc[:, train.columns != "label"]
def submission_output(inp_array, method: str):
output_df = pd.DataFrame(np.arange(1, len(inp_array) + 1), columns=["ImageId"])
output_df["Label"] = inp_array
output_df.to_csv("/kaggle/working/" + method + "_submission")
print(output_df.head())
# random forest
clf = RandomForestClassifier(max_depth=4, random_state=0)
clf.fit(X_train, y_train)
randomf_array = clf.predict(test)
print(randomf_array)
submission_output(randomf_array, "random_forest")
# Logistic Regression
logisticRegr = LogisticRegression()
logisticRegr.fit(X_train, y_train)
logisticr_array = logisticRegr.predict(test)
print(logisticr_array)
submission_output(logisticr_array, "logistic_regression")
# #XGBoost
# XGB_model = XGBClassifier()
# XGB_model.fit(X_train, y_train)
# XGB_array = XGB_model.predict(test)
# print(XGB_array)
# submission_output(XGB_array, 'XGBoost')
|
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import svm
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import precision_recall_curve
from sklearn.preprocessing import OneHotEncoder
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Exploring the dataset
kaggle_train_dataset = pd.read_csv("/kaggle/input/titanic/train.csv")
kaggle_train_dataset.head()
kaggle_train_dataset.info()
kaggle_train_dataset.hist()
kaggle_train_dataset["Parch"].value_counts()
kaggle_train_dataset["SibSp"].value_counts()
kaggle_train_dataset["Sex"].value_counts()
kaggle_train_dataset["Embarked"].value_counts()
# ## Splitting into train and validation data
train_dataset, validation_dataset = train_test_split(
kaggle_train_dataset,
test_size=0.2,
random_state=42,
shuffle=True,
stratify=kaggle_train_dataset["Survived"],
)
print(f"Total of {len(train_dataset)} train samples")
print(
f"In train {sum(train_dataset['Survived'].values) / len(train_dataset)} % survived"
)
print(f"Total of {len(validation_dataset)} in test samples")
print(
f"In test {sum(validation_dataset['Survived'].values) / len(validation_dataset)} % survived"
)
# ## Handle missing values
all_features = ["Age", "Embarked", "Fare", "Pclass", "SibSp", "Sex"]
numerical_features = ["Age", "Fare", "Pclass", "SibSp"]
categorical_values = ["Embarked", "Sex"]
train_dataset_nonna = train_dataset[all_features + ["Survived"]]
imputer = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
train_dataset_nonna[all_features] = imputer.fit_transform(train_dataset[all_features])
categorical_transformer = OneHotEncoder()
categorical_df = pd.DataFrame(
categorical_transformer.fit_transform(
train_dataset_nonna[categorical_values]
).toarray()
)
categories = categorical_transformer.categories_
total_categories = []
for category in categories:
total_categories.extend(category)
categorical_df.columns = total_categories
categorical_df.index = train_dataset_nonna.index
train_dataset_nonna_transformed = train_dataset_nonna[
numerical_features + ["Survived"]
].join(categorical_df)
train_dataset_nonna_transformed.index
train_dataset_nonna_transformed.info()
# ## Handle categorical values
# # Build classifier
features_train = train_dataset_nonna_transformed[
numerical_features + total_categories
].values
labels_train = train_dataset_nonna_transformed["Survived"].values
random_forrest_classifier = RandomForestClassifier(max_depth=3, random_state=42)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
    plt.xlabel("Threshold")
    plt.legend()
    plt.show()
# ## Evaluate the random forest
pred_labels_train_scores = cross_val_predict(
random_forrest_classifier,
features_train,
labels_train,
cv=3,
method="predict_proba",
)
pred_labels_train_scores_1 = [
pred_labels_train_score[1] for pred_labels_train_score in pred_labels_train_scores
]
precisions, recalls, thresholds = precision_recall_curve(
labels_train, pred_labels_train_scores_1
)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
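# A hedged sketch of how the curve above can be used to pick an operating point: find the
# lowest threshold whose cross-validated precision reaches a target (0.8 is an arbitrary
# illustrative value; argmax falls back to index 0 if the target is never reached).
target_precision = 0.8
idx = np.argmax(precisions[:-1] >= target_precision)
print(
    "threshold %.3f gives precision %.3f and recall %.3f"
    % (thresholds[idx], precisions[idx], recalls[idx])
)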
random_forrest_classifier.fit(features_train, labels_train)
threshold = 0.5
proba_train = np.asarray(
[proba[1] for proba in random_forrest_classifier.predict_proba(features_train)]
)
labels_train_pred = proba_train >= threshold
precision = precision_score(labels_train, labels_train_pred)
recall = recall_score(labels_train, labels_train_pred)
print(f"Precision is {precision} and recall is {recall}")
confusion_matrix(labels_train, labels_train_pred)
# ## Evaluate model
validation_dataset_nonna = validation_dataset[all_features + ["Survived"]]
validation_dataset_nonna[all_features] = imputer.transform(
validation_dataset[all_features]
)
categorical_df_validation = pd.DataFrame(
categorical_transformer.transform(
validation_dataset_nonna[categorical_values]
).toarray()
)
categories = categorical_transformer.categories_
total_categories = []
for category in categories:
total_categories.extend(category)
categorical_df_validation.columns = total_categories
categorical_df_validation.index = validation_dataset_nonna.index
validation_dataset_nonnatransformed = validation_dataset_nonna[
numerical_features + ["Survived"]
].join(categorical_df_validation)
validation_dataset_nonnatransformed.index
features_validation = validation_dataset_nonnatransformed[
numerical_features + total_categories
].values
labels_validation = validation_dataset_nonnatransformed["Survived"].values
threshold = 0.5
proba_validation = np.asarray(
[proba[1] for proba in random_forrest_classifier.predict_proba(features_validation)]
)
labels_validation_pred = proba_validation >= threshold
precision = precision_score(labels_validation, labels_validation_pred)
recall = recall_score(labels_validation, labels_validation_pred)
print(f"Precision is {precision} and recall is {recall}")
confusion_matrix(labels_validation, labels_validation_pred)
# # Inference on Kaggle's test set
kaggle_test_table = pd.read_csv("/kaggle/input/titanic/test.csv")
test_dataset_nonna = kaggle_test_table
test_dataset_nonna[all_features] = imputer.transform(kaggle_test_table[all_features])
categorical_df_test = pd.DataFrame(
categorical_transformer.transform(test_dataset_nonna[categorical_values]).toarray()
)
categorical_df_test.columns = total_categories
categorical_df_test.index = test_dataset_nonna.index
test_dataset_nonna_transformed = test_dataset_nonna[numerical_features].join(
categorical_df_test
)
test_dataset_nonna_transformed.index
features_test_kaggle = test_dataset_nonna_transformed[
numerical_features + total_categories
].values
predict_classes = random_forrest_classifier.predict(features_test_kaggle)
output = pd.DataFrame(
{"PassengerId": kaggle_test_table.PassengerId, "Survived": predict_classes}
)
output.to_csv("my_submission.csv", index=False)
|
import numpy as np # linear algebra, numerical operations
import pandas as pd # data processing/manipulation, CSV file I/O (e.g. pd.read_csv)
import os
import re
# for data visualisation
import matplotlib.pyplot as plt
import seaborn as sns
# sklearn packages to be imported for machine learning
from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.datasets import fetch_openml
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import forest_frenzy data
traindf = pd.read_csv("/kaggle/input/forest-frenzy/train.csv")
testdf = pd.read_csv("/kaggle/input/forest-frenzy/test.csv")
# # Homework Two #
# What are we working with?
traindf.describe()
traindf.head()
traindf.tail()
# I check for missing values in the data set
traindf.isnull().sum()
# testdf.isnull().sum()
# look for any duplicates
traindf.duplicated().sum()
# look at data types
traindf.dtypes
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv"
)
df.set_index("CustomerID")
# df = pd.get_dummies(df, columns = ['Gender'], prefix = ['Gender'])
features = df[
[
"Age",
"Annual Income (k$)",
"Gender_Female",
"Gender_Male",
"Spending Score (1-100)",
]
]
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", max_iter=300, n_init=10)
kmeans.fit(features)
wcss.append(kmeans.inertia_)
font_title = {"family": "normal", "weight": "bold", "size": 35}
font_axes = {"family": "normal", "weight": "normal", "size": 28}
plt.plot(range(1, 11), wcss)
plt.title("The Elbow Method", **font_title)
plt.xlabel("Number of clusters", **font_axes)
plt.ylabel("wcss", **font_axes)
plt.show()
kmeans = KMeans(n_clusters=5, init="k-means++", max_iter=300, n_init=10)
df["cluster"] = kmeans.fit_predict(features)
df
sns.scatterplot(
data=df,
x="Annual Income (k$)",
y="Spending Score (1-100)",
hue="cluster",
palette="tab10",
)
|
import cv2
import shutil
import os
import time
import argparse
import glob
import os
import subprocess
import pandas as pd
import unicodedata
def is_english_only(string):
for s in string:
cat = unicodedata.category(s)
if not cat in ["Ll", "Lu", "Nd", "Po", "Pd", "Zs"]:
return False
return True
df = pd.read_parquet("/kaggle/input/diffusiondb-metadata/metadata-large.parquet")
# print(len(df))
df["prompt"] = df["prompt"].str.strip()
df.drop_duplicates(subset="prompt", inplace=True)
# # df.drop_duplicates(subset='prompt', inplace=True)
# # print(df.shape)
# df = df[df['width'] == df['height']]
# print(df.shape)
# # df['prompt'] = df['prompt'].str.strip()
# # df = df[df['prompt'].map(lambda x: len(x.split())) >= 5]
# # print(df.shape)
df = df[~df["prompt"].str.contains("^(?:\s*|NULL|null|NaN)$", na=True)]
# print(df.shape)
df = df[df["prompt"].apply(is_english_only)]
# print(df.shape)
# df.drop_duplicates(subset='prompt', inplace=True)
# print(df.shape)
# df['tail'] = df['prompt'].apply(lambda x: x[-35:])
# df.drop_duplicates(subset='tail', inplace=True)
# # print(df.shape)
# # df.drop_duplicates(subset='prompt', inplace=True)
# # print(df.shape)
for idx in range(12300, 12600):
print(idx)
subprocess.check_output(
"wget -q https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/diffusiondb-large-part-2/part-{0}.zip -O part-{0}.zip > /dev/null".format(
str(idx).zfill(6)
),
shell=True,
)
# subprocess.check_output("wget -q https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-{0}.zip -O part-{0}.zip > /dev/null".format(str(idx).zfill(6)), shell=True)
subprocess.check_output(
"unzip part-{0}.zip -d /kaggle/working/{0}/ > /dev/null".format(
str(idx).zfill(6)
),
shell=True,
)
subprocess.check_output("rm part-{0}.zip".format(str(idx).zfill(6)), shell=True)
for path in glob.glob("./{0}/*.webp".format(str(idx).zfill(6))):
if path.split("/")[-1] not in df["image_name"].values:
os.unlink(path)
subprocess.check_output(
"zip -r {0}.zip {0} > /dev/null".format(str(idx).zfill(6)), shell=True
)
subprocess.check_output("rm -rf {0}".format(str(idx).zfill(6)), shell=True) # 只因你太美
# import os
# import zipfile
# import datetime
# def file2zip(packagePath, zipPath):
# '''
# :param packagePath: folder path
# :param zipPath: path of the output zip archive
# :return:
# '''
# zip = zipfile.ZipFile(zipPath, 'w', zipfile.ZIP_DEFLATED)
# for path, dirNames, fileNames in os.walk(packagePath):
# fpath = path.replace(packagePath, '')
# for name in fileNames:
# fullName = os.path.join(path, name)
# name = fpath + '\\' + name
# zip.write(fullName, name)
# zip.close()
# if __name__ == "__main__":
# # folder path
# packagePath = '/kaggle/working/'
# zipPath = '/kaggle/working/output.zip'
# if os.path.exists(zipPath):
# os.remove(zipPath)
# file2zip(packagePath, zipPath)
# print("打包完成")
# print(datetime.datetime.utcnow())
print(len(glob.glob("./images/*.webp")))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import cv2
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import (
Dense,
Flatten,
Dropout,
Conv2D,
MaxPooling2D,
BatchNormalization,
)
from datetime import datetime
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import warnings
warnings.filterwarnings("ignore")
image = plt.imread("../input/retinadataset/normal/010.jpg")
img = plt.imshow(image)
image.shape
train = ImageDataGenerator(rescale=1 / 255)
train_dataset = train.flow_from_directory(
    "../input/retinadataset",
    target_size=(200, 200),
    batch_size=3,
    class_mode="sparse",  # integer class labels, matching sparse_categorical_crossentropy below
)
train_dataset.class_indices
train_dataset.classes
model = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(
16, (3, 3), activation="relu", input_shape=(200, 200, 3)
),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(5, activation="softmax"),
]
)
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["sparse_categorical_accuracy"],
)
start_time = datetime.now()
model_fit = model.fit(train_dataset, steps_per_epoch=3, epochs=500)
end_time = datetime.now()
print(end_time - start_time)
model.evaluate(train_dataset)
model.save("my_model.h5")
from keras.models import load_model
import tensorflow as tf
model = tf.keras.models.load_model("my_model.h5")
import tensorflow as tf
import numpy as np
import os
IMG_SIZE = 200
class_names = ["normal", "ododemi", "psodopapilödem"]
test_dir = "/kaggle/input/testimages/test"
test_filepaths = [os.path.join(test_dir, filename) for filename in os.listdir(test_dir)]
# resize and rescale the test images
test_images = []
for filepath in test_filepaths:
img = tf.keras.preprocessing.image.load_img(
filepath, target_size=(IMG_SIZE, IMG_SIZE)
)
img = tf.keras.preprocessing.image.img_to_array(img)
img = img / 255.0
test_images.append(img)
test_images = np.array(test_images)
model = tf.keras.models.load_model("my_model.h5")
# make the predictions
predictions = model.predict(test_images)
predicted_class_indices = np.argmax(predictions, axis=1)
predicted_class_names = [class_names[idx] for idx in predicted_class_indices]
print(predicted_class_names)
|
import numpy as np # linear algebra
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
os.path.join(dirname, filename)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import sys
package_paths = ["/kaggle/input/fractal/", "/kaggle/input/fractal/decode/"]
for pth in package_paths:
sys.path.append(pth)
print(sys.path)
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon, nd, autograd
from decode.FracTAL_ResUNet.nn.loss.mtsk_loss import *
from decode.FracTAL_ResUNet.models.semanticsegmentation.FracTAL_ResUNet import *
device = mx.gpu() if mx.context.num_gpus() else mx.cpu()
print(device.device_type)
depth = 6
norm_type = "GroupNorm"
norm_groups = 4
NClasses = 1
nfilters_init = 32
psp_depth = 4
net = FracTAL_ResUNet_cmtsk(
depth=depth,
nfilters_init=nfilters_init,
NClasses=NClasses,
norm_groups=norm_groups,
norm_type=norm_type,
psp_depth=psp_depth,
)
net.initialize(mx.initializer.Xavier())
net.hybridize()
net.collect_params().reset_ctx(device)
import xarray
from skimage import exposure
from skimage import color
import rasterio
from rasterio.plot import show
def getImage(filePath, i):
xds = xarray.open_dataset(filePath)
red_time = xds["B4"][i]
green_time = xds["B3"][i]
blue_time = xds["B2"][i]
merged_array = np.stack([red_time, green_time, blue_time], axis=-1)
merged_array_norm = exposure.rescale_intensity(merged_array, out_range=(0, 1))
img_hsv = color.rgb2hsv(merged_array_norm)
merged_array_norm = merged_array_norm.transpose((2, 0, 1))
img = img_hsv.transpose((2, 0, 1))
    return merged_array_norm, img  # both arrays in channel-first (C, H, W) order
def getMask(filePath):
img = rasterio.open(filePath)
return img.read(1), img.read(2), img.read(3)
from mxnet.gluon.data import Dataset
class CustomImageDataset(Dataset):
def __init__(self, data_dir):
self.data_dir = data_dir
self.mask_dir = data_dir.replace("images", "masks")
self.samples = self._make_dataset()
def _make_dataset(self):
samples = []
for filename in os.listdir(self.data_dir):
if filename.endswith(".nc"):
path = os.path.join(self.data_dir, filename)
mask_filename = filename.replace("S2_10m_256.nc", "S2label_10m_256.tif")
mask_path = os.path.join(self.mask_dir, mask_filename)
for i in range(0, 5):
item = (path, mask_path, i)
samples.append(item)
return samples
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
path, mask_path, i = self.samples[idx]
img, img_hsv = getImage(path, i)
extent, boundary, distance = getMask(mask_path)
return (
mx.nd.array(img),
mx.nd.array(extent),
mx.nd.array(boundary),
mx.nd.array(distance),
)
train_dataset = CustomImageDataset(
data_dir="/kaggle/input/eu-farm-bounadries/farmdata/sentinel2/images/FR"
)
batch_size = 4
train_loader = gluon.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optimizer = "adam"
optimizer_params = {"learning_rate": 0.001}
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
for batch in train_loader:
a = batch
img = a[0][0].asnumpy()
print(img.shape)
img = img.transpose((1, 2, 0))
extent = a[1][0].asnumpy()
boundary = a[2][0].asnumpy()
distance = a[3][0].asnumpy()
f, axarr = plt.subplots(4, 1)
axarr[0].imshow(img)
axarr[1].imshow(extent)
axarr[2].imshow(boundary)
axarr[3].imshow(distance)
break
loss_class = ftnmt_loss(depth=depth)
loss_class.hybridize()
# initialize metrics
epoch = 2
cumulative_loss = 0
accuracy = mx.metric.Accuracy()
f1 = mx.metric.F1()
mcc = mx.metric.MCC()
# training set
for batch_i, (img, extent, boundary, distance) in enumerate(
tqdm(train_loader, desc="Training epoch {}".format(epoch))
):
with autograd.record():
img = img.as_in_context(device)
extent = extent.as_in_context(device)
boundary = boundary.as_in_context(device)
distance = distance.as_in_context(device)
exten_pred, bound, dist = net(img)
extent = nd.expand_dims(extent, axis=1)
boundary = nd.expand_dims(boundary, axis=1)
distance = nd.expand_dims(distance, axis=1)
loss_extent = mx.nd.sum(loss_class(exten_pred, extent.as_in_context(device)))
loss_boundary = mx.nd.sum(loss_class(bound, boundary.as_in_context(device)))
loss_distance = mx.nd.sum(loss_class(dist, distance.as_in_context(device)))
loss = (0.33) * (loss_extent + loss_boundary + loss_distance)
mx.autograd.backward(loss)
trainer.step(batch_size)
# update metrics based on every batch
cumulative_loss += mx.nd.sum(loss).asscalar()
# accuracy
extent_predicted_classes = mx.nd.ceil(exten_pred[:, [0], :, :] - 0.5)
accuracy.update(extent, extent_predicted_classes)
# f1 score
prediction = exten_pred[:, 0, :, :].reshape(-1)
probabilities = mx.nd.stack(1 - prediction, prediction, axis=1)
f1.update(extent.reshape(-1), probabilities)
# MCC metric
mcc.update(extent.reshape(-1), probabilities)
net.save_parameters("begnali.params")
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
pd.options.display.max_rows = 10
df = pd.read_csv("fake_job_postings.csv")
df
plt.figure(figsize=(10, 5))
sns.countplot(df["fraudulent"])
(df[df["fraudulent"] == 0].shape[0]) / df.shape[0]
# # UNBALANCED DATA
# # Data Preprocessing
def Information(df):
df.replace("?", np.nan, inplace=True)
nulls = df.shape[0] - df.dropna(axis=0).shape[0]
nulls_per = (df.shape[0] - df.dropna(axis=0).shape[0]) / df.shape[0]
dup, dup_per = df.duplicated().sum(), (df.duplicated().sum()) / df.shape[0]
numerical_columns = list((df.select_dtypes(include=np.number)).columns)
categorical_columns = list(df.select_dtypes(include=["object"]).columns)
numerical_col_dict = {}
for col in numerical_columns:
if df[col].nunique() > 10:
numerical_col_dict[col] = "Continuous"
else:
numerical_col_dict[col] = "Discrete"
categorical_col_dict = {}
for col in categorical_columns:
if df[col].nunique() > 10:
categorical_col_dict[col] = "Continuous"
else:
categorical_col_dict[col] = "Classes"
x = []
for col in df.columns:
x.append(
[
col,
df[col].unique(),
df[col].nunique(),
df[col].dtype,
df[col].isna().sum(),
int((df[col].isna().sum()) / df.shape[0] * 100),
]
)
x = pd.DataFrame(
x,
columns=[
"Feature",
"Unique_Values",
"No.OfUniqueValues",
"DataType",
"NoOfNulls",
"Nulls %",
],
)
    print(
        "\n",
        "There are {} rows with missing values, representing {:.2f}% of the data".format(
            nulls, nulls_per * 100
        ),
    )
    print(
        "\n",
        "There are {} duplicated rows, representing {:.2f}% of the data".format(
            dup, dup_per * 100
        ),
    )
print("\n", "Numerical Columns : {}".format(numerical_col_dict))
print("\n", "Categorical Columns : {}".format(categorical_col_dict))
return x
pd.options.display.max_rows = None
Information(df)
df.drop("job_id", axis=1, inplace=True)
pd.options.display.max_rows = 10
df
df.reset_index(drop=True, inplace=True)
# # Impute Nulls
df["location"] = df["location"].fillna(value=df["location"].value_counts().index[0])
for idx in df[df["department"].isna()]["title"].index:
if "Marketing" in df.at[idx, "title"]:
df.at[idx, "department"] = "Marketing"
elif "Sales" in df.at[idx, "title"]:
df.at[idx, "department"] = "Sales"
elif ("Accountant" in df.at[idx, "title"]) | ("Accounting" in df.at[idx, "title"]):
df.at[idx, "department"] = "Accounting"
elif ("Engineer" in df.at[idx, "title"]) | ("Engineering" in df.at[idx, "title"]):
df.at[idx, "department"] = "Engineering"
else:
df.at[idx, "department"] = df.at[idx, "title"]
for idx in (df["salary_range"].dropna()).index:
Range = df.at[idx, "salary_range"].split("-")
try:
start = int(Range[0])
if start < 1000:
df.at[idx, "salary_range"] = 0
else:
df.at[idx, "salary_range"] = start
except ValueError:
df.at[idx, "salary_range"] = 0
df["salary_range"] = df["salary_range"].fillna(0)
df
df[["company_profile", "description", "requirements", "benefits"]] = df[
["company_profile", "description", "requirements", "benefits"]
].fillna("no available data")
pd.options.display.max_rows = None
Information(df)
df[["industry", "function"]] = df[["industry", "function"]].fillna("")
df["job_field"] = (
df["title"] + " " + df["department"] + " " + df["industry"] + " " + df["function"]
)
df.drop(["title", "department", "industry", "function"], axis=1, inplace=True)
pd.options.display.max_rows = None
Information(df)
df[["employment_type", "required_experience", "required_education"]] = df[
["employment_type", "required_experience", "required_education"]
].fillna("Other")
df["Requirements"] = (
df["requirements"]
+ " "
+ df["required_experience"]
+ " "
+ df["required_education"]
)
df.drop(
["requirements", "required_experience", "required_education"], axis=1, inplace=True
)
pd.options.display.max_rows = None
Information(df)
pd.options.display.max_rows = 10
# # Text Preprocessing
import re
from nltk.corpus import stopwords
import string
from nltk.stem import WordNetLemmatizer
punc = string.punctuation
lmt = WordNetLemmatizer()
s_words = stopwords.words("english")
class TextPreprocessing:
def __init__(self, df: pd.DataFrame = pd.DataFrame):
self.df = df
def Clean(self, df):
self.df = df
df_copy = df.copy(deep=True)
text_cols = list(df_copy.select_dtypes(include=["object"]).columns)
for col in text_cols:
for idx, text in enumerate(df_copy[col]):
te = []
word = re.sub(r"(@|#)\w+", "", text)
word = re.sub("[,.]", "", word)
word = re.sub(r"https?://\S+", "", word)
word = re.sub(r"(\?|!)+", "", word)
word = re.sub(r"\(|\)", "", word)
word = re.sub(r"(^\s+)", "", word)
word = re.sub(r"(\s+$)", "", word)
word = re.sub(r"\d+", "", word)
word = word.split()
for i in word:
if (i not in s_words) & (i not in punc):
i = i.lower()
i = lmt.lemmatize(i, "v")
te.append(i)
df_copy.at[idx, col] = te
return df_copy
def Vactorize(self, df, target_name):
self.df = df
self.target_name = target_name
df_cleaned = df.copy(deep=True)
text_cols = list(df_cleaned.select_dtypes(include=["object"]).columns)
pos_word = {}
neg_word = {}
pos_df = df_cleaned[df_cleaned[target_name] == 1].reset_index(drop=True)
neg_df = df_cleaned[df_cleaned[target_name] == 0].reset_index(drop=True)
for col in text_cols:
pos_word[col] = [word for sublist in pos_df[col] for word in sublist]
neg_word[col] = [word for sublist in neg_df[col] for word in sublist]
pos_freq = {}
neg_freq = {}
for key in pos_word.keys():
positive_dict = {}
for word in pos_word[key]:
positive_dict[word] = positive_dict.get(word, 0) + 1
pos_freq[key] = positive_dict
for key in neg_word.keys():
negative_dict = {}
for word in neg_word[key]:
negative_dict[word] = negative_dict.get(word, 0) + 1
neg_freq[key] = negative_dict
return pos_freq, neg_freq
def Vactorization(self, df, target_name):
self.df = df
self.target_name = target_name
df_cleaned = df.copy(deep=True)
text_cols = list(df_cleaned.select_dtypes(include=["object"]).columns)
pos_freq, neg_freq = TextPreprocessing().Vactorize(df_cleaned, target_name)
for col in text_cols:
df_cleaned["{}_pos".format(col)] = 0
df_cleaned["{}_neg".format(col)] = 0
for idx, List in enumerate(df_cleaned[col]):
pos_frequent = 0
neg_frequent = 0
for word in List:
pos_frequent += pos_freq[col].get(word, 0)
neg_frequent += neg_freq[col].get(word, 0)
df_cleaned.at[idx, "{}_pos".format(col)] = pos_frequent
df_cleaned.at[idx, "{}_neg".format(col)] = neg_frequent
df_cleaned.drop([col], axis=1, inplace=True)
return df_cleaned
def fit_transform(self, df, target_name):
self.df = df
self.target_name = target_name
df_cleaned = TextPreprocessing().Clean(df)
df_vact = TextPreprocessing().Vactorization(df_cleaned, target_name)
return df_vact
def Naive_Bayes(self, df, target_name):
self.df = df
self.target_name = target_name
df_naive = TextPreprocessing().Clean(df)
pos_freq, neg_freq = TextPreprocessing().Vactorize(df_naive, target_name)
text_cols = list(df_naive.select_dtypes(include=["object"]).columns)
v_n_pos, v_n_neg = {}, {}
for key in pos_freq.keys():
v_n_pos[key] = len(pos_freq[key])
n = 0
for word in pos_freq[key]:
n += pos_freq[key].get(word, 0)
v_n_pos[key] += n
for key in neg_freq.keys():
v_n_neg[key] = len(neg_freq[key])
n = 0
for word in neg_freq[key]:
n += neg_freq[key].get(word, 0)
v_n_neg[key] += n
prob_pos_dict = {}
for key in pos_freq.keys():
positive_dict = {}
for word in pos_freq[key]:
positive_dict[word] = (pos_freq[key].get(word, 0) + 1) / (v_n_pos[key])
prob_pos_dict[key] = positive_dict
prob_neg_dict = {}
for key in neg_freq.keys():
negative_dict = {}
for word in neg_freq[key]:
negative_dict[word] = (neg_freq[key].get(word, 0) + 1) / (v_n_neg[key])
prob_neg_dict[key] = negative_dict
for col in text_cols:
df_naive["{}_probs".format(col)] = 0
for idx, List in enumerate(df_naive[col]):
score = 0
for word in List:
try:
b = np.log(
(prob_pos_dict[col].get(word, 0))
/ (prob_neg_dict[col].get(word, 0))
)
if b == -float("inf"):
pass
else:
score += b
except:
pass
df_naive["{}_probs".format(col)][idx] = score
df_naive.drop([col], axis=1, inplace=True)
return {"probs_pos": prob_pos_dict, "probs_neg": prob_neg_dict}, df_naive
# # Clean Text
pre = TextPreprocessing()
df_cleaned = pre.Clean(df)
df_cleaned
# # 1. Try Vactorization method
df_cleaned = pre.Vactorization(df_cleaned, target_name="fraudulent")
df_cleaned
# # fit_transform performs Clean & Vactorization together
# df_cleaned = pre.fit_transform(df , target_name = 'fraudulent')
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
recall_score,
precision_score,
f1_score,
classification_report,
r2_score,
confusion_matrix,
)
x = df_cleaned.drop(["fraudulent"], axis=1)
y = df_cleaned["fraudulent"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=42
)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
list_of_models = [
LogisticRegression(),
KNeighborsClassifier(),
DecisionTreeClassifier(),
ExtraTreeClassifier(),
RandomForestClassifier(),
ExtraTreesClassifier(),
]
# collect per-model metrics in a DataFrame; use a distinct name so we do not shadow
# sklearn's classification_report imported above
results_df = pd.DataFrame(columns=["Accuracy", "F1_score", "Precision", "Recall"])
for model in list_of_models:
model = model.fit(x_train, y_train)
for i in range(2):
if i == 0:
to_pred = x_train
pred = y_train
title = "Train"
else:
to_pred = x_test
pred = y_test
title = "Test"
y_pred = model.predict(to_pred)
acc = round(accuracy_score(pred, y_pred) * 100)
f1 = round(f1_score(pred, y_pred) * 100)
prec = round(precision_score(pred, y_pred) * 100)
recall = round(recall_score(pred, y_pred) * 100)
d = pd.DataFrame(
data=np.array([acc, f1, prec, recall]).reshape(1, 4),
columns=["Accuracy", "F1_score", "Precision", "Recall"],
)
        results_df = pd.concat([results_df, d])
        results_df.rename(
            index={0: "{} _ {} Details".format(model, title)}, inplace=True
        )
pd.options.display.max_rows = 15
results_df
model = RandomForestClassifier(n_estimators=100, n_jobs=-1)
model.fit(x_train, y_train)
from sklearn.metrics import classification_report
y_train_pred = model.predict(x_train)
y_test_pred = model.predict(x_test)
train_report = classification_report(y_train, y_train_pred)
test_report = classification_report(y_test, y_test_pred)
print("Train Report", "\n", train_report)
print("Test Report", "\n", test_report)
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(12, 8))
conf_matrix = confusion_matrix(y_test, y_test_pred)
sns.heatmap(conf_matrix, cmap="Blues", annot=True, fmt="g")
plt.title("Confusion Matrix")
plt.xlabel("Predicted Labels")
plt.ylabel("True Labels")
labels = ["Negative", "Positive"]
plt.xticks(np.arange(len(labels)) + 0.5, labels)
plt.yticks(np.arange(len(labels)) + 0.5, labels)
plt.show()
# # Naive Bayes
proba, df_naive = pre.Naive_Bayes(df, target_name="fraudulent")
len(proba)
df_naive
x = df_naive.drop(["fraudulent"], axis=1)
y = df_naive["fraudulent"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=42
)
model = RandomForestClassifier(n_estimators=200, n_jobs=-1)
model.fit(x_train, y_train)
from sklearn.metrics import classification_report
y_train_pred = model.predict(x_train)
y_test_pred = model.predict(x_test)
train_report = classification_report(y_train, y_train_pred)
test_report = classification_report(y_test, y_test_pred)
print("Train Report", "\n", train_report)
print("Test Report", "\n", test_report)
plt.figure(figsize=(12, 8))
conf_matrix = confusion_matrix(y_test, y_test_pred)
sns.heatmap(conf_matrix, cmap="Blues", annot=True, fmt="g")
plt.title("Confusion Matrix")
plt.xlabel("Predicted Labels")
plt.ylabel("True Labels")
labels = ["Negative", "Positive"]
plt.xticks(np.arange(len(labels)) + 0.5, labels)
plt.yticks(np.arange(len(labels)) + 0.5, labels)
plt.show()
|
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half()
tokenizer.save_pretrained("chatglm")
model.save_pretrained("chatglm")
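# A minimal sketch of reloading the saved copy for inference (left commented out: it
# assumes a CUDA GPU and that the custom ChatGLM code can be loaded with
# trust_remote_code=True; model.chat() is ChatGLM-6B's chat helper):
# tokenizer = AutoTokenizer.from_pretrained("chatglm", trust_remote_code=True)
# model = AutoModel.from_pretrained("chatglm", trust_remote_code=True).half().cuda()
# model = model.eval()
# response, history = model.chat(tokenizer, "Hello", history=[])
# print(response)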
|
import os
import json
import random
import collections
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# # **Data Visualization**
# the function returns the file path of the .npy waveform sample corresponding to the given image_id.
def imageIdToPath(imageId: str, isTrain: bool = True) -> str:
folder = "train" if isTrain else "test"
return "../input/g2net-gravitational-wave-detection/{}/{}/{}/{}/{}.npy".format(
folder, imageId[0], imageId[1], imageId[2], imageId
)
trainDf = pd.read_csv("../input/g2net-gravitational-wave-detection/training_labels.csv")
trainDf
# create a count plot of the "target" variable from a pandas dataframe called 'trainDf'
sns.countplot(data=trainDf, x="target")
def visualizeSample(
_id,
target,
colors=("blue", "red", "green"),
signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo"),
):
# get file path
path = imageIdToPath(_id)
# load data
x = np.load(path)
# set up the figure
fig, axs = plt.subplots(
nrows=4, ncols=1, figsize=(16, 7), gridspec_kw={"height_ratios": [1, 1, 1, 2]}
)
# plot each signal
for i in range(3):
axs[i].plot(x[i], color=colors[i])
axs[i].legend([signal_names[i]], fontsize=12, loc="lower right")
# plot all three signals overlaid on each other
axs[3].plot(x[0], color=colors[0], label=signal_names[0])
axs[3].plot(x[1], color=colors[1], label=signal_names[1])
axs[3].plot(x[2], color=colors[2], label=signal_names[2])
axs[3].legend(fontsize=12, loc="lower right")
# set the title
fig.suptitle(f"id: {_id} target: {target}", fontsize=16)
# show the plot
plt.show()
for i in random.sample(trainDf.index.tolist(), 3):
_id = trainDf.iloc[i]["id"]
target = trainDf.iloc[i]["target"]
visualizeSample(_id, target)
# # **Spectrogram**
import librosa
import librosa.display
def visualizeSampleSpectogram(
_id,
target,
signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo"),
sr=2048,
fmin=20,
fmax=1024,
):
x = np.load(imageIdToPath(_id))
plt.figure(figsize=(16, 5))
for i in range(3):
X = librosa.stft(x[i] / x[i].max())
Xdb = librosa.amplitude_to_db(abs(X))
plt.subplot(1, 3, i + 1)
librosa.display.specshow(
Xdb,
sr=sr,
x_axis="time",
y_axis="linear",
fmin=fmin,
fmax=fmax,
vmin=-50,
vmax=50,
)
plt.colorbar()
plt.title(signal_names[i], fontsize=14)
plt.suptitle(f"id: {_id} target: {target}", fontsize=16)
plt.show()
for i in random.sample(trainDf.index.tolist(), 3):
_id = trainDf.iloc[i]["id"]
target = trainDf.iloc[i]["target"]
visualizeSampleSpectogram(_id, target)
#!pip install pycbc -qq
# import pycbc.types
import torch
from nnAudio.Spectrogram import CQT1992v2
# # **Q-Transform**
Q_TRANSFORM = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=32)
def visualizeSampleQtransform(
_id,
target,
signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo"),
sr=2048,
):
"""
Visualizes the CQT transform of a waveform signal for three different detectors.
Args:
_id (str): The ID of the image to visualize.
target (int): The target class of the image.
signal_names (tuple of str, optional): The names of the three signals to display.
Defaults to ("LIGO Hanford", "LIGO Livingston", "Virgo").
sr (int, optional): The sampling rate of the signal. Defaults to 2048.
"""
x = np.load(imageIdToPath(_id))
plt.figure(figsize=(16, 5))
for i in range(len(signal_names)):
waves = x[i] / np.max(x[i])
waves = torch.from_numpy(waves).float()
image = Q_TRANSFORM(waves)
plt.subplot(1, len(signal_names), i + 1)
plt.imshow(image.squeeze())
plt.title(signal_names[i], fontsize=14)
plt.suptitle(f"ID: {_id} | Target: {target}", fontsize=16)
plt.show()
for i in random.sample(trainDf.index.tolist(), 5):
_id = trainDf.iloc[i]["id"]
target = trainDf.iloc[i]["target"]
visualizeSample(_id, target)
visualizeSampleQtransform(_id, target)
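# A small follow-up sketch (not part of the original flow): stack the three detector
# Q-transforms of one sample into a single 3-channel array, e.g. for a composite view.
sample_id = trainDf.iloc[0]["id"]
sample_waves = np.load(imageIdToPath(sample_id))
stacked = torch.stack(
    [
        Q_TRANSFORM(torch.from_numpy(w / np.max(w)).float()).squeeze()
        for w in sample_waves
    ]
)
print(stacked.shape)  # (3, n_frequency_bins, n_time_frames)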
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/list-of-unicorn-startup-companies/List of Unicorn Startup Companies.csv"
)
colors = "#c9b1ff, #ffcaf2, #ffb2b1, #fff3ad, #bcffbc, #a2edff".split(", ")
colors = [f.upper() for f in colors]
sns.palplot(colors, size=2)
df.info()
sns.set_style("darkgrid")
sns.countplot(
y="Country/countries",
data=df,
order=df["Country/countries"].value_counts().index[:10],
palette=sns.color_palette([colors[0]] + [colors[1]] * 9),
)
plt.title(
"Top 10 countries which have the most unicorn companies in the world", size=12
)
plt.xlabel("Frequency")
plt.ylabel("")
plt.show()
sns.set_style("darkgrid")
sns.countplot(
y="Industry",
data=df,
order=df["Industry"].value_counts().index[:10],
palette=sns.color_palette([colors[0]] + [colors[1]] * 9),
)
plt.title("Top 10 industries which have the most unicorn companies", size=12)
plt.xlabel("Frequency")
plt.ylabel("")
plt.show()
# # Date considered
date_df = df.dropna(subset=["Valuation date"])
date_df["Valuation date 1"] = date_df["Valuation date"].str.split("[").str[0]
date_df["Valuation date 1"] = pd.to_datetime(date_df["Valuation date 1"])
date_df["Year"] = date_df["Valuation date 1"].dt.year
df.head()
sns.countplot(data=date_df, x="Year")
plt.title("Which year has the most validated unicorn?")
plt.show()
top_5_industries = df["Industry"].value_counts().index[:5]
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 7), sharey=True)
axs = np.ravel(axs)
for ind, industry in enumerate(top_5_industries):
mask = date_df["Industry"] == industry
g = sns.countplot(data=date_df[mask], x="Year", ax=axs[ind])
g.set_title(industry)
plt.tight_layout()
def process_valuation(data):
data = data.strip("+")
return float(data)
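# Note: process_valuation above is kept for reference only; the apply() below performs
# the same strip("+") conversion while also handling the "Undisclosed" entries.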
date_df["Valuation(US$ billions)"].unique()
date_df["Valuation"] = date_df["Valuation(US$ billions)"].apply(
lambda x: float(x.strip("+")) if x != "Undisclosed" else 0
)
date_df = date_df[date_df["Valuation"] > 0]
# # Valuation
date_df.head()
# ## Valuation break down by Country
plt.figure(figsize=(15, 7))
sns.boxplot(
x="Country/countries",
y="Valuation",
data=date_df,
order=date_df["Country/countries"].value_counts().index[:10],
showfliers=False,
)
plt.title("Valuation break down by Country")
# ## Valuation break down by industry
plt.figure(figsize=(15, 7))
g = sns.boxplot(
x="Industry",
y="Valuation",
data=date_df,
order=date_df["Industry"].value_counts().index[:10],
showfliers=False,
)
g.set_xticklabels(g.get_xticklabels(), rotation=30)
plt.title("Valuation break down by Industry")
plt.show()
# ## Valuation break down by year
plt.figure(figsize=(15, 7))
g = sns.boxplot(x="Year", y="Valuation", data=date_df, showfliers=False)
g.set_xticklabels(g.get_xticklabels(), rotation=30)
plt.title("Valuation break down by Year")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
df
df.drop(columns="id", inplace=True)
df
df.info()
x = df.drop(columns="target", axis=1)
y = df["target"]
x
df.describe()
params = {
"learning_rate": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30],
"max_depth": [3, 4, 5, 6, 8, 10, 12, 15],
"min_child_weight": [1, 3, 5, 7, 9],
"gamma": [0.0, 0.1, 0.2, 0.3, 0.4],
"colsample_bytree": [0.3, 0.4, 0.5, 0.7],
}
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import xgboost
from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = StandardScaler()
scalerr = MinMaxScaler()
x_scaled = pd.DataFrame(scaler.fit_transform(x), columns=x.columns)
x1_scaled = pd.DataFrame(scalerr.fit_transform(x), columns=x.columns)
x_scaled
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x_scaled, y, test_size=0.5, random_state=42
)
classifier = xgboost.XGBClassifier()
random_search = RandomizedSearchCV(
classifier,
param_distributions=params,
n_iter=10,
scoring="f1",
n_jobs=-1,
cv=5,
verbose=3,
)
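# Note: the search below is fitted on all of x_scaled/y, so the train/test accuracies
# reported afterwards are in-sample for the tuned model and will look optimistic.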
random_search.fit(x_scaled, y)
from sklearn.metrics import accuracy_score
acc = accuracy_score(random_search.predict(x_train), y_train)
print(acc)
acc1 = accuracy_score(random_search.predict(x_test), y_test)
print(acc1)
df2 = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
df2
df2.drop(columns="id", inplace=True)
# use the scaler fitted on the training features; do not refit on the test set
df2_scaled = pd.DataFrame(scaler.transform(df2), columns=df2.columns)
df2_scaled
predict = random_search.predict(df2_scaled)
predict
subm = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
sub = pd.DataFrame({"target": predict}, index=subm.id)
sub.head()
sub.to_csv("submissionn.csv")
|
# # `zara.com` XML Sitemap Analysis
import advertools as adv
import adviz
import pandas as pd
pd.options.display.max_columns = None
import plotly
import plotly.express as px
import plotly.graph_objects as go
from IPython.display import display_html, display_markdown
from dataset_utilities import value_counts_plus
import sklearn
for pkg in [adv, adviz, pd, plotly, sklearn]:
print(f"{pkg.__name__:-<30}v{pkg.__version__}")
def md(text):
return display_markdown(text, raw=True)
# sitemap = adv.sitemap_to_df('https://www.zara.com/sitemaps/sitemap-index.xml.gz')
sitemap_raw = pd.read_csv(
"/kaggle/input/xml-sitemaps/sitemap_2023_02_18_zara_com.csv",
parse_dates=["sitemap_last_modified", "download_date"],
low_memory=False,
)
sitemap_raw.shape
"errors" in sitemap_raw
sitemap = sitemap_raw.drop_duplicates(subset=["loc"])
print(sitemap.shape)
sitemap.head()
urldf = adv.url_to_df(sitemap["loc"].fillna(""))
urldf.head()
(
urldf.notna()
.mean()
.to_frame()
.style.background_gradient(cmap="cividis")
.format("{:.1%}")
)
adviz.value_counts_plus(urldf["scheme"], name="Scheme", size=15)
adviz.value_counts_plus(urldf["netloc"], name="Domain", size=15)
adviz.value_counts_plus(urldf["dir_1"], show_top=15, name="country").set_caption(
"<h3>Top country pages</h3>"
)
adviz.value_counts_plus(urldf["dir_2"], show_top=15, name="language").set_caption(
"<h3>Top language pages</h3>"
)
(
urldf[["dir_1", "dir_2"]]
.value_counts()
.reset_index()
.rename(columns={0: "count"})
.sort_values("count", ascending=False)
.style.format({"count": "{:,}"})
.bar(subset=["count"], color="steelblue")
.set_caption("<h3>Top <code>/country/language/</code> combination pages</h3>")
)
(
urldf[["dir_1", "dir_2"]]
.value_counts()
.describe()
.to_frame()
.rename(columns={0: "stat"})
.style.format("{:,.1f}")
.set_caption("Distribution of /country/lang/ combinations")
)
adviz.url_structure(
urldf["url"].fillna(""),
items_per_level=50,
domain="zara.com",
height=750,
title="URL Structure: zara.com XML sitemap",
)
import plotly.express as px
px.histogram(
x=urldf[["dir_1", "dir_2"]].value_counts(),
nbins=10,
height=500,
width=1000,
template="none",
labels={"x": "Number of URLs per country/language combination"},
title="<b>/country/language/</b> combination page counts - <b>zara.com</b>",
)
(
value_counts_plus(
urldf["last_dir"].str.split("-").explode(), show_top=20
).set_caption("<h3>Most used words in the last directory")
)
(
value_counts_plus(
urldf[urldf["dir_1"].eq("us")]["last_dir"].str.split("-").explode(), show_top=20
).set_caption("<h3>Most used words in the last directory - USA")
)
(
value_counts_plus(
urldf[urldf["dir_1"].eq("us") & urldf["last_dir"].str.contains("shirt")][
"last_dir"
]
.str.split("-")
.explode(),
show_top=20,
).set_caption('<h3>Most used words in the last directory - USA "shirt"')
)
(
adv.word_frequency(
urldf[urldf["dir_1"].eq("us")]["last_dir"].str.replace("-", " "), phrase_len=2
).head(30)
).style.set_caption("<h4>Top bigrams in last_dir")
# # Word similarity across documents
# ## Quantifying the similarity between documents (URLs)
# * For each pair of documents (keywords, titles, etc.) in the text list (URL slugs in this case), count the number of common words
# * Get a matrix for all the values
# * Take a meaningful subset of URLs (a certain `/country/lang/` combination for example)
# * Clean and extract tokens from the `last_dir`
# ## Use cases:
# * Get the most/least unique documents (the ones with the least/most common with other documents)
# * For any selected document get the most similar ones
# * Simple recommendation engine (if you like this you probably like that)
# * Allow flexible criteria for filtering by similarity and other criteria.
text_list = [
"blue green red",
"blue green yellow",
"blue black white",
"white red purple",
"magenta teal gray",
]
text_list
# Create a document-term matrix using `scikit-learn`'s `CountVectorizer`
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer()
X = vect.fit_transform(text_list)
X
X.toarray()
words = vect.get_feature_names_out()
words
# # Document-term matrix as a pandas `DataFrame`
dtm = pd.DataFrame.sparse.from_spmatrix(
data=X, index=text_list, columns=vect.get_feature_names_out()
)
dtm
# Select a certain document and filter columns that contain its tokens (columns that are 1 and not 0)
dtm.loc[:, dtm.loc["blue green yellow", :].eq(1)]
# Every row that is not all zeros has one or more words in common with our selected document.
# We can also sum each row, to see how many common words there are with our selected document:
dtm.loc[:, dtm.loc["blue green yellow", :].eq(1)].assign(
similarity=lambda df: df.apply("sum", axis=1)
)
# Sort by `similarity` to get the most similar documents, and remove rows where similarity is zero. This can be fine-tuned, as in most cases a single common word is probably not enough, so you can set it to a higher number.
bgy = dtm.loc[:, dtm.loc["blue green yellow", :].eq(1)].assign(
similarity=lambda df: df.apply("sum", axis=1)
)
bgy.sort_values("similarity", ascending=False).query("similarity > 0")
# ## Switching to real data
# ### Get a subset: URLs under `/uk/en/`
# # `/uk/en/product_name_number.html`
uk_en = urldf[urldf["dir_1"].eq("uk") & urldf["dir_2"].eq("en")]
uk_en["last_dir"]
# Clean the data:
# * Remove numbers from the URLs, e.g: `-s2342342342.html`, `-L234234234.html`
# * Replace hyphens with spaces
uk_en_clean = (
uk_en["last_dir"]
.str.replace("-[A-Za-z]+\d{4,}\.html$", "", regex=True)
.str.replace("-", " ")
)
uk_en_clean
from sklearn.feature_extraction.text import CountVectorizer
text_list = uk_en_clean.drop_duplicates().tolist()
vect = CountVectorizer()
X = vect.fit_transform(text_list)
vect.get_feature_names_out()
X
dtm_df = pd.DataFrame.sparse.from_spmatrix(
data=X, index=text_list, columns=vect.get_feature_names_out()
).assign(ngrams=lambda df: df.apply("sum", axis=1))
dtm_df.info()
# ## Add ngrams for each word to give more context on similarity
dtm_df.iloc[:15, -20:]
phrase = "carrot fit trousers with darts detail"
phrasedf = dtm_df.loc[:, dtm_df.loc[phrase, :].gt(0)]
phrasedf
# Now that we have our subset, we can further filter according to whatever criteria we want:
# * Minimum number of common words under `similarity`
# * Making sure we have relevant products: in this case "with" and "detail" might be the only words in common with our selected phrase, which clearly does not make a product similar. You might decide that the important feature is that it has to be a pair of trousers, which can easily be done by setting a minimum for the trousers column.
(
phrasedf.assign(
similarity=phrasedf.iloc[:, :-1].apply("sum", axis=1),
sim_perc=phrasedf.iloc[:, :-1].apply("mean", axis=1),
)
.sort_values("similarity", ascending=False)
.query("similarity > 1")
# .query('ngrams > 3')
.query("trousers > 0")
.style.background_gradient(cmap="cividis", subset=["similarity", "sim_perc"])
.format({"sim_perc": "{:.1%}"})
.set_caption("<h4>URLs most similar to 'carrot fit trousers with darts detail'")
)
# ## Creating a `similarity` function that takes a `dtm_df`, a selected `phrase` and a `min_similarity` parameters
def similarity(dtm_df, phrase, min_similarity=1):
phrasedf = dtm_df.loc[:, dtm_df.loc[phrase, :].gt(0)]
return (
phrasedf.assign(
similarity=phrasedf.iloc[:, :-1].apply("sum", axis=1),
sim_perc=phrasedf.iloc[:, :-1].apply("mean", axis=1),
)
.sort_values("similarity", ascending=False)
.query("similarity >= @min_similarity")
.style.background_gradient(cmap="cividis", subset=["similarity", "sim_perc"])
.background_gradient(cmap="Blues", subset=["ngrams"])
.format({"sim_perc": "{:.1%}"})
.set_caption(f"<h4>URLs most similar to '{phrase}'")
)
# Experiment with various options below:
similarity(dtm_df, "draped dress with ruffles", min_similarity=3)
|
# This is a project that reports how an insurance agent helps people through the years with different types of products.
# **The main goal of this project is to identify the potential clients of Isabella Insurance Office.**
# Isabella Insurance Office's business focuses on Covered California, Medicare and other types of insurance. In this specific project, we are going to focus on analysing the open data from `Covered California` and `Medicare`.
# **Table of Content**
# 1. [Part 1: Covered California](#1)
# 1. [Part 2: Medicare](#2)
#
# **1: Covered California**
# **Questions:**
#
#
# **Q1: What are the trends of growth for enrollees enrolled using an agent?**
#
#
# **Q2: What are the most valuable groups of people who will enroll using an agent?**
#
# **Q3: What suggestions can we provide for the business?**
# [Jump to the answers](#1.1)
# *Covered California™ is an entirely new entity charged with building a marketplace that will make health insurance available to millions of people who are currently uninsured.
# This is no easy undertaking. To accomplish our goals, we are working together with a broad range of partners. These stakeholders and entities have trusted relationships among California's
# uninsured markets and will help increase awareness and understanding of new health coverage options, promote a culture of coverage and encourage Californians to get covered.*
# *We anticipate building a very broad network of partnerships to ensure that consumers have access to a wide range of resources to help in making decisions about health care for themselves,
# their families and their employees.*
# *Covered California is working with state agencies to implement the new health care law. These partner organizations are helping Covered California ensure that anyone who needs to apply for any of the health insurance affordability programs affected by the Affordable Care Act can do so in a streamlined way. We are also working with partner groups to make sure that all health plans offered through the marketplace meet the requirements of a Qualified Health Plan. By partnering with these agencies, Covered California will be able to reach more Californians to let them know about their new options for health insurance.*
#
# *Reference: https://hbex.coveredca.com/resources/*
# In this research, we will analyse the datasets from Covered California Data & Resource Center: [https://hbex.coveredca.com/data-research/](https://hbex.coveredca.com/data-research/)
# There are few choices of datasets over the Covered California Data & Resource center. The `Statewide CrossTabs` in `Open Enrollment Profile` for each year reported how many enrollees were enrolled in the products by a certificated insurance agent which was us. We decided to focus on these specific datasets.
# The `Open Enrollment Profile` for 2020 and later includes both `Net Profiles` and `Gross Profiles`. But since the enrollment data before 2019 were `Gross Profiles` only, to keep consistency we will use the `Gross Profiles` for all years.
# *The Open Enrollment Profile shows counts of enrollees who have selected a plan through Covered California during the open enrollment period. These are gross plan selections, meaning they include any enrollees, regardless of the outcome of effectuation (payment of first premium). Data subject to revision due to reporting data lags and on-going reconciliations between Covered California and health plans. All cells rounded to nearest 10 consistent with privacy policy. Some dimension totals may not sum to the grand total enrollment figures due to occasional discrepancies between reporting data warehouse and CalHEERS, which may be corrected in subsequent updates to the data.*
#
#
# *Reference: CC_Open_Enrollment_Profile_Gross_2023_R20230308.xlsx*
# According to the `Self-Service and CEC_Market Source` report, Covered California plan enrollees answered: "How did you hear about Covered California?" (data from 10/01/2013 to 04/15/2014). Certificated insurance agents helped 268 enrollees, which was 0.24% of all the market sources. In the dataset `Covered California Active Member Profile, 2023`, certificated insurance agents helped 148,520 enrollees, which is 51.6% of all the listed service channels (the service channels are: Certified Enrollment Counselor, Certified Insurance Agent, Certified Plan-based Enroller, County Eligibility Worker, Service Center Representative, Unassisted).
# *Service Channel reflects the latest assister type to submit an application or enroll a consumer, including change reports.
# For this measure, prior contact with a CEC, PBE, or agent overwrites a more recent activity that was unassisted or performed by SCRs.*
# First, let's take a look at some facts by age.
# Note: The following data are captured from the sheet `Statewide CrossTabs` in the `Open Enrollment Profile` for each year.
# *Definition:*
#
# *2023: Open Enrollment 2023. Data as of 2/1/2023*
# *2022: Open Enrollment 2022. Data as of 2/4/2022*
#
# *2021: Open Enrollment 2021. Data as of 1/31/2021*
#
# *2020: Open Enrollment 2020. Data as of 2/7/2020*
#
# *2019: Open Enrollment 2019. Data as of 1/18/2019*
#
# *2018: Open Enrollment 2018. Data as of 2/4/2018*
#
# *2017: Open Enrollment 2017. Data as of 2/8/2017*
# *2016: Open Enrollment 2016. Data as of 2/6/2016*
import numpy as np
import pandas as pd
cc_age = pd.DataFrame(
{
"Year": ["2016", "2017", "2018", "2019", "2020", "2021", "2022", "2023"],
"Age 17 or less": [12230, 13860, 16750, 13050, 18300, 9430, 13190, 13020],
"Age 18 to 25": [34060, 26470, 27060, 21510, 31710, 18240, 23040, 20370],
"Age 26 to 34": [33820, 35130, 35730, 31620, 46810, 29070, 38860, 32390],
"Age 35 to 44": [32240, 28990, 31350, 24890, 37490, 21190, 25720, 23210],
"Age 45 to 54": [46860, 39450, 41140, 31460, 47980, 25950, 27600, 25220],
"Age 55 to 64": [41080, 39370, 40190, 33590, 53590, 30670, 34020, 32640],
"Age 65+": [1230, 1280, 1340, 980, 1500, 1150, 1490, 1520],
}
)
cc_age
import matplotlib.pyplot as plt
axes = plt.gca()
cc_age.plot(kind="line", marker=".", x="Year", y="Age 17 or less", ax=axes)
cc_age.plot(kind="line", marker=".", x="Year", y="Age 18 to 25", ax=axes)
cc_age.plot(kind="line", marker=".", x="Year", y="Age 26 to 34", ax=axes)
cc_age.plot(kind="line", marker=".", x="Year", y="Age 35 to 44", ax=axes)
cc_age.plot(kind="line", marker=".", x="Year", y="Age 45 to 54", ax=axes)
cc_age.plot(kind="line", marker=".", x="Year", y="Age 55 to 64", ax=axes)
cc_age.plot(kind="line", marker=".", x="Year", y="Age 65+", ax=axes)
plt.title("Enrollees based on Age")
plt.xlabel("Year")
plt.ylabel("Number of Enrollee")
plt.legend(bbox_to_anchor=(0.75, 1.15), ncol=2)
plt.grid(axis="y", linestyle="--", linewidth=0.5)
plt.show()
# Let's look into more details:
# The number of enrollees increased significantly in 2020 because of the pandemic. In 2021, the numbers returned to roughly the pre-pandemic level. Let's take a look at the mean year-over-year change between 2021 and 2023.
cc_age_17_2021_2023 = ((13020 - 13190) + (13190 - 9430)) / 2
cc_age_18_2021_2023 = ((20370 - 23040) + (23040 - 18240)) / 2
cc_age_26_2021_2023 = ((32390 - 38860) + (38860 - 29070)) / 2
cc_age_35_2021_2023 = ((23210 - 25720) + (25720 - 21190)) / 2
cc_age_45_2021_2023 = ((25220 - 27600) + (27600 - 25950)) / 2
cc_age_55_2021_2023 = ((32640 - 34020) + (34020 - 30670)) / 2
cc_age_65_2021_2023 = ((1520 - 1490) + (1490 - 1150)) / 2
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 17 or less is: {cc_age_17_2021_2023:.2f}"
)
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 18 to 25 is: {cc_age_18_2021_2023:.2f}"
)
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 26 to 34 is: {cc_age_26_2021_2023:.2f}"
)
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 35 to 44 is: {cc_age_35_2021_2023:.2f}"
)
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 45 to 54 is: {cc_age_45_2021_2023:.2f}"
)
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 55 to 64 is: {cc_age_55_2021_2023:.2f}"
)
print(
    f"The mean year-over-year change between 2021 and 2023 for Age 65+ is: {cc_age_65_2021_2023:.2f}"
)
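# A minimal sketch of the same calculation derived directly from cc_age (it averages
# the 2021->2022 and 2022->2023 changes for every age column, assuming cc_age as above):
recent_years = cc_age[cc_age["Year"].isin(["2021", "2022", "2023"])].set_index("Year")
print(recent_years.diff().mean().round(2))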
# What we have found here:
# 1. The number of enrollees in almost every age range has increased again after 2020 (from 2021 to 2023), except for age 45 to 54.
# 1. Since 2019, Age 55 to 64 has had the most enrollees each year, except in 2022.
# 1. Age 17 or less and Age 26 to 34 show strong potential, with larger increases in enrollees than the other age ranges. This is especially true for Age 17 or less, but we will need to check the new Medi-Cal policy for this age range.
# 1. Age 26 to 34 had the largest increase in 2022, but went back to normal in 2023.
# 1. Age 65+ has the fewest enrollees (because of Medicare), but it does not show large swings. We will focus on analysing the Medicare product in the second section.
# Let's take a look at the FPL (Federal Poverty Level) in this dataset.
# The range `400% FPL or greater` was split into `400% FPL to 600% FPL` and `600% FPL or greater` starting in 2020. To keep the data consistent, we combine these two back into `400% FPL or greater` for all years (a small sketch of this combination follows the FPL definition below).
# *A measure of income issued every year by the Department of Health and Human Services (HHS). Federal poverty levels are used to determine your eligibility for certain programs and benefits, including savings on Marketplace health insurance, and Medicaid and CHIP coverage.*
#
# *The 2023 federal poverty level (FPL) income numbers below are used to calculate eligibility for Medicaid and the Children's Health Insurance Program (CHIP). 2022 numbers are slightly lower, and are used to calculate savings on Marketplace insurance plans for 2023.*
#
# *How federal poverty levels are used to determine eligibility for reduced-cost health coverage:*
#
# *Income above 400% FPL: If your income is above 400% FPL, you may now qualify for premium tax credits that lower your monthly premium for a 2023 Marketplace health insurance plan.*
#
# *Income between 100% and 400% FPL: If your income is in this range, in all states you qualify for premium tax credits that lower your monthly premium for a Marketplace health insurance plan.*
#
# *Income at or below 150% FPL: If your income falls at or below 150% FPL in your state and you’re not eligible for Medicaid or CHIP, you may qualify to enroll in or change Marketplace coverage through a Special Enrollment Period.*
#
# *Income below 138% FPL: If your income is below 138% FPL and your state has expanded Medicaid coverage, you qualify for Medicaid based only on your income.*
#
# *Income below 100% FPL: If your income falls below 100% FPL, you probably won’t qualify for savings on a Marketplace health insurance plan or for income-based Medicaid.*
#
# *Reference: Healthcare.gov [https://www.healthcare.gov/glossary/federal-poverty-level-fpl/](https://www.healthcare.gov/glossary/federal-poverty-level-fpl/)*
#
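# A minimal sketch of the combination described above, using a toy frame with
# hypothetical post-2020 column names (the real CrossTabs headers may differ slightly):
_fpl_toy = pd.DataFrame(
    {"400% FPL to 600% FPL": [11000, 15000], "600% FPL or greater": [3400, 4800]}
)
_fpl_toy["400% FPL or greater"] = (
    _fpl_toy["400% FPL to 600% FPL"] + _fpl_toy["600% FPL or greater"]
)
_fpl_toy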
cc_fpl = pd.DataFrame(
{
"Year": ["2016", "2017", "2018", "2019", "2020", "2021", "2022", "2023"],
"138% FPL or less": [1930, 2470, 2480, 1410, 2340, 1180, 430, 1540],
"138% FPL to 150% FPL": [
41670,
37470,
36260,
29880,
39920,
28620,
30170,
29580,
],
"150% FPL to 200% FPL": [
62880,
55230,
51680,
42300,
61980,
37220,
35820,
30270,
],
"200% FPL to 250% FPL": [
31580,
30460,
31910,
25540,
41360,
19080,
23410,
19180,
],
"250% FPL to 400% FPL": [
44220,
42870,
53150,
43840,
64330,
33090,
41860,
39080,
],
"400% FPL or greater": [5320, 5420, 5090, 3960, 14380, 7490, 19840, 18400],
"FPL Unavailable": [4490, 30, 3010, 2030, 1840, 390, 2650, 1310],
"Unsubsidized Application": [9420, 10600, 9970, 8140, 11250, 8630, 10460, 9160],
}
)
cc_fpl
# As before, we plot the data as a line chart.
axes = plt.gca()
cc_fpl.plot(kind="line", marker=".", x="Year", y="138% FPL or less", ax=axes)
cc_fpl.plot(kind="line", marker=".", x="Year", y="138% FPL to 150% FPL", ax=axes)
cc_fpl.plot(kind="line", marker=".", x="Year", y="150% FPL to 200% FPL", ax=axes)
cc_fpl.plot(kind="line", marker=".", x="Year", y="200% FPL to 250% FPL", ax=axes)
cc_fpl.plot(kind="line", marker=".", x="Year", y="250% FPL to 400% FPL", ax=axes)
cc_fpl.plot(kind="line", marker=".", x="Year", y="400% FPL or greater", ax=axes)
cc_fpl.plot(
kind="line", marker=".", x="Year", y="FPL Unavailable", ax=axes, linestyle="--"
)
# FPL Unavailable and Unsubsidized Application are drawn with dashed lines to de-emphasize them
cc_fpl.plot(
kind="line",
marker=".",
x="Year",
y="Unsubsidized Application",
ax=axes,
linestyle="--",
)
plt.title("Enrollees based on the FPL")
plt.xlabel("Year")
plt.ylabel("Number of Enrollee")
plt.legend(bbox_to_anchor=(0.75, 1.15), ncol=2)
plt.grid(axis="y", linestyle="--", linewidth=0.5)
plt.show()
# What we have found here:
# 1. Most FPL groups rose sharply in 2020, then dropped back below their 2019 levels, except for `400% FPL or greater`.
# 1. The `150% FPL to 200% FPL` group has kept decreasing since 2020.
# Let's take a look at race.
cc_race = pd.DataFrame(
{
"Year": ["2016", "2017", "2018", "2019", "2020", "2021", "2022", "2023"],
"American Indian or Alaska Native": [260, 170, 230, 210, 350, 200, 270, 230],
"Asian American": [32160, 30530, 38470, 31460, 53570, 28040, 28880, 29160],
"Black or African American": [2870, 2140, 2410, 2760, 4650, 2980, 4800, 3700],
"Latino": [36560, 28090, 34460, 27810, 47910, 25440, 36710, 29650],
"Multiple Races": [1390, 1320, 1670, 1730, 2970, 2090, 3430, 2970],
"Native Hawaiian or Other Pacific Islander": [
250,
210,
120,
100,
170,
120,
190,
150,
],
"White": [31100, 26210, 34080, 31460, 45500, 27040, 37640, 31440],
"Other": [5430, 5530, 14710, 12060, 16800, 8990, 10970, 10430],
}
)
cc_race
axes = plt.gca()
cc_race.plot(
kind="line", marker=".", x="Year", y="American Indian or Alaska Native", ax=axes
)
cc_race.plot(kind="line", marker=".", x="Year", y="Asian American", ax=axes)
cc_race.plot(kind="line", marker=".", x="Year", y="Black or African American", ax=axes)
cc_race.plot(kind="line", marker=".", x="Year", y="Latino", ax=axes)
cc_race.plot(kind="line", marker=".", x="Year", y="Multiple Races", ax=axes)
cc_race.plot(
kind="line",
marker=".",
x="Year",
y="Native Hawaiian or Other Pacific Islander",
ax=axes,
)
cc_race.plot(kind="line", marker=".", x="Year", y="White", ax=axes)
cc_race.plot(kind="line", marker=".", x="Year", y="Other", ax=axes)
plt.title("Enrollees based on race")
plt.xlabel("Year")
plt.ylabel("Number of Enrollee")
plt.legend(bbox_to_anchor=(0.75, 1.15), ncol=2)
plt.grid(axis="y", linestyle="--", linewidth=0.5)
plt.show()
# What we have found here:
# 1. Asian American, Latino and White have much higher numbers of enrollees all the time, compared to other races.
# 1. In 2022, Latino and White had big increases, compared to Asian American and others.
# Let's take a look at the data of the languages.
# Since Isabella Insurance Office speaks English, Cantonese and Mandarin, we will show the data for these three languages.
# In addition, some other languages, such as Spanish, are also popular in the enrollment profiles (more than 1,000 enrollees assisted by a certificated agent). Let's take a look and see whether any of them are worth investing in.
cc_language = pd.DataFrame(
{
"Year": ["2016", "2017", "2018", "2019", "2020", "2021", "2022", "2023"],
"Cantonese": [1750, 1740, 1980, 1560, 3510, 1740, 1260, 1280],
"English": [138730, 128880, 153450, 128700, 184440, 111860, 140560, 122650],
"Korean": [4640, 3130, 3600, 1920, 5170, 1580, 1530, 1730],
"Mandarin": [6610, 6790, 9940, 7370, 15290, 6920, 6930, 7880],
"Spanish": [23130, 18230, 17710, 12870, 24210, 11420, 12440, 12670],
"Vietnamese": [1840, 1990, 2300, 1900, 3080, 1090, 960, 1020],
}
)
cc_language
axes = plt.gca()
cc_language.plot(kind="line", marker=".", x="Year", y="Cantonese", ax=axes)
cc_language.plot(kind="line", marker=".", x="Year", y="English", ax=axes)
cc_language.plot(kind="line", marker=".", x="Year", y="Korean", ax=axes, linestyle="--")
cc_language.plot(kind="line", marker=".", x="Year", y="Mandarin", ax=axes)
cc_language.plot(kind="line", marker=".", x="Year", y="Spanish", ax=axes)
cc_language.plot(
kind="line", marker=".", x="Year", y="Vietnamese", ax=axes, linestyle="--"
)
plt.title("Enrollees based on language")
plt.xlabel("Year")
plt.ylabel("Number of Enrollee")
plt.legend(bbox_to_anchor=(1.3, 1.15), ncol=2)
plt.grid(axis="y", linestyle="--", linewidth=0.5)
plt.show()
# What we have found here:
# 1. English is always the most popular language for all enrollment profiles
# 1. Mandarin and Spanish have larger numbers of enrollees compared to other non-English enrollees.
# 1. The current languages that Isabella Insurance Office speaks are keeping the same trends through the years.
# 1. Spanish is the most popular non-English language, and the number of enrollees is increasing.
#
# The answers of the Questions:
# **Q1: What are the trends of growth for enrollees enrolled using an agent?**
# Overall, the numbers of enrollees who used insurance agents are decreasing.
#
#
# **Q2: What are the most valuable groups of people who will enroll using an agent?**
# The following groups of people are the most valuable groups:
# * Age between 55-64 years old, age between 26-34 years old.
# * FPL between 250% - 400%
# * Race: White, Asian American, Latino
# * Language: English, Mandarin and Spanish
# **Q3: What suggestions can we provide for the business?**
# * The 55 to 64 age group consistently has the most enrollees who use insurance agents, but younger people aged 26 to 34 show strong potential value for investment.
# * Even though Covered California's growth has slowed in recent years, this trend follows the broader business climate. For example, starting from the end of 2022 many people at S&P 500 companies were laid off; we expect this group will need Covered California coverage, which is an opportunity for our business to grow.
# * Latino enrollees and Spanish speakers make up a large share of enrollees, so it is worth Isabella Insurance Office investing in an employee who speaks Spanish.
#
# **2: Medicare**
# **Question:**
#
#
#
# **What suggestions can we provide for the Medicare business?**
# [Jump to the answers](#2.1)
# In order to analyse Medicare data with a focus on insurance brokers (in the Medicare system, the "Certificated Insurance Agent" is called an "Insurance Broker"), we researched and found the `Agent Broker Compensation` datasets on CMS.gov.
# The dataset has been uploaded into the notebook, it can also be downloaded over: [https://www.cms.gov/Medicare/Health-Plans/ManagedCareMarketing/AgentBroker](https://www.cms.gov/Medicare/Health-Plans/ManagedCareMarketing/AgentBroker)
# *An introduction of the dataset from CMS.gov*
#
# *Below is a link to a file containing the amounts that companies pay independent agents/brokers to sell their Medicare drug and health plans. Companies that contract with Medicare to provide health care coverage or prescription drugs typically use agents/brokers to sell their Medicare plans to Medicare beneficiaries. Sometimes these agents/brokers are employees of the contracted company. In other situations, the companies hire independent agents/brokers who are not employees to sell the companies' Medicare plans.*
#
# *Generally, agents/brokers receive an initial payment in the first year of the policy (or when there is an “unlike plan type” enrollment change) and half as much for years two (2) and beyond if the member remains enrolled in the plan or make a “like plan type” enrollment change.*
# *Agents/brokers must be licensed in the State in which they do business, annually complete training and pass a test on their knowledge of Medicare and health and prescription drug plans, and follow all Medicare marketing rules. Agents/brokers are subject to rigorous oversight by their contracted health or drug plans and face the risk of loss of licensure with their State and termination with their contracted health or drug plans if they don't comply with strict rules related to selling to and enrolling Medicare beneficiaries in Medicare plans.*
# *The information contained in this file has columns for each Medicare plan with the following information: State, county, company name, plan name, whether the company uses independent agents or not, the amount(s) paid to independent agents for selling the plan in the first year of enrollment following the sale, other plan identification numbers, and whether the plan information displayed requires correction. The information within the various columns can be sorted to more easily find compensation information about the plan or plans you are interested in.*
# These four (4) datasets are good resources for identifying the trends of recent years. To keep things consistent, we converted the 2023 and 2022 datasets from xlsx to csv format.
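# A sketch of the xlsx-to-csv conversion mentioned above (left commented out; the
# file names are illustrative and the actual CMS downloads may differ):
# pd.read_excel("ABC_Extract_CY2023.xlsx").to_csv(
#     "Preprocessed_ABC_Extract_CY2023.csv", index=False
# )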
mc_2023_original = pd.read_csv(
"../input/medicare-agent-broker-compensation/Preprocessed_ABC_Extract_CY2023.csv",
encoding="unicode_escape",
)
mc_2023_original.columns
# Above are the columns in `Preprocessed_ABC_Extract_CY2023.csv`. Let's keep only the columns that are useful for our analysis. We will do the same when analysing the datasets for the other years, so we will skip this explanation later.
mc_2023 = mc_2023_original[["State Code", "Plan Name", "Used Agent Brokers?"]]
mc_2023
# Let's take a look at the descriptive data for our dataset.
mc_2023.describe()
mc_2023_Y = mc_2023[(mc_2023["Used Agent Brokers?"] == "Y")]
mc_2023_N = mc_2023[(mc_2023["Used Agent Brokers?"] == "N")]
print(mc_2023_Y)
print(mc_2023_N)
print(
    "In the dataset of 2023, the number of Medicare enrollees who used Agent Brokers is:",
    len(mc_2023_Y),
)
print(
    "In the dataset of 2023, the number of Medicare enrollees who didn't use Agent Brokers is:",
    len(mc_2023_N),
)
mc_2023_all_percentage = len(mc_2023_Y) / (len(mc_2023_Y) + len(mc_2023_N))
print(
    f"The percentage of enrollees who used an Agent Broker is: {mc_2023_all_percentage:.2%}"
)
# Now let's take a look at the difference based on the states.
mc_2023_Y_state = mc_2023_Y.drop(columns=["Plan Name"], axis=1)
mc_2023_N_state = mc_2023_N.drop(columns=["Plan Name"], axis=1)
mc_2023_Y_state.describe()
mc_2023_Y_state_value = mc_2023_Y_state.value_counts()
mc_2023_Y_state_value
mc_2023_Y_state = pd.DataFrame(
{
"State Code": [
"AK",
"AL",
"AR",
"AS",
"AZ",
"CA",
"CO",
"CT",
"DC",
"DE",
"FL",
"GA",
"GU",
"HI",
"IA",
"ID",
"IL",
"IN",
"KS",
"KY",
"LA",
"MA",
"MD",
"ME",
"MI",
"MN",
"MO",
"MP",
"MS",
"MT",
"NC",
"ND",
"NE",
"NH",
"NJ",
"NM",
"NV",
"NY",
"OH",
"OK",
"OR",
"PA",
"PR",
"RI",
"SC",
"SD",
"TN",
"TX",
"UT",
"VA",
"VI",
"VT",
"WA",
"WI",
"WV",
"WY",
],
"Used Agent Brokers": [
80,
2578,
3386,
1,
768,
2087,
1324,
481,
54,
152,
4046,
8921,
1,
139,
2869,
951,
3906,
4305,
2020,
4718,
2809,
738,
564,
614,
4112,
3487,
3996,
1,
2553,
581,
4212,
1029,
1874,
419,
1109,
1189,
465,
3932,
6423,
2365,
991,
5337,
5032,
241,
2454,
1512,
4547,
7090,
605,
5528,
1,
336,
1203,
2906,
1888,
225,
],
}
)
mc_2023_N_state.describe()
mc_2023_N_state_value = mc_2023_N_state.value_counts()
mc_2023_N_state_value
mc_2023_N_state = pd.DataFrame(
{
"State Code": [
"AK",
"AL",
"AR",
"AZ",
"CA",
"CO",
"CT",
"DC",
"DE",
"FL",
"GA",
"HI",
"IA",
"ID",
"IL",
"IN",
"KS",
"KY",
"LA",
"MA",
"MD",
"ME",
"MI",
"MN",
"MO",
"MS",
"MT",
"NC",
"ND",
"NE",
"NH",
"NJ",
"NM",
"NV",
"NY",
"OH",
"OK",
"OR",
"PA",
"RI",
"SC",
"SD",
"TN",
"TX",
"UT",
"VA",
"VT",
"WA",
"WI",
"WV",
"WY",
],
"Not Used Agent Brokers": [
1,
2,
1,
1,
2,
1,
2,
2,
2,
1,
2,
1,
1,
1,
311,
14,
1,
2,
1,
35,
10,
2,
35,
73,
1,
2,
1,
2,
31,
87,
2,
1,
2,
1,
24,
57,
2,
6,
2,
2,
45,
37,
2,
10,
1,
2,
2,
2,
120,
2,
1,
],
}
)
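# A sketch showing the same per-state counts derived directly from the value counts
# instead of being typed by hand (kept under a new name so the hand-built tables
# above remain the ones used in the merge below):
mc_2023_Y_state_auto = (
    mc_2023_Y["State Code"]
    .value_counts()
    .rename_axis("State Code")
    .reset_index(name="Used Agent Brokers")
    .sort_values("State Code", ignore_index=True)
)
mc_2023_Y_state_auto.head()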
mc_2023_state = pd.merge(mc_2023_Y_state, mc_2023_N_state, on="State Code", how="left")
mc_2023_state = mc_2023_state.fillna(0)
mc_2023_state.plot(kind="bar", x="State Code", figsize=(12, 5), stacked=True)
plt.title("Medicare Enrollees Used or Not Used Agent Brokers")
plt.xlabel("State Code")
plt.ylabel("Numbers of Enrollees")
mc_2023_state.describe()
# Through this analysis, we found that enrollees use agent brokers nationwide. Isabella's business is focused on California, especially the Bay Area, but the number of California enrollees who use an agent broker is below the nationwide mean (2087 vs. 2235), so we see significant value in also investing in enrollees who live outside of California.
# Let's now take a look at the differences based on the plan names.
mc_2023_Y_plan_name = mc_2023_Y.drop(columns=["State Code"], axis=1)
mc_2023_N_plan_name = mc_2023_N.drop(columns=["State Code"], axis=1)
len(mc_2023_Y_plan_name["Plan Name"].unique().tolist())
# There are 2844 unique `Plan Name` values in this dataset, so it is hard to list them all or chart them. But we can still take a look at some details.
mc_2023_Y_plan_name.value_counts()
# In the group of enrollees who are using Agent Brokers in this dataset, the most popular plan for Medicare is `Humana Honor (PPO)`, followed by `Lasso Healthcare Growth Plus (MSA)`, `Lasso Healthcare Growth (MSA)`, `UnitedHealthcare Dual Complete Choice (PPO D-SNP)` and `Wellcare No Premium Open (PPO)`, which are the Top 5.
# What is the "most popular" plan that enrollees don't use agent brokers?
mc_2023_N_plan_name.value_counts()
# It's `Aetna Better Health Premier Plan MMAI (Medicare-Medicaid Plan)` in this dataset.
# Let's take a quick look at the 2022 dataset and see whether we can find any differences.
mc_2022_original = pd.read_csv(
"../input/medicare-agent-broker-compensation/Preprocessed_ABC_Extract_CY2022.csv",
encoding="unicode_escape",
)
mc_2022 = mc_2022_original[["State Code", "Plan Name", "Used Agent Brokers?"]]
mc_2022_Y = mc_2022[(mc_2022["Used Agent Brokers?"] == "Y")]
mc_2022_N = mc_2022[(mc_2022["Used Agent Brokers?"] == "N")]
print(
    f"In the dataset of 2022, the number of Medicare enrollees who used Agent Brokers was: {len(mc_2022_Y)}"
)
print(
    f"In the dataset of 2022, the number of Medicare enrollees who didn't use Agent Brokers was: {len(mc_2022_N)}"
)
mc_2022_all_percentage = len(mc_2022_Y) / (len(mc_2022_Y) + len(mc_2022_N))
print(
    f"The percentage of enrollees who used an Agent Broker was: {mc_2022_all_percentage:.2%}"
)
mc_2022_Y.describe()
# Based on the results above, there is no major difference between 2023 and 2022. Let's take a look at 2021.
mc_2021_original = pd.read_csv(
"../input/medicare-agent-broker-compensation/CY2021 CMS Agent Broker Compensation Data.csv",
encoding="unicode_escape",
header=4,
)
mc_2021 = mc_2021_original[["State Code", "Plan Name", "Used Agent Brokers?"]]
mc_2021_Y = mc_2021[(mc_2021["Used Agent Brokers?"] == "Y")]
mc_2021_N = mc_2021[(mc_2021["Used Agent Brokers?"] == "N")]
print(
    f"In the dataset of 2021, the number of Medicare enrollees who used Agent Brokers was: {len(mc_2021_Y)}"
)
print(
    f"In the dataset of 2021, the number of Medicare enrollees who didn't use Agent Brokers was: {len(mc_2021_N)}"
)
mc_2021_all_percentage = len(mc_2021_Y) / (len(mc_2021_Y) + len(mc_2021_N))
print(
    f"The percentage of enrollees who used an Agent Broker was: {mc_2021_all_percentage:.2%}"
)
mc_2021_Y.describe()
# We can see that in the 2021 dataset, the most popular plan name was `Lasso Healthcare Growth Plus (MSA)` instead of `Humana Honor (PPO)`, even though `GA` still had the most enrollees who used insurance agents, and the percentage of enrollees who used agent brokers was still 99%.
# Lastly, let's take a look at 2020, which we know was the year the pandemic began.
mc_2020_original = pd.read_csv(
"../input/medicare-agent-broker-compensation/CY2020 CMS Agent Broker Compensation Data.csv",
encoding="unicode_escape",
header=4,
)
mc_2020 = mc_2020_original[["State Code", "Plan Name", "Used Agent Brokers?"]]
mc_2020_Y = mc_2020[(mc_2020["Used Agent Brokers?"] == "Y")]
mc_2020_N = mc_2020[(mc_2020["Used Agent Brokers?"] == "N")]
print(
    f"In the dataset of 2020, the number of Medicare enrollees who used Agent Brokers was: {len(mc_2020_Y)}"
)
print(
    f"In the dataset of 2020, the number of Medicare enrollees who didn't use Agent Brokers was: {len(mc_2020_N)}"
)
mc_2020_all_percentage = len(mc_2020_Y) / (len(mc_2020_Y) + len(mc_2020_N))
print(
    f"The percentage of enrollees who used an Agent Broker was: {mc_2020_all_percentage:.2%}"
)
mc_2020_Y.describe()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/us-tornado-dataset-1950-2021/us_tornado_dataset_1950_2021.csv"
)
df.st.unique()
df[df["yr"] == 1950]
import seaborn as sns
df[df["st"] == "TX"]["slon"].mean()
df[df["st"] == "TX"]["slat"].mean()
import folium
m = folium.Map(location=[32.06, -98.5], tiles="OpenStreetMap", zoom_start=5.3)
for index, location_info in df[
(df["st"] == "TX") & (df["yr"] >= 2020) & (df["mag"] >= 2)
].iterrows():
folium.Marker(
[location_info["slat"], location_info["slon"]],
popup=location_info["date"],
icon=folium.Icon(color="red"),
).add_to(m)
folium.Marker(
[location_info["elat"], location_info["elon"]],
popup=location_info["date"],
icon=folium.Icon(color="green"),
).add_to(m)
m
df[df["st"] == "IL"]["slat"].mean()
df[df["st"] == "IL"]["slon"].mean()
m = folium.Map(location=[40.06, -89.5], tiles="OpenStreetMap", zoom_start=6.3)
for index, location_info in df[
(df["st"] == "IL") & (df["yr"] >= 2020) & (df["mag"] >= 2)
].iterrows():
folium.Marker(
[location_info["slat"], location_info["slon"]],
popup=location_info["date"],
icon=folium.Icon(color="red"),
).add_to(m)
folium.Marker(
[location_info["elat"], location_info["elon"]],
popup=location_info["date"],
icon=folium.Icon(color="green"),
).add_to(m)
m
df[df["mag"] > 3]["slat"].mean()
df[df["mag"] > 3]["slon"].mean()
m = folium.Map(location=[37.9, -91.7], tiles="OpenStreetMap", zoom_start=4.8)
for index, location_info in df[(df["yr"] >= 2020) & (df["mag"] > 3)].iterrows():
folium.Marker(
[location_info["slat"], location_info["slon"]],
popup=location_info["date"],
icon=folium.Icon(color="red"),
).add_to(m)
folium.Marker(
[location_info["elat"], location_info["elon"]],
popup=location_info["date"],
icon=folium.Icon(color="green"),
).add_to(m)
m
df[df["fat"] >= 40]["slat"].mean()
df[df["fat"] >= 40]["slon"].mean()
m = folium.Map(location=[35.7, -90.6], tiles="OpenStreetMap", zoom_start=4)
for index, location_info in df[df["fat"] >= 40].iterrows():
folium.Marker(
[location_info["slat"], location_info["slon"]],
popup=location_info["date"],
icon=folium.Icon(color="red"),
).add_to(m)
folium.Marker(
[location_info["elat"], location_info["elon"]],
popup=location_info["date"],
icon=folium.Icon(color="green"),
).add_to(m)
m
import matplotlib.pyplot as plt
count_vec = []
month_vec = []
for month in range(1, 13):
foo = len(df[df.mo == month])
count_vec.append(foo)
month_vec.append(month)
df_month = pd.DataFrame({"Month": month_vec, "Count": count_vec})
fig, ax = plt.subplots(figsize=(7, 5))
sns.barplot(df_month, x="Month", y="Count")
ax.set(title="Tornadoes per Month (1950-2021)")
labels = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
ax.set_xticklabels(labels)
plt.show()
# # **The most tornado-prone month is May, because the ingredients for powerful storms and large twisters (warm, moist air combined with strong wind shear) come together most frequently in late spring.**
fat_vec = []
yr_vec = []
for yr in range(1950, 2022):
totfat = df[df.yr == yr].fat.sum()
fat_vec.append(totfat)
yr_vec.append(yr)
df_fat = pd.DataFrame({"yr": yr_vec, "fat": fat_vec})
# fig,ax = plt.subplots(figsize=(15,5))
fig, ax = plt.subplots(figsize=(10, 6))
sns.barplot(data=df_fat, x="yr", y="fat")
ax.tick_params(axis="x", rotation=90)
ax.set(xlabel="Year", ylabel="Count", title="Tornado Fatalities per Year (1950-2021)")
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
fig, ax = plt.subplots(figsize=(10, 4))
sns.lineplot(data=df_fat, x="yr", y="fat", marker="o")
sns.regplot(data=df_fat, x="yr", y="fat", marker="o", ci=95)
ax.tick_params(axis="x", rotation=90)
ax.set(
xlabel="Year",
ylabel="Count",
title="Trend in Tornado Fatalities per Year (1950-2021)",
)
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
inj_vec = []
yr_vec = []
for yr in range(1950, 2022):
totinj = df[df.yr == yr].inj.sum()
inj_vec.append(totinj)
yr_vec.append(yr)
df_inj = pd.DataFrame({"yr": yr_vec, "inj": inj_vec})
# fig,ax = plt.subplots(figsize=(15,5))
fig, ax = plt.subplots(figsize=(10, 6))
sns.barplot(data=df_inj, x="yr", y="inj")
ax.tick_params(axis="x", rotation=90)
ax.set(xlabel="Year", ylabel="Count", title="Tornado Injuries per Year (1950-2021)")
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
fig, ax = plt.subplots(figsize=(10, 4))
sns.lineplot(data=df_inj, x="yr", y="inj", marker="o")
sns.regplot(data=df_inj, x="yr", y="inj", marker="o", ci=95)
ax.tick_params(axis="x", rotation=90)
ax.set(
xlabel="Year",
ylabel="Count",
title="Trend in Tornado Injuries per Year (1950-2021)",
)
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
# # **The total number of tornado-related injuries and fatalities has declined overall. This can be attributed to an improved understanding of tornadoes, along with warning technology, which has led to a decline in fatalities.**
fig, ax = plt.subplots(figsize=(5, 8))
sns.countplot(y=df.st, order=df.st.value_counts().index, width=0.8)
ax.set(ylabel="State", xlabel="Number of tornadoes")
ax.set_title("Number of tornadoes per state from 1950 until 2021")
plt.show()
fat_vec = []
yr_vec = []
for yr in range(1950, 2022):
totfat = df[(df.yr == yr)].shape[0]
fat_vec.append(totfat)
yr_vec.append(yr)
df_yr = pd.DataFrame({"yr": yr_vec, "fat": fat_vec})
# fig,ax = plt.subplots(figsize=(15,5))
fig, ax = plt.subplots(figsize=(10, 6))
sns.barplot(data=df_yr, x="yr", y="fat")
ax.tick_params(axis="x", rotation=90)
ax.set(xlabel="Year", ylabel="Count", title="Number of Tornadoes per Year (1950-2021)")
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
fig, ax = plt.subplots(figsize=(10, 4))
sns.lineplot(data=df_yr, x="yr", y="fat", marker="o")
sns.regplot(data=df_yr, x="yr", y="fat", marker="o", ci=95)
ax.tick_params(axis="x", rotation=90)
ax.set(
xlabel="Year",
ylabel="Count",
title="Trend in Number of Tornadoes per Year (1950-2021)",
)
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
# # **The number of tornadoes occurring per year appears to increase, which may be linked to global warming, as greenhouse gas emissions have risen steadily since the 1950s.**
fat_vec = []
yr_vec = []
for yr in range(1950, 2022):
totfat = df[(df.yr == yr) & (df.st == "TX")].shape[0]
fat_vec.append(totfat)
yr_vec.append(yr)
df_fat = pd.DataFrame({"yr": yr_vec, "fat": fat_vec})
# fig,ax = plt.subplots(figsize=(15,5))
fig, ax = plt.subplots(figsize=(10, 6))
sns.barplot(data=df_fat, x="yr", y="fat")
ax.tick_params(axis="x", rotation=90)
ax.set(xlabel="Year", ylabel="Count", title="Tornadoes in Texas per Year (1950-2021)")
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
fig, ax = plt.subplots(figsize=(10, 4))
sns.lineplot(data=df_fat, x="yr", y="fat", marker="o")
sns.regplot(data=df_fat, x="yr", y="fat", marker="o", ci=95)
ax.tick_params(axis="x", rotation=90)
ax.set(
xlabel="Year",
ylabel="Count",
title="Trend in Tornado Fatalities per Year (1950-2021)",
)
# ax.ticklabel_format(useOffset=False, style='plain')
# Only show every other tick label
n = 2
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % n != 0:
label.set_visible(False)
plt.show()
# # **Texas has averaged roughly one tornado every three days over the last 70 years!**
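# A quick sanity check of the figure above (hedged sketch): df_fat currently holds the
# per-year Texas tornado counts built just before, so dividing the number of days in
# 1950-2021 by the total count gives the average spacing between Texas tornadoes.
total_tx_tornadoes = df_fat["fat"].sum()
total_days = 72 * 365.25  # 72 years of records, 1950-2021 inclusive
print(
    f"Texas tornadoes: {total_tx_tornadoes}, roughly one every "
    f"{total_days / total_tx_tornadoes:.1f} days"
)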
# Count the number of tornadoes and fatalities per F/EF rating. Ignore -9 since no fatalities from that.
f_list = [0, 1, 2, 3, 4, 5]
fat_count = []
tornado_count = []
for f in f_list:
count = df[df.mag == f].fat.sum()
fat_count.append(count)
count = len(df[df.mag == f])
tornado_count.append(count)
# Convert f_list to string type since categories
f_list = list(map(str, f_list))
# Put these counts in a new dataframe
df_tor_fat = pd.DataFrame(
{"mag": f_list, "num_tornadoes": tornado_count, "num_fatalities": fat_count},
index=None,
)
# Make a long format version to make easier to plot bars side by side
df_tor_fat_long = pd.melt(
df_tor_fat,
id_vars="mag",
value_vars=[
"num_tornadoes",
"num_fatalities",
],
value_name="count",
)
# plt.style.use("default")
# Plot number of tornadoes and fatalities per F rating
fig, ax = plt.subplots()
ax = sns.barplot(
data=df_tor_fat_long, x="mag", y="count", hue="variable", palette="hls"
)
# Put numbers on top of bars
for i in ax.containers:
ax.bar_label(i, fontsize=9)
plt.xlabel("F or EF Rating")
plt.title("Number of Tornadoes and Fatalities per F/EF rating (1950-2021)")
plt.show()
# # **The number of tornadoes decrease with magnitude. The number of fatalities increase with magnitude.**
df_tor_fat["avg_fatalities"] = df_tor_fat.num_fatalities / df_tor_fat.num_tornadoes
df_tor_fat
plt.style.use("ggplot")
sns.lineplot(
data=df_tor_fat,
x="mag",
y="avg_fatalities",
marker="o",
markersize=10,
color="green",
)
plt.xlabel("F/EF Rating")
plt.ylabel("Mean # Fatalities")
plt.title("Mean Fatalities per Storm in each Storm Category")
plt.show()
# Count number of tornadoes per year per category, and all together
year, count_all, count_f0, count_f1, count_f2, count_f3, count_f4, count_f5 = (
[],
[],
[],
[],
[],
[],
[],
[],
)
for yr in range(1950, 2021 + 1):
year.append(yr)
count_all.append(
len(df[(df.yr == yr) & (df.mag != -9)])
) # Don't include the unknowns
count_f0.append(len(df[(df.yr == yr) & (df.mag == 0)]))
count_f1.append(len(df[(df.yr == yr) & (df.mag == 1)]))
count_f2.append(len(df[(df.yr == yr) & (df.mag == 2)]))
count_f3.append(len(df[(df.yr == yr) & (df.mag == 3)]))
count_f4.append(len(df[(df.yr == yr) & (df.mag == 4)]))
count_f5.append(len(df[(df.yr == yr) & (df.mag == 5)]))
df_counts = pd.DataFrame(
{
"yr": year,
"all": count_all,
"f0": count_f0,
"f1": count_f1,
"f2": count_f2,
"f3": count_f3,
"f4": count_f4,
"f5": count_f5,
}
)
df_counts
plt.style.use("ggplot")
fig, ax = plt.subplots(4, 2, figsize=(10, 15))
cols = df_counts.drop(columns="yr").columns
ii = 0
for i in range(0, 4):
for j in range(0, 2):
category = cols[ii]
x = df_counts.yr
y = df_counts[category]
ax[i, j].plot(x, y, color="#FF9500")
ax[i, j].scatter(x, y, color="#000000", s=6.0) # s is for markersize
ax[i, j].set(xlabel="Years", ylabel="Count")
ax[i, j].set_title(category.upper(), loc="left")
# Draw regression line
slope, intercept = np.polyfit(x, y, 1)
ax[i, j].plot(x, slope * x + intercept, color="purple")
ii += 1
if ii >= 7:
break
ax[3, 1].axis("off")  # Blank out the last, unused, plot position
fig.suptitle("Trends in Number of Tornadoes per Year by Rating", fontsize="xx-large")
fig.tight_layout(pad=1.5)
plt.show()
|
# ## Official Website -> [quick-ml](http://antoreepjana.wixsite.com/quick-ml)
# ## Summary ->
# In this notebook, we'll learn how to create a labeled TFRecords dataset in multiple parts. This is standard industry practice: the dataset is generated as multiple TFRecords files rather than a single one.
# A multi-part TFRecords dataset is easier to manage because the size per file is reduced.
# **Note:-** You don't need TPUs to create a TFRecords dataset; this processing happens on the CPU.
# ### Installation
# Install the latest version of quick_ml
# ### Imports
# Make the necessary imports.
# Please maintain the order of the imports: Tensorflow followed by quick_ml.
# After a successful import, you'll receive an output confirming that Tensorflow was imported and showing its version.
import tensorflow as tf
import quick_ml
# Get the necessary utility function to perform the labeled dataset creation in splits.
# create_split_tfrecords_data() supports this utility. Learn more [here](https://antoreepjana.wixsite.com/quick-ml/making-custom-datasets-tfrecords) at Labeled Data Part B.
from quick_ml.tfrecords_maker import create_split_tfrecords_data
# ### Gather the Dataset
# We'll be using the Cats & Dogs dataset and transforming it into a multi-part TFRecords dataset.
# ### Create the TFRecords Dataset
# With the right set of parameters needed by **create_split_tfrecords_data**, you can begin generating your TFRecords Dataset.
# ##### To know more about the parameters, please check [here](https://antoreepjana.wixsite.com/quick-ml/making-custom-datasets-tfrecords) at Labeled Data Part B.
data_dir = "/kaggle/working/PetImages"
outfile1name = "train.tfrecords"
output1folder = "train"
outfile2name = "val.tfrecords"
output2folder = "val"
split_size_ratio = 0.8
create_split_tfrecords_data(
data_dir,
outfile1name,
output1folder,
outfile2name,
output2folder,
split_size_ratio,
num_parts1=10,
num_parts2=5,
IMAGE_SIZE=(192, 192),
)
# ##### Great! We have generated our TFRecords Dataset.
# After generating the TFRecords dataset, we might want to visualize and sanity-check it. quick_ml provides an easy way to quickly check & visualize TFRecords datasets created with quick_ml.
# **check_batch_and_labels()** is used to visualize the TFRecords Dataset created using quick_ml. Learn more [here](https://antoreepjana.wixsite.com/quick-ml/visualize-check-data)
from quick_ml.visualize_and_check_data import check_batch_and_labels
# As usual, to read & access TFRecords Dataset (labeled or unlabeled) in quick_ml, the Data Format needs to be defined first.
# The Dataset definition/format is provided either in the Dataset's description or in the output cell (if the dataset is just created).
# In our case, the latter is applicable. If you scroll up, the output of create_split_tfrecords_data() mentions the dictionary_labeled_format.
# #### Run the cell below to define the Labeled TFRecords Data Format.
dictionary_labeled = "{'image' : tf.io.FixedLenFeature([], tf.string),'label' : tf.io.FixedLenFeature([], tf.int64)}"
IMAGE_SIZE = "192,192"
from quick_ml.begin_tpu import get_labeled_tfrecord_format
get_labeled_tfrecord_format(dictionary_labeled, IMAGE_SIZE)
# ### Visualize the created tfrecords dataset
# Visualize the **train_part_3.tfrecords**, **9** units, in a grid of **3**x**3**, figure size of **(15,15)**
check_batch_and_labels(
"/kaggle/working/train/train_part_3.tfrecords", 9, 3, 3, grid_size=(15, 15)
)
# Visualize the **val_part_3.tfrecords**, **9** units, in a grid of **3**x**3**, figure size of **(15,15)**
check_batch_and_labels(
"/kaggle/working/val/val_part_3.tfrecords", 9, 3, 3, grid_size=(15, 15)
)
|
# ## Kernel description
# This is just a trial submission that uses a basic LinearRegression model and the simplest features.
# ## Import libraries
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# ## Loading data
INPUT_DIR = "/kaggle/input/icecube-neutrinos-in-deep-ice"
sensor_geometry = pd.read_csv(f"{INPUT_DIR}/sensor_geometry.csv")
train_meta = pd.read_parquet(f"{INPUT_DIR}/train_meta.parquet")
test_meta = pd.read_parquet(f"{INPUT_DIR}/test_meta.parquet")
train_data = pd.concat(
[
pd.read_parquet(f"{INPUT_DIR}/train/batch_{i}.parquet")
for i in tqdm(train_meta["batch_id"].unique()[:1])
]
)
test_data = pd.concat(
[
pd.read_parquet(f"{INPUT_DIR}/test/batch_{i}.parquet")
for i in tqdm(test_meta["batch_id"].unique()[:1])
]
)
# ## Simple feature engineering
train_data = train_data.merge(sensor_geometry, left_on="sensor_id", right_index=True)
train_features = train_data.groupby("event_id").agg(
{"charge": ["mean", "std", "sum"], "time": ["min", "max"]}
)
train_features.columns = ["_".join(col) for col in train_features.columns]
train_features = train_features.merge(train_meta, left_index=True, right_on="event_id")
test_data = test_data.merge(sensor_geometry, left_on="sensor_id", right_index=True)
test_features = test_data.groupby("event_id").agg(
{"charge": ["mean", "std", "sum"], "time": ["min", "max"]}
)
test_features.columns = ["_".join(col) for col in test_features.columns]
test_features = test_features.merge(test_meta, left_index=True, right_on="event_id")
# ## Train test split
train_X, val_X, train_y, val_y = train_test_split(
train_features.drop(["azimuth", "zenith"], axis=1),
train_features[["azimuth", "zenith"]],
test_size=0.15,
random_state=42,
)
# ## LinearRegression
clf = LinearRegression()
clf.fit(train_X.values, train_y.values)
val_pred = clf.predict(val_X.values)
# ## Predictions saving
test_pred = clf.predict(test_features.values)
pd.DataFrame(
{
"event_id": test_features.event_id.values,
"azimuth": test_pred[:, 0],
"zenith": test_pred[:, 1],
}
).to_csv("submission.csv", index=False)
pd.read_csv("submission.csv")
|
# # Exploring Ideas for ML
# ## Playground Series - S3, E12...
# ## Installing Libraries...
# Install pycaret
# ---
# ## Loading Required Libraries...
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import all the necessary libraries
from pycaret.classification import *
from imblearn.over_sampling import (
ADASYN,
BorderlineSMOTE,
KMeansSMOTE,
RandomOverSampler,
SMOTE,
SMOTENC,
SVMSMOTE,
)
from pathlib import Path # Import OS path libraries.
# ---
# ## Configuring the Notebook Parameters...
# I like to disable my Notebook Warnings.
import warnings
warnings.filterwarnings("ignore")
# Configure notebook display settings to only use 2 decimal places, tables look nicer.
pd.options.display.float_format = "{:,.2f}".format
pd.set_option("display.max_columns", 15)
pd.set_option("display.max_rows", 50)
# Define some of the notebook parameters for future experiment replication.
SEED = 42
# ---
# ## Loading the Datasets into Pandas
def read_csv_to_dataframe(file_path, delimiter=",", encoding="utf-8", header="infer"):
"""
Read data from a CSV file and load it into a pandas DataFrame.
Parameters:
file_path (str): The file path to the CSV file.
delimiter (str): The delimiter used in the CSV file (default: ',').
encoding (str): The character encoding used in the CSV file (default: 'utf-8').
header (int or str): The row number to use as the header, or 'infer' to let pandas determine the header (default: 'infer').
Returns:
pandas.DataFrame: A DataFrame containing the data from the CSV file.
"""
return pd.read_csv(file_path, delimiter=delimiter, encoding=encoding, header=header)
# Example usage:
# Assuming 'file_path' is the path to your CSV file
# data = read_csv_to_dataframe(file_path)
TRN_PATH = "/kaggle/input/playground-series-s3e12/train.csv"
TST_PATH = "/kaggle/input/playground-series-s3e12/test.csv"
SUB_PATH = "/kaggle/input/playground-series-s3e12/sample_submission.csv"
ORG_PATH = "/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
trn_data = read_csv_to_dataframe(TRN_PATH)
tst_data = read_csv_to_dataframe(TST_PATH)
org_data = read_csv_to_dataframe(ORG_PATH)
org_data = org_data[org_data["target"] == 1]
sub_data = read_csv_to_dataframe(SUB_PATH)
# ---
# ## Merging Datasets...
import pandas as pd
def append_dataframes(df1, df2, reset_index=True):
"""
Append two pandas DataFrames along the rows.
Parameters:
df1 (pandas.DataFrame): The first DataFrame.
df2 (pandas.DataFrame): The second DataFrame.
reset_index (bool): Whether to reset the index of the resulting DataFrame (default: True).
Returns:
pandas.DataFrame: An appended DataFrame.
"""
appended_df = pd.concat([df1, df2], axis=0, ignore_index=reset_index)
return appended_df
trn_data = append_dataframes(trn_data, org_data)
# ---
def analyze_dataframe(df):
"""
Analyze a pandas DataFrame and provide a summary of its characteristics.
Parameters:
df (pandas.DataFrame): The input DataFrame to analyze.
Returns:
None
"""
print("DataFrame Information:")
print("----------------------")
display(df.info(verbose=True, show_counts=True))
print("\n")
print("DataFrame Description:")
print("----------------------")
display(df.describe(include="all"))
print("\n")
print("Number of Null Values:")
print("----------------------")
display(df.isnull().sum())
print("\n")
print("Number of Duplicated Rows:")
print("--------------------------")
display(df.duplicated().sum())
print("\n")
print("Number of Unique Values:")
print("------------------------")
display(df.nunique())
print("\n")
print("DataFrame Shape:")
print("----------------")
print(f"Rows: {df.shape[0]}, Columns: {df.shape[1]}")
# Example usage:
# Assuming 'data' is your DataFrame
# analyze_dataframe(data)
analyze_dataframe(trn_data)
# ---
TARGET = "target"
ignore = ["id", "target"]
numeric_feat = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
categ_feat = [
feat for feat in trn_data.columns if feat not in numeric_feat and feat not in ignore
]
# numeric_feat = ['cond', 'calc', 'gravity'] # Another options for experimentation...
features = categ_feat + numeric_feat
print("Features: ", features)
# Pycaret configuration.
clf = setup(
data=trn_data,
target=TARGET,
categorical_features=categ_feat,
numeric_features=numeric_feat,
normalize=True,
ignore_features=["id"],
normalize_method="zscore",
fix_imbalance=True,
fix_imbalance_method=SMOTE(),
remove_outliers=True,
outliers_method="iforest",
fold_strategy="stratifiedkfold",
fold=15,
use_gpu=True,
session_id=49,
)
# Selecting what model should be trained.
best_model = compare_models(sort="auc", fold=15)
# ---
# ## Creating and Training an XGBoost Model...
# Define the base models
xgb = create_model("xgboost", fold=15)
tuned_xgb = tune_model(xgb, fold=15)
plot_model(tuned_xgb, plot="feature")
unseen_predictions_xgb = predict_model(tuned_xgb, data=tst_data, raw_score=True)
unseen_predictions_xgb
# ...
sub_data["target"] = unseen_predictions_xgb["prediction_score_1"]
sub_data.to_csv("pycaret_xgb_submission.csv", index=False)
sub_data.head()
# ---
# Define the base models
ext = create_model("et", fold=15)
tuned_ext = tune_model(ext, fold=15)
plot_model(tuned_ext, plot="feature")
unseen_predictions_ext = predict_model(tuned_ext, data=tst_data, raw_score=True)
unseen_predictions_ext
# ...
sub_data["target"] = unseen_predictions_ext["prediction_score_1"]
sub_data.to_csv("pycaret_ext_submission.csv", index=False)
sub_data.head()
# ---
# Define the base models
lda = create_model("lda", fold=15)
tuned_lda = tune_model(lda, fold=15)
plot_model(tuned_lda, plot="feature")
unseen_predictions_lda = predict_model(tuned_lda, data=tst_data, raw_score=True)
unseen_predictions_lda
# ...
sub_data["target"] = unseen_predictions_lda["prediction_score_1"]
sub_data.to_csv("pycaret_lda_submission.csv", index=False)
sub_data.head()
# ## Model Blending...
# ...
blender = blend_models([tuned_lda, tuned_ext, tuned_xgb])
unseen_predictions_blender = predict_model(blender, data=tst_data, raw_score=True)
# ...
sub_data["target"] = unseen_predictions_blender["prediction_score_1"]
sub_data.to_csv("pycaret_blend_submission.csv", index=False)
sub_data.head()
# ---
# # Training a DNN Model, Using Keras
# %%time
# import tensorflow as tf
# from tensorflow.keras.models import Model
# from tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler, EarlyStopping
# from tensorflow.keras.layers import Dense, Input, InputLayer, Add, BatchNormalization, Dropout
# from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
# import random
# import datetime
# %%time
# def nn_model():
# '''
# '''
# L2 = 65e-6
# activation_func = 'swish'
# inputs = Input(shape = (len(features)))
# x = Dense(512,
# #use_bias = True,
# kernel_regularizer = tf.keras.regularizers.l2(L2),
# activation = activation_func)(inputs)
# x = BatchNormalization()(x)
# x = Dense(256,
# #use_bias = True,
# kernel_regularizer = tf.keras.regularizers.l2(L2),
# activation = activation_func)(x)
# x = BatchNormalization()(x)
# x = Dense(16,
# #use_bias = True,
# kernel_regularizer = tf.keras.regularizers.l2(L2),
# activation = activation_func)(x)
# x = BatchNormalization()(x)
# x = Dense(1 ,
# #use_bias = True,
# #kernel_regularizer = tf.keras.regularizers.l2(L2),
# activation = 'sigmoid')(x)
# model = Model(inputs, x)
# return model
# %%time
# architecture = nn_model()
# architecture.summary()
# %%time
# # Defining model parameters...
# BATCH_SIZE = 2048
# EPOCHS = 512
# EPOCHS_COSINEDECAY = 512
# DIAGRAMS = True
# USE_PLATEAU = False
# INFERENCE = False
# VERBOSE = 0
# TARGET = 'target'
# %%time
# # Defining model training function...
# def fit_model(X_train, y_train, X_val, y_val, run = 0):
# '''
# '''
# lr_start = 0.01
# start_time = datetime.datetime.now()
# scaler = StandardScaler()
# #scaler = RobustScaler()
# #scaler = MinMaxScaler()
# X_train = scaler.fit_transform(X_train)
# epochs = EPOCHS
# lr = ReduceLROnPlateau(monitor = 'val_loss', factor = 0.7, patience = 4, verbose = VERBOSE)
# es = EarlyStopping(monitor = 'val_loss',patience = 12, verbose = 1, mode = 'min', restore_best_weights = True)
# tm = tf.keras.callbacks.TerminateOnNaN()
# callbacks = [lr, es, tm]
# # Cosine Learning Rate Decay
# if USE_PLATEAU == False:
# epochs = EPOCHS_COSINEDECAY
# lr_end = 0.0002
# def cosine_decay(epoch):
# if epochs > 1:
# w = (1 + math.cos(epoch / (epochs - 1) * math.pi)) / 2
# else:
# w = 1
# return w * lr_start + (1 - w) * lr_end
# lr = LearningRateScheduler(cosine_decay, verbose = 0)
# callbacks = [lr, tm]
# model = nn_model()
# optimizer_func = tf.keras.optimizers.Adam(learning_rate = lr_start)
# loss_func = tf.keras.losses.BinaryCrossentropy()
# model.compile(optimizer = optimizer_func, loss = loss_func)
# X_val = scaler.transform(X_val)
# validation_data = (X_val, y_val)
# history = model.fit(X_train,
# y_train,
# validation_data = validation_data,
# epochs = epochs,
# verbose = VERBOSE,
# batch_size = BATCH_SIZE,
# shuffle = True,
# callbacks = callbacks
# )
# history_list.append(history.history)
# print(f'Training loss:{history_list[-1]["loss"][-1]:.5f}')
# callbacks, es, lr, tm, history = None, None, None, None, None
# y_val_pred = model.predict(X_val, batch_size = BATCH_SIZE, verbose = VERBOSE)
# score = roc_auc_score(y_val, y_val_pred)
# print(f'Fold {run}.{fold} | {str(datetime.datetime.now() - start_time)[-12:-7]}'
# f'| AUC: {score:.5f}')
# score_list.append(score)
# tst_data_scaled = scaler.transform(tst_data[features])
# tst_pred = model.predict(tst_data_scaled)
# predictions.append(tst_pred)
# return model
# %%time
# from sklearn.model_selection import KFold, StratifiedKFold
# from sklearn.metrics import roc_auc_score, roc_curve
# import math
# # Create empty lists to store NN information...
# history_list = []
# score_list = []
# predictions = []
# # Define kfolds for training purposes...
# kf = StratifiedKFold(n_splits = 10)
# for fold, (trn_idx, val_idx) in enumerate(kf.split(trn_data[features], trn_data[TARGET])):
# X_train, X_val = trn_data.iloc[trn_idx][features], trn_data.iloc[val_idx][features]
# y_train, y_val = trn_data.iloc[trn_idx][TARGET], trn_data.iloc[val_idx][TARGET]
# fit_model(X_train, y_train, X_val, y_val)
# print(f'OOF AUC: {np.mean(score_list):.5f}')
# %%time
# # Populated the prediction on the submission dataset and creates an output file
# sub_data['target'] = np.array(predictions).mean(axis = 0)
# sub_data.to_csv('keras_dnn_submission.csv', index = False)
# sub_data
|
import numpy as np
import pandas as pd
import json
from PIL import Image
import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
# # Load the files
TRAIN_DIR = "../input/cassava-leaf-disease-classification/train_images/"
TEST_DIR = "../input/cassava-leaf-disease-classification/test_images/"
labels = json.load(
open("../input/cassava-leaf-disease-classification/label_num_to_disease_map.json")
)
train = pd.read_csv("../input/cassava-leaf-disease-classification/train.csv")
train.head()
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(DEVICE)
# # Set the training parameters
BATCH = 32
EPOCHS = 20
LR = 0.0001
IMG_SIZE = 256
# # Split the training images and labels
X_Train, Y_Train = train["image_id"].values, train["label"].values
X_Test = [name for name in (os.listdir(TEST_DIR))]
# # Define the data-loading class
class GetData(Dataset):
def __init__(self, Dir, FNames, Labels, Transform):
self.dir = Dir
self.fnames = FNames
self.transform = Transform
self.labels = Labels
def __len__(self):
return len(self.fnames)
def __getitem__(self, index):
x = Image.open(os.path.join(self.dir, self.fnames[index]))
if "train" in self.dir:
return self.transform(x), self.labels[index]
elif "test" in self.dir:
return self.transform(x), self.fnames[index]
# # Define the Transform (augmentation; but why is it also applied to the test set? A non-augmenting eval-time transform is sketched after this block)
Transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Resize((IMG_SIZE, IMG_SIZE)),
transforms.RandomRotation(90),
transforms.RandomHorizontalFlip(p=0.5),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
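# A minimal sketch (not from the original notebook): at evaluation/inference time the
# random augmentations would normally be skipped, keeping only the deterministic steps.
EvalTransform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Resize((IMG_SIZE, IMG_SIZE)),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
)
# Hypothetical usage: testset = GetData(TEST_DIR, X_Test, None, EvalTransform)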
trainset = GetData(TRAIN_DIR, X_Train, Y_Train, Transform)
trainloader = DataLoader(trainset, batch_size=BATCH, shuffle=True, num_workers=4)
# Why aren't labels passed for the test data? -> There is no test label file in the first place.
testset = GetData(TEST_DIR, X_Test, None, Transform)
testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=4)
# # Define the model
model = torchvision.models.resnet50()
model.fc = nn.Linear(2048, 5, bias=True)
model = model.to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
for epoch in range(EPOCHS):
train_loss = 0.0
model = model.train()
for i, (images, labels) in enumerate(trainloader):
images = images.to(DEVICE)
labels = labels.to(DEVICE)
logits = model(images)
loss = criterion(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # TODO: look this up
        train_loss += loss.detach().item()
    # TODO: look this up
    model.eval()
    print("Epoch: %d | Loss: %.4f" % (epoch, train_loss / len(trainloader)))
|
# ## Analysing Telecom churn data using machine learning and python
# - Customer churn is a key business concept that determines the number of customers that stop doing business with a specific company.
# - With 21 predictor variables we need to predict whether a particular customer will switch to another telecom provider or not. In telecom terminology, this is referred to as churning and not churning, respectively.
# ## Step - 1: Importing and Merging Data
# First import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# importing customer data set
cust_data = pd.read_csv("/Users/sakshimunde/Downloads/customer_data.csv")
cust_data.head()
# seeing shape of the customer data
cust_data.shape
# 7043 rows and 5 columns
# importing internet data
internet_data = pd.read_csv("/Users/sakshimunde/Downloads/internet_data.csv")
internet_data.head()
# seeing the shape of internet_data
internet_data.shape
# importing churn_data
churn_data = pd.read_csv("/Users/sakshimunde/Downloads/churn_data.csv")
churn_data.head()
# see dimensions of churn_data
churn_data.shape
# #### Merging all data files
# merging cust and internet data on customer id
df = pd.merge(cust_data, internet_data, how="inner", on="customerID")
df.head()
# merging df & churn data
telecom = pd.merge(df, churn_data, how="inner", on="customerID")
pd.set_option("display.max_columns", None) # this will show all columns of a dataset
# # Step 2 :Inspecting the dataframe
# lets see the final dataframe with all predictor vars
telecom.head()
# Let's look at columns ,their types & non null count
telecom.info()
# - total charges should be a numeric column.
# - There are many columns that needed to be converted to a category type.
# - We can see no null values are there in the data.
# - Customer Id is not useful so we will drop it.
# let's look at the statistical aspects of the dataframe
telecom.describe()
# let's look at unique values of all category column
print("\n1.Partner")
print(telecom.Partner.unique())
print("\n2.Dependents")
print(telecom.Dependents.unique())
print("\n3.OnlineSecurity")
print(telecom.OnlineSecurity.unique())
print("\n4.OnlineBackup")
print(telecom.OnlineBackup.value_counts())
print("\n1.DeviceProtection")
print(telecom.DeviceProtection.value_counts())
print("\n2.TechSupport")
print(telecom.TechSupport.value_counts())
print("\n3.StreamingTV")
print(telecom.StreamingTV.unique())
print("\n4.PhoneService")
print(telecom.PhoneService.unique())
print("\n5.PaperlessBilling")
print(telecom.PaperlessBilling.value_counts())
print("\n6.Churn")
print(telecom.Churn.value_counts())
# - The count of the level ‘No internet service’ is the same for all, i.e. 1526. Can you explain briefly why this has happened?
# This happens because the level ‘No internet service’ just tells you whether a user has internet service or not. Now because the number of users not having an internet service is the same, the count of this level in all of these variables will be the same. You can also check the value counts of the variable ‘InternetService’ and you’ll see that the output you’ll get is:
# Fiber Optic 3096
# DSL 2421
# No 1526
# Coincidence? No!
# This information is already contained in the variable ‘InternetService’ and hence, the count will be the same in all the variables with the level ‘No internet service’. This is actually also the reason we chose to drop this particular level.
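# A quick check of the point above (hedged; the merged dataframe is named `telecom` as created earlier):
# the 'No' count of InternetService should match the 'No internet service' count in the service columns.
print(telecom["InternetService"].value_counts())
print(telecom["OnlineSecurity"].value_counts())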
# - --------
# - We can see Partner, Dependents, PhoneService, PaperlessBilling & Churn are binary (yes/no) variables; let's convert them to 0 and 1.
# Step 3: Data Preparation
# --
# - Converting binary variables yes/no to 0/1
#
# list of vars to map
binary_var = ["Partner", "Dependents", "PhoneService", "PaperlessBilling", "Churn"]
# applying the map function to the list
telecom[binary_var] = telecom[binary_var].apply(lambda x: x.map({"Yes": 1, "No": 0}))
# let's look at the data
telecom.head()
# - We can see yes & no got converted to 1's and 0's
# - Now let's convert categorical vars with >2 levels to dummy vars
# first we will convert gender,InternetService,Contract,PaymentMethod categorical vars to dummies
dummy1 = pd.get_dummies(
telecom[["gender", "InternetService", "Contract", "PaymentMethod"]], drop_first=True
)
dummy1.head()
# let's now concat dummy vars dataframe with telecom dataframe
telecom = pd.concat([telecom, dummy1], axis=1)
telecom.head()
# Now creating dummy vars for the remaining categorical vars
print(telecom.MultipleLines.unique())
# let's convert categorical vars to dummies
ML = pd.get_dummies(telecom["MultipleLines"], prefix="MultipleLines")
ML.head()
# let's drop MultipleLines_No phone service as it is not useful either
# for n levels we should keep only n-1 dummy columns.
ML1 = ML.drop(["MultipleLines_No phone service"], axis=1)
ML1.head()
# - We could keep all n levels, but the extra column would be redundant anyway.
# - OnlineSecurity, OnlineBackup, DeviceProtection, TechSupport, StreamingTV and StreamingMovies are the remaining categorical vars that need to be converted to dummy vars.
# Converting OnlineSecurity to dummy var
OS = pd.get_dummies(telecom["OnlineSecurity"], prefix="OnlineSecurity")
OS.head()
# dropping the OnlineSecurity_No internet service column because for n levels we should keep n-1 dummies
OS1 = OS.drop(["OnlineSecurity_No internet service"], axis=1)
OS1.head()
# OnlineBackup
OB = pd.get_dummies(telecom["OnlineBackup"], prefix="OnlineBackup")
OB.head()
# drop OnlineBackup_No internet service
OB = OB.drop(["OnlineBackup_No internet service"], axis=1)
OB.head()
# DeviceProtection
DP = pd.get_dummies(telecom["DeviceProtection"], prefix="DeviceProtection")
DP.head()
# drop DeviceProtection_No internet service
DP = DP.drop(["DeviceProtection_No internet service"], axis=1)
DP.head()
# TechSupport
TS = pd.get_dummies(telecom["TechSupport"], prefix="TechSupport")
TS.head()
# drop TechSupport_No internet service
TS = TS.drop(["TechSupport_No internet service"], axis=1)
TS.head()
# StreamingTV
ST = pd.get_dummies(telecom["StreamingTV"], prefix="StreamingTV")
ST.head()
# drop StreamingTV_No internet service
ST = ST.drop(["StreamingTV_No internet service"], axis=1)
ST.head()
# StreamingMovies
SM = pd.get_dummies(telecom["StreamingMovies"], prefix="StreamingMovies")
SM.head()
# DROPPING STREAMING MOVIES NO INTERNET SERVICE
SM = SM.drop(["StreamingMovies_No internet service"], axis=1)
SM.head()
# CONCATENATING ALL DUMMIES WITH THE TELECOM DATAFRAME
telecom = pd.concat([telecom, ML1], axis=1)
telecom = pd.concat([telecom, OS1], axis=1)
telecom = pd.concat([telecom, OB], axis=1)
telecom = pd.concat([telecom, DP], axis=1)
telecom = pd.concat([telecom, TS], axis=1)
telecom = pd.concat([telecom, ST], axis=1)
telecom = pd.concat([telecom, SM], axis=1)
telecom.head()
# ###### Drop repeated vars
# - We have created dummy vars, so we can drop the original repeated vars
# We have created dummies for the below vars, so we can drop them
telecom = telecom.drop(
[
"gender",
"InternetService",
"Contract",
"PaymentMethod",
"MultipleLines",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
],
axis=1,
)
telecom.head()
# Customer id is not useful column so let's drop it
telecom = telecom.drop(["customerID"], axis=1)
# There are blank spaces in the TotalCharges column, which is why it shows up as an object dtype
telecom["TotalCharges"] = telecom["TotalCharges"].str.replace(" ", "0")
telecom["TotalCharges"] = telecom["TotalCharges"].astype(float)
telecom["TotalCharges"].shape
# Seeing the data type
telecom["TotalCharges"].dtype
# checking whether there are null values or not
telecom.isnull().sum()
# seeing data types of all variables
telecom.info()
telecom.head()
# ##### Checking for outliers
# - SeniorCitizen, tenure, MonthlyCharges and TotalCharges are the numerical columns, so we will check whether outliers are present in them.
# Checking for outliers in continuous variables
numerical_val = telecom[["SeniorCitizen", "tenure", "MonthlyCharges", "TotalCharges"]]
numerical_val.describe(percentiles=[0.25, 0.50, 0.75, 0.90, 0.95, 0.99])
# - We can see there are no outliers. All values are increasing gradually.
# # Step 4 : Splitting data into train and test sets
# assigning all feature vars to X
# Splitting data to features and target.
X = telecom.drop(["Churn"], axis=1)
X.head()
# assigning churn(target) variable to y axis
y = telecom["Churn"]
y.head()
# import train test split library
from sklearn.model_selection import train_test_split
# splitting data into train and test
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.30, random_state=100
)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# ## Step 5 : Feature Scaling
# - Scaling helps us in faster convergence of gradient descent.
# - Standard scaler centers mean to 0
# - The formula for standardising a value in a dataset is given by:
# - (X − μ)/σ
# - Min max scaling compress values between min 0 and max 1
# --------
# - We do 'fit_transform' on the train set but just 'transform' on the test set. Why is this done?
# - We don't want the scaler to learn anything from the test set, so we don't fit it on the test data.
# - If we fit on the test data as well, the train and test sets would be scaled with different means and standard deviations; both should be scaled with the statistics learned from the train set. (A tiny numeric illustration of the two scalers follows below.)
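# A tiny numeric illustration (toy values, not part of the modelling flow) of the two
# scalers mentioned above: StandardScaler gives mean 0 / std 1, MinMaxScaler maps to [0, 1].
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np

toy = np.array([[1.0], [18.25], [72.0], [118.8]])
print(StandardScaler().fit_transform(toy).ravel())  # (x - mean) / std
print(MinMaxScaler().fit_transform(toy).ravel())  # (x - min) / (max - min)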
# We will use standard scaler to scale all large values
from sklearn.preprocessing import StandardScaler
# creating an object of standard scaler as in sklearn we create an object of a class
scaler = StandardScaler()
# fit and transform the numerical columns
X_train[["tenure", "MonthlyCharges", "TotalCharges"]] = scaler.fit_transform(
X_train[["tenure", "MonthlyCharges", "TotalCharges"]]
)
X_train.head()
#
# - The variables had these ranges before standardisation:
# - Tenure = 1 to 72
# - Monthly charges = 18.25 to 118.80
# - Total charges = 18.8 to 8685
#
# - After standardisation, the ranges of the variables changed to:
# - Tenure = -1.28 to +1.61
# - Monthly charges = -1.55 to +1.79
# - Total charges = -0.99 to 2.83
# - Clearly, none of the variables will have a disproportionate effect on the model’s results now.
# churn data
# --
# let's see what is the percentage of churn data
# churn % rate
churn = (sum(telecom["Churn"]) / len(telecom["Churn"].index)) * 100
churn
# - We have 27% churn rate.
# ## step 6 : Looking at Correlations
# - Let's see the correlation between the variables.
# plotting correlation matrix to see the relationship
plt.figure(figsize=[35, 15])
sns.heatmap(telecom.corr(), annot=True, cmap="Greens")
plt.show()
# - We can see that these dummy variables are highly correlated with their complementary dummy variables, i.e. ‘MultipleLines_No’ with ‘MultipleLines_Yes’ or ‘OnlineSecurity_No’ with ‘OnlineSecurity_Yes’.
# - So it is better that we drop one of these variables from each pair as they won’t add much value to the model.
# - The choice of which of these pair of variables we desire to drop is completely up to us; we’ve chosen to drop all the 'Nos' because the 'Yeses' are generally more interpretable and easy-to-work-with variables.
# - Let's drop this inter correlated vars: also called multicollinearity.
# dropping the variables from both train and test data sets
X_train = X_train.drop(
[
"MultipleLines_No",
"OnlineSecurity_No",
"OnlineBackup_No",
"DeviceProtection_No",
"TechSupport_No",
"StreamingTV_No",
"StreamingMovies_No",
],
axis=1,
)
X_test = X_test.drop(
[
"MultipleLines_No",
"OnlineSecurity_No",
"OnlineBackup_No",
"DeviceProtection_No",
"TechSupport_No",
"StreamingTV_No",
"StreamingMovies_No",
],
axis=1,
)
# Now after dropping some of the dummy vars ,let's see the relation between rest of the vars
plt.figure(figsize=[20, 10])
sns.heatmap(X_train.corr(), annot=True)
plt.show()
# # Step 7 : Model Building
# - Now that we have completed all the pre-processing steps, inspected the correlation values and have eliminated a few variables, it’s time to build our first model.
# import statsmodel
import statsmodels.api as sm
# first add a constant, as statsmodels does not add an intercept by default
X_train_sm = sm.add_constant(X_train)
X_train_sm.head()
# Building the logistic regression model
logm1 = sm.GLM(y_train, X_train_sm, family=sm.families.Binomial()).fit()
logm1
# now our model is built. Let's see the summary
logm1.summary()
# - Based on the summary statistics, we infer that many of the variables are insignificant and hence, we need to do some feature elimination.
# - Since the number of features are huge, let's first start with an automated feature selection technique (RFE) and then move on to manual feature elimination (using p-values and VIFs).
# ## Step 8: Feature selection using RFE
# import logistic regression from sklearn
from sklearn.linear_model import LogisticRegression
lor = LogisticRegression()
lor
# now import RFE
from sklearn.feature_selection import RFE
# now select number of top vars we want.
rfe = RFE(lor, n_features_to_select=15) # running RFE with 15 vars as output
rfe = rfe.fit(X_train, y_train)
rfe
# let's see the top 15 columns
list(zip(X_train.columns, rfe.ranking_))
# let's keep selected columns in 'col' var
col = X_train.columns[rfe.support_]
col
# - Even among these 15, some features may still be insignificant, so let's see which ones.
# ##### Creating the model using statsmodel
# now that we have the top 15 columns, we will build a model using these vars
# assign the top 15 columns to X_train_rfe
X_train_rfe = X_train[col]
X_train_rfe.head()
# now let's add constant to train data
X_train_sm = sm.add_constant(X_train_rfe)
X_train_sm.head()
# let's build our 2nd model after RFE
logm2 = sm.GLM(y_train, X_train_sm, family=sm.families.Binomial()).fit()
logm2
# let's see summary
logm2.summary()
# - We can see all p values are <0.05 or 5% so all vars are significant.
# - 'Binomial()' in the 'family' argument tells statsmodels that it needs to fit a logit curve to a binomial data (i.e. in which the target will have just two classes, here 'Churn' and 'Non-Churn').
# Getting the predicted values on the train set
y_train_pred = logm2.predict(X_train_sm)
y_train_pred[:10]
# reshaping because the predictions come back as a pandas Series; flatten to a 1-D array
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred
# Creating a dataframe with the actual churn data and the predicted probabilities
y_train_pred_final = pd.DataFrame({"churn": y_train.values, "churn_prob": y_train_pred})
y_train_pred_final
# Now add a custid column
y_train_pred_final["custid"] = y_train.index
y_train_pred_final.head()
# Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
y_train_pred_final["Predicted"] = y_train_pred_final["churn_prob"].map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final
# - 0.5 is a threshold value. The threshold of 0.5 chosen as of now is completely arbitrary.
# ### Confusion matrix
from sklearn import metrics
confusion = metrics.confusion_matrix(
y_train_pred_final["churn"], y_train_pred_final["Predicted"]
)
confusion
metrics.accuracy_score(y_train_pred_final["churn"], y_train_pred_final["Predicted"])
# - Accuracy is 81% which is a good % value to begin with.
# - So far we have only selected features based on RFE.
# - Further elimination of features using the p-values and VIFs manually is yet to be done.
# -----
# - We saw in the pairwise correlations, there are high values of correlations present between the 15 features, i.e. there is still some multicollinearity among the features.
# - So we definitely need to check the VIFs as well to further eliminate the redundant variables.
# - VIF calculates how well one independent variable is explained by all the other independent variables combined.
# ## Checking VIF'S
# - Drop features with VIF > 5.
# # Steps :
# - 1. Find the VIF values
# - 2. Manual feature elimination
# - 3. Build a model and fit it
# - 4. See the summary and, based on p-values, eliminate vars with p > 0.05
# - 5. See the VIF values again. Repeat this process until only significant variables remain. (A manual VIF computation is sketched below for intuition.)
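# A hedged side note: VIF for a feature equals 1 / (1 - R^2), where R^2 is obtained by
# regressing that feature on all the other candidate features. A minimal manual check
# for one feature (col[0] is just an arbitrary example column from the RFE selection):
from sklearn.linear_model import LinearRegression

example_feat = col[0]
others = [c for c in col if c != example_feat]
r2 = LinearRegression().fit(X_train[others], X_train[example_feat]).score(
    X_train[others], X_train[example_feat]
)
print("Manual VIF for", example_feat, ":", round(1 / (1 - r2), 2))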
# import variance inflation factor
from statsmodels.stats.outliers_influence import variance_inflation_factor
# VIF
vif = pd.DataFrame()
vif["Features"] = X_train[col].columns
vif["VIF"] = [
variance_inflation_factor(X_train[col].values, i)
for i in range(X_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - PhoneService has a very high VIF of about 9, so let's drop it. A VIF of 9 means PhoneService is strongly explained by the other independent vars, i.e. multicollinearity.
# ## MANUAL FEATURE ELIMINATION
# #### dropping PhoneService
# let's see all 15 columns that were selected
col
# Now from this columns let's drop phone service
col = col.drop("PhoneService")
col
# Now once again we need to build model
X_train_rfe = X_train[col]
X_train_rfe.head()
# now we will add constant to X train data set
X_train_sm = sm.add_constant(X_train_rfe)
X_train_sm.head()
# #### Building and fitting model after PhoneService is dropped
# now that constant is added let's build(mx+c) our model and fit it.After fitting only we will get parameters.
# this is our 3rd logistic model
logm3 = sm.GLM(
y_train, X_train_sm, family=sm.families.Binomial()
).fit() # family is binomial
logm3
# now let's see summary
logm3.summary()
# - The p-values of all features are significant. Let's see the VIF values.
# - We also need to check the accuracy score to see whether it changed.
# #### Creating a predicted var
y_train_pred = logm3.predict(X_train_sm)
y_train_pred
# we will reshape y_train_pred
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred
# Now y_train_pred got reshaped.
y_train_pred_final = pd.DataFrame({"churn": y_train, "churn_prob": y_train_pred})
y_train_pred_final
# now let's add predicted
y_train_pred_final["Predicted"] = y_train_pred_final["churn_prob"].map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final
# see confusion matrix
confusion = metrics.confusion_matrix(
y_train_pred_final.churn, y_train_pred_final.Predicted
)
confusion
metrics.accuracy_score(y_train_pred_final.churn, y_train_pred_final.Predicted)
# - We can see there is no big change in accuracy. So dropping the PhoneService column didn't affect our accuracy, which means PhoneService was a redundant or insignificant feature
# - Let's see vif value
# Seeing VIF after dropping phone service feature
vif = pd.DataFrame()
vif["Features"] = X_train[col].columns
vif["VIF"] = [
variance_inflation_factor(X_train[col].values, i)
for i in range(X_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# #### dropping TotalCharges
# TotalCharges has a VIF of 7.53, which is high; it means TotalCharges is strongly related to the other
# independent vars, i.e. multicollinearity, so we drop this column
col = col.drop("TotalCharges")
col
# Now again we will build a model, to see what changes happened in our model after dropping total charges feature
X_train_sm = sm.add_constant(X_train[col])
# build a logistic model once again after dropping total charges
logm4 = sm.GLM(y_train, X_train_sm, family=sm.families.Binomial()).fit()
logm4.summary()
# we can see MultipleLines_Yes has a high p-value of about 48%, which means it is insignificant
# but first we will check the accuracy after dropping TotalCharges
y_train_pred = logm4.predict(X_train_sm).values.reshape(-1)
y_train_pred
# Creating a dataframe
y_train_pred_final = pd.DataFrame({"churn": y_train, "churn_prob": y_train_pred})
y_train_pred_final.head()
y_train_pred_final["Predicted"] = y_train_pred_final["churn_prob"].map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final
# now create a confusion matrix
confusion = metrics.confusion_matrix(
y_train_pred_final.churn, y_train_pred_final.Predicted
)
print(confusion)
# Let's see the accuracy score
print(metrics.accuracy_score(y_train_pred_final.churn, y_train_pred_final.Predicted))
# - The accuracy moved from about 81% to 79%, which is not a big change
# now let's see vif value
vif = pd.DataFrame()
vif["features"] = X_train[col].columns
vif["VIF"] = [
variance_inflation_factor(X_train[col].values, i)
for i in range(X_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif
# - All values are within range. The VIF values of all vars are < 5, which means the remaining vars are acceptable.
# - But we saw from the summary that the MultipleLines_Yes column has a p-value of about 48%, which is very high.
# #### dropping Mutiple lines yes
col = col.drop("MultipleLines_Yes")
# assigning the remaining columns to X_train_rfe after dropping MultipleLines_Yes
X_train_rfe = X_train[col]
X_train_rfe.head()
# Building logistic model
# add constant
X_train_sm = sm.add_constant(X_train_rfe)
logm5 = sm.GLM(y_train, X_train_sm, family=sm.families.Binomial()).fit()
logm5.summary()
# Creating a predictive value of train data
y_train_pred = logm5.predict(X_train_sm).values.reshape(-1)
y_train_pred
y_train_pred_final = pd.DataFrame({"churn": y_train, "churn_prob": y_train_pred})
y_train_pred_final
y_train_pred_final["Predicted"] = y_train_pred_final["churn_prob"].map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final
# finding accuracy of the model
metrics.accuracy_score(y_train_pred_final.churn, y_train_pred_final.Predicted)
# - So it was a good idea to drop MultipleLines_Yes: it was redundant, since our accuracy score has not changed.
#
# Now let's see vif value
vif = pd.DataFrame()
vif["features"] = X_train[col].columns
vif["VIF"] = [
variance_inflation_factor(X_train[col].values, i)
for i in range(X_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - The VIF values and p-values both look good now, so no further features need to be dropped.
# - Now we can go with this model and make predictions using this model.
# LET'S VIEW CONFUSION MATRIX
confusion = metrics.confusion_matrix(
y_train_pred_final.churn, y_train_pred_final.Predicted
)
confusion
# PREDICTED : not_churn | churn
# ACTUAL :
# not_churn : 3243 , 384 : TN , FP
# churn : 595 , 708 : FN , TP
# ##### Accuracy is often not the best metric
# - From the matrix above, 1303 customers actually churned (708 + 595), but we predicted only 708 of them as churned and 595 as not churned. Relying on accuracy alone is risky because it hides this kind of error.
# - Since those customers are predicted wrongly, the company won't be able to give offers to the remaining 46% of 'churn' customers, and they could switch to a competitor!
#
# - Catching only 708 of the 1303 churners means only about 54% of churning customers are identified.
# - So although accuracy is about 80%, the model only predicts 54% of churn cases correctly.
# ----
# - In essence, what’s happening here is that you care more about one class (class='churn') than the other.
# - This is a very common situation in classification problems - you almost always care more about one class than the other.
# - On the other hand, the accuracy tells you the model's performance on both classes combined - which is fine, but not the most important metric.
# ----
# - This brings us to two of the most commonly used metrics to evaluate a classification model:
# - Sensitivity &
# - Specificity
# ## Metrics beyond simply accuracy
TP = confusion[1, 1] # true positive
TN = confusion[0, 0] # true negatives
FP = confusion[0, 1] # false positives
FN = confusion[1, 0] # false negatives
# Let's see sensitivity of our logistic regression
sensitivity = TP / float(TP + FN)
({"SENSITIVITY": sensitivity})
# - Only about 54% of the actual churners are correctly predicted as churned.
# LET'S SEE SPECIFICITY OF LOGISTICE REGRESSION.
specificity = TN / float(TN + FP)
print({"SPECIFICITY": specificity})
# 89% of the customers who did not churn were correctly predicted as not churned
# false positive rate : predicted as churned, when actually not churned
print({"FALSE POSITIVE": FP / float(FP + TN)})
# about 10% of customers didn't churn but were predicted as churned.
# - There is a huge difference between specificity and sensitivity. This is because the THRESHOLD we chose was arbitrary, with no particular logic behind it.
# - So it might not be the ideal cut-off point for classification, which is why we might be getting such a low sensitivity and high specificity. So how do we find the ideal threshold/cutoff point?
#
# # step 9 : Plotting ROC curve
# An ROC curve demonstrates several things:
# - It shows the tradeoff between sensitivity and specificity (any increase in sensitivity will be accompanied by a decrease in specificity).
# - The closer the curve follows the left-hand border and then the top border of the ROC space, the more accurate the test.
# - The closer the curve comes to the 45-degree diagonal of the ROC space, the less accurate the test.
# finding FPR and TPR for all thresholds from 0.0 to 0.9
def draw_roc(actual, probs):
fpr, tpr, thresholds = metrics.roc_curve(actual, probs, drop_intermediate=False)
auc_score = metrics.roc_auc_score(actual, probs)
plt.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % auc_score)
plt.plot([0, 1], [0, 1])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.title("Receiver operating characteristic example:ROC")
plt.legend(loc="lower right")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()
return None
# getting values for fpr and tpr from actual y and predicted y values
fpr, tpr, thresholds = metrics.roc_curve(
y_train_pred_final.churn, y_train_pred_final.churn_prob, drop_intermediate=False
)
fpr, tpr, thresholds
# plotting roc curve
draw_roc(y_train_pred_final.churn, y_train_pred_final.churn_prob)
# # Step 10 : Finding the optimal cutoff point
numbers = [float(x / 10) for x in range(10)]
numbers
for i in numbers:
y_train_pred_final[i] = y_train_pred_final.churn_prob.map(
lambda k: 1 if k > i else 0
)
y_train_pred_final
# - k > i means we compare the churn_prob column with the threshold i. When i = 0.0, a probability such as 0.282193 is greater than 0.0, so we label it 1.
# - When the threshold is 0.1, we label 1 if churn_prob > 0.1, else 0.
# - When the threshold is 0.2, we label 1 if churn_prob > 0.2, else 0.
# - and so on
# Now let's calculate accuracy sensitivity and specificity for various probability cutoffs.
cutoff_df = pd.DataFrame(columns=["prob", "accuracy", "sensitivity", "specificity"])
num = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for i in num:
confusion1 = metrics.confusion_matrix(
y_train_pred_final.churn, y_train_pred_final[i]
)
total = sum(sum(confusion1))
accuracy = (confusion1[0, 0] + confusion1[1, 1]) / total
sensitivity = (confusion1[1, 1]) / (confusion1[1, 0] + confusion1[1, 1])
specificity = (confusion1[0, 0]) / (confusion1[0, 0] + confusion1[0, 1])
cutoff_df.loc[i] = [i, accuracy, sensitivity, specificity]
print(cutoff_df)
# Let's plot accuracy sensitivity and specificity for various probabilities.
cutoff_df.plot.line(x="prob", y=["sensitivity", "accuracy", "specificity"])
plt.show()
# - From the curve above, 0.3 is the optimum point to take it as a cutoff probability.
# predicting using 0.3 as threshold point
y_train_pred_final["final_Predicted"] = y_train_pred_final.churn_prob.map(
lambda k: 1 if k > 0.3 else 0
)
y_train_pred_final
# Let's check the accuracy
metrics.accuracy_score(y_train_pred_final.churn, y_train_pred_final.final_Predicted)
# let's calculate confusion matrix for new threshold value 0.3
confusion_2 = metrics.confusion_matrix(
y_train_pred_final.churn, y_train_pred_final.final_Predicted
)
confusion_2
TP = confusion_2[1, 1]
TN = confusion_2[0, 0]
FP = confusion_2[0, 1]
FN = confusion_2[1, 0]
# ###### sensitivity
# let's calculate sensitivity
TP / (TP + FN)
# - SENSITIVITY: about 77% of the customers who actually churned are correctly identified as churned
# ###### Specificity
# LET'S SEE SPECIFICITY
TN / (TN + FP)
# - SPECIFICITY: about 77% of the customers who did not churn are correctly identified as not churned
# ###### False +ve rate
# LET'S CALCULATE FALSE POSITIVE RATE :
FP / (TN + FP)
# ###### +ve predictive value /Precision
# POSITIVE PREDICTIVE VALUE / PRECISION
TP / (TP + FP)
# NEGATIVE PREDICTIVE VALUE
TN / (TN + FN)
# # Precision and recall
# - In industry, some businesses follow the 'Sensitivity-Specificity' view and some other businesses follow the 'Precision-Recall' view.
# - We can use either of these two views
# -----
# - Using the sensitivity-specificity tradeoff, we found that the optimal cutoff point was 0.3. When we plot the precision-recall tradeoff, we will get a different threshold.
# import precision and recall from sklearn metrics
from sklearn.metrics import precision_score, recall_score
# finding precision score
precision_score(y_train_pred_final.churn, y_train_pred_final.Predicted)
# finding recall score
recall_score(y_train_pred_final.churn, y_train_pred_final.Predicted)
# ### Trade off between Precision & Recall
from sklearn.metrics import precision_recall_curve
y_train_pred_final.churn, y_train_pred_final.Predicted
p, r, thresholds = precision_recall_curve(
y_train_pred_final.churn, y_train_pred_final.churn_prob
)
# let's plot a graph.Precision - recall trade off curve.
plt.plot(thresholds, p[:-1], "g")
plt.plot(thresholds, r[:-1], "r")
plt.show()
# - Precision and recall intersect at about 0.42, so our threshold is 0.42.
# - F1 score:
# - F1 = 2 * (precision * recall) / (precision + recall)
# - The F1-score is useful when you want to look at the performance of precision and recall together.
#
F1 = 2 * ((0.64 * 0.54) / (0.64 + 0.54))
F1
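# Cross-check with sklearn (hedged sketch; uses the 0.5-threshold 'Predicted' column from above,
# which is what the 0.64 precision and 0.54 recall refer to):
from sklearn.metrics import f1_score

print(f1_score(y_train_pred_final.churn, y_train_pred_final.Predicted))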
# ## step 11 : Making predictions on test data sets
X_test
# Scaling
# we will transform on test data. We won't do fit on test data.
X_test[["tenure", "MonthlyCharges", "TotalCharges"]] = scaler.transform(
X_test[["tenure", "MonthlyCharges", "TotalCharges"]]
)
# let's see the top 15 features which we got using rfe
col
# We already selected these features on the train data, so we use them directly on the test data
X_test = X_test[col]
X_test.head()
# add constant to X test data set
X_test_sm = sm.add_constant(X_test)
X_test_sm.head()
# predicting y test
y_test_pred = logm5.predict(X_test_sm)
# create a dataframe of actual y test & predicted ytest
y_test_pred_final = pd.DataFrame({"churn": y_test, "churn_prob": y_test_pred})
y_test_pred_final.head()
# let's add custid to our dataframe
y_test_pred_final["Custid"] = y_test.index
y_test_pred_final.head()
# Creating new column 'predicted' with 1 if Churn_Prob > 0.42 else 0
y_test_pred_final["Predicted"] = y_test_pred_final.churn_prob.map(
lambda x: 1 if x > 0.42 else 0
)
y_test_pred_final.head()
# now let's see the accuracy score
metrics.accuracy_score(y_test_pred_final.churn, y_test_pred_final.Predicted)
# let's see the confusion matrix on the test set
confusion_test = metrics.confusion_matrix(y_test_pred_final.churn, y_test_pred_final.Predicted)
confusion_test
# recompute TP/TN/FP/FN from the test confusion matrix so the metrics below describe the test data
TP = confusion_test[1, 1]
TN = confusion_test[0, 0]
FP = confusion_test[0, 1]
FN = confusion_test[1, 0]
# now we can see the sensitivity and specificity on the test data
# SENSITIVITY
sensi = TP / (TP + FN)
sensi
# Specificity
spe = TN / (TN + FP)
spe
|
# # 🟠Surgery Duration|EDA🟠
# # 🟠Information About Data🟠
# ## The purpose of this competition is to estimate the duration of a surgery by building a regression model trained on labeled data extracted from Borda Technology's IoT sensors and database. The ID column simply identifies the patients; since it carries no information related to the other columns, it will be removed from the dataset later. The DiagnosticICD10Code column holds codes from the International Classification of Diseases (ICD), a diagnostic tool compiled by the World Health Organization and used worldwide for epidemiology, health management and clinical purposes. The official full name of the ICD is 'International Statistical Classification of Diseases and Related Health Problems'; it is a system used to classify and code diagnoses, symptoms, and procedures recorded in connection with hospital care. The SurgeryGroup column classifies patients by risk group, ordered from highest to lowest risk (A1, A2, A3, B, C, D, E).
# ## The AnesthesiaType column records the type of anesthesia given to the patient: general, regional or local, and some patients received more than one type. The SurgeryName column gives the name of the condition corresponding to the DiagnosticICD10Code value. The Age and Sex columns hold the patient's age and gender. The Service column shows which hospital service the patient was admitted to. The DoctorID and AnaesthetistID columns identify the doctor and the anesthesiologist, and the final ElapsedTime(second) column is the duration of the operation in seconds.
# Table Of Content
# * [1. IMPORTING LIBRARIES](#1)
#
# * [2. LOADING DATASET](#2)
#
# * [3. SOME EXPLORATORY INFORMATION ABOUT THE DATASET](#3)
# * [4. DATA VISUALIZATION](#4)
# * [5. AUTHOR MESSAGE](#5)
# 
# Let's get started!
# # IMPORTING LIBRARIES
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import missingno as msno
import plotly.graph_objs as go
import plotly.express as px
plt.style.use("seaborn-dark")
plt.style.context("grayscale")
from wordcloud import WordCloud, STOPWORDS
#
# # LOADING DATASET
df = pd.read_csv(
"/kaggle/input/prediction-of-surgery-duration/train.csv", encoding="latin-1"
)
df.head(2)
# ### 🟠The data is read and the first 2 rows are displayed.🟠
# # SOME EXPLORATORY INFORMATION ABOUT THE DATASET
df.drop(["ID"], axis=1, inplace=True)
#
# ### 🟠The 'ID' column has been removed from the dataset because it has no meaningful relationship to the other columns.🟠
#
df.head()
print("Number of rows are :", df.shape[0], ",and number of columns are :", df.shape[1])
# ### 🟠The first 5 rows of data are printed, the row and column numbers are also shown above.🟠
df.info()
#
# ### 🟠 The info method was used to obtain information about the data, and with this method, we saw both the number of missing values in our data and the data types.🟠
# ### With the info() method, we can obtain information such as the 2822 entries numbered from 0 to 2821, the number of non-null entries in each column, and the data type of each column.
# ### With the info() method, we learned about columns with missing data, now let's learn about missing data in a more understandable way.
df.isnull().sum()
#
# # DATA VISUALIZATION
msno.bar(df, figsize=(16, 5), color="orange")
plt.show()
#
# ### 🟠The isnull().sum() output above gives the number of missing values per column, and the missingno bar chart visualizes how complete each column is.🟠
a = df.describe()
a.T.head()
# ### 🟠 Here, a more aesthetic appearance is obtained by taking the transpose of the 'a' variable.🟠
#
df.Service.value_counts()
df.loc[(df["Service"] == "GENEL CERRAH? SERV?S?"), "Service"] = "GENEL CERRAHİ SERVİSİ"
df.loc[(df["Service"] == "ORTOPED? SERV?S?"), "Service"] = "GENEL CERRAHİ SERVİSİ"
df.loc[(df["Service"] == "KVC SERV?S?"), "Service"] = "KVC SERVİSİ"
df.loc[(df["Service"] == "ÜROLOJ? SERV?S?"), "Service"] = "ÜROLOJİ SERVİSİ"
df.loc[(df["Service"] == "PLAST?K CERRAH?"), "Service"] = "PLASTİK CERRAHİ"
df.loc[(df["Service"] == "KBB SERV?S?"), "Service"] = "KBB SERVİSİ"
df.loc[
(df["Service"] == "GÖZ HASTALIKLARI SERV?S?"), "Service"
] = "GÖZ HASTALIKLARI SERVİSİ"
df.loc[(df["Service"] == "NÖRO??RÜRJ? SERV?S?"), "Service"] = "NÖROŞİRÜRJİ SERVİSİ"
df.loc[
(df["Service"] == "GENEL KARMA CERRAH? SERV?S?"), "Service"
] = "GENEL KARMA CERRAHİ SERVİSİ"
df.loc[(df["Service"] == "KADIN DO?UM SERV?S?"), "Service"] = "KADIN DOĞUM SERVİSİ"
# ### 🟠In the code block above, the characters that were corrupted to '?' by the encoding were replaced with the correct Turkish characters.🟠
fig = px.histogram(
df, x="Service", color="Service", labels={"SERVICE"}, height=400, nbins=30
)
fig.show()
# ### 🟠The histogram plot, in which the number of people applying to the service is given in the graph above, is made. According to this graph, the department with the highest number of patients was the GENERAL SURGERY SERVICE, while the department with the least number of cases was the Gynecology Service. According to this graph, the order from the highest number of cases to the least case is as follows: GENERAL SURGERY SERVICE - 1475, KVC SERVICE - 432, UROLOGY SERVICE - 335, PLASTIC SURGERY - 178, ENT SERVICE - 128, EYE DISEASES SERVICE - 98, NEUROSURGERY GENERAL MIXED SURGERY SERVICE - 55, Gynecology Service - 40.🟠
fig = px.box(df, x="Service", y="Age", color="Service")
fig.update_traces(quartilemethod="exclusive") # or "inclusive", or "linear" by default
fig.show()
# ### 🟠In the graphic above, the age ranges of the applicants to the relevant department are visualized. It is observed that the average age of people applying to the ENT service is lower than other departments. It is visualized in the box plot graph that the mean age of the ophthalmology service is higher than the other departments.🟠
fig = px.pie(df, names="Service", color_discrete_sequence=px.colors.sequential.RdBu)
fig.show()
# ### 🟠Percentage distribution of services is given in the Pie Chart chart above. Since it is the General Surgery Service with the highest number of cases, it has the highest percentile.🟠
from wordcloud import WordCloud, STOPWORDS
text = " ".join(df["Service"])
plt.rcParams["figure.figsize"] = (12, 12)
wordcloud = WordCloud(
background_color="white", colormap="vlag", width=1200, height=1200, max_words=121
).generate(text)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
group = []
for name in df.SurgeryGroup:
if "A1" in name:
group.append("A1")
elif "A2" in name:
group.append("A2")
elif "A3" in name:
group.append("A3")
elif "B" in name:
group.append("B")
elif "C" in name:
group.append("C")
elif "D" in name:
group.append("D")
else:
group.append("E")
se = pd.Series(group)
df["Surgery_Group"] = se.values
# ### 🟠In the code block above, each patient's surgery risk group was simplified. The risk values go from highest to lowest risk as A1, A2, A3, B, C, D and E, and each raw entry is assigned the highest-risk group it contains. For example, an entry such as {0, 'E', 'B', 'C'} is assigned to risk group B.🟠
df.SurgeryGroup.value_counts()
# ### 🟠The raw SurgeryGroup values shown above are messy combinations, while the simplified Surgery_Group counts below are tidy single categories.🟠
df.Surgery_Group.value_counts()
df.drop(["SurgeryGroup"], axis=1, inplace=True)
fig = px.histogram(
df,
x="Surgery_Group",
color="Surgery_Group",
nbins=30,
opacity=0.8,
pattern_shape="Surgery_Group",
pattern_shape_sequence=[".", "x", "+", "\\", "|"],
title="Surgery Group Histogram Plot",
)
fig.update_layout(bargap=0.2, font_size=15, font_color="purple")
fig.show()
# ### 🟠The histogram above was drawn after cleaning the surgery group column. Group B surgeries are the most frequent and A2 the least. A1 is the riskiest surgery group, but no A1 cases were entered in the data set. The number of patients per risk group, from most to least, is: B-1079, C-840, E-550, D-177, A3-174, A2-2, and A1-0.🟠
fig = px.pie(
df,
names="Surgery_Group",
color="Surgery_Group",
color_discrete_sequence=px.colors.sequential.Rainbow,
hole=0.3,
)
fig.update_traces(textposition="outside")
fig.update_layout(uniformtext_minsize=12, uniformtext_mode="hide")
fig.update_layout(title_text="Surgery Group Pie Plot", font_size=15, font_color="blue")
fig.show()
# ### 🟠While the histogram plot visualizes the number of patients operated on with which risk group, the percentage distribution of these risk groups is visualized with the Pie plot.🟠
df.head()
sns.barplot(data=df, x="Surgery_Group", y="Age", palette="ocean")
plt.show()
#
# ### 🟠The barplot above shows that, among the operated patients, those in the A3 risk group have a higher average age than the other groups. We can say that these people may fall into this risk group partly because of their age.🟠
# ### 🟠There are several ways to handle missing values of categorical variables. Here are a few options (a short sketch follows this list):🟠
# ### Ignore the rows with missing values: This is only a viable option if the number of rows with missing values is small compared to the total number of rows.
# ### Impute the missing values: This involves replacing the missing values with some other value, such as the mode (most frequent value) of the variable. This is a quick and easy option, but it may not always produce the best results.
# ### Use a separate category for missing values: This involves creating a new category specifically for missing values. This can be useful if the missing values have some meaningful interpretation.
# ### Use multiple imputation: This involves using advanced techniques to impute the missing values in a way that takes into account the uncertainty associated with the imputation. Multiple imputation can produce more accurate results than simple imputation, but it is also more complex and time-consuming.
#
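# ### A minimal sketch of two of these options, applied only for illustration (assuming AnesthesiaType is one of the columns with missing values; the original dataframe is not modified here):
# option 1: impute with the mode (most frequent category)
mode_value = df["AnesthesiaType"].mode()[0]
anesthesia_mode_imputed = df["AnesthesiaType"].fillna(mode_value)
# option 2: treat missing values as their own category
anesthesia_with_missing_cat = df["AnesthesiaType"].fillna("Missing")
anesthesia_with_missing_cat.value_counts()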
df.Sex.value_counts()
fig = px.histogram(df, x="Sex", color="Sex", title="SEX")
fig.show()
#
# ### 🟠In the chart above, the histogram visualization of the 'sex' column in the data set is made. The number of men in the dataset is higher than the number of women. While the number of men is 1531, the number of women is 1291.🟠
fig = px.pie(df, names="Sex", color_discrete_sequence=px.colors.sequential.RdBu)
fig.update_traces(textposition="outside")
fig.update_layout(uniformtext_minsize=12, uniformtext_mode="hide")
fig.update_layout(title_text="Sex Pie Plot", font_size=15, font_color="red")
fig.show()
# ### 🟠Above is the graph showing the percentage distribution of the 'Sex' column in the pie plot chart.🟠
df.AnesthesiaType.isnull().sum()
df.loc[
(df["AnesthesiaType"] == "Kombine Spinal Epidural Anestezi"), "AnesthesiaType"
] = "Bölgesel Anestezi"
df.loc[
(df["AnesthesiaType"] == "Rejyonel Sinir Blo?u"), "AnesthesiaType"
] = "Bölgesel Anestezi"
df.loc[
(df["AnesthesiaType"] == "Epidural Anestezi"), "AnesthesiaType"
] = "Bölgesel Anestezi"
df.loc[
(df["AnesthesiaType"] == "Periferik Bloklar"), "AnesthesiaType"
] = "Bölgesel Anestezi"
df.loc[(df["AnesthesiaType"] == "Local Anestezi"), "AnesthesiaType"] = "Lokal Anestezi"
df.loc[
(df["AnesthesiaType"] == "Spinal Anestezi"), "AnesthesiaType"
] = "Bölgesel Anestezi"
df.loc[
(df["AnesthesiaType"] == "Rejyonel Anestezi"), "AnesthesiaType"
] = "Bölgesel Anestezi"
# ### https://anestezi.deu.edu.tr/anestezi-nedir-anestezi-turleri-nelerdir/
# ### Using the classification at the link above, the anesthesia types were consolidated into broader categories.
df.AnesthesiaType.value_counts()
fig = px.histogram(
df, x="AnesthesiaType", color="AnesthesiaType", title="ANESTHESIA TYPE"
)
fig.show()
# ### 🟠'AnesthesiaType' is visualized in the histogram chart above. According to the type of anesthesia, general anesthesia is mostly preferred, while regional anesthesia is preferred after general anesthesia.🟠
# ### 🟠General anesthesia means putting the patient to sleep for a painful surgical procedure and waking them up at the end of it. Similar to a state of deep sleep, general anesthesia removes consciousness and the sensation of pain. It is provided by administering drugs through a vein, by breathing anesthetic gas into the lungs, or by both together. During this time the patient's own breathing is supported: a tube is placed in the trachea or an alternative airway device is used. The patient does not remember these procedures. Once anesthesia is established, the operation can start. After the operation, the administration of all drugs other than oxygen is stopped and medications are given that counteract the effects of some of the anesthetic drugs. If a tube was inserted, it is removed from the patient's throat, and the patient is taken to the recovery room for follow-up. Once the patient is fully awake, they are sent back to their room and given treatment to keep pain to a minimum.🟠
# ### 🟠Regional anesthesia, a method used in many surgical interventions, is applied by numbing only the part of the body required by the operation. It is widely preferred in urology, orthopedics and gynecology surgeries. Although it is sometimes thought to be similar to local anesthesia, regional anesthesia numbs larger areas rather than the limited points targeted by local anesthesia. For example, local anesthesia can numb a very small area on the leg, whereas regional anesthesia can numb the entire leg or, if necessary, the whole lower part of the body, depending on the operation.🟠
df.Age.value_counts()
sns.kdeplot(data=df, x="Age", color="black", fill=True)
plt.xlabel("Age", color="black", fontsize=10)
plt.ylabel("count", color="black", fontsize=10)
plt.title("AGE KDEPLOT", color="black", fontsize=10)
plt.show()
# ### 🟠The kdeplot graph is given above. The 'Age' column is visualized in this chart. The number of people in the 40 to 50 age range is quite high.🟠
df.DiagnosticICD10Code.value_counts().head(65)
df.head()
# map each DiagnosticICD10Code to its ICD-10 chapter name, using the code's letter (D and H codes are split by their numeric part because those letters span two chapters)
dig = []
for i in df["DiagnosticICD10Code"]:
if str(i) == "nan":
dig.append("None")
elif "O" in i:
dig.append("Pregnancy, childbirth and the puerperium")
elif "A" in i:
dig.append("Certain infectious and parasitic diseases")
elif "B" in i:
dig.append("Certain infectious and parasitic diseases")
elif "C" in i:
dig.append("Neoplasms")
elif "D" in i:
a = i.split("D")
temp = a[1][:2]
if int(temp) >= 50:
dig.append(
"Diseases of the blood and blood-forming organs and certain disorders involving the immune mechanism"
)
else:
dig.append("Neoplasms")
elif "E" in i:
dig.append("Endocrine, nutritional and metabolic diseases")
elif "F" in i:
dig.append("Mental, Behavioral and Neurodevelopmental disorders")
elif "G" in i:
dig.append("Diseases of the nervous system")
elif "H" in i:
a = i.split("H")
temp = a[1][:2]
if int(temp) >= 60:
dig.append("Diseases of the ear and mastoid process")
else:
dig.append("Diseases of the eye and adnexa")
elif "I" in i:
dig.append("Diseases of the circulatory system")
elif "J" in i:
dig.append("Diseases of the respiratory system")
elif "K" in i:
dig.append("Diseases of the digestive system")
elif "L" in i:
dig.append("Diseases of the skin and subcutaneous tissue")
elif "M" in i:
dig.append("Diseases of the musculoskeletal system and connective tissue")
elif "N" in i:
dig.append("Diseases of the genitourinary system")
elif "P" in i:
dig.append("Certain conditions originating in the perinatal period")
elif "Q" in i:
dig.append(
"Congenital malformations, deformations and chromosomal abnormalities"
)
elif "R" in i:
dig.append(
"Symptoms, signs and abnormal clinical and laboratory findings, not elsewhere classified"
)
elif "S" in i:
dig.append(
"Injury, poisoning and certain other consequences of external causes"
)
elif "T" in i:
dig.append(
"Injury, poisoning and certain other consequences of external causes"
)
elif "U" in i:
dig.append("Codes for special purposes")
elif "V" in i:
dig.append("External causes of morbidity")
elif "Y" in i:
dig.append("External causes of morbidity")
elif "Z" in i:
dig.append("Factors influencing health status and contact with health services")
else:
dig.append("W")
se = pd.Series(dig)
df["Surgery_Code"] = se.values
df.head()
df.Surgery_Code.value_counts()
fig = px.histogram(
df, x="Surgery_Code", color="Surgery_Code", title="Surgery Code Histogram Plot"
)
fig.show()
# ### 🟠The disease categories, arranged according to the ICD-10 chapters listed at 'https://www.icd10data.com/ICD10CM/Codes', are visualized above.🟠
fig = px.histogram(
df,
x="Surgery_Code",
y="ElapsedTime(second)",
color="Surgery_Code",
barmode="group",
histfunc="avg",
height=400,
)
fig.show()
# ### 🟠The grouped averages above suggest that some of the recorded operation times are erroneous, so this grouping is not used in the later analysis.🟠
df.loc[
(df["DiagnosticICD10Code"] == {"I83.9"}), "SurgeryName"
] = "Ülser veya enflamasyon olmadan alt ekstremitenin variköz venleri"
df.loc[(df["DiagnosticICD10Code"] == {"K80.8"}), "SurgeryName"] = "Kolelitiazis, diğer"
df.loc[
(df["DiagnosticICD10Code"] == {"I83"}), "SurgeryName"
] = "Alt ekstremitenin variköz venleri"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M51.1"}), "SurgeryName"
] = "Lumbar ve diğer intervertebral disk bozuklukları, radikülopati ile"
df.loc[
(df["DiagnosticICD10Code"] == {"K40.9"}), "SurgeryName"
] = "Tek taraflı veya tanımlanmamış inguinal herni, gangrensiz veya obstrüksiyonsuz"
df.loc[(df["DiagnosticICD10Code"] == {"K40"}), "SurgeryName"] = "İnguinal herni"
df.loc[(df["DiagnosticICD10Code"] == {"L05"}), "SurgeryName"] = "Pilonidal kist"
df.loc[
(df["DiagnosticICD10Code"] == {"I83", "nan"}), "SurgeryName"
] = "Alt ekstremitenin variköz venleri"
df.loc[
(df["DiagnosticICD10Code"] == {"M95.0"}), "SurgeryName"
] = "Burnun kazanılmış deformitesi"
df.loc[
(df["DiagnosticICD10Code"] == {"K80.1"}), "SurgeryName"
] = "Safra kesesi taşı, diğer kolesistit ile"
df.loc[
(df["DiagnosticICD10Code"] == {"H25.1"}), "SurgeryName"
] = "Senil nükleer katarakt"
df.loc[
(df["DiagnosticICD10Code"] == {"L05.9"}), "SurgeryName"
] = "Pilonidal kist, apsesiz"
df.loc[
(df["DiagnosticICD10Code"] == {"N47"}), "SurgeryName"
] = "Sünnet derisinin fazlalığı, fimozis ve parafimozis"
df.loc[
(df["DiagnosticICD10Code"] == {"K40.3"}), "SurgeryName"
] = "Tek taraflı veya tanımlanmamış inguinal herni, obstrüksiyonlu,gangrensiz"
df.loc[(df["DiagnosticICD10Code"] == {"nan", "K40"}), "SurgeryName"] = "İnguinal herni"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "K40.3"}), "SurgeryName"
] = "Tek taraflı veya tanımlanmamış inguinal herni, obstrüksiyonlu,gangrensiz"
df.loc[(df["DiagnosticICD10Code"] == {"N20.1"}), "SurgeryName"] = "Üreter taşı"
df.loc[
(df["DiagnosticICD10Code"] == {"N40"}), "SurgeryName"
] = "Benign prostat hiperplazisi"
df.loc[
(df["DiagnosticICD10Code"] == {"M50.1"}), "SurgeryName"
] = "Servikal disk bozuklukları, radikülopati ile"
df.loc[
(df["DiagnosticICD10Code"] == {"H26.9"}), "SurgeryName"
] = "Katarakt, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "J34.2"}), "SurgeryName"
] = "Nazal septum deviasyonu"
df.loc[(df["DiagnosticICD10Code"] == {"Z41.2"}), "SurgeryName"] = "Rutin ve dini sünnet"
df.loc[(df["DiagnosticICD10Code"] == {"M25.5"}), "SurgeryName"] = "Eklem ağrısı"
df.loc[
(df["DiagnosticICD10Code"] == {"J34.2"}), "SurgeryName"
] = "Nazal septum deviasyonu"
df.loc[(df["DiagnosticICD10Code"] == {"K60.1"}), "SurgeryName"] = "Anal fissür, kronik"
df.loc[(df["DiagnosticICD10Code"] == {"I86.1"}), "SurgeryName"] = "Varikosel"
df.loc[(df["DiagnosticICD10Code"] == {"K42"}), "SurgeryName"] = "Umbilikal herni"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M51.8"}), "SurgeryName"
] = "İntervertebral disk bozuklukları diğer, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"L05.0"}), "SurgeryName"
] = "Pilonidal kist, apseli"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "K40.9"}), "SurgeryName"
] = "Tek taraflı veya tanımlanmamış inguinal herni, gangrensiz veya obstrüksiyonsuz"
df.loc[
(df["DiagnosticICD10Code"] == {"K42.0"}), "SurgeryName"
] = "Umbilikal herni; obstrüksiyonlu, gangrensiz"
df.loc[
(df["DiagnosticICD10Code"] == {"N92.1"}), "SurgeryName"
] = "Aşırı ve sık menstrüasyon, düzensiz siklus ile"
df.loc[
(df["DiagnosticICD10Code"] == {"I83.0"}), "SurgeryName"
] = "Alt ekstremitenin variköz venleri, ülserli"
df.loc[(df["DiagnosticICD10Code"] == {"K60.3"}), "SurgeryName"] = "Anal fistül"
df.loc[
(df["DiagnosticICD10Code"] == {"M51.8"}), "SurgeryName"
] = "İntervertebral disk bozuklukları diğer, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"K40.2"}), "SurgeryName"
] = "Bilateral inguinal herni, obstrüksiyonsuz veya gangrensiz"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M17.9"}), "SurgeryName"
] = "Gonartroz, tanımlanmamış"
df.loc[(df["DiagnosticICD10Code"] == {"N43.2"}), "SurgeryName"] = "Hidrosel, diğer"
df.loc[
(df["DiagnosticICD10Code"] == {"M25.8"}), "SurgeryName"
] = "Diğer tanımlanmış eklem bozuklukları"
df.loc[(df["DiagnosticICD10Code"] == {"K80"}), "SurgeryName"] = "Safra taşı"
df.loc[(df["DiagnosticICD10Code"] == {"H26"}), "SurgeryName"] = "Kataraktlar, diğer"
df.loc[(df["DiagnosticICD10Code"] == {"nan", "M25.5"}), "SurgeryName"] = "Eklem ağrısı"
df.loc[(df["DiagnosticICD10Code"] == {"N20.0"}), "SurgeryName"] = "Böbrek taşı"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M51.9"}), "SurgeryName"
] = "İntervertebral disk bozuklukları, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"H02.9"}), "SurgeryName"
] = "Göz kapağı bozukluğu, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"R22"}), "SurgeryName"
] = "Derinin ve subkutanöz dokunun lokalize şişme, kitle ve yumrusu"
df.loc[
(df["DiagnosticICD10Code"] == {"J34"}), "SurgeryName"
] = "Burun ve nazal sinüslerin diğer bozuklukları"
df.loc[
(df["DiagnosticICD10Code"] == {"Z01.4"}), "SurgeryName"
] = "Jinekolojik muayene (genel) (olağan)"
df.loc[
(df["DiagnosticICD10Code"] == {"K80.0"}), "SurgeryName"
] = "Safra kesesi taşı, akut kolesistit ile"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M25.8"}), "SurgeryName"
] = "Diğer tanımlanmış eklem bozuklukları"
df.loc[
(df["DiagnosticICD10Code"] == {"M17.9"}), "SurgeryName"
] = "Gonartroz, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"K42.9"}), "SurgeryName"
] = "Umbilikal herni, obstrüksiyonsuz, gangrenli"
df.loc[
(df["DiagnosticICD10Code"] == {"G56.0"}), "SurgeryName"
] = "Karpal tünel sendromu"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "I83.9"}), "SurgeryName"
] = "Ülser veya enflamasyon olmadan alt ekstremitenin variköz venleri"
df.loc[(df["DiagnosticICD10Code"] == {"M65.3"}), "SurgeryName"] = "Tetik parmak"
df.loc[(df["DiagnosticICD10Code"] == {"N64.2"}), "SurgeryName"] = "Memenin atrofisi"
df.loc[(df["DiagnosticICD10Code"] == {"nan", "K42"}), "SurgeryName"] = "Umbilikal herni"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M51.0"}), "SurgeryName"
] = "Lumbar ve diğer intervertebral disk bozuklukları, myelopati ile (G99.2*)"
df.loc[
(df["DiagnosticICD10Code"] == {"K82.9"}), "SurgeryName"
] = "Safra kesesi hastalığı, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"I73.9"}), "SurgeryName"
] = "Periferik vasküler hastalık, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "K43.9"}), "SurgeryName"
] = "Ventral herni, obstrüksiyon ve gangrensiz"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "R22"}), "SurgeryName"
] = "Derinin ve subkutanöz dokunun lokalize şişme, kitle ve yumrusu"
df.loc[(df["DiagnosticICD10Code"] == {"N62"}), "SurgeryName"] = "Memenin hipertrofisi"
df.loc[(df["DiagnosticICD10Code"] == {"W19"}), "SurgeryName"] = "Düşme, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "N92.1"}), "SurgeryName"
] = "Aşırı ve sık menstrüasyon, düzensiz siklus ile"
df.loc[
(df["DiagnosticICD10Code"] == {"N23"}), "SurgeryName"
] = "Renal kolik, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"J35.2"}), "SurgeryName"
] = "Adenoidlerin hipertrofisi"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "I83.1"}), "SurgeryName"
] = "Alt ekstremitenin variköz venleri, enflamasyonla birlikte"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "K42.0"}), "SurgeryName"
] = "Umbilikal herni; obstrüksiyonlu, gangrensiz"
df.loc[
(df["DiagnosticICD10Code"] == {"M50.1", "M54.2"}), "SurgeryName"
] = "Servikal disk bozuklukları, radikülopati ile, Boyun ağrısı "
df.loc[
(df["DiagnosticICD10Code"] == {"R22.2"}), "SurgeryName"
] = "Gövdenin lokalize şişme, kitle ve yumrusu"
df.loc[
(df["DiagnosticICD10Code"] == {"N43"}), "SurgeryName"
] = "Hidrosel ve spermatosel"
df.loc[
(df["DiagnosticICD10Code"] == {"Z44.3"}), "SurgeryName"
] = "Dış meme protezinin uygulaması"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "J34"}), "SurgeryName"
] = "Burun ve nazal sinüslerin diğer bozuklukları"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "Z01.4"}), "SurgeryName"
] = "Jinekolojik muayene (genel) (olağan)"
df.loc[
(df["DiagnosticICD10Code"] == {"M50.9"}), "SurgeryName"
] = " Servikal disk bozukluğu, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"S52.50"}), "SurgeryName"
] = "Distal radius kırığı, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M17.1"}), "SurgeryName"
] = "Primer gonartroz, diğer"
df.loc[(df["DiagnosticICD10Code"] == {"N30.0"}), "SurgeryName"] = "Akut sistit"
df.loc[
(df["DiagnosticICD10Code"] == {"K40.0"}), "SurgeryName"
] = "Bilateral inguinal herni; obstrüksiyonlu, gangrensiz"
df.loc[
(df["DiagnosticICD10Code"] == {"R22.9"}), "SurgeryName"
] = "Lokalize şişme kitle ve yumrusu, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"K40.4"}), "SurgeryName"
] = "Tek taraflı veya tanımlanmamış inguinal herni, gangrenli"
df.loc[(df["DiagnosticICD10Code"] == {"N21.0"}), "SurgeryName"] = "Mesane taşı"
df.loc[
(df["DiagnosticICD10Code"] == {"E04.8"}), "SurgeryName"
] = "Toksik olmayan guatr tanımlanmış, diğer"
df.loc[
(df["DiagnosticICD10Code"] == {"K40.1"}), "SurgeryName"
] = "Bilateral inguinal herni, gangrenli"
df.loc[
(df["DiagnosticICD10Code"] == {"I84.4"}), "SurgeryName"
] = "Eksternal hemoroidler, diğer komplikasyonlarla birlikte"
df.loc[
(df["DiagnosticICD10Code"] == {"C67.4"}), "SurgeryName"
] = "Mesane arka duvarı malign neoplazmı"
df.loc[
(df["DiagnosticICD10Code"] == {"M50.0"}), "SurgeryName"
] = "Servikal disk bozuklukları, myelopati ile (G99.2*)"
df.loc[
(df["DiagnosticICD10Code"] == {"N43.3"}), "SurgeryName"
] = "Hidrosel, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"N92.5"}), "SurgeryName"
] = "Düzensiz menstrüasyon, diğer tanımlanmış"
df.loc[
(df["DiagnosticICD10Code"] == {"J34.8"}), "SurgeryName"
] = "Burun ve nazal sinüslerin diğer tanımlanmış bozuklukları"
df.loc[
(df["DiagnosticICD10Code"] == {"N28.1"}), "SurgeryName"
] = "Böbrek kisti, kazanılmış"
df.loc[(df["DiagnosticICD10Code"] == {"L60"}), "SurgeryName"] = "Tırnak bozuklukları"
df.loc[
(df["DiagnosticICD10Code"] == {"K21.9"}), "SurgeryName"
] = "Gastro-özofajial reflü hastalığı, özofajitsiz"
df.loc[
(df["DiagnosticICD10Code"] == {"J38.1"}), "SurgeryName"
] = "Vokal kord ve larinks polipi"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "S46.0"}), "SurgeryName"
] = "Omuz rotator cuff tendon yaralanması"
df.loc[(df["DiagnosticICD10Code"] == {"H00.1"}), "SurgeryName"] = "Şalazyon"
df.loc[
(df["DiagnosticICD10Code"] == {"Z48.0"}), "SurgeryName"
] = "Cerrahi elbise ve sütürlerin bakım"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "N39.9"}), "SurgeryName"
] = "Üriner sistemin bozukluğu, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"K80.5"}), "SurgeryName"
] = "Safra yolu taşı, kolanjit veya kolesistit olmadan"
df.loc[
(df["DiagnosticICD10Code"] == {"M51.1"}), "SurgeryName"
] = "Lumbar ve diğer intervertebral disk bozuklukları, radikülopati ile"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "K40.1"}), "SurgeryName"
] = "Bilateral inguinal herni, gangrenli"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "A63.0"}), "SurgeryName"
] = "Anogenital (veneryal) Siğiller"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M20.1"}), "SurgeryName"
] = "Halluks valgus (kazanılmış)"
df.loc[
(df["DiagnosticICD10Code"] == {"N32.9"}), "SurgeryName"
] = "Mesane bozukluğu, tanımlanmamış"
df.loc[
(df["DiagnosticICD10Code"] == {"H66.3"}), "SurgeryName"
] = "Kronik süpüratif Otitis media, diğer"
df.loc[
(df["DiagnosticICD10Code"] == {"I82'"}), "SurgeryName"
] = "Venöz emboli ve tromboz diğer"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M95.0"}), "SurgeryName"
] = "Burnun kazanılmış deformitesi"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "M17"}), "SurgeryName"
] = "Gonartroz [diz ekleminin artrozu]"
df.loc[(df["DiagnosticICD10Code"] == {"nan", "K60.3"}), "SurgeryName"] = "Anal fistül"
df.loc[
(df["DiagnosticICD10Code"] == {"nan", "C67.9"}), "SurgeryName"
] = "Mesane malign neoplazmı, tanımlanmamış"
# ### 🟠We renamed the diseases above according to their ICD codes.🟠
df.SurgeryName.value_counts()
plt.figure(figsize=(20, 10))
plt.xticks(rotation=90)
sns.countplot(
data=df,
x=df["SurgeryName"],
palette="ocean",
order=df["SurgeryName"].value_counts().index[0:20],
)
plt.show()
# ### 🟠Due to the large number of surgical names above, the 20 most common surgical names were visualized. According to this graph, 'Varicose veins of the lower extremity without ulcers or inflammation' is in the first place, followed by 'Bilateral inguinal hernia, without obstruction or gangrene' surgery.🟠
df.DoctorID.value_counts()
plt.figure(figsize=(20, 10))
plt.xticks(rotation=90)
sns.countplot(
data=df,
x=df["DoctorID"],
palette="Pastel1",
order=df["DoctorID"].value_counts().index[0:20],
)
plt.show()
# ### 🟠In the count plot above, doctor number 36 performs the most surgeries, followed by doctor number 5, while doctor number 23 performs the fewest among those shown.🟠
#
fig = px.sunburst(df, path=["DoctorID"])
fig.show()
df.AnaesthetistID.value_counts()
fig = px.histogram(df, x="AnaesthetistID", color="AnaesthetistID")
fig.show()
# ### 🟠In the histogram above, anesthesiologist number 4 attends the most surgeries, followed by anesthesiologist number 12, while anesthesiologist number 8 attends the fewest. The order from most to fewest surgeries attended is as follows:
# ### 🟠4 Anesthesiologist- has 401 patients.
# ### 🟠12 Anesthesiologist- has 379 patients.
# ### 🟠2 Anesthesiologist- has 350 patients.
# ### 🟠6 Anesthesiologist- has 307 patients.
# ### 🟠13 Anesthesiologist- has 284 patients.
# ### 🟠11 Anesthesiologist- has 282 patients.
# ### 🟠14 Anesthesiologist- has 259 patients.
# ### 🟠9 Anesthesiologist- has 127 patients.
# ### 🟠10 Anesthesiologist- has 108 patients.
# ### 🟠3 Anesthesiologist- has 99 patients.
# ### 🟠1 Anesthesiologist- has 75 patients.
# ### 🟠7 Anesthesiologist has 60 patients.
# ### 🟠5 Anesthesiologist- has 45 patients.
# ### 🟠15 Anesthesiologist- has 33 patients.
# ### 🟠16 Anesthesiologist- has 10 patients.
# ### 🟠8 Anesthesiologist- has 3 patients.🟠
# ### 🟠Let's observe how the operation time changes according to the type of anesthesia. 🟠
fig = px.histogram(
df,
x="AnesthesiaType",
y="ElapsedTime(second)",
color="AnesthesiaType",
barmode="group",
histfunc="avg",
height=400,
)
fig.show()
# ### 🟠The chart above shows that operations performed under general and regional (including spinal) anesthesia take longer on average than those under local anesthesia.🟠
# ### 🟠Let's find the operating times of the surgical services according to the gender distribution.🟠
fig = px.histogram(
df,
x="Service",
y="ElapsedTime(second)",
color="Sex",
barmode="group",
histfunc="avg",
height=400,
)
fig.show()
# ### 🟠In the histogram plot above, the average of the operation times of the wards according to gender is given. It has been visualized that the operation times of women in ENT and plastic surgery services are longer than that of men.🟠
# ### 🟠Let's find out how the risk group of surgeries affects the duration of the operation.🟠
fig = px.histogram(
df,
x="Surgery_Group",
y="ElapsedTime(second)",
color="Surgery_Group",
barmode="group",
histfunc="avg",
height=400,
)
fig.show()
# ### 🟠The chart above shows how average operation time varies with the surgical risk group. Operations in the A3 risk group take the longest on average, followed by group B, while group E has the shortest average operation time.🟠
# ### 🟠Let's find out which service has more operation time on average...🟠
fig = px.histogram(
df,
x="Service",
y="ElapsedTime(second)",
color="Service",
barmode="group",
histfunc="avg",
height=400,
)
fig.show()
|
# 
# # About Dataset
# This dataset is originally from the National Institute of Diabetes and Digestive and Kidney
# Diseases. The objective of the dataset is to diagnostically predict whether a patient has diabetes,
# based on certain diagnostic measurements included in the dataset. Several constraints were placed
# on the selection of these instances from a larger database. In particular, all patients here are females
# at least 21 years old of Pima Indian heritage.
# From the dataset in the .csv file we can find several variables: some of them are independent
# (several medical predictor variables) and only one is the target dependent variable (Outcome).
# # STEPS
# ## 1. EDA(Exploratory Data Analysis)
# EDA stands for Exploratory Data Analysis, which is an approach to analyzing and summarizing a dataset in order to gain insights and identify patterns and relationships between variables.
# EDA involves visualizing and manipulating data using various statistical and computational methods, in order to identify patterns, anomalies, and trends. Some common techniques used in EDA include scatter plots, histograms, box plots, and correlation analysis.
# The goal of EDA is to understand the data and the relationships between variables, so that we can identify potential problems, such as missing data, outliers, or errors in the dataset, and address them appropriately. EDA is often the first step in the data analysis process and is essential for making informed decisions about how to proceed with data modeling and analysis.
# ## 2. Data Pre-Processing
# Data pre-processing is the process of cleaning and transforming raw data into a format that can be easily understood and analyzed by machine learning algorithms. It is a crucial step in any data analysis or machine learning project. Data pre-processing improves the quality of the dataset by removing errors, filling in missing values, and addressing inconsistencies, allowing for more accurate results from data analysis and machine learning algorithms. Some of the steps involved in data pre-processing include data cleaning, feature selection, scaling, and normalization.
# ### 3. Outlier Analysis
# Outlier analysis is the process of identifying and analyzing data points in a dataset that are significantly different from the rest of the data. These data points are called outliers and they can skew statistical analyses and machine learning models, leading to inaccurate results. Outlier analysis involves identifying the outliers, determining whether they are genuine or erroneous, and deciding how to handle them. There are several methods for outlier analysis, including visual inspection of the data, statistical methods such as z-scores and box plots, and machine learning algorithms that can identify anomalies in the data. Once outliers have been identified, they can be either removed from the dataset or their values can be adjusted to better fit the distribution of the rest of the data, depending on the specific application and the nature of the outliers. Outlier analysis is an important step in data pre-processing and can improve the accuracy of data analysis and machine learning models
# 
# ## 4. Feature Engineering
# Feature engineering is the process of obtaining, extracting, and transforming features in machine learning and data analysis projects to make the data more meaningful and effective. Data sets often contain a large number of features, but not all of them may be important or useful. Feature engineering selects, creates, and transforms features in the data set using various techniques to make them more meaningful and effective for machine learning algorithms. Some of the techniques used in feature engineering include feature scaling, feature extraction, feature transformation, and creation of new features. Feature engineering improves the quality of the data set, helps machine learning algorithms make more accurate predictions, and ultimately leads to better performance and higher accuracy.
# 
# ## 5. Create Models
# It refers to the process of creating a model using machine learning algorithms to learn from data sets and make predictions on future data.
# This process involves training machine learning algorithms by taking a certain portion of the data set (training data) and then evaluating the performance of the trained model using another portion (test data). The model is optimized by selecting the right features, scaling the features, tuning hyperparameters, and using other techniques.
# Model creation is a method used to identify patterns and relationships in a data set, and these models can be used for prediction, classification, clustering, and other data analysis tasks. Model creation is a powerful tool for understanding patterns in data sets and making predictions for future data.
# ### Hyperparameter optimization
# Hyperparameter optimization is the process of finding the best values for hyperparameters in machine learning algorithms. Hyperparameters are parameters that govern the structure and learning process of the model, and directly affect the model's performance. For example, in an artificial neural network model, hyperparameters are values such as learning rate, number of epochs, number of layers, number of neurons, and weight initialization.
# Hyperparameter optimization selects the best hyperparameters by trying out hyperparameter values within a certain range. This process is often done through trial and error and can be automated to speed up the learning process.
# Hyperparameter optimization can improve model performance and also reduce the risk of overfitting. Therefore, in machine learning projects, hyperparameter optimization is frequently used to enhance the model's performance.
# ### ROC (Receiver Operating Characteristic) Curve
# ROC (Receiver Operating Characteristic) curve is a graphical method used to evaluate the performance of classification models. A classification model assigns each data point in a sample dataset to a class (e.g. disease or health) based on the features of that data point. The ROC curve is a graph that represents the accuracy and false positive rate (FPR) of this model.
# The ROC curve shows the true positive rate (TPR) against the FPR. TPR is the ratio of the total number of true positives to the total number of positives (true positives + false negatives). FPR is the ratio of the total number of false positives to the total number of negatives (true negatives + false positives).
# An ideal classification model would have a curve with TPR = 1 and FPR = 0. Models with a larger area under the curve (AUC) on the ROC curve demonstrate better performance. The ROC curve is a useful tool to visualize the performance of a classification model and compare different models.
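# A tiny worked example of these definitions with made-up counts (illustration only, not results from this dataset):
tp, fn, fp, tn = 80, 20, 10, 90
tpr = tp / (tp + fn)  # true positive rate (sensitivity) = 80 / 100 = 0.8
fpr = fp / (fp + tn)  # false positive rate = 10 / 100 = 0.1
print(tpr, fpr)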
import joblib
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from lightgbm import LGBMClassifier
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
VotingClassifier,
AdaBoostClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, roc_curve
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("../input/diabetes-dataset/diabetes.csv")
# ## EDA(Exploratory Data Analysis)
def eda(dataframe):
print(
f"""
-- dtypes --
{dataframe.dtypes}
-- NaN Values --
{dataframe.isnull().sum()}
-- Shape --
{dataframe.shape}
-- Unique --
{df.apply(lambda x: x.nunique())}
-- Head --
"""
)
return dataframe.head()
eda(df)
df.describe().T
sbn.clustermap(df.corr(), annot=True, fmt=".2f")
# ## Data Pre-Processing
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
cat_cols, num_cols, cat_but_car
plt.figure(figsize=(16, 8))
for i, x in enumerate(num_cols):
plt.subplot(2, 4, i + 1)
sbn.histplot(df[x])
plt.figure(figsize=(16, 8))
for i, x in enumerate(num_cols):
plt.subplot(2, 4, i + 1)
sbn.boxplot(df[x])
# ### Outlier Analysis
def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
for x in num_cols:
print(x, check_outlier(df, x))
for x in num_cols:
replace_with_thresholds(df, x)
plt.figure(figsize=(16, 8))
for i, x in enumerate(num_cols):
plt.subplot(2, 4, i + 1)
sbn.histplot(df[x])
plt.figure(figsize=(16, 8))
for i, x in enumerate(num_cols):
plt.subplot(2, 4, i + 1)
sbn.boxplot(df[x])
plt.pie(
df["Outcome"].value_counts(),
explode=[0.0, 0.25],
startangle=30,
shadow=True,
colors=["#004d99", "#ac7339"],
textprops={"fontsize": 8, "fontweight": "bold", "color": "white"},
pctdistance=0.50,
autopct="%1.2f%%",
)
# Visible outliers have been capped, but there are also hidden ones in the Insulin and SkinThickness columns.
# Their minimum value is 0, which is physiologically impossible, so we will turn these 0 values into NaN and fill them with the mean per Outcome group
df.describe()[["Insulin", "SkinThickness"]].loc["min", :]
# As you can see they have a value of 0.
# Let's look at the number and ratio of these values
zero_count = pd.DataFrame(
df[["Insulin", "SkinThickness"]].apply(lambda x: x.value_counts()).loc[0, :]
)
zero_ratio = zero_count / df[["Insulin", "SkinThickness"]].shape[0]
pd.concat([zero_count, zero_ratio], axis=1)
# It's a lot too :D Let's fix them ;)
df["Insulin"] = (
df["Insulin"]
.where((df["Insulin"] > 0))
.fillna(df.groupby("Outcome")["Insulin"].transform("mean"))
)
df["SkinThickness"] = (
df["SkinThickness"]
.where((df["SkinThickness"] > 0))
.fillna(df.groupby("Outcome")["SkinThickness"].transform("mean"))
)
df.describe()[["Insulin", "SkinThickness"]].loc["min", :]
# yeah that's better
# ## Feature Engineering
plt.subplot(1, 2, 1)
sbn.histplot(df["Glucose"])
plt.subplot(1, 2, 2)
sbn.histplot(df["Age"])
# I think there may be a link between glucose and age; that is, the same glucose level will not have the same effect for an older person as for a younger one.
# It is normal for a young person to have glucose levels between 50 and 150
df.loc[
(df["Glucose"] < 150) & (df["Glucose"] > 50) & (df["Age"] <= 45), "Gul_Age_Cat"
] = "normal_young"
df.loc[
(df["Glucose"] < 150) & (df["Glucose"] > 50) & (df["Age"] > 45), "Gul_Age_Cat"
] = "normal_old"
df.loc[
((df["Glucose"] > 150) | (df["Glucose"] < 50)) & (df["Age"] <= 45), "Gul_Age_Cat"
] = "not_normal_young"
df.loc[
((df["Glucose"] > 150) | (df["Glucose"] < 50)) & (df["Age"] > 45), "Gul_Age_Cat"
] = "not_normal_old"
# I categorized the insulin values
df.loc[(df["Insulin"] < 30), "Insulin_level"] = "Low"
df.loc[(df["Insulin"] >= 30) & (df["Insulin"] <= 120), "Insulin_level"] = "Normal"
df.loc[(df["Insulin"] > 120), "Insulin_level"] = "High"
#
# I wanted to see if it would have an effect by categorizing glucose values as well.
df.loc[(df["Glucose"] < 50), "Glucose_level"] = "Low"
df.loc[(df["Glucose"] >= 50) & (df["Glucose"] <= 140), "Insulin_level"] = "Normal"
df.loc[(df["Glucose"] > 140), "Glucose_level"] = "High"
# I combined the variables that have a high impact on diabetes: rows where Insulin, BMI, Glucose and SkinThickness all fall inside their normal ranges get the value "Not_Risk", and every other row gets "At_Risk".
df.loc[
((df["Insulin"] >= 50) & (df["Insulin"] <= 140))
& ((df["BMI"] >= 25) & (df["BMI"] <= 36))
& ((df["Glucose"] >= 50) & (df["Glucose"] <= 150))
& ((df["SkinThickness"] >= 20) & (df["SkinThickness"] <= 32)),
"Life_level",
] = "Not_Risk"
df["Life_level"].fillna("At_Risk", inplace=True)
sbn.histplot(df["BMI"])
# I categorized the BMI Variable in the ranges I specified
df["NEW_BMI_RANGE"] = pd.cut(
x=df["BMI"],
bins=[-1, 18.5, 24.9, 29.9, 100],
labels=["underweight", "healty", "overweight", "obese"],
)
sbn.histplot(df["BloodPressure"])
# I categorized the BloodPressure Variable in the ranges I specified
df["NEW_BLOODPRESSURE"] = pd.cut(
x=df["BloodPressure"], bins=[-1, 79, 89, 123], labels=["normal", "hs1", "hs2"]
)
df[cat_cols] = df[cat_cols].astype("category")
df_dummy = pd.get_dummies(df, drop_first=True)
df_dummy.head()
# ### Scaling
# Scaling is the process of scaling data features. Data features can be measured in different ranges or units, which can be important for machine learning algorithms. For example, suppose there are two features in a dataset: "age" and "income". Age values can be measured in a range of 0-100, while income values can vary from thousands of dollars to hundreds of thousands of dollars. In this case, machine learning algorithms may give more importance to the income feature and ignore the effect of the age feature. Therefore, when scaling data features, each feature is transformed to be on the same scale. This process allows the data set to be properly analyzed and more accurate predictions to be made. Scaling can be performed using different methods such as standardization, normalization, or min-max scaling.
#
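# A minimal sketch contrasting standardization with min-max scaling on a toy column (only standardization is applied to the real data below; the min-max version is shown purely for illustration):
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
toy = np.array([[1.0], [5.0], [10.0]])
print(StandardScaler().fit_transform(toy).ravel())  # zero mean, unit variance
print(MinMaxScaler().fit_transform(toy).ravel())  # rescaled to the [0, 1] range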
X_scaled = StandardScaler().fit_transform(df_dummy[num_cols])
df_dummy[num_cols] = pd.DataFrame(X_scaled, columns=df_dummy[num_cols].columns)
y = df["Outcome"]
x = df_dummy.drop("Outcome_1", axis=1)
# ## Create Models
def base_models(X, y, scoring="accuracy"):
print("Base Models....")
classifiers = [
("LR", LogisticRegression()),
("KNN", KNeighborsClassifier()),
("SVC", SVC()),
("CART", DecisionTreeClassifier()),
("RF", RandomForestClassifier()),
("Adaboost", AdaBoostClassifier()),
("GBM", GradientBoostingClassifier()),
("XGBoost", XGBClassifier(use_label_encoder=False, eval_metric="logloss")),
("LightGBM", LGBMClassifier()),
# ('CatBoost', CatBoostClassifier(verbose=False))
]
for name, classifier in classifiers:
cv_results = cross_validate(classifier, X, y, cv=5, scoring=scoring)
print(f"{scoring}: {round(cv_results['test_score'].mean(), 4)} ({name}) ")
knn_params = {"n_neighbors": range(2, 50)}
cart_params = {"max_depth": range(1, 20), "min_samples_split": range(2, 30)}
rf_params = {
"max_depth": [8, 15, None],
"max_features": [5, 7, "auto"],
"min_samples_split": [15, 20],
"n_estimators": [200, 300],
}
xgboost_params = {
"learning_rate": [0.1, 0.01],
"max_depth": [5, 8],
"n_estimators": [100, 200],
"colsample_bytree": [0.5, 1],
}
lightgbm_params = {
"learning_rate": [0.01, 0.1],
"n_estimators": [300, 500],
"colsample_bytree": [0.7, 1],
}
classifiers = [
("KNN", KNeighborsClassifier(), knn_params),
("CART", DecisionTreeClassifier(), cart_params),
("RF", RandomForestClassifier(), rf_params),
(
"XGBoost",
XGBClassifier(use_label_encoder=False, eval_metric="logloss"),
xgboost_params,
),
("LightGBM", LGBMClassifier(), lightgbm_params),
]
def hyperparameter_optimization(X, y, cv=5, scoring="accuracy"):
print("Hyperparameter Optimization....")
best_models = {}
for name, classifier, params in classifiers:
print(f"########## {name} ##########")
cv_results = cross_validate(classifier, X, y, cv=cv, scoring=scoring)
print(f"{scoring} (Before): {round(cv_results['test_score'].mean(), 4)}")
gs_best = GridSearchCV(classifier, params, cv=cv, n_jobs=-1, verbose=False).fit(
X, y
)
final_model = classifier.set_params(**gs_best.best_params_)
cv_results = cross_validate(final_model, X, y, cv=cv, scoring=scoring)
print(f"{scoring} (After): {round(cv_results['test_score'].mean(), 4)}")
print(f"{name} best params: {gs_best.best_params_}", end="\n\n")
best_models[name] = final_model
return best_models
def voting_classifier(best_models, X, y):
print("Voting Classifier...")
voting_clf = VotingClassifier(
estimators=[
("KNN", best_models["KNN"]),
("RF", best_models["RF"]),
("LightGBM", best_models["LightGBM"]),
],
voting="soft",
).fit(X, y)
cv_results = cross_validate(
voting_clf, X, y, cv=3, scoring=["accuracy", "f1", "roc_auc"]
)
print(f"Accuracy: {cv_results['test_accuracy'].mean()}")
print(f"F1Score: {cv_results['test_f1'].mean()}")
print(f"ROC_AUC: {cv_results['test_roc_auc'].mean()}")
return voting_clf
def fit(x, y):
base_models(x, y)
best_models = hyperparameter_optimization(x, y)
voting_clf = voting_classifier(best_models, x, y)
joblib.dump(voting_clf, "voting_clf.pkl")
return voting_clf, best_models
voting_clf, best_models = fit(x, y)
# lightGBM gave the best results
voting_clf
lgbm = best_models["LightGBM"].fit(x, y)
# ### Feature Importances
feature_imp = pd.DataFrame({"Value": lgbm.feature_importances_, "Feature": x.columns})
plt.figure(figsize=(10, 10))
sbn.set(font_scale=1)
sbn.barplot(
x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False)
)
plt.xlabel("Variable Importance Level ")
plt.show()
# ### ROC (Receiver Operating Characteristic) Curve
logit_roc_auc = roc_auc_score(y, lgbm.predict(x))
fpr, tpr, thresholds = roc_curve(y, lgbm.predict_proba(x)[:, 1])
plt.figure()
plt.plot(fpr, tpr, color="blue", label="ROC curve")
plt.plot([0, 1], [0, 1], color="red", linestyle="--", label="Random guess")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend(loc="lower right")  # show the labels defined above
plt.show()
# ### Conclusion
# Thank you for reading my notebook and for your votes. If you like it, I will be glad if you do not forget to support me :)
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Machine Learning for University Admission Prediction **Using Linear Regression**
# **Importing Libraries and dataset**
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/graduate-admissions/Admission_Predict.csv")
df.head()
df.drop(labels=["Serial No."], axis=1, inplace=True)
df
df.columns
# **Exploratory Data Analysis**
df.isnull().sum()
df.info()
df.describe()
df_university = df.groupby(by=df["University Rating"]).mean()
# **Data Visualization**
df.hist(bins=30, figsize=(20, 20))
sns.pairplot(df)
corr_matrix = df.corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr_matrix, annot=True)
plt.show()
# **Training and Testing Data**
# > with scaling of the dataset
X = df.drop(columns=["Chance of Admit "])
y = df["Chance of Admit "]
X.shape
y.shape
X = np.array(X)
y = np.array(y)
y = y.reshape(-1, 1)
y.shape
from sklearn.preprocessing import StandardScaler, MinMaxScaler
Scaler_X = StandardScaler()
X = Scaler_X.fit_transform(X)
Scaler_y = StandardScaler()
y = Scaler_y.fit_transform(y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# **Train Linear regression model**
#
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, accuracy_score
LinearRegression_model = LinearRegression()
LinearRegression_model.fit(X_train, y_train)
# .score() for a regression model returns R^2 on the test set (not classification accuracy)
accuracy_LinearRegression = LinearRegression_model.score(X_test, y_test)
accuracy_LinearRegression
# **Regression Model KPI**
#
y_predict = LinearRegression_model.predict(X_test)
plt.plot(y_test, y_predict, ".")
y_predict_orig = Scaler_y.inverse_transform(y_predict)
y_test_orig = Scaler_y.inverse_transform(y_test)
plt.plot(y_test_orig, y_predict_orig, ".")
k = X_test.shape[1]
n = len(X_test)
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from math import sqrt
RMSE = float(format(np.sqrt(mean_squared_error(y_test_orig, y_predict_orig)), ".3f"))
MSE = mean_squared_error(y_test_orig, y_predict_orig)
MAE = mean_absolute_error(y_test_orig, y_predict_orig)
r2 = r2_score(y_test_orig, y_predict_orig)
adj_r2 = 1 - (1 - r2) * (n - 1) / (n - k - 1)
print("RMSE=", RMSE, "\nMSE=", MSE, "\nMAE=", MAE, "\nr2=", r2, "\nadj_r2=", adj_r2)
|
# # Graph Neural Networks on Competition Data
# This notebook is a copy of graphnet-example and contains everything needed to tinker with GraphNeT on the competition data. It includes
# 1. Installation instructions for GraphNeT
# 2. Code for data conversion
# 3. Snippets for training dynedge similarly to whats shown in the JINST paper
# 4. A pre-trained dynedge on batch 1 to 50.
# 5. Snippets for inference and evaluation of results.
# ## Installing GraphNeT
# You can find the official installation instructions for GraphNeT [here](https://github.com/graphnet-team/graphnet#gear--install)
# This code contains a few extra steps to get the library installed in a Kaggle notebook. It will copy a recent version of graphnet and its dependencies to the working disk and install the GPU version of GraphNeT in /kaggle/working/software.
# Move software to working disk
# Install dependencies
# Install GraphNeT
# Append to PATH
import sys
sys.path.append("/kaggle/working/software/graphnet/src")
print("Finished installing graphnet.")
import graphnet
# ## Converting The Parquet Files to SQLite
# The majority of functionality is tied to the SQLite data format. Therefore, to make GraphNeT compatible with the data provided in this competition, a small converter was included that reads a selection of batch_id's and writes them to a single database. The database will contain two tables
# * meta_table : Contains the information associated with train_meta_data.parquet
# * pulse_table : Contains the information associated with the batch_n.parquet files, including detector geometry.
# Both tables are indexed on *event_id* for efficiency. This allows you to extract information from the databases by simply writing
# ```python
# import pandas as pd
# import sqlite3
# my_event_id = 32
# my_database = '/kaggle/working/sqlite/batch_01.db'
# with sqlite3.connect(my_database) as conn:
# # extracts meta data for event
# meta_query = f'SELECT * FROM meta_table WHERE event_id ={my_event_id}'
# meta_data = pd.read_sql(meta_query,conn)
#
# # extracts pulses / detector response for event
# pulse_query = f'SELECT * FROM pulse_table WHERE event_id ={my_event_id}'
#     pulse_data = pd.read_sql(pulse_query, conn)
# ```
# The upside of the SQLite format is that you only have the events you want in memory. Downside is ... the conversion :-)
#
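# A small extension of the snippet above (a sketch, assuming the same database path): several event_ids can be pulled in a single query with an IN clause, so only those events are ever loaded into memory.
# ```python
# import pandas as pd
# import sqlite3
# my_event_ids = (32, 33, 34)
# my_database = '/kaggle/working/sqlite/batch_01.db'
# with sqlite3.connect(my_database) as conn:
#     pulse_query = f'SELECT * FROM pulse_table WHERE event_id IN {my_event_ids}'
#     pulse_data = pd.read_sql(pulse_query, conn)
# ```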
import pyarrow.parquet as pq
import sqlite3
import pandas as pd
import sqlalchemy
from tqdm import tqdm
import os, shutil
from typing import Any, Dict, List, Optional
import numpy as np
from graphnet.data.sqlite.sqlite_utilities import create_table
def load_input(meta_batch: pd.DataFrame, input_data_folder: str) -> pd.DataFrame:
"""
Will load the corresponding detector readings associated with the meta data batch.
"""
batch_id = pd.unique(meta_batch["batch_id"])
assert (
len(batch_id) == 1
), "contains multiple batch_ids. Did you set the batch_size correctly?"
detector_readings = pd.read_parquet(
path=f"{input_data_folder}/batch_{batch_id[0]}.parquet"
)
sensor_positions = geometry_table.loc[
detector_readings["sensor_id"], ["x", "y", "z"]
]
sensor_positions.index = detector_readings.index
for column in sensor_positions.columns:
if column not in detector_readings.columns:
detector_readings[column] = sensor_positions[column]
detector_readings["auxiliary"] = detector_readings["auxiliary"].replace(
{True: 1, False: 0}
)
return detector_readings.reset_index()
def add_to_table(
database_path: str,
df: pd.DataFrame,
table_name: str,
is_primary_key: bool,
) -> None:
"""Writes meta data to sqlite table.
Args:
database_path (str): the path to the database file.
df (pd.DataFrame): the dataframe that is being written to table.
table_name (str, optional): The name of the meta table. Defaults to 'meta_table'.
is_primary_key(bool): Must be True if each row of df corresponds to a unique event_id. Defaults to False.
"""
try:
create_table(
columns=df.columns,
database_path=database_path,
table_name=table_name,
integer_primary_key=is_primary_key,
index_column="event_id",
)
except sqlite3.OperationalError as e:
if "already exists" in str(e):
pass
else:
raise e
engine = sqlalchemy.create_engine("sqlite:///" + database_path)
df.to_sql(table_name, con=engine, index=False, if_exists="append", chunksize=200000)
engine.dispose()
return
def convert_to_sqlite(
meta_data_path: str,
database_path: str,
input_data_folder: str,
batch_size: int = 200000,
batch_ids: Optional[List[int]] = None,
) -> None:
"""Converts a selection of the Competition's parquet files to a single sqlite database.
Args:
meta_data_path (str): Path to the meta data file.
batch_size (int): the number of rows extracted from meta data file at a time. Keep low for memory efficiency.
database_path (str): path to database. E.g. '/my_folder/data/my_new_database.db'
input_data_folder (str): folder containing the parquet input files.
batch_ids (List[int]): The batch_ids you want converted. Defaults to None (all batches will be converted)
"""
if batch_ids is None:
        batch_ids = np.arange(1, 661, 1).tolist()
else:
assert isinstance(batch_ids, list), "Variable 'batch_ids' must be list."
if not database_path.endswith(".db"):
database_path = database_path + ".db"
meta_data_iter = pq.ParquetFile(meta_data_path).iter_batches(batch_size=batch_size)
batch_id = 1
converted_batches = []
progress_bar = tqdm(total=len(batch_ids))
if batch_ids == [661]: # particular case of test batch (only one and numbered 661)
batch_id = 661
for meta_data_batch in meta_data_iter:
if batch_id in batch_ids:
meta_data_batch = meta_data_batch.to_pandas()
add_to_table(
database_path=database_path,
df=meta_data_batch,
table_name="meta_table",
is_primary_key=True,
)
pulses = load_input(
meta_batch=meta_data_batch, input_data_folder=input_data_folder
)
del meta_data_batch # memory
add_to_table(
database_path=database_path,
df=pulses,
table_name="pulse_table",
is_primary_key=False,
)
del pulses # memory
progress_bar.update(1)
converted_batches.append(batch_id)
batch_id += 1
if len(batch_ids) == len(converted_batches):
break
progress_bar.close()
del meta_data_iter # memory
print(f"Conversion Complete!. Database available at\n {database_path}")
# This notebook comes with both batch 1 and 51 converted to sqlite databases, so you don't have to convert them yourself. They were produced by running the following code:
# ```python
# input_data_folder = '/kaggle/input/icecube-neutrinos-in-deep-ice/train'
# geometry_table = pd.read_csv('/kaggle/input/icecube-neutrinos-in-deep-ice/sensor_geometry.csv')
# meta_data_path = '/kaggle/input/icecube-neutrinos-in-deep-ice/train_meta.parquet'
# #batch_1
# database_path = '/kaggle/working/batch_1'
# convert_to_sqlite(meta_data_path,
# database_path=database_path,
# input_data_folder=input_data_folder,
# batch_ids = [1])
# #batch_51
# database_path = '/kaggle/working/batch_51'
# convert_to_sqlite(meta_data_path,
# database_path=database_path,
# input_data_folder=input_data_folder,
# batch_ids = [51])
# ```
# You can convert multiple batches into a single database by adjusting *batch_ids*. Notice that the compression of SQLite is far inferior to parquet: the entire competition dataset will take up more than 1.5 TB of disk space in SQLite.
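# For example (illustrative only, reusing `meta_data_path` and `input_data_folder` from the snippet above), batches 1 to 3 could be written into one database like this:
# ```python
# convert_to_sqlite(meta_data_path,
#                   database_path='/kaggle/working/batch_1_to_3',
#                   input_data_folder=input_data_folder,
#                   batch_ids=[1, 2, 3])
# ```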
# Instead of producing these databases again, the next cell will copy them into /kaggle/working which is a faster disk.
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
filepath = os.path.join(dirname, filename)
if ".db" in filepath:
src_path = filepath
dest_path = f"/kaggle/working/{filename}"
shutil.copy(src_path, dest_path)
# ## Defining A Selection
# The [SQLiteDataset class](https://github.com/graphnet-team/graphnet/blob/main/src/graphnet/data/sqlite/sqlite_dataset.py) is essentially a PyTorch Dataset (read more [here](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html)) whose `__getitem__` method extracts a single event at a time from the specified database. If a so-called *selection* is specified, only the events in the selection are used for the dataset - that allows us to sub-sample the dataset for training.
# The following few cells introduce a simple selection based on the number of pulses. This selection will then be used for training a GNN later.
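# Once a selection csv has been produced (see the cells below), turning it into the list of event_ids that the dataloaders expect is a one-liner, e.g. (illustrative, assuming the file written by `make_selection` with the default threshold):
# ```python
# selection = pd.read_csv('train_selection_max_200_pulses.csv')['event_id'].tolist()
# ```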
from sklearn.model_selection import train_test_split
def make_selection(df: pd.DataFrame, pulse_threshold: int = 200) -> None:
"""Creates a validation and training selection (20 - 80). All events in both selections satisfies n_pulses <= 200 by default."""
n_events = np.arange(0, len(df), 1)
train_selection, validate_selection = train_test_split(
n_events, shuffle=True, random_state=42, test_size=0.20
)
df["train"] = 0
df["validate"] = 0
df["train"][train_selection] = 1
df["validate"][validate_selection] = 1
assert len(train_selection) == sum(df["train"])
assert len(validate_selection) == sum(df["validate"])
    # Remove events with many pulses from the training and validation samples (memory)
    df.loc[df["n_pulses"] > pulse_threshold, "train"] = 0
    df.loc[df["n_pulses"] > pulse_threshold, "validate"] = 0
for selection in ["train", "validate"]:
df.loc[df[selection] == 1, :].to_csv(
f"{selection}_selection_max_{pulse_threshold}_pulses.csv"
)
return
def get_number_of_pulses(db: str, event_id: int, pulsemap: str) -> int:
with sqlite3.connect(db) as con:
query = (
f"select event_id from {pulsemap} where event_id = {event_id} limit 20000"
)
data = con.execute(query).fetchall()
return len(data)
def count_pulses(database: str, pulsemap: str) -> pd.DataFrame:
"""Will count the number of pulses in each event and return a single dataframe that contains counts for each event_id."""
with sqlite3.connect(database) as con:
query = "select event_id from meta_table"
events = pd.read_sql(query, con)
counts = {"event_id": [], "n_pulses": []}
for event_id in tqdm(events["event_id"]):
a = get_number_of_pulses(database, event_id, pulsemap)
counts["event_id"].append(event_id)
counts["n_pulses"].append(a)
df = pd.DataFrame(counts)
df.to_csv("counts.csv")
return df
# ## Training DynEdge
# Now that both the database and the selection are ready, everything is in place to begin training. DynEdge is a GNN implemented in GraphNeT - it represents IceCube events as 3D point clouds and leverages techniques from segmentation analysis in computer vision to reconstruct events. You can find technical details on the model in [this paper](https://iopscience.iop.org/article/10.1088/1748-0221/17/11/P11003). The model and training configuration shown below are nearly identical to what is presented in the paper. Note that this configuration was originally tuned for low-energy events, so some adjustments might improve performance here.
from pytorch_lightning.callbacks import EarlyStopping
from torch.optim.adam import Adam
from graphnet.data.constants import FEATURES, TRUTH
from graphnet.models import StandardModel
from graphnet.models.detector.icecube import IceCubeKaggle
from graphnet.models.gnn import DynEdge
from graphnet.models.graph_builders import KNNGraphBuilder
from graphnet.models.task.reconstruction import (
DirectionReconstructionWithKappa,
ZenithReconstructionWithKappa,
AzimuthReconstructionWithKappa,
)
from graphnet.training.callbacks import ProgressBar, PiecewiseLinearLR
from graphnet.training.loss_functions import VonMisesFisher3DLoss, VonMisesFisher2DLoss
from graphnet.training.labels import Direction
from graphnet.training.utils import make_dataloader
from graphnet.utilities.logging import get_logger
from pytorch_lightning import Trainer
import pandas as pd
print("Finished with imports.")
logger = get_logger()
def build_model(config: Dict[str, Any], train_dataloader: Any) -> StandardModel:
"""Builds GNN from config"""
# Building model
detector = IceCubeKaggle(
graph_builder=KNNGraphBuilder(nb_nearest_neighbours=8),
)
gnn = DynEdge(
nb_inputs=detector.nb_outputs,
global_pooling_schemes=["min", "max", "mean"],
)
if config["target"] == "direction":
task = DirectionReconstructionWithKappa(
hidden_size=gnn.nb_outputs,
target_labels=config["target"],
loss_function=VonMisesFisher3DLoss(),
)
prediction_columns = [
config["target"] + "_x",
config["target"] + "_y",
config["target"] + "_z",
config["target"] + "_kappa",
]
additional_attributes = ["zenith", "azimuth", "event_id"]
graph_model = StandardModel(
detector=detector,
gnn=gnn,
tasks=[task],
optimizer_class=Adam,
optimizer_kwargs={"lr": 1e-03, "eps": 1e-03},
scheduler_class=PiecewiseLinearLR,
scheduler_kwargs={
"milestones": [
0,
len(train_dataloader) / 2,
len(train_dataloader) * config["fit"]["max_epochs"],
],
"factors": [1e-02, 1, 1e-02],
},
scheduler_config={
"interval": "step",
},
)
graph_model.prediction_columns = prediction_columns
graph_model.additional_attributes = additional_attributes
    # The StandardModel is trained later via model.fit in train_dynedge_from_scratch,
    # so build_model only constructs and returns it.
    return graph_model
def load_pretrained_model(
config: Dict[str, Any],
state_dict_path: str = "/kaggle/input/dynedge-pretrained/dynedge_pretrained_batch_1_to_50/state_dict.pth",
) -> StandardModel:
train_dataloader, _ = make_dataloaders(config=config)
model = build_model(config=config, train_dataloader=train_dataloader)
model.load_state_dict(state_dict_path)
model.prediction_columns = [
config["target"] + "_x",
config["target"] + "_y",
config["target"] + "_z",
config["target"] + "_kappa",
]
model.additional_attributes = ["zenith", "azimuth", "event_id"]
return model
def make_dataloaders(config: Dict[str, Any]) -> List[Any]:
"""Constructs training and validation dataloaders for training with early stopping."""
train_dataloader = make_dataloader(
db=config["path"],
selection=pd.read_csv(config["train_selection"])[config["index_column"]]
.ravel()
.tolist(),
pulsemaps=config["pulsemap"],
features=features,
truth=truth,
batch_size=config["batch_size"],
num_workers=config["num_workers"],
shuffle=True,
labels={"direction": Direction()},
index_column=config["index_column"],
truth_table=config["truth_table"],
)
validate_dataloader = make_dataloader(
db=config["path"],
selection=pd.read_csv(config["validate_selection"])[config["index_column"]]
.ravel()
.tolist(),
pulsemaps=config["pulsemap"],
features=features,
truth=truth,
batch_size=config["batch_size"],
num_workers=config["num_workers"],
shuffle=False,
labels={"direction": Direction()},
index_column=config["index_column"],
truth_table=config["truth_table"],
)
return train_dataloader, validate_dataloader
def train_dynedge_from_scratch(config: Dict[str, Any]) -> StandardModel:
"""Builds and trains GNN according to config."""
logger.info(f"features: {config['features']}")
logger.info(f"truth: {config['truth']}")
# archive = os.path.join(config['base_dir'], "train_model_without_configs")
# run_name = f"dynedge_{config['target']}_{config['run_name_tag']}"
train_dataloader, validate_dataloader = make_dataloaders(config=config)
model = build_model(config, train_dataloader)
# Training model
callbacks = [
EarlyStopping(
monitor="val_loss",
patience=config["early_stopping_patience"],
),
ProgressBar(),
]
model.fit(
train_dataloader, validate_dataloader, callbacks=callbacks, **config["fit"]
)
return model
def inference(model, config: Dict[str, Any]) -> pd.DataFrame:
"""Applies model to the database specified in config['inference_database_path'] and saves results to disk."""
# Make Dataloader
test_dataloader = make_dataloader(
db=config["inference_database_path"],
selection=None, # Entire database
pulsemaps=config["pulsemap"],
features=features,
truth=truth,
batch_size=config["batch_size"],
num_workers=config["num_workers"],
shuffle=False,
labels={"direction": Direction()},
index_column=config["index_column"],
truth_table=config["truth_table"],
)
# Get predictions
results = model.predict_as_dataframe(
gpus=[0],
dataloader=test_dataloader,
prediction_columns=model.prediction_columns,
additional_attributes=model.additional_attributes,
)
return results
# Constants
features = FEATURES.KAGGLE
truth = TRUTH.KAGGLE
# Configuration
config = {
"path": "",
"inference_database_path": "/kaggle/working/batch_51.db",
"pulsemap": "pulse_table",
"truth_table": "meta_table",
"features": features,
"truth": truth,
"index_column": "event_id",
"run_name_tag": "my_example",
"batch_size": 200,
"num_workers": 2,
"target": "direction",
"early_stopping_patience": 5,
"fit": {
"max_epochs": 50,
"gpus": [0],
"distribution_strategy": None,
},
"train_selection": f"/kaggle/working/train_selection_max_180_pulses.csv",
"validate_selection": f"/kaggle/working/validate_selection_max_180_pulses.csv",
"test_selection": None,
"base_dir": "training",
}
# ## Inference & Evaluation
# With a trained model loaded into memory, we can now apply the model to a batch. The following cells will start inference (or load in a csv with predictions, if you're in a hurry) and plot the results.
def convert_to_3d(df: pd.DataFrame) -> pd.DataFrame:
"""Converts zenith and azimuth to 3D direction vectors"""
df["true_x"] = np.cos(df["azimuth"]) * np.sin(df["zenith"])
df["true_y"] = np.sin(df["azimuth"]) * np.sin(df["zenith"])
df["true_z"] = np.cos(df["zenith"])
return df
def calculate_angular_error(df: pd.DataFrame) -> pd.DataFrame:
"""Calculates the opening angle (angular error) between true and reconstructed direction vectors"""
df["angular_error"] = np.arccos(
df["true_x"] * df["direction_x"]
+ df["true_y"] * df["direction_y"]
+ df["true_z"] * df["direction_z"]
)
return df
# # Running all functions for 1 to 3 training batches
import matplotlib.pyplot as plt
import pandas as pd
pulsemap = "pulse_table"
database = "/kaggle/working/batch"
scores = []
for dirname, _, filenames in os.walk("/kaggle/working"):
for filename in filenames:
filepath = os.path.join(dirname, filename)
if database in filepath:
db_filepath = filepath
print("\n", db_filepath)
            # Counting pulses, cleaning data to eliminate outliers, and creating train/validation selections
            # Three csv files are produced: the training and validation selections, and a third file called *counts.csv*.
df = count_pulses(db_filepath, pulsemap)
make_selection(df=df, pulse_threshold=180)
# Plotting pulses filtering
fig = plt.figure(figsize=(6, 4), constrained_layout=True)
plt.hist(
df["n_pulses"],
histtype="step",
label="batch_1",
bins=np.arange(0, 400, 1),
)
plt.xlabel("# of Pulses", size=15)
plt.xticks(size=15)
plt.yticks(size=15)
plt.plot(
                np.repeat(180, 2),
[0, 4000],
label=f'Selection\n{np.round((sum(df["n_pulses"]<= 180)/len(df))*100, 1)} % pass',
)
plt.legend(frameon=False, fontsize=15)
print(
f'Event with highest number of pulses counted: {df["n_pulses"].max()}'
)
# Train model
config["path"] = db_filepath
batch_number = int(filename.split("_")[1].split(".")[0])
model_path = f"/kaggle/working/model{batch_number}.pth"
model_state_path = f"/kaggle/working/state_dict{batch_number}.pth"
model = train_dynedge_from_scratch(config=config)
# Save model
model.save(model_path)
model.save_state_dict(model_state_path)
# Inference
results = inference(model, config)
results1 = convert_to_3d(results)
# Scoring
results2 = calculate_angular_error(results1)
display(results2)
logger.info(f"Writing results to /kaggle/working")
results2.to_csv(f"results{batch_number}.csv")
score = np.round(results2["angular_error"].mean(), 2)
scores.append(dict(batch_id=batch_number, scores=score))
print(f"Model model{batch_number} score:", score)
# Angular error distribution
cut_threshold = 0.5
fig = plt.figure(figsize=(6, 6))
plt.hist(
results2["angular_error"][
1 / np.sqrt(results2["direction_kappa"]) <= cut_threshold
],
bins=np.arange(0, np.pi * 2, 0.05),
histtype="step",
label=f'sigma <= {cut_threshold}: {np.round(results2["angular_error"][1/np.sqrt(results2["direction_kappa"]) <= cut_threshold].mean(),2)}',
)
plt.hist(
results2["angular_error"][
1 / np.sqrt(results2["direction_kappa"]) > cut_threshold
],
bins=np.arange(0, np.pi * 2, 0.05),
histtype="step",
label=f'sigma > {cut_threshold}: {np.round(results2["angular_error"][1/np.sqrt(results2["direction_kappa"]) > cut_threshold].mean(),2)}',
)
plt.xlabel("Angular Error [rad.]", size=15)
plt.ylabel("Counts", size=15)
plt.title(f"Angular Error Distribution (Batch {batch_number})", size=15)
            plt.legend(frameon=False, fontsize=15)
# Saving batch number vs. score to a csv file
scores_df = pd.DataFrame(scores)
scores_df.to_csv("scores.csv")
# In the cell below, you can choose between training dynedge from scratch on the batch database or loading in a pretrained model that has trained on batches 1 to 50.
# Train from scratch (slow) - remember to save it!
# model = train_dynedge_from_scratch(config = config)
# model.save(model_path)
# model.save_state_dict(model_state_path)
# Load state-dict from pre-trained model (faster)
# model = load_pretrained_model(config = config)
# Inference
# results = inference(model, config)
# display(results)
# fig = plt.figure(figsize = (6,6))
# plt.hist(results['angular_error'],
# bins = np.arange(0,np.pi*2, 0.05),
# histtype = 'step',
# label = f'mean angular error: {np.round(results["angular_error"].mean(),2)}')
# plt.xlabel('Angular Error [rad.]', size = 15)
# plt.ylabel('Counts', size = 15)
# plt.title('Angular Error Distribution (Batch 51)', size = 15)
# plt.legend(frameon = False, fontsize = 15)
|
import numpy as np
import pandas as pd
import os
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense
import matplotlib.pyplot as plt
data = pd.read_csv("../input/nlp-getting-started/train.csv")
data.isnull().sum(axis=0)
maxlen = 100
training_samples = 200
validation_samples = 10000
max_words = 10000
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(data.text)
sequences = tokenizer.texts_to_sequences(data.text)
word_index = tokenizer.word_index
len(word_index)
data_text = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(data.target)
labels
indices = np.arange(data_text.shape[0])
np.random.shuffle(indices)
data_text = data_text[indices]
labels = labels[indices]
x_train = data_text[:training_samples]
y_train = labels[:training_samples]
x_val = data_text[training_samples : training_samples + validation_samples]
y_val = labels[training_samples : training_samples + validation_samples]
glove_dir = "../input/glove-global-vectors-for-word-representation"
embedding_index = {}
f = open(os.path.join(glove_dir, "glove.6B.200d.txt"), encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embedding_index[word] = coefs
f.close()
embedding_dim = 200
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
if i < max_words:
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.summary()
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["acc"])
history = model.fit(
x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val)
)
acc = history.history["acc"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.legend()
plt.title("Glove")
plt.show()
|
# # XGBoost Baseline - LB 0.678
# In this notebook we present an XGBoost baseline. We train GroupKFold models for each of the 18 questions. Our CV score is 0.678. We infer the test set using one of our KFold models. We can improve our CV and LB by engineering more features for our XGBoost and/or trying different models (other ML models, RNNs, or Transformers). We can also improve our LB by using more KFold models, or by training one model on all the data (with the hyperparameters found from our KFold cross validation).
# **UPDATE** On March 20 2023, Kaggle doubled the size of train data. Therefore we updated this notebook to avoid memory error. We accomplish this by reading train data in chunks and feature engineering in chunks. Note that another way to avoid memory error is to use two notebooks. Train models in one notebook that has 32GB RAM (and save models), and then submit the required 8GB RAM notebook (with loaded models) as a second notebook. (Discussion [here][1]).
# [1]: https://www.kaggle.com/competitions/predict-student-performance-from-game-play/discussion/386218
# Use a GPU notebook to train the model faster (and load the model in a CPU notebook for inference, as that is required for the competition): https://www.kaggle.com/competitions/predict-student-performance-from-game-play/discussion/386218. Or think of other solutions.
import pandas as pd, numpy as np, gc
from sklearn.model_selection import KFold, GroupKFold
from xgboost import XGBClassifier
from sklearn.metrics import f1_score
# # Load Train Data and Labels
# On March 20 2023, Kaggle doubled the size of train data (discussion [here][1]). The train data is now 4.7GB! To avoid memory error, we will read the train data in pieces (5 in this notebook) and feature engineer each piece before reading the next piece. This works because feature engineering shrinks the size of each piece.
# [1]: https://www.kaggle.com/competitions/predict-student-performance-from-game-play/discussion/396202
# READ USER ID ONLY --> to get the row count per session for creating chunk sizes.
tmp = pd.read_csv(
    "/kaggle/input/predict-student-performance-from-game-play/train.csv",
    usecols=["session_id"],
)
tmp = tmp.groupby("session_id").session_id.agg("count")
# COMPUTE READS AND SKIPS
PIECES = 5
CHUNK = int(np.ceil(len(tmp) / PIECES))
# Here the data pieces are created
reads = []
skips = [0]
for k in range(PIECES):
a = k * CHUNK
b = (k + 1) * CHUNK
if b > len(tmp):
b = len(tmp)
r = tmp.iloc[a:b].sum()
reads.append(r)
skips.append(skips[-1] + r)
print(f"To avoid memory error, we will read train in {PIECES} pieces of sizes:")
print(reads)
# Reduce Memory Usage
# reference : https://www.kaggle.com/code/arjanso/reducing-dataframe-memory-size-by-65 @ARJANGROEN
def reduce_memory_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
    # The idea here is to check the value range (min/max) of every column and downcast it to the smallest numeric type that can hold it, minimizing memory usage
for col in df.columns:
col_type = df[col].dtype.name
if (col_type != "datetime64[ns]") & (col_type != "category"):
if col_type != "object":
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min > np.iinfo(np.int16).min
and c_max < np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.int32).min
and c_max < np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.int64).min
and c_max < np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
pass
else:
df[col] = df[col].astype("category")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage became: ", mem_usg, " MB")
return df
# LOAD DATA IN PIECES, THEN REDUCE MEMORY USAGE
all_pieces = []
print(f"Loading train as {PIECES} pieces to avoid memory error... ")
for k in range(PIECES):
print(k, ", ", end="")
SKIPS = 0
if k > 0:
SKIPS = range(1, skips[k] + 1)
train = pd.read_csv(
"/kaggle/input/predict-student-performance-from-game-play/train.csv",
nrows=reads[k],
skiprows=SKIPS,
)
df = reduce_memory_usage(train)
all_pieces.append(df)
# CONCATENATE ALL PIECES TO FULL TRAIN DATA
print("\n")
del train
gc.collect()
pre_df = pd.concat(all_pieces, axis=0)
print("Shape of all train data:", pre_df.shape)
pre_df.head()
# LOAD LABEL DATA
# Splits the session id and question into separate columns.
# Note that a unique session_id consists of all the questions corresponding to that session, i.e. one session represents one user completing the game.
# We need to separate this information to verify whether each question is answered correctly in later evaluations.
targets = pd.read_csv(
"/kaggle/input/predict-student-performance-from-game-play/train_labels.csv"
)
targets["session"] = targets.session_id.apply(lambda x: int(x.split("_")[0]))
targets["q"] = targets.session_id.apply(lambda x: int(x.split("_")[-1][1:]))
print(targets.shape)
targets.head()
targets.tail()
# # Exploratory Data Analysis
pre_df.info()
def summary(df):
print(f"data shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["data type"])
summ["#missing"] = df.isnull().sum().values * 100
summ["%missing"] = df.isnull().sum().values / len(df)
summ["#unique"] = df.nunique().values
desc = pd.DataFrame(df.describe(include="all").transpose())
summ["min"] = desc["min"].values
summ["max"] = desc["max"].values
return summ
# Display a summary of the data properties (missing values, unique values, min, max)
summary_table = summary(pre_df)
summary_table
pre_df["event_name"].value_counts()
# Additional analyses can be found, e.g., here: https://www.kaggle.com/code/banfuzhi/detailed-eda-student-perf-from-game-play or https://www.kaggle.com/code/gehallak/why-are-some-sessions-so-long#3.-Session-Length-and-score.
# # Feature Engineer
# We create basic aggregate features. Try creating more features to boost CV and LB - an illustrative sketch of one extra feature is shown right after the function below. The idea for the EVENTS features is from [here][1]
# [1]: https://www.kaggle.com/code/kimtaehun/lightgbm-baseline-with-aggregated-log-data
# A distinction is made between categorical (string), numerical (floats or integers) and event (string).
CATS = ["event_name", "fqid", "room_fqid", "text"]
NUMS = [
"elapsed_time",
"level",
"page",
"room_coor_x",
"room_coor_y",
"screen_coor_x",
"screen_coor_y",
"hover_duration",
]
# https://www.kaggle.com/code/kimtaehun/lightgbm-baseline-with-aggregated-log-data
EVENTS = [
"navigate_click",
"person_click",
"cutscene_click",
"object_click",
"map_hover",
"notification_click",
"map_click",
"observation_click",
"checkpoint",
]
# Based on the columns defined above, we engineer features that are grouped and aggregated per unique session id and level group.
# This results in a single row for every (session id, level group) pair containing the aggregated value of every feature.
# Note that the questions within one session are grouped per level group,
# i.e. the 18 questions are distributed over levels 0-22, which make up a complete session.
def feature_engineer(train):
dfs = []
for c in CATS:
# Define unique number of categorical data per level group within an unique session
tmp = train.groupby(["session_id", "level_group"])[c].agg("nunique")
tmp.name = tmp.name + "_nunique"
dfs.append(tmp)
for c in NUMS:
# Define the mean of the numerical data per level group within an unique session
tmp = train.groupby(["session_id", "level_group"])[c].agg("mean")
tmp.name = tmp.name + "_mean"
dfs.append(tmp)
for c in NUMS:
# Define the std of the numerical data per level group within an unique session
tmp = train.groupby(["session_id", "level_group"])[c].agg("std")
tmp.name = tmp.name + "_std"
dfs.append(tmp)
for c in EVENTS:
train[c] = (train.event_name == c).astype("int8")
for c in EVENTS + ["elapsed_time"]:
# Define the sum of the occurences of an event per level group within an unique session
tmp = train.groupby(["session_id", "level_group"])[c].agg("sum")
tmp.name = tmp.name + "_sum"
dfs.append(tmp)
train = train.drop(EVENTS, axis=1)
df = pd.concat(dfs, axis=1)
df = df.fillna(-1)
df = df.reset_index()
df = df.set_index("session_id")
return df
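# As an illustrative sketch of "more features" (not part of this baseline), the time spent per level group, i.e. max minus min elapsed_time, could be added inside feature_engineer like the other aggregates:
# def elapsed_time_duration(train):
#     grp = train.groupby(["session_id", "level_group"])["elapsed_time"]
#     tmp = grp.agg("max") - grp.agg("min")
#     tmp.name = "elapsed_time_duration"
#     return tmp  # append this Series to `dfs` before the final pd.concat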
# create the features on the full (concatenated) training data and display the head
df = feature_engineer(pre_df)
df.head()
# # Train XGBoost Model
# We train one model for each of the 18 questions. Furthermore, we use data from `level_groups = '0-4'` to train the models for questions 1-3, `level_groups = '5-12'` for questions 4 thru 13, and `level_groups = '13-22'` for questions 14 thru 18, because this is the data we get (to predict the corresponding questions) from Kaggle's inference API during test inference. We can improve our model by saving a user's previous data from earlier `level_groups` and using it to predict future `level_groups` (a sketch of this idea follows below).
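# A hedged sketch of that last idea (not implemented in this notebook): during the inference loop, cache each session's features from earlier level_groups and join them onto the current group's features before predicting. All names here are hypothetical.
# history = {}  # session_id -> {feature_name: value} gathered from earlier level_groups
# def add_history(df_grp, grp):
#     prev = pd.DataFrame.from_dict(
#         {sid: history.get(sid, {}) for sid in df_grp.index}, orient="index"
#     ).reindex(df_grp.index)
#     out = df_grp.join(prev.add_prefix("prev_")).fillna(-1)
#     for sid, row in df_grp.iterrows():  # remember this group's features for later groups
#         history.setdefault(sid, {}).update(row.add_prefix(f"{grp}_").to_dict())
#     return out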
FEATURES = [c for c in df.columns if c != "level_group"]
print("We will train with", len(FEATURES), "features")
ALL_USERS = df.index.unique()
print("We will train with", len(ALL_USERS), "users info")
gkf = GroupKFold(n_splits=5)
oof = pd.DataFrame(data=np.zeros((len(ALL_USERS), 18)), index=ALL_USERS)
models = {}
# COMPUTE CV SCORE WITH 5 GROUP K FOLD
for i, (train_index, test_index) in enumerate(gkf.split(X=df, groups=df.index)):
print("#" * 25)
print("### Fold", i + 1)
print("#" * 25)
xgb_params = {
"objective": "binary:logistic",
"eval_metric": "logloss",
"learning_rate": 0.05,
"max_depth": 4,
"n_estimators": 1000,
"early_stopping_rounds": 50,
"tree_method": "hist",
"subsample": 0.8,
"colsample_bytree": 0.4,
"use_label_encoder": False,
}
# ITERATE THRU QUESTIONS 1 THRU 18
for t in range(1, 19):
# USE THIS TRAIN DATA WITH THESE QUESTIONS
if t <= 3:
grp = "0-4"
elif t <= 13:
grp = "5-12"
elif t <= 22:
grp = "13-22"
# TRAIN DATA
train_x = df.iloc[train_index]
train_x = train_x.loc[train_x.level_group == grp]
train_users = train_x.index.values
train_y = targets.loc[targets.q == t].set_index("session").loc[train_users]
# VALID DATA
valid_x = df.iloc[test_index]
valid_x = valid_x.loc[valid_x.level_group == grp]
valid_users = valid_x.index.values
valid_y = targets.loc[targets.q == t].set_index("session").loc[valid_users]
# TRAIN MODEL
clf = XGBClassifier(**xgb_params)
clf.fit(
train_x[FEATURES].astype("float32"),
train_y["correct"],
eval_set=[(valid_x[FEATURES].astype("float32"), valid_y["correct"])],
verbose=0,
)
print(f"{t}({clf.best_ntree_limit}), ", end="")
# SAVE MODEL, PREDICT VALID OOF
models[f"{grp}_{t}"] = clf
oof.loc[valid_users, t - 1] = clf.predict_proba(
valid_x[FEATURES].astype("float32")
)[:, 1]
print()
# # Compute CV Score
# We need to convert prediction probabilities into `1s` and `0s`. The competition metric is the F1 score, which is the harmonic mean of precision and recall. Let's find the optimal threshold such that we predict `1` when `p > threshold` and `0` otherwise, maximizing the F1 score.
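# Quick worked example of the harmonic mean behind F1 (illustrative numbers only):
# precision, recall = 0.75, 0.60
# f1 = 2 * precision * recall / (precision + recall)  # = 0.667; macro F1 averages the per-class F1s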
# PUT TRUE LABELS INTO DATAFRAME WITH 18 COLUMNS
true = oof.copy()
for k in range(18):
# GET TRUE LABELS
tmp = targets.loc[targets.q == k + 1].set_index("session").loc[ALL_USERS]
true[k] = tmp.correct.values
# FIND BEST THRESHOLD TO CONVERT PROBS INTO 1s AND 0s
scores = []
thresholds = []
best_score = 0
best_threshold = 0
for threshold in np.arange(0.4, 0.81, 0.01):
print(f"{threshold:.02f}, ", end="")
preds = (oof.values.reshape((-1)) > threshold).astype("int")
m = f1_score(true.values.reshape((-1)), preds, average="macro")
scores.append(m)
thresholds.append(threshold)
if m > best_score:
best_score = m
best_threshold = threshold
import matplotlib.pyplot as plt
# PLOT THRESHOLD VS. F1_SCORE
plt.figure(figsize=(20, 5))
plt.plot(thresholds, scores, "-o", color="blue")
plt.scatter([best_threshold], [best_score], color="blue", s=300, alpha=1)
plt.xlabel("Threshold", size=14)
plt.ylabel("Validation F1 Score", size=14)
plt.title(
f"Threshold vs. F1_Score with Best F1_Score = {best_score:.3f} at Best Threshold = {best_threshold:.3}",
size=18,
)
plt.show()
print("When using optimal threshold...")
for k in range(18):
# COMPUTE F1 SCORE PER QUESTION
m = f1_score(
true[k].values, (oof[k].values > best_threshold).astype("int"), average="macro"
)
print(f"Q{k}: F1 =", m)
# COMPUTE F1 SCORE OVERALL
m = f1_score(
true.values.reshape((-1)),
(oof.values.reshape((-1)) > best_threshold).astype("int"),
average="macro",
)
print("==> Overall F1 =", m)
# Results for Macro F1 are different from micro F1:
# Using micro F1...
print("When using optimal threshold...")
for k in range(18):
# COMPUTE F1 SCORE PER QUESTION
m = f1_score(
true[k].values, (oof[k].values > best_threshold).astype("int"), average="micro"
)
print(f"Q{k}: F1 =", m)
# COMPUTE F1 SCORE OVERALL
m = f1_score(
true.values.reshape((-1)),
(oof.values.reshape((-1)) > best_threshold).astype("int"),
average="micro",
)
print("==> Overall F1 =", m)
# # Infer Test Data
# # IMPORT KAGGLE API
# import jo_wilder
# env = jo_wilder.make_env()
# iter_test = env.iter_test()
# # CLEAR MEMORY
# import gc
# del targets, df, oof, true
# _ = gc.collect()
# limits = {'0-4':(1,4), '5-12':(4,14), '13-22':(14,19)}
# for (test, sample_submission) in iter_test:
# # FEATURE ENGINEER TEST DATA
# df = feature_engineer(test)
# # INFER TEST DATA
# grp = test.level_group.values[0]
# a,b = limits[grp]
# for t in range(a,b):
# clf = models[f'{grp}_{t}']
# p = clf.predict_proba(df[FEATURES].astype('float32'))[0,1]
# mask = sample_submission.session_id.str.contains(f'q{t}')
# sample_submission.loc[mask,'correct'] = int( p > best_threshold )
# env.predict(sample_submission)
# # EDA submission.csv
# df = pd.read_csv('submission.csv')
# print( df.shape )
# df.head()
# print(df.correct.mean())
|
# # Large-Scale Analytical Data Processing
# ## Lecture 02: Multidimensional Data and Analytical Queries
# ## Example Using Pandas
# ## Exercise List
# **Profa. Dra. Cristina Dutra de Aguiar**
# **ICMC/USP**
# This list contains exercises for Lecture 02, entitled Multidimensional Data and Analytical Queries. It is strongly recommended that you answer the exercises before consulting their answers.
# # 1 Loading the Dimension Tables and the Fact Tables
#
import pandas as pd
# creating the DataFrames for the dimension tables
data = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/data.csv"
)
funcionario = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/funcionario.csv"
)
equipe = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/equipe.csv"
)
cargo = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/cargo.csv"
)
cliente = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/cliente.csv"
)
# creating the DataFrames for the fact tables
pagamento = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/pagamento.csv"
)
negociacao = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/negociacao.csv"
)
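# As an illustrative sketch of the analytical (OLAP-style) queries these tables support - the column names "cargo_id", "descricao" and "valor" are assumptions for illustration, not taken from the csv files:
# resultado = (
#     pagamento.merge(cargo, on="cargo_id", how="inner")  # join the fact table with a dimension
#     .groupby("descricao")["valor"]  # roll up along the cargo dimension
#     .sum()
#     .sort_values(ascending=False)
# )
# print(resultado.head())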
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#
# # Table of content
#
# * [1. Introduction](#1)
# - [Problem statement](#1.1)
# - [Data description](#1.2)
#
# * [2. Import libraries](#2)
#
# * [3. Basic Exploration](#3)
# - [Read dataset](#3.1)
# - [Some information](#3.2)
# - [Data transformation](#3.3)
# - [Data visualization](#3.4)
# * [4. Machine Learning model](#4)
#
# * [5 Conclusion](#5)
# * [6 Author Message](#6)
# # Import libraries
import pandas as pd
import numpy as np
import ast
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from datetime import datetime
from dateutil.parser import parse
print("Setup Complete")
#
# # Basic Exploration
# Read dataset
def read_dataset():
file_path = "/kaggle/input/popular-video-games-1980-2023/games.csv"
data = pd.read_csv(file_path, index_col=0)
return data
data = read_dataset()
#
# Some information
data.head()
data.shape
data.info()
data.nunique()
data.duplicated().any()
#
# Data transformation
# >
# Missing data treatment
total_null = data.isnull().sum().sort_values(ascending=False)
percent = ((data.isnull().sum() / data.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total records = ", data.shape[0])
missing_data = pd.concat(
[total_null, percent.round(2)], axis=1, keys=["Total Missing", "In Percent"]
)
missing_data
data[["Rating", "Team", "Summary"]].info()
data["Rating"] = data["Rating"].replace(np.nan, 0.0)
data["Team"] = data["Team"].replace(np.nan, "['Unknown Team']")
data["Summary"] = data["Summary"].replace(np.nan, "Unknown Summary")
total_null = data.isnull().sum().sort_values(ascending=False)
percent = ((data.isnull().sum() / data.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total records = ", data.shape[0])
missing_data = pd.concat(
[total_null, percent.round(2)], axis=1, keys=["Total Missing", "In Percent"]
)
missing_data
# >
# Duplicated data treatment
data[data.duplicated()]
data = data.drop_duplicates().sort_index()
data[data.duplicated()]
# >
# Clean the data
data.head()
data.loc[data["Release Date"] == "releases on TBD"]
# create a datetime object
dt = datetime.now()
# convert the datetime object to a string
dt_str = dt.strftime("%b %d, %Y")
dt_str
data["Release Date"] = data["Release Date"].str.replace("releases on TBD", dt_str)
data["Release Date"] = pd.to_datetime(data["Release Date"], format="%b %d, %Y")
# format the datetime object as a string in the desired format
data["Release Date"] = data["Release Date"].dt.strftime("%Y-%-m-%-d")
# convert the date column to a datetime object
data["Release Date"] = pd.to_datetime(data["Release Date"])
# get the day from the date column
data["Day"] = data["Release Date"].dt.day
data["Month"] = data["Release Date"].dt.month_name()
data["Year"] = data["Release Date"].dt.year
data["Week day"] = data["Release Date"].dt.day_name()
data[["Release Date", "Day", "Month", "Year", "Week day"]].head()
data["Times Listed"] = data["Times Listed"].str.replace("K", "").astype(float) * 1000
data["Number of Reviews"] = (
data["Number of Reviews"].str.replace("K", "").astype(float) * 1000
)
data["Plays"] = data["Plays"].str.replace("K", "").astype(float) * 1000
data["Playing"] = data["Playing"].str.replace("K", "").astype(float) * 1000
data["Backlogs"] = data["Backlogs"].str.replace("K", "").astype(float) * 1000
data["Wishlist"] = data["Wishlist"].str.replace("K", "").astype(float) * 1000
data.describe()
data["Team"] = data["Team"].apply(lambda x: ast.literal_eval(x))
# create a sample DataFrame with a column containing multiple values
df_team = pd.DataFrame({"Title": data["Title"].tolist(), "Team": data["Team"].tolist()})
# use the explode method to transform the 'Team' column
df_team = df_team.explode("Team")
df_team
data["Genres"] = data["Genres"].apply(lambda x: ast.literal_eval(x))
# create a sample DataFrame with a column containing multiple values
df_genres = pd.DataFrame(
{"Title": data["Title"].tolist(), "Genres": data["Genres"].tolist()}
)
# use the explode method to transform the 'Genres' column
df_genres = df_genres.explode("Genres")
df_genres
data = data.drop(["Release Date", "Team", "Genres", "Summary", "Reviews"], axis=1)
data.head()
|
import pandas as pd
import numpy as np
import panel as pn
pn.extension("tabulator")
import dask.dataframe as dd
train = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/train.csv")
test = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/test.csv")
sample_sub = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/sample_submission.csv"
)
stores = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/stores.csv")
items = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/items.csv")
transactions = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/transactions.csv"
)
oil = pd.read_csv("/kaggle/input/favorita-grocery-sales-forecasting/oil.csv")
holiday = pd.read_csv(
"/kaggle/input/favorita-grocery-sales-forecasting/holidays_events.csv"
)
print("Shape of train:", train.shape)
print("Shape of test:", test.shape)
print("Shape of stores:", stores.shape)
print("Shape of items:", items.shape)
print("Shape of transactions:", transactions.shape)
print("Shape of oil:", oil.shape)
print("Shape of holiday:", holiday.shape)
print("size before:", train["date"].memory_usage(deep=True) * 1e-6)
train["date"] = pd.to_datetime(train["date"])
print("size after: ", train["date"].memory_usage(deep=True) * 1e-6)
store_number = (
stores.loc[
(stores["city"] == "Daule")
| (stores["city"] == "Quito")
| (stores["city"] == "Santo Domingo")
]
)["store_nbr"].tolist()
print("Stores which are present in these 3 citites:", "\n", store_number)
item_number = (
items.loc[(items["family"] == "BREAD/BAKERY") | (items["family"] == "DAIRY")]
)["item_nbr"].tolist()
train_subset = train[
train["store_nbr"].isin(store_number) & train["item_nbr"].isin(item_number)
]
print(train_subset.shape)
train_subset.head()
train_subset = pd.merge(train_subset, stores, on="store_nbr", how="left")
train_subset.head()
train_subset = pd.merge(train_subset, items, on="item_nbr", how="left")
train_subset.head()
oil["date"] = pd.to_datetime(oil["date"])
train_subset = pd.merge(train_subset, oil, on="date", how="left")
train_subset.head()
holiday["type"] = holiday["type"].replace(
["Additional", "Bridge", "Event", "Transfer"], "Holiday"
)
mask = holiday["transferred"] == True
holiday["type"][mask] = "Work Day"
print(holiday["type"].value_counts())
holiday["date"] = pd.to_datetime(holiday["date"])
train_subset = pd.merge(train_subset, holiday, on="date", how="left")
train_subset = train_subset.drop(
["locale", "locale_name", "description", "transferred"], axis=1
)
train_subset.head()
train_subset = train_subset.rename(
columns={"type_y": "day_type", "type_x": "type", "dcoilwtico": "oil_price"}
)
train_subset.head()
train_subset.isnull().sum().sort_values(ascending=False)
train_subset["day_type"] = train_subset["day_type"].fillna("Work Day")
train_subset["oil_price"] = train_subset["oil_price"].fillna(axis=0, method="ffill")
train_subset["onpromotion"] = train_subset["onpromotion"].fillna("Not Mentioned")
del oil
del holiday
del items
del stores
train_subset["date"] = pd.to_datetime(train_subset["date"])
train_subset["Month"] = train_subset["date"].dt.strftime("%B")
train_subset["Year"] = train_subset["date"].dt.strftime("%Y")
train_subset
train_subset.to_csv("train_subset.csv", index=False)
|