# importing libraries
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import warnings
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import tensorflow as tf
warnings.filterwarnings("ignore")
train_dir = r"/kaggle/input/leaf-disease-dataset/data_penyakit/dataset/train"
test_dir = r"/kaggle/input/leaf-disease-dataset/data_penyakit/dataset/test"
for d in [train_dir, test_dir]:
filepaths = []
labels = []
classlist = sorted(os.listdir(d))
for klass in classlist:
label = klass.split("__")[1]
classpath = os.path.join(d, klass)
flist = sorted(os.listdir(classpath))
for f in flist:
fpath = os.path.join(classpath, f)
filepaths.append(fpath)
labels.append(label)
Fseries = pd.Series(filepaths, name="filepaths")
Lseries = pd.Series(labels, name="labels")
if d == train_dir:
df = pd.concat([Fseries, Lseries], axis=1)
else:
test_df = pd.concat([Fseries, Lseries], axis=1)
print(df)
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
import os
import cv2
from sklearn.model_selection import train_test_split
from tqdm import tqdm # import tqdm for progress bar
import numpy as np
import cv2
from tqdm import tqdm
import multiprocessing as mp
def conv2d(input_image, filters, kernel_size, activation_func, stride):
input_h, input_w, input_c = input_image.shape
kh, kw = kernel_size
num_filters = filters.shape[-1]
output_h = (input_h - kh) // stride + 1
output_w = (input_w - kw) // stride + 1
output = np.zeros((output_h, output_w, num_filters))
for c in range(num_filters):
for y in range(0, input_h - kh + 1, stride):
for x in range(0, input_w - kw + 1, stride):
output[y // stride, x // stride, c] = np.sum(
input_image[y : y + kh, x : x + kw] * filters[:, :, :, c]
)
return activation_func(output)
def max_pool2d(input_image, pool_size, stride):
input_h, input_w, input_c = input_image.shape
ph, pw = pool_size
output_h = (input_h - ph) // stride + 1
output_w = (input_w - pw) // stride + 1
output = np.zeros((output_h, output_w, input_c))
for y in range(0, input_h - ph + 1, stride):
for x in range(0, input_w - pw + 1, stride):
output[y // stride, x // stride] = np.max(
input_image[y : y + ph, x : x + pw], axis=(0, 1)
)
return output
def relu(x):
return np.maximum(0, x)
def dense(input_layer, units, activation_func):
input_dim = input_layer.shape[-1]
weights = np.random.randn(input_dim, units) * 0.1
bias = np.zeros(
(1, 1, units)
) # add an additional dimension to match expected output shape
output_layer = activation_func(np.dot(input_layer, weights) + bias)
return output_layer
def compute_image_features(filepaths):
driver_img = cv2.imread(filepaths)
driver_img = cv2.resize(driver_img, (100, 100)) / 255
conv1_output = conv2d(
driver_img, conv1_filters, kernel_size=(3, 3), activation_func=relu, stride=1
)
pool1_output = max_pool2d(conv1_output, pool_size=(2, 2), stride=2)
conv2_output = conv2d(
pool1_output, conv2_filters, kernel_size=(3, 3), activation_func=relu, stride=1
)
pool2_output = max_pool2d(conv2_output, pool_size=(2, 2), stride=2)
conv3_output = conv2d(
pool2_output, conv3_filters, kernel_size=(3, 3), activation_func=relu, stride=1
)
pool3_output = max_pool2d(conv3_output, pool_size=(2, 2), stride=2)
flatten_output = pool3_output.flatten()
dense1_output = dense(flatten_output, 256, relu)
dense2_output = dense(dense1_output, 128, relu)
flatten_output = dense2_output.flatten()
return flatten_output
# Define your CNN layers
conv1_filters = np.random.rand(3, 3, 3, 32)
conv2_filters = np.random.rand(3, 3, 32, 64)
conv3_filters = np.random.rand(3, 3, 64, 128)
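# A quick shape sanity check before the full extraction run (an illustrative
# addition using a random dummy image): a 3x3 convolution with stride 1 shrinks
# 100x100 to 98x98, and 2x2 max pooling with stride 2 halves that to 49x49.
_dummy_img = np.random.rand(100, 100, 3)
_c1 = conv2d(_dummy_img, conv1_filters, kernel_size=(3, 3), activation_func=relu, stride=1)
_p1 = max_pool2d(_c1, pool_size=(2, 2), stride=2)
print("sanity check:", _c1.shape, _p1.shape)  # expected: (98, 98, 32) and (49, 49, 32)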
image_features = []
# Parallelize computation using multiprocessing
with mp.Pool(mp.cpu_count()) as pool:
image_features = list(
tqdm(
pool.imap(compute_image_features, df["filepaths"].iloc[:13000]),
total=len(df["filepaths"].iloc[:13000]),
)
)
import csv
with open("hasil_ektraksi-2-1.csv", mode="w", newline="") as f:
writer = csv.writer(f)
for row in image_features:
writer.writerow(row)
# --- next script ---
import os
import numpy as np
import pandas as pd
import random
import cv2
import matplotlib.pyplot as plt
import keras
# Deep learning libraries
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import tensorflow as tf
import seaborn as sns
path_normal = (
"../input/covid19-radiography-database/COVID-19_Radiography_Dataset/Normal/images"
)
path_covid = (
"../input/covid19-radiography-database/COVID-19_Radiography_Dataset/COVID/images"
)
path_lung_opacity = "../input/covid19-radiography-database/COVID-19_Radiography_Dataset/Lung_Opacity/images"
path_viral_pneumonia = "../input/covid19-radiography-database/COVID-19_Radiography_Dataset/Viral Pneumonia/images"
len_normal = len(os.listdir(path_normal))
len_covid = len(os.listdir(path_covid))
len_lung_opacity = len(os.listdir(path_lung_opacity))
len_viral_pneumonia = len(os.listdir(path_viral_pneumonia))
objects = ("Normal", "COVID-19", "Viral pneumonia", "Lung Opacity")
y_pos = np.arange(len(objects))
performance = [len_normal, len_covid, len_viral_pneumonia, len_lung_opacity]
plt.barh(y_pos, performance, align="center", alpha=0.5)
plt.yticks(y_pos, objects)
plt.title("Chest X-ray images")
plt.show()
labels = ["Normal", "COVID-19", "Lung Opacity", "Viral pneumonia"]
img1 = path_normal + "/Normal-1.png"
img2 = path_covid + "/COVID-1994.png"
img3 = path_lung_opacity + "/Lung_Opacity-1.png"
img4 = path_viral_pneumonia + "/Viral Pneumonia-1.png"
imgs = [img1, img2, img3, img4]
fig, ax = plt.subplots(2, 2, figsize=(15, 15))
ax = ax.ravel()
plt.tight_layout()
for i in range(0, 4):
ax[i].imshow(plt.imread(imgs[i]), cmap="gray")
ax[i].set_title(labels[i])
from PIL import Image, ImageOps
def resize_image(imgpath):
img = np.array(
ImageOps.grayscale(Image.open(imgpath)).resize((150, 150), Image.LANCZOS)
)
return img
import os
x_ = list()
y = list()
for i in os.listdir(path_normal):
try:
imgpath = path_normal + "/" + i
img = resize_image(imgpath)
x_.append(img)
y.append(0)
except:
None
for i in os.listdir(path_covid):
try:
imgpath = path_covid + "/" + i
img = resize_image(imgpath)
x_.append(img)
y.append(1)
except:
None
for i in os.listdir(path_lung_opacity):
try:
imgpath = path_lung_opacity + "/" + i
img = resize_image(imgpath)
x_.append(img)
y.append(2)
except:
None
for i in os.listdir(path_viral_pneumonia):
try:
imgpath = path_viral_pneumonia + "/" + i
img = resize_image(imgpath)
x_.append(img)
y.append(3)
except:
None
x_ = np.array(x_)
x_ = np.expand_dims(x_, axis=-1)  # add a channel dimension to match the model's (150, 150, 1) input
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x_, y, test_size=0.2, random_state=20
)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.15, random_state=40
)
from keras.utils import to_categorical
y = to_categorical(y, num_classes=4)
classNames = ["normal", "COVID_19", "lung_opacity", "viral_pneumonia"]
print(classNames)
unique, counts = np.unique(y_train, return_counts=True)
uniqueVal, countsVal = np.unique(y_val, return_counts=True)
dict(zip(unique, counts))
dict(zip(uniqueVal, countsVal))
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import Dense, Flatten
from keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import SGD
def createModel():
_input = Input(shape=(150, 150, 1))
conv1 = Conv2D(filters=16, kernel_size=(3, 3), padding="same", activation="relu")(
_input
)
conv2 = Conv2D(filters=16, kernel_size=(3, 3), padding="same", activation="relu")(
conv1
)
pool1 = MaxPooling2D((2, 2))(conv2)
conv3 = SeparableConv2D(
filters=32, kernel_size=(3, 3), padding="same", activation="relu"
)(pool1)
conv4 = SeparableConv2D(
filters=32, kernel_size=(3, 3), padding="same", activation="relu"
)(conv3)
conv4 = BatchNormalization()(conv4)
pool2 = MaxPooling2D((2, 2))(conv4)
conv5 = SeparableConv2D(
filters=64, kernel_size=(3, 3), padding="same", activation="relu"
)(pool2)
conv6 = SeparableConv2D(
filters=64, kernel_size=(3, 3), padding="same", activation="relu"
)(conv5)
conv6 = BatchNormalization()(conv6)
pool3 = MaxPooling2D((2, 2))(conv6)
conv7 = SeparableConv2D(
filters=128, kernel_size=(3, 3), padding="same", activation="relu"
)(pool3)
conv8 = SeparableConv2D(
filters=128, kernel_size=(3, 3), padding="same", activation="relu"
)(conv7)
conv8 = BatchNormalization()(conv8)
pool4 = MaxPooling2D((2, 2))(conv8)
pool4 = Dropout(rate=0.2)(pool4)
conv9 = SeparableConv2D(
filters=256, kernel_size=(3, 3), padding="same", activation="relu"
)(pool4)
conv10 = SeparableConv2D(
filters=256, kernel_size=(3, 3), padding="same", activation="relu"
)(conv9)
conv10 = BatchNormalization()(conv10)
pool5 = MaxPooling2D((2, 2))(conv10)
pool5 = Dropout(rate=0.2)(pool5)
flat = Flatten()(pool5)
dense1 = Dense(512, activation="relu")(flat)
dropout1 = Dropout(rate=0.7)(dense1)
dense2 = Dense(128, activation="relu")(dropout1)
dropout2 = Dropout(0.5)(dense2)
output = Dense(4, activation="softmax")(dropout2)
model = Model(inputs=_input, outputs=output)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
return model
from keras.utils import to_categorical
# Convert labels to one-hot encoded matrix
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
# Build and train the model
modelFitLRate = createModel()
epochs = 10
history = modelFitLRate.fit(
x_train, y_train, validation_data=(x_val, y_val), batch_size=32, epochs=epochs
)
def showChartEpochAccuracy(history):
# show a nicely formatted classification report
print("[INFO] evaluating network...")
# plot the training loss and accuracy
N = epochs
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), history.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), history.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), history.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy Dataset")
plt.xlabel("Epochs #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.show()
showChartEpochAccuracy(history)
from tensorflow.keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler
sd = []
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = [1, 1]
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get("loss"))
sd.append(step_decay(len(self.losses)))
print("lr:", step_decay(len(self.losses)))
epochs = 10
learning_rate = 0.0001
decay_rate = 5e-6
model = createModel()
adam = tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate, decay=decay_rate)
sgd = tf.keras.optimizers.legacy.SGD(learning_rate=0.0001, momentum=0.9, decay=decay_rate)
model.compile(
loss="mean_squared_error",
optimizer=adam,
metrics=["mean_absolute_error", "accuracy"],
)
def step_decay(losses):
i = float(2 * np.sqrt(np.array(history.losses[-1])))
if i < 0.6:
lrate = 0.001 * 1 / (1 + 0.7 * len(history.losses))
decay_rate = 2e-6
else:
lrate = 0.0001
return lrate
history = LossHistory()
lrate = LearningRateScheduler(step_decay)
myhistory = model.fit(
x_train,
y_train,
validation_data=(x_val, y_val),
batch_size=64,
epochs=epochs,
callbacks=[history, lrate],
verbose=1,
)
# Get predictions
mypredict = model.predict(x_test)
pred = np.argmax(mypredict, axis=-1)
print(pred)
from keras.utils import to_categorical
y_test_one_hot = to_categorical(y_test)
evalute = model.evaluate(x_test, y_test_one_hot)
print("Accuracy: {:.2f}%".format(evalute[2] * 100))
print("Loss: {}".format(evalute[0]))
import matplotlib.pyplot as plt
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(4), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
true_label = np.argmax(true_label)
thisplot[predicted_label].set_color("red")
thisplot[true_label].set_color("blue")
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
img = cv2.resize(img, (128, 128))
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
true_label = np.argmax(true_label)
if predicted_label == true_label:
color = "blue"
else:
color = "red"
plt.xlabel(
"{} {:2.0f}% ({})".format(
classNames[predicted_label],
100 * np.max(predictions_array),
classNames[true_label],
),
color=color,
)
def showResultPrediction(predictions):
num_rows = 4
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
plot_image(i, predictions, y_test_one_hot, x_test)
plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
plot_value_array(i, predictions, y_test_one_hot)
plt.show()
showResultPrediction(mypredict)
# mo
model.summary()
import re
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from kaggle_datasets import KaggleDatasets
from matplotlib import cm
model.layers[16]
IMAGE_SIZE = [150, 150]
def get_img_array(img_path, size=IMAGE_SIZE):
img = keras.preprocessing.image.load_img(img_path, target_size=size, color_mode="grayscale")
# `array` is a float32 NumPy array
array = keras.preprocessing.image.img_to_array(img)
# We add a dimension to transform our array into a "batch"
# of size (1, 150, 150, 1)
array = np.expand_dims(array, axis=0) / 255.0
return array
def make_gradcam_heatmap(img_array, model):
# First, we create a model that maps the input image to the activations
# of the last conv layer
last_conv_layer = model.layers[7]
last_conv_layer_model = keras.Model(model.inputs, last_conv_layer.output)
# Mark the classifying layers
classifier_layers = model.layers[-5:]
# Second, we create a model that maps the activations of the last conv
# layer to the final class predictions
classifier_input = keras.Input(shape=last_conv_layer.output.shape[1:])
x = classifier_input
for classifier_layer in classifier_layers:
x = classifier_layer(x)
classifier_model = keras.Model(classifier_input, x)
# Then, we compute the gradient of the top predicted class for our input image
# with respect to the activations of the last conv layer
with tf.GradientTape() as tape:
# Compute activations of the last conv layer and make the tape watch it
last_conv_layer_output = last_conv_layer_model(img_array)
tape.watch(last_conv_layer_output)
# Compute class predictions
preds = classifier_model(last_conv_layer_output)
top_pred_index = tf.argmax(preds[0])
top_class_channel = preds[:, top_pred_index]
# This is the gradient of the top predicted class with regard to
# the output feature map of the last conv layer
grads = tape.gradient(top_class_channel, last_conv_layer_output)
# This is a vector where each entry is the mean intensity of the gradient
# over a specific feature map channel
pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
# We multiply each channel in the feature map array
# by "how important this channel is" with regard to the top predicted class
last_conv_layer_output = last_conv_layer_output.numpy()[0]
pooled_grads = pooled_grads.numpy()
for i in range(pooled_grads.shape[-1]):
last_conv_layer_output[:, :, i] *= pooled_grads[i]
# The channel-wise mean of the resulting feature map
# is our heatmap of class activation
heatmap = np.mean(last_conv_layer_output, axis=-1)
# For visualization purpose, we will also normalize the heatmap between 0 & 1
heatmap = np.maximum(heatmap, 0) / np.max(heatmap)
return heatmap
def superimposed_cam(file_path, model):
# Prepare image
img_array = get_img_array(file_path)
# Generate class activation heatmap
heatmap = make_gradcam_heatmap(img_array, model)
# Rescale the original image
img = img_array * 255
# We rescale heatmap to a range 0-255
heatmap = np.uint8(255 * heatmap)
# We use jet colormap to colorize heatmap
jet = cm.get_cmap("jet")
# We use RGB values of the colormap
jet_colors = jet(np.arange(256))[:, :3]
jet_heatmap = jet_colors[heatmap]
# We create an image with RGB colorized heatmap
jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)
# Superimpose the heatmap on original image
superimposed_img = jet_heatmap * 0.4 + img
superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img[0])
return superimposed_img, classNames[np.argmax(model.predict(img_array))]
covid_filenames = tf.io.gfile.glob(path_covid + "/*")
pneumonia_filenames = tf.io.gfile.glob(path_viral_pneumonia + "/*")
plt.figure(figsize=(20, 20))
for n in range(10):
if n < len(covid_filenames):
ax = plt.subplot(5, 5, n + 1)
img, pred = superimposed_cam(covid_filenames[n], model)
plt.imshow(img)
plt.title(pred)
plt.axis("off")
for n in range(15, 25):
if n < len(pneumonia_filenames):
ax = plt.subplot(5, 5, n + 1)
img, pred = superimposed_cam(pneumonia_filenames[n], model)
plt.imshow(img)
plt.title(pred)
plt.axis("off")
plt.show()
# --- next script ---
# “Heart disease is easier to treat when detected early; it is the leading cause of death all over the world. The term “heart disease” refers to several types of heart conditions. In Pakistan and some other countries, the most common type of heart disease is coronary artery disease (CAD), which can lead to heart attack. You can greatly reduce your risk for heart disease through lifestyle changes.”
# This project is a way to show everything I learned in the Python course and the Google Data Analytics Certification. The dataset was chosen by my girlfriend, who happens to be a doctor, as a way to test my abilities.
# My goal is to look at the dataset to find out:
# How many patients do we have in these samples, and of what gender?
# How many patients have diabetes and high cholesterol?
# How many practice a healthy lifestyle, and how many are smokers?
# # Process Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
data = pd.read_csv(
"/kaggle/input/mortality-rate-heart-patient-pakistan-hospital/FIC.Full CSV.csv"
)
data.head()
data.isnull().sum()
data.info()
# # Data visualization
# Patient Count By Gender
data["Gender"] = data["Gender"].astype(str)
le = LabelEncoder()
data["Gender"] = le.fit_transform(data["Gender"])
gender = data["Gender"].value_counts()
plt.figure(figsize=(7, 6))
ax = gender.plot(kind="bar", rot=0, color="purple")
ax.set_title("Patient Count By Gender", y=1)
ax.set_xlabel("Gender")
ax.set_ylabel("Number of People")
ax.set_xticklabels(("Male", "Female"))
for i in ax.patches:
y_value = i.get_height()
x_value = i.get_x() + i.get_width() / 2
space = 1
label = format(y_value)
ax.annotate(
label,
(x_value, y_value),
xytext=(0, space),
textcoords="offset points",
ha="center",
va="bottom",
)
plt.show()
# Patient with Depression
data["Depression"] = data["Depression"].map({"YES": 1, "NO": 0})
Depression = data["Depression"].value_counts()
labels = ["Yes", "No"]
plt.pie(Depression, labels=labels, wedgeprops={"edgecolor": "black"}, autopct="%1.1f%%")
plt.title("Patient with Depression")
plt.show()
# Patient with Hyperlipid
data["Hyperlipi"] = data["Hyperlipi"].map({"YES": 1, "NO": 0})
Hyperlipid = data["Hyperlipi"].value_counts()
labels = ["Yes", "No"]
plt.pie(Hyperlipid, labels=labels, wedgeprops={"edgecolor": "black"}, autopct="%1.1f%%")
plt.title("Patient with Hyperlipid")
plt.show()
data["Smoking"] = data["Smoking"].map({"YES": 1, "NO": 0})
Smoking = data["Smoking"].value_counts()
labels = ["Yes", "No"]
plt.pie(Smoking, labels=labels, wedgeprops={"edgecolor": "black"}, autopct="%1.1f%%")
plt.title("Smoking Patient")
plt.show()
# Patient with Diabetes
Diabetes = data["Diabetes"].value_counts()
labels = ["Yes", "No"]
plt.pie(Diabetes, labels=labels, wedgeprops={"edgecolor": "black"}, autopct="%1.1f%%")
plt.title("Patient with Diabetes")
plt.show()
# After exploring these charts, a few things can be observed:
# 1. There are more males than females in the sample.
# 2. A large number of patients have depression and hyperlipidemia.
# 3. It is also possible to identify a rough balance in the diabetes and smoking distributions.
# # Data prediction
# There are several predictive models available in Python for heart failure prediction. One popular approach is to use machine learning algorithms to analyze various features and risk factors associated with heart failure.
# Machine learning algorithms
categories = [
"Age",
"Age.Group",
"Gender",
"Sleep",
"Category",
"Depression",
"Hyperlipi",
"Smoking",
"Family.History",
"F.History",
"Diabetes",
"HTN",
"Allergies",
"BP",
"Thrombolysis",
"BGR",
"B.Urea",
"S.Cr",
"S.Sodium",
"S.Potassium",
"S.Chloride",
"C.P.K",
"CK.MB",
"ESR",
"WBC",
"RBC",
"Hemoglobin",
"P.C.V",
"M.C.V",
"M.C.H",
"M.C.H.C",
"PLATELET_COUNT",
"NEUTROPHIL",
"LYMPHO",
"MONOCYTE",
"EOSINO",
"CO",
"Diagnosis",
"Hypersensitivity",
"cp",
"trestbps",
"chol",
"fbs",
"restecg",
"thalach",
"exang",
"oldpeak",
"slope",
"ca",
"thal",
"num",
"SK",
"SK.React",
"Reaction",
"Mortality",
"Follow.Up",
]
data2 = pd.get_dummies(data[categories])
y = data2["Mortality"]
X = data2.drop("Mortality", axis=1)
from sklearn.metrics import accuracy_score, confusion_matrix
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
print("x_train shape: ", X_train.shape)
print("x_test shape: ", X_test.shape)
print("y_train shape: ", y_train.shape)
print("y_test shape: ", y_test.shape)
print("Number of classes (dead or alive) ", len(np.unique(y_train)))
# After splitting the dataset into training and testing sets, it was possible to identify 147 features.
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
print("Accuracy:", accuracy)
print("Confusion Matrix:\n", conf_matrix)
# Heart Failure Prediction model confusion matrix (plotted from the conf_matrix computed above)
labels = ["True Negative", "False Positive", "False Negative", "True Positive"]
categories = ["Non-Death", "Death"]
plt.figure(figsize=(8, 6))
sns.heatmap(
conf_matrix,
annot=True,
cmap="Blues",
fmt="g",
xticklabels=categories,
yticklabels=categories,
)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.title("Confusion Matrix")
plt.show()
# --- next script ---
# # Multiple Linear Regression
# ## Bike Sharing Assignment
# #### Problem Statement:
# A bike-sharing system is a service in which bikes are made available for shared use to individuals on a short term basis for a price or free. Many bike share systems allow people to borrow a bike from a "dock" which is usually computer-controlled wherein the user enters the payment information, and the system unlocks it. This bike can then be returned to another dock belonging to the same system.
# A US bike-sharing provider BikeIndia has recently suffered considerable dips in their revenues due to the ongoing Corona pandemic. The company is finding it very difficult to sustain in the current market scenario. So, it has decided to come up with a mindful business plan to be able to accelerate its revenue as soon as the ongoing lockdown comes to an end, and the economy restores to a healthy state.
# In such an attempt, **BikeIndia** aspires to understand the demand for shared bikes among the people after this ongoing quarantine situation ends across the nation due to Covid-19. They have planned this to prepare themselves to cater to the people's needs once the situation gets better all around and stand out from other service providers and make huge profits.
# They have contracted a consulting company to understand the factors on which the demand for these shared bikes depends. Specifically, they want to understand the factors affecting the demand for these shared bikes in the American market. The company wants to know:
# Which variables are significant in predicting the demand for shared bikes.
# How well those variables describe the bike demands
# Based on various meteorological surveys and people's styles, the service provider firm has gathered a large dataset on daily bike demands across the American market based on some factors.
# #### Business Goal:
# We are required to model the demand for shared bikes with the available independent variables. It will be used by the management to understand how exactly the demands vary with different features. They can accordingly manipulate the business strategy to meet the demand levels and meet the customer's expectations. Further, the model will be a good way for management to understand the demand dynamics of a new market.
# ## Reading and Understanding the Data
#
# Supress Warnings
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
bike = pd.DataFrame(pd.read_csv("/kaggle/input/bike-sharing/day.csv"))
# Check the head of the dataset
bike.head()
# Check the descriptive information
bike.info()
bike.describe()
# Check the shape of df
print(bike.shape)
# ## Finding :
# The dataset has 730 rows and 16 columns.
# Except for one column, all others are either float or integer type.
# One column is of date type.
# Looking at the data, some fields appear to be categorical in nature, but are stored as integer/float types.
# We will analyse them and decide whether to convert them to categorical or treat them as integers.
# # DATA QUALITY CHECK
# ## Check for NULL/MISSING values
# percentage of missing values in each column
round(100 * (bike.isnull().sum() / len(bike)), 2).sort_values(ascending=False)
# row-wise null count percentage
round((bike.isnull().sum(axis=1) / len(bike)) * 100, 2).sort_values(ascending=False)
# ## Finding
# There are no missing / Null values either in columns or rows
# ## Duplicate Check
bike_dup = bike.copy()
# Checking for duplicates and dropping the entire duplicate row if any
bike_dup.drop_duplicates(subset=None, inplace=True)
bike_dup.shape
bike.shape
# ### Insights
# The shape after running the drop-duplicates command is the same as that of the original dataframe.
# Hence we can conclude that there were no duplicate rows in the dataset.
# ## Data Cleaning
# Checking value_counts() for the entire dataframe.
# This will help to identify any Unknown/Junk values present in the dataset.
# Create a copy of the dataframe without the 'instant' column,
# as it only contains unique values and it does not make sense to do a value count on it.
bike_dummy = bike.iloc[:, 1:16]
for col in bike_dummy:
print(bike_dummy[col].value_counts(ascending=False), "\n\n\n")
# ### Insights
# There seems to be no Junk/Unknown values in the entire dataset.
# # Removing redundant & unwanted columns
# Based on the high level look at the data and the data dictionary, the following variables can be removed from further analysis:
# 1. **instant** : It is only an index value.
# 2. **dteday** : This has the date. Since we already have separate columns for 'year' & 'month', we can live without this column.
# 3. **casual & registered** : Both of these columns contain the count of bikes booked by different categories of customers.
# Since our objective is to find the total count of bikes and not counts by specific category, we will ignore these two columns.
# Moreover, we have created a new variable to capture the ratio of these customer types.
# 4. We will save the new dataframe as bike_new, so that the original dataset is preserved for any future analysis/validation.
bike.columns
bike_new = bike[
[
"season",
"yr",
"mnth",
"holiday",
"weekday",
"workingday",
"weathersit",
"temp",
"atemp",
"hum",
"windspeed",
"cnt",
]
]
bike_new.info()
# # Creating Dummy Variables
# We will create DUMMY variables for 4 categorical variables 'mnth', 'weekday', 'season' & 'weathersit'.
# - Before creating dummy variables, we will have to convert them into 'category' data types.
# Check the datatypes before conversion
bike_new.info()
# Convert to 'category' data type
bike_new["season"] = bike_new["season"].astype("category")
bike_new["weathersit"] = bike_new["weathersit"].astype("category")
bike_new["mnth"] = bike_new["mnth"].astype("category")
bike_new["weekday"] = bike_new["weekday"].astype("category")
bike_new.info()
bike_new.head()
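# A minimal sketch of the dummy-variable step described above (illustrative only,
# and not used later; the pycaret setup() below handles categorical encoding
# internally). drop_first=True avoids creating one redundant column per category.
bike_dummies_example = pd.get_dummies(bike_new, columns=["season", "weathersit", "mnth", "weekday"], drop_first=True)
bike_dummies_example.head()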
# Install the below libraries before importing
import pandas as pd
from pandas_profiling import ProfileReport
# EDA using pandas-profiling
profile = ProfileReport(bike_new)
profile
bike_new.drop(["atemp"], axis=1, inplace=True)
profile_new = ProfileReport(bike_new)
profile_new
data = bike_new.sample(frac=0.9, random_state=42)
data_unseen = bike_new.drop(data.index)
data.reset_index(drop=True, inplace=True)
data_unseen.reset_index(drop=True, inplace=True)
print("Data for Modeling: " + str(data.shape))
print("Unseen Data For Predictions: " + str(data_unseen.shape))
from pycaret.regression import *
exp_reg101 = setup(data=data, target="cnt", session_id=1)
best = compare_models()
catboost = create_model("catboost")
print(catboost)
tuned_catboost = tune_model(catboost)
plot_model(tuned_catboost)
plot_model(tuned_catboost, plot="error")
plot_model(tuned_catboost, plot="feature")
evaluate_model(tuned_catboost)
predict_model(tuned_catboost)
final_catboost = finalize_model(tuned_catboost)
final_catboost
predict_model(final_catboost)
unseen_predictions = predict_model(final_catboost, data=data_unseen)
unseen_predictions.head()
from pycaret.utils import check_metric
check_metric(unseen_predictions.cnt, unseen_predictions.Label, "R2")
save_model(final_catboost, "./model")
saved_final_catboost = load_model("./model")
new_prediction = predict_model(saved_final_catboost, data=data_unseen)
new_prediction.head()
from pycaret.utils import check_metric
check_metric(new_prediction.cnt, new_prediction.Label, "R2")
# --- next script ---
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import time
import glob
import PIL.Image as Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from tqdm import tqdm
# Data config
DATA_DIR = "/kaggle/input/vesuvius-challenge-ink-detection/"
BUFFER = 32
Z_DIM = 16
Z_START = 25
# Model config
BATCH_SIZE = 64
plt.imshow(Image.open(DATA_DIR + "/train/1/ir.png"), cmap="gray")
# ## Loading the dataset
def load_mask(split, index):
img = Image.open(f"{DATA_DIR}/{split}/{index}/mask.png").convert("1")
return tf.convert_to_tensor(img, dtype="bool")
def load_labels(split, index):
img = Image.open(f"{DATA_DIR}/{split}/{index}/inklabels.png")
return tf.convert_to_tensor(img, dtype="bool")
mask = load_mask(split="train", index=1)
labels = load_labels(split="train", index=1)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.set_title("mask.png")
ax1.imshow(mask, cmap="gray")
ax2.set_title("inklabels.png")
ax2.imshow(labels, cmap="gray")
plt.show()
def load_volume(split, index):
# Load the 3d x-ray scan, one slice at a time
z_slices_fnames = sorted(
glob.glob(f"{DATA_DIR}/{split}/{index}/surface_volume/*.tif")
)[Z_START : Z_START + Z_DIM]
z_slices = []
for z, filename in tqdm(enumerate(z_slices_fnames)):
img = Image.open(filename)
z_slice = np.array(img, dtype=np.int16)
z_slices.append(z_slice)
return tf.stack(z_slices, axis=-1)
volume = load_volume(split="train", index=1)
print(f"volume {volume.shape} - {volume.dtype}")
fig, axes = plt.subplots(1, 5, figsize=(15, 3))
for z, ax in enumerate(axes):
ax.imshow(volume[:, :, z], cmap="gray")
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
plt.show()
val_location = (3500, 1100)
val_zone_size = (800, 800)
fig, ax = plt.subplots()
ax.imshow(labels)
patch = patches.Rectangle(
[val_location[1], val_location[0]],
val_zone_size[0],
val_zone_size[1],
linewidth=2,
edgecolor="g",
facecolor="none",
)
ax.add_patch(patch)
plt.show()
# #### Create a dataset that randomly samples coordinates (locations)
def sample_random_location(shape):
random_train_x = tf.random.uniform(
shape=(), minval=BUFFER, maxval=shape[0] - BUFFER - 1, dtype="int32"
)
random_train_y = tf.random.uniform(
shape=(), minval=BUFFER, maxval=shape[1] - BUFFER - 1, dtype="int32"
)
random_train_location = tf.stack([random_train_x, random_train_y])
return random_train_location
def is_in_masked_zone(location, mask):
return mask[location[0], location[1]]
sample_random_location_train = lambda x: sample_random_location(mask.shape)
is_in_mask_train = lambda x: is_in_masked_zone(x, mask)
def is_in_val_zone(location, val_location, val_zone_size):
x = location[0]
y = location[1]
x_match = (
val_location[0] - BUFFER <= x <= val_location[0] + val_zone_size[0] + BUFFER
)
y_match = (
val_location[1] - BUFFER <= y <= val_location[1] + val_zone_size[1] + BUFFER
)
return x_match and y_match
def is_proper_train_location(location):
return not is_in_val_zone(
location, val_location, val_zone_size
) and is_in_mask_train(location)
train_locations_ds = (
tf.data.Dataset.from_tensor_slices([0])
.repeat()
.map(sample_random_location_train, num_parallel_calls=tf.data.AUTOTUNE)
)
train_locations_ds = train_locations_ds.filter(is_proper_train_location)
# ### Visualize training patch location
fig, ax = plt.subplots()
ax.imshow(labels)
for x, y in train_locations_ds.take(200):
patch = patches.Rectangle(
[y - BUFFER // 2, x - BUFFER // 2],
2 * BUFFER,
2 * BUFFER,
linewidth=2,
edgecolor="r",
facecolor="none",
)
ax.add_patch(patch)
val_patch = patches.Rectangle(
[val_location[1], val_location[0]],
val_zone_size[0],
val_zone_size[1],
linewidth=2,
edgecolor="g",
facecolor="none",
)
ax.add_patch(val_patch)
plt.show()
# ### Creating a training dataset that yields volume patches + labels
def extract_subvolume(location, volume):
x = location[0]
y = location[1]
subvolume = volume[x - BUFFER : x + BUFFER, y - BUFFER : y + BUFFER, :]
subvolume = tf.cast(subvolume, dtype="float32") / 65535.0
return subvolume
def extract_labels(location, labels):
x = location[0]
y = location[1]
label = labels[x, y]
label = tf.cast(label, dtype="float32")
return label
def extract_subvolume_and_label(location):
subvolume = extract_subvolume(location, volume)
label = extract_labels(location, labels)
return subvolume, label
shuffle_buffer_size = BATCH_SIZE * 4
train_ds = train_locations_ds.map(
extract_subvolume_and_label, num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE).batch(BATCH_SIZE)
for subvolume_batch, label_batch in train_ds.take(1):
print(f"subvolume shape: {subvolume_batch.shape[1:]}")
# ## Create a validation dataset that yields patches from the validation area and their labels
val_locations_stride = 3
val_locations = []
for x in range(
val_location[0], val_location[0] + val_zone_size[0], val_locations_stride
):
for y in range(
val_location[1], val_location[1] + val_zone_size[1], val_locations_stride
):
val_locations.append((x, y))
val_ds = tf.data.Dataset.from_tensor_slices(val_locations).map(
extract_subvolume_and_label, num_parallel_calls=tf.data.AUTOTUNE
)
val_ds = val_ds.prefetch(tf.data.AUTOTUNE).batch(BATCH_SIZE)
t0 = time.time()
n = 200
for _ in train_ds.take(n):
pass
print(f"Time per batch: {(time.time() - t0) / n:.4f}s")
def trivial_baseline(dataset):
total = 0
matches = 0.0
for _, batch_label in tqdm(dataset):
matches += tf.reduce_sum(tf.cast(batch_label, "float32"))
total += tf.reduce_prod(tf.shape(batch_label))
return 1.0 - matches / tf.cast(total, "float32")
score = trivial_baseline(val_ds).numpy()
print(f"Best validation score achievable trivially: {score * 100:.2f}% accuracy")
augmenter = keras.Sequential(
[
layers.RandomFlip("horizontal"),
]
)
def augment_train_data(data, label):
data = augmenter(data)
return data, label
augmented_train_ds = train_ds.map(
augment_train_data, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
def get_model(input_shape):
inputs = keras.Input(input_shape)
x = layers.Conv2D(kernel_size=3, filters=128, activation="relu")(inputs)
for size in (128, 256, 512):
residual = x
x = layers.BatchNormalization()(x)
x = layers.SeparableConv2D(
kernel_size=3, filters=size, activation="relu", padding="same"
)(x)
x = layers.SeparableConv2D(
kernel_size=3, filters=size, activation="relu", padding="same"
)(x)
x = layers.MaxPooling2D(2, padding="same")(x)
residual = layers.Conv2D(
kernel_size=1, filters=size, strides=2, padding="same"
)(residual)
x = layers.Add()([residual, x])
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dropout(0.3)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
return model
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = get_model(input_shape=(BUFFER * 2, BUFFER * 2, Z_DIM))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
# Because train_ds is infinitely streaming, you need to specify steps_per_epoch
# i.e. the dataset has no epoch boundary on its own
model.fit(
augmented_train_ds,
validation_data=val_ds,
epochs=10,
steps_per_epoch=300,
)
model.save("model.keras")
del volume
del mask
del labels
del train_ds
del val_ds
# Manually trigger garbage collection
keras.backend.clear_session()
import gc
gc.collect()
model = keras.models.load_model("model.keras")
INFER_BATCH_SIZE = 128
def compute_predictions_map(split, index):
print(f"Load data for {split}/{index}")
test_volume = load_volume(split=split, index=index)
test_mask = load_mask(split=split, index=index)
test_locations = []
stride = BUFFER // 2
for x in range(BUFFER, test_volume.shape[0] - BUFFER, stride):
for y in range(BUFFER, test_volume.shape[1] - BUFFER, stride):
test_locations.append((x, y))
print(f"{len(test_locations)} test locations (before filtering by mask)")
sample_random_location_test = lambda x: sample_random_location(test_mask.shape)
is_in_mask_test = lambda x: is_in_masked_zone(x, test_mask)
extract_subvolume_test = lambda x: extract_subvolume(x, test_volume)
test_locations_ds = tf.data.Dataset.from_tensor_slices(test_locations).filter(
is_in_mask_test
)
test_ds = test_locations_ds.map(
extract_subvolume_test, num_parallel_calls=tf.data.AUTOTUNE
)
predictions_map = np.zeros(test_volume.shape[:2], dtype="float16")
print(f"Compute predictions")
for loc_batch, patch_batch in tqdm(
zip(test_locations_ds.batch(INFER_BATCH_SIZE), test_ds.batch(INFER_BATCH_SIZE))
):
predictions = model.predict_on_batch(patch_batch)
for (x, y), pred in zip(loc_batch, predictions):
predictions_map[x, y] = pred
del test_volume
del test_mask
return predictions_map
predictions_map_1 = compute_predictions_map(split="train", index=1)
predictions_map_a = compute_predictions_map(split="test", index="a")
predictions_map_b = compute_predictions_map(split="test", index="b")
def stats(predictions_map):
size = predictions_map.shape[0] * predictions_map.shape[1]
for thr in (0.1, 0.25, 0.5, 0.75):
num_nonzero = np.count_nonzero(predictions_map > thr)
percent_nonzero = 100.0 * num_nonzero / size
print(f"Percent above {thr}: {percent_nonzero:.2f}%")
# --- next script ---
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
import torchvision
import torch.nn as nn
import torchvision.transforms as trans
import torch.optim as op
import torch.nn.functional as F
import pandas as pd
import numpy as np
PATH = "./trainedModel/mnist_cnn_net.pt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transform = trans.Compose([trans.ToTensor(), trans.Normalize((0.5,), (0.5,))])
train_df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
train_labels = train_df["label"].values
train_images = train_df.iloc[:, 1:].values
train_images = train_images.reshape(-1, 28, 28, 1)
train_images = np.transpose(train_images, (0, 3, 1, 2))
train_images = torch.from_numpy(train_images).float()
train_labels = torch.from_numpy(train_labels).long()
test_df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
test_images = test_df.values
test_images = test_images.reshape(-1, 28, 28, 1)
test_images = np.transpose(test_images, (0, 3, 1, 2))
test_images = torch.from_numpy(test_images).float()
trainset = torch.utils.data.TensorDataset(train_images, train_labels)
testset = torch.utils.data.TensorDataset(test_images)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=0
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=128, shuffle=False, num_workers=0
)
dataiter = iter(trainloader)
images, labels = next(dataiter)
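# Shape bookkeeping for the network defined below: a 28x28 input passes through
# conv1 (3x3, no padding) -> 26x26x32, then conv2 (3x3) -> 24x24x64, then a 2x2
# max pool -> 12x12x64, which is why fc1 expects 64 * 12 * 12 input features.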
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3).to(device)
self.conv2 = nn.Conv2d(32, 64, 3).to(device)
self.pool = nn.MaxPool2d(2, 2).to(device)
self.fc1 = nn.Linear(64 * 12 * 12, 128).to(device)
self.fc2 = nn.Linear(128, 10).to(device)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 12 * 12)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
net = Net()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = op.SGD(net.parameters(), lr=0.001, momentum=0.9)
epochs = 10
epoch_log = []
loss_log = []
accuracy_log = []
for epoch in range(epochs):
print(f"Starting Epoch: {epoch + 1}...")
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
epoch_loss = running_loss / (i + 1)
epoch_log.append(epoch + 1)
loss_log.append(epoch_loss)
print(f"Epoch [{epoch + 1}/{epochs}], Loss: {epoch_loss:.4f}")
net.eval()
test_predictions = []
with torch.no_grad():
for data in testloader:
inputs = data[0].to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
test_predictions.extend(predicted.cpu().numpy())
# Save predictions as CSV file
submission_df = pd.DataFrame(
{"ImageId": np.arange(1, len(test_predictions) + 1), "Label": test_predictions}
)
submission_df.to_csv("submission.csv", index=False)
print("Predictions saved successfully as submission.csv.")
# --- next script ---
import warnings
# Suppress all warnings
warnings.filterwarnings("ignore")
# # FEATURE EXTRACTION VS. FEATURE SELECTION
# **Feature extraction** refers to the process of transforming the original features of a dataset into a new set of features that can capture the underlying patterns or relationships in the data in a more meaningful or informative way. Feature extraction methods often involve dimensionality reduction techniques, such as Principal Component Analysis (PCA), t-SNE, or UMAP, to create a new set of features that retain as much relevant information as possible.
# **Feature selection** refers to the process of selecting a subset of the original features that are most predictive of the target variable or have the highest importance for the machine learning model's performance. Feature selection methods typically involve evaluating each feature's contribution to the model's performance, such as through statistical tests, correlation analysis, or feature ranking algorithms.
# **PCA, UMAP, and t-SNE are examples of Feature Extraction**
# PCA, UMAP, and t-SNE are all unsupervised learning methods that reduce the dimensionality of the data by extracting new features that capture the most important information from the original features. PCA aims to capture the maximum variance in the data while reducing the number of dimensions, while UMAP and t-SNE aim to preserve the local structure of the data in a lower-dimensional space. These techniques are useful for visualizing complex, high-dimensional datasets or preparing the data for machine learning algorithms.
# **RFECV and Permutation are examples of Feature Selection**
# RFECV, on the other hand, is a supervised learning method that selects the best subset of features for a machine learning model by recursively removing less important features and evaluating the model's performance using cross-validation. RFECV is useful when we want to identify the most relevant features for a particular task or when the number of features is too large, and we want to reduce the computational cost of the model or avoid overfitting. By selecting the most important features, RFECV can improve the model's accuracy and generalizability.
# Permutation is a technique that can also be used for feature selection. Feature selection is the process of selecting the most relevant features from a dataset, to improve the accuracy and efficiency of machine learning models. Permutation can be used to evaluate the importance of individual features, and hence, select the most relevant ones. The idea behind permutation is to randomly shuffle or permute the values of a feature, and then observe the effect on the model's performance. If the feature is not important, shuffling its values should not significantly affect the model's performance, while if the feature is important, shuffling its values should decrease the model's performance.
# **NOTE: Sometimes you will see multiple of these concepts used in the same code**
# For instance after using RFECV to select a subset of features, permutation can be used to evaluate the importance of the features selected by the algorithm. After removing a subset of features, we can randomly shuffle the values of the remaining features and evaluate the model's performance. If the performance decreases significantly, it means that the removed features were important for the model. This process can be repeated until we find the optimal subset of features that maximizes the model's performance.
# Permutation might also be used to validate the effectiveness of dimensionality reduction techniques such as PCA, UMAP, and t-SNE by evaluating the importance of the new features created by these methods.
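# The sketch below illustrates the combination described in the NOTE above (an
# illustrative addition; names such as X_demo and rfecv_demo are hypothetical and
# not used elsewhere in this notebook): RFECV first selects a feature subset, then
# permutation importance checks how much each kept feature contributes to the
# model's held-out score.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.inspection import permutation_importance
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
X_demo, y_demo = make_classification(n_samples=500, n_features=10, n_informative=5, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2, random_state=0)
rfecv_demo = RFECV(LogisticRegression(max_iter=1000), cv=5).fit(X_tr, y_tr)
X_tr_sel, X_te_sel = rfecv_demo.transform(X_tr), rfecv_demo.transform(X_te)
model_demo = LogisticRegression(max_iter=1000).fit(X_tr_sel, y_tr)
perm_demo = permutation_importance(model_demo, X_te_sel, y_te, n_repeats=10, random_state=0)
print("Features kept by RFECV:", np.flatnonzero(rfecv_demo.support_))
print("Permutation importance of kept features:", perm_demo.importances_mean.round(3))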
# # Feature Extraction Using PCA (Principal Component Analysis)
# **What is PCA?**
# PCA (Principal Component Analysis) is a **linear** dimensionality reduction technique that works by projecting high-dimensional data onto a lower-dimensional subspace that captures the maximum amount of variance in the data. PCA is well-suited for data that has a linear structure, and can be used for data visualization, feature extraction, or noise reduction.
# **The below code does the following Feature Extraction task using PCA to reduce the number of features to 5:**
# - Generates a binary classification dataset with 1000 samples and 10 features
# - Performs PCA on the data to reduce the number of features to 5
# - Converts the PCA-reduced data to a Pandas dataframe
# - Prints the original data and the PCA-reduced data
#
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
# Generate sample binary classification data
X, y = make_classification(
n_samples=1000, n_features=10, n_informative=5, n_redundant=2, random_state=42
)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Convert the data to pandas dataframes
df_train = pd.DataFrame(X_train)
df_test = pd.DataFrame(X_test)
# Perform PCA on the training data to reduce the number of features
pca = PCA(n_components=5)
X_train_pca = pca.fit_transform(X_train)
# Transform the testing data using the fitted PCA
X_test_pca = pca.transform(X_test)
# Convert the PCA-reduced training data to a pandas dataframe
df_train_pca = pd.DataFrame(
X_train_pca, columns=["PCA{}".format(i + 1) for i in range(5)]
)
# Convert the PCA-reduced testing data to a pandas dataframe
df_test_pca = pd.DataFrame(
X_test_pca, columns=["PCA{}".format(i + 1) for i in range(5)]
)
# Show the data before and after PCA
print("Training features before PCA feature extraction:")
print(df_train.head())
print("\nTraining features after PCA feature extraction:")
print(df_train_pca.head())
print("\nTesting features before PCA feature extraction:")
print(df_test.head())
print("\nTesting features after PCA feature extraction:")
print(df_test_pca.head())
# # Feature Extraction using UMAP (Uniform Manifold Approximation and Projection)
# **What is UMAP?**
# UMAP is a **nonlinear** dimensionality reduction technique that works by preserving the local structure of the data, which can be useful for visualizing complex, high-dimensional datasets that may have nonlinear relationships between the variables. UMAP can also be used for clustering, anomaly detection, and other unsupervised learning tasks.
# **The below code does the following Feature Extraction task using UMAP to reduce the number of features to 5:**
# - Generates a binary classification dataset with 1000 samples and 10 features
# - Performs UMAP on the data to reduce the number of features to 5
# - Converts the UMAP-reduced data to a Pandas dataframe
# - Prints the original data and the UMAP-reduced data
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from umap import UMAP
# Generate sample binary classification data
X, y = make_classification(
n_samples=1000, n_features=10, n_informative=5, n_redundant=2, random_state=42
)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Convert the data to pandas dataframes
df_train = pd.DataFrame(X_train)
df_test = pd.DataFrame(X_test)
# Perform UMAP on the training data to reduce the number of features
umap = UMAP(n_components=5)
X_train_umap = umap.fit_transform(X_train)
# Transform the testing data using the fitted UMAP
X_test_umap = umap.transform(X_test)
# Convert the UMAP-reduced training data to a pandas dataframe
df_train_umap = pd.DataFrame(
X_train_umap, columns=["UMAP{}".format(i + 1) for i in range(5)]
)
# Convert the UMAP-reduced testing data to a pandas dataframe
df_test_umap = pd.DataFrame(
X_test_umap, columns=["UMAP{}".format(i + 1) for i in range(5)]
)
# Show the data before and after UMAP
print("Training features before UMAP:")
print(df_train.head())
print("\nTraining features after UMAP:")
print(df_train_umap.head())
print("\nTesting features before UMAP:")
print(df_test.head())
print("\nTesting features after UMAP:")
print(df_test_umap.head())
# # 3D Visualization using T-SNE (t-Distributed Stochastic Neighbor Embedding)
# **What is t-SNE?**
# t-SNE (t-Distributed Stochastic Neighbor Embedding) is a **nonlinear** dimensionality reduction technique that is based on preserving the local structure of the data, which can be useful for visualizing complex, high-dimensional datasets that may have nonlinear relationships between the variables. **t-SNE is particularly well-suited for visualizing high-dimensional data in a low-dimensional space such as 2D or 3D. I would even venture to say it is now used more for that purpose than for actual feature extraction before training an ML model, since UMAP can scale to larger data faster in many cases, and since the Barnes-Hut algorithm behind t-SNE is only efficient up to 3 output dimensions, beyond which a different and often slower, less efficient algorithm has to be used.**
# **The below code does the following 3D Visualization task using t-SNE by first reducing the data to 3 dimensions (3 feature columns):**
# - Generates a binary classification dataset with 1000 samples and 10 features
# - Performs t-SNE on the data to reduce the number of features to 3
# - Converts the t-SNE-reduced data to a Pandas dataframe
# - Prints the original data and the t-SNE-reduced data
# - Creates a 3D visualization of the data that now has 3 dimensions (3 feature columns)
# Main Stuff
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.manifold import TSNE
# For Visualizing
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import plotly.express as px
# Generate sample binary classification data
X, y = make_classification(
n_samples=1000, n_features=10, n_informative=5, n_redundant=2, random_state=42
)
# Convert the data to a pandas dataframe
df = pd.DataFrame(X, columns=[str(i) for i in range(X.shape[1])])
df["target"] = y
# Perform t-SNE on the data to reduce the number of features
tsne_model = TSNE(n_components=3, random_state=42)
X_tsne = tsne_model.fit_transform(X)
# Convert the t-SNE-reduced data to a pandas dataframe
df_tsne = pd.DataFrame(
X_tsne, columns=["tsne_" + str(i) for i in range(X_tsne.shape[1])]
)
df_tsne["target"] = y
# Show the data before and after t-SNE
print("Data before t-SNE:")
print(df.head())
print("\n Data after t-SNE:")
print(df_tsne.head())
# Visualize the data in INTERACTIVE 3D space
fig = px.scatter_3d(df_tsne, x="tsne_0", y="tsne_1", z="tsne_2", color="target")
fig.update_layout(width=600, height=600)
fig.show()
# # Feature Selection using RFECV (Recursive Feature Elimination with Cross-Validation)
# **What is RFECV?**
# Recursive Feature Elimination with Cross-Validation (RFECV) is a **feature selection** technique that is based on recursively removing features and selecting the best ones based on the performance of a machine learning model using cross-validation. RFECV can be used to reduce the number of features in high-dimensional datasets, improve the accuracy of a model, and reduce overfitting.
# **TIP:**
# The CV at the end of the name RFECV simply means it uses cross-validation to validate the optimal features when using Recursive Feature Elimination.
# **The below code does the following Feature SELECTION task using RFECV:**
# - Generates a binary classification dataset with 1000 samples and 10 features
# - Converts the data to a Pandas dataframe with feature names
# - Uses RFECV to select the best features for predicting the target variable using Logistic Regression
# - Converts the selected data back to a Pandas dataframe with consistent feature names
# - Prints the original data and the RFECV-selected data
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
# Generate sample binary classification data
X, y = make_classification(
n_samples=1000, n_features=10, n_informative=5, n_redundant=2, random_state=42
)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Fit a logistic regression model on the training data
log_reg = LogisticRegression()
selector = RFECV(log_reg, cv=5) # Use RFECV to select the best features
selector = selector.fit(X_train, y_train)
# Transform the training and testing data using the selected features
X_train_selected = selector.transform(X_train)
X_test_selected = selector.transform(X_test)
# Get the selected feature names
selected_mask = selector.get_support()
selected_names = np.array([str(i + 1) for i in range(len(selected_mask))])[
selected_mask
]
# Convert the selected training data to a pandas dataframe with original feature names
df_train_selected = pd.DataFrame(X_train_selected, columns=selected_names)
# Convert the selected testing data to a pandas dataframe with original feature names
df_test_selected = pd.DataFrame(X_test_selected, columns=selected_names)
# Show the data before and after feature selection
print("Training features before RFECV feature selection:")
print(pd.DataFrame(X_train).head())
print("\nTraining features after RFECV feature selection:")
print(df_train_selected.head())
print("\nTesting features before RFECV feature selection:")
print(pd.DataFrame(X_test).head())
print("\nTesting features after RFECV feature selection:")
print(df_test_selected.head())
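# Optional: inspect how many features RFECV kept and each feature's ranking
# (n_features_ and ranking_ are standard RFECV attributes; a ranking of 1 means selected)
print("\nOptimal number of features found by RFECV:", selector.n_features_)
print("Feature ranking (1 = selected):", selector.ranking_)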
# # Feature Selection using Permutation
# **What is Permutation based feature selection?**
# Permutation-based feature selection is a technique that ranks the importance of features based on the decrease in the performance of a machine learning model when the feature values are randomly permuted. The more a feature affects the performance of the model, the higher its importance score.
# The below code does the following Feature SELECTION task using permutation-based feature selection:
# - Generates a binary classification dataset with 1000 samples and 10 features
# - Converts the data to a Pandas dataframe with feature names
# - Uses Logistic Regression and permutation feature importance to rank the features based on their importance scores
# - Selects the top 5 features based on their importance scores
# - Converts the selected data back to a Pandas dataframe with consistent feature names
# - Prints the original data and the permutation-based feature-selected data
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.inspection import permutation_importance
# Generate sample binary classification data
X, y = make_classification(
n_samples=1000, n_features=10, n_informative=5, n_redundant=2, random_state=42
)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Fit a logistic regression model on the training data
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
# Use permutation feature importance to select the best features
# (SelectFromModel on a fitted linear model ranks by coefficients, not by permutation
# importance, so the mask is built directly from permutation_importance to keep the
# selected columns and their names consistent)
importance = permutation_importance(log_reg, X_train, y_train, random_state=42)
perm_sorted_idx = importance.importances_mean.argsort()[::-1]
features = X_train.shape[1]
mask = np.zeros(features, dtype=bool)
mask[perm_sorted_idx[:5]] = True
# Transform the training and testing data using the selected features
X_train_selected = X_train[:, mask]
X_test_selected = X_test[:, mask]
# Get the selected feature names
selected_names = np.array([str(i) for i in range(features)])[mask]
# Convert the selected training data to a pandas dataframe with original feature names
df_train_selected = pd.DataFrame(X_train_selected, columns=selected_names)
# Convert the selected testing data to a pandas dataframe with original feature names
df_test_selected = pd.DataFrame(X_test_selected, columns=selected_names)
# Show the data before and after feature selection
print("Training features before permutation feature selection:")
print(pd.DataFrame(X_train).head())
print("\nTraining features after permutation feature selection:")
print(df_train_selected.head())
print("\nTesting features before permutation feature selection:")
print(pd.DataFrame(X_test).head())
print("\nTesting features after permutation feature selection:")
print(df_test_selected.head())
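# Optional: print the mean permutation importance of every feature, highest first,
# using the `importance` result computed above
print("\nMean permutation importances (descending):")
for idx in perm_sorted_idx:
    print(f"feature {idx}: {importance.importances_mean[idx]:.4f}")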
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
dogList = [
"Akita",
"Alaskan Malamute",
"Australian shepherd",
"Basset hound",
"Beagle",
"Boston terrier",
"Bulldog",
"Chihuahua",
"Cocker Spaniel",
"Collie",
"French Bulldog",
"Golden Retriever",
"Great Dane",
"Poodle",
"Russell Terrier",
"Scottish Terrier",
"Siberian Husky",
"Skye terrier",
"Smooth Fox terrier",
"Terrier",
"Whippet",
]
# When finding exact matches (prints the list index for "terrier"/"Terrier", or -1 otherwise)
for index, item in enumerate(dogList):
if item == "terrier":
print(index)
elif item == "Terrier":
print(index)
else:
print("-1")
# Checking membership (exact match)
if "Basset hound" in dogList:
print("Found")
else:
print("Not Found")
if "Apple" in dogList:
print("Found")
else:
print("Not Found")
if "Basset hound" in dogList:
print(index)
else:
print("Not Found")
# When finding partial matches
# After converting each item to lowercase with lower(),
# print the list index of every entry that contains "terrier"
for index, item in enumerate(dogList):
if "terrier" in item.lower():
print(index)
# When finding partial matches
# Prints the character offset where "terrier" starts within each string (-1 if not found),
# NOT the list position
for index, item in enumerate(dogList):
x = item.lower().find("terrier")
print(x)
# Find dogs whose names start with "A"
for dog in dogList:
if dog[0] == "A":
print(dog)
|
import cupy as cp
import cuml, cudf
from sklearn.model_selection import train_test_split
from cuml.linear_model import Ridge
from cuml.neighbors import KNeighborsRegressor
from cuml.svm import SVC
from cuml.ensemble import RandomForestRegressor
from cuml.preprocessing.TargetEncoder import TargetEncoder
from sklearn.model_selection import GroupKFold, KFold
from cuml.metrics import mean_squared_error
from tqdm.notebook import tqdm
train_cr = cudf.read_csv(
"../input/ncaaw-march-mania-2021-spread/WNCAATourneyCompactResults.csv"
)
train_seeds = cudf.read_csv(
"../input/ncaaw-march-mania-2021-spread/WNCAATourneySeeds.csv"
)
submission = cudf.read_csv(
"../input/ncaaw-march-mania-2021-spread/WSampleSubmissionStage1.csv"
)
train_cr.head()
train_seeds.head()
train_seeds["seed_int"] = [
int(train_seeds["Seed"][x][1:3]) for x in range(len(train_seeds))
]
train_seeds.head()
train_cr.head()
|
# # The Problem
# A bank director is quite concerned about the number of customers cancelling their credit cards. For that reason, he asks you to use this database of more than 10 thousand customers to analyse it, identify possible causes and find patterns among the customers who cancelled.
# # First Steps
# Before anything else, we need to import the libraries that will be used and, naturally, the database itself. Pandas (for loading and analysis) and Plotly (charts) were the libraries used in this solution.
import pandas as pd
import plotly.express as px
# Formatting how numbers are displayed
pd.set_option("float_format", "{:.4f}".format)
tabela = pd.read_csv("/kaggle/input/credit-card-customers/BankChurners.csv")
tabela
# A first look at the table structure shows that it has 10127 rows and 23 columns. Now we use the "info()" method to check whether there is any null value in the structure, which turns out not to be the case.
tabela.info()
# # Exploratory Analysis
# At this point, the goal is to identify each column and understand what information it carries. After a careful review, each column is identified as follows:
# - "CLIENTNUM": customer identification code;
# - "Attrition_Flag": indicates whether the customer is active or cancelled the card;
# - "Customer_Age": customer age;
# - "Gender": gender;
# - "Dependent_count": number of dependents;
# - "Education_Level": education level;
# - "Marital_Status": marital status;
# - "Income_Category": annual income (categorized);
# - "Card_Category": card type;
# - "Months_on_book": time as a customer, in months;
# - "Total_Relationship_Count": number of products held;
# - "Months_Inactive_12_mon": months the customer was inactive in the last year;
# - "Contacts_Count_12_mon": contacts made by the customer to support;
# - "Credit_Limit": total credit limit;
# - "Total_Revolving_Bal": credit limit consumed;
# - "Avg_Open_To_Buy": credit limit available;
# - "Total_Trans_Amt": total amount in transactions;
# - "Total_Trans_Ct": total number of transactions;
# - "Avg_Utilization_Ratio": card utilization ratio.
# It was not possible to identify what the remaining columns mean, so they will be dropped so that they do not get in the way of the analysis. The "CLIENTNUM" column will also be dropped because it is not relevant to the objective.
tabela = tabela.drop("CLIENTNUM", axis=1)
tabela = tabela.drop("Total_Amt_Chng_Q4_Q1", axis=1)
tabela = tabela.drop("Total_Ct_Chng_Q4_Q1", axis=1)
tabela = tabela.drop(
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1",
axis=1,
)
tabela = tabela.drop(
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2",
axis=1,
)
tabela
# # Digging Deeper
# As a first step of the analysis, we will find out how many customers are active, how many cancelled, and their respective percentages of the total. The numbers found are:
# - Active customers: 8500 (approx. 83.9%)
# - Customers who cancelled: 1627 (approx. 16.1%)
qtd_categoria = tabela["Attrition_Flag"].value_counts()
display(qtd_categoria)
qtd_categoria_perc = tabela["Attrition_Flag"].value_counts(normalize=True)
display(qtd_categoria_perc)
# # Generating the charts
# The next step is to create charts comparing the number of active customers (in blue) and those who cancelled (in red) against the other columns in the table, to check whether anything stands out and could be a hint of the cause of the problem.
for coluna in tabela.columns:
grafico = px.histogram(tabela, x=coluna, color="Attrition_Flag", text_auto=True)
grafico.show()
# # Conclusions
# After carefully reviewing each chart, it was possible to notice:
# - **Contacts**
# **68.16%** of the cancelled customers contacted support **3 or more** times in the last year;
# Creating a new df with only the necessary columns (optional, but recommended)
tabela_contatos = tabela[["Attrition_Flag", "Contacts_Count_12_mon"]]
# Keeping only the cancelled customers
tabela_contatos = tabela_contatos[tabela_contatos.Attrition_Flag == "Attrited Customer"]
# Using groupby to count customers per number of contacts
tabela_contatos = tabela_contatos.groupby("Contacts_Count_12_mon").count()
# Since groupby does not return a flat df, reset_index is used to turn it back into one
tabela_contatos = tabela_contatos.reset_index()
# Creating a column with the percentage
tabela_contatos["porcentagem"] = (
tabela_contatos["Attrition_Flag"] / tabela_contatos["Attrition_Flag"].sum()
)
tabela_contatos
# Isolating customers with 3 or more contacts and summing the columns
clientes_3contatos = tabela_contatos[tabela_contatos.Contacts_Count_12_mon >= 3].sum()
clientes_3contatos
# - **Products Held**
# **60.17%** of the customers who cancelled had **3 or fewer** products in their portfolio.
# Repeating the previous process, now with the products column
tabela_produtos = tabela[["Attrition_Flag", "Total_Relationship_Count"]]
tabela_produtos = tabela_produtos[tabela_produtos.Attrition_Flag == "Attrited Customer"]
tabela_produtos = tabela_produtos.groupby("Total_Relationship_Count").count()
tabela_produtos = tabela_produtos.reset_index()
tabela_produtos["porcentagem"] = (
tabela_produtos["Attrition_Flag"] / tabela_produtos["Attrition_Flag"].sum()
)
tabela_produtos
clientes_3produtos = tabela_produtos[
tabela_produtos.Total_Relationship_Count <= 3
].sum()
clientes_3produtos
# - **Transactions**
# **75.48%** of the customers who cancelled spent **2800.00 or less** in transactions with their card, and **75.60%** used it **51 times or fewer**. Both values are well below the average of each category (4404.09 and 64.86, respectively).
# #### Percentage calculation for transaction amount:
tabela_valor_transf = tabela[["Attrition_Flag", "Total_Trans_Amt"]]
tabela_valor_transf = tabela_valor_transf[
tabela_valor_transf.Attrition_Flag == "Attrited Customer"
]
tabela_valor_transf = tabela_valor_transf.groupby("Total_Trans_Amt").count()
tabela_valor_transf = tabela_valor_transf.reset_index()
tabela_valor_transf["porcentagem"] = (
tabela_valor_transf["Attrition_Flag"] / tabela_valor_transf["Attrition_Flag"].sum()
)
tabela_valor_transf
clientes_2800 = tabela_valor_transf[tabela_valor_transf.Total_Trans_Amt <= 2800].sum()
clientes_2800
# #### Percentage calculation for transaction count
tabela_qtd_transf = tabela[["Attrition_Flag", "Total_Trans_Ct"]]
tabela_qtd_transf = tabela_qtd_transf[
tabela_qtd_transf.Attrition_Flag == "Attrited Customer"
]
tabela_qtd_transf = tabela_qtd_transf.groupby("Total_Trans_Ct").count()
tabela_qtd_transf = tabela_qtd_transf.reset_index()
tabela_qtd_transf["porcentagem"] = (
tabela_qtd_transf["Attrition_Flag"] / tabela_qtd_transf["Attrition_Flag"].sum()
)
tabela_qtd_transf
clientes_51 = tabela_qtd_transf[tabela_qtd_transf.Total_Trans_Ct <= 51].sum()
clientes_51
|
# # Predicting Eurovision Song Contest Qualifying
import numpy as np
import pandas as pd
import os
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot as plt
# Set parameters
results_data_path = (
"/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/Final Results/Jury/"
)
ogae_data_path = (
"/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/Polls/OGAE Poll/"
)
eurovisionworld_data_path = "/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/Polls/Eurovision World Poll/"
wiwibloggs_data_path = (
"/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/Polls/Wiwibloggs Poll/"
)
myeurovisionscoreboard_data_path = "/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/Polls/My Eurovision Scoreboard Poll/"
eurojury_data_path = (
"/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/Polls/EuroJury Poll/"
)
song_data_path = "/kaggle/input/eurovision-song-contest-data/Kaggle Dataset/"
years = ["2016", "2017", "2018", "2019", "2021", "2022"]
voting_points = [12, 10, 8, 7, 6, 5, 4, 3, 2, 1]
def get_poll_data_eurovsion_score(data_path, file_name, column_name):
data = pd.read_csv(os.path.join(data_path, file_name), encoding="ISO-8859-1")
data["Points"] = [
sum(voting_points) * i / sum(data["Points"]) for i in data["Points"]
]
data = data.rename(columns={"Points": column_name})
return data
def get_poll_data_popular_vote(data_path, file_name, column_name):
data = pd.read_csv(os.path.join(data_path, file_name))
data["Votes"] = pd.to_numeric(data["Votes"])
data["Vote Proportion"] = data["Votes"] / sum(data["Votes"])
data[column_name] = sum(voting_points) * data["Vote Proportion"]
data = data[["Contestant", column_name]]
return data
# Read in polls data
all_polls_data = pd.DataFrame(
columns=[
"Contestant",
"Year",
"Average OGAE Points",
"Average Eurovision World Points",
]
)
for year in years:
# get ogae data
file_name = year + "_ogae_results.csv"
ogae_data = pd.read_csv(
os.path.join(ogae_data_path, file_name), encoding="ISO-8859-1"
)
ogae_data = ogae_data.rename(columns={"Average Points": "Average OGAE Points"})
# get eurovision world data
file_name = year + "_eurovisionworld_results.csv"
eurovisionworld_data = pd.read_csv(
os.path.join(eurovisionworld_data_path, file_name)
)
eurovisionworld_data["Vote Proportion"] = eurovisionworld_data["Votes"] / sum(
eurovisionworld_data["Votes"]
)
eurovisionworld_data["Average Eurovision World Points"] = eurovisionworld_data[
"Vote Proportion"
] * sum(voting_points)
eurovisionworld_data = eurovisionworld_data[
["Contestant", "Average Eurovision World Points"]
]
# merge results
polls_data = eurovisionworld_data.merge(
ogae_data, on="Contestant", how="left"
) # .fillna(0)
polls_data["Year"] = year
all_polls_data = all_polls_data.append(polls_data)
all_polls_data = pd.DataFrame(
columns=[
"Contestant",
"Year",
"Average OGAE Points",
"Average Eurovision World Points",
"Average My Eurovision Scoreboard Points",
"Average Wiwibloggs Points",
"Average EuroJury Online Points",
"Average EuroJury Jury Points",
]
)
for year in years:
# get poll data
file_name = year + "_ogae_results.csv"
ogae_data = pd.read_csv(
os.path.join(ogae_data_path, file_name), encoding="ISO-8859-1"
)
ogae_data["Average Points"] = [
sum(voting_points) * i / sum(ogae_data["Points"]) for i in ogae_data["Points"]
]
ogae_data = ogae_data.rename(columns={"Average Points": "Average OGAE Points"})
# data = get_poll_data_eurovsion_score(ogae_data_path, year+'_ogae_results.csv', "Average OGAE Points")
myeurovisionscoreboard_data = get_poll_data_eurovsion_score(
myeurovisionscoreboard_data_path,
year + "_myeurovisionscoreboard_results.csv",
"Average My Eurovision Scoreboard Points",
)
file_name = year + "_eurojury_results.csv"
eurojury_data = pd.read_csv(
os.path.join(eurojury_data_path, file_name), encoding="ISO-8859-1"
)
eurojury_data["Contestant"] = (
eurojury_data["Contestant"]
.replace("Macedonia", "North Macedonia")
.replace("North Macedoia", "North Macedonia")
.replace("Marino", "San Marino")
)
eurojury_data["Average Online Points"] = [
sum(voting_points) * i / sum(eurojury_data["Online Points"])
for i in eurojury_data["Online Points"]
]
eurojury_data["Average Jury Points"] = [
sum(voting_points) * i / sum(eurojury_data["Jury Points"])
for i in eurojury_data["Jury Points"]
]
eurojury_data = eurojury_data.rename(
columns={"Average Online Points": "Average EuroJury Online Points"}
)
eurojury_data = eurojury_data.rename(
columns={"Average Jury Points": "Average EuroJury Jury Points"}
)
# data = get_poll_data_eurovsion_score(eurojury_data_path, year+'_eurojury_results.csv', "Average EuroJury Points")
eurovisionworld_data = get_poll_data_popular_vote(
eurovisionworld_data_path,
year + "_eurovisionworld_results.csv",
"Average Eurovision World Points",
)
if year in ["2018", "2019", "2021", "2022"]:
wiwibloggs_data = get_poll_data_popular_vote(
wiwibloggs_data_path,
year + "_wiwibloggs_results.csv",
"Average Wiwibloggs Points",
)
# merge results
polls_data = eurovisionworld_data.merge(ogae_data, on="Contestant", how="left")
polls_data = polls_data.merge(
myeurovisionscoreboard_data, on="Contestant", how="left"
)
polls_data = polls_data.merge(eurojury_data, on="Contestant", how="left")
if year in ["2018", "2019", "2021", "2022"]:
polls_data = polls_data.merge(wiwibloggs_data, on="Contestant", how="left")
polls_data["Year"] = year
all_polls_data = all_polls_data.append(polls_data)
all_polls_data = all_polls_data.drop(columns=["Points", "Online Points", "Jury Points"])
all_polls_data.head()
# Read in song data
song_data = pd.read_csv(
os.path.join(song_data_path, "song_data.csv"), encoding="Latin-1"
)
song_data["Year"] = song_data["year"].fillna(0).astype(int).astype(str)
song_data = song_data[[str(i) in years for i in song_data["Year"]]]
song_data["english_10"] = [1 if i == "English" else 0 for i in song_data["language"]]
song_data["backing_instruments_10"] = [
int(i > 0) for i in song_data["backing_instruments"]
]
song_data["backing_dancers_10"] = [int(i > 0) for i in song_data["backing_dancers"]]
song_data["backing_singers_10"] = [int(i > 0) for i in song_data["backing_singers"]]
country_data = pd.read_csv(os.path.join(song_data_path, "country_data.csv"))
entry_data = song_data.merge(country_data, on="country", how="left")
entry_data["Contestant"] = entry_data["country"]
entry_data["favourite_10"] = entry_data["favourite_10"].astype("int")
region_dummies = pd.get_dummies(entry_data["region"])
region_list = list(region_dummies.columns)
entry_data = pd.concat([entry_data, region_dummies], axis=1)
entry_data = entry_data[entry_data["qualified"].isin(["1", "0"])]
entry_data["qualified"] = entry_data["qualified"].astype("int")
# 54 Televote points is the expected number needed to qualify
# 48.5% of entries have qualified, and the top 48.5% (103) of entries since 2016 have achieved 54 Televote points or higher
entry_data["televote_qualified"] = [
1 if i >= 54 else 0 for i in entry_data["semi_televote_points"]
]
used_fields = [
"Contestant",
"Year",
"semi_televote_points",
"qualified",
"televote_qualified",
"english_10",
"favourite_10",
"backing_instruments_10",
"backing_dancers_10",
"backing_singers_10",
"instrument_10",
"host_10",
]
used_fields.extend(region_list)
entry_data = entry_data[used_fields]
full_data = entry_data.merge(all_polls_data, on=["Contestant", "Year"], how="left")
full_data.head()
# ## Qualifying Decision Tree
# set variables
target_variable = ["qualified"]
prediction_variables = [
"Average OGAE Points",
"Average Eurovision World Points",
"Average My Eurovision Scoreboard Points",
"Average EuroJury Online Points",
"Average EuroJury Jury Points",
"english_10",
"favourite_10",
"backing_instruments_10",
"backing_dancers_10",
"backing_singers_10",
"instrument_10",
"host_10",
]
prediction_variables.extend(region_list)
data = full_data[prediction_variables + target_variable]
X = data[prediction_variables].values
Y = data[target_variable].values
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.3
) # , random_state = 10)
cart = DecisionTreeClassifier(max_depth=3, min_samples_leaf=20)
cart.fit(X_train, y_train)
y_pred = cart.predict(X_test)
# print("Model RMSE: ", str(np.sqrt(mean_squared_error(y_test,y_pred))))
# print("Eurovision World RMSE: ", str(np.sqrt(mean_squared_error(y_test, X_test[:,0]))))
X = data[prediction_variables].values
Y = data[target_variable].values
cart = DecisionTreeClassifier(max_depth=3, min_samples_leaf=20)
cart.fit(X, Y)
y_pred = cart.predict(X)
# print("Model RMSE: ", str(np.sqrt(mean_squared_error(Y,y_pred))))
# print("Eurovision World RMSE: ", str(np.sqrt(mean_squared_error(Y, X[:,0]))))
plt.figure(figsize=(25, 10))
ax = plt.axes()
tree.plot_tree(
cart, feature_names=prediction_variables, filled=True
) # left = true, right = false
plt.suptitle("Qualification Decision Tree (left = yes, right = no)")
# ## Semi Televote Tree
# set variables
target_variable = ["semi_televote_points"]
prediction_variables = [
"Average OGAE Points",
"Average Eurovision World Points",
"Average My Eurovision Scoreboard Points",
"Average EuroJury Online Points",
"Average EuroJury Jury Points",
"english_10",
"favourite_10",
"backing_instruments_10",
"backing_dancers_10",
"backing_singers_10",
"instrument_10",
"host_10",
]
prediction_variables.extend(region_list)
data = full_data[prediction_variables + target_variable]
X = data[prediction_variables].values
Y = data[target_variable].values
cart = DecisionTreeRegressor(max_depth=3, min_samples_leaf=10)
cart.fit(X, Y)
y_pred = cart.predict(X)
# print("Model RMSE: ", str(np.sqrt(mean_squared_error(Y,y_pred))))
# print("Eurovision World RMSE: ", str(np.sqrt(mean_squared_error(Y, X[:,0]))))
plt.figure(figsize=(25, 10))
ax = plt.axes()
tree.plot_tree(
cart, feature_names=prediction_variables, filled=True
) # left = true, right = false
plt.suptitle("Jury Decision Tree (left = yes, right = no)")
# ## Semi Televote Qualifying Tree
# set variables
target_variable = ["televote_qualified"]
prediction_variables = [
"Average OGAE Points",
"Average Eurovision World Points",
"Average My Eurovision Scoreboard Points",
"Average EuroJury Online Points",
"Average EuroJury Jury Points",
"english_10",
"favourite_10",
"backing_instruments_10",
"backing_dancers_10",
"backing_singers_10",
"instrument_10",
"host_10",
]
prediction_variables.extend(region_list)
data = full_data[prediction_variables + target_variable]
X = data[prediction_variables].values
Y = data[target_variable].values
cart = DecisionTreeClassifier(max_depth=3, min_samples_leaf=20)
cart.fit(X, Y)
y_pred = cart.predict(X)
plt.figure(figsize=(25, 10))
ax = plt.axes()
tree.plot_tree(
cart, feature_names=prediction_variables, filled=True
) # left = true, right = false
plt.suptitle("Qualification Decision Tree (left = yes, right = no)")
data = full_data[full_data["Average My Eurovision Scoreboard Points"] < 0.28]
print(sum(data["qualified"]) / len(data), sum(data["qualified"]), len(data))
data = full_data[full_data["Average My Eurovision Scoreboard Points"] < 0.46]
print(sum(data["qualified"]) / len(data), sum(data["qualified"]), len(data))
data = full_data[full_data["Average My Eurovision Scoreboard Points"] > 1.8]
print(sum(data["qualified"]) / len(data), sum(data["qualified"]), len(data))
data = full_data[full_data["Average Eurovision World Points"] < 0.25]
print(sum(data["qualified"]) / len(data), sum(data["qualified"]), len(data))
data = full_data[full_data["Average Eurovision World Points"] >= 3]
print(sum(data["qualified"]) / len(data), sum(data["qualified"]), len(data))
from matplotlib import pyplot
from scipy import stats
fig, (ax1, ax2) = pyplot.subplots(1, 2, figsize=(20, 8))
ax1.set_ylabel("semi_televote_points", fontsize="medium")
ax1.set_xlabel("Average Eurovision World Points", fontsize="medium")
ax1.scatter(
y=full_data["semi_televote_points"], x=full_data["Average Eurovision World Points"]
)
ax2.set_ylabel("semi_televote_points", fontsize="medium")
ax2.set_xlabel("Average My Eurovision Scoreboard Points", fontsize="medium")
ax2.scatter(
y=full_data["semi_televote_points"],
x=full_data["Average My Eurovision Scoreboard Points"],
)
# R-squared of each poll against the semi televote points, matched to the scatter on each axis
slope, intercept, r_value, p_value, std_err = stats.linregress(
    full_data["Average Eurovision World Points"], y=full_data["semi_televote_points"]
)
ax1.set_title("R-Squared = " + str(round(r_value**2, 2)))
slope, intercept, r_value, p_value, std_err = stats.linregress(
    full_data["Average My Eurovision Scoreboard Points"],
    y=full_data["semi_televote_points"],
)
ax2.set_title("R-Squared = " + str(round(r_value**2, 2)))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
leagues = {
"bundesliga": "Bundesliga",
"laliga": "Laliga",
"ligue1": "Ligue 1",
"premier_league": "Premier League",
"seriea": "Serie A",
}
footballer_info, player_stat, goalkeeper_stat = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for dirname, _, filenames in os.walk("/kaggle/input"):
fi_delim = len("footballer_info") + 1
ps_delim = len("player_stats") + 1
for filename in filenames:
file_dir = os.path.join(dirname, filename)
print(file_dir)
if dirname.find("eu-football-transfer-price") > 0:
table = pd.read_csv(file_dir, encoding="utf-16", sep="\t", header=0).assign(
League=leagues[filename[fi_delim:-4]]
)
footballer_info = footballer_info.append(table, ignore_index=True)
elif filename.find("_gks") > 0:
table = pd.read_csv(file_dir, encoding="utf-16", sep="\t", header=0).assign(
League=leagues[filename[ps_delim:-8]]
)
goalkeeper_stat = goalkeeper_stat.append(table, ignore_index=True)
else:
table = pd.read_csv(file_dir, encoding="utf-16", sep="\t", header=0).assign(
League=leagues[filename[ps_delim:-4]]
)
player_stat = player_stat.append(table, ignore_index=True)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# a glance of footballer_info data
footballer_info.head(30)
# a glance of player_stat data
player_stat
# a glance of goalkeeper_stat data
goalkeeper_stat
footballer_info["Current Value"] = footballer_info.apply(
lambda row: float(row["Current Value"][1:-1]) / 1000
if row["Current Value"][-1] == "k"
else float(row["Current Value"][1:-1]),
axis=1,
)
footballer_info.head(30)
for league in leagues.values():
print(
f"{league}: {footballer_info[footballer_info['League'] == league].shape} - {player_stat[player_stat['League'] == league].shape} - {goalkeeper_stat[goalkeeper_stat['League'] == league].shape}"
)
for league in leagues.values():
fi_position = set(footballer_info[footballer_info["League"] == league]["Position"])
ps_position = set(player_stat[player_stat["League"] == league]["Position"])
print(
f"""{league}:
footballer_info only : {list(fi_position - ps_position)}
player_stat only : {list(ps_position - fi_position)}
intersection : {len(fi_position.intersection(ps_position))} elements
"""
)
for league in leagues.values():
fi_club = set(footballer_info[footballer_info["League"] == league]["Club"])
ps_club = set(player_stat[player_stat["League"] == league]["Club"])
gk_club = set(goalkeeper_stat[goalkeeper_stat["League"] == league]["Club"])
print(
f"""{league}:
footballer_info - player_stat : {list(fi_club - ps_club)}
player_stat - footballer_info : {list(ps_club - fi_club)}
goalkeeper_stat - footballer_info : {list(gk_club - fi_club)}
intersection : {len(fi_club.intersection(ps_club))} elements
"""
)
for_2_clubs_name = list(player_stat[player_stat["Club"] == "for 2 clubs"]["Name"])
latest_club = footballer_info[footballer_info["Name"].isin(for_2_clubs_name)][
["Name", "Club"]
]
print(f"""{len(latest_club)} / {len(for_2_clubs_name)} players""")
latest_club
m = latest_club.set_index("Name")["Club"]
player_stat["Club"] = player_stat["Name"].map(m).fillna(player_stat["Club"])
player_stat.loc[
player_stat["Name"].isin(latest_club["Name"]), ["Name", "Club"]
].sort_values(by=["Name"]).reset_index(drop=True).equals(
latest_club.sort_values(by=["Name"]).reset_index(drop=True)
)
column_join = footballer_info.columns.intersection(player_stat.columns)
football_stats = pd.merge(footballer_info, player_stat, on=list(column_join))
football_stats[football_stats["Nation"] != football_stats["Nationality"]]
football_stats.drop(columns="Nationality")
column_join = footballer_info.columns.intersection(goalkeeper_stat.columns)
goalkeeper_stats = pd.merge(footballer_info, goalkeeper_stat, on=list(column_join))
goalkeeper_stats.drop(columns="Nationality")
football_stats.to_csv("player_statistics.csv", encoding="utf-16", sep="\t", index=False)
goalkeeper_stats.to_csv(
"goalkeeper_statistics.csv", encoding="utf-16", sep="\t", index=False
)
|
# # Portfolio optimization
# **Problem**: Is it possible to use macroeconomic data to "predict" an optimal asset allocation for a portfolio to achieve better risk-adjusted returns?
# ## Data Collection
# **Input**: -
# **Output**: Raw data stored in MongoDB
# Where we took data from:
# - OECD (https://data.oecd.org/api/)
# - FRED (https://fred.stlouisfed.org/docs/api/fred/)
# - YahooFinance (library yfinance)
# - Investing.com (scraping with BeautifulSoup4)
# First thing, we choose what indexes to use as a benchmark for the different asset classes (Equity, Bond, Real Estate, Commodity, Cash). These will be the **targets** in our model.
# Equity (Yahoo finance, with related ticker):
# - SP500 ^GSPC
# - DowJones ^DJI
# - Nasdaq ^IXIC
# - Russell2000 ^RUT
# Bond:
# - Long-term interest rates (OECD https://data.oecd.org/interest/long-term-interest-rates.htm)
# - Treasury10Y Yield (Yahoo Finance ^TNX)
# Real Estate:
# - All-Transactions House Price Index (FRED series_id = USSTHPI)
# - Housing prices (OECD https://data.oecd.org/price/housing-prices.htm)
# Commodity (Investing.com):
# - GOLD (https://www.investing.com/commodities/gold)
# - OIL (https://www.investing.com/commodities/crude-oil)
# - WHEAT (https://www.investing.com/commodities/us-wheat)
# Cash (OECD):
# - Short-term interest rates (OECD https://data.oecd.org/interest/short-term-interest-rates.htm)
# As **features** we take every series in the FRED and OECD datasets. These contain data such as gdp, growth, inflation, unemployment, equity market volatility, new houses permits, FED rates, gold reserves, balance of payments, and much more.
# We save raw data as-is in MongoDB Atlas, which we use as a Data Lake.
# The alternatives we evaluated are S3 and DocumentDB (AWS).
# We chose MongoDB Atlas, as it offers a free tier with 512MB of storage while still allowing us to query the documents (unlike S3)
# ### OECD Data Collection
# OECD presents data via REST API with no auth.
# https://data.oecd.org/api/sdmx-json-documentation/
# Data can be retrieved via http requests containing filters and dataset id.
# "Live" most recent data is in the DP_LIVE dataset.
# Here we get all features data from OECD + 3 targets described above.
# Below is an example of a request for GDP data for the USA.
import requests
url = f"https://stats.oecd.org/SDMX-JSON/data/DP_LIVE/USA.GDP..MLN_USD./all?dimensionAtObservation=allDimensions&startTime=2010"
r = requests.get(url).json()
print(r["dataSets"])
# The data format is a little bit obscure at this point, but we will handle (and explain) it in the data cleaning part of the process.
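# A minimal sketch of how the response above can be unpacked, assuming the usual SDMX-JSON
# layout (observations keyed by "dim0:dim1:...:time", with the time dimension listed last
# under structure/dimensions/observation); the full, general version lives in the cleaning step.
import pandas as pd

obs = r["dataSets"][0]["observations"]
time_values = r["structure"]["dimensions"]["observation"][-1]["values"]
records = [
    # the last index in each key points at the time period; val[0] is the observation value
    {"period": time_values[int(key.split(":")[-1])]["id"], "value": val[0]}
    for key, val in obs.items()
]
gdp_usa = pd.DataFrame(records).sort_values("period")
gdp_usa.tail()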
# ### FRED Data collection
# FRED presents data via REST API with authentication via api_key (free to request and use)
# https://fred.stlouisfed.org/docs/api/fred/
# To retrieve a series' data you need to specify the corresponding series_id.
# We couldn't find a comprehensive series_id list, so we decided to traverse the whole tree structure of categories and series (see the sketch after the example below).
# We start from the root category, ask for its child categories, and so on. Once we have all the categories, we ask for the series contained in each category. This way we retrieved every possible series_id.
# Due to a higher than expected amount of data, we chose to keep only series with "popularity" >= 30. Popularity is a metadata field of each series representing the public's interest in that series. (For example, "GDP" data for the USA is "more interesting" than "Employed Persons in Talbot County, GA".)
# Here we get all features data from FRED + 1 target described above.
# Below is an example of a request for GDP data for the USA. The api_key has been obscured for privacy reasons; to run the same code you will need to request an api_key from FRED.
# https://fred.stlouisfed.org/docs/api/api_key.html
import fredapi as fa
import os
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
fred_api_key = user_secrets.get_secret("fred_api_key")
fred = fa.Fred(api_key=fred_api_key)
df = fred.get_series("GDP").to_frame()
df.tail()
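# A rough sketch of the category-tree walk described above, using the public
# fred/category/children and fred/category/series endpoints (not the exact code we ran;
# it ignores rate limits and error handling, and reuses fred_api_key from above).
import requests

FRED_BASE = "https://api.stlouisfed.org/fred"

def walk_categories(category_id=0):
    # Yield this category id, then recurse into all of its children (the root category id is 0)
    yield category_id
    resp = requests.get(
        f"{FRED_BASE}/category/children",
        params={"category_id": category_id, "api_key": fred_api_key, "file_type": "json"},
    ).json()
    for child in resp.get("categories", []):
        yield from walk_categories(child["id"])

def popular_series_ids(category_id, min_popularity=30):
    # Return the ids of series in this category whose popularity is at least min_popularity
    resp = requests.get(
        f"{FRED_BASE}/category/series",
        params={"category_id": category_id, "api_key": fred_api_key, "file_type": "json"},
    ).json()
    return [s["id"] for s in resp.get("seriess", []) if s.get("popularity", 0) >= min_popularity]

# Example (not run here):
# series_ids = {sid for cid in walk_categories() for sid in popular_series_ids(cid)}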
# ### YahooFinance Data Collection
# For YahooFinance data we can use the yfinance library.
# https://pypi.org/project/yfinance/
# Here we get the target data we need from YahooFinance as described above.
# Below is an example of a request for S&P500 price data.
import yfinance as yf
ticker = "^GSPC"
t = yf.Ticker(ticker)
df = t.history(period="max", interval="1mo")
df.tail()
# ### Investing.com Data Collection
# For investing.com we manually downloaded the historical data as .csv files and created a scraper that retrieves the subsequent data.
# The limitation of the scraper is that data older than the past month is loaded via JavaScript on the page, so only the most recent rows are present in the static HTML.
# That part could probably be handled with Selenium, but we tried to keep things as simple as possible and use only BeautifulSoup.
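# A minimal illustration of the BeautifulSoup approach (the URL and table lookup are
# assumptions for the sketch: investing.com's markup changes over time, and as noted above
# only the most recent rows are present in the static HTML).
import requests
from bs4 import BeautifulSoup

page_url = "https://www.investing.com/commodities/gold-historical-data"  # assumed page
resp = requests.get(page_url, headers={"User-Agent": "Mozilla/5.0"})  # a browser-like UA helps avoid blocks
soup = BeautifulSoup(resp.text, "html.parser")
table = soup.find("table")  # assume the first table on the page holds the recent quotes
rows = []
if table is not None:
    for tr in table.find_all("tr")[1:]:  # skip the header row
        cells = [td.get_text(strip=True) for td in tr.find_all("td")]
        if cells:
            rows.append(cells)
print(rows[:5])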
# ### Storing Data in MongoDB
# We save raw data as-is in MongoDB Atlas, which we use as a Data Lake.
# https://www.mongodb.com/cloud/atlas/register
# To store a pandas Dataframe we have to convert it to a dictionary.
# Each document in MongoDB is assigned a random "_id". We can override this to get a unique key column in the collection.
# Below is an example of how to connect to MongoDB (in this case the Atlas version) and insert a json document into the desired database and collection. You would need an account on MongoDB Atlas to run this. Alternatively, you can install MongoDB on your local machine, in which case the connection string would look like: *"mongodb://localhost:27017"*
from pymongo import MongoClient
import json
mongo_connection_string = user_secrets.get_secret("mongo_connection_string")
data = {"_id": ticker, "data": json.loads(df.reset_index().to_json(orient="records"))}
# connection_string = f"mongodb://{username}:{password}@cluster0.3dxfmjo.mongodb.net/?" \
# f"retryWrites=true&w=majority"
client = MongoClient(mongo_connection_string)
client
# database = client[db_name]
# collection = database[collection]
# collection.insert_one(data)
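# A sketch of the write we would actually run (like the commented lines above): replace_one
# with upsert=True overwrites the ticker's document on re-runs instead of raising a
# duplicate-key error. The database/collection names here are placeholders.
# database = client["raw_data"]
# collection = database["yfinance"]
# collection.replace_one({"_id": data["_id"]}, data, upsert=True)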
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_auc_score, mean_squared_error
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import (
RandomForestRegressor,
GradientBoostingRegressor,
AdaBoostRegressor,
)
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 500)
df = pd.read_csv("/kaggle/input/food-delivery-dataset/train.csv")
# Overview of the data
def check_df(dataframe, head=5):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(head))
print("##################### Tail #####################")
print(dataframe.tail(head))
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
check_df(df)
df.head()
# Convert the Order_Date column to datetime
df["Order_Date"] = pd.to_datetime(df["Order_Date"])
df["Order_Date"].head()
cols = [
"Restaurant_latitude",
"Restaurant_longitude",
"Delivery_location_latitude",
"Delivery_location_longitude",
]
for col in cols:
df[col] = abs(df[col])
df["Order_Date"] = pd.to_datetime(df["Order_Date"])
df["Order_Date"].head()
df = df.replace("NaN", float(np.nan), regex=True)
df.isnull().sum()
# Compute the delivery distance
from geopy.distance import geodesic
import gc
df["distance"] = np.zeros(len(df))
restaurant_cordinates_df = df[
["Restaurant_latitude", "Restaurant_longitude"]
].to_numpy()
delivery_location_cordinates_df = df[
["Delivery_location_latitude", "Delivery_location_longitude"]
].to_numpy()
for i in range(len(df)):
    # geodesic(...).km returns the distance in kilometres as a float, so no string parsing is needed
    df.loc[i, "distance"] = geodesic(
        restaurant_cordinates_df[i], delivery_location_cordinates_df[i]
    ).km
df["distance"] = df["distance"].astype("float64")
df["distance"].describe()
df["distance"]
# Take a backup copy
df_yedek = df
# Order preparation time
df = df.replace("NaN", float(np.nan), regex=True)
df.isnull().sum()
df["Time_Orderd"] = pd.to_timedelta(df["Time_Orderd"])
df["Time_Order_picked"] = pd.to_timedelta(df["Time_Order_picked"])
td = pd.Timedelta(1, "d")
df.loc[(df["Time_Order_picked"] < df["Time_Orderd"]), "preparation1"] = (
df["Time_Order_picked"] - df["Time_Orderd"] + td
)
df.loc[(df["Time_Order_picked"] > df["Time_Orderd"]), "preparation2"] = (
df["Time_Order_picked"] - df["Time_Orderd"]
)
df["preparation1"].fillna(df["preparation2"], inplace=True)
df["preparation"] = pd.to_timedelta(df["preparation1"], "minute")
df = df.drop(columns=["preparation1", "preparation2"])
# Extract day names
df["day_name"] = df["Order_Date"].dt.day_name()
# df["Gün Dilimi"].value_counts()
df.loc[
(df["Time_Orderd"].dt.components.hours >= 6)
& (df["Time_Orderd"].dt.components.hours < 12),
"Gün Dilimi",
] = "Sabah"
df.loc[
(df["Time_Orderd"].dt.components.hours >= 12)
& (df["Time_Orderd"].dt.components.hours < 15),
"Gün Dilimi",
] = "Öğle"
df.loc[
(df["Time_Orderd"].dt.components.hours >= 15)
& (df["Time_Orderd"].dt.components.hours < 18),
"Gün Dilimi",
] = "İkindi"
df.loc[
(df["Time_Orderd"].dt.components.hours >= 18)
& (df["Time_Orderd"].dt.components.hours < 22),
"Gün Dilimi",
] = "Akşam"
df.loc[
(df["Time_Orderd"].dt.components.hours >= 22)
& (df["Time_Orderd"].dt.components.hours < 24),
"Gün Dilimi",
] = "Gece"
df.loc[
(df["Time_Orderd"].dt.components.hours >= 0)
& (df["Time_Orderd"].dt.components.hours < 6),
"Gün Dilimi",
] = "Gece"
check_df(df)
df.tail()
df["Time_taken(min)"] = df["Time_taken(min)"].astype("str").str.extract("(\d+)")
df["Time_taken(min)"] = df["Time_taken(min)"].astype("timedelta64[m]")
df["Delivery_person_Ratings"] = df["Delivery_person_Ratings"].astype("float64")
df["Delivery_person_Age"] = df["Delivery_person_Age"].astype("float64")
df["Delivery_person_ID"].value_counts()
df.nunique()
df.groupby("Gün Dilimi").agg({"Time_taken(min)": ["mean", "count"]})
# NUMBER OF RESTAURANTS : 389
# NUMBER OF DELIVERY LOCATIONS : 4373
df_yedek2 = df
# df["Restaurant_latitude"] = df["Restaurant_latitude"].astype("str")
# df["Restaurant_longitude"] = df["Restaurant_longitude"].astype("str")
# df["Restaurant"] = df["Restaurant_latitude"] + " and "+ df["Restaurant_longitude"]
# for i in range(len(df["Restaurant"].unique())+1):
# id=str(i)
# df.loc[df["Restaurant"]==df["Restaurant"].unique()[i-1]] = "restaurant" + id
df = df.drop(
[
"Restaurant_latitude",
"Restaurant_longitude",
"Delivery_location_latitude",
"Delivery_location_longitude",
],
axis=1,
)
df.head()
df.head()
df["Type_of_order"].value_counts()
# CAPTURING THE CATEGORICAL AND NUMERICAL VARIABLES
def grab_col_names(dataframe, cat_th=10, car_th=20):
"""
grab_col_names for given dataframe
:param dataframe:
:param cat_th:
:param car_th:
:return:
"""
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
    # cat_cols + num_cols + cat_but_car = total number of variables.
    # num_but_cat is already contained in cat_cols.
    # So these 3 lists together cover every variable: cat_cols + num_cols + cat_but_car.
    # num_but_cat is printed only for reporting purposes.
return cat_cols, cat_but_car, num_cols
cat_cols, cat_but_car, num_cols = grab_col_names(df)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Suppressing (capping) the outliers
def outlier_thresholds(dataframe, variable, low_quantile=0.10, up_quantile=0.90):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
# Outlier check
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
if col != "Time_taken(min)":
print(col, check_outlier(df, col))
# Capping the outliers with the thresholds
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
for col in num_cols:
if col != "Time_taken(min)":
replace_with_thresholds(df, col)
def rare_analyser(dataframe, target, cat_cols):
for col in cat_cols:
print(col, ":", len(dataframe[col].value_counts()))
print(
pd.DataFrame(
{
"COUNT": dataframe[col].value_counts(),
"RATIO": dataframe[col].value_counts() / len(dataframe),
"TARGET_MEAN": dataframe.groupby(col)[target].mean(),
}
),
end="\n\n\n",
)
rare_analyser(df, "Time_taken(min)", cat_cols)
# Missing value analysis
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
missing_values_table(df)
# This function fills missing values with the median or mean
def quick_missing_imp(
data, num_method="median", cat_length=20, target="Time_taken(min)"
):
variables_with_na = [
col for col in data.columns if data[col].isnull().sum() > 0
    ]  # list the variables that have missing values
temp_target = data[target]
print("# BEFORE")
print(
data[variables_with_na].isnull().sum(), "\n\n"
    )  # number of missing values per variable before imputation
    # if the variable is object-typed and has at most cat_length classes, fill missing values with the mode
data = data.apply(
lambda x: x.fillna(x.mode()[0])
if (x.dtype == "O" and len(x.unique()) <= cat_length)
else x,
axis=0,
)
    # if num_method is mean, fill the missing values of non-object variables with the mean
if num_method == "mean":
data = data.apply(lambda x: x.fillna(x.mean()) if x.dtype != "O" else x, axis=0)
    # if num_method is median, fill the missing values of non-object variables with the median
elif num_method == "median":
data = data.apply(
lambda x: x.fillna(x.median()) if x.dtype != "O" else x, axis=0
)
data[target] = temp_target
print("# AFTER \n Imputation method is 'MODE' for categorical variables!")
print(
" Imputation method is '" + num_method.upper() + "' for numeric variables! \n"
)
print(data[variables_with_na].isnull().sum(), "\n\n")
return data
df = quick_missing_imp(df, num_method="median", cat_length=17)
# ENCODING
from sklearn.preprocessing import LabelEncoder
def label_encoder(dataframe, binary_col):
labelencoder = LabelEncoder()
dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col])
return dataframe
binary_cols = [
col for col in df.columns if df[col].dtypes == "O" and len(df[col].unique()) == 2
]
for col in binary_cols:
label_encoder(df, col)
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
df = one_hot_encoder(df, cat_cols, drop_first=True)
df.head()
df_yedek3 = df
df.info()
data = df.drop(["ID", "Delivery_person_ID"], axis=1)
data.info()
# MODEL BUILDING
data["id"] = range(1, len(df) + 1)
X = data.drop(["Time_taken(min)"], axis=1)
y = data["Time_taken(min)"]
data["Time_taken(min)"] = data["Time_taken(min)"] / np.timedelta64(1, "s")
data["Time_taken(min)"] = data["Time_taken(min)"].astype("float64")
data.head()
data.head()
data.drop(["Order_Date", "Time_Orderd", "Time_Order_picked"], axis=1, inplace=True)
data.dtypes
df = df.drop(["ID", "Delivery_person_ID"], axis=1)
# unique_values = b.unique()
# for value in unique_values:
# try:
# float(value)
# except ValueError:
# print(f"{value} cannot be converted to float")
data.head()
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
X = data.drop(["Time_taken(min)"], axis=1)
y = data["Time_taken(min)"]
# Build the model on the training data and evaluate its performance.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=17
)
print(data.columns[data.columns.str.contains("[^a-zA-Z0-9_]").tolist()])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
    if save:
        # save before plt.show(); otherwise the current figure is cleared and an empty file is written
        plt.savefig("importances.png")
    plt.show()
model = CatBoostRegressor(verbose=False)
model.fit(X, y)
plot_importance(model, X)
def evaluate_models(X_train, X_test, y_train, y_test):
models = [
LinearRegression(),
Ridge(),
Lasso(),
ElasticNet(),
DecisionTreeRegressor(),
RandomForestRegressor(),
GradientBoostingRegressor(),
AdaBoostRegressor(),
KNeighborsRegressor(),
MLPRegressor(),
]
for model in models:
model_name = type(model).__name__
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
train_rmse = mean_squared_error(y_train, y_train_pred, squared=False)
test_rmse = mean_squared_error(y_test, y_test_pred, squared=False)
train_r2 = r2_score(y_train, y_train_pred)
test_r2 = r2_score(y_test, y_test_pred)
print(f"{model_name}:")
print(f"Train RMSE: {train_rmse:.2f}")
print(f"Test RMSE: {test_rmse:.2f}")
print(f"Train R^2: {train_r2:.2f}")
print(f"Test R^2: {test_r2:.2f}")
print("-------------------------------------------------------")
evaluate_models(X_train, X_test, y_train, y_test)
# RandomForestRegressor:
# Train RMSE: 91.29
# Test RMSE: 241.92
# Train R^2: 0.97
# Test R^2: 0.82
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# define the model
rf = RandomForestRegressor()
# define the grid of hyperparameters to search
param_grid = {
"n_estimators": [100, 200, 500],
"max_depth": [
5,
10,
20,
],
"max_features": ["sqrt", "log2"],
}
# define the search
search_rf = GridSearchCV(rf, param_grid, cv=5)
# perform the search
search_rf.fit(X_train, y_train)
# get the best hyperparameters
print(search_rf.best_params_)
model = RandomForestRegressor(**search_rf.best_params_)
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
train_rmse = mean_squared_error(y_train, y_train_pred, squared=False)
test_rmse = mean_squared_error(y_test, y_test_pred, squared=False)
train_r2 = r2_score(y_train, y_train_pred)
test_r2 = r2_score(y_test, y_test_pred)
print(f"{model}:")
print(f"Train RMSE: {train_rmse:.2f}")
print(f"Test RMSE: {test_rmse:.2f}")
print(f"Train R^2: {train_r2:.2f}")
print(f"Test R^2: {test_r2:.2f}")
print("-------------------------------------------------------")
|
# ### Machine learning:
# Machine learning is a subset of artificial intelligence that gives a machine the ability to learn automatically from experience without being explicitly programmed.
# ### Observations:
# At the end of the kernel you can find the evaluation metrics, where the AUC metric is not good for any of the algorithms tested
# - The accuracy of some models turned out to be 100%, while precision, recall and F1-score are not satisfactory
# - The challenging part of this dataset is cleaning the raw data.
# ### Note:
# - one possible reason the models do not perform well is the very small size of the dataset.
#
#
# Import the libraries for data loading and preprocessing.
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import Pipeline
from sklearn.ensemble import (
RandomForestClassifier,
AdaBoostClassifier,
GradientBoostingClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import (
RocCurveDisplay,
roc_curve,
auc,
classification_report,
confusion_matrix,
)
project_data = pd.read_csv("../input/transport/transport.csv")
project_data.head()
# new dataframe with the split value columns
new = project_data[
"num;age;sexe;sitfam;principal;voiture;velo;commun;revenu"
].str.split(";", n=9, expand=True)
# build separate columns from the new dataframe
project_data["num"] = pd.to_numeric(new[0])
project_data["age"] = pd.to_numeric(new[1])
project_data["sexe"] = new[2].astype(str)
project_data["sitfam"] = pd.to_numeric(new[3])
project_data["principal"] = pd.to_numeric(new[4])
project_data["voiture"] = pd.to_numeric(new[5])
project_data["velo"] = pd.to_numeric(new[6])
project_data["commun"] = pd.to_numeric(new[7])
project_data["revenu"] = pd.to_numeric(new[8])
# Dropping the old column
project_data.drop(
columns=["num;age;sexe;sitfam;principal;voiture;velo;commun;revenu"], inplace=True
)
project_data.columns
# Check the column layout again
project_data.head()
# Review the statistics of the numerical variables with the describe method.
# Transposing the result makes the statistics easier to read.
project_data.describe()
# use the info method to get more information about the dataset.
project_data.info()
# returns the number of NaN values in every column
project_data.isna().sum()
# ### Data preprocessing:
# The sklearn.preprocessing.LabelEncoder class encodes the labels of a categorical feature
# as numeric values between 0 and the number of classes minus 1. Once instantiated, the fit
# method trains it (building the mapping between labels and numbers) and the transform method
# converts the labels passed as argument into the corresponding numbers.
# The fit_transform method performs both actions at once.
encoder = LabelEncoder()
project_data["sexe"] = encoder.fit_transform(project_data["sexe"])
# Use this method to fill the missing values with the median of each column.
project_data = project_data.apply(lambda x: x.fillna(x.median()), axis=0)
# ## Univariate and bivariate analysis
# Description: The chart shows density on the y axis and age on the x axis, split by the "commun" variable (whether the individual takes public transport or not)
plt.figure(figsize=(12, 8))
sns.displot(data=project_data, x="age", hue="commun", kind="kde", fill=True)
# Description: public transport use by sex
plt.figure(figsize=(12, 8))
sns.displot(data=project_data, x="sexe", hue="commun", kind="kde", fill=True)
# Description: public transport use by income level; as income increases, citizens use the transport system more.
plt.figure(figsize=(12, 8))
sns.displot(data=project_data, x="revenu", hue="commun", kind="kde", fill=True)
# plot of the main means of transport
plt.figure(figsize=(12, 8))
sns.displot(data=project_data, x="principal", hue="commun", kind="kde", fill=True)
plt.figure(figsize=(12, 8))
chart = sns.distplot(project_data["commun"], fit=norm, kde=False, color="y")
plt.figure(figsize=(12, 8))
chart = sns.distplot(project_data["age"], fit=norm, kde=False, color="y")
plt.figure(figsize=(12, 8))
chart = sns.distplot(project_data["revenu"], fit=norm, kde=False, color="y")
plt.figure(figsize=(12, 8))
data_new = project_data.drop(columns=["num"])
corr = data_new.corr()
sns.heatmap(corr, annot=True, cmap="YlGnBu")
# ## Data Preparation and Model Creation
X_train = project_data.drop(columns=["commun", "num"])
y_train = project_data["commun"]
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
print("Feature Training set shape: ", X_train.shape)
print("Target Training set shape: ", y_train.shape)
numeric_columns = X_train.select_dtypes(exclude="object").columns
print(numeric_columns)
print("*" * 100)
categorical_columns = X_train.select_dtypes(include="object").columns
print(categorical_columns)
numeric_features = Pipeline(
[
("handlingmissingvalues", SimpleImputer(strategy="median")),
("scaling", StandardScaler(with_mean=True)),
]
)
print(numeric_features)
print("*" * 100)
categorical_features = Pipeline(
[
("handlingmissingvalues", SimpleImputer(strategy="most_frequent")),
("encoding", OneHotEncoder()),
("scaling", StandardScaler(with_mean=False)),
]
)
print(categorical_features)
processing = ColumnTransformer(
[
("numeric", numeric_features, numeric_columns),
("categorical", categorical_features, categorical_columns),
]
)
processing
def prepare_model(algorithm):
model = Pipeline(
steps=[
("processing", processing),
("pca", TruncatedSVD(n_components=3, random_state=12)),
("modeling", algorithm),
]
)
model.fit(X_train, y_train)
return model
def prepare_confusion_matrix(algo, model):
print(algo)
pred = model.predict(X_test)
cm = confusion_matrix(y_test, pred)
    ax = plt.subplot()
    sns.heatmap(cm, annot=True, fmt="g", ax=ax)
    # labels, title and ticks (set before showing the figure so they are rendered)
    ax.set_xlabel("Predicted labels")
    ax.set_ylabel("True labels")
    ax.set_title("Confusion Matrix")
    plt.show()
def prepare_classification_report(algo, model):
print(algo + " Report :")
pred = model.predict(X_test)
print(classification_report(y_test, pred))
def prepare_roc_curve(algo, model):
print(algo)
y_pred_proba = model.predict_proba(X_test)[::, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)
roc_auc = auc(fpr, tpr)
curve = RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)
curve.plot()
plt.show()
algorithms = [
("logistic regression", LogisticRegression()),
("KNN classifier", KNeighborsClassifier()),
("Random Forest calssifier", RandomForestClassifier()),
("Adaboost classifier", AdaBoostClassifier()),
("Gradientboot classifier", GradientBoostingClassifier()),
("Naive-bayes Classifier", GaussianNB()),
("Support Vector Classifier", SVC(probability=True)),
]
trained_models = []
model_and_score = {}
for index, tup in enumerate(algorithms):
model = prepare_model(tup[1])
model_and_score[tup[0]] = str(model.score(X_train, y_train) * 100) + "%"
trained_models.append((tup[0], model))
# ## Model Evaluation
# A look at model performance. Each pipeline's score method is used here (for these
# classifiers it returns accuracy on the training set). The closer the value is to 1, the better.
print(model_and_score)
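# The scores above are training-set accuracy. A minimal sketch to also report accuracy
# on the held-out split created earlier (X_test / y_test):
for name, model in trained_models:
    print(name, "- test accuracy:", round(model.score(X_test, y_test) * 100, 2), "%")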
for index, tup in enumerate(trained_models):
prepare_confusion_matrix(tup[0], tup[1])
for index, tup in enumerate(trained_models):
prepare_classification_report(tup[0], tup[1])
print("\n")
for index, tup in enumerate(trained_models):
prepare_roc_curve(tup[0], tup[1])
|
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization of any data
import seaborn as sns
import matplotlib.pyplot as plt
# First we need to connect the data to Python with the help of Pandas
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
# Let's look at the data in each column
train_df.head()
# let's check the info of both data sets
train_df.info()
print("-" * 40)
test_df.info()
# let's check the central tendency and measures of dispersion on the train data set
train_df.describe()
# Findings from the data above:
# * The sample has 891 rows of data in total.
# * The Survived column has values of either 0 or 1.
# * About 38% of the samples in the training dataset survived, which is representative of the actual survival rate of about 32%.
# * Nearly 30% of the passengers had siblings and/or a spouse on board.
# * Fares varied widely, with a small percentage of passengers (<1%) paying as much as $512.
# * The percentage of elderly passengers (aged 65-80) was less than 1%.
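# A quick numeric check of the findings above (sketch, using the same train_df):
print("Survival rate:", train_df["Survived"].mean())
print("Share of fares above $500:", (train_df["Fare"] > 500).mean())
print("Share of passengers aged 65-80:", train_df["Age"].between(65, 80).mean())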
# let's check the non-numeric (object) columns
train_df.describe(include=["O"])
# Findings from the data above:
# * There are 891 unique names in the dataset, meaning no names are repeated.
# * Among the passengers, 65% are male, with "male" being the most common value, appearing 577 times out of 891.
# * The "Ticket" feature has a high ratio of duplicate values, with only 681 unique tickets (about 22% duplicates).
# * The "Cabin" values have duplicates, indicating that some passengers shared a cabin or some cabins share the same value.
# * The "Embarked" variable has three possible values, with "S" being the most common among the passengers.
# **Let's start checking the correlation between survival and the other columns by pivoting**
train_df[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_df[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_df[["SibSp", "Survived"]].groupby(["SibSp"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_df[["Parch", "Survived"]].groupby(["Parch"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
# Findings from the data above:
# * Pclass: there is a significant relation between survival rate and Pclass rank.
# * Sex: women have a much higher survival rate than men; a female passenger had about a 74% chance of survival.
# * SibSp & Parch: these show a weaker correlation with survival for some of their values.
# let's check the visualizations
sns.FacetGrid(train_df, col="Survived").map(sns.histplot, "Age", bins=10)
# Findings from the plot above:
# * The majority of passengers are in the 15-40 age range, and most of them did not survive.
# * The highest age on the ship is 80 according to the data.
# * So Age will be a useful feature in the predictive model.
grid = sns.FacetGrid(train_df, col="Survived", row="Pclass").map(
plt.hist, "Age", alpha=0.5, bins=20
)
grid.add_legend()
# Findings from the plot above:
# * Pclass 3 had the most passengers, but most of them did not survive.
# * Across Pclass 2 and 3 overall, many passengers survived.
# * In Pclass 1, most passengers survived, and it has the highest survival rate.
sns.catplot(
x="Pclass", y="Survived", hue="Sex", data=train_df, kind="bar", palette="pastel"
)
plt.xlabel("Passenger Class")
plt.ylabel("Survival Rate")
plt.title("Survival Rate by Passenger Class and Gender")
pd.crosstab(
[train_df.Embarked, train_df.Pclass],
[train_df.Sex, train_df.Survived],
margins=True,
).style.background_gradient(cmap="YlGnBu")
# Findings from the table above:
# * Embarked C had a higher survival rate than S and Q.
# * Survival for women from Pclass 1 is about 95-96%, as only 3 out of 94 women from Pclass 1 died.
# let's extract the salutations (titles) from the passenger names
train_df["Initial"] = train_df.Name.str.extract(r"([A-Za-z]+)\.")
# check for type of salutation
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap="YlGnBu")
# replace the salutation with the proper salutation
train_df["Initial"].replace(
[
"Mlle",
"Mme",
"Ms",
"Dr",
"Major",
"Lady",
"Countess",
"Jonkheer",
"Col",
"Rev",
"Capt",
"Sir",
"Don",
],
[
"Miss",
"Miss",
"Miss",
"Mr",
"Mr",
"Mrs",
"Mrs",
"Other",
"Other",
"Other",
"Mr",
"Mr",
"Mr",
],
inplace=True,
)
# let's check the average age by Initial
train_df.groupby("Initial")["Age"].mean()
# Assign the NaN ages using the ceiling of the mean age for each Initial
train_df.loc[(train_df.Age.isnull()) & (train_df.Initial == "Mr"), "Age"] = 33
train_df.loc[(train_df.Age.isnull()) & (train_df.Initial == "Mrs"), "Age"] = 36
train_df.loc[(train_df.Age.isnull()) & (train_df.Initial == "Master"), "Age"] = 5
train_df.loc[(train_df.Age.isnull()) & (train_df.Initial == "Miss"), "Age"] = 22
train_df.loc[(train_df.Age.isnull()) & (train_df.Initial == "Other"), "Age"] = 46
# check whether the Age column still has any nulls
train_df.Age.isnull().any()
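# An equivalent, less hard-coded sketch: fill missing ages with the ceiling of the
# per-Initial mean age (a no-op here, since the ages were already filled above).
train_df["Age"] = train_df["Age"].fillna(
    np.ceil(train_df.groupby("Initial")["Age"].transform("mean"))
)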
|
# # Introduction
# In today's retail industry, accurate forecasting is crucial for success. Retailers need to be able to predict demand for their products in order to maintain the right inventory levels and avoid overstocking or stockouts. Traditional forecasting methods are often subjective and lack the necessary data to make accurate predictions. However, with the advancements in machine learning, retailers now have access to sophisticated forecasting models that can analyze large amounts of data and make more accurate predictions. In this way, machine learning can help retailers optimize their inventory management, reduce waste, and improve customer satisfaction. This is particularly important in the grocery industry, where perishable goods and unpredictable product demand require retailers to be particularly strategic with their inventory management.
# # Import Modules
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
from pathlib import Path
plt.rcParams["figure.figsize"] = (12, 6)
plt.style.use("fivethirtyeight")
import warnings
warnings.filterwarnings("ignore")
# # Load the Datasets
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load the data
comp_dir = Path("../input/store-sales-time-series-forecasting")
store_sales = pd.read_csv(
comp_dir / "train.csv",
usecols=["store_nbr", "family", "date", "sales", "onpromotion"],
dtype={
"store_nbr": "category",
"family": "category",
"sales": "float32",
"onpromotion": "uint32",
},
parse_dates=["date"],
infer_datetime_format=True,
)
store_sales["date"] = store_sales.date.dt.to_period("D")
store_sales = store_sales.set_index(["store_nbr", "family", "date"]).sort_index()
# # EDA
# Perform EDA
family_sales = (
store_sales.groupby(["family", "date"]).mean().unstack("family").loc["2017"]
)
family_sales.plot(figsize=(12, 8), title="Average Sales by Family in 2017")
plt.xlabel("Date")
plt.ylabel("Average Sales")
plt.show()
import plotly.graph_objs as go
# Load predictions (assumes a predictions.csv produced by a separate forecasting step; it is not created in this notebook)
predictions = pd.read_csv(
"predictions.csv", parse_dates=["date"], index_col=["store_nbr", "family", "date"]
)
# Create line chart
fig = go.Figure()
for family in predictions.index.get_level_values("family").unique():
data = predictions.loc[(slice(None), family), "sales"]
fig.add_trace(go.Scatter(x=data.index, y=data, name=family))
fig.update_layout(
title="Predicted Sales by Family",
xaxis_title="Date",
yaxis_title="Sales",
legend_title="Family",
)
fig.show()
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## **Import data and required libraries**
# ----main libraries----
import numpy as np
import pandas as pd
# ----plotting libraries----
import seaborn as sns
import matplotlib.pyplot as plt
# ----model and support imports----
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import (
confusion_matrix,
classification_report,
accuracy_score,
recall_score,
precision_score,
f1_score,
roc_auc_score,
)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
# Load data
tel_cus_df = pd.read_csv(
"/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv"
)
# ## **Explore Data**
tel_cus_df.head()
tel_cus_df.info()
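# A quick look at the target balance (sketch; assumes the standard "Churn" column of this dataset):
tel_cus_df["Churn"].value_counts(normalize=True)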
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from sklearn.preprocessing import (
LabelEncoder,
OneHotEncoder,
OrdinalEncoder,
MinMaxScaler,
)
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import matplotlib.pyplot as plt
# train = "/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
# test = "/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
print(train.shape)
print(test.shape)
train.select_dtypes(include=object).head(20)
missing_fraction = train.isnull().mean()
tmissing_fraction = test.isnull().mean()
drop_columns = missing_fraction[missing_fraction > 0.3].index
tdrop_columns = tmissing_fraction[tmissing_fraction > 0.3].index
train = train.drop(drop_columns, axis=1)
test = test.drop(tdrop_columns, axis=1)
print(tdrop_columns)
# Drop rows where key garage/basement/masonry categorical values are missing
train.dropna(
    subset=[
        "GarageQual",
        "GarageCond",
        "MasVnrType",
        "BsmtQual",
        "BsmtCond",
        "GarageFinish",
    ],
    inplace=True,
)
# Crude gap fill: every remaining NaN (in any column) is replaced with the mean of
# LotFrontage, so no missing values are left for the model below.
train.fillna(train["LotFrontage"].mean(), inplace=True)
test.fillna(test["LotFrontage"].mean(), inplace=True)
f, axes = plt.subplots(ncols=1, figsize=(15, 15))
sns.heatmap(train.corr(), annot=True, fmt=".1f")
k = 10 # number of variables for heatmap
cols = train.corr().nlargest(k, "SalePrice")["SalePrice"].index
sns.heatmap(train[cols].corr(), annot=True, fmt=".2f")
train["SalePrice"] = StandardScaler().fit_transform(train[["SalePrice"]])
cols = (
"MSSubClass",
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"OverallCond",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PavedDrive",
"SaleType",
"SaleCondition",
)
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(train[c].values))
train[c] = lbl.transform(list(train[c].values))
# shape
print("Shape all_data: {}".format(train.shape))
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(test[c].values))
test[c] = lbl.transform(list(test[c].values))
train.head()
X = train.drop("SalePrice", axis=1)
y = train["SalePrice"]
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
# SalePrice is continuous, so a regression model is needed here
# (RandomForestClassifier would reject a continuous target).
Forest = RandomForestRegressor()
Forest.fit(X_train, y_train)
score_forest = Forest.score(X_test, y_test)  # R^2 on the held-out split
print(score_forest)
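# A short follow-up sketch: report RMSE on the held-out split as well
# (SalePrice was standardized above, so this is in standard-deviation units).
from sklearn.metrics import mean_squared_error
preds = Forest.predict(X_test)
print("RMSE:", np.sqrt(mean_squared_error(y_test, preds)))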
|
# Ideas:
# - Geospatial Analysis
# - CLV
# - RFM or Clustering
# - Sales Prediction
# - Delivery Performance
# - Product Quality
# **olist_customers_dataset**
# 1. `customer_unique_id`: unique identifier of a customer.⭐
# 2. `customer_id`: key to the orders dataset. Each order has a unique customer_id.⭐
# 3. `customer_zip_code_prefix`: first five digits of customer zip code.⭐
# 4. `customer_city`: customer city name.
# 5. `customer_state`: customer state.
# **olist_geolocation_dataset**
# 1. `geolocation_zip_code_prefix`: first 5 digits of zip code.⭐
# 2. `geolocation_lat`: latitude.
# 3. `geolocation_lng`: longitude.
# 4. `geolocation_city`: city name.
# 5. `geolocation_state`: state name
# **olist_products_dataset**
# 1. `product_id`: unique product identifier.⭐
# 2. `product_category_name`: root category of product, in Portuguese.
# 3. `product_name_lenght`: number of characters extracted from the product name.
# 4. `product_description_lenght`: number of characters extracted from the product description.
# 5. `product_photos_qty`: number of product published photos.
# 6. `product_weight_g`: product weight measured in grams.
# 7. `product_length_cm`: product length measured in centimeters.
# 8. `product_height_cm`: product height measured in centimeters.
# **olist_sellers_dataset**
# 1. `seller_id`: seller unique identifier.⭐
# 2. `seller_zip_code_prefix`: first 5 digits of seller zip code.⭐
# 3. `seller_city`: seller city name.
# 4. `seller_state`: seller state.
# **olist_orders_dataset**
# 1. `order_id`: unique identifier of the order.⭐
# 2. `customer_id`: key to the customer dataset. Each order has a unique customer_id.
# 3. `order_status`: Reference to the order status (delivered, shipped, etc).
# 4. `order_purchase_timestamp`: Shows the purchase timestamp.
# 5. `order_approved_at`: Shows the payment approval timestamp.
# 6. `order_delivered_carrier_date`: Shows the order posting timestamp. When it was handled to the logistic partner.
# 7. `order_delivered_customer_date`: Shows the actual order delivery date to the customer.
# 8. `order_estimated_delivery_date`: Shows the estimated delivery date that was informed to customer at the purchase moment.
# **olist_order_items_dataset**
# 1. `order_id`: order unique identifier.⭐
# 2. `order_item_id`: sequential number identifying number of items included in the same order.⭐
# 3. `product_id`: product unique identifier.⭐
# 4. `seller_id`: seller unique identifier.⭐
# 5. `shipping_limit_date`: Shows the seller shipping limit date for handling the order over to the logistic partner.
# 6. `price`: item price
# 7. `freight_value`: item freight value (if an order has more than one item, the freight value is split between items)
# **olist_order_payments_dataset**
# 1. `order_id`: unique identifier of an order.⭐
# 2. `payment_sequential`: a customer may pay an order with more than one payment method. If he does so, a sequence will be created to accommodate all payments.
# 3. `payment_type`: method of payment chosen by the customer.
# 4. `payment_installments`: number of installments chosen by the customer.
# 5. `payment_value`: transaction value.
# **olist_order_reviews_dataset**
# 1. `review_id`: unique review identifier.⭐
# 2. `order_id`: unique order identifier.⭐
# 3. `review_score`: Note ranging from 1 to 5 given by the customer on a satisfaction survey.
# 4. `review_comment_title`: Comment title from the review left by the customer, in Portuguese.
# 5. `review_comment_message`: Comment message from the review left by the customer, in Portuguese.
# 6. `review_creation_date`: Shows the date in which the satisfaction survey was sent to the customer.
# 7. `review_answer_timestamp`: Shows satisfaction survey answer timestamp.
# **product_category_name_translation**
# 1. `product_category_name`: category name in Portuguese⭐
# 2. `product_category_name_english`: category name in English
# Load Packages and Data
import os
import numpy as np
import pandas as pd
from scipy import stats
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_selection import SelectKBest, SelectPercentile, mutual_info_classif
from imblearn.over_sampling import RandomOverSampler
from xgboost import XGBClassifier
customers = pd.read_csv(
"../input/brazilian-ecommerce/olist_customers_dataset.csv",
dtype={"customer_zip_code_prefix": str},
)
geolocation = pd.read_csv(
"../input/brazilian-ecommerce/olist_geolocation_dataset.csv",
dtype={"geolocation_zip_code_prefix": str},
)
order_items = pd.read_csv("../input/brazilian-ecommerce/olist_order_items_dataset.csv")
order_payments = pd.read_csv(
"../input/brazilian-ecommerce/olist_order_payments_dataset.csv"
)
order_reviews = pd.read_csv(
"../input/brazilian-ecommerce/olist_order_reviews_dataset.csv"
)
orders = pd.read_csv("../input/brazilian-ecommerce/olist_orders_dataset.csv")
products = pd.read_csv("../input/brazilian-ecommerce/olist_products_dataset.csv")
sellers = pd.read_csv(
"../input/brazilian-ecommerce/olist_sellers_dataset.csv",
dtype={"seller_zip_code_prefix": str},
)
product_category_name_translation = pd.read_csv(
"../input/brazilian-ecommerce/product_category_name_translation.csv"
)
geojson = json.load(open("/kaggle/input/brazil-geojson/brazil_geo.json"))
orders.order_delivered_customer_date = pd.to_datetime(
orders.order_delivered_customer_date
)
orders.order_approved_at = pd.to_datetime(orders.order_approved_at)
orders.order_delivered_carrier_date = pd.to_datetime(
orders.order_delivered_carrier_date
)
orders.order_estimated_delivery_date = pd.to_datetime(
orders.order_estimated_delivery_date
)
orders.order_delivered_customer_date = pd.to_datetime(
orders.order_delivered_customer_date
)
order_items.shipping_limit_date = pd.to_datetime(order_items.shipping_limit_date)
order_reviews.review_answer_timestamp = pd.to_datetime(
order_reviews.review_answer_timestamp
)
order_reviews.review_creation_date = pd.to_datetime(order_reviews.review_creation_date)
df = pd.merge(orders, order_items, on="order_id", how="outer")
df = pd.merge(df, order_payments, on="order_id", how="outer")
df = pd.merge(df, order_reviews, on="order_id", how="outer")
df = pd.merge(df, products, on="product_id", how="outer")
df = pd.merge(df, customers, on="customer_id", how="outer")
df = pd.merge(df, sellers, on="seller_id", how="outer")
df = pd.merge(
df, product_category_name_translation, on="product_category_name", how="outer"
)
# 1. Product Analysis
product_sale = (
df.groupby("product_category_name_english")["price"]
.sum()
.sort_values(ascending=False)
.to_frame()
)
fig = px.bar(product_sale, y="price", hover_name=product_sale.index)
fig.update_traces(marker=dict(color="#a27e7e"))
fig.update_layout(height=800, width=1800)
fig.show()
product_score = (
df.groupby("product_category_name_english")["review_score"]
.mean()
.sort_values(ascending=True)
.to_frame()
)
fig = px.bar(product_score, y="review_score", hover_name=product_score.index)
fig.update_traces(marker=dict(color="#a27e7e"))
fig.update_layout(height=800, width=1800)
fig.show()
# 5. Geospatial Analysis
# Prepare Data for Geospatial Analysis
gp = df.groupby("customer_state")["price"].sum().to_frame()
gf = df.groupby("customer_state")["freight_value"].sum().to_frame()
tmp = df[
["customer_state", "order_delivered_customer_date", "order_delivered_carrier_date"]
]
tmp["delivery_time"] = (
df["order_delivered_customer_date"] - df["order_delivered_carrier_date"]
).dt.days.to_frame()
gdt = tmp.groupby("customer_state")["delivery_time"].mean().to_frame()
rs = df.groupby("customer_state")["review_score"].mean().to_frame()
sc = df.groupby("seller_state")["seller_id"].count().to_frame()
sc = sc.rename(columns={"seller_id": "seller_count"})
cc = df.groupby("customer_state")["customer_id"].count().to_frame()
cc = cc.rename(columns={"customer_id": "customer_count"})
order_status = (
df.groupby("customer_state")["order_status"].value_counts()
/ df.groupby("customer_state")["order_status"].count()
)
order_status = order_status.unstack().fillna(0)["delivered"].to_frame()
tmp = df[
["customer_state", "order_delivered_customer_date", "order_estimated_delivery_date"]
]
tmp["is_delayed"] = (
df["order_delivered_customer_date"] > df["order_estimated_delivery_date"]
)
is_delayed = (
tmp.groupby("customer_state")["is_delayed"].sum()
/ tmp.groupby("customer_state")["is_delayed"].count()
)
geo_analysis = pd.merge(gp, gf, left_index=True, right_index=True, how="left")
geo_analysis = pd.merge(
geo_analysis, gdt, left_index=True, right_index=True, how="left"
)
geo_analysis = pd.merge(geo_analysis, rs, left_index=True, right_index=True, how="left")
geo_analysis["freight_value_ratio"] = (
geo_analysis["freight_value"] / geo_analysis["price"]
)
geo_analysis = pd.merge(geo_analysis, sc, left_index=True, right_index=True, how="left")
geo_analysis = pd.merge(geo_analysis, cc, left_index=True, right_index=True, how="left")
geo_analysis["avg_price"] = geo_analysis["price"] / geo_analysis["customer_count"]
geo_analysis = pd.merge(
geo_analysis, order_status, left_index=True, right_index=True, how="left"
)
geo_analysis = pd.merge(
geo_analysis, is_delayed, left_index=True, right_index=True, how="left"
)
geo_analysis
def plot_state_choropleth_map(
data_frame,
locations,
geojson,
color,
color_continuous_scale="Reds",
height=800,
width=900,
):
fig = px.choropleth(
data_frame=data_frame,
locations=locations,
geojson=geojson,
color=color,
color_continuous_scale=color_continuous_scale,
scope="south america",
)
fig.update_layout(
height=height,
width=width,
title=f"<b>Brazilian E-Commerce Geolocation for {color}</b>",
geo=dict(lonaxis=dict(range=[-33.0, -80.0]), lataxis=dict(range=[5.0, -35.0])),
)
fig.show()
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="delivered",
)
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="customer_count",
)
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="avg_price",
)
# Average transaction value is higher in remote areas than in central areas!
# Most of the revenue comes from the Southeast and South regions of Brazil.
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="freight_value_ratio",
)
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="delivery_time",
)
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="review_score",
)
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="seller_count",
)
plot_state_choropleth_map(
data_frame=geo_analysis,
locations=geo_analysis.index,
geojson=geojson,
color="is_delayed",
)
|
# # Method to flatten the papyrus surface to have better training and testing data
# I looked at the fragments and noticed they are not completely flat.
# Here I want to show a way to flatten it more.
# My hope is that this will lead to similar data on similar layers.
# If similar data is on similar layers it should be easier to train a model on it.
# Also, we can drop layers if most of the relevant data sits on the same layer, and thereby reduce the memory footprint.
# Let's have a look at the current data.
#
import gc
from scipy.ndimage import gaussian_filter
from scipy import ndimage
import numpy as np
import glob
import PIL.Image as Image
import matplotlib.pyplot as plt
from tqdm import tqdm
from os import path
SAVE_IMAGE_STACK = True
PATH_TO_FRAGMENT = (
"/kaggle/input/vesuvius-challenge-ink-detection/train/1/surface_volume/"
)
WORK_DIR = "/kaggle/working/"
# Load the images into a numpy array. If you want, you can save the array so it loads faster later.
image_stack = None
if SAVE_IMAGE_STACK:
images = [
np.array(Image.open(filename), dtype=np.float32) / 65535.0
for filename in tqdm(sorted(glob.glob(PATH_TO_FRAGMENT + "*.tif")))
]
image_stack = np.stack(images)
del images
gc.collect()
with open(path.join(WORK_DIR, "image_stack.npy"), "wb") as f:
np.save(f, image_stack, allow_pickle=True)
else:
with open(path.join(WORK_DIR, "image_stack.npy"), "rb") as f:
image_stack = np.load(f)
image_stack = np.flip(
image_stack, axis=0
) # turn data upside down so papyrus is right side up
# We have seen the surface volumes from the top. Let's have a look from the side.
slice_at = 4000
slice_length = slice(2000, 2500)
plt.imshow(image_stack[:, slice_length, slice_at], cmap="gray")
plt.show()
# The papyrus is the noisy stuff between layer 20 and 64. We can see that the surface of the papyrus is not completely flat. So the ink is not on the same layer all the time.
# Let's change that. Because my way takes a lot of memory we will first try on a smaller portion of the data. Then I will show a way to do it more memory friendly.
image_stack = image_stack[:, 1000:6000, 1000:4000] # smaller portion
image_stack_ = gaussian_filter(image_stack, sigma=1) # blur data a little bit
image_stack_ = ndimage.sobel(image_stack_, axis=0) # detect edges in top-down direction
image_stack_ = gaussian_filter(image_stack_, sigma=1) # blur again
# Now for every pixel on the 2d-plane we find the first depth-index where a value is >=0.5. The 0.5 was chosen by trying out.
filtered_stack = np.where(image_stack_ >= 0.5, 1, 0)
topographic_map = np.argmax(filtered_stack, axis=0)
# Let's have a look at the data
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1)
slice_at = 500
slice_length = slice(2000, 2500)
ax1.imshow(image_stack[:, slice_length, slice_at], cmap="gray")
ax2.imshow(image_stack_[:, slice_length, slice_at], cmap="gray")
ax3.imshow(filtered_stack[:, slice_length, slice_at], cmap="gray")
ax4.plot(topographic_map[slice_length, slice_at] * -1)
plt.show()
# Looks like the topographic_map is accurate enough for our slice. There is one missing data point at ~230, but the papyrus seems to be broken there.
# If we look at the whole topographic_map we can clearly see the surface of the papyrus.
plt.imshow(topographic_map, cmap="gray")
# Now we can use the topographic map to flatten the data.
z_buffer = 5 # how much air we will leave above the papyrus
is_idx = np.indices(image_stack.shape)
image_stack_flattened = image_stack[
(is_idx[0] + topographic_map - z_buffer) % image_stack.shape[0],
is_idx[1],
is_idx[2],
]
# Let's have a look at the flattened data. We can see the surface is even, but we can also see that the layers under it are warped. So we probably should combine this in training with other data to not lose information. I will leave that to you for now.
plt.imshow(image_stack_flattened[:, slice_length, slice_at], cmap="gray")
# Looks very promising to me. Let's put it in a function we can use.
def flatten(arr, z_buffer, z_layers):
"""
:param arr: numpy array with the surface_volume_data
:param z_buffer: how much air we will leave above the papyrus
    :param z_layers: how many layers we want to keep after transforming
:return:
"""
arr = np.flip(arr, axis=0)
arr = gaussian_filter(arr, sigma=1)
arr = ndimage.sobel(arr, axis=0)
arr = gaussian_filter(arr, sigma=1)
topo = np.argmax(np.where(arr < 0.5, 0, 1), axis=0)
arr_idx = np.indices(arr.shape)
arr = arr[(arr_idx[0] + topo - z_buffer) % arr.shape[0], arr_idx[1], arr_idx[2]]
arr = arr[0:z_layers]
return arr
# If you don't have enough memory to flatten the whole image_stack in one go, here is a way to do it:
# Load from disk. We saved it earlier to WORK_DIR
with open(path.join(WORK_DIR, "image_stack.npy"), "rb") as f:
image_stack = np.load(f)
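# Note: flatten() flips the stack internally, so it expects the stack exactly as saved
# above (unflipped). If the whole fragment fits in memory you could call
# flatten(image_stack, z_buffer=3, z_layers=15) directly; the striped helper below does
# the same work in memory-friendly chunks.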
def do_in_stripes(arr, stripe_width, stripe_overlay, file_path, func, *args, **kwargs):
"""
:param arr: array to work on
:param stripe_width: width of stripes in pixels
    :param stripe_overlay: overlap between stripes in pixels so we don't get artefacts at the borders
:param file_path: A file path to temporarily save data to
:param func: The function we want to apply to our array
:param args: args for the func
:param kwargs: kwargs for the func
:return: The number of stripes saved, so we can load them easily
"""
sc = 0
with open(file_path, "wb") as f:
for i in tqdm(range(0, arr.shape[1], stripe_width)):
sc = sc + 1
start = max(i - stripe_overlay, 0)
end = min(i + stripe_width + stripe_overlay, arr.shape[1])
stripe = func(arr[:, start:end, :], *args, **kwargs)
if end == arr.shape[1]:
np.save(f, stripe[:, stripe_overlay : stripe_width + stripe_overlay, :])
break
elif i == 0:
np.save(f, stripe[:, 0:stripe_width, :])
else:
np.save(f, stripe[:, stripe_overlay : stripe_width + stripe_overlay, :])
return sc
stripe_count = do_in_stripes(
image_stack, 500, 20, path.join(WORK_DIR, "stripes"), flatten, 3, 15
) # 3 is z_buffer and 15 is z_layers for flatten function
del image_stack
with open(path.join(WORK_DIR, "stripes"), "rb") as f:
stripes = []
for i in range(0, stripe_count):
stripes.append(np.load(f))
image_stack_flattened = np.concatenate(stripes, axis=1)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sb
import os
import cv2
import numpy as np
from tqdm import tqdm
import torch
import torch.utils as utils
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print(f"# Using device: {device}")
class dehazer:
def __init__(self, IMG_SIZE, LABEL_DIR, LABEL_NAME):
self.IMG_SIZE = IMG_SIZE
self.LABEL_DIR = LABEL_DIR
self.LABEL_NAME = LABEL_NAME
self.training_data = []
def make_training_data(self):
NUM_IMAGES = len(os.listdir(self.LABEL_DIR))
for f in tqdm(range(1, NUM_IMAGES + 1)):
f = "{:02d}".format(f) + "_" + self.LABEL_NAME + ".png"
path = os.path.join(self.LABEL_DIR, f)
            img = cv2.imread(path)
            # note: images are only read here; the arrays actually used for training
            # below are rebuilt from disk with PIL and saved via np.save
REBUILD_DATA = True
IMG_SIZE = 256
gt_dir = "/kaggle/input/gttrain"
hazy_dir = "/kaggle/input/hazytrain"
if REBUILD_DATA:
dehazing = dehazer(IMG_SIZE, gt_dir, "GT")
dehazing.make_training_data()
dehazing = dehazer(IMG_SIZE, hazy_dir, "hazy")
dehazing.make_training_data()
from PIL import Image
import os, sys
import cv2
import numpy as np
# Path to image directory
path1 = "/kaggle/input/gttrain/"
dirs1 = os.listdir(path1)
dirs1.sort()
# print(dirs1)
x1_train = []
# Append images to a list
for item in dirs1:
# if os.path.isfile(path1+item):
# print(path1+item)
im = Image.open(path1 + item).convert("RGB")
im = np.array(im)
x1_train.append(im)
# print(x1_train)
# print(x1_train)
path2 = "/kaggle/input/hazytrain/"
dirs2 = os.listdir(path2)
dirs2.sort()
# print(dirs1)
x2_train = []
# Append images to a list
for item in dirs2:
# if os.path.isfile(path1+item):
# print(path1+item)
im = Image.open(path2 + item).convert("RGB")
im = np.array(im)
x2_train.append(im)
# print(x1_train)
# print(x1_train)
imgset = np.array(x1_train)
np.save("gt.npy", imgset)
imgset1 = np.array(x2_train)
np.save("hazy.npy", imgset1)
patch = np.load("gt.npy", allow_pickle=True)
mask = np.load("hazy.npy", allow_pickle=True)
len(patch), len(mask)
print(patch.shape)
for i in range(0, len(patch), 5):
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(131)
plt.imshow(mask[i])
ax = plt.subplot(132)
plt.imshow(patch[i])
plt.show()
IMG_SIZE = 256
EPOCHS = 100
batch_size = 1
learning_rate = 0.0005
patch_loader = torch.utils.data.DataLoader(
dataset=patch, batch_size=batch_size, shuffle=False
)
for data in patch_loader:
print(data.size())
print(type(data))
break
X_orig = torch.Tensor([patch[i] for i in range(len(patch))])
X_hazy = torch.Tensor([mask[i] for i in range(len(mask))])
X_orig = X_orig / 255
X_hazy = X_hazy / 255
print("X_orig: ", X_orig.size())
X_orig_T = np.transpose(X_orig, (0, 3, 1, 2))
X_hazy_T = np.transpose(X_hazy, (0, 3, 1, 2))
print("X_orig_T: ", X_orig_T.shape)
X_orig_flat = X_orig_T.reshape(-1, 1, IMG_SIZE, IMG_SIZE)
X_hazy_flat = X_hazy_T.reshape(-1, 1, IMG_SIZE, IMG_SIZE)
print("X_orig_flat: ", X_orig_flat.shape)
# # test images
#
REBUILD_DATA = True
IMG_SIZE = 256
gt_dir_test = "/kaggle/input/gttest"
hazy_dir_test = "/kaggle/input/hazytest"
if REBUILD_DATA:
dehazing = dehazer(IMG_SIZE, gt_dir_test, "GT_test")
dehazing.make_training_data()
dehazing = dehazer(IMG_SIZE, hazy_dir_test, "hazy_test")
dehazing.make_training_data()
from PIL import Image
import os, sys
import cv2
import numpy as np
# Path to image directory
path1_t = "/kaggle/input/gttest/"
dirs1_t = os.listdir(path1_t)
dirs1_t.sort()
# print(dirs1)
x1_test = []
# Append images to a list
for item in dirs1_t:
# if os.path.isfile(path1+item):
# print(path1+item)
im = Image.open(path1_t + item).convert("RGB")
im = np.array(im)
x1_test.append(im)
# print(x1_train)
# print(x1_train)
path2_t = "/kaggle/input/hazytest/"
dirs2_t = os.listdir(path2_t)
dirs2_t.sort()
# print(dirs1)
x2_test = []
# Append images to a list
for item in dirs2_t:
# if os.path.isfile(path1+item):
# print(path1+item)
im = Image.open(path2_t + item).convert("RGB")
im = np.array(im)
x2_test.append(im)
# print(x1_train)
# print(x1_train)
imgset_t = np.array(x1_test)
np.save("gt_test.npy", imgset_t)
imgset1_t = np.array(x2_test)
np.save("hazy_test.npy", imgset1_t)
patch_test = np.load("gt_test.npy", allow_pickle=True)
mask_test = np.load("hazy_test.npy", allow_pickle=True)
len(patch_test), len(mask_test)
print(patch_test.shape)
for i in range(0, len(patch_test)):
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(131)
plt.imshow(mask_test[i])
ax = plt.subplot(132)
plt.imshow(patch_test[i])
plt.show()
IMG_SIZE = 256
EPOCHS = 100
batch_size = 1
learning_rate = 0.0005
patch_loader_test = torch.utils.data.DataLoader(
dataset=patch_test, batch_size=batch_size, shuffle=False
)
for data in patch_loader_test:
print(data.size())
print(type(data))
break
X_orig_test = torch.Tensor([patch_test[i] for i in range(len(patch_test))])
X_hazy_test = torch.Tensor([mask_test[i] for i in range(len(mask_test))])
X_orig_test = X_orig_test / 255
X_hazy_test = X_hazy_test / 255
print("X_orig_test: ", X_orig_test.size())
X_orig_T_test = np.transpose(X_orig_test, (0, 3, 1, 2))
X_hazy_T_test = np.transpose(X_hazy_test, (0, 3, 1, 2))
print("X_orig_T_test: ", X_orig_T_test.shape)
X_orig_flat_test = X_orig_T_test.reshape(-1, 1, IMG_SIZE, IMG_SIZE)
X_hazy_flat_test = X_hazy_T_test.reshape(-1, 1, IMG_SIZE, IMG_SIZE)
print("X_orig_flat_test: ", X_orig_flat_test.shape)
# # MODEL
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, 3, padding=1), # batch x 32 x 256 x 256
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32, 32, 3, padding=1), # batch x 32 x 256 x 256
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32, 64, 3, padding=1), # batch x 64 x 256 x 256
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 64, 3, padding=1), # batch x 64 x 256 x 256
nn.ReLU(),
nn.BatchNorm2d(64),
nn.MaxPool2d(2, 2), # batch x 64 x 128 x 128
)
self.layer2 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1), # batch x 128 x 128 x 128
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 128, 3, padding=1), # batch x 128 x 128 x 128
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 256, 3, padding=1), # batch x 256 x 64 x 64
nn.ReLU(),
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(batch_size, -1)
return out
encoder = Encoder().cpu()
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.layer1 = nn.Sequential(
nn.ConvTranspose2d(256, 128, 3, 2, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128, 64, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(64, 64, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(64),
)
self.layer2 = nn.Sequential(
nn.ConvTranspose2d(64, 32, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.ConvTranspose2d(32, 32, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.ConvTranspose2d(32, 1, 3, 2, 1, 1),
nn.ReLU(),
)
def forward(self, x):
out = x.view(batch_size, 256, 64, 64)
out = self.layer1(out)
out = self.layer2(out)
return out
decoder = Decoder().cpu()
train_orig_loader = torch.utils.data.DataLoader(
dataset=X_orig_flat, batch_size=batch_size, shuffle=False
)
train_hazy_loader = torch.utils.data.DataLoader(
dataset=X_hazy_flat, batch_size=batch_size, shuffle=False
)
for train_orig, train_hazy in zip(train_orig_loader, train_hazy_loader):
orig_image = Variable(train_orig).cpu()
hazy_image = Variable(train_hazy).cpu()
encoder_op = encoder(hazy_image)
output = decoder(encoder_op)
print("Image Dim: ", orig_image.size())
print("Hazy Image Dim: ", hazy_image.size())
print("Encoder Output Dim: ", encoder_op.size())
print("Output Dim: ", output.size())
break
# # Loss function and Optimizer
# Loss Function: Mean Squared Error (MSE) Loss
# As our objective is to minimize the difference between decoder output (dehazed image) and ground-truth, we aim to minimize the MSE between two images.
# Optimizer: Adam (Adaptive Moment Estimation), commonly seen as combination of Adagrad and Momentum
# Adaptive learning rate for different parameters
# Faster converging through Momentum, which results in accelerated gradients
# In order to use multi parameters with one optimizer, concat parameters after changing into list
parameters = list(encoder.parameters()) + list(decoder.parameters())
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(parameters, lr=learning_rate)
losses = []
X_orig1 = X_orig
for epoch in tqdm(range(EPOCHS)):
rand_idx = torch.randperm(X_orig1.size()[0])
X_orig_iter = X_orig[rand_idx]
X_hazy_iter = X_hazy[rand_idx]
X_orig_iter1 = np.transpose(X_orig_iter, (0, 3, 1, 2))
X_hazy_iter1 = np.transpose(X_hazy_iter, (0, 3, 1, 2))
X_orig_iter2 = X_orig_iter1.reshape(-1, 1, IMG_SIZE, IMG_SIZE)
X_hazy_iter2 = X_hazy_iter1.reshape(-1, 1, IMG_SIZE, IMG_SIZE)
train_orig_loader = torch.utils.data.DataLoader(
dataset=X_orig_iter2, batch_size=batch_size, shuffle=False
)
train_hazy_loader = torch.utils.data.DataLoader(
dataset=X_hazy_iter2, batch_size=batch_size, shuffle=False
)
for train_orig, train_hazy in zip(train_orig_loader, train_hazy_loader):
orig_image = Variable(train_orig).cpu()
hazy_image = Variable(train_hazy).cpu()
optimizer.zero_grad()
encoder_op = encoder(hazy_image)
output = decoder(encoder_op)
loss = loss_func(output, orig_image)
loss.backward()
optimizer.step()
        losses.append(loss.item())  # store the scalar value so the list can be plotted
torch.save([encoder, decoder], "dehaze_autoencoder.pkl")
plt.title("MSE Loss Plot")
plt.xlabel("Epochs")
plt.ylabel("Value")
plt.plot(losses)
plt.show()
encoder, decoder = torch.load("dehaze_autoencoder.pkl")
test_hazy_loader = torch.utils.data.DataLoader(
dataset=X_hazy_flat_test, batch_size=batch_size, shuffle=False
)
dehazed_output = []
for test_hazy in tqdm(test_hazy_loader):
    hazy_image = Variable(test_hazy).cpu()  # the model was built on CPU above
    encoder_op = encoder(hazy_image)
    output = decoder(encoder_op)
    output = output.cpu()
    output = output.detach()
    dehazed_output.append(output)
X_dehazed = dehazed_output
X_dehazed = torch.stack(X_dehazed)
print(X_dehazed.size())
X_dehazed = X_dehazed.view(-1, 1, 256, 256)
print(X_dehazed.size())
X_dehazed = X_dehazed.view(-1, 3, 256, 256)
print(X_dehazed.size())
X_dehazed = X_dehazed.permute(0, 2, 3, 1)
print(X_dehazed.shape)
for i in range(0, len(X_orig_test), 10):
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(131)
plt.title("Original Image")
plt.imshow(X_orig_test[i])
ax = plt.subplot(132)
plt.title("Hazy Image")
plt.imshow(X_hazy_test[i])
ax = plt.subplot(133)
plt.title("Dehazed Image")
plt.imshow(X_dehazed[i])
plt.show()
|
# # Cricket Umpire Mediapipe
# This notebook is a modified version of the Colab notebook given here so that it can be viewed on Kaggle.
# https://colab.research.google.com/drive/1FvH5eTiZqayZBOHZsFm-i7D-JvoB9DVz#scrollTo=nW2TjFyhLvVH
# https://github.com/google/mediapipe
import cv2
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import mediapipe as mp
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
paths = []
for dirname, _, filenames in os.walk(
"/kaggle/input/cricket-umpires-action-classification/umpire"
):
for filename in filenames:
if filename[-4:] == ".jpg":
paths += [(os.path.join(dirname, filename))]
print(paths[0:3])
labels = []
for path in paths:
file = path.split("/")[-1]
traintest = path.split("/")[-2]
labels += [file[0:4]]
img = cv2.imread(path)
with mp_hands.Hands(
static_image_mode=True, max_num_hands=2, min_detection_confidence=0.1
) as hands:
    results = hands.process(cv2.flip(img, 1))  # img holds the last image read above
    if not results.multi_hand_landmarks:
        print("No hand detected.")
    else:
        image_height, image_width, _ = img.shape
        annotated_image = cv2.flip(img.copy(), 1)
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
annotated_image,
hand_landmarks,
mp_hands.HAND_CONNECTIONS,
mp_drawing_styles.get_default_hand_landmarks_style(),
mp_drawing_styles.get_default_hand_connections_style(),
)
anno_img = cv2.flip(annotated_image, 1)
        # show the annotated image (resize_and_show was a display helper defined in the
        # original Colab; matplotlib is used here instead)
        plt.imshow(cv2.cvtColor(anno_img, cv2.COLOR_BGR2RGB))
        plt.show()
cv2.imwrite(
os.path.join(traintest, file), cv2.cvtColor(anno_img, cv2.COLOR_BGR2RGB)
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Built-in Data Types**
# In programming, data type is an important concept.
# Variables can store data of different types, and different types can do different things.
# Python has the following data types built in by default, in these categories:
# *Text Type*: str; Numeric Types: int, float, complex; Sequence Types: list, tuple, range; Mapping Type: dict; Set Types: set, frozenset; Boolean Type: bool; Binary Types: bytes, bytearray, memoryview; None Type: NoneType
# **Getting the Data Type**
# You can get the data type of any object by using the type() function.
k = 6
print(type(k))
# **Setting the Data Type**
# Examples: x = "Hello World" (str); x = 20 (int); x = 20.5 (float); x = 1j (complex); x = ["apple", "banana", "cherry"] (list); x = ("apple", "banana", "cherry") (tuple); x = range(6) (range); x = {"name": "John", "age": 36} (dict); x = {"apple", "banana", "cherry"} (set); x = frozenset({"apple", "banana", "cherry"}) (frozenset); x = True (bool); x = b"Hello" (bytes); x = bytearray(5) (bytearray); x = memoryview(bytes(5)) (memoryview); x = None (NoneType)
# *Setting a Specific Data Type*
# If you want to specify the data type, you can use the following constructor functions:
# Examples: x = str("Hello World") (str); x = int(20) (int); x = float(20.5) (float); x = complex(1j) (complex); x = list(("apple", "banana", "cherry")) (list); x = tuple(("apple", "banana", "cherry")) (tuple); x = range(6) (range); x = dict(name="John", age=36) (dict); x = set(("apple", "banana", "cherry")) (set); x = frozenset(("apple", "banana", "cherry")) (frozenset); x = bool(5) (bool); x = bytes(5) (bytes); x = bytearray(5) (bytearray); x = memoryview(bytes(5)) (memoryview)
# *Python Numbers*
# There are three numeric types in Python:
# int, float, complex. Variables of numeric types are created when you assign a value to them:
t = 5  # becomes int
m = 3.4  # becomes float
n = 2j  # becomes complex (complex number)
# In Python we use the type() function to verify the type of any object.
print(type(t))
print(type(m))
print(type(n))
# **Int - Integer**
# Int, or integer, is a whole number, positive or negative, without decimals, of unlimited length.
# integers (whole numbers)
g = 2
f = 137137137137137
d = -66666666
print(type(g))
print(type(f))
print(type(d))
# **Float**
# A float is a positive or negative number containing one or more decimals.
p = 3.40
r = 4.0
s = -72.43
print(type(p))
print(type(r))
print(type(s))
# A float can also be a scientific number with an "e" to indicate the power of 10.
# floats (decimal numbers)
b = 45e4
c = 32e5
v = -25.8e120
print(type(b))
print(type(c))
print(type(v))
# **Complex**
# Complex numbers are written with a "j" as the imaginary part:
s = 4 + 6j
w = 7j
a = -8j
print(type(s))
print(type(w))
# **Type Conversion**
# You can convert from one type to another with the int(), float() and complex() methods:
g = 2  # int (whole number)
k = 3.4  # float (decimal number)
n = 2j  # complex (the j suffix is what makes it a complex number)
# convert from int to float:
v = float(g)
# convert from float to int:
r = int(k)
# convert from int to complex:
s = complex(g)
print(v)
print(r)
print(s)
print(type(v))
print(type(r))
print(type(s))
# *Note:* You cannot convert complex numbers into another number type.
# **Random Numbers**
# Python does not have a random() function to make a random number, but Python has a built-in module called random that can be used to make random numbers:
import random
print(random.randrange(2, 9))
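# For example, random.random() (from the same module) returns a float in [0.0, 1.0):
print(random.random())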
# **Casting a Variable Type**
# There may be times when you want to specify a type for a variable. This can be done with casting. Python is an object-oriented language, and as such it uses classes to define data types, including its primitive types.
# **Specify a Variable Type**
# There may be times when you want to specify a type for a variable. Python is an object-oriented language.
# int() - constructs an integer from an integer literal, a float literal (by removing all decimals), or a string literal (provided the string represents a whole number); float() - constructs a float from an integer literal, a float literal or a string literal (provided the string represents a float or an integer); str() - constructs a string from a wide variety of data types, including strings, integer literals and float literals
# integers
k = int(5) # k will be 5
l = int(3.8) # l will be 3
m = int("8") # m will be 8
print(k)
print(l)
print(m)
# floats (decimal number examples)
a = float(4) # a will be 4.0
b = float(5.8) # b will be 5.8
c = float("9") # c will be 9.0
d = float("1.2") # d will be 1.2
print(a)
print(b)
print(c)
print(d)
# strings (used for text)
m = str("s2") # m will be 's2'
r = str(5) # r will be '5'
s = str(8.0) # s will be '8.0'
print(m)
print(r)
print(s)
# ****Strings****
# Strings in Python are surrounded by either single quotation marks or double quotation marks.
# 'hello' is the same as "hello".
# You can display a string literal with the print() function:
print("naber")
print("naber")
# **Assigning a String to a Variable**
# Assigning a string to a variable is done with the variable name followed by an equals sign and the string:
c = "selam"
print(c)
# **Multiline Strings**
# You can assign a multiline string to a variable by using three double quotes:
z = """The evil it spread like a fever ahead
It was night when you died, my firefly
What could I have said to raise you from the dead?
Oh could I be the sky on the Fourth of July?
"""
print(z)
# *Or three single quotes can be used:*
x = '''Shall we look at the moon, my little loon
Why do you cry?
Make the most of your life, while it is rife
While it is light
'''
print(x)
# **Strings are Arrays**
# Like many other popular programming languages, strings in Python are arrays of bytes representing unicode characters.
# However, Python does not have a character data type; a single character is simply a string with a length of 1.
# Square brackets can be used to access elements of the string.
# Get the character at position 1 (remember that the first character is at position 0):
a = "say goodbye!"
print(a[1])
# **Looping Through a String**
# Since strings are arrays, we can loop through the characters in a string with a for loop.
# Loop through the letters in the word "love":
for m in "love":
print(m)
# **String Length**
# To get the length of a string, use the len() function.
# The len() function returns the length of a string:
s = "in your area!"
print(len(s))
# **Check String**
# To check if a certain phrase or character is present in a string, we can use the in keyword.
# Check if "dont" is present in the following text:
txt = "please dont be sorry!"
print("dont" in txt)
# Use it in an if statement:
txt = "One time for the present!"
if "One" in txt:
print("Two time for the past")
# **Check if NOT**
# To check if a certain phrase or character is NOT present in a string, we can use the not in keyword.
# Check that "sadness" is NOT present in the following text:
txt = "Happiness can be found even in the darkest times!"
print("sadness" not in txt)
# Use it in an if statement:
txt = "Happiness can be found even in the darkest times!"
if "sadness" not in txt:
print("No, 'sadness' is NOT present.")
|
# dataset link : http://archive.ics.uci.edu/ml/datasets/Internet+Firewall+Data
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv(
"https://raw.githubusercontent.com/semnan-university-ai/Internet-Firewall/main/firewall.csv"
)
df
df.info()
# target distribution
print("Absolute Frequencies:")
print(df.Action.value_counts())
print()
print("Percentages:")
print(df.Action.value_counts(normalize=True))
sns.countplot(x="Action", data=df, palette="hls")
plt.show()
# Setup subplot
fig, ((axes1, axes2), (axes3, axes4)) = plt.subplots(2, 2, figsize=(15, 15))
sns.histplot(
data=df.loc[df["Action"] == "allow", (["Action", "Elapsed Time (sec)"])],
x="Elapsed Time (sec)",
bins=100,
ax=axes1,
).set(title="allow")
sns.histplot(
data=df.loc[df["Action"] == "deny", (["Action", "Elapsed Time (sec)"])],
x="Elapsed Time (sec)",
bins=10,
ax=axes2,
).set(title="deny")
sns.histplot(
data=df.loc[df["Action"] == "drop", (["Action", "Elapsed Time (sec)"])],
x="Elapsed Time (sec)",
bins=10,
ax=axes3,
).set(title="drop")
sns.histplot(
data=df.loc[df["Action"] == "reset-both", (["Action", "Elapsed Time (sec)"])],
x="Elapsed Time (sec)",
bins=10,
ax=axes4,
).set(title="reset-both")
# Setup subplot
fig, ((axes1, axes2), (axes3, axes4)) = plt.subplots(2, 2, figsize=(15, 15))
sns.histplot(
data=df.loc[df["Action"] == "allow", (["Action", "Packets"])],
x="Packets",
bins=100,
log_scale=True,
ax=axes1,
).set(title="allow")
sns.histplot(
data=df.loc[df["Action"] == "deny", (["Action", "Packets"])],
x="Packets",
bins=10,
ax=axes2,
).set(title="deny")
sns.histplot(
data=df.loc[df["Action"] == "drop", (["Action", "Packets"])],
x="Packets",
bins=10,
ax=axes3,
).set(title="drop")
sns.histplot(
data=df.loc[df["Action"] == "reset-both", (["Packets"])],
x="Packets",
bins=10,
ax=axes4,
).set(title="reset-both")
# ****Numerical Features****
#
features_num = [
"Bytes",
"Bytes Sent",
"Bytes Received",
"Packets",
"Elapsed Time (sec)",
"pkts_sent",
"pkts_received",
]
# define a log transformation for the numerical features
def num_trafo(x):
return np.log10(1 + x)
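# Quick sanity check (added illustration, not part of the original analysis): the transform
# maps 0 -> 0, 9 -> 1 and 999 -> 3, which tames zero-heavy, heavy-tailed counts.
print(num_trafo(np.array([0, 9, 999])))  # expected: [0. 1. 3.]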
# plot distribution of numerical features
for f in features_num:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(11, 7), sharex=True)
ax1.hist(num_trafo(df[f]), bins=20)
ax1.grid()
ax1.set_title("Feature: " + f + " - trafo [log_10(1+x)]")
ax2.boxplot(num_trafo(df[f]), vert=False)
ax2.grid()
ax2.set_title("Feature: " + f + " - trafo [log_10(1+x)]")
plt.show()
# **Categorical Features**
features_cat = [
"Source Port",
"Destination Port",
"NAT Source Port",
"NAT Destination Port",
]
# show only top 10 levels for each feature
for f in features_cat:
print("Feature:", f)
print(df[f].value_counts()[0:10])
print()
df[f].value_counts()[0:10].plot(kind="bar")
plt.title(f)
plt.grid()
plt.show()
# Source/Destination
plt.figure(figsize=(7, 7))
plt.scatter(df["Source Port"], df["Destination Port"], alpha=0.05)
plt.xlabel("Source Port")
plt.ylabel("Destination Port")
plt.show()
# Source/Destination NAT (Network Address Translation)
plt.figure(figsize=(7, 7))
plt.scatter(df["NAT Source Port"], df["NAT Destination Port"], alpha=0.05)
plt.xlabel("NAT Source Port")
plt.ylabel("NAT Destination Port")
plt.show()
# **Categorical Features vs Target**
# visualize crosstable target vs feature (using top 10 levels only)
for f in features_cat:
top10_levels = df[f].value_counts()[0:10].index.to_list()
df_temp = df[df[f].isin(top10_levels)]
ctab = pd.crosstab(df_temp.Action, df_temp[f])
print("Feature:" + f + " - Top 10 levels only")
plt.figure(figsize=(12, 5))
sns.heatmap(
ctab, annot=True, fmt="d", cmap="Blues", linecolor="black", linewidths=0.1
)
plt.show()
# **Source/Destination plots split by target**
# source/destination plot by Action
xx = "Source Port"
yy = "Destination Port"
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))
df_temp = df[df.Action == "allow"]
axs[0, 0].scatter(df_temp[xx], df_temp[yy], alpha=0.05)
axs[0, 0].set_title("Action = allow")
axs[0, 0].set_xlabel(xx)
axs[0, 0].set_ylabel(yy)
axs[0, 0].grid()
df_temp = df[df.Action == "deny"]
axs[0, 1].scatter(df_temp[xx], df_temp[yy], alpha=0.05)
axs[0, 1].set_title("Action = deny")
axs[0, 1].set_xlabel(xx)
axs[0, 1].set_ylabel(yy)
axs[0, 1].grid()
df_temp = df[df.Action == "drop"]
axs[1, 0].scatter(df_temp[xx], df_temp[yy], alpha=0.5)
axs[1, 0].set_title("Action = drop")
axs[1, 0].set_xlabel(xx)
axs[1, 0].set_ylabel(yy)
axs[1, 0].grid()
df_temp = df[df.Action == "reset-both"]
axs[1, 1].scatter(df_temp[xx], df_temp[yy], alpha=0.5)
axs[1, 1].set_title("Action = reset-both")
axs[1, 1].set_xlabel(xx)
axs[1, 1].set_ylabel(yy)
axs[1, 1].grid()
plt.show()
# source/destination plot by Action - NAT (Network Address Translation) version
xx = "NAT Source Port"
yy = "NAT Destination Port"
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))
df_temp = df[df.Action == "allow"]
axs[0, 0].scatter(df_temp[xx], df_temp[yy], alpha=0.05)
axs[0, 0].set_title("Action = allow")
axs[0, 0].set_xlabel(xx)
axs[0, 0].set_ylabel(yy)
axs[0, 0].grid()
df_temp = df[df.Action == "deny"]
axs[0, 1].scatter(df_temp[xx], df_temp[yy], alpha=0.5)
axs[0, 1].set_title("Action = deny")
axs[0, 1].set_xlabel(xx)
axs[0, 1].set_ylabel(yy)
axs[0, 1].grid()
df_temp = df[df.Action == "drop"]
axs[1, 0].scatter(df_temp[xx], df_temp[yy], alpha=0.5)
axs[1, 0].set_title("Action = drop")
axs[1, 0].set_xlabel(xx)
axs[1, 0].set_ylabel(yy)
axs[1, 0].grid()
df_temp = df[df.Action == "reset-both"]
axs[1, 1].scatter(df_temp[xx], df_temp[yy], alpha=0.5)
axs[1, 1].set_title("Action = reset-both")
axs[1, 1].set_xlabel(xx)
axs[1, 1].set_ylabel(yy)
axs[1, 1].grid()
plt.show()
|
a = "We are learning {}"
b = a.format("Analytics")
b
s = "{1} is a {0} company in {2}"
p = s.format("tech", "google", "usa")
p
s = "{company_name} is a {company_type} company"
p = s.format(company_type="tech", company_name="google")
p
d = {
"Apple": 235780721990,
"Google": 91361779690,
"Facebook": 87398349747364,
"Netflix": 373669866969,
}
for i in d:
print(
"{:<10} - {:>20}".format(i, d[i])
    )  # control alignment: :< aligns left, :> aligns right; 10 and 20 are field widths in characters
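# For example, the alignment specifiers produce fixed-width columns:
print("{:<10}|{:>10}".format("left", "right"))  # -> 'left      |     right'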
a = "The profit generates by Apple is {:,}"
b = a.format(12345678679)
b
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import copy, math
df = pd.read_csv("/kaggle/input/california-housing-prices/housing.csv", index_col=False)
df
df.head()
df.describe()
df.info()
df = df.dropna()
df.info()
df
df.hist(figsize=(10, 10))
# df=df.drop(['total_rooms_normalized'],axis=1)
plt.figure(figsize=(7, 7))
sns.heatmap(df.corr(), annot=True, cmap="YlGnBu")
df["total_rooms"] = np.log(df["total_rooms"] + 1)
# the histograms above show these features are heavily right-skewed, so we log-transform them
# and add 1 in case zero values exist (since log(0) is undefined)
df["total_bedrooms"] = np.log(df["total_bedrooms"] + 1)
df["population"] = np.log(df["population"] + 1)
df["households"] = np.log(df["households"] + 1)
df.hist(figsize=(10, 10))
# one hot encoding ocean proximitycolumn
df.ocean_proximity.value_counts()
df = df.join(pd.get_dummies(df.ocean_proximity)).drop(["ocean_proximity"], axis=1)
plt.figure(figsize=(16, 13))
sns.heatmap(df.corr(), annot=True, cmap="YlGnBu")
plt.figure(figsize=(15, 8))
sns.scatterplot(
data=df, x="latitude", y="longitude", hue="median_house_value", palette="coolwarm"
)
df["bedroom_ratio"] = df["total_bedrooms"] / df["total_rooms"]
df["household_rooms"] = df["total_rooms"] / df["households"]
plt.figure(figsize=(16, 13))
sns.heatmap(df.corr(), annot=True, cmap="YlGnBu")
x, y = df.drop(["median_house_value"], axis=1), df["median_house_value"]
x
x_norm = (x - x.mean()) / x.std()
x_norm
y = y.to_frame()
y
plt.scatter(x["median_income"], y)
plt.xlabel("X")
plt.ylabel("price")
plt.show()
x_np = x_norm.to_numpy()
x_np.shape
y_np = y.to_numpy()
y_np.shape
x_np, y_np
# a1=np.ones(shape=(5,5))
# a1
# a2=np.zeros(shape=(5,4))
# a2
# a1[: , 1:]=a2
# a1
x_norm.shape
y.shape
def get_predictions(parameter_array, x):
"""
obtain predictions for the model(parameters) and inputs (x)
    parameters => np array of shape (p+1,1); p+1 is the number of parameters, theta(0) through theta(p)
    X => np array of shape (n,p); we add a column of ones as the zeroth column of X for the intercept term theta(0)
"""
(n, p) = x.shape
p_plus_one = p + 1
new_x = np.ones(shape=(n, p_plus_one))
new_x[:, 1:] = x
return np.dot(new_x, parameter_array)
# test_parameters=np.arange(1,17) # => 16 parameters including theta0 and 15 theta other
# # test_parameters
# get_predictions(test_parameters,x).shape
w_init = np.zeros(shape=(15, 1))
w_init
b_init = 0
# def compute_cost(X, y, w, b):
# """
# compute cost
# Args:
# X (ndarray (m,n)): Data, m examples with n features
# y (ndarray (m,)) : target values
# w (ndarray (n,)) : model parameters
# b (scalar) : model parameter
# Returns:
# cost (scalar): cost
# """
# m = X.shape[0]
# cost = 0.0
# for i in range(m):
# f_wb_i = np.dot(X[i], w) + b #(n,)(n,) = scalar (see np.dot)
# cost = cost + (f_wb_i - y[i])**2 #scalar
# cost = cost / (2 * m) #scalar
# return cost
def cost_function(x, y, w, b):
"""
compute cost
x => np array of shape (m,n) m training examples with n features
y => np array of shape(m,1) target values
w => np array of shape (n,1) parameters for the model
b => scalar value
returns cost => scalar value
"""
m = x.shape[0]
cost = 0.0
for i in range(m):
f_wb_i = np.dot(x[i], w) + b
cost = cost + (f_wb_i - y[i]) ** 2
cost = cost / (2 * m)
return cost
# def compute_gradient(X, y, w, b):
# """
# Computes the gradient for linear regression
# Args:
# X (ndarray (m,n)): Data, m examples with n features
# y (ndarray (m,)) : target values
# w (ndarray (n,)) : model parameters
# b (scalar) : model parameter
# Returns:
# dj_dw (ndarray (n,)): The gradient of the cost w.r.t. the parameters w.
# dj_db (scalar): The gradient of the cost w.r.t. the parameter b.
# """
# m,n = X.shape #(number of examples, number of features)
# dj_dw = np.zeros((n,))
# dj_db = 0.
# for i in range(m):
# err = (np.dot(X[i], w) + b) - y[i]
# for j in range(n):
# dj_dw[j] = dj_dw[j] + err * X[i, j]
# dj_db = dj_db + err
# dj_dw = dj_dw / m
# dj_db = dj_db / m
# return dj_db, dj_dw
# partial derivative
def compute_gradient(x, y, w, b):
"""
Computes the gradient for linear regression
Args:
X (ndarray (m,n)): Data, m examples with n features
y (ndarray (m,)) : target values
w (ndarray (n,)) : model parameters
b (scalar) : model parameter
Returns:
dj_dw (ndarray (n,)): The gradient of the cost w.r.t. the parameters w.
dj_db (scalar): The gradient of the cost w.r.t. the parameter b.
"""
(m, n) = x.shape # (number of examples, number of features)
    dj_dw = np.zeros((n, 1))  # one gradient component per feature (avoid hard-coding 15)
dj_db = 0.0
for i in range(m):
err = (np.dot(x[i], w) + b) - y[i]
for j in range(n):
dj_dw[j] = dj_dw[j] + (err * x[i, j])
dj_db = dj_db + err
dj_dw = dj_dw / m
dj_db = dj_db / m
return dj_db, dj_dw
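# For reference, here is a vectorized version of the same gradient (a sketch added for clarity;
# it assumes x is (m,n), w is (n,1) and y is (m,1), matching x_np, w_init and y_np above):
def compute_gradient_vectorized(x, y, w, b):
    """Vectorized gradient for linear regression; same result as the loop version above."""
    m = x.shape[0]
    err = x @ w + b - y  # (m,1) residuals
    dj_dw = (x.T @ err) / m  # (n,1) gradient w.r.t. w
    dj_db = np.sum(err) / m  # scalar gradient w.r.t. b
    return dj_db, dj_dw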
def gradient_descent(
X, y, w_in, b_in, cost_function, gradient_function, alpha, num_iters
):
"""
Performs batch gradient descent to learn w and b. Updates w and b by taking
num_iters gradient steps with learning rate alpha
Args:
X (ndarray (m,n)) : Data, m examples with n features
y (ndarray (m,)) : target values
w_in (ndarray (n,)) : initial model parameters
b_in (scalar) : initial model parameter
cost_function : function to compute cost
gradient_function : function to compute the gradient
alpha (float) : Learning rate
num_iters (int) : number of iterations to run gradient descent
Returns:
w (ndarray (n,)) : Updated values of parameters
b (scalar) : Updated value of parameter
"""
# An array to store cost J and w's at each iteration primarily for graphing later
J_history = []
w = copy.deepcopy(w_in) # avoid modifying global w within function
b = b_in
for i in range(num_iters):
# Calculate the gradient and update the parameters
dj_db, dj_dw = gradient_function(X, y, w, b) ##None
# Update Parameters using w, b, alpha and gradient
w = w - alpha * dj_dw ##None
b = b - alpha * dj_db ##None
# Save cost J at each iteration
# if i<100000: # prevent resource exhaustion
# J_history.append( cost_function(X, y, w, b))
# Print cost every at intervals 10 times or as many iterations if < 10
# if i% math.ceil(num_iters / 10) == 0:
# print(f"Iteration {i:4d}: Cost {J_history[-1]:8.2f} ")
return w, b, J_history # return final w,b and J history for graphing
# # initialize parameters
# initial_w = np.zeros(shape=(x_np.shape[1],1))
# initial_b = 0.
# # some gradient descent settings
# iterations = 100
# alpha = 0.01
# # run gradient descent
# w_final, b_final, J_hist = gradient_descent(x_np, y_np, initial_w, initial_b,
# cost_function, compute_gradient,
# alpha, iterations)
# print(f"b,w found by gradient descent: {b_final} , {w_final} ")
# # print(f"b,w found by gradient descent: {b_final:0.2f},{w_final} ")
# # m,_ = x_np.shape
# # for i in range(m):
# # print(f"prediction: {np.dot(x_np[i], w_final) + b_final:0.2f}, target value: {y_np[i]}")
def predict(x, w, b):
"""
single predict using linear regression
Args:
x (ndarray): Shape (n,) example with multiple features
w (ndarray): Shape (n,) model parameters
b (scalar): model parameter
Returns:
p (scalar): prediction
"""
p = np.dot(x, w) + b
return p
# y_predicted=predict(x_np,w_final,b_final)
# y_predicted , y_np
# now using sklearn
x
y
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(x_train_scaled, y_train)
reg.score(x_test_scaled, y_test)
# random forest regressor
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor()
forest.fit(x_train_scaled, y_train)
forest.score(x_test_scaled, y_test)
y_pred = reg.predict(x_test_scaled)
y_test.shape, y_pred.shape
sns.regplot(
x=y_test,
y=y_pred,
ci=None,
scatter_kws={"color": "black"},
line_kws={"color": "red"},
)
|
import gc
import time
import re
import argparse
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import nltk
nltk.download("punkt")
nltk.download("wordnet")
nltk.download("omw-1.4")
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import spacy
import contractions
from tqdm.auto import tqdm
import transformers
print(transformers.__version__)
model_checkpoint = "t5-base"
# ## Loading the dataset
from datasets import load_metric
metric = load_metric("rouge")
ndf = pd.read_csv("/kaggle/input/tweetsummemnlp2017/grouped_data.csv").drop(
columns=["index"]
)
mini = np.inf
maxi = 0
cnt = 0
tot = 0
n_tweets_per_grp = 5
new_df = pd.DataFrame([])
for grp_id in ndf["group_id"].unique():
sub_df = ndf[ndf["group_id"] == grp_id]
rows = sub_df.shape[0]
tot += 1
if rows < n_tweets_per_grp:
cnt += 1
continue
inds = np.random.randint(0, high=rows, size=(n_tweets_per_grp,))
new_df = pd.concat([new_df, sub_df.iloc[inds]], ignore_index=True)
if rows < mini:
mini = rows
if rows > maxi:
maxi = rows
del sub_df, rows
gc.collect()
print(mini, maxi, cnt, tot)
# ## Preprocessing the data
def expand_contractions(sentence):
contractions_expanded = [contractions.fix(word) for word in sentence.split()]
return " ".join(contractions_expanded)
def lower_case(sentence):
return " ".join([word.lower() for word in sentence.split()])
def remove_punctuation(sentence):
return " ".join([re.sub(r"[^\w\s]", "", word) for word in sentence.split()])
def preprocess(
lst,
process=True,
):
    if process:
for i, sent in enumerate(lst):
lst[i] = lower_case(remove_punctuation(expand_contractions(sent)))
return lst
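# As a quick illustration (made-up sentence, not from the dataset), the combined preprocessing
# expands contractions, strips punctuation and lowercases:
print(preprocess(["I don't LIKE this, really!"]))  # expected: ['i do not like this really']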
start_time = time.time()
new_df["paraphrase"] = preprocess(list(new_df["paraphrase"]))
new_df["tweet"] = preprocess(list(new_df["tweet"]))
print("Time taken for preprocessing", time.time() - start_time)
lang = "en"
cls = spacy.util.get_lang_class(lang)
nlp = cls()
pipeline = ["tagger", "parser", "lemmatizer"]
for name in pipeline:
nlp.add_pipe(name)
nlp = spacy.load("en_core_web_sm")
tweets = []
start_time = time.time()
start_time_tweets = time.time()
for doc in nlp.pipe(new_df["tweet"], n_process=-1):
# Do something with the doc here
# print([ent for ent in doc.tags])
# print(doc.tags)
# print(doc.deps)
# print(doc.lemmas)
tweets.append(doc)
print("Time taken for tweets", time.time() - start_time_tweets)
# print([[token.text, token.pos_, token.dep_] for token in doc])
# paras = []
# start_time_para = time.time()
# for doc in nlp.pipe(df["paraphrase"], n_process = -1):
# # Do something with the doc here
# # print([ent for ent in doc.tags])
# # print(doc.tags)
# # print(doc.deps)
# # print(doc.lemmas)
# paras.append(doc)
# print("Time taken for paras", time.time() - start_time_para)
print("Total time taken", time.time() - start_time)
# for tweet in nlp(lower_case(remove_punctuation(expand_contractions(new_df.iloc[4]['tweet'])))):
# print(f'{tweet.text: <15}', '|', f'{tweet.tag_: >3}', '|', f'{tweet.dep_: >10}')
# for tweet in nlp(new_df.iloc[4]['tweet']):
# print(f'{tweet.text: <15}', '|', f'{tweet.tag_: >3}', '|', f'{tweet.dep_: >10}')
def get_input_sentence(
doc, pos_tokens=["<pos>", "</pos>"], dep_tokens=["<dep>", "</dep>"]
):
sent = []
pos = []
dep = []
for token in doc:
sent.append(token.text)
pos.append(token.pos_)
dep.append(token.dep_)
return (
" ".join(sent)
+ " "
+ pos_tokens[0]
+ " "
+ " ".join(pos)
+ " "
+ pos_tokens[1]
+ " "
+ dep_tokens[0]
+ " "
+ " ".join(dep)
+ " "
+ dep_tokens[1]
)
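# Illustrative usage only (any short sentence works): the helper returns the tokens followed by
# their POS tags wrapped in <pos>...</pos> and their dependency labels wrapped in <dep>...</dep>.
print(get_input_sentence(nlp("the cat sat on the mat")))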
ip_sentence = []
for i in range(new_df.shape[0]):
ip_sentence.append(get_input_sentence(tweets[i]))
new_df["ip_sentence"] = ip_sentence
pos_tokens = ["<pos>", "</pos>"]
dep_tokens = ["<dep>", "</dep>"]
class ParaphraseDataset(Dataset):
def __init__(self, tokenizer, data, max_len=256):
self.source_column = "ip_sentence"
self.target_column = "paraphrase"
self.data = data
self.max_len = max_len
self.tokenizer = tokenizer
self.inputs = []
self.targets = []
self._build()
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_ids = self.inputs[index]["input_ids"].squeeze()
target_ids = self.targets[index]["input_ids"].squeeze()
src_mask = self.inputs[index][
"attention_mask"
].squeeze() # might need to squeeze
target_mask = self.targets[index][
"attention_mask"
].squeeze() # might need to squeeze
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"labels": target_ids,
}
def _build(self):
for idx in range(len(self.data)):
input_, target = (
self.data.iloc[idx][self.source_column],
self.data.iloc[idx][self.target_column],
)
input_ = "summarize: " + input_ + " </s>"
target = target + " </s>"
# tokenize inputs
tokenized_inputs = self.tokenizer(
[input_],
max_length=self.max_len,
                padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=True,
)
# tokenize targets
tokenized_targets = self.tokenizer(
text_target=[target],
max_length=self.max_len,
                padding="max_length",
truncation=True,
return_tensors="pt",
)
self.inputs.append(tokenized_inputs)
self.targets.append(tokenized_targets)
def get_dataset(tokenizer, data):
return ParaphraseDataset(tokenizer, data)
# # Model Fine Tuning
from transformers import T5Tokenizer
additional_special_tokens = pos_tokens + dep_tokens
tokenizer = T5Tokenizer.from_pretrained(model_checkpoint)
tokenizer.add_tokens(additional_special_tokens)
tokenizer.get_added_vocab()
from sklearn.model_selection import train_test_split
train_split, test_split = train_test_split(
new_df, test_size=0.2, random_state=42, shuffle=True
)
# print(train_split.shape)
start_time = time.time()
train_data = get_dataset(tokenizer, train_split[["ip_sentence", "paraphrase"]])
print("Train Data took", f"{time.time() - start_time:4.3f}", "seconds")
start_time = time.time()
test_data = get_dataset(tokenizer, test_split[["ip_sentence", "paraphrase"]])
print("Test Data took", f"{time.time() - start_time:4.3f}", "seconds")
# del train_split, test_split
gc.collect()
from transformers import (
AutoModelForSeq2SeqLM,
DataCollatorForSeq2Seq,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
)
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
batch_size = 8
model_name = model_checkpoint.split("/")[-1]
args = Seq2SeqTrainingArguments(
f"{model_name}-finetuned-xsum",
do_eval=False,
evaluation_strategy="no",
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
weight_decay=0.1,
save_total_limit=3,
num_train_epochs=1,
adam_beta1=0.2,
adam_beta2=0.2,
predict_with_generate=True,
fp16=True,
# push_to_hub=True,
no_cuda=False,
)
import nltk
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Rouge expects a newline after each sentence
decoded_preds = [
"\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds
]
decoded_labels = [
"\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels
]
result = metric.compute(
predictions=decoded_preds, references=decoded_labels, use_stemmer=True
)
# Extract a few results
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
# Add mean generated length
prediction_lens = [
np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions
]
result["gen_len"] = np.mean(prediction_lens)
return {k: round(v, 4) for k, v in result.items()}
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=train_data,
eval_dataset=test_data,
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
trainer.evaluate()
trainer.train()
trainer.save_model("/kaggle/working/mymodel")
model = AutoModelForSeq2SeqLM.from_pretrained("/kaggle/working/mymodel")
batch_size = 8
model_name = model_checkpoint.split("/")[-1]
args = Seq2SeqTrainingArguments(
f"{model_name}-finetuned-xsum",
do_eval=False,
evaluation_strategy="no",
learning_rate=2e-7,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
weight_decay=0.1,
save_total_limit=3,
num_train_epochs=1,
adam_beta1=0.2,
adam_beta2=0.2,
predict_with_generate=True,
fp16=True,
# push_to_hub=True,
no_cuda=False,
)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=train_data,
eval_dataset=test_data,
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
tokenizer = T5Tokenizer.from_pretrained("/kaggle/working/mymodel")
tokenizer.add_tokens(additional_special_tokens)
trainer.evaluate()
ind = 45
print(test_split.iloc[ind]["tweet"])
preds = model.generate(
input_ids=test_data[ind]["input_ids"].unsqueeze(0).to("cuda"),
attention_mask=test_data[ind]["attention_mask"].unsqueeze(0).to("cuda"),
)
tokenizer.batch_decode(preds, skip_special_tokens=True)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from PIL import Image, ImageDraw
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import hist
import numpy as np  # Import the numpy library for working with numerical arrays
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
car = Image.open("/kaggle/input/images/car.png")
coins = Image.open("/kaggle/input/images2/Coins.jpg")
arch = Image.open("/kaggle/input/images2/arch.jpg")
rice = Image.open("/kaggle/input/images2/rice.jpg")
# ans = Image.open("./ans.jpg")  # ans.jpg is only created below (after car.save); opening it here would fail on a fresh run
draw = ImageDraw.Draw(car)  # Create a drawing tool
for i in range(car.size[0]):
for j in range(car.size[1]):
r = car.load()[i, j][0]
g = car.load()[i, j][1]
b = car.load()[i, j][2]
Y = int(0.2126 * r + 0.7152 * g + 0.0722 * b)
draw.point((i, j), (Y, Y, Y))
car.save("ans.jpg", "JPEG")
ans = Image.open("./ans.jpg")
ans
array = [ans, coins, arch, rice]
for arr in array:
plt.hist(np.ravel(arr), bins=256)
arr.show()
plt.show()
# In the first image almost all the colors are dark. In the second image there are many light colors. In the third image the colors are distributed evenly. In the fourth image there are many dark colors.
# s = T(r) = c log(1+r) - logarithmic transformations
car_orig = Image.open("/kaggle/input/images/car.png")
car = car_orig.copy()
coins_orig = Image.open("/kaggle/input/images2/Coins.jpg")
coins = coins_orig.copy()
arch_orig = Image.open("/kaggle/input/images2/arch.jpg")
arch = arch_orig.copy()
rice_orig = Image.open("/kaggle/input/images2/rice.jpg")
rice = rice_orig.copy()
coins
def log_mod(num, i_img):
image = i_img.copy()
    draw = ImageDraw.Draw(image)  # Create a drawing tool
for i in range(image.size[0]):
for j in range(image.size[1]):
r = image.load()[i, j][0]
g = image.load()[i, j][1]
b = image.load()[i, j][2]
            s = int(num * math.log(1 + r))  # s = c*log(1+r) as in the formula above (np.int was removed from NumPy)
draw.point((i, j), (s, s, s))
del draw
plt.figure(figsize=(14, 8), dpi=90)
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.subplot(1, 2, 2)
plt.hist(np.ravel(image), bins=256)
plt.show()
log_mod(255, car_orig)
log_mod(30, Image.open("/kaggle/input/images2/Coins.jpg"))
coins_orig
|
# **In this article, I’ll guide you through the code that allowed me to reach the first position in the [Store Sales – Time Series Forecasting](https://www.kaggle.com/competitions/store-sales-time-series-forecasting).**
# By exploring this code, you will be able to copy it and reach the top of the ranking... or even improve it to beat me!
# So why am I sharing this code with you if I’m going to lose my ranking?
# First of all, because I like open source and the idea that everyone can access the information.
# Second, because this competition is in the *Getting Started* category anyway. This means the leaderboard is refreshed every 3 months and, above all, that there is no deadline to participate in this challenge.
# The progress in AI being what it is, it is obvious that my record will be beaten in the months or years to come. So, instead of constraining this progress, why not encourage it by sharing my code?
# Anyway, that being said, for the sake of history (and honestly for my ego) I have recorded the ranking of April 04, 2023:
# To reach first place, I drew inspiration from [Ferdinand Berr’s code](https://www.kaggle.com/code/ferdinandberr/darts-forecasting-deep-learning-global-models#4.1.-N-HiTS), adding some changes of my own.
# First I modified the hyperparameters of the model he uses. Then I added a basic improvement technique that seems to have been neglected in this competition: [the Ensemble Method](https://inside-machinelearning.com/en/ensemble-methods/).
# The idea is to combine the predictions of several models and to average them to obtain an optimal result. I detail the method [in this article](https://inside-machinelearning.com/en/ensemble-methods/).
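# As a minimal sketch of that idea (dummy prediction arrays, not the competition pipeline),
# averaging the forecasts of several models is just an element-wise mean:
import numpy as np
preds_model_a = np.array([10.0, 0.0, 3.2])  # dummy forecasts from a first model
preds_model_b = np.array([12.0, 0.0, 2.8])  # dummy forecasts from a second model
preds_model_c = np.array([11.0, 0.0, 3.0])  # dummy forecasts from a third model
ensemble_preds = np.mean([preds_model_a, preds_model_b, preds_model_c], axis=0)
print(ensemble_preds)  # [11.  0.  3.]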
# **The objective of this competition is to predict the number of sales that different stores located in Ecuador will generate.**
# To make these predictions, we’ll have to rely on the past sales of the stores.
# This type of variable is called a time series.
# > A **time series** is a sequence of data measured at regular intervals in time.
# In our case, the data was recorded every day.
# In the following I will detail the code I used. I have removed the extra information that was not essential to understanding, keeping only what matters. You’ll see that the code is lighter than in the solution I used as a basis.
# **This tutorial is aimed at people with an intermediate level in AI, even if it can also be approached by beginners.**
# We will combine several data and use a little known but powerful Machine Learning library for our task: [darts](https://unit8co.github.io/darts/).
# Darts allows to manipulate and predict the values of a time series easily.
# Without further introduction, let’s get to work!
# # **Data**
# First of all I propose to explore the dataset.
# Our goal is to predict future sales of stores located in Ecuador, for the dates August 16, 2017 to August 31, 2017 (16 days).
# **In our dataset, there are 54 stores for 33 product families.**
# We need to predict the sales for each of these product families from each store. So `33 * 54 * 16 = 28,512` values to predict.
# Let's load the dataset with this line of code:
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# To help us make the predictions, no less than 8 CSV files are provided.
# **Let’s display them to better understand our job.**
# ## **train.csv**
# First of all, the main file: *train.csv*. It contains some features and the label to predict, `sales` – the number of sales per day:
import pandas as pd
df_train = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv")
display(df_train.head())
# Here are the columns of the DataFrame:
# - `id` – the index of the row
# - `date` – the current date
# - `store_nbr` – the store
# - `family` – the product family
# - `sales` – number of sales in this family
# - `onpromotion` – the number of products on promotion in this family
# ## **holidays_events.csv**
# The *holidays_events.csv* file lists holidays and events. This information is independent of the stores but can have an impact on sales.
# For example, on a holiday, there might be more people in the city and therefore more customers in the stores. Or conversely, more people might go on vacation and therefore there would be fewer customers in the stores.
df_holidays_events = pd.read_csv(
"/kaggle/input/store-sales-time-series-forecasting/holidays_events.csv"
)
display(df_holidays_events.head())
# Here are the columns in the DataFrame:
# - `date` – the date of the holiday
# - `type` – the type of holiday (`Holiday`, `Event`, `Transfer` (see `transferred` column), `Additional`, `Bridge`, `Work Day`)
# - `locale` – the scope of the event (Local, Regional, National)
# - `locale_name` – the city where the event takes place
# - `description` – name of the event
# - `transferred` – whether the event has been transferred (moved to another day) or not
# ## **oil.csv**
# Then a CSV file gathers the daily oil price from January 01, 2013 to August 31, 2017:
df_oil = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/oil.csv")
display(df_oil.head())
# ## **stores.csv**
# The *stores.csv* file gathers information about the stores. There is one store per line, so 54 lines:
df_stores = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/stores.csv")
display(df_stores.head())
# DataFrame columns:
# - `store_nbr` – the store
# - `city` – the city where the store is located
# - `state` – the state where the store is located
# - `type` – the type of the store
# - `cluster` – a grouping of similar stores
# ## **transactions.csv**
# The *transactions.csv* file groups the daily transactions by stores:
df_transactions = pd.read_csv(
"/kaggle/input/store-sales-time-series-forecasting/transactions.csv"
)
display(df_transactions.head())
# Note: a transaction is a receipt created after a customer’s purchase
# ## **test.csv**
# Finally, we have the *test.csv* for which we will predict the `sales` column. The file starts on August 16, 2017 and ends on August 31, 2017. We also have the *sample_submission.csv* to fill in with the number of sales per day and per family:
df_test = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv")
df_sample_submission = pd.read_csv(
"/kaggle/input/store-sales-time-series-forecasting/sample_submission.csv"
)
display(df_test.head())
display(df_sample_submission.head())
# The *test.csv* contains 5 columns:
# - `id` – the index of the row (which will be used to fill in the *sample_submission.csv* file)
# - `date` – the current date
# - `store_nbr` – the store
# - `family` – the product family
# - `onpromotion` – the number of products on promotion in this family
# Now that we know our dataset better, we can move on to the preprocessing step which will allow us to format our data to train our Machine Learning model.
# # **Preprocessing**
# To start the preprocessing, let's group the name of each product family and the number of each store:
family_list = df_train["family"].unique()
store_list = df_stores["store_nbr"].unique()
display(family_list)
display(store_list)
# Next, we assemble the `df_train` and `df_stores` datasets. By grouping this data in a single dataset, it will allow us to access the information more easily. In addition to that, we sort the sales of the DataFrame by date, by family and by stores:
train_merged = pd.merge(df_train, df_stores, on="store_nbr")
train_merged = train_merged.sort_values(["store_nbr", "family", "date"])
train_merged = train_merged.astype(
{
"store_nbr": "str",
"family": "str",
"city": "str",
"state": "str",
"type": "str",
"cluster": "str",
}
)
display(train_merged.head())
# A time series being the number of sales made per day for a family of a store, this sorting will allow us to extract them more easily.
# Same thing for the test DataFrame, we sort the sales by date, by family and by stores:
df_test_dropped = df_test.drop(["onpromotion"], axis=1)
df_test_sorted = df_test_dropped.sort_values(by=["store_nbr", "family"])
display(df_test_sorted.head())
# Now we are going to concretely create time series!
# ## **Main Time Series**
# As I mentioned earlier, we are going to use a specific library for time series processing in Python: [Darts](https://unit8co.github.io/darts/).
# Darts allows us to easily manipulate time series.
# I invite you to install the darts library (for example with `pip install darts`).
# Like Pandas with its DataFrame, the Darts library offers its own class for manipulating time series: the `TimeSeries`.
# We will use this class to extract our time series.
# But before that, we have to discuss our strategy.
# ### **Strategy**
# > Reminder: Our objective is to predict for each family in each store the number of future sales. There are 33 families for 54 stores.
# **From there, several routes can be taken.**
# The most obvious one is to train a single Machine Learning model on the whole dataset, i.e. on all 1,782 time series (33 families × 54 stores).
# This is obvious because it allows us to use a maximum of data to train our model. It will then be able to generalize its knowledge to each of the product families. With such a strategy, our model will have a good global prediction.
# A less obvious strategy, but quite logical, is to train a Machine Learning model for each time series.
# Indeed, by assigning a model to each series, we ensure that each model is specialized in its task and therefore performs well in its prediction, and this for each product family of each store.
# The problem with the first method is that the model, having only a general knowledge of our data, will not have an optimal prediction on each specific time series.
# The problem with the second method is that the model will be specialized on each time series, but will lack data to perfect its training.
# **We will therefore not take any of the strategies described above.**
# Our strategy is to position ourselves between these two methods.
# After several tests and analyses of our data (which I will not detail here), we understand that the sales by families seem to be correlated across stores.
# **Hence we'll train a Machine Learning model by product family.**
# We will have 33 models, each trained on 54 time series.
# This is a good compromise, because it allows us to have a lot of data to train a model. But also to obtain, at the end of the training, a model specialized in its task (because trained on a single product family).
# Now that you know the strategy, let’s implement it!
# ### **sales**
# #### **Extract the time series**
# For each product family, we will gather all the time series concerning it.
# So we will have 33 sub-datasets. These datasets will be contained in the `family_TS_dict` dictionary.
# In the following lines of code, we extract the `TimeSeries` of the 54 stores for each family.
# These TimeSeries will group the sales by family, the date of each sale, but also the dependent covariates (indicated with `group_cols` and `static_cols`) of these sales: `store_nbr`, `family`, `city`, `state`, `type`, `cluster` :
import numpy as np
import darts
from darts import TimeSeries
family_TS_dict = {}
for family in family_list:
df_family = train_merged.loc[train_merged["family"] == family]
list_of_TS_family = TimeSeries.from_group_dataframe(
df_family,
time_col="date",
group_cols=["store_nbr", "family"],
static_cols=["city", "state", "type", "cluster"],
value_cols="sales",
fill_missing_dates=True,
freq="D",
)
    list_of_TS_family = [ts.astype(np.float32) for ts in list_of_TS_family]  # astype returns a new series, so collect the results
list_of_TS_family = sorted(
list_of_TS_family, key=lambda ts: int(ts.static_covariates_values()[0, 0])
)
family_TS_dict[family] = list_of_TS_family
# You can also see that we indicate `fill_missing_dates=True` because in the dataset, the sales of each December 25th are missing.
# We also indicate `freq='D'`, to indicate that the interval for the values of the time series is in days (D for day).
# Finally, we indicate that the values of the `TimeSeries` must be interpreted in `float32` and that the time series must be sorted by stores.
# We can display the first time series of the first family:
display(family_TS_dict["AUTOMOTIVE"][0])
# We retrieve all the values indicated above: the number of sales, the date of each sale in `Coordinates > date`, and the dependent covariates in `Attributes > static_covariates`.
# You can also see that the length of the time series is 1688. Originally it was 1684 but we added the values of the four December 25s that are missing from the dataset.
# Then we apply a normalization to our `TimeSeries`.
# #### **Normalizing time series**
# Normalization is a technique used to improve the performance of a Machine Learning model by facilitating its training. I let you refer to [our article on the subject](https://inside-machinelearning.com/en/normalize-your-data/) if you want to know more.
# We can easily normalize a `TimeSeries` with the `Scaler` function of darts.
# Moreover, we will further optimize the training of the model by one hot encoding our covariates. We implement the one hot encoding via the `StaticCovariatesTransformer` function.
from darts.dataprocessing import Pipeline
from darts.dataprocessing.transformers import (
Scaler,
StaticCovariatesTransformer,
MissingValuesFiller,
InvertibleMapper,
)
import sklearn
family_pipeline_dict = {}
family_TS_transformed_dict = {}
for key in family_TS_dict:
train_filler = MissingValuesFiller(verbose=False, n_jobs=-1, name="Fill NAs")
static_cov_transformer = StaticCovariatesTransformer(
verbose=False,
transformer_cat=sklearn.preprocessing.OneHotEncoder(),
name="Encoder",
)
log_transformer = InvertibleMapper(
np.log1p, np.expm1, verbose=False, n_jobs=-1, name="Log-Transform"
)
train_scaler = Scaler(verbose=False, n_jobs=-1, name="Scaling")
train_pipeline = Pipeline(
[train_filler, static_cov_transformer, log_transformer, train_scaler]
)
training_transformed = train_pipeline.fit_transform(family_TS_dict[key])
family_pipeline_dict[key] = train_pipeline
family_TS_transformed_dict[key] = training_transformed
# We can display the first transformed `TimeSeries` of the first family:
display(family_TS_transformed_dict["AUTOMOTIVE"][0])
# You can see that the sales have been normalized and that the `static_covariates` have been one hot encoded.
# We now have our main time series that will allow us to train our model.
# Why not expand our dataset with other covariates?
# ## **Covariates**
# > **A covariate** is a variable that helps to predict a target variable.
# This covariate can be dependent on the target variable. For example, the type of store, `type`, where the sales are made. But it can also be independent. For example, the price of oil on the day of the sale of a product.
# This covariate can be known in advance, for example in our dataset we have the price of oil from January 1, 2013 to August 31, 2017. In this case, we talk about a **future covariate**.
# There are also **past covariates**. These are covariates that are not known in advance. For example in our dataset, the transactions are known for the dates January 1, 2013 to August 15, 2017.
# ### **Date**
# The first covariate we are interested in is the date.
# The date is a future covariate because we know the date of the coming days.
# It has, in many cases, an impact on the traffic of a store. For example, we can expect that on Saturday there will be more customers in the store than on Monday.
# But it can also be expected that during the summer vacations the store will be less busy than in normal times.
# **Hence every little detail counts.**
# In order not to miss anything, we will extract as much information as possible from this date. Here, 7 columns :
# - `year` – year
# - `month` – month
# - `day` – day
# - `dayofyear` – day of the year (for example February 1 is the 32nd day of the year)
# - `weekday` – day of the week (there are 7 days in a week)
# - `weekofyear` – week of the year (there are 52 weeks in a year)
# - `linear_increase` – the index of the interval
from darts.utils.timeseries_generation import datetime_attribute_timeseries
full_time_period = pd.date_range(start="2013-01-01", end="2017-08-31", freq="D")
year = datetime_attribute_timeseries(time_index=full_time_period, attribute="year")
month = datetime_attribute_timeseries(time_index=full_time_period, attribute="month")
day = datetime_attribute_timeseries(time_index=full_time_period, attribute="day")
dayofyear = datetime_attribute_timeseries(
time_index=full_time_period, attribute="dayofyear"
)
weekday = datetime_attribute_timeseries(
time_index=full_time_period, attribute="dayofweek"
)
weekofyear = datetime_attribute_timeseries(
time_index=full_time_period, attribute="weekofyear"
)
timesteps = TimeSeries.from_times_and_values(
times=full_time_period,
values=np.arange(len(full_time_period)),
columns=["linear_increase"],
)
time_cov = (
year.stack(month)
.stack(day)
.stack(dayofyear)
.stack(weekday)
.stack(weekofyear)
.stack(timesteps)
)
time_cov = time_cov.astype(np.float32)
# This is what it gives us for the date at index `100`:
display(time_cov.components.values)
display(time_cov[100])
# And of course, we will normalize this data:
time_cov_scaler = Scaler(verbose=False, n_jobs=-1, name="Scaler")
time_cov_train, time_cov_val = time_cov.split_before(pd.Timestamp("20170816"))
time_cov_scaler.fit(time_cov_train)
time_cov_transformed = time_cov_scaler.transform(time_cov)
# You can also see that a split is made at August 16, 2017: the scaler is fitted only on the earlier dates, while the later dates will be used for the prediction.
# ### **Oil**
# As said before, the price of oil is a future covariate because it is known in advance.
# Here, we will not simply extract the daily oil price but we will calculate the moving average.
# > The **moving average in X**, is an average of the current value and the X-1 previous values of a time series.
# For example the moving average in 7 is the average of `(t + t-1 + … + t-6) / 7`. It is calculated at each `t`, that’s why it is called “moving”.
# **Calculating the moving average allows us to remove the momentary fluctuations of a value and thus to accentuate the long-term trends.**
# The moving average is used in trading, but more generally in Time Series Analysis.
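# For intuition, the same kind of 7-day moving average can be computed with plain pandas on dummy
# values (a sketch, separate from the darts pipeline used below):
import pandas as pd
dummy_prices = pd.Series([50, 52, 51, 53, 55, 54, 56, 58])  # made-up daily prices
print(dummy_prices.rolling(window=7).mean().tolist())  # first 6 values are NaN, then 7-day averages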
# In the following code, we calculate the moving average in 7 and 28 of the oil price. And of course, we apply a normalization :
from darts.models import MovingAverage
# Oil Price
oil = TimeSeries.from_dataframe(
df_oil, time_col="date", value_cols=["dcoilwtico"], freq="D"
)
oil = oil.astype(np.float32)
# Transform
oil_filler = MissingValuesFiller(verbose=False, n_jobs=-1, name="Filler")
oil_scaler = Scaler(verbose=False, n_jobs=-1, name="Scaler")
oil_pipeline = Pipeline([oil_filler, oil_scaler])
oil_transformed = oil_pipeline.fit_transform(oil)
# Moving Averages for Oil Price
oil_moving_average_7 = MovingAverage(window=7)
oil_moving_average_28 = MovingAverage(window=28)
oil_moving_averages = []
ma_7 = oil_moving_average_7.filter(oil_transformed).astype(np.float32)
ma_7 = ma_7.with_columns_renamed(col_names=ma_7.components, col_names_new="oil_ma_7")
ma_28 = oil_moving_average_28.filter(oil_transformed).astype(np.float32)
ma_28 = ma_28.with_columns_renamed(
col_names=ma_28.components, col_names_new="oil_ma_28"
)
oil_moving_averages = ma_7.stack(ma_28)
# Here is the result obtained at index `100`:
display(oil_moving_averages[100])
# ### **Holidays**
# Let’s now focus on the holidays.
# Here, Ferdinand Berr has implemented functions to detail these holidays. In particular, he adds information about whether the holiday is Christmas day, whether it is a soccer game day, etc:
def holiday_list(df_stores):
listofseries = []
for i in range(0, len(df_stores)):
df_holiday_dummies = pd.DataFrame(columns=["date"])
df_holiday_dummies["date"] = df_holidays_events["date"]
df_holiday_dummies["national_holiday"] = np.where(
(
(df_holidays_events["type"] == "Holiday")
& (df_holidays_events["locale"] == "National")
),
1,
0,
)
df_holiday_dummies["earthquake_relief"] = np.where(
df_holidays_events["description"].str.contains("Terremoto Manabi"), 1, 0
)
df_holiday_dummies["christmas"] = np.where(
df_holidays_events["description"].str.contains("Navidad"), 1, 0
)
df_holiday_dummies["football_event"] = np.where(
df_holidays_events["description"].str.contains("futbol"), 1, 0
)
df_holiday_dummies["national_event"] = np.where(
(
(df_holidays_events["type"] == "Event")
& (df_holidays_events["locale"] == "National")
& (~df_holidays_events["description"].str.contains("Terremoto Manabi"))
& (~df_holidays_events["description"].str.contains("futbol"))
),
1,
0,
)
df_holiday_dummies["work_day"] = np.where(
(df_holidays_events["type"] == "Work Day"), 1, 0
)
df_holiday_dummies["local_holiday"] = np.where(
(
(df_holidays_events["type"] == "Holiday")
& (
(df_holidays_events["locale_name"] == df_stores["state"][i])
| (df_holidays_events["locale_name"] == df_stores["city"][i])
)
),
1,
0,
)
listofseries.append(df_holiday_dummies)
return listofseries
# Then, we have a function that removes the rows where every holiday flag equals 0 and drops duplicate dates:
def remove_0_and_duplicates(holiday_list):
listofseries = []
for i in range(0, len(holiday_list)):
        df_holiday_per_store = holiday_list[i].set_index("date")  # use the function argument rather than the global
df_holiday_per_store = df_holiday_per_store.loc[
~(df_holiday_per_store == 0).all(axis=1)
]
df_holiday_per_store = (
df_holiday_per_store.groupby("date")
.agg(
{
"national_holiday": "max",
"earthquake_relief": "max",
"christmas": "max",
"football_event": "max",
"national_event": "max",
"work_day": "max",
"local_holiday": "max",
}
)
.reset_index()
)
listofseries.append(df_holiday_per_store)
return listofseries
# And finally a function that builds the holiday `TimeSeries` associated with each of the 54 stores:
def holiday_TS_list_54(holiday_list):
listofseries = []
for i in range(0, 54):
holidays_TS = TimeSeries.from_dataframe(
            holiday_list[i],  # use the function argument rather than the global
time_col="date",
fill_missing_dates=True,
fillna_value=0,
freq="D",
)
holidays_TS = holidays_TS.slice(
pd.Timestamp("20130101"), pd.Timestamp("20170831")
)
holidays_TS = holidays_TS.astype(np.float32)
listofseries.append(holidays_TS)
return listofseries
# Now we just need to apply these functions:
list_of_holidays_per_store = holiday_list(df_stores)
list_of_holidays_per_store = remove_0_and_duplicates(list_of_holidays_per_store)
list_of_holidays_store = holiday_TS_list_54(list_of_holidays_per_store)
holidays_filler = MissingValuesFiller(verbose=False, n_jobs=-1, name="Filler")
holidays_scaler = Scaler(verbose=False, n_jobs=-1, name="Scaler")
holidays_pipeline = Pipeline([holidays_filler, holidays_scaler])
holidays_transformed = holidays_pipeline.fit_transform(list_of_holidays_store)
# We get 54 `TimeSeries` with 7 columns:
# - `national_holiday`
# - `earthquake_relief`
# - `christmas`
# - `football_event`
# - `national_event`
# - `work_day`
# - `local_holiday`
# Here is the `TimeSeries` index `100` for the first store:
display(len(holidays_transformed))
display(holidays_transformed[0].components.values)
display(holidays_transformed[0][100])
# ### **Promotion**
# The last future covariate to process is the `onpromotion` column.
# It gives us the number of items on promotion in a product family.
# Here the code is similar to the one used for the `sales` column. It extracts, for each family, the time series of the 54 stores:
df_promotion = pd.concat([df_train, df_test], axis=0)
df_promotion = df_promotion.sort_values(["store_nbr", "family", "date"])
df_promotion.tail()
family_promotion_dict = {}
for family in family_list:
df_family = df_promotion.loc[df_promotion["family"] == family]
list_of_TS_promo = TimeSeries.from_group_dataframe(
df_family,
time_col="date",
group_cols=["store_nbr", "family"],
value_cols="onpromotion",
fill_missing_dates=True,
freq="D",
)
    list_of_TS_promo = [ts.astype(np.float32) for ts in list_of_TS_promo]  # astype returns a new series, so collect the results
family_promotion_dict[family] = list_of_TS_promo
# We can display the first `TimeSeries` of the first family :
display(family_promotion_dict["AUTOMOTIVE"][0])
# Let’s go further by also calculating the 7-day and 28-day moving averages, as we did for the oil price:
from tqdm import tqdm
promotion_transformed_dict = {}
for key in tqdm(family_promotion_dict):
promo_filler = MissingValuesFiller(verbose=False, n_jobs=-1, name="Fill NAs")
promo_scaler = Scaler(verbose=False, n_jobs=-1, name="Scaling")
promo_pipeline = Pipeline([promo_filler, promo_scaler])
promotion_transformed = promo_pipeline.fit_transform(family_promotion_dict[key])
# Moving Averages for Promotion Family Dictionaries
promo_moving_average_7 = MovingAverage(window=7)
promo_moving_average_28 = MovingAverage(window=28)
promotion_covs = []
for ts in promotion_transformed:
ma_7 = promo_moving_average_7.filter(ts)
ma_7 = TimeSeries.from_series(ma_7.pd_series())
ma_7 = ma_7.astype(np.float32)
ma_7 = ma_7.with_columns_renamed(
col_names=ma_7.components, col_names_new="promotion_ma_7"
)
ma_28 = promo_moving_average_28.filter(ts)
ma_28 = TimeSeries.from_series(ma_28.pd_series())
ma_28 = ma_28.astype(np.float32)
ma_28 = ma_28.with_columns_renamed(
col_names=ma_28.components, col_names_new="promotion_ma_28"
)
promo_and_mas = ts.stack(ma_7).stack(ma_28)
promotion_covs.append(promo_and_mas)
promotion_transformed_dict[key] = promotion_covs
# We obtain a normalized time series with 3 columns.
# We can display the index `1` of the first `TimeSeries` of the first family:
display(promotion_transformed_dict["AUTOMOTIVE"][0].components.values)
display(promotion_transformed_dict["AUTOMOTIVE"][0][1])
# ### **Grouping the covariates**
# To finish with the future covariates, we are going to gather them in the same `TimeSeries`.
# We start with the time series of the dates, the oil price and the moving averages of the oil price that we group in the variable `general_covariates` :
general_covariates = time_cov_transformed.stack(oil_transformed).stack(
oil_moving_averages
)
# Then for each store, we gather the `TimeSeries` of the holidays with the `general_covariates` :
store_covariates_future = []
for store in range(0, len(store_list)):
stacked_covariates = holidays_transformed[store].stack(general_covariates)
store_covariates_future.append(stacked_covariates)
# Finally, for each family, we combine the previously created covariates with the promotion covariates:
future_covariates_dict = {}
for key in tqdm(promotion_transformed_dict):
promotion_family = promotion_transformed_dict[key]
covariates_future = [
promotion_family[i].stack(store_covariates_future[i])
for i in range(0, len(promotion_family))
]
future_covariates_dict[key] = covariates_future
# Here are the different columns obtained for each `TimeSeries` of each family of each store:
display(future_covariates_dict["AUTOMOTIVE"][0].components)
# ## **Transactions – Past Covariates**
# Before launching the training of the model, let’s extract the past covariates: the transactions.
# As you might already have understood, after extracting the transactions for each store, we normalize them:
df_transactions.sort_values(["store_nbr", "date"], inplace=True)
TS_transactions_list = TimeSeries.from_group_dataframe(
df_transactions,
time_col="date",
group_cols=["store_nbr"],
value_cols="transactions",
fill_missing_dates=True,
freq="D",
)
transactions_list = []
for ts in TS_transactions_list:
series = TimeSeries.from_series(ts.pd_series())
series = series.astype(np.float32)
transactions_list.append(series)
transactions_list[24] = transactions_list[24].slice(
start_ts=pd.Timestamp("20130102"), end_ts=pd.Timestamp("20170815")
)
from datetime import datetime, timedelta
transactions_list_full = []
for ts in transactions_list:
if ts.start_time() > pd.Timestamp("20130101"):
end_time = ts.start_time() - timedelta(days=1)
delta = end_time - pd.Timestamp("20130101")
zero_series = TimeSeries.from_times_and_values(
times=pd.date_range(start=pd.Timestamp("20130101"), end=end_time, freq="D"),
values=np.zeros(delta.days + 1),
)
ts = zero_series.append(ts)
ts = ts.with_columns_renamed(
col_names=ts.components, col_names_new="transactions"
)
transactions_list_full.append(ts)
transactions_filler = MissingValuesFiller(verbose=False, n_jobs=-1, name="Filler")
transactions_scaler = Scaler(verbose=False, n_jobs=-1, name="Scaler")
transactions_pipeline = Pipeline([transactions_filler, transactions_scaler])
transactions_transformed = transactions_pipeline.fit_transform(transactions_list_full)
# Here is the `TimeSeries` for the first store:
display(transactions_transformed[0])
# We are finally ready to create our Machine Learning model.
# # **Machine Learning Model**
# Now, we will train a first Machine Learning model with the darts library to confirm that our data is consistent and that the predictions obtained are convincing.
# Then we will use ensemble methods to improve our final result.
# ## **Single model**
# The Darts library offers us various Machine Learning models to use on `TimeSeries`.
# In Ferdinand Berr’s solution, we can see that he uses different models:
# - `NHiTSModel` – score : 0.43265
# - `RNNModel` (with LSTM layers) – score : 0.55443
# - `TFTModel` – score : 0.43226
# - `ExponentialSmoothing` – score : 0.37411
# These scores are obtained on validation data, artificially generated from the training data.
# Personally, I decided to use the `LightGBMModel`, darts' implementation of the model from the eponymous library, on which you will find [an article here](https://inside-machinelearning.com/en/lightgbm-guide/).
# Why use this model? Not after hours of practice and experimentation, but simply because trying it showed that, on its own, it gives better results than `ExponentialSmoothing`.
# As explained in the Strategy section, we will train a Machine Learning model for each product family.
# So for each family, we have to take the corresponding `TimeSeries` and send them to our Machine Learning model.
# First, we prepare the data:
# - `TCN_covariates` represents the future covariates associated with the target product family
# - `train_sliced` represents the number of sales associated with the target product family. The `slice_intersect` function that you can see used simply ensures that the components span the same time interval. In the case of different time intervals an error message will appear if we try to combine them.
# - `transactions_transformed`, the past covariates do not need to be indexed on the target family because there is only one global `TimeSeries` per store
# Next, we initialize hyperparameters for our model.
# **This is the key to model results.**
# By modifying these hyperparameters you can improve the performance of the Machine Learning model.
# ### **Training**
# Here are the important hyperparameters:
# - `lags` – the number of past values on which we base our predictions
# - `lags_future_covariates` – the number of future covariate values on which we base our predictions. If we give a tuple, the left value represents the number of covariates in the past and the right value represents the number of covariates in the future
# - `lags_past_covariates` – the number of past covariate values on which we base our predictions
# For these three hyperparameters, if a list is passed, we take the indexes associated with the numbers of this list. For example if we pass: `[-3, -4, -5]`, we take the indexes `t-3`, `t-4`, `t-5`. But if we pass an integer for example 10, we take the 10 previous values (or the 10 future values depending on the case).
# The hyperparameter `output_chunk_length` controls the number of values the model predicts in one step, `random_state` ensures the reproducibility of the results, and `gpu_use_dp` indicates whether double precision should be used on the GPU.
# After that we launch the training. And at the end, we save the trained model in a dictionary.
from darts.models import LightGBMModel
LGBM_Models_Submission = {}
display("Training...")
for family in tqdm(family_list):
sales_family = family_TS_transformed_dict[family]
training_data = [ts for ts in sales_family]
TCN_covariates = future_covariates_dict[family]
train_sliced = [
training_data[i].slice_intersect(TCN_covariates[i])
for i in range(0, len(training_data))
]
LGBM_Model_Submission = LightGBMModel(
lags=63,
lags_future_covariates=(14, 1),
lags_past_covariates=[-16, -17, -18, -19, -20, -21, -22],
output_chunk_length=1,
random_state=2022,
gpu_use_dp="false",
)
LGBM_Model_Submission.fit(
series=train_sliced,
future_covariates=TCN_covariates,
past_covariates=transactions_transformed,
)
LGBM_Models_Submission[family] = LGBM_Model_Submission
# In the above code, we only use `lags_past_covariates = [-16,-17,-18,-19,-20,-21,-22]`. Why? Because when making the 16th prediction (the one for August 31, 2017), the past-covariate values at lags -1 to -15 are not yet known.
# After training, we obtain 33 Machine Learning models stored in `LGBM_Models_Submission`.
# ### **Predict**
# We can now perform the predictions:
display("Predictions...")
LGBM_Forecasts_Families_Submission = {}
for family in tqdm(family_list):
sales_family = family_TS_transformed_dict[family]
training_data = [ts for ts in sales_family]
LGBM_covariates = future_covariates_dict[family]
train_sliced = [
        training_data[i].slice_intersect(LGBM_covariates[i])
for i in range(0, len(training_data))
]
forecast_LGBM = LGBM_Models_Submission[family].predict(
n=16,
series=train_sliced,
future_covariates=LGBM_covariates,
past_covariates=transactions_transformed,
)
LGBM_Forecasts_Families_Submission[family] = forecast_LGBM
# Note: even if the model has an `output_chunk_length` of 1, we can directly instruct it to predict 16 values in the future.
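# Conceptually (this is an assumption about the library's internal behaviour, not its actual code), a model with `output_chunk_length=1` builds an n-step forecast autoregressively: it predicts one step, appends the prediction to the history, and repeats.
def autoregressive_forecast(predict_one_step, history, n=16):
    forecasts = []
    for _ in range(n):
        next_value = predict_one_step(history)  # uses the lags defined at training time
        forecasts.append(next_value)
        history = history + [next_value]  # the prediction becomes a "past" value for the next step
    return forecasts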
# We now have our predictions. If you have been following along, you know the next step.
# Previously, we normalized our data with the `Scaler` function. So the predicted data are also normalized.
# To de-normalize them we use the `inverse_transform` function on each `TimeSeries`:
LGBM_Forecasts_Families_back_Submission = {}
for family in tqdm(family_list):
LGBM_Forecasts_Families_back_Submission[family] = family_pipeline_dict[
family
].inverse_transform(LGBM_Forecasts_Families_Submission[family], partial=True)
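# Note: our reading of the Darts `Pipeline` API is that `partial=True` applies only the invertible transformers during the inverse transformation; check the documentation of your Darts version to confirm.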
# Finally, here is the code that allows to go from the predicted time series cluster to the prediction DataFrame:
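# Heuristic used below: if a store-family series sold nothing during its last 21 days of training data, its forecast is forced to zero.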
for family in tqdm(LGBM_Forecasts_Families_back_Submission):
for n in range(0, len(LGBM_Forecasts_Families_back_Submission[family])):
if (family_TS_dict[family][n].univariate_values()[-21:] == 0).all():
LGBM_Forecasts_Families_back_Submission[family][
n
] = LGBM_Forecasts_Families_back_Submission[family][n].map(lambda x: x * 0)
listofseries = []
for store in tqdm(range(0, 54)):
for family in family_list:
oneforecast = LGBM_Forecasts_Families_back_Submission[family][
store
].pd_dataframe()
oneforecast.columns = ["fcast"]
listofseries.append(oneforecast)
df_forecasts = pd.concat(listofseries)
df_forecasts.reset_index(drop=True, inplace=True)
# No Negative Forecasts
df_forecasts[df_forecasts < 0] = 0
forecasts_kaggle = pd.concat(
[df_test_sorted, df_forecasts.set_index(df_test_sorted.index)], axis=1
)
forecasts_kaggle_sorted = forecasts_kaggle.sort_values(by=["id"])
forecasts_kaggle_sorted = forecasts_kaggle_sorted.drop(
["date", "store_nbr", "family"], axis=1
)
forecasts_kaggle_sorted = forecasts_kaggle_sorted.rename(columns={"fcast": "sales"})
forecasts_kaggle_sorted = forecasts_kaggle_sorted.reset_index(drop=True)
# Submission
submission_kaggle = forecasts_kaggle_sorted
# We can display the predictions:
submission_kaggle.head()
# **But it’s not over yet!** ☝🏻
# Now we need to train several models and apply the Ensemble method.
# ## **Multiple models**
# As explained before, the important thing in this code is the hyperparameters. We will train 3 models by taking the following hyperparameters:
model_params = [
{
"lags": 7,
"lags_future_covariates": (16, 1),
"lags_past_covariates": [-16, -17, -18, -19, -20, -21, -22],
},
{
"lags": 365,
"lags_future_covariates": (14, 1),
"lags_past_covariates": [-16, -17, -18, -19, -20, -21, -22],
},
{
"lags": 730,
"lags_future_covariates": (14, 1),
"lags_past_covariates": [-16, -17, -18, -19, -20, -21, -22],
},
]
# For each of these parameters, we will train 33 models, run the predictions and fill the final DataFrame. The 3 DataFrames obtained will be stored in the `submission_kaggle_list` :
from sklearn.metrics import mean_squared_log_error as msle, mean_squared_error as mse
from lightgbm import early_stopping
submission_kaggle_list = []
for params in model_params:
LGBM_Models_Submission = {}
display("Training...")
for family in tqdm(family_list):
# Define Data for family
sales_family = family_TS_transformed_dict[family]
training_data = [ts for ts in sales_family]
TCN_covariates = future_covariates_dict[family]
train_sliced = [
training_data[i].slice_intersect(TCN_covariates[i])
for i in range(0, len(training_data))
]
LGBM_Model_Submission = LightGBMModel(
lags=params["lags"],
lags_future_covariates=params["lags_future_covariates"],
lags_past_covariates=params["lags_past_covariates"],
output_chunk_length=1,
random_state=2022,
gpu_use_dp="false",
)
LGBM_Model_Submission.fit(
series=train_sliced,
future_covariates=TCN_covariates,
past_covariates=transactions_transformed,
)
LGBM_Models_Submission[family] = LGBM_Model_Submission
display("Predictions...")
LGBM_Forecasts_Families_Submission = {}
for family in tqdm(family_list):
sales_family = family_TS_transformed_dict[family]
training_data = [ts for ts in sales_family]
LGBM_covariates = future_covariates_dict[family]
train_sliced = [
training_data[i].slice_intersect(TCN_covariates[i])
for i in range(0, len(training_data))
]
forecast_LGBM = LGBM_Models_Submission[family].predict(
n=16,
series=train_sliced,
future_covariates=LGBM_covariates,
past_covariates=transactions_transformed,
)
LGBM_Forecasts_Families_Submission[family] = forecast_LGBM
# Transform Back
LGBM_Forecasts_Families_back_Submission = {}
for family in tqdm(family_list):
LGBM_Forecasts_Families_back_Submission[family] = family_pipeline_dict[
family
].inverse_transform(LGBM_Forecasts_Families_Submission[family], partial=True)
# Prepare Submission in Correct Format
for family in tqdm(LGBM_Forecasts_Families_back_Submission):
for n in range(0, len(LGBM_Forecasts_Families_back_Submission[family])):
if (family_TS_dict[family][n].univariate_values()[-21:] == 0).all():
LGBM_Forecasts_Families_back_Submission[family][
n
] = LGBM_Forecasts_Families_back_Submission[family][n].map(
lambda x: x * 0
)
listofseries = []
for store in tqdm(range(0, 54)):
for family in family_list:
oneforecast = LGBM_Forecasts_Families_back_Submission[family][
store
].pd_dataframe()
oneforecast.columns = ["fcast"]
listofseries.append(oneforecast)
df_forecasts = pd.concat(listofseries)
df_forecasts.reset_index(drop=True, inplace=True)
# No Negative Forecasts
df_forecasts[df_forecasts < 0] = 0
forecasts_kaggle = pd.concat(
[df_test_sorted, df_forecasts.set_index(df_test_sorted.index)], axis=1
)
forecasts_kaggle_sorted = forecasts_kaggle.sort_values(by=["id"])
forecasts_kaggle_sorted = forecasts_kaggle_sorted.drop(
["date", "store_nbr", "family"], axis=1
)
forecasts_kaggle_sorted = forecasts_kaggle_sorted.rename(columns={"fcast": "sales"})
forecasts_kaggle_sorted = forecasts_kaggle_sorted.reset_index(drop=True)
# Submission
submission_kaggle_list.append(forecasts_kaggle_sorted)
# We end up with four prediction DataFrames, which we average (this is the so-called ensemble method):
df_sample_submission["sales"] = (
submission_kaggle[["sales"]]
+ submission_kaggle_list[0][["sales"]]
+ submission_kaggle_list[1][["sales"]]
+ submission_kaggle_list[2][["sales"]]
) / 4
# Here is the result:
df_sample_submission.head()
# You can now save the predictions in a CSV file and submit it to Kaggle:
df_sample_submission.to_csv("/kaggle/working/submission.csv", index=False)
|
import pandas as pd
import numpy as np
# from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
# from sklearn.model_selection import GridSearchCV
# for random forest classifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error, accuracy_score
d = pd.read_csv("/kaggle/input/titanic/train.csv")
df = pd.read_csv("/kaggle/input/titanic/test.csv")
df1 = df.drop(["Name", "Ticket", "Cabin"], axis=1)
df1["Sex"] = df1["Sex"].astype("category")
df1["Sex"] = df1["Sex"].cat.codes
df1["Embarked"] = df1["Embarked"].astype("category")
df1["Embarked"] = df1["Embarked"].cat.codes
mean_age = df1["Age"].mean()
# Fill the null values with the mean age
df1["Age"].fillna(mean_age, inplace=True)
mea = df1["Fare"].mean()
# Fill the null values with the mean fare
df1["Fare"].fillna(mea, inplace=True)
print(df1.isnull().sum())
mean_age1 = d["Age"].mean()
# Fill the null values with the mean age
d["Age"].fillna(mean_age1, inplace=True)
print(d.isnull().sum())
mea1 = d["Fare"].mean()
# Fill the null values with the mean fare
d["Fare"].fillna(mea1, inplace=True)
d["Sex"] = d["Sex"].astype("category")
d["Sex"] = d["Sex"].cat.codes
d["Embarked"] = d["Embarked"].astype("category")
d["Embarked"] = d["Embarked"].cat.codes
print(d)
x = d.drop(["Survived", "Name", "Ticket", "Cabin"], axis=1)
y = d["Survived"]
print(y)
print(d.isnull().sum())
xtrain, xval, ytrain, yval = train_test_split(x, y, test_size=0.2, random_state=42)
"""x1=d.drop(['Survived','Name','Ticket','Cabin'],axis=1)
y1 = d['Survived']"""
"""xval1,xtest,yval1,ytest= train_test_split(x1,y1,test_size=.99)
print(xtest)"""
# print(df1.isnull().sum())
# ###### ss = pd.read_csv('/kaggle/working/submission.csv')
# print(ss)
# print(df1.head())
# print(submission.head())
# print(submission.shape)
# print(df1.shape)
"""parameter={
'criterion':['gini','entropy','log_loss'],
'splitter':['best','random'],
'max_depth':[1,2,3,4,5],
'max_features':['auto', 'sqrt', 'log2'],
'min_samples_split':[1,2,3,4,1.4,2.3,2.6,3.2,4.2]
}"""
"""treemodel=DecisionTreeClassifier()
cv=GridSearchCV(treemodel,param_grid=parameter,cv=5,scoring='accuracy')"""
# cv.fit(xtrain,ytrain)
# cv.best_params_
# predicted=cv.predict(df1)
# print(predicted)
# Pass the criterion to the constructor; setting it after fit() has no effect on the already-trained model
m = RandomForestClassifier(criterion="log_loss")
m.fit(xtrain, ytrain)
predicted = m.predict(df1)
# Build the submission DataFrame
submission = pd.DataFrame({"PassengerId": df1["PassengerId"], "Survived": predicted})
# Save the predictions to a CSV file
submission.to_csv("submission.csv", index=False)
print(submission.shape[0])
|
import numpy as np # linear algebra
np.random.seed(1337)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Libraries
import tensorflow as tf
import matplotlib.pyplot as plt
import PIL
import pathlib
import cv2
from tensorflow.keras import layers
from sklearn.model_selection import KFold, StratifiedKFold
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import csv
print(tf.__version__)
# # Preparing dataset
# Link to the [Dataset being used](https://www.kaggle.com/sartajbhuvaji/brain-tumor-classification-mri)
# Reference Tutorial for general code: [Image Classification Tutorial](https://www.tensorflow.org/tutorials/images/classification)
# path_data = '../input/brain-mri-images-for-brain-tumor-detection/brain_tumor_dataset'
path_test = "../input/brain-tumor-classification-mri/Testing"
path_data = "../input/brain-tumor-classification-mri/Training"
path_test = pathlib.Path(path_test)
path_data = pathlib.Path(path_data)
print(path_data)
image_count = len(list(path_data.glob("*/*.jpg")))
print(image_count)
# #### Image of a brain with tumor
tumors = list(path_data.glob("glioma_tumor/*"))
print(tumors[1])
img1 = PIL.Image.open(str(tumors[0]))
img1
# #### Image of a brain with no tumor
not_tumors = list(path_data.glob("no_tumor/*"))
img2 = PIL.Image.open(str(not_tumors[0]))
img2
img_opencv = cv2.imread(str(not_tumors[0]))
print(img_opencv.shape)
img_opencv1 = cv2.imread(str(tumors[0]))
print(img_opencv1.shape)
# ### Creating Training, Validation and Testing Sets
batch = 32
img_height = 180
img_width = 180
train = tf.keras.preprocessing.image_dataset_from_directory(
path_data,
validation_split=0.2,
subset="training",
seed=42,
image_size=(img_height, img_width),
batch_size=batch,
)
val = tf.keras.preprocessing.image_dataset_from_directory(
path_data,
validation_split=0.2,
subset="validation",
seed=42,
image_size=(img_height, img_width),
batch_size=batch,
)
test = tf.keras.preprocessing.image_dataset_from_directory(
path_test, seed=42, image_size=(img_height, img_width), batch_size=batch
)
print(train.class_names)
print(val.class_names)
print(test.class_names)
classes = train.class_names
plt.figure(figsize=(10, 10))
for img, label in train.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(img[i].numpy().astype("uint8"))
plt.title(classes[label[i]], fontdict={"fontsize": "19", "color": "white"})
for image_batch, labels_batch in train:
print(image_batch.shape)
print(labels_batch.shape)
break
AUTOTUNE = tf.data.AUTOTUNE
train = train.prefetch(buffer_size=AUTOTUNE)
val = val.prefetch(buffer_size=AUTOTUNE)
test = test.prefetch(buffer_size=AUTOTUNE)
# # Helper Functions
#
def prediction_label_comparison(model, test):
# Retrieve a batch of images from the test set
image_batch, label_batch = test.as_numpy_iterator().next()
predictions = model.predict_on_batch(image_batch).flatten()
# Apply a sigmoid since our model returns logits
predictions = tf.nn.sigmoid(predictions).numpy()
n = 0
predict = []
while n <= (predictions.shape[0] - 4):
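        # The model outputs num_classes (= 4) logits per image; after flatten() they are laid
        # out consecutively, so taking argmax over each group of 4 recovers the predicted
        # class index of one image.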
pred = np.argmax(
predictions[n : n + 4]
) # Returns the index of the largest element in the selected subarray
n += 4
predict.append(pred)
predict = np.array(predict)
# print('Predictions:\n',predictions)#.numpy())
print("Labels:\n", label_batch)
print("Predictions:\n", predict)
"""
print(predictions.shape)
print(label_batch.shape)
print(predict.shape)
"""
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image_batch[i].astype("uint8"))
plt.title(classes[predict[i]], fontdict={"fontsize": "14", "color": "white"})
plt.axis("off")
def test_tumor(list_test_path, model):
# sunflower_url = 'https://'
# sunflower_path = tf.keras.utils.get_file('name of file', origin=sunflower_url)
for path_name in list_test_path:
test_img_path = path_name
test_image = tf.keras.preprocessing.image.load_img(
test_img_path, target_size=(img_height, img_width)
)
test_array = tf.keras.preprocessing.image.img_to_array(test_image)
test_array = tf.expand_dims(test_array, 0) # Create a batch
predictions = model.predict(test_array)
score = tf.nn.softmax(predictions[0])
print(
"This image most likely belongs to {} with a {:.2f} percent confidence.".format(
classes[np.argmax(score)], 100 * np.max(score)
)
)
def csv_builder(path_data, label_names):
df = pd.DataFrame(columns=["images", "labels"])
for name in label_names:
BASE_DIR = str(path_data) + "/"
# train_folder_glioma = BASE_DIR+'glioma_tumor/'
train_folder_name = BASE_DIR + name + "/"
# train_annotation = BASE_DIR+'annotated_train_data/'
files_in_train = sorted(os.listdir(train_folder_name))
# files_in_annotated = sorted(os.listdir(train_annotation))
        image_names = [i for i in files_in_train]
        # df.append() was removed in recent pandas versions, so build the rows for this
        # class in one go and concatenate them instead
        rows = pd.DataFrame({"images": [str(x) for x in image_names], "labels": name})
        df = pd.concat([df, rows], ignore_index=True)
# df['images']=[train_folder_glioma+str(x) for x in image_names]
# df['labels']=[train_annotation+str(x) for x in images]
# pd.to_csv('files_path.csv', header=None)
return df
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1.0 / 255)
conv_layer_32 = tf.keras.layers.Conv2D(32, (3, 3), activation="relu")
conv_layer_64 = tf.keras.layers.Conv2D(64, 3, activation="relu")
conv_layer_16 = tf.keras.layers.Conv2D(16, 3, activation="relu")
max_pool = tf.keras.layers.MaxPooling2D()
callback = tf.keras.callbacks.EarlyStopping(monitor="val_accuracy", patience=5)
data_augmentation = tf.keras.Sequential(
[
normalization_layer,
tf.keras.layers.experimental.preprocessing.RandomFlip(
"horizontal_and_vertical"
),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.1),
tf.keras.layers.experimental.preprocessing.RandomZoom(0.1),
tf.keras.layers.experimental.preprocessing.RandomContrast(0.1),
# tf.keras.layers.experimental.preprocessing.RandomCrop(170,170)
]
)
# ### Checking effects of the data augmentation
# IMG_SIZE = 180
# resize_and_rescale = tf.keras.Sequential([
# tf.keras.layers.experimental.preprocessing.Resizing(IMG_SIZE, IMG_SIZE),
# tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
# ])
# result = resize_and_rescale(img_opencv)
# _ = plt.imshow(result)
plt.figure(figsize=(10, 10))
img_array = tf.keras.preprocessing.image.img_to_array(img_opencv)
img_array = tf.expand_dims(img_array, 0)
for i in range(9):
augmented_image = data_augmentation(img_array)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_image[0])
plt.axis("off")
# # Model Building
# ### References for model:
# Model 2: Obtained from a [Kaggle notebook](https://www.kaggle.com/chityeaung/brain-tumor-classification) by [chityeaung](https://www.kaggle.com/chityeaung)
# Model 1 and 3 taken from Tensorflow tutorials:
# [Image Classification Tutorial](https://www.tensorflow.org/tutorials/images/classification)
# [Transfer Learning Tutorial](https://www.tensorflow.org/tutorials/images/transfer_learning)
num_classes = 4
model = tf.keras.Sequential(
[
normalization_layer,
conv_layer_32,
max_pool,
conv_layer_32,
max_pool,
conv_layer_32,
max_pool,
layers.Flatten(),
layers.Dense(128, activation="relu"),
layers.Dense(num_classes),
]
)
model.compile(
optimizer="adam",
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
history = model.fit(
train, validation_data=val, epochs=3, callbacks=callback, shuffle=False
)
eff_epochs = len(history.history["loss"])
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
epochs = 10
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs_range = range(eff_epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label="Training Accuracy")
plt.plot(epochs_range, val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title(
"Training and Validation Accuracy", fontdict={"fontsize": "14", "color": "white"}
)
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss", fontdict={"fontsize": "14", "color": "white"})
plt.show()
model.summary()
results = model.evaluate(test)
print("test loss, test acc:", results)
list_of_paths = [
"../input/brain-tumor-classification-mri/Testing/pituitary_tumor/image(20).jpg",
"../input/brain-tumor-classification-mri/Testing/no_tumor/image(11).jpg",
"../input/brain-tumor-classification-mri/Testing/meningioma_tumor/image(120).jpg",
"../input/brain-tumor-classification-mri/Testing/glioma_tumor/image(16).jpg",
"../input/brain-mri-images-for-brain-tumor-detection/yes/Y100.JPG",
]
test_tumor(list_of_paths, model)
# # Second Model
# The idea here is to add a data augmentation layer, which effectively enlarges the training data by randomly modifying the existing images (for example by flipping or rotating them).
num_classes = 4
model2 = tf.keras.Sequential(
[
# data_augmentation,
normalization_layer,
# tf.keras.layers.Conv2D(32,3,activation='relu'),
conv_layer_32,
layers.MaxPooling2D(pool_size=(2, 2)),
conv_layer_32,
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dense(32, activation="relu"),
layers.Dropout(0.25),
layers.Dense(num_classes, activation="softmax"),
]
)
model2.compile(
optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),  # the last layer applies softmax, so outputs are probabilities, not logits
metrics=["accuracy"],
)
epochs = 50
history = model2.fit(
train, validation_data=val, epochs=epochs, callbacks=callback, shuffle=False
)
model2.summary()
eff_epochs = len(history.history["loss"])
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
epochs = 10
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs_range = range(eff_epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label="Training Accuracy")
plt.plot(epochs_range, val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title(
"Training and Validation Accuracy", fontdict={"fontsize": "14", "color": "white"}
)
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss", fontdict={"fontsize": "14", "color": "white"})
plt.show()
results = model2.evaluate(test)
print("test loss, test acc:", results)
list_of_paths = [
"../input/brain-tumor-classification-mri/Testing/pituitary_tumor/image(20).jpg",
"../input/brain-tumor-classification-mri/Testing/no_tumor/image(11).jpg",
"../input/brain-tumor-classification-mri/Testing/meningioma_tumor/image(120).jpg",
"../input/brain-tumor-classification-mri/Testing/glioma_tumor/image(16).jpg",
"../input/brain-mri-images-for-brain-tumor-detection/yes/Y100.JPG",
]
test_tumor(list_of_paths, model2)
# ## Third Model
# ### Using a pretrained model: MobileNetV2
# Create the base model from the pre-trained model MobileNet V2
image_size = (img_width, img_height)
IMG_SHAPE = image_size + (3,)
base_model = tf.keras.applications.MobileNetV2(
input_shape=IMG_SHAPE, include_top=False, weights="imagenet"
)
base_model.trainable = False
image_batch, label_batch = next(iter(train))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
prediction_layer = tf.keras.layers.Dense(4)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
help(preprocess_input)
inputs = tf.keras.Input(shape=(180, 180, 3))
# x = data_augmentation(inputs)
x = preprocess_input(inputs)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1280, activation="relu")(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
base_learning_rate = 0.0001
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
len(model.trainable_variables)
initial_epochs = 10
loss0, accuracy0 = model.evaluate(val)
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))
history_base = model.fit(
train, epochs=initial_epochs, validation_data=val, shuffle=False
)
model.summary()
acc = history_base.history["accuracy"]
val_acc = history_base.history["val_accuracy"]
loss = history_base.history["loss"]
val_loss = history_base.history["val_loss"]
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label="Training Accuracy")
plt.plot(val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.ylabel("Accuracy")
plt.ylim([min(plt.ylim()), 1])
plt.title(
"Training and Validation Accuracy", fontdict={"fontsize": "14", "color": "white"}
)
plt.subplot(2, 1, 2)
plt.plot(loss, label="Training Loss")
plt.plot(val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.ylabel("Cross Entropy")
plt.ylim([0, 1.0])
plt.title("Training and Validation Loss", fontdict={"fontsize": "14", "color": "white"})
plt.xlabel("epoch")
plt.show()
list_of_paths = [
"../input/brain-tumor-classification-mri/Testing/pituitary_tumor/image(20).jpg",
"../input/brain-tumor-classification-mri/Testing/no_tumor/image(11).jpg",
"../input/brain-tumor-classification-mri/Testing/meningioma_tumor/image(120).jpg",
"../input/brain-tumor-classification-mri/Testing/glioma_tumor/image(16).jpg",
"../input/brain-mri-images-for-brain-tumor-detection/yes/Y100.JPG",
"../input/brain-mri-images-for-brain-tumor-detection/no/14 no.jpg",
]
test_tumor(list_of_paths, model)
result = model.evaluate(test)
print(result)
# ## Fine Tuning the model
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Attempting to fine tune more layers
more_layer = 50
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
# Changing the number of trainable layers doesn't affect the test accuracy too much.
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate / 10),
metrics=["accuracy"],
)
model.summary()
len(model.trainable_variables)
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(
train,
epochs=total_epochs,
initial_epoch=history_base.epoch[-1],
validation_data=val,
)
acc += history_fine.history["accuracy"]
val_acc += history_fine.history["val_accuracy"]
loss += history_fine.history["loss"]
val_loss += history_fine.history["val_loss"]
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label="Training Accuracy")
plt.plot(val_acc, label="Validation Accuracy")
plt.ylim([0.8, 1])
plt.plot(
[initial_epochs - 1, initial_epochs - 1], plt.ylim(), label="Start Fine Tuning"
)
plt.legend(loc="lower right")
plt.title(
"Training and Validation Accuracy", fontdict={"fontsize": "14", "color": "white"}
)
plt.subplot(2, 1, 2)
plt.plot(loss, label="Training Loss")
plt.plot(val_loss, label="Validation Loss")
plt.ylim([0, 1.0])
plt.plot(
[initial_epochs - 1, initial_epochs - 1], plt.ylim(), label="Start Fine Tuning"
)
plt.legend(loc="upper right")
plt.title("Training and Validation Loss", fontdict={"fontsize": "14", "color": "white"})
plt.xlabel("epoch")
plt.show()
loss, accuracy = model.evaluate(test)
print("Test accuracy :", accuracy)
list_of_paths = [
"../input/brain-tumor-classification-mri/Testing/pituitary_tumor/image(20).jpg",
"../input/brain-tumor-classification-mri/Testing/no_tumor/image(11).jpg",
"../input/brain-tumor-classification-mri/Testing/meningioma_tumor/image(120).jpg",
"../input/brain-tumor-classification-mri/Testing/glioma_tumor/image(16).jpg",
"../input/brain-mri-images-for-brain-tumor-detection/yes/Y100.JPG",
]
test_tumor(list_of_paths, model)
prediction_label_comparison(model, test)
# ## Using k-fold cross validation
# #### Reference article for the k-fold cross validation: [Link](https://medium.com/the-owl/k-fold-cross-validation-in-keras-3ec4a3a00538)
# ## Building a .csv file for the images along with their labels: [Reference](https://datascience.stackexchange.com/questions/49094/how-to-transform-a-folder-of-images-into-csv-file)
path_data
label_names = os.listdir(path_data)
label_names
# Function csv_builder() has been defined in the [helper functions](#helper)
final_csv = csv_builder(path_data, label_names)
final_csv
final_csv.to_csv("files_path.csv", header=None)
path_of_csv = "./files_path.csv"
# ### Now this csv file can be used to create a kfold split in the training data
n_splits = 5
Y = final_csv[["labels"]]
kf = KFold(n_splits=5)
skf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
n = len(Y)
print(n)
idg = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.3,
fill_mode="nearest",
horizontal_flip=True,
rescale=1.0 / 255,
)
# for train_index, val_index in kf.split(np.zeros(n),Y):
# training_data = final_csv.iloc[train_index]
# validation_data = final_csv.iloc[val_index]
# train_data_generator = idg.flow_from_dataframe(training_data, directory = path_data,
# x_col = "images", y_col = "labels",
# class_mode = "categorical", shuffle = True,subset='training')
# valid_data_generator = idg.flow_from_dataframe(validation_data, directory = path_data,
# x_col = "images", y_col = "labels",
# class_mode = "categorical", shuffle = True,subset='validation')
help(idg)
help(idg.flow_from_dataframe)
# # CREATE NEW MODEL
# model = model
# # COMPILE NEW MODEL
# model.compile(loss='categorical_crossentropy',
# optimizer=opt,
# metrics=['accuracy'])
# # There can be other callbacks, but just showing one because it involves the model name
# # This saves the best model
# # FIT THE MODEL
# history = model.fit(train_data_generator,
# epochs=num_epochs,
# callbacks=callbacks_list,
# validation_data=valid_data_generator)
# #PLOT HISTORY
# # :
# # :
# # LOAD BEST MODEL to evaluate the performance of the model
# results = model.evaluate(valid_data_generator)
# results = dict(zip(model.metrics_names,results))
# VALIDATION_ACCURACY.append(results['accuracy'])
# VALIDATION_LOSS.append(results['loss'])
# tf.keras.backend.clear_session()
# fold_var += 1
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df_train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
df_train.head()
df_train.describe()
df_train.info()
df_train["SalePrice"].describe()
# sns.distplot() is deprecated in recent seaborn versions; histplot with a KDE is its replacement
sns.histplot(df_train["SalePrice"], kde=True)
f, ax = plt.subplots(figsize=(15, 12))
sns.heatmap(df_train.corr(numeric_only=True), vmax=0.8, square=True)
k = 10
cols = df_train.corr(numeric_only=True).nlargest(k, "SalePrice")["SalePrice"].index
cm = np.corrcoef(df_train[cols].values.T)
# sns.set(font_scale=1.25)
sns.heatmap(
cm,
cbar=True,
annot=True,
square=True,
fmt=".2f",
annot_kws={"size": 10},
yticklabels=cols.values,
xticklabels=cols.values,
)
|
# # Machine Learning Series - Lecture 2
# ***
# ## Inconsistent Data Entry
# ### Get our environment set up
# The first thing we'll need to do is load in the libraries and dataset we'll be using.
# modules we'll use
import pandas as pd
import numpy as np
# helpful modules
import fuzzywuzzy
from fuzzywuzzy import process
import chardet
# read in all our data
professors = pd.read_csv(
"../input/pakistan-intellectual-capital/pakistan_intellectual_capital.csv"
)
# set seed for reproducibility
np.random.seed(0)
# # Do some preliminary text pre-processing
# We'll begin by taking a quick look at the first few rows of the data.
professors.head()
# Say we're interested in cleaning up the "Country" column to make sure there's no data entry inconsistencies in it. We could go through and check each row by hand, of course, and hand-correct inconsistencies when we find them. There's a more efficient way to do this, though!
# get all the unique values in the 'Country' column
countries = professors["Country"].unique()
# sort them alphabetically and then take a closer look
countries.sort()
countries
# Just looking at this, I can see some problems due to inconsistent data entry: ' Germany', and 'germany', for example, or ' New Zealand' and 'New Zealand'.
# The first thing I'm going to do is make everything lower case (I can change it back at the end if I like) and remove any white spaces at the beginning and end of cells. Inconsistencies in capitalizations and trailing white spaces are very common in text data and you can fix a good 80% of your text data entry inconsistencies by doing this.
# convert to lower case
professors["Country"] = professors["Country"].str.lower()
# remove trailing white spaces
professors["Country"] = professors["Country"].str.strip()
# Next we're going to tackle more difficult inconsistencies.
# # Use fuzzy matching to correct inconsistent data entry
# Alright, let's take another look at the 'Country' column and see if there's any more data cleaning we need to do.
# get all the unique values in the 'Country' column
countries = professors["Country"].unique()
# sort them alphabetically and then take a closer look
countries.sort()
countries
# It does look like there is another inconsistency: 'southkorea' and 'south korea' should be the same.
# We're going to use the [fuzzywuzzy](https://github.com/seatgeek/fuzzywuzzy) package to help identify which strings are closest to each other. This dataset is small enough that we could probably correct errors by hand, but that approach doesn't scale well. (Would you want to correct a thousand errors by hand? What about ten thousand? Automating things as early as possible is generally a good idea. Plus, it’s fun!)
# > **Fuzzy matching:** The process of automatically finding text strings that are very similar to the target string. In general, a string is considered "closer" to another one the fewer characters you'd need to change if you were transforming one string into another. So "apple" and "snapple" are two changes away from each other (add "s" and "n") while "in" and "on" are one change away (replace "i" with "o"). You won't always be able to rely on fuzzy matching 100%, but it will usually end up saving you at least a little time.
# Fuzzywuzzy returns a ratio given two strings. The closer the ratio is to 100, the smaller the edit distance between the two strings. Here, we're going to get the ten strings from our list of countries that have the closest distance to "south korea".
# get the top 10 closest matches to "south korea"
matches = fuzzywuzzy.process.extract(
"south korea", countries, limit=10, scorer=fuzzywuzzy.fuzz.ratio
)
# take a look at them
matches
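# As a small aside (not part of the original lesson; exact scores depend on the fuzzywuzzy version), the same scorer can be applied directly to the example pairs from the definition above:
from fuzzywuzzy import fuzz
print(fuzz.ratio("apple", "snapple"))  # fairly high score: only two edits apart
print(fuzz.ratio("in", "on"))  # lower score: one edit is half of a two-letter word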
# We can see that two of the items in the countries are very close to "south korea": "south korea" and "southkorea". Let's replace all rows in our "Country" column that have a ratio of >= 95 with "south korea".
# function to replace rows in the provided column of the provided dataframe
# that match the provided string above the provided ratio with the provided string
def replace_matches_in_column(df, column, string_to_match, min_ratio=90):
# get a list of unique strings
strings = df[column].unique()
# get the top 10 closest matches to our input string
matches = fuzzywuzzy.process.extract(
string_to_match, strings, limit=10, scorer=fuzzywuzzy.fuzz.ratio
)
    # only keep matches with a ratio >= min_ratio
    close_matches = [match[0] for match in matches if match[1] >= min_ratio]
# get the rows of all the close matches in our dataframe
rows_with_matches = df[column].isin(close_matches)
# replace all rows with close matches with the input matches
df.loc[rows_with_matches, column] = string_to_match
# let us know the function's done
print("All done!")
# Now that we have a function, we can put it to the test!
# use the function we just wrote to replace close matches to "south korea" with "south korea"
replace_matches_in_column(
df=professors, column="Country", string_to_match="south korea"
)
# And now let's check the unique values in our "Country" column again and make sure we've tidied up "south korea" correctly.
# get all the unique values in the 'Country' column
countries = professors["Country"].unique()
# sort them alphabetically and then take a closer look
countries.sort()
countries
# Excellent! Now we only have "south korea" in our dataframe and we didn't have to change anything by hand.
# # Your turn!
# There are still some inconsistencies in the dataset. **Continue cleaning the data** in the next exercise.
# # 1) Continue working with countries
# Take another look at the "Country" column and see if there's any more data cleaning we need to do.
# It looks like 'usa' and 'usofa' should be the same country. Correct the "Country" column in the dataframe so that 'usofa' appears instead as 'usa'.
# get the top 10 closest matches to "usa"
matches = fuzzywuzzy.process.extract(
"usa", countries, limit=10, scorer=fuzzywuzzy.fuzz.ratio
)
# take a look at them
matches
# **Hint:** Use the `replace_matches_in_column()` function defined above.
# use the function we just wrote to replace close matches to "usa" with "usa"
replace_matches_in_column(
df=professors, column="Country", string_to_match="usa", min_ratio=70
)
# Let's check the unique values in our "Country" column again.
# get all the unique values in the 'Country' column
countries = professors["Country"].unique()
# sort them alphabetically and then take a closer look
countries.sort()
countries
# # 2) Examine another column
# Run code below to take a look at all the unique values in the "Graduated from" column.
# get all the unique values in the 'Graduated from' column
unis = professors["Graduated from"].unique()
# sort them alphabetically and then take a closer look
unis.sort()
unis
# There are inconsistencies that can be fixed by removing white spaces at the beginning and end of cells. For instance, "University of Central Florida" and " University of Central Florida" both appear in the column.
# # 3) Do some text pre-processing
# Convert every entry in the "Graduated from" column in the `professors` DataFrame to remove white spaces at the beginning and end of cells.
# remove white spaces at the beginning and end of cells
professors["Graduated from"] = professors["Graduated from"].str.strip()
# And now let's check the unique values in our "Graduated from" column again.
# get all the unique values in the 'Graduated from' column
unis = professors["Graduated from"].unique()
# sort them alphabetically and then take a closer look
unis.sort()
unis
# ***
# ## Dealing with missing values
# First method is to replace the missing values with the “mean” or “median” of the whole available data.
# loading data set
data = pd.read_csv("../input/grocery-items/grocery_items.csv")
# display the data
data
# To check for null values in a Pandas DataFrame, we use the `isnull()` function. It returns a DataFrame of Boolean values that are True for NaN values.
# using isnull() function
data.isnull()
# Masking data to select only the rows with `quantity` = **NULL**.
# creating bool series True for NaN values
bool_series = pd.isnull(data["quantity"])
# filtering data
# displaying data only with quantity = NaN
data[bool_series]
# Masking data to select only the rows with `quantity` != **NULL**.
# creating bool series True for non-NaN values
bool_series = pd.notnull(data["quantity"])
# filtering data
# displaying data only where quantity is not NaN
data[bool_series]
# # How many missing data points do we have?
# Ok, now we know that we do have some missing values. Let's see how many we have in each column.
# get the number of missing data points per column
missing_values_count = data.isnull().sum()
# look at the # of missing points in all columns
missing_values_count
# That seems like a lot! It might be helpful to see what percentage of the values in our dataset were missing to give us a better sense of the scale of this problem:
# how many total missing values do we have?
total_cells = np.prod(data.shape)
total_missing = missing_values_count.sum()
# percent of data that is missing
percent_missing = (total_missing / total_cells) * 100
print("{:.2f} %".format(percent_missing))
# Wow, almost a quarter of the cells in this dataset are empty!
# ## Drop missing values
# If you're in a hurry or don't have a reason to figure out why your values are missing, one option you have is to just remove any rows or columns that contain missing values. (Note: I don't generally recommend this approach for important projects! It's usually worth it to take the time to go through your data and really look at all the columns with missing values one-by-one to really get to know your dataset.)
# If you're sure you want to drop rows with missing values, pandas does have a handy function, `dropna()` to help you do this. Let's try it out on our dataset!
# remove all the rows that contain a missing value
data.dropna()
# Oh dear, it looks like that's removed all our data! 😱 This is because every row in our dataset had at least one missing value. We might have better luck removing the rows that have at least one missing value by looking at a subset of the **columns**. [dropna documentation](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.dropna.html)
# define in which columns to look for missing values
data_na_dropped = data.dropna(
axis="index", subset=["quantity", "price", "bought"], how="any"
)
# display the data after dropping Na values
data_na_dropped
# Check how much we lost from our data?
# just how much data did we lose?
print("Rows in original dataset: %d \n" % data.shape[0])
print("Rows with na's dropped: %d" % data_na_dropped.shape[0])
# ## Filling in missing values automatically
# Another option is to try and fill in the missing values.
# filling missing value using fillna()
data.fillna(0)
# filling a missing value with
# previous ones
data.fillna(method="pad")
# filling a missing value with
# next ones
data.fillna(method="bfill")
# Let's generate the descriptive statistics of the data.
# Generate descriptive statistics
data.describe().iloc[:, 1:]
# Replace missing values in `quantity` column with its **mean** value.
# replacing missing values in quantity
# column with mean of that column
data["quantity"] = data["quantity"].fillna(data["quantity"].mean())
# display the data
data.iloc[:, 1:]
# Replace missing values in `price` column with its **median** value.
# replacing missing values in price column
# with median of that column
data["price"] = data["price"].fillna(data["price"].median())
# display the data
data.iloc[:, 1:]
# Replace missing values in `bought` column with its **standard deviation** value.
# replacing missing values in bought column with
# standard deviation of that column
data["bought"] = data["bought"].fillna(data["bought"].std())
# display the data
data.iloc[:, 1:]
# Replace missing values in `forenoon` column with its **minimum** value.
# replacing missing values in forenoon column with
# minimum number of that column
data["forenoon"] = data["forenoon"].fillna(data["forenoon"].min())
# display the data
data.iloc[:, 1:]
# Replace missing values in `afternoon` column with its **maximum** value.
# replacing missing values in afternoon column with
# maximum number of that column
data["afternoon"] = data["afternoon"].fillna(data["afternoon"].max())
# display the data
data.iloc[:, 1:]
# ## Filling missing data with interpolation
# Creating the dataframe
df = pd.DataFrame(
{
"A": [16, 8, 4, None, 1],
"B": [None, 2, 3, 4, None],
"C": [20, 15, None, 5, 0],
"D": [2, 3, None, None, 6],
}
)
# Print the dataframe
df
import matplotlib.pyplot as plt
plt.scatter(df["C"], df["D"])
plt.xlabel("C", fontsize=14)
plt.ylabel("D", fontsize=14)
plt.show()
# to interpolate the missing values
interpolated_df = df.interpolate(method="linear", limit_direction="forward", limit=2)
interpolated_df
# As we can see the output, values in the first row could not get filled as the direction of filling of values is forward and there is no previous value which could have been used in interpolation. [interpolate documentation](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html)
interpolated_df.plot.scatter("C", "D")
plt.show()
df_averaged = df.copy()
df_averaged["C"] = df_averaged["C"].fillna(df_averaged["C"].mean())
df_averaged["D"] = df_averaged["D"].fillna(df_averaged["D"].mean())
df_averaged
df_averaged.plot.scatter("C", "D")
plt.show()
# ## Filling missing data with randomization
np.random.seed(765)
def fill_with_random(df):
"""Fill `df2`'s column with name `column` with random data based on non-NaN data from `column`"""
df_copy = df.copy()
for column in df_copy.columns:
df_copy[column] = df_copy[column].apply(
lambda x: np.random.choice(df_copy[column].dropna().values)
if np.isnan(x)
else x
)
return df_copy
df_randomized = fill_with_random(df)
df_randomized
df_randomized.plot.scatter("C", "D")
plt.show()
# ***
# ## Deal with categorical data
# Mapping to numeric representation with **ordinal** sequence.
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
x = ["Apple", "Orange", "Apple", "Pear"]
y = label_encoder.fit_transform(x)
print(y)
# Mapping to numeric representation with **nominal** relations.
from sklearn.preprocessing import OneHotEncoder
onehot_encoder = OneHotEncoder(sparse=False)
y = np.array(x).reshape(len(x), 1)
onehot_encoded = onehot_encoder.fit_transform(y)
print(onehot_encoded)
# ***
# ## Feature Scaling and Normalization
# ### Preparing the Data
# Before data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured — this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with, however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.
# Allows the use of display() for DataFrames
from IPython.display import display
# Read the data.
raw_data = pd.read_csv("../input/eeg-bands-vector/eeg_bands_vector.csv")
# Split the data into features and target label
target_raw = raw_data[raw_data.columns[-1]]
features_raw = raw_data.drop(raw_data.columns[-1], axis=1)
# Print data shape.
print("The shape of the data: {}".format(raw_data.shape))
# Success - Display the first fifteen records
display(raw_data.head(n=15))
import matplotlib.pyplot as plt
def distribution(data, value, transformed=False):
"""
Visualization code for displaying skewed distributions of features
"""
# Create figure
fig = plt.figure(figsize=(11, 5))
# Skewed feature plotting
for i, feature in enumerate([value]):
ax = fig.add_subplot(1, 1, i + 1)
ax.hist(data[data.columns[feature - 1]], bins=25, color="#00A0A0")
ax.set_title(
"'%s' Feature Distribution" % (data.columns[feature - 1]), fontsize=14
)
ax.set_xlabel("Value")
ax.set_ylabel("Number of Records")
ax.set_ylim((0, 500))
ax.set_yticks([0, 100, 200, 300, 400, 500])
# Plot aesthetics
if transformed:
fig.suptitle(
"Log-transformed Distributions of Continuous EEG Data Features",
fontsize=16,
y=1.03,
)
else:
fig.suptitle(
"Skewed Distributions of Continuous EEG Data Features", fontsize=16, y=1.03
)
fig.tight_layout()
# ### Transforming Skewed Continuous Features
# A dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. In our dataset, for example, a feature such as `'delta ch(2)'` fits this description.
# The code cell below will plot a histogram of this feature. Note the range of the values present and how they are distributed.
# Visualize skewed continuous features of original data
distribution(raw_data, 6)
# For highly-skewed feature distributions such as `'delta ch(2)'`, it is common practice to apply a logarithmic transformation on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the logarithm successfully.
# The code cell below will perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed.
# Log-transform the skewed features
features_log_transformed = features_raw.apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
distribution(features_log_transformed, 6, transformed=True)
# ### Outlier Detection
# Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identifying outliers](https://www.stat.cmu.edu/~cshalizi/statcomp/13/labs/05/lab-05.pdf): An **outlier step** is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.
# Run the code cell below to display the boxplot of the `delta ch(2)` feature with the `state` output class to check if there are any remaining outlier's effect.
# import seaborn library for visualization
import seaborn as sns
# plot the boxplot for 'delta ch(2)' feature
sns.boxplot(x="state", y="delta ch(2)", data=features_log_transformed.join(target_raw))
# Now let's try to **remove** the outliers using IQR method.
# Calculate Q1 (25th quantile of the data) for all features.
Q1 = features_log_transformed.quantile(0.25)
# Calculate Q3 (75th quantile of the data) for all features.
Q3 = features_log_transformed.quantile(0.75)
# Use the interquartile range to calculate an outlier step (1.5 times the interquartile range).
IQR = Q3 - Q1
step = 1.5 * IQR
# Remove the outliers from the dataset.
features_log_transformed_out = features_log_transformed[
~(
(features_log_transformed < (Q1 - step))
| (features_log_transformed > (Q3 + step))
).any(axis=1)
]
# Join the features and the target after removing outliers.
preprocessed_data_out = features_log_transformed_out.join(target_raw)
target_raw_out = preprocessed_data_out[preprocessed_data_out.columns[-1]]
# Print data shape after removing outliers.
print(
"The shape of the data after removing outliers: {}".format(
preprocessed_data_out.shape
)
)
# Success - Display the first ten records
display(preprocessed_data_out.head(n=10))
# Let's check again the boxplot for `delta ch(2)` feature with `state` after outliers removal.
# plot the boxplot for 'delta ch(2)' feature after outliers removal
sns.boxplot(x="state", y="delta ch(2)", data=preprocessed_data_out)
# ### Normalizing Numerical Features
# In addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'delta ch(2)'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as illustrated below.
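# A tiny illustration (not part of the lesson) of what min-max scaling does to a single column: each value x is mapped to (x - min) / (max - min), so the smallest value becomes 0, the largest becomes 1, and the shape of the distribution is preserved.
example_col = np.array([2.0, 4.0, 6.0, 10.0])
print((example_col - example_col.min()) / (example_col.max() - example_col.min()))  # [0.   0.25 0.5  1.  ]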
# Run the code cell below to normalize each numerical feature. We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
features_log_minmax_transform_out = pd.DataFrame(
scaler.fit_transform(features_log_transformed_out), columns=features_raw.columns
)
# Show an example of a record with scaling applied
display(features_log_minmax_transform_out.head())
|
# # Additional Packages
# Initially, we are required to install any additional packages that might be used within this notebook. The Optimum-Path Forest Python-based implementation is available in the OPFython package, which can be installed through `pip`.
# Install additional packages
# # Imports
# Following up, we are going to define all required imports to work within this notebook. In this case, we will be using Pandas to perform the data input and output, as well as NumPy to perform the transition between data frames and arrays. Finally, we will be importing the SupervisedOPF class from OPFython.
import numpy as np
import pandas as pd
import opfython.math.general as g
from opfython.models.supervised import SupervisedOPF
# # Data Loading
# The first step should be pretty straightforward. Just fire up Pandas and loads the provided `.csv` files.
# Loading training and testing sets
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
# # Pre-Processing the Data
# Before preparing the data and creating the corresponding features and labels arrays, it is important to perform an exploratory analysis over the data, pre-processing it for missing values or even removing some unwanted information that might impact our training procedure.
# The following columns are available: PassengerId, Survived, Pclass, Name, Sex, Age, SibSp, Parch, Ticket, Fare, Cabin, Embarked.
# Extracts information regarding the availability of non-null values
# Note that `Cabin` can be discarded, as only roughly 23% of its values are available
print(train.info(), test.info())
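# A quick check (illustrative) of the figure quoted above: the share of non-null `Cabin` entries in the training set is roughly 0.23.
print(train["Cabin"].notna().mean())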
# Let us print some information regarding how a row of data looks like
# Note that we can also initially discard `Name` and `Ticket`, as they are string objects and might not be
# the ideal type to employ in a naïve version of a supervised classifier
print(train.head(), test.head())
# Further, let us compute the median of the `Age` column and fill the `NaN` values
# Note that we need to ignore its missing values or the median will not be properly calculated
train_age_median = np.median(train["Age"][train["Age"].notna()])
train["Age"] = train["Age"].fillna(train_age_median)
# Analyzes the distribution of `Embarked` samples
# Note that `S` is the most frequent value, so we will fill the 2 remaining rows with `S`
# print(train['Embarked'].value_counts())
train["Embarked"] = train["Embarked"].fillna("S")
# We also need to perform the same operation on the testing set's `Age` column
# Note that the testing set also has a missing fare value
test_age_median = np.median(test["Age"][test["Age"].notna()])
test_fare_median = np.median(test["Fare"][test["Fare"].notna()])
test["Age"] = test["Age"].fillna(test_age_median)
test["Fare"] = test["Fare"].fillna(test_fare_median)
# # Preparing the Data
# With the pre-processing ready, we can now select the features that will be used and further convert them into normalized `x` and `y` numpy arrays.
# Note that `SupervisedOPF.fit()` requires arrays of training samples and labels. Also, the labels should start their indexing with `1` instead of `0`.
# Defines the features that will be used
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Gathers the training samples and convert to a normalized numpy array
# Note that OPF labels start from `1`
x_train = g.normalize(pd.get_dummies(train[features]).to_numpy())
y_train = train["Survived"].to_numpy() + 1
# Gathers the testing samples and convert to a normalized numpy array
x_test = g.normalize(pd.get_dummies(test[features]).to_numpy())
# # Training an Optimum-Path Forest
# With the data prepared, it is possible to instantiate a `SupervisedOPF` class and fit the training data.
# Defines the OPF-based model
opf = SupervisedOPF()
# Fits the model
opf.fit(x_train, y_train)
# # Predicting Unseen Data and Final Outputting
# After performing the training, we can predict the unseen data and prepare the final submission data.
# Predicts the unseen data
# Remember that we need to subtract `1` due to OPF labeling
preds = np.asarray(opf.predict(x_test)) - 1
# Creates the submission data and outputs to a .csv file
output = pd.DataFrame({"PassengerId": test.PassengerId, "Survived": preds})
output.to_csv("titanic-opf_submission.csv", index=False)
|
# Import the pandas module under the alias "pd"
import pandas as pd
# # Preparing datasets
# ## Merging all data
# Read in the "alcohol consumption per country" dataset
dfalc = pd.read_csv(
"https://raw.githubusercontent.com/fivethirtyeight/data/master/alcohol-consumption/drinks.csv"
)
# Rename variables
dfalc.rename(
columns={"country": "Country", "total_litres_of_pure_alcohol": "Alc Consumpt"},
inplace=True,
)
# Drop all variables except "Country" and "Alc Consumpt"
dfalc = dfalc.filter(items=["Country", "Alc Consumpt"])
# Remove invisible characters
dfalc.Country = dfalc.Country.str.strip()
dfalc
# Read in a second dataset: "share of Muslim citizens per country" (CIA Factbook)
dfrel = pd.read_excel(
"http://gsociology.icaap.org/data/religion.xls", sheet_name="data", header=2
)
# Drop all variables except "Country" and "Muslim"
dfrel = dfrel.filter(items=["Country", "Muslim"])
# Remove invisible characters
dfrel.Country = dfrel.Country.str.strip()
dfrel
# Create a new dataset "df" by merging the first two datasets.
# The key variable, which must appear in both datasets, is "Country" here.
df = dfalc.merge(dfrel, on="Country")
df
df.plot.scatter(x="Muslim", y="Alc Consumpt")
# ## Cleaning the dataset
# Example dataset: "wine"
df = pd.read_csv(
"https://gist.githubusercontent.com/clairehq/79acab35be50eaf1c383948ed3fd1129/raw/407a02139ae1e134992b90b4b2b8c329b3d73a6a/winemag-data-130k-v2.csv",
index_col=0,
)
df.head(5)
df.shape
# ### Checking for missing data points
# Check how many data points are missing for each variable (sorted in descending order)
df.isnull().sum().sort_values(ascending=False)
# Drop all observations (rows) that have no data for the variable "price".
df.dropna(subset=["price"], inplace=True)
df.shape
# ### Dropping superfluous variables
# Drops the variables (columns) "region_2" and "taster_twitter_handle"
df.drop(columns=["region_2", "taster_twitter_handle"], inplace=True)
# Alternative: keep only the variables "country", "points" and "price", for example
# df = df.filter(items=['country', 'points', 'price'])
# ## Indexing/labelling
# ### Indexing/labelling observations
# With set_index we choose a variable as the label index
df.set_index("title", inplace=True)
df.head(5)
# Indexing observations is particularly important for time-series and panel datasets
df_ts = pd.read_csv(
"https://raw.githubusercontent.com/datasets/covid-19/main/data/time-series-19-covid-combined.csv"
)
df_ts.head()
df_ts.set_index("Date", inplace=True)
df_ts.iloc[200:210]
# ### Indexing/labelling variables
# Example from one of your fellow students:
df_UN = pd.read_csv(
"https://data.un.org/_Docs/SYB/CSV/SYB63_176_202009_Tourist-Visitors%20Arrival%20and%20Expenditure.csv"
)
df_UN.head(5)
# The argument "header=X" when reading the dataset tells pandas that the variable labels are in row X+1.
df_UN = pd.read_csv(
"https://data.un.org/_Docs/SYB/CSV/SYB63_176_202009_Tourist-Visitors%20Arrival%20and%20Expenditure.csv",
header=1,
)
df_UN.head(5)
# Manually rename the variable (column) "Unnamed: 1"
df_UN.rename(
columns={"Unnamed: 1": "Tourist/visitor arrivals and tourism expenditure"},
inplace=True,
)
df_UN.head(5)
# ### Excursus: selecting/accessing data with iloc and loc
# #### Accessing via integer indices (iloc)
# ##### Accessing rows via integer index
# Accesses the row with index "0" (corresponds to the first observation)
df.iloc[0]
# ##### Accessing columns via integer index
# Accesses all rows of the column with index "0" (corresponds to the first variable)
df.iloc[:, 0]
# ##### Accessing arbitrary parts of the dataset via integer indices
# Accesses all rows with index up to 3 (exclusive) in the first column.
df.iloc[:3, 0]
# Accesses the first column of all rows with index between 1 (inclusive) and 3 (exclusive).
df.iloc[1:3, 0]
# Accesses the last five rows (all columns).
df.iloc[-5:]
#
# #### Accessing via label indices (loc)
# Accesses a specific row and column.
df.loc["Tapestry 2005 Cabernet Sauvignon (McLaren Vale)", "country"]
# Accesses all rows of the variables "country", "title" and "variety".
df.reindex(columns=["country", "title", "variety"])
# The following alternative only works if we have not yet set a label index for the observations (rows):
# df.loc[:, ['country', 'title', 'variety']]
# #### Conditional access (condition: a specific variable contains specific values)
# We can also include a condition in the loc command.
df.loc[df.country == "Italy"]
# We can also set several conditions. We combine the conditions with an "&".
df.loc[(df.country == "Italy") & (df.points >= 88)]
# "Oder"-Bedingungen verknüpfen wir mit einem "|".
df.loc[(df.country == "Italy") | (df.points >= 88)]
# ## Adjusting variable formats
# Show the format (dtype) of all variables
df.dtypes
# Converts the variable "country" to a categorical variable ("category")
df["country"] = df["country"].astype("category")
# ## Checking for duplicate observations/elements
# Drops all observations that have an EXACT duplicate in the dataset.
df.drop_duplicates(inplace=True)
df.shape
# Shows all duplicate elements (i.e. rows with an identical label index)
pd.set_option("display.max_rows", None)
df[df.index.duplicated(keep=False)]
df.loc["Jacquart NV Brut Mosaïque (Champagne)"]
# ## Plausibility checks
# ### Plausibility checks for qualitative variables
# Shows the frequency distribution
df["country"].value_counts(dropna=False, ascending=False)
df["variety"].value_counts(dropna=False, ascending=False)
# Shows descriptive statistics for all quantitative variables.
df.describe()
# ## Creating variables
# With the "isin" condition we can create categorical variables.
# For example, we can create a variable "continent" from the content of the variable "country".
# Step 1: show the distinct values of the variable "country":
df["country"].unique()
# Step 2:
# Create an empty string variable "continent"
df["continent"] = ""
# Step 3:
# Assign the continents using the "isin" command
df.loc[
df.country.isin(
[
"Italy",
"Portugal",
"Spain",
"France",
"Germany",
"Austria",
"Hungary",
"Greece",
"Romania",
"Turkey",
"Czech Republic",
"Slovenia",
"Luxembourg",
"Croatia",
"Georgia",
"Uruguay",
"England",
"Serbia",
"Moldova",
"Bulgaria",
"Cyprus",
"Armenia",
"Switzerland",
"Bosnia and Herzegovina",
"Ukraine",
"Slovakia",
"Macedonia",
]
),
"continent",
] = "Europe"
df.loc[
df.country.isin(["Argentina", "Chile", "Mexico", "Uruguay", "Brazil", "Peru"]),
"continent",
] = "South America"
df.loc[df.country.isin(["US", "Canada"]), "continent"] = "North America"
df.loc[df.country.isin(["South Africa", "Morocco"]), "continent"] = "Africa"
df.loc[df.country.isin(["Australia", "New Zealand"]), "continent"] = "Australia"
df.loc[df.country.isin(["Israel", "Lebanon", "India"]), "continent"] = "Asia"
pd.set_option("display.max_rows", 10)
df.reindex(columns=["country", "continent"])
df.to_csv("WineData_aufbereitet.csv", sep=",")
|
# # **Load and Pre-process Data**
import sklearn
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
# Load your Flipkart dataset
# The dataset is read as a CSV file from the Kaggle input directory
data = pd.read_csv(
"/kaggle/input/flipkart/flipkart_customer_reviews_dataset/flipkart_cleaned_data.csv"
)
# Take a random 10% subset of the data (note: `subset_data` is not used below; the full dataset is vectorized)
subset_frac = 0.10 # Choose the fraction of data
subset_data = data.sample(frac=subset_frac, random_state=42)
# Encode sentiment labels using LabelEncoder
label_encoder = LabelEncoder()
data["encoded_sentiment"] = label_encoder.fit_transform(data["sentiment"])
# Perform text vectorization using TfidfVectorizer
max_features = 5000 # Choose an appropriate number of maximum features
vectorizer = TfidfVectorizer(max_features=max_features)
X = vectorizer.fit_transform(data["review"]).toarray()
Y = data["encoded_sentiment"].values
# # **Logistic Regression**
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# Use a hashing vectorizer instead of TfidfVectorizer for memory efficiency
max_features = 5000 # Choose an appropriate number of maximum features
vectorizer = HashingVectorizer(n_features=max_features)
# Replace TfidfVectorizer in previous code with the code below
X = vectorizer.fit_transform(data["review"]).toarray()
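# Note: calling .toarray() on the hashed matrix gives up the memory savings that
# motivated HashingVectorizer. A minimal sketch of keeping the features sparse
# (SGDClassifier accepts scipy sparse input directly; the names below are illustrative):
# X_sparse = vectorizer.transform(data["review"])  # stays a scipy CSR matrix
# Xs_train, Xs_test, ys_train, ys_test = train_test_split(
#     X_sparse, Y, test_size=0.20, random_state=0
# )
# clf_sparse = SGDClassifier(loss="log", random_state=0).fit(Xs_train, ys_train)
# print("Sparse-input accuracy:", clf_sparse.score(Xs_test, ys_test))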
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=0
)
# Initialize the classifier (logistic regression trained with SGD; newer scikit-learn versions name this loss "log_loss")
clf_logreg = SGDClassifier(loss="log", random_state=0, max_iter=1000, tol=1e-3)
# Active learning loop parameters
n_queries = 3
chunk_size = 200 # Use an appropriate chunk size during online learning
max_accuracy = 0.95 # Set the maximum desired accuracy
for i in range(n_queries):
# Train the classifier on smaller chunks of labeled data
for j in range(0, len(X_train), chunk_size):
X_train_chunk = X_train[j : j + chunk_size]
y_train_chunk = y_train[j : j + chunk_size]
clf_logreg.partial_fit(X_train_chunk, y_train_chunk, classes=np.unique(Y))
    # Get the uncertainty scores for the samples in the test set (1 - top predicted probability)
    uncertainty = 1 - clf_logreg.predict_proba(X_test).max(axis=1)
    # Get the index of the most uncertain (least confident) sample
    uncertain_sample = uncertainty.argmax()
# Add the uncertain sample to the labeled data
X_train = np.concatenate((X_train, [X_test[uncertain_sample]]))
y_train = np.concatenate((y_train, [y_test[uncertain_sample]]))
# Remove the uncertain sample from the test set
X_test = np.delete(X_test, uncertain_sample, axis=0)
y_test = np.delete(y_test, uncertain_sample)
# Report performance metrics
y_pred = clf_logreg.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Logistic Regression Iteration", i + 1, "Accuracy:", accuracy)
# Check if the desired accuracy has been reached
if accuracy >= max_accuracy:
print("Desired accuracy of", max_accuracy, "has been reached!")
break
# Compute the final model's confusion matrix and classification report
y_pred = clf_logreg.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap="Blues", fmt="d")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title(f"Logistic Regression Confusion Matrix")
plt.show()
print("Logistic Regression Performance Metrics")
print(classification_report(y_test, y_pred))
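# The loops below repeatedly pick the sample whose top predicted probability is the
# smallest (least-confidence sampling). A minimal reusable helper sketch of that idea
# (an illustrative addition, not part of the original pipeline):
def least_confident_index(probas):
    # probas: (n_samples, n_classes) array of predicted class probabilities;
    # the least confident sample is the one with the smallest top-class probability
    return (1 - probas.max(axis=1)).argmax()


# Example usage: least_confident_index(clf_logreg.predict_proba(X_test))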
# # **KNeighbor**
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# Load and preprocess the dataset (same as the first optimization)
# ...
# Use a hashing vectorizer instead of TfidfVectorizer for memory efficiency
max_features = 5000 # Choose an appropriate number of maximum features
vectorizer = HashingVectorizer(n_features=max_features)
# Replace TfidfVectorizer in previous code with the code below
X = vectorizer.fit_transform(data["review"]).toarray()
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=0
)
# Initialize the classifier
clf_knn = KNeighborsClassifier(n_neighbors=5)
# Active learning loop parameters
n_queries = 3
chunk_size = 200 # Use an appropriate chunk size during online learning
for i in range(n_queries):
# Train the classifier on smaller chunks of labeled data
for j in range(0, len(X_train), chunk_size):
X_train_chunk = X_train[j : j + chunk_size]
y_train_chunk = y_train[j : j + chunk_size]
clf_knn.fit(X_train_chunk, y_train_chunk)
    # Get the uncertainty scores for the samples in the test set (1 - top predicted probability)
    uncertainty = 1 - clf_knn.predict_proba(X_test).max(axis=1)
    # Get the index of the most uncertain (least confident) sample
    uncertain_sample = uncertainty.argmax()
# Add the uncertain sample to the labeled data
X_train = np.concatenate((X_train, [X_test[uncertain_sample]]))
y_train = np.concatenate((y_train, [y_test[uncertain_sample]]))
# Remove the uncertain sample from the test set
X_test = np.delete(X_test, uncertain_sample, axis=0)
y_test = np.delete(y_test, uncertain_sample)
# Report performance metrics
y_pred = clf_knn.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("KNN Iteration", i + 1, "Accuracy:", accuracy)
# Compute the final model's confusion matrix and classification report
y_pred = clf_knn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap="Blues", fmt="d")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title(f"KNN Confusion Matrix")
plt.show()
print("KNN Performance Metrics")
print(classification_report(y_test, y_pred))
# # **Random Forest**
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# Load and preprocess the dataset (same as the first optimization)
# ...
# Use a hashing vectorizer instead of TfidfVectorizer for memory efficiency
max_features = 5000 # Choose an appropriate number of maximum features
vectorizer = HashingVectorizer(n_features=max_features)
# Replace TfidfVectorizer in previous code with the code below
X = vectorizer.fit_transform(data["review"]).toarray()
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=0
)
# Initialize the classifier
clf_rf = RandomForestClassifier(n_estimators=100, random_state=0)
# Active learning loop parameters
n_queries = 3
chunk_size = 200 # Use an appropriate chunk size during online learning
max_accuracy = 0.95 # Set the maximum desired accuracy
for i in range(n_queries):
# Train the classifier on smaller chunks of labeled data
for j in range(0, len(X_train), chunk_size):
X_train_chunk = X_train[j : j + chunk_size]
y_train_chunk = y_train[j : j + chunk_size]
clf_rf.fit(X_train_chunk, y_train_chunk)
    # Get the uncertainty scores for the samples in the test set (1 - top predicted probability)
    uncertainty = 1 - clf_rf.predict_proba(X_test).max(axis=1)
# Get the index of the most uncertain sample
uncertain_sample = uncertainty.argmax()
# Add the uncertain sample to the labeled data
X_train = np.concatenate((X_train, [X_test[uncertain_sample]]))
y_train = np.concatenate((y_train, [y_test[uncertain_sample]]))
# Remove the uncertain sample from the test set
X_test = np.delete(X_test, uncertain_sample, axis=0)
y_test = np.delete(y_test, uncertain_sample)
# Report performance metrics
y_pred = clf_rf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Random Forest Iteration", i + 1, "Accuracy:", accuracy)
# Check if the desired accuracy has been reached
if accuracy >= max_accuracy:
print("Desired accuracy of", max_accuracy, "has been reached!")
break
# Compute the final model's confusion matrix and classification report
y_pred = clf_rf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap="Blues", fmt="d")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title(f"Random Forest Confusion Matrix")
plt.show()
print("Random Forest Performance Metrics")
print(classification_report(y_test, y_pred))
# # **Gradient Boost**
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# Load and preprocess the dataset (same as the first optimization)
# ...
# Use a hashing vectorizer instead of TfidfVectorizer for memory efficiency
max_features = 5000 # Choose an appropriate number of maximum features
vectorizer = HashingVectorizer(n_features=max_features)
# Replace TfidfVectorizer in previous code with the code below
X = vectorizer.fit_transform(data["review"]).toarray()
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=0
)
# Initialize the classifier
clf_gbm = GradientBoostingClassifier(n_estimators=100, random_state=0)
# Active learning loop parameters
n_queries = 3
chunk_size = 200 # Use an appropriate chunk size during online learning
min_accuracy = 0.95 # Set the min desired accuracy
for i in range(n_queries):
# Train the classifier on smaller chunks of labeled data
for j in range(0, len(X_train), chunk_size):
X_train_chunk = X_train[j : j + chunk_size]
y_train_chunk = y_train[j : j + chunk_size]
clf_gbm.fit(X_train_chunk, y_train_chunk)
    # Get the uncertainty scores for the samples in the test set (1 - top predicted probability)
    uncertainty = 1 - clf_gbm.predict_proba(X_test).max(axis=1)
    # Get the index of the most uncertain (least confident) sample
    uncertain_sample = uncertainty.argmax()
# Add the uncertain sample to the labeled data
X_train = np.concatenate((X_train, [X_test[uncertain_sample]]))
y_train = np.concatenate((y_train, [y_test[uncertain_sample]]))
# Remove the uncertain sample from the test set
X_test = np.delete(X_test, uncertain_sample, axis=0)
y_test = np.delete(y_test, uncertain_sample)
# Report performance metrics
y_pred = clf_gbm.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Gradient Boosting Iteration", i + 1, "Accuracy:", accuracy)
# Check if the desired accuracy has been reached
if accuracy >= min_accuracy:
print("Desired accuracy of", min_accuracy, "has been reached!")
break
# Compute the final model's confusion matrix and classification report
y_pred = clf_gbm.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap="Blues", fmt="d")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title(f"Gradient Boosting Confusion Matrix")
plt.show()
print("Gradient Boosting Performance Metrics")
print(classification_report(y_test, y_pred))
# # **Decision Tree**
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# Load and preprocess the dataset (same as the first optimization)
# ...
# Use a hashing vectorizer instead of TfidfVectorizer for memory efficiency
max_features = 5000 # Choose an appropriate number of maximum features
vectorizer = HashingVectorizer(n_features=max_features)
# Replace TfidfVectorizer in previous code with the code below
X = vectorizer.fit_transform(data["review"]).toarray()
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=0
)
# Initialize the classifier
clf_dt = DecisionTreeClassifier(max_depth=5, random_state=0)
# Active learning loop parameters
n_queries = 3
chunk_size = 200 # Use an appropriate chunk size during online learning
min_accuracy = 0.95  # Set the target accuracy for early stopping
for i in range(n_queries):
# Train the classifier on smaller chunks of labeled data
for j in range(0, len(X_train), chunk_size):
X_train_chunk = X_train[j : j + chunk_size]
y_train_chunk = y_train[j : j + chunk_size]
clf_dt.fit(X_train_chunk, y_train_chunk)
    # Get the uncertainty scores for the samples in the test set (1 - top predicted probability)
    uncertainty = 1 - clf_dt.predict_proba(X_test).max(axis=1)
    # Get the index of the most uncertain (least confident) sample
    uncertain_sample = uncertainty.argmax()
# Add the uncertain sample to the labeled data
X_train = np.concatenate((X_train, [X_test[uncertain_sample]]))
y_train = np.concatenate((y_train, [y_test[uncertain_sample]]))
# Remove the uncertain sample from the test set
X_test = np.delete(X_test, uncertain_sample, axis=0)
y_test = np.delete(y_test, uncertain_sample)
# Report performance metrics
y_pred = clf_dt.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Decision Tree Iteration", i + 1, "Accuracy:", accuracy)
# Check if the desired accuracy has been reached
if accuracy >= min_accuracy:
print("Desired accuracy of", min_accuracy, "has been reached!")
break
# Compute the final model's confusion matrix and classification report
y_pred = clf_dt.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap="Blues", fmt="d")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title(f"Decision Tree Confusion Matrix")
plt.show()
print("Decision Tree Performance Metrics")
print(classification_report(y_test, y_pred))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import re
ratings = pd.read_csv("/kaggle/input/the-movies-dataset/ratings_small.csv")
movies = pd.read_csv("/kaggle/input/the-movies-dataset/movies_metadata.csv")
ratings
type(movies.loc[0, "id"])
series = movies.loc[:, "id"]
lista = []
for word in series:
result = re.findall(r"\w\w\w\w-\w\w-\w\w", word)
if result != lista:
print(result)
movies.loc[movies["id"] == "2014-01-01", "id"]
a = ["1997-08-20", "2012-09-29", "2014-01-01"]
# 19730 29503 35587
#
movies.drop(index=[19730, 29503, 35587], axis=0, inplace=True)
movies["id"] = movies["id"].astype(int)
movies.rename(columns={"id": "movieId"}, inplace=True)
# # Filtering users
ratings
ratings["userId"].value_counts(ascending=True)
# filtering users who voted more than 10 times
mais_rat = ratings["userId"].value_counts() > 10
mais_rat = mais_rat[mais_rat].index
ratings = ratings.loc[ratings["userId"].isin(mais_rat), :]
ratings
# # Creating the final base with the two bases - ratings and movies
ratings_movies = ratings.merge(movies, on="movieId")
ratings_movies
ratings_movies.columns
ratings_movies.rename(
columns={
"rating": "user_rating",
"vote_average": "vote_average_movie",
"vote_count": "vote_count_movie",
},
inplace=True,
)
# getting the number of times the movie was rated
number_ava = ratings_movies.groupby("movieId")["user_rating"].count().reset_index()
number_ava.rename(columns={"user_rating": "number_of_ratings"}, inplace=True)
number_ava
# # Final data base
df = ratings_movies.merge(number_ava, on="movieId")
df
# keeping movies that have at least 10 user ratings
df = df[df["number_of_ratings"] >= 10]
df
# eliminating duplicate (userId, movieId) pairs
df.drop_duplicates(["userId", "movieId"], inplace=True)
df
df.columns
# deleting columns that are not needed
df.drop(
columns=[
"timestamp",
"belongs_to_collection",
"budget",
"homepage",
"imdb_id",
"original_language",
"original_title",
"overview",
"poster_path",
"production_companies",
"production_countries",
"release_date",
"revenue",
"runtime",
"spoken_languages",
"status",
"tagline",
"video",
],
inplace=True,
)
df.adult = df.adult.astype(bool)
df.popularity = df.popularity.astype(float)
df.info()
df.head(3)
# # Pivot
# Pivot the data so that each userId becomes a column and the user ratings become the feature values
movie_pivot = df.pivot_table(columns="userId", index="title", values="user_rating")
movie_pivot
movie_pivot.fillna(0, inplace=True)
# Since the pivot table contains mostly zeros, we convert it into a sparse matrix. Example: https://jkolb.com.br/wp-content/uploads/2017/11/Matriz-Esparsa.png
from scipy.sparse import csr_matrix
movie_sparse = csr_matrix(movie_pivot)
movie_sparse
# # Model
# the model will use the Euclidean distance calculation
from sklearn.neighbors import NearestNeighbors
model = NearestNeighbors(algorithm="brute", metric="euclidean")
model.fit(movie_sparse)
# recommendation
from numpy import random
number_of_recommendations = 3
number_ran = random.randint(movie_pivot.shape[0])  # pick a random movie row
suggestions = model.kneighbors(
    movie_pivot.iloc[number_ran, :].values.reshape(1, -1),
    number_of_recommendations,
    return_distance=False,
)
movies
suggestions
# Related movies recommendation from our model
for i in range(len(suggestions)):
movie_pivot.index[suggestions[i]]
for i in range(len(suggestions[0])):
movie = movie_pivot.index[suggestions[0][i]]
b = df.genres.loc[df["title"] == movie].reset_index()
print(f"Genres the movie: {movie}\n {b.genres[0]}")
|
# <p style="color:#694b98;
# text-align:center;
# font-size:240%;
# font-weight: bold;
# font-family:Serif;">
# World University Rankings Visualization
# ## Table of contents
# - [Overview](#overview)
# - [Import Libraries](#Libraries)
# - [Load Dataset](#Dataset)
# - [Data Preprocessing](#Preprocessing)
# - [Data Visualizing](#Visualizing)
# <div style="color:white;
# display:fill;
# border-radius:10px;
# background-color:#694b98;
# font-size:270%;
# font-weight: bold;
# height: 70px;
# width: 470px;
# margin: 50px 190px 30px 160px;
# font-family:Serif;">
# <p style="color:white;
# padding: 22px 80px;
# text-align:center;">
# Overview
#
#
#
# In this notebook, we will analyze and visualize the World University Rankings dataset. This year's QS World University Rankings include almost 1000 institutions from all around the world. Ranks range from 1 to 1000 (rank 1 is the best university for that feature) and scores range from 1 to 100 (score 100 is the best university considering all features).
#
# |Features|
# |---------|
# |World_Rank|
# |Institution|
# |Location|
# |National_Rank|
# |Quality_of_Education|
# |Alumni_Employment|
# |Quality_of_Faculty|
# |Research_Output|
# |Quality_of_Publications|
# |Influence|
# |Citations|
# |Target|
# |------|
# |Score|
#
# <div style="color:white;
# display:fill;
# border-radius:10px;
# background-color:#694b98;
# font-size:270%;
# font-weight: bold;
# height: 70px;
# width: 470px;
# margin: 50px 190px 30px 160px;
# font-family:Serif;">
# <p style="color:white;
# padding: 22px 80px;
# text-align:center;">
# Import Libraries
#
#
#
# Import libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from dash import Dash, dcc, html, Input, Output
from jupyter_dash import JupyterDash
from plotly.offline import iplot
import plotly.figure_factory as ff
pd.options.mode.chained_assignment = None
# <div style="color:white;
# display:fill;
# border-radius:10px;
# background-color:#694b98;
# font-size:270%;
# font-weight: bold;
# height: 70px;
# width: 470px;
# margin: 50px 190px 30px 160px;
# font-family:Serif;">
# <p style="color:white;
# padding: 22px 80px;
# text-align:center;">
# Load Dataset
#
#
# Load dataset
data = pd.read_csv("eighteen_nineteen_university_datasets.csv", encoding="unicode_escape")
main_df = pd.DataFrame(data)
df = main_df.copy()
df.tail(10)
df.info()
# The data info shows that 'Institution' and 'Location' are categorical. The remaining object-typed columns should be converted to a numerical data type.
#
# <div style="color:white;
# display:fill;
# border-radius:10px;
# background-color:#694b98;
# font-size:260%;
# font-weight: bold;
# height: 70px;
# width: 470px;
# margin: 50px 190px 30px 160px;
# font-family:Serif;">
# <p style="color:white;
# padding: 22px 80px;
# text-align:center;">
# Data Preprocessing
#
#
# List of columns
df.columns.to_list()
# Some columns have wrong names and need to be corrected.
df.rename(
columns={
"Quality\xa0of Education": "Quality of Education",
"Quality\xa0of Faculty": "Quality of Faculty",
},
inplace=True,
)
df.isin(["> 1000"]).sum()
df.isin(["-"]).sum()
# Some values are missing (filled with '> 1000' and '-'). As a first step, '> 1000' will be replaced with 1001 and '-' with np.nan.
# replace >1000 with 1001
# replace - with np.nan
df.replace("> 1000", "1001", inplace=True)
df.replace("-", np.nan, inplace=True)
# change type of the columns
cols = [
"Alumni Employment",
"Quality of Faculty",
"Quality of Education",
"Research Output",
"Quality Publications",
"Influence",
"Citations",
]
for cl in cols:
df[cl] = df[cl].astype(float)
df.tail(10)
# To decide how to fill the NaN values of two columns, the relation between the two features ('Quality of Education', 'Quality of Faculty') and Score is shown:
fig = make_subplots(rows=1, cols=2)
fig.add_trace(
go.Scatter(
x=df["Quality of Education"],
y=df["Score"],
mode="markers",
marker=dict(color="slateblue"),
),
1,
1,
)
fig.add_trace(
go.Scatter(
x=df["Quality of Faculty"],
y=df["Score"],
mode="markers",
marker=dict(color="slateblue"),
),
1,
2,
)
fig.update_layout(
title=dict(
text="Relation between Score and two features",
x=0.47,
y=0.90,
font_size=18,
font_color="black",
),
xaxis1=dict(title_text="Quality of Education", ticks="outside", linecolor="grey"),
yaxis1=dict(title_text="Score", ticks="outside", linecolor="grey"),
xaxis2=dict(title_text="Quality of Faculty", ticks="outside", linecolor="grey"),
yaxis2=dict(title_text="Score", ticks="outside", linecolor="grey"),
autosize=False,
width=900,
height=520,
showlegend=False,
)
fig.show()
# There are a large number of NaN values in these two columns, but because of their relation with the target it is not sensible to drop them, so let's fill the NaNs with the column maximum plus 1.
# fill NA with max+1
df["Quality of Faculty"].fillna((df["Quality of Faculty"].max()) + 1, inplace=True)
df["Quality of Education"].fillna((df["Quality of Education"].max()) + 1, inplace=True)
df.tail(10)
# <div style="color:white;
# display:fill;
# border-radius:10px;
# background-color:#694b98;
# font-size:270%;
# font-weight: bold;
# height: 70px;
# width: 470px;
# margin: 50px 190px 30px 160px;
# font-family:Serif;">
# <p style="color:white;
# padding: 22px 80px;
# text-align:center;">
# Data Visualizing
#
# Scatter plot
# To show relation between each feature and target (score).
# Relation between features and target(Score)
for col in df.drop(columns=["Institution", "Score"]).columns:
trace = go.Scatter(
x=df[col], y=df["Score"], mode="markers", marker=dict(color="slateblue")
)
layout = dict(
title=dict(text=f"{col} vs Score", x=0.48, y=0.93),
xaxis=dict(title_text=col, ticks="outside", linecolor="grey"),
yaxis=dict(title_text="Score", ticks="outside", linecolor="grey"),
autosize=False,
width=700,
height=520,
showlegend=False,
font_size=12,
font_color="black",
)
fig = dict(data=trace, layout=layout)
iplot(fig)
# Count plot
# To show number of institutions per country.
# number of universities per country
plt.figure(figsize=(10.5, 4.5), dpi=90)
sns.countplot(df, x="Location")
plt.xticks(rotation=90)
plt.title("Institution counts per country")
plt.show()
# Combination of bar and scatter plots
# To show rank of each features per world rank (1-10).
# Features for each world rank (1-10) combination of bar and scatter charts
df1 = df[0:10]
fig = make_subplots(rows=1, cols=2, shared_yaxes=True, horizontal_spacing=0.01)
fig.add_trace(
go.Bar(
y=df1["Institution"],
x=df1["Score"],
textfont_color="black",
textfont_size=14,
orientation="h",
marker=dict(
color="rgb(100, 160, 180)",
line=dict(color="rgba(50, 151, 96, 1.0)", width=1),
),
name="Score",
),
1,
1,
)
cols = [
"Quality of Education",
"Influence",
"Quality Publications",
"Research Output",
"Citations",
"Quality of Faculty",
]
for cl in cols:
fig.add_trace(
go.Scatter(
y=df1["Institution"],
x=df1[cl],
textfont_color="black",
mode="lines+markers",
textfont_size=13,
line=dict(width=3),
name=cl,
),
1,
2,
)
fig.update_layout(
title={"text": "Features for Each World Rank (1-10)", "y": 0.96, "x": 0.24},
font_color="black",
font_size=13,
yaxis=dict(showgrid=False, showline=False, showticklabels=True, domain=[0, 0.9]),
yaxis2=dict(
showgrid=False,
showline=True,
showticklabels=False,
linecolor="rgba(102, 102, 102, 0.8)",
linewidth=2,
domain=[0, 0.9],
),
xaxis=dict(
zeroline=False,
showline=False,
showticklabels=True,
showgrid=True,
domain=[0, 0.42],
),
xaxis2=dict(
zeroline=False,
showline=False,
showticklabels=True,
showgrid=True,
domain=[0.47, 1],
nticks=10,
side="top",
),
legend=dict(x=1.1, y=1.038, font_size=10),
margin=dict(l=0, r=0, t=70, b=70),
paper_bgcolor="rgb(248, 248, 255)",
plot_bgcolor="rgb(248, 248, 255)",
)
annotations = []
# Adding labels
for ydn, yd, xd in zip(df1["Citations"], df1["Score"], df1["Institution"]):
annotations.append(
dict(
xref="x1",
yref="y1",
y=xd,
x=yd + 13,
text=str(yd) + "%",
font=dict(family="Arial", size=14, color="rgb(20, 128, 160)"),
showarrow=False,
)
)
fig.update_layout(annotations=annotations, autosize=False, width=950, height=520)
fig.show()
# Box plot
# To show distribution of the ranks in top 10 countries (with most universities).
#
# Range of features per country (10 countries with the most number of universities)
fig = make_subplots(
rows=3,
cols=2,
shared_yaxes=True,
vertical_spacing=0.12,
subplot_titles=(
"Influence Grouped by country",
"Citations Grouped by country",
"Quality of Publications Grouped by country",
"Research Output Grouped by country",
"Quality of Education Grouped by country",
"Quality of Faculty Grouped by country",
),
)
df2 = df.groupby("Location").count().reset_index().filter(["Institution", "Location"])
df2 = df2.sort_values(by="Institution", ascending=False)
contry_list = df2["Location"].iloc[0:10].to_list()
for i in range(10):
df3 = df[df["Location"] == contry_list[i]]
fig.add_trace(
go.Box(x=df3["Influence"], name=contry_list[i], showlegend=True), 1, 1
)
for i in range(10):
df3 = df[df["Location"] == contry_list[i]]
fig.add_trace(
go.Box(x=df3["Citations"], name=contry_list[i], showlegend=False), 1, 2
)
for i in range(10):
df3 = df[df["Location"] == contry_list[i]]
fig.add_trace(
go.Box(x=df3["Quality Publications"], name=contry_list[i], showlegend=False),
2,
1,
)
for i in range(10):
df3 = df[df["Location"] == contry_list[i]]
fig.add_trace(
go.Box(x=df3["Research Output"], name=contry_list[i], showlegend=False), 2, 2
)
for i in range(10):
df3 = df[df["Location"] == contry_list[i]]
fig.add_trace(
go.Box(x=df3["Quality of Education"], name=contry_list[i], showlegend=False),
3,
1,
)
for i in range(10):
df3 = df[df["Location"] == contry_list[i]]
fig.add_trace(
go.Box(x=df3["Quality of Faculty"], name=contry_list[i], showlegend=False), 3, 2
)
fig.update_xaxes(
tickfont_size=11, nticks=10, showline=True, mirror=True, linecolor="gray"
)
fig.update_yaxes(tickfont_size=11, showline=True, mirror=True, linecolor="gray")
fig.update_layout(
title={"text": "Range of rank features per country", "y": 0.97, "x": 0.01},
legend={"title": "Country", "x": 1.05, "y": 1.05},
yaxis1_title="Country",
yaxis3_title="Country",
yaxis5_title="Country",
xaxis1_title="Influence",
xaxis2_title="Citations",
xaxis3_title="Publications Quality",
xaxis4_title="Research Output",
xaxis5_title="Quality of Education",
xaxis6_title="Quality of Faculty",
font_size=11,
font_color="black",
plot_bgcolor="whitesmoke",
autosize=False,
height=1000,
width=950,
)
fig.show()
# Add continent of each location
# Convert Country to Continent
import pycountry_convert as pc
def country_to_continent(country_name):
country_alpha2 = pc.country_name_to_country_alpha2(country_name)
country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
country_continent_name = pc.convert_continent_code_to_continent_name(
country_continent_code
)
return country_continent_name
df1 = df.copy()
df1["Continent"] = df1["Location"].apply(country_to_continent)
list_of_continents = df1["Continent"].unique()
list_of_continents = np.append(list_of_continents, "World")
df1.tail(10)
# Pie plot
# To show number of universities per country by feature & continent using dash plot.
# Number of universities per country by feature & continent (rank<100)
number_of_institutions = 100
app = JupyterDash(__name__)
app.layout = html.Div(
[
html.H3("Number of universities per country by feature & continent (rank<100)"),
html.H3("Select a continent:"),
dcc.RadioItems(list_of_continents, "World", id="continent", inline=True),
html.H3("Select a feature:"),
dcc.Dropdown(
[
"Citations",
"Alumni Employment",
"Quality of Faculty",
"Research Output",
"Quality Publications",
"Influence",
"Quality of Education",
],
"Influence",
id="feature",
style={"width": "50%"},
),
dcc.Graph(id="graph", style={"width": "65%"}),
]
)
@app.callback(
Output("graph", "figure"), Input("feature", "value"), Input("continent", "value")
)
def display_pie(feature_candidate, continent_candidate):
if continent_candidate != "World": # Select the countries of the continent
df2 = df1[df1["Continent"] == continent_candidate]
else:
df2 = df1 # Select all countries
df3 = df2[df2[feature_candidate] < number_of_institutions]
df4 = df3.groupby("Location").count().reset_index()
df4.loc[df4["Institution"] < 2, "Location"] = "Other countries"
fig = go.Figure(
data=[
go.Pie(
labels=df4["Location"],
hoverinfo="label+percent",
values=df4["Institution"],
textinfo="label+percent",
)
]
)
fig.update_traces(marker=dict(colors=px.colors.sequential.Agsunset), hole=0.45)
fig.update_layout(
legend_title="Country",
margin={"r": 0, "t": 30, "l": 0, "b": 50},
annotations=[
dict(text=feature_candidate, x=0.5, y=0.52, font_size=17, showarrow=False),
dict(
text=continent_candidate, x=0.5, y=0.45, font_size=16, showarrow=False
),
],
autosize=False,
height=410,
width=650,
)
return fig
app.run_server(debug=False, mode="inline")
# Dash plots are dynamic and great for visualizing the dataset, but they are not easy to display on Kaggle; you can run the code on your own system to see the result. Below is a picture of the output:
# 
# Map plots
# To show number of universities per country.
# Number of Universities per Country
df1 = df.groupby("Location").count().reset_index().filter(["Institution", "Location"])
fig = go.Figure(
data=go.Choropleth(
locations=df1["Location"],
z=df1["Institution"],
locationmode="country names",
colorscale="matter",
colorbar_title="Count",
)
)
fig.update_layout(
title={"text": "Number of Universities per Country", "y": 0.92, "x": 0.02},
font_color="black",
font_size=13,
margin={"r": 0, "t": 100, "l": 0, "b": 0},
)
fig.show()
# To show best score of each country.
# Best Score per Country
df1 = df.groupby(["Location"]).max().reset_index()
fig = go.Figure(
data=go.Choropleth(
locations=df1["Location"],
z=df1["Score"],
locationmode="country names",
colorscale="matter",
colorbar_title="Score(%)",
)
)
fig.update_layout(
title={"text": "Best Score per Country", "y": 0.92, "x": 0.01},
font_color="black",
font_size=13,
margin={"r": 0, "t": 100, "l": 0, "b": 0},
)
fig.show()
# To show average score of each country.
# Average Score per Country
df1 = df.groupby(["Location"]).mean().reset_index()
fig = go.Figure(
data=go.Choropleth(
locations=df1["Location"],
z=df1["Score"],
locationmode="country names",
colorscale="matter",
colorbar_title="Score(%)",
)
)
fig.update_layout(
title={"text": "Average Score per Country", "y": 0.92, "x": 0.01},
font_color="black",
font_size=13,
margin={"r": 0, "t": 100, "l": 0, "b": 0},
)
fig.show()
# To show score of each country by world rank.
# Score per Country by World Rank
fig = px.choropleth(
df,
locations="Location",
color="Score",
hover_name="Location",
animation_frame="World Rank",
locationmode="country names",
color_continuous_scale=px.colors.sequential.matter,
)
fig.update_layout(
title={"text": "Score per Country by World Rank", "y": 0.92, "x": 0.03},
font_color="black",
font_size=13,
margin={"r": 0, "t": 100, "l": 0, "b": 0},
)
fig.show()
# To show average rank of each country by features.
# Average Rank per Country by Features with dash
df1 = df.groupby(["Location"]).mean().reset_index()
app = JupyterDash(__name__)
app.layout = html.Div(
[
html.H2("Average Rank per Country by Feature"),
html.H3("Select a feature:"),
dcc.RadioItems(
id="feature",
options=[
"Citations",
"Alumni Employment",
"Quality of Faculty",
"Research Output",
"Quality Publications",
"Influence",
"Quality of Education",
],
value="Influence",
inline=True,
),
dcc.Graph(id="graph"),
]
)
@app.callback(Output("graph", "figure"), Input("feature", "value"))
def display_choropleth(candidate_feature):
fig = px.choropleth(
df1.round(),
locations="Location",
color=candidate_feature,
locationmode="country names",
color_continuous_scale=px.colors.sequential.matter,
range_color=[0, 1001],
)
fig.update_layout(
margin={"r": 0, "t": 50, "l": 0, "b": 0}, autosize=False, height=510, width=830
)
return fig
app.run_server(debug=True, mode="inline")
# Below is an example picture of the output for the above dash plot:
# 
# To show best rank of each country by features.
# Best Rank per Country by Features with dash
df1 = df.groupby(["Location"]).min().reset_index()
app = JupyterDash(__name__)
app.layout = html.Div(
[
html.H2("Best Rank per Country by Feature"),
html.H3("Select a feature:"),
dcc.RadioItems(
id="feature",
options=[
"Citations",
"Alumni Employment",
"Quality of Faculty",
"Research Output",
"Quality Publications",
"Influence",
"Quality of Education",
],
value="Influence",
inline=True,
),
dcc.Graph(id="graph"),
]
)
@app.callback(Output("graph", "figure"), Input("feature", "value"))
def display_choropleth(candidate_feature):
fig = px.choropleth(
df1.round(),
locations="Location",
color=candidate_feature,
locationmode="country names",
color_continuous_scale=px.colors.sequential.matter,
range_color=[0, 1001],
)
fig.update_layout(
margin={"r": 0, "t": 50, "l": 0, "b": 0}, autosize=False, height=510, width=830
)
return fig
app.run_server(debug=True, mode="inline")
# Below is an example picture of the output for the above dash plot:
# 
# Correlation between features
# Features Correlation
dfcor = df.corr()
x = list(dfcor.columns)
y = list(dfcor.index)
z = np.array(dfcor)
fig = ff.create_annotated_heatmap(
z,
x=x,
y=y,
annotation_text=np.around(z, decimals=2),
hoverinfo="z",
colorscale=px.colors.diverging.RdBu,
zmin=-1,
zmax=1,
textfont={"size": 6},
showscale=True,
)
fig.update_layout(
title={"text": "Features Correlation", "y": 0.94, "x": 0.01},
yaxis={"tickfont_size": 14},
xaxis={"tickfont_size": 14},
font_color="black",
font_size=14,
margin={"r": 0, "t": 180, "l": 0, "b": 50},
autosize=False,
width=800,
height=570,
)
for i in range(len(fig.layout.annotations)):
fig.layout.annotations[i].font.size = 12
fig.show()
# The features Research_Output, Quality_Publications, Influence and Citations are closely correlated with each other.
# Scatter matrix
# To show plot of multiple features considering world rank.
df1 = df.loc[:, ["Citations", "Research Output", "Influence"]]
df1["index"] = np.arange(1, len(df1) + 1)
# scatter matrix
fig = ff.create_scatterplotmatrix(
df1, diag="scatter", index="index", colormap="RdBu", height=700, width=800
)
fig.show()
# 3D plot
# To show correlation between multiple features considering world rank.
#
# 3D Scatter
# Correlation between Research Output & Influence & Citations
fig = plt.figure(figsize=(8.5, 8.5))
ax = fig.add_subplot(111, projection="3d")
p1 = ax.scatter(
df["Research Output"], df["Influence"], df["Citations"], c=df["World Rank"]
)
fig.colorbar(p1, shrink=0.5, label="World Rank")
ax.set_xlabel("Research Output")
ax.set_ylabel("Influence")
ax.set_zlabel("Citations")
ax.xaxis.pane.set_facecolor("white")
ax.yaxis.pane.set_facecolor("white")
ax.zaxis.pane.set_facecolor("white")
ax.set_title(
"Correlation between Research Output & Influence & Citations",
fontdict={"fontsize": 12},
)
fig.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Better Understanding the Data
import matplotlib.pyplot as plt
plt.style.use("ggplot")
import seaborn as sns
import plotly.express as px
# ## MVPS Data
mvps = pd.read_csv(
"../input/19912021-nba-stats/mvps.csv", encoding="latin-1", delimiter=";"
)
mvps
mvps.describe()
print("The dimension of the mvp dataset is:", mvps.shape)
mvps.info()
mvps.isnull().sum()
sns_plot = sns.clustermap(mvps.corr(), cmap="rocket_r")
corr_mat_train = mvps.corr()
corr_mat_test = mvps.corr()
threshold = 0.2
corr_threshold_train = corr_mat_train[
(corr_mat_train > threshold) | (corr_mat_train < -threshold)
]
corr_threshold_test = corr_mat_test[
(corr_mat_test > threshold) | (corr_mat_test < -threshold)
]
fig, axes = plt.subplots(1, 2, figsize=(22, 9))
sns.heatmap(
corr_threshold_train,
annot=True,
cmap="seismic",
fmt=".2f",
linewidths=0.5,
cbar_kws={"shrink": 0.5},
annot_kws={"size": 8},
ax=axes[0],
).set_title("Correlations Among Features (in Train)")
sns.heatmap(
corr_threshold_test,
annot=True,
cmap="seismic",
fmt=".2f",
linewidths=0.5,
cbar_kws={"shrink": 0.5},
annot_kws={"size": 8},
ax=axes[1],
).set_title("Correlations Among Features (in Test)")
import pandas_profiling as pp
# generating pandas profiling report
profile = pp.ProfileReport(mvps, title="EDA made easy", explorative=True)
# printing using HTML
profile.to_notebook_iframe()
# ## Players Data
players = pd.read_csv(
    "../input/19912021-nba-stats/players.csv", encoding="latin-1", delimiter=";"
)
players.describe()
players.isnull().sum()
corr_mat_train = mvps.corr()
corr_mat_test = mvps.corr()
threshold = 0.2
corr_threshold_train = corr_mat_train[
(corr_mat_train > threshold) | (corr_mat_train < -threshold)
]
corr_threshold_test = corr_mat_test[
(corr_mat_test > threshold) | (corr_mat_test < -threshold)
]
fig, axes = plt.subplots(1, 2, figsize=(22, 9))
sns.heatmap(
corr_threshold_train,
annot=True,
cmap="seismic",
fmt=".2f",
linewidths=0.5,
cbar_kws={"shrink": 0.5},
annot_kws={"size": 8},
ax=axes[0],
).set_title("Correlations Among Features (in Train)")
sns.heatmap(
corr_threshold_test,
annot=True,
cmap="seismic",
fmt=".2f",
linewidths=0.5,
cbar_kws={"shrink": 0.5},
annot_kws={"size": 8},
ax=axes[1],
).set_title("Correlations Among Features (in Test)")
# ## Teams
teams = pd.read_csv(
"../input/19912021-nba-stats/teams.csv", encoding="latin-1", delimiter=";"
)
teams.head()
# ## Cleaning the Data
# deleting the Rk variable
del players["Rk"]
# removing the * from the data names
players["Player"] = players["Player"].str.replace("*", "", regex=False)
mvps["Player"] = mvps["Player"].str.replace("*", "", regex=False)
teams["Team"] = teams["Team"].str.replace("*", "", regex=False)
def single_team(df):
if df.shape[0] == 1:
return df
else:
row = df[df["Tm"] == "TOT"]
row["Tm"] = df.iloc[-1, :]["Tm"]
return row
players = players.groupby(["Player", "Year"]).apply(single_team)
players.head()
players.index = players.index.droplevel()
# attempting to combine the two datasets
combined = players.merge(mvps, how="outer", on=["Player", "Year"])
combined.isnull().sum()
combined[["Pts Won", "Pts Max", "Share"]] = combined[
["Pts Won", "Pts Max", "Share"]
].fillna(0)
combined
combined.head()
teams.head()
# removing divisions from team
teams = teams[~teams["W"].str.contains("Division")].copy()
sorted(combined["Tm"].unique())
# combining teams
full = combined.merge(teams, how="outer", on=["Team", "Year"])
full
mvps["PTS"].value_counts
# limiting the variables (keeping a full copy for the histograms below)
mvps_full = mvps.copy()
mvps = mvps[["Player", "Year", "Pts Won", "Pts Max", "Share"]]
mvps.head()
# # Creating Visualizations
import matplotlib.pyplot as plt
plt.hist(mvps["Pts Won"])
plt.xlabel("Pts Won")
plt.ylabel("Count")
plt.grid()
plt.show()
import matplotlib.pyplot as plt
plt.hist(mvps["TRB"])
plt.xlabel("TRB")
plt.ylabel("Count")
plt.grid()
plt.show()
import matplotlib.pyplot as plt
plt.hist(mvps["AST"])
plt.xlabel("AST")
plt.ylabel("Count")
plt.grid()
plt.show()
# # MVP Player Data
player_mvps = pd.read_csv(
"../input/19912021-nba-stats/player_mvp_stats.csv",
encoding="latin-1",
delimiter=";",
)
player_mvps.head()
import matplotlib.pyplot as plt
plt.hist(mvps["PTS"])
plt.xlabel("PTS")
plt.ylabel("Count")
plt.grid()
plt.show()
# ## Loading the player stats
players = pd.read_csv(
"../input/19912021-nba-stats/players.csv", encoding="latin-1", delimiter=";"
)
players
# deleting the unnecessary columns
del players["Rk"]
# removing * from data
players["Player"] = players["Player"].str.replace("*", "", regex=False)
# defining the function to keep a single row per player-year (using the TOT row when a player played for multiple teams)
def player_team(df):
if df.shape[0] == 1:
return df
else:
row = df[df["Tm"] == "TOT"]
row["Tm"] = df.iloc[-1, :]["Tm"]
return row
players = players.groupby(["Player", "Year"]).apply(player_team)
players.index = players.index.droplevel()
players.index = players.index.droplevel()
# checking player_team function
players[players["Player"] == "Ja Morant"]
# ## Combining players and MVP to have all information in one area
# attempting to use .merge
combined = players.merge(mvps, how="outer", on=["Player", "Year"])
combined.head()
# this will use the MVP Pts and eliminate those who received 0 points
combined[combined["Pts Won"] > 0]
combined
# using fillna to fill missing values with 0
combined = combined.fillna(0)
# this will use the MVP Pts and eliminate those who received 0 points
combined = combined[combined["Pts Won"] > 0]
combined
combined[["Pts Won", "Pts Max", "Share"]] = combined[
["Pts Won", "Pts Max", "Share"]
].fillna(0)
combined
# loading the teams csv
teams = pd.read_csv(
"../input/19912021-nba-stats/teams.csv", encoding="latin-1", delimiter=";"
)
teams.head()
teams["Team"] = teams["Team"].str.replace("*", "", regex=False)
teams = teams[~teams["W"].str.contains("Division")].copy()
nicknames = {}
with open("../input/19912021-nba-stats/nicknames.csv") as f:
lines = f.readlines()
for line in lines[1:]:
abbrev, name = line.replace("\n", "").split(";")
nicknames[abbrev] = name
sorted(teams["Team"].unique())
sorted(combined["Tm"].unique())
combined["Team"] = combined["Tm"].map(nicknames)
combined
full = combined.merge(teams, how="outer", on=["Team", "Year"])
full
|
# # List of World Cities by Population Density
# > List of Metropolitan Areas by Population Density: Top 50 Cities
# This is a list of cities worldwide by population density. The population, population density and land area figures refer to the entire city proper (the defined boundary or city limits of each city), not to the greater metropolitan or urban area, nor to particular districts. Population density is the average number of people living per square kilometre (or per square mile).
import pandas as pd
import numpy as np
# ## Importing the data
df = pd.read_csv(
"/kaggle/input/list-of-world-cities-by-population-density/List of world cities by population density.csv"
)
df.shape
df.sample(3)
# ## Data Cleaning
df.drop(columns=["Unnamed: 0"], inplace=True)
df.columns
df["Population"] = df["Population"].apply(lambda x: x.split("[")[0])
df["Area (km²)"] = df["Area (km²)"].apply(lambda x: x.split("[")[0])
df.sample(3)
df.isnull().sum()
# There are no missing values so we can proceed.
df.info()
df.describe()
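# A quick hedged consistency check: density should roughly equal population / area
# (this assumes the raw columns may still contain comma thousands separators).
pop = pd.to_numeric(df["Population"].astype(str).str.replace(",", ""), errors="coerce")
area = pd.to_numeric(df["Area (km²)"].astype(str).str.replace(",", ""), errors="coerce")
dens = pd.to_numeric(df["Density (/km²)"].astype(str).str.replace(",", ""), errors="coerce")
print((pop / area - dens).abs().describe())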
# ## Exploratory Data Analysis (EDA) 📊
import matplotlib.pyplot as plt
import seaborn as sns
df.columns
df["Country"].value_counts().plot(kind="bar")
plt.figure(figsize=(15, 6))
sns.scatterplot(x=df["Area (km²)"], y=df["Population"], hue=df["Country"])
plt.xticks(rotation=45)
plt.show()
plt.figure(figsize=(15, 6))
sns.scatterplot(x=df["Area (km²)"], y=df["Density (/km²)"], hue=df["Country"])
plt.xticks(rotation=45)
plt.show()
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Flatten
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.layers import Bidirectional
from keras.layers import TimeDistributed
n_timesteps = 10
data = pd.read_csv(
"/kaggle/input/electronics-session-features-2/electronics_Session_Features(25).csv"
)
print(data.columns)
cols = data.columns.tolist()
# swap the last two columns so that "Purchase" becomes the final column
cols = cols[:-2] + [cols[-1]] + [cols[-2]]
# select the columns in the desired order and assign back to the DataFrame
data = data[cols]
data = data.sample(frac=0.1)
y = data["Purchase"]
y = pd.get_dummies(y).values
y
y.shape
y
data.head()
y.shape
SelectFeatures = data.loc[
:,
[
"TotalEventsInSession",
"interactionTime",
"NumTimesCartedInSession",
"NumTimesViewedInSession",
"maxPrice",
"minPrice",
"AvgAmtCartedInSession",
"AvgAmtViewedInSession",
"NumCategoriesCartedInSession",
"NumCategoriesViewedInSession",
"Purchase",
],
]
SelectFeatures
data = data.fillna(0)
data
import imblearn
import collections
counter = collections.Counter(data["Purchase"].values)
from imblearn.under_sampling import RandomUnderSampler
data, label = RandomUnderSampler(sampling_strategy=0.5, random_state=42).fit_resample(
data.iloc[:, :-1], data.iloc[:, -1]
)
data
print(counter)
# SMOTE
# from imblearn.over_sampling import SMOTE
# oversample = SMOTE()
# data_resampled, label_resampled = oversample.fit_resample(data.iloc[:,:-1], data.iloc[:,-1])
# oversample = SMOTE()
# data.columns = data.columns.astype(str)
# data,label=oversample.fit_resample(data.iloc[:,:-1],data.iloc[:,-1])
counter2 = collections.Counter(label)
print(counter2)
data.to_csv("smotedata.csv")
label.to_csv("smotelabel.csv")
values = data.values
values.shape
# Balancing Data
purchase = SelectFeatures[SelectFeatures["Purchase"] == 1]
not_purchase = SelectFeatures[SelectFeatures["Purchase"] == 0]
not_purchase.shape
# drop most of the non-purchase records so that purchases make up a larger share of the sample
not_purchase = not_purchase.sample(n=100000)
df = pd.concat([purchase, not_purchase])  # DataFrame.append is deprecated in newer pandas
df.to_csv("samplingdata.csv")
# Train Test Split
X = df.iloc[:, :-1].values
y = df.iloc[:, -1]
y = pd.get_dummies(y).values
y
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.15)
X_test, y_test
# reshape input to be 3D [samples, timesteps, features]
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# Parameter Tuning using GridSearchCV
# Try different batch_sizes and epochs
# skip this cell first
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
# Model = KerasClassifier(build_fn = c_model)
# batch_sizes = [50, 100, 200, 500, 1000, 2000]
# epochs = [5,10,50]
# parameters = {'batch_size': batch_sizes, 'epochs': epochs}
# clf = GridSearchCV(model, parameters)
# clf.fit(X_train, y_train)
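# A hedged, runnable variant of the commented-out search above (the parameter values
# are illustrative assumptions; it reuses c_model, which is defined in the next cell):
# model_bs = KerasClassifier(build_fn=c_model, optimizer="RMSprop")
# parameters_bs = {"batch_size": [100, 500, 1000], "epochs": [5, 10]}
# clf_bs = GridSearchCV(model_bs, parameters_bs, cv=3)
# clf_bs.fit(X_train, y_train)
# print(clf_bs.best_score_, clf_bs.best_params_)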
# #
# Start here
# Tune optimizer
def c_model(optimizer):
model = Sequential()
model.add(Bidirectional(LSTM(40, return_sequences=True), input_shape=(1, 10)))
model.add(Dense(20, activation="relu"))
model.add(Dense(10, activation="sigmoid"))
model.add(Dense(5, activation="tanh"))
model.add(TimeDistributed(Dense(1)))
model.add(Flatten())
model.add(Dense(2, activation="softmax"))
# Compile model
model.compile(
loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"]
)
return model
model = KerasClassifier(build_fn=c_model, epochs=20, batch_size=100)
parameters = {
"optimizer": [
"SGD",
"RMSprop",
]
}
clf = GridSearchCV(model, parameters, cv=3)
grid_result = clf.fit(X_train, y_train)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_["mean_test_score"]
stds = grid_result.cv_results_["std_test_score"]
params = grid_result.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# Running this grid search prints the mean and standard deviation of the cross-validated accuracy for each optimizer.
X = df.iloc[:, :-1]
Y = df.iloc[:, -1]
# We could stop here; the summary above covers the optimizer tuning for this step
# define 10-fold cross validation test harness
from sklearn.model_selection import StratifiedKFold

# reshape the features to 3D [samples, timesteps, features] and one-hot encode the labels
X_cv = X.values.reshape((X.shape[0], 1, X.shape[1]))
Y_cv = pd.get_dummies(Y).values
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
cvscores = []
for train, test in kfold.split(X, Y):
    # create model
    model = Sequential()
    model.add(
        Bidirectional(LSTM(20, return_sequences=True), input_shape=(1, X.shape[1]))
    )
    model.add(Dense(12, activation="relu"))
    model.add(Dense(8, activation="relu"))
    model.add(TimeDistributed(Dense(1)))
    model.add(Flatten())
    model.add(Dense(2, activation="softmax"))
    # Compile model
    model.compile(loss="categorical_crossentropy", metrics=["accuracy"])
    # Fit the model
    model.fit(X_cv[train], Y_cv[train], epochs=10, batch_size=100, verbose=0)
    # evaluate the model
    scores = model.evaluate(X_cv[test], Y_cv[test], verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    cvscores.append(scores[1] * 100)  # try different hyperparameters
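# Summarize the cross-validation scores collected above (mean and standard deviation
# across the folds).
print("CV accuracy: %.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))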
# re-split and reshape the network input to 3D [samples, timesteps, features]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.15)
X_train = X_train.values.reshape((X_train.shape[0], 1, X_train.shape[1]))
X_test = X_test.values.reshape((X_test.shape[0], 1, X_test.shape[1]))
# fit network
history = model.fit(
    X_train,
    y_train,
    epochs=10,
    batch_size=100,
    validation_data=(X_test, y_test),
    verbose=2,
    shuffle=False,
)
# plot history
from matplotlib import pyplot
pyplot.plot(history.history["loss"], label="train")
pyplot.plot(history.history["val_loss"], label="test")
pyplot.legend()
pyplot.show()
|
# ## Exploratory Data Analysis | Reinforcing the Skills
# Let's import the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
# Let's load the data set from the current directory and turn it into a data frame assigned to the variable df. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
# Let's display the first 5 observations of the data frame.
df.head()  # head() shows the first five rows.
# Let's display how many features and how many observations the data frame consists of.
oznitelik = df.shape[1]
print("Number of features in the data frame: ", oznitelik)
gozlem_sayisi = df.shape[0]
print("Number of observations in the data frame: ", gozlem_sayisi)
# Let's display the data types of the variables in the data frame and the memory usage.
df.info()
# Let's display the basic descriptive statistics of the numerical variables in the data frame.
#
istatistik_degerler = df.describe()
print(istatistik_degerler)
# Using the standard deviation and mean values, let's reason about which variables have how much variance.
# Let's observe how many missing values each feature in the data frame has.
df_eksik_deger = (
    df.isnull().sum()
)  # counts how many missing values each variable in the data frame has.
toplam_eksik_deger = (
    df_eksik_deger.sum()
)  # counts how many missing values there are in total in the data frame.
print("Which variables have how many missing values?")
print(df_eksik_deger)
print("\nTotal number of missing values: ", toplam_eksik_deger)
# However, this method cannot catch every kind of missing value. Whoever collected the data may have entered a blank character or text such as "empty" or "none" for missing entries.
# We should confirm that the missing data really amounts to what the function reports. The most common way to do this is to display the unique values of each variable.
print("\nUnique values:")
print("sepal_length ---> ", df["sepal_length"].unique(), "\n")
print("sepal_width ---> ", df["sepal_width"].unique(), "\n")
print("petal_length ---> ", df["petal_length"].unique(), "\n")
print("petal_width ---> ", df["petal_width"].unique(), "\n")
# As seen above, the unique values of the variables look normal.
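# If placeholder strings had appeared among the unique values, we could count them explicitly before treating
# the data as complete. A small sketch with hypothetical placeholder values (none were found above):
placeholder_values = ["", " ", "-", "?", "none"]
print(df.isin(placeholder_values).sum())  # per-column count of placeholder entries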
# Let's draw a correlation matrix to show whether there is correlation between the numerical variables, and reason about the correlation coefficients.
# Which two variables have the strongest positive relationship?
korelasyon_matris = df.corr()
plt.figure(figsize=(5, 5))
sns.heatmap(korelasyon_matris, annot=True)
plt.show()
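# A quick programmatic check of the strongest positive pair (a small sketch; the non-numeric species column is dropped first):
corr_pairs = df.drop(columns=["species"]).corr().stack()
corr_pairs = corr_pairs[corr_pairs < 1.0]  # remove the diagonal self-correlations
print(corr_pairs.idxmax(), round(corr_pairs.max(), 3))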
# Correlation coefficients take values in the range -1 < r < 1.
# Values close to -1 indicate negative correlation, values close to 1 indicate positive correlation.
# Two positively correlated variables increase together or decrease together.
# For two negatively correlated variables, as one increases the other decreases.
# A value close to 0 means there is no relationship between the two variables.
# Strongest positive relationship: petal_length - petal_width
# To read the correlation coefficients more easily, let's draw a heat map.
korelasyon = df.corr()
sns.heatmap(korelasyon)
plt.show()
# The difference from the correlation matrix above is that no numeric values are shown on it. To display the values we pass
# annot=True to the heatmap() function, as above; here we do not.
# Let's display the unique values of our target variable "species".
df["species"].unique()  # unique values are found with the unique() function.
# Let's display how many unique values our target variable "species" contains.
benzersiz = df["species"].unique().size  # the size attribute gives the number of elements.
print("The target variable has {} unique values.".format(benzersiz))
# We can see that the sepal_width and sepal_length variables in the data frame are continuous. Let's first use a scatterplot to visualize these two continuous variables.
sns.scatterplot(x="sepal_width", y="sepal_length", data=df, color="purple")
plt.show()
# x is the feature plotted on the x axis of the chart, y is the feature plotted on the y axis.
# To examine the same two variables from a different angle, together with their frequencies, let's visualize them using a jointplot.
sns.jointplot(x="sepal_width", y="sepal_length", kind="hex", data=df)
plt.show()
# Let's visualize the same two variables with a scatterplot again, but this time broken down by the target variable via the hue parameter.
# Can a clustering across the 3 different colors be made with the sepal variables? Let's think about how distinguishable they are.
sns.scatterplot(x="sepal_width", y="sepal_length", hue="species", data=df)
plt.show()
# Let's use the value_counts() function to check how evenly our data frame is distributed.
df.value_counts()
# As seen, in the first feature only the last value is slightly more frequent than the others; the remaining values are all close to each other, so it can be called balanced.
# In the second feature all the values are close to each other, so we can say it has a balanced distribution.
# In the third feature the values are not all close to each other; while some values are close, others are far apart, so we cannot say the distribution is balanced.
# In the fourth feature the values are all very close to each other, so we can say it has a balanced distribution.
# Ranking the distributions by balance ---> petal.width > sepal.width > sepal.length > petal.length
# Let's draw a violin plot to examine the distribution of the sepal_width variable.
# What does this distribution tell us, can we say it is a normal distribution?
sns.violinplot(y="sepal_width", data=df, color="green")
plt.show()
# A violin plot is one of the most effective ways to visualize the distribution and probability density of the data.
# The counts toward the maximum and the minimum are distributed roughly symmetrically, so we can say the distribution looks approximately normal.
# To understand it better, let's draw a distplot of sepal_width.
sns.histplot(df["sepal_width"])
plt.show()
# The distplot()/histplot() method is the easiest way to take a quick look at a univariate distribution.
# From the density we can tell that flowers with a sepal width of 3.0 are the most frequent.
# Let's visualize three separate violin plots, one for each flower species, over the distribution of sepal_length in a single line.
sns.violinplot(x="species", y="sepal_length", data=df)
plt.show()
# How many observations of each flower species does our data frame contain?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually let's pass the species column to sns.countplot().
sns.countplot(x="species", data=df)
plt.show()
# Let's visualize sepal_length and sepal_width with sns.jointplot and examine the distribution and the regions where its frequency is high.
sns.jointplot(data=df, x="sepal_length", y="sepal_width")
plt.show()
# A joint plot combines a scatter plot with histograms and gives us detailed information about bivariate distributions.
# We can observe that sepal_length is concentrated between 5.5 and 6.5, so there are more flowers in that range.
# We can also observe that sepal_width is concentrated around 3.0; flowers with a sepal width of 3 are the most frequent.
# Let's add the kind="kde" parameter to the previous visualization, so the distribution turns from a dotted representation into a density-focused one.
sns.jointplot(data=df, x="sepal_length", y="sepal_width", kind="kde", color="brown")
plt.show()
# Let's plot the distributions of petal_length and petal_width with a scatterplot.
sns.scatterplot(x="petal_length", y="petal_width", data=df, color="purple")
plt.show()
# Let's add a third dimension to the same visualization with the hue="species" parameter.
sns.scatterplot(x="petal_length", y="petal_width", data=df, hue="species")
plt.show()
# Let's implement the sns.lmplot() visualization with petal_length and petal_width. What kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(x="petal_length", y="petal_width", data=df)
plt.show()
# To reinforce the answer to this question, let's print the correlation coefficient between the two variables.
df[["petal_length", "petal_width"]].corr()
# Let's create a new total_length feature by adding petal_length and sepal_length.
df["total_length"] = df["petal_length"] + df["sepal_length"]
print("total_length")
df["total_length"]
# Let's print the mean of total_length.
ortalama = df["total_length"].mean()
print("Mean: ", ortalama)
# Let's print the standard deviation of total_length.
std = df["total_length"].std()
print("Standard deviation: ", std)
# Let's print the maximum value of sepal_length.
max_value = df["sepal_length"].max()  # renamed to avoid shadowing the built-in max()
print("Maximum value: ", max_value)
# Let's print the observations where sepal_length is greater than 5.5 and the species is setosa.
df[(df["sepal_length"] > 5.5) & (df["species"] == "setosa")]
# Let's print only the sepal_length and sepal_width variables (and their values) of the observations where petal_length is less than 5 and the species is virginica.
df[(df["petal_length"] < 5) & (df["species"] == "virginica")][
["sepal_length", "sepal_width"]
]
# Let's group by our target variable species and display the mean of each variable.
grup = df.groupby("species")
ortalama = grup.mean()
print(ortalama)
# Let's group by our target variable species and print only the standard deviation values of the petal_length variable.
grup = df.groupby("species")
std = grup["petal_length"].std()
print(std)
|
# # Introduction
# The task is to predict the library from the question title on StackOverflow. To solve this task I fine-tune pretrained transformer-based models such as BERT (or some of its variations).
# P.S. If you have any issues with plots on GitHub please [check this notebook on Kaggle](https://www.kaggle.com/code/xyinspired/framework-prediction-stackoverflow/notebook).
# # Imports
from lets_plot import *
from transformers import (
DistilBertTokenizer,
DistilBertForSequenceClassification,
AdamW,
get_linear_schedule_with_warmup,
BertTokenizer,
BertForSequenceClassification,
)
from torch.utils.data import (
TensorDataset,
random_split,
DataLoader,
RandomSampler,
SequentialSampler,
)
from sklearn import metrics
import torch.nn.functional as F
import numpy as np
import random
import pandas as pd
import os
import torch
np.random.seed(42)
LetsPlot.setup_html()
# # SetUp
if torch.cuda.is_available():
device = torch.device("cuda")
print(f"Using GPU : {torch.cuda.get_device_name(0)}")
else:
device = torch.device("cpu")
print(f"Using CPU")
# # Data
# ## Exploration
path_to_train_data = "/kaggle/input/stackoverlow-data/train.csv"
path_to_test_data = "/kaggle/input/stackoverlow-data/test.csv"
train_data = pd.read_csv(path_to_train_data)
test_data = pd.read_csv(path_to_test_data)
print(train_data.shape)
train_data.head()
# As we can see there are no null values in our train dataset. We can also notice that some questions share the same title, and in total we have 24 libraries (classes).
print(f"Null values:\n{train_data.isna().sum()}")
print(f"Unique:\n{train_data.nunique()}")
print(test_data.shape)
test_data.head()
label_to_id = {k: v for v, k in enumerate(set(train_data.lib))}
id_to_label = {v: k for k, v in label_to_id.items()}
label_to_id
frequency = pd.DataFrame(
{
"Library": list(label_to_id.keys()),
"Count": [
len(train_data[train_data.lib == lib]) for lib in list(label_to_id.keys())
],
}
)
(
ggplot(
frequency,
aes(x=frequency.Library, weight=frequency.Count, fill=frequency.Library),
)
+ geom_bar()
+ labs(x="Library", y="Count")
)
# ## Preprocessing
def extract_label_vector(label, total_labels):
labels = np.zeros(total_labels)
labels[label] = 1
return labels.tolist()
train_data.lib = train_data.lib.map(lambda x: label_to_id[x])
train_data["labels"] = train_data.lib.map(
lambda x: extract_label_vector(x, len(label_to_id.keys()))
)
train_data.head(10)
text_data = train_data.title.values
labels_data = list(train_data.labels)
# model_name = 'distilbert-base-uncased'
model_name = "bert-base-cased"
# ## Tokenizing text data
# tokenizer = DistilBertTokenizer.from_pretrained(model_name)
tokenizer = BertTokenizer.from_pretrained(model_name)
max_len = np.zeros(len(text_data))
for i in range(len(text_data)):
input_ids = tokenizer.encode(text_data[i], add_special_tokens=True)
max_len[i] = len(input_ids)
print("Max length: ", max_len.max())
input_ids = []
attention_masks = []
for text in text_data:
encoded_dict = tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=128,
        padding="max_length",
truncation=True,
return_attention_mask=True,
return_tensors="pt",
)
input_ids.append(encoded_dict["input_ids"])
attention_masks.append(encoded_dict["attention_mask"])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels_tensor = torch.tensor(labels_data)
val_size = 0.10
dataset = TensorDataset(input_ids, attention_masks, labels_tensor)
train_dataset, val_dataset = random_split(dataset, [1 - val_size, val_size])
print(f"Train set : {len(train_dataset)}")
print(f"Validation set : {len(val_dataset)}")
batch_size = 128
train_dataloader = DataLoader(
train_dataset, sampler=RandomSampler(train_dataset), batch_size=batch_size
)
validation_dataloader = DataLoader(
val_dataset, sampler=SequentialSampler(val_dataset), batch_size=batch_size
)
# model = DistilBertForSequenceClassification.from_pretrained(
# model_name,
# problem_type="multi_class_classification",
# num_labels = len(label_to_id.keys()),
# output_attentions = False,
# output_hidden_states = False,
# )
model = BertForSequenceClassification.from_pretrained(
model_name,
problem_type="multi_class_classification",
num_labels=len(label_to_id.keys()),
output_attentions=False,
output_hidden_states=False,
)
model.to(device) # fix to device
# We fine-tune the model rather than training it from scratch, so we don't need many epochs to avoid overfitting (or we can use early stopping).
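# A minimal early-stopping sketch (an assumption, not part of the training loop below): stop once the
# validation loss has not improved for `patience` consecutive epochs. Usage inside the epoch loop would be
# something like `if stopper.should_stop(avg_val_loss): break`.
class EarlyStopper:
    def __init__(self, patience=2):
        self.patience = patience
        self.best_loss = float("inf")
        self.bad_epochs = 0
    def should_stop(self, val_loss):
        # reset the counter whenever the validation loss improves
        if val_loss < self.best_loss:
            self.best_loss, self.bad_epochs = val_loss, 0
            return False
        self.bad_epochs += 1
        return self.bad_epochs >= self.patience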
learning_rate = 2e-4
epochs = 10
optimizer = AdamW(model.parameters(), lr=learning_rate)
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=2, num_training_steps=total_steps
)
# # Training and Validation
# def flat_accuracy(preds, labels):
# print(f'got preds : {preds} with shape {preds.shape}')
# print(f'got lables : {labels} with shape {labels.shape}')
# pred_flat = np.argmax(preds, axis=1).flatten()
# labels_flat = np.argmax(labels).flatten()
# print(f'here')
# return sum(pred_flat == labels_flat) / len(labels_flat)
def compute_f1_macro(logits, labels):
    preds = torch.argmax(F.softmax(logits, dim=1), dim=1)
    true = torch.argmax(labels, dim=1)
    return metrics.f1_score(true, preds, average="macro")
def compute_f1_micro(logits, labels):
    preds = torch.argmax(F.softmax(logits, dim=1), dim=1)
    true = torch.argmax(labels, dim=1)
    return metrics.f1_score(true, preds, average="micro")
def compute_precision(logits, labels):
    preds = torch.argmax(F.softmax(logits, dim=1), dim=1)
    true = torch.argmax(labels, dim=1)
    return metrics.precision_score(true.numpy(), preds.numpy(), average="micro")
def compute_recall(logits, labels):
    preds = torch.argmax(F.softmax(logits, dim=1), dim=1)
    true = torch.argmax(labels, dim=1)
    return metrics.recall_score(true.numpy(), preds.numpy(), average="micro")
def get_accuracy_from_logits(logits, labels):
probs = F.softmax(logits, dim=1)
output = torch.argmax(probs, dim=1)
labels = torch.argmax(labels, dim=1)
acc = (output == labels).float().mean()
return acc
loss_function = torch.nn.CrossEntropyLoss()
print("Training started...")
np.random.seed(42)
random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
training_stats = []
for epoch_i in range(epochs):
print()
print("#-----------------------#")
print(f" Epoch : {epoch_i + 1} / {epochs}")
print("#-----------------------#")
model.train()
total_train_loss = 0
for step, batch in enumerate(train_dataloader):
batch_input_ids = batch[0].to(device)
batch_input_mask = batch[1].to(device)
batch_labels = batch[2].float().to(device)
model.zero_grad()
result = model(
batch_input_ids,
attention_mask=batch_input_mask,
labels=batch_labels,
return_dict=True,
)
logits = result.logits
loss = loss_function(logits, batch_labels)
total_train_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(train_dataloader)
print(f"Average train loss : {avg_train_loss:.3f}")
print()
print("Validation started...")
print()
model.eval()
total_eval_loss = 0
total_eval_f1_micro = 0
total_eval_f1_macro = 0
total_precision = 0
nb_eval_steps = 0
for batch in validation_dataloader:
batch_input_ids = batch[0].to(device)
batch_input_mask = batch[1].to(device)
batch_labels = batch[2].float().to(device)
with torch.no_grad():
result = model(
batch_input_ids,
attention_mask=batch_input_mask,
labels=batch_labels,
return_dict=True,
)
logits = result.logits
loss = loss_function(logits, batch_labels)
total_eval_loss += loss.item()
logits = logits.detach().cpu()
label_ids = batch_labels.to("cpu")
total_eval_f1_micro += compute_f1_micro(logits, label_ids)
total_eval_f1_macro += compute_f1_macro(logits, label_ids)
total_precision += compute_precision(logits, label_ids)
avg_val_f1_micro = total_eval_f1_micro / len(validation_dataloader)
avg_val_f1_macro = total_eval_f1_macro / len(validation_dataloader)
avg_val_loss = total_eval_loss / len(validation_dataloader)
avg_val_precision = total_precision / len(validation_dataloader)
print(f"Average validation loss : {avg_val_loss:.3f}")
print("Average validation metrics:")
print("----------------")
print(f"Precision : {avg_val_precision:.3f}")
print(f"f1-score micro : {avg_val_f1_micro:.3f}")
print(f"f1-score macro : {avg_val_f1_macro:.3f}")
training_stats.append(
{
"epoch": epoch_i + 1,
"train_loss": avg_train_loss,
"valid_loss": avg_val_loss,
"val_f1_micro": avg_val_f1_micro,
"val_f1_macro": avg_val_f1_macro,
"val_precision": avg_val_precision,
}
)
print()
print("Training finished...")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/the-price-of-art/train.csv")
test = pd.read_csv("/kaggle/input/the-price-of-art/test.csv")
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, mean_squared_log_error, mean_squared_error
from scipy.spatial.distance import cdist
from scipy.stats import mode
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import warnings
warnings.filterwarnings("ignore")
# # Data wrangling and feature preprocessing
# For my feature engineering I will take some inspiration from Miguel's and Rhowena's code. For categorical variables I will use LabelEncoder() to transform my categorical variables. I will create the variables "year" and "exhibited" inspired by Miguel's code. I will create an average estimate price variable and remove outliers from the target variable inspired by Rhowena's code.
train
# Removing outliers from target variable
train = train[train["price_realized_usd"] <= 10000000]
train = train[train["price_realized_usd"] >= 0]
# Transforming categorical variables
enc = LabelEncoder()
train["artist"] = enc.fit_transform(train["artist"])
train["category"] = enc.fit_transform(train["category"])
train["original_currency"] = enc.fit_transform(train["original_currency"])
train["location"] = enc.fit_transform(train["location"])
train["auction"] = enc.fit_transform(train["auction"])
train["provenance"] = enc.fit_transform(train["provenance"])
test["artist"] = enc.fit_transform(test["artist"])
test["category"] = enc.fit_transform(test["category"])
test["original_currency"] = enc.fit_transform(test["original_currency"])
test["location"] = enc.fit_transform(test["location"])
test["auction"] = enc.fit_transform(test["auction"])
test["provenance"] = enc.fit_transform(test["provenance"])
# Creating average estimate price variable
train["ave_est"] = (train["estimate_low_usd"] + train["estimate_high_usd"]) / 2
test["ave_est"] = (test["estimate_low_usd"] + test["estimate_high_usd"]) / 2
# Creating a Year variable by splitting "date" into year, month, day. I then drop the month and day columns because they are left over from the date split.
train[["year", "month", "day"]] = train["date"].str.split("-", expand=True)
test[["year", "month", "day"]] = test["date"].str.split("-", expand=True)
train["year"] = pd.to_numeric(train["year"], errors="coerce")
test["year"] = pd.to_numeric(test["year"], errors="coerce")
train = train.drop(["month", "day"], axis=1)
test = test.drop(["month", "day"], axis=1)
# Converting all features with price into numeric values.
train["estimate_low_usd"] = pd.to_numeric(train["estimate_low_usd"], errors="coerce")
train["estimate_high_usd"] = pd.to_numeric(train["estimate_high_usd"], errors="coerce")
train["ave_est"] = pd.to_numeric(train["ave_est"], errors="coerce")
train["price_realized_usd"] = pd.to_numeric(
train["price_realized_usd"], errors="coerce"
)
test["estimate_low_usd"] = pd.to_numeric(test["estimate_low_usd"], errors="coerce")
test["estimate_high_usd"] = pd.to_numeric(test["estimate_high_usd"], errors="coerce")
test["ave_est"] = pd.to_numeric(test["ave_est"], errors="coerce")
# Creating a new variable "details". Here I just took the string lengths of the details column.
train["details"] = train["details"].str.len()
test["details"] = test["details"].str.len()
# Create a new variable for exhibited. Here all NAs are assigned 0 and all other strings are converted to 1. This is to signify whether exhibited is missing or not for each data instance.
train["exhibited"] = np.where(train["exhibited"].isnull(), 0, 1)
test["exhibited"] = np.where(test["exhibited"].isnull(), 0, 1)
# Here I check to see if my feature engineering is what I want it to look like.
train.head()
test.head()
# All my features that I want to use are in the format I want them.
# # Subsetting the variables I will use
xTrain = train[
[
"estimate_low_usd",
"estimate_high_usd",
"auction",
"artist",
"original_currency",
"location",
"provenance",
"category",
"ave_est",
"year",
"exhibited",
"details",
]
]
yTrain = train["price_realized_usd"]
xTest = test[
[
"estimate_low_usd",
"estimate_high_usd",
"auction",
"artist",
"original_currency",
"location",
"provenance",
"category",
"ave_est",
"year",
"exhibited",
"details",
]
]
xTrain.head()
# Checking for missing values in the train and test sets
# Training set check
import missingno as msno
missing_val_count_by_column = xTrain.isnull().sum()
missing_val_count_by_column
# Test set check
missing_val_count_by_column1 = xTest.isnull().sum()
missing_val_count_by_column1
# Here I create my scorer to evaluate my models. Note that, despite the name, it computes RMSE; since lower error is better, greater_is_better=False is needed so that GridSearchCV minimizes it.
RMSLE = make_scorer(mean_squared_error, squared=False, greater_is_better=False)
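# If the scorer should actually match its name, an RMSLE variant can be built the same way (a sketch, not used
# below; note that mean_squared_log_error raises on negative predictions, which Ridge/Lasso can produce):
true_rmsle_scorer = make_scorer(
    mean_squared_log_error, squared=False, greater_is_better=False
)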
# # Grid Search
# Splitting up training data
X_train, X_test, y_train, y_test = train_test_split(
xTrain, yTrain, test_size=0.2, random_state=41
)
# **Ridge Regression**
ridge = Ridge()
param_grid_ridge = {
"alpha": np.concatenate(
(np.arange(0.1, 2, 0.1), np.arange(2, 5, 0.5), np.arange(5, 105, 5))
)
}
gridRidge = GridSearchCV(
ridge, param_grid_ridge, scoring=RMSLE, cv=5, error_score="raise"
)
gridRidge.fit(X_train, y_train)
print(gridRidge.best_params_)
print(gridRidge.best_score_)
# best_model_dt = gridRidge.best_estimator_
# predict_y_dt = best_model_dt.predict(xTrain)
# predict_y_dt.min()
# acc_dt = mean_squared_log_error(yTrain, predict_y_dt, squared=False)
best_model_ridge = gridRidge.best_estimator_
predict_y_ridge = best_model_ridge.predict(X_test)
predict_y_ridge[predict_y_ridge < 0] = 0
ridge_rmsle = mean_squared_log_error(y_test, predict_y_ridge, squared=False)
print(ridge_rmsle)
# **Lasso Regression**
lasso = Lasso()
param_grid_lasso = {
"alpha": np.concatenate(
(np.arange(0.1, 2, 0.1), np.arange(2, 5, 0.5), np.arange(5, 105, 5))
)
}
gridLasso = GridSearchCV(
lasso, param_grid_lasso, scoring=RMSLE, cv=5, error_score="raise"
)
gridLasso.fit(X_train, y_train)
print(gridLasso.best_params_)
print(gridLasso.best_score_)
best_model_lasso = gridLasso.best_estimator_
predict_y_lasso = best_model_lasso.predict(X_test)
predict_y_lasso[predict_y_lasso < 0] = 0
lasso_rmsle = mean_squared_log_error(y_test, predict_y_lasso, squared=False)
print(lasso_rmsle)
# Subsetting data so that I can run RandomForestRegressor() quicker. I subsetted my data into 10,000 data instances.
xTrain_subset = xTrain.iloc[87275:, :]
yTrain_subset = yTrain.iloc[87275:]
# Creating split for subsetted data
X_train_subset, X_test_subset, y_train_subset, y_test_subset = train_test_split(
xTrain_subset, yTrain_subset, test_size=0.2, random_state=41
)
# **Random Forest**
from sklearn.ensemble import RandomForestRegressor
RF = RandomForestRegressor()
param_grid_RF = {"n_estimators": [100, 200, 300, 400]}
gridRF = GridSearchCV(RF, param_grid_RF, scoring=RMSLE, cv=5, error_score="raise")
gridRF.fit(X_train_subset, y_train_subset)
print(gridRF.best_params_)
print(gridRF.best_score_)
best_model_RF = gridRF.best_estimator_
predict_y_RF = best_model_RF.predict(X_test_subset)
RF_rmsle = mean_squared_log_error(y_test_subset, predict_y_RF, squared=False)
print(RF_rmsle)
# Results:
# * Ridge - 'alpha': 0.1, RMSLE = 2.0029492630738313
# * Lasso - 'alpha' : 100.0, RMSLE = 1.975684414452485
# * RF - 'n_estimators': 300, RMSLE = 0.6595684291689251
# Based on these results, I will go with the Random Forest regression with 300 'n_estimators'. I believe that Lasso and Ridge performed worse than RF because they produced some negative predictions that had to be adjusted to 0 and they are less flexible methods than RF.
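# A short illustration of why the negative Ridge/Lasso predictions had to be clipped before scoring:
# mean_squared_log_error takes log(1 + x), so scikit-learn rejects negative inputs and clipping to 0 is required.
demo_true = np.array([100.0, 200.0])
demo_pred = np.array([90.0, -5.0])
try:
    mean_squared_log_error(demo_true, demo_pred)
except ValueError as err:
    print(err)  # scikit-learn raises on negative values
print(mean_squared_log_error(demo_true, np.clip(demo_pred, 0, None), squared=False))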
# Using best model for test predictions
best_model = RandomForestRegressor(n_estimators=300)
best_model.fit(xTrain, yTrain)
predictions_test = best_model.predict(xTest)
# Creating output dataframe and checking for correct submission format
output = pd.DataFrame(
{"object_id": test["object_id"], "price_realized_usd": predictions_test}
)
output.isnull().sum()
output.to_csv("submission.csv", index=False)
|
# # What impacts your CPU inference time?
# In this competition, we only have **120 minutes of CPU inference time**.
# Let's explore what impacts the CPU inference time in this notebook:
# * Batch size
# * Choice of backbone
# * Spectrogram size
# **References:**
# * This notebook builds on the inference baselines from [[Pytorch] BirdCLEF23 Starter](https://www.kaggle.com/code/debarshichanda/pytorch-birdclef23-starter)
# * The function to estimate inference time on the test set is copied from [BirdCLEF23: Pretraining is All you Need [Infer]](https://www.kaggle.com/code/awsaf49/birdclef23-pretraining-is-all-you-need-infer/notebook#Submission-Time-%E2%8F%B0)
import os
import gc
import cv2
import math
import copy
import time
import random
# For data manipulation
import numpy as np
import pandas as pd
# Deep Learning framework
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
# Audio processing
import torchaudio
import torchaudio.transforms as T
import librosa
# Pre-trained image models
import timm
# Utils
import joblib
from tqdm import tqdm
from collections import defaultdict
from pathlib import Path
import gc
from sklearn.metrics import average_precision_score
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# For descriptive error messages
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
# # Batch size
# The bigger the batch, the higher the throughput.
# To keep it short, we will not run any experiments here.
# Instead, increase your batch size in powers of two until you get an error.
# Then use the biggest batch size your training and inference pipeline can handle.
# **For this pipeline, the biggest possible batch size turned out to be 128**
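# A rough sketch of that procedure (an assumption, not executed in this notebook): double the batch size until
# building/running a batch fails (e.g. out of memory), then keep the last size that worked.
def find_max_batch_size(run_one_batch, start=16, limit=2048):
    best = None
    batch_size = start
    while batch_size <= limit:
        try:
            # `run_one_batch` is assumed to build a DataLoader with this batch size and run one forward pass
            run_one_batch(batch_size)
            best = batch_size
            batch_size *= 2
        except (RuntimeError, MemoryError):
            break
    return best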
CONFIG = {
"seed": 27032023,
"epochs": 5, # 15-30
"model_name": "eca_nfnet_l0", # "tf_efficientnet_b3_ns",
"embedding_size": 768,
"num_classes": 264,
"batch_size": 128,
"learning_rate": 1e-3,
"min_lr": 1e-6,
"T_max": 500,
"weight_decay": 1e-6,
"n_fold": 5,
"device": torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
"competition": "BirdCLEF23",
"_wandb_kernel": "lemon",
# Audio Specific
"sample_rate": 32000,
"audio_length": 5,
"n_mels": 128,
"n_fft": 1024, # 2048
"hop_length": 512, # 512
}
def set_seed(seed=42):
"""Sets the seed of the entire notebook so results are the same every time we run.
This is for REPRODUCIBILITY."""
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# When running on the CuDNN backend, two further options must be set
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Set a fixed value for the hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
set_seed(CONFIG["seed"])
ROOT_DIR = "../input/birdclef-2023"
TRAIN_DIR = "../input/birdclef-2023/train_audio"
TEST_DIR = "../input/birdclef-2023/test_soundscapes"
def get_train_file_path(filename):
return f"{TRAIN_DIR}/{filename}"
df = pd.read_csv(f"{ROOT_DIR}/train_metadata.csv")
df["file_path"] = df["filename"].apply(get_train_file_path)
df.head()
df_test = pd.DataFrame(
[
(path.stem, *path.stem.split("_"), path)
for path in Path("/kaggle/input/birdclef-2023/test_soundscapes/").glob("*.ogg")
],
columns=["filename", "name", "id", "path"],
)
print(df_test.shape)
df_test.head()
class AudioDataset(Dataset):
def __init__(
self,
df,
target_sample_rate,
audio_length,
n_mels=CONFIG["n_mels"],
hop_length=CONFIG["hop_length"],
step=None,
):
self.file_paths = df["path"].values
self.target_sample_rate = target_sample_rate
self.num_samples = target_sample_rate * audio_length
self.step = step or self.num_samples
self.n_mels = n_mels
self.hop_length = hop_length
def __len__(self):
return len(self.file_paths)
def audio_to_image(self, audio):
mel_spectogram = T.MelSpectrogram(
sample_rate=self.target_sample_rate,
n_mels=self.n_mels,
n_fft=CONFIG["n_fft"],
hop_length=self.hop_length,
)
mel = mel_spectogram(audio)
# Convert to Image
image = torch.stack([mel])
# Normalize Image
max_val = torch.abs(image).max()
image = image / max_val
return image
def __getitem__(self, index):
audio, sample_rate = torchaudio.load(self.file_paths[index])
audio = self.to_mono(audio)
if sample_rate != self.target_sample_rate:
resample = T.Resample(sample_rate, self.target_sample_rate)
audio = resample(audio)
audios = []
for i in range(self.num_samples, len(audio) + self.step, self.step):
start = max(0, i - self.num_samples)
end = start + self.num_samples
audios.append(audio[start:end])
if len(audios[-1]) < self.num_samples:
audios = audios[:-1]
images = [self.audio_to_image(audio) for audio in audios]
images = np.stack(images)
return images
def to_mono(self, audio):
return torch.mean(audio, axis=0)
class AudioModel(nn.Module):
def __init__(
self,
name,
model_name="tf_efficientnet_b3_ns",
pretrained=True,
num_classes=CONFIG["num_classes"],
):
super(AudioModel, self).__init__()
self.model = timm.create_model(name, pretrained=pretrained, in_chans=1)
self.model.reset_classifier(num_classes=0)
in_features = self.model.num_features
self.fc = nn.Linear(in_features, CONFIG["num_classes"])
def forward(self, x):
x = self.model(x)
x = self.fc(x)
return x
def criterion(outputs, labels):
return nn.CrossEntropyLoss()(outputs, labels)
def padded_cmap(solution, submission, padding_factor=5):
new_rows = []
for i in range(padding_factor):
new_rows.append([1 for i in range(len(solution.columns))])
new_rows = pd.DataFrame(new_rows)
new_rows.columns = solution.columns
padded_solution = pd.concat([solution, new_rows]).reset_index(drop=True).copy()
padded_submission = pd.concat([submission, new_rows]).reset_index(drop=True).copy()
score = average_precision_score(
padded_solution.values,
padded_submission.values,
average="macro",
)
return score
def predict(data_loader, model):
model.to("cpu")
model.eval()
predictions = []
for en in range(len(data_loader)):
images = torch.from_numpy(data_loader[en])
with torch.no_grad():
outputs = model(images).sigmoid().detach().cpu().numpy()
predictions.append(outputs)
return predictions
def estimate_time(model_name, n_mels=CONFIG["n_mels"], hop_length=CONFIG["hop_length"]):
# Start stopwatch
tick = time.time()
ds_test = AudioDataset(
df_test,
target_sample_rate=CONFIG["sample_rate"],
audio_length=CONFIG["audio_length"],
n_mels=n_mels,
hop_length=hop_length,
)
model = AudioModel(model_name)
preds = predict(ds_test, model)
gc.collect()
torch.cuda.empty_cache()
tock = time.time()
    sub_time = (tock - tick) * 200  # ~200 recordings in the test data
sub_time = time.gmtime(sub_time)
sub_time = time.strftime("%H hr: %M min : %S sec", sub_time)
print(f">> Estimated Time for submission: ~ {sub_time} for {model_name}")
return (tock - tick) * 200 / 60
# # Choice of backbone
# Due to the limitations of the inference time, you might not be able to ensemble multiple models if your choice of backbone is too time-consuming.
# Let's explore a few popular backbones from past BirdCLEF competitions:
# * 'tf_efficientnet_b3_ns', # https://www.kaggle.com/competitions/birdclef-2022/discussion/327047
# * 'tf_efficientnet_b0_ns', # https://www.kaggle.com/competitions/birdclef-2022/discussion/327193
# * 'tf_efficientnetv2_s_in21k', # https://www.kaggle.com/competitions/birdclef-2022/discussion/327193
# * 'tf_efficientnetv2_m_in21k', # https://www.kaggle.com/competitions/birdclef-2021/discussion/243463
# * 'resnet34', # https://www.kaggle.com/competitions/birdclef-2022/discussion/327193
# * 'resnet50', # https://www.kaggle.com/competitions/birdclef-2021/discussion/243351
# * 'convnext_tiny', # https://www.kaggle.com/competitions/birdclef-2022/discussion/327044
# * 'seresnext26t_32x4d', # https://www.kaggle.com/competitions/birdclef-2021/discussion/243463
# * 'eca_nfnet_l0', # https://www.kaggle.com/competitions/birdclef-2022/discussion/327047
# timm.list_models()
model_zoo = [
"tf_efficientnet_b3_ns", # https://www.kaggle.com/competitions/birdclef-2022/discussion/327047
"tf_efficientnet_b0_ns", # https://www.kaggle.com/competitions/birdclef-2022/discussion/327193
"tf_efficientnetv2_s_in21k", # https://www.kaggle.com/competitions/birdclef-2022/discussion/327193
"tf_efficientnetv2_m_in21k", # https://www.kaggle.com/competitions/birdclef-2021/discussion/243463
"resnet34", # https://www.kaggle.com/competitions/birdclef-2022/discussion/327193
"resnet50", # https://www.kaggle.com/competitions/birdclef-2021/discussion/243351
"convnext_tiny", # https://www.kaggle.com/competitions/birdclef-2022/discussion/327044
"seresnext26t_32x4d", # https://www.kaggle.com/competitions/birdclef-2021/discussion/243463
"eca_nfnet_l0", # https://www.kaggle.com/competitions/birdclef-2022/discussion/327047
]
t = []
for m in model_zoo:
estimated_time = estimate_time(m)
t.append(estimated_time)
fig = plt.figure(figsize=(5, 10))
plt.barh(model_zoo, t)
plt.ylabel("Backbone")
plt.xlabel("Minutes of estimated inference time")
plt.show()
# From the above plot, we can see that 'tf_efficientnetv2_s_in21k' and 'tf_efficientnetv2_m_in21k' won't be suitable because their estimated inference time is longer than 120 minutes.
# # Spectrogram Size
# The spectrogram size is impacted by `n_mels` (= height of the spectrogram) and by `hop_length`, which is inversely proportional to the width (width = (sample_rate * audio_length) / hop_length + 1):
# * the bigger n_mels, the bigger the spectrogram height.
# * the smaller hop_length, the bigger the spectrogram width.
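# Quick sanity check of the resulting spectrogram size for the current CONFIG
# (the frame count matches torchaudio's default center=True padding):
n_frames = (CONFIG["sample_rate"] * CONFIG["audio_length"]) // CONFIG["hop_length"] + 1
print(f'Spectrogram size: {CONFIG["n_mels"]} (n_mels) x {n_frames} (frames)')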
n_mels = CONFIG["n_mels"]
hop_length = CONFIG["hop_length"]
n_mels_list = [128, 256, 512]
hop_length_list = [128, 256, 512]
t = []
for hop_length in hop_length_list:
estimated_time = estimate_time("resnet34", CONFIG["n_mels"], hop_length)
t.append(estimated_time)
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].barh(hop_length_list, t)
ax[0].set_ylabel("hop_length")
ax[0].set_xlabel("Minutes of estimated inference time")
t = []
for n_mels in n_mels_list:
estimated_time = estimate_time("resnet34", n_mels, CONFIG["hop_length"])
t.append(estimated_time)
ax[1].barh(n_mels_list, t)
ax[1].set_ylabel("n_mels")
ax[1].set_xlabel("Minutes of estimated inference time")
plt.tight_layout()
plt.show()
# # Summary
# * **Batch size:** Should be as large as possible in your inference pipeline (For the code in this Notebook, the biggest batch size in powers of two is 128)
# * **Choice of backbone:** Find a trade-off between backbone performance and inference time to ensemble multiple models
# * **Spectrogram size:** find a trade-off between performance (bigger spectrogram, bigger n_mels, smaller hop_length) and inference time (smaller spectrogram, smaller n_mels, bigger hop_length)
#   * the bigger n_mels, the bigger the spectrogram height.
#   * the smaller hop_length, the bigger the spectrogram width.
"""#!pip install onnxruntime
import onnx
import onnxruntime
# https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
model = AudioModel(CONFIG['model_name'])
dummy_input = torch.randn(16, 1, 3,3, requires_grad=False, device=CONFIG['device'])
torch_out = model(dummy_input)
# Providing input and output names sets the display names for values
# within the model's graph. Setting these does not change the semantics
# of the graph; it is only for readability.
#
# The inputs to the network consist of the flat list of inputs (i.e.
# the values you would pass to the forward() method) followed by the
# flat list of parameters. You can partially specify names, i.e. provide
# a list here shorter than the number of inputs to the model, and we will
# only set that subset of names, starting from the beginning.
#input_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ]
#output_names = [ "output1" ]
# Export model to ONNX format
torch.onnx.export(model,
dummy_input,
f"{CONFIG['model_name']}.onnx",
verbose=True,
#input_names=input_names,
#output_names=output_names
)
# https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
# Load the ONNX model
model = onnx.load(f"/kaggle/working/{CONFIG['model_name']}.onnx")
# Check that the model is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
# https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
ort_session = onnxruntime.InferenceSession(f"{CONFIG['model_name']}.onnx")
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def predict_with_onnx(data_loader, model):
model.to('cpu')
model.eval()
predictions = []
for en in range(len(data_loader)):
images = torch.from_numpy(data_loader[en])
with torch.no_grad():
outputs = model(images).sigmoid().detach().cpu().numpy()
predictions.append(outputs)
return predictions"""
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv(
"/kaggle/input/national-health-and-nutrition-examination-survey/labs.csv"
)
df.drop(["SEQN"], axis=1, inplace=True)
df.head()
import pandas as pd
# Data import
labs = pd.read_csv(
"/kaggle/input/national-health-and-nutrition-examination-survey/labs.csv"
)
# Select only the gender and laboratory variables
variables_of_interest = ["LBDHBG", "LBXHBS", "LBXHBC"]
df = labs[variables_of_interest]
# Print first few rows of the merged dataset
print(df.head())
# Define a dictionary with the new names for the columns
new_names = {
"LBXHBS": "Surface Antibody",
"LBDHBG": "surface antigen",
"LBXHBC": "core antibody",
}
# Rename the columns using the `rename()` method
df = df.rename(columns=new_names)
# Print the first few rows of the renamed DataFrame
print(df.head())
d = ((df.isnull().sum() / df.shape[0])).sort_values(ascending=False)
d.plot(
kind="bar",
color=sns.cubehelix_palette(
start=2, rot=0.15, dark=0.15, light=0.95, reverse=True, n_colors=24
),
figsize=(20, 10),
)
plt.title("\nProportions of Missing Values:\n", fontsize=40)
plt.show()
df
df.dropna(subset=["Surface Antibody", "surface antigen", "core antibody"], inplace=True)
null_counts = df.isnull().sum()
print(null_counts)
df.head()
from sklearn.cluster import KMeans
# normalize your data
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
df_norm = scaler.fit_transform(df)
# apply k-means clustering with k=5
kmeans = KMeans(n_clusters=5, random_state=0).fit(df_norm)
# get the cluster assignments for each data point
labels = kmeans.labels_
# add the cluster assignments to the original DataFrame
df["cluster"] = labels
# compute the mean values of each feature within each cluster
cluster_means = df.groupby("cluster").mean()
# display the mean values for each feature within each cluster
print(cluster_means)
import matplotlib.pyplot as plt
# plot the data points and color them according to their cluster assignments
plt.scatter(df_norm[:, 0], df_norm[:, 1], c=labels)
# add axis labels and a title
plt.xlabel("Surface Antibody")
plt.ylabel("Surface Antigen")
plt.title("Clustering result")
plt.show()
df.dtypes
import matplotlib.pyplot as plt
# group the data by cluster and compute the mean of each variable for each cluster
df_summary = df.groupby("cluster").mean()
print(df_summary)
# plot the cluster means as a bar plot
df_summary.plot(kind="bar")
plt.title("Cluster Means")
plt.xlabel("Cluster")
plt.ylabel("Mean Value")
plt.show()
# replace 1 and 2 with + and - (on a copy, so the numeric columns of df stay intact for the modeling below)
df_readable = df.copy()
df_readable["surface antigen"] = df["surface antigen"].replace({1: "+", 2: "-"})
df_readable["Surface Antibody"] = df["Surface Antibody"].replace({1: "+", 2: "-"})
df_readable["core antibody"] = df["core antibody"].replace({1: "+", 2: "-"})
df_readable
# get the cluster sizes
cluster_sizes = pd.Series(labels).value_counts().sort_index()
# print the cluster sizes
print(cluster_sizes)
import matplotlib.pyplot as plt
# Get the cluster centers
centers = kmeans.cluster_centers_
# Plot the cluster centers
plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, alpha=0.5)
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# reduce the dimensionality of the data using PCA
pca = PCA(n_components=2)
df_pca = pca.fit_transform(df_norm)
# plot the data points using different colors based on their cluster assignments
plt.scatter(df_pca[:, 0], df_pca[:, 1], c=labels)
plt.title("Cluster Visualization")
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.show()
from sklearn.metrics import silhouette_samples, silhouette_score
# calculate the silhouette score for each sample
silhouette_vals = silhouette_samples(df_norm, labels)
# calculate the mean silhouette score for all samples
silhouette_avg = silhouette_score(df_norm, labels)
print(f"Silhouette Score: {silhouette_avg}")
from sklearn.model_selection import train_test_split
# Split the dataset into training and testing sets
X = df.drop("cluster", axis=1)
y = df["cluster"]
random_seed = 15
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, stratify=y, random_state=random_seed
)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
models = [
("Random Forest", RandomForestClassifier()),
("Gradient Boosting", GradientBoostingClassifier()),
]
for model_name, model in models:
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy score for {model_name}: {accuracy:.3f}")
from sklearn.metrics import classification_report
for model_name, model in models:
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(f"Evaluation metrics for {model_name}:")
print(classification_report(y_test, y_pred))
import seaborn as sns
# Define function to set plot style
def set_style(ax, left=True, bottom=True, axis_grid="y"):
ax.spines["bottom"].set_visible(bottom)
ax.spines["left"].set_visible(left)
ax.grid(axis=axis_grid, linestyle="--", alpha=0.7)
sns.despine()
colors = ["#e60026", "#001f3f", "#007f7f"]
model = RandomForestClassifier(random_state=20)
model.fit(X_train, y_train)
feature_importance = (
pd.Series(data=model.feature_importances_, index=X.columns).sort_values(
ascending=False
)
* 100
)
feature_importance = feature_importance.round(1)
fig, ax = plt.subplots(figsize=(10, 6))
sns.barplot(
y=feature_importance.index,
x=feature_importance.values,
ax=ax,
color=colors[0],
linewidth=3,
edgecolor="black",
)
ax.bar_label(ax.containers[0], color="black", fontsize=15, padding=5, fmt="%g%%")
set_style(ax, left=False, bottom=True, axis_grid="x")
ax.set_title("Feature Importance: Random Forest Classifier", fontsize="xx-large")
ax.set_xlim([0, 100])
plt.show()
df
import pandas as pd
data = {
"surface antigen": [2.0, 2.0, 2.0, 1.0, 2.0],
"Surface Antibody": [2.0, 1.0, 1.0, 2.0, 2.0],
"core antibody": [2.0, 1.0, 2.0, 1.0, 1.0],
}
df_Test = pd.DataFrame(data)
df_Test
red_model = RandomForestClassifier(random_state=400)
red_model.fit(X_train, y_train)
y_pred_rf = red_model.predict(df_Test)
print(y_pred_rf)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Basic configuration
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Optional, Tuple
sns.set_theme(style="dark")
# # Kidney stone prediction based on Urine analysis
# This notebook is created as part of Kaggle's playground competition (Season 3, episode 12). The goal of the analysis is to predict kidney stones in a patient based on results of urine analysis.
# # Read data
# We start by importing the data into pandas DataFrames.
def get_data(train_or_test: str) -> Tuple[pd.DataFrame, Optional[pd.Series]]:
if train_or_test not in ("train", "test"):
raise ValueError(f"The argument of 'get_data' must be either 'train' or 'test'")
df = pd.read_csv(
f"/kaggle/input/playground-series-s3e12/{train_or_test}.csv", index_col="id"
)
X = df.copy()
y = None
if train_or_test == "train":
X = df.drop("target", axis="columns")
y = df["target"]
return X, y
X, y = get_data("train")
df = pd.concat((X, y), axis="columns")
df
# # EDA - Initial observations
df.info()
df.describe()
pg = sns.PairGrid(
df,
hue="target",
diag_sharey=False,
corner=True,
)
pg.map_diag(sns.kdeplot, fill=True)
pg.map_lower(sns.scatterplot)
pg.add_legend()
plt.show()
df.groupby("target").describe().T
# Comments:
# 1. No missing values in the dataset. Expected since the data is synthetic.
# 2. No obvious outliers in the dataset.
# 3. Features have different scales. Need to standardize.
# 4. Most important feature seems to be `calc`. Vastly different distributions for people with and without kidney stones. When `calc` is greater than about 5, most observations have kidney stones.
# 5. Least important feature is `ph`. Almost identical distributions for people with and without kidney stones.
# 6. Feature `calc` is right skewed. Need to normalize (see the quick skewness check below).
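# Quick check of point 6: skewness of `calc` before and after a log1p transform
# (a small illustration only; the pipeline below uses a Yeo-Johnson PowerTransformer instead).
print(f"calc skew: {df['calc'].skew():.2f}")
print(f"log1p(calc) skew: {np.log1p(df['calc']).skew():.2f}")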
from sklearn.feature_selection import f_classif, mutual_info_classif
f_scores, f_score_p_values = f_classif(X, y)
mi = mutual_info_classif(X, y)
scores = pd.DataFrame(
{"f_score": f_scores, "p_value": f_score_p_values, "mutual_info": mi},
index=X.columns,
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7))
sns.heatmap(scores[["f_score"]], annot=True, ax=ax1)
sns.heatmap(scores[["mutual_info"]], annot=True, ax=ax2)
plt.show()
scores[["f_score", "p_value"]]
# Calculating the mutual information between the `target` and the different features confirms that `calc` is the most important and `ph` is the least important feature. We draw similar conclusions from an analysis of variance (ANOVA) test.
# # Preprocessing
# Preprocessing steps for the data set:
# 1. Create polynomial features to capture interactions between different columns
# 2. Use a power transformer to normalize and standardize the data
# 3. Select features with the highest f-score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import SplineTransformer, PowerTransformer
from sklearn.feature_selection import SelectPercentile, f_classif
spline = SplineTransformer(include_bias=False)
power_transf = PowerTransformer(method="yeo-johnson", standardize=True)
feat_select = SelectPercentile(score_func=f_classif, percentile=80)
preprocessor = Pipeline(
[("spline", spline), ("normalize", power_transf), ("f_selector", feat_select)]
)
display(preprocessor)
# We also need to split the dataset into training and testing sets. We further split the training set into K folds for the purpose of later determining hyper-parameters for the models via cross-validation. We make sure to stratify the splits so that the two classes are equally represented in each split.
from sklearn.model_selection import train_test_split, StratifiedKFold
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
cv_split = StratifiedKFold(n_splits=5, shuffle=True)
# # Logistic regression
# We first consider logistic regression.
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
lr = Pipeline(
[
("preprocessor", preprocessor),
(
"logistic",
LogisticRegression(
penalty="l1", solver="liblinear", class_weight="balanced", max_iter=1000
),
),
]
)
params = {
"logistic__C": np.geomspace(0.01, 100, num=5),
"preprocessor__f_selector__percentile": np.arange(10, 100, 20),
"preprocessor__spline__degree": [2, 3],
"preprocessor__spline__n_knots": np.arange(5, 10),
}
lr_cv = GridSearchCV(lr, params, cv=cv_split).fit(X_train, y_train)
lr_cv.best_params_
# ## Test the model
# Display the confusion matrix and the ROC curve.
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_estimator(lr_cv, X_test, y_test)
from sklearn.metrics import RocCurveDisplay
RocCurveDisplay.from_estimator(lr_cv, X_test, y_test)
# ## Make competition predictions
X_comp, _ = get_data("test")
lr_best = lr_cv.best_estimator_.fit(X, y)
lr_predictions = pd.DataFrame(
lr_best.predict_proba(X_comp)[:, 1], index=X_comp.index, columns=["target"]
)
display(lr_predictions)
lr_predictions.to_csv("logistic_regression.csv")
# # Random forest
# Next, we try a random forest model.
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
forest = Pipeline(
[
("preprocessor", preprocessor),
(
"forest",
RandomForestClassifier(bootstrap=True, class_weight="balanced_subsample"),
),
]
)
params = {
"forest__n_estimators": [50, 100, 500, 1000],
"forest__criterion": ["gini", "entropy"],
"forest__max_depth": [2, 4, 6, 8, None],
"preprocessor__f_selector__percentile": np.arange(10, 100, 20),
"preprocessor__spline__degree": [2, 3],
"preprocessor__spline__n_knots": np.arange(5, 10),
}
forest_cv = GridSearchCV(forest, params, cv=cv_split).fit(X_train, y_train)
forest_cv.best_params_
# ## Test the model
# Display the confusion matrix and the ROC curve.
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_estimator(forest_cv, X_test, y_test)
from sklearn.metrics import RocCurveDisplay
RocCurveDisplay.from_estimator(forest_cv, X_test, y_test)
# ## Make competition predictions
X_comp, _ = get_data("test")
forest_best = forest_cv.best_estimator_.fit(X, y)
forest_predictions = pd.DataFrame(
forest_best.predict_proba(X_comp)[:, 1], index=X_comp.index, columns=["target"]
)
display(forest_predictions)
forest_predictions.to_csv("random_forest.csv")
|
# Importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import yfinance as yf
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU
from tensorflow.keras.optimizers import SGD
from tensorflow.random import set_seed
from pandas_datareader.data import DataReader
import warnings
warnings.simplefilter("ignore")
set_seed(455)
np.random.seed(455)
# Download Apple's stock market prices.
end = datetime.now()
start = datetime(2016, end.month, end.day)
dataset = yf.download("AAPL", start, end)
dataset
tstart = 2016
tend = 2020
def train_test_plot(dataset, tstart, tend):
dataset.loc[f"{tstart}":f"{tend}", "High"].plot(figsize=(16, 4), legend=True)
dataset.loc[f"{tend+1}":, "High"].plot(figsize=(16, 4), legend=True)
plt.legend([f"Train (Before {tend+1})", f"Test ({tend+1} and beyond)"])
plt.title("APPLE stock price")
plt.show()
train_test_plot(dataset, tstart, tend)
def train_test_split(dataset, tstart, tend):
train = dataset.loc[f"{tstart}":f"{tend}", "High"]
test = dataset.loc[f"{tend+1}":, "High"]
return train, test
def train_test_split_values(dataset, tstart, tend):
train, test = train_test_split(dataset, tstart, tend)
return train.values, test.values
training_set, test_set = train_test_split_values(dataset, tstart, tend)
# Scaling the training set
sc = MinMaxScaler(feature_range=(0, 1))
training_set = training_set.reshape(-1, 1)
training_set_scaled = sc.fit_transform(training_set)
def split_sequence(sequence, window):
X, y = list(), list()
for i in range(len(sequence)):
end_ix = i + window
if end_ix > len(sequence) - 1:
break
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
window_size = 60
features = 1
X_train, y_train = split_sequence(training_set_scaled, window_size)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], features)
# # RNN
# RNN stands for Recurrent Neural Network, a type of artificial neural network designed to work with sequences of data, such as time series or natural language. Unlike feedforward neural networks, which process input data in a single pass and generate output, RNNs maintain a hidden state that can capture information from previous inputs in the sequence. This allows them to exhibit temporal dynamic behavior and learn patterns or dependencies over time.
# RNNs consist of interconnected layers of neurons, with recurrent connections that allow information to loop back within the network. This recurrent nature enables the network to "remember" previous inputs and use that information in processing subsequent inputs. However, RNNs can suffer from issues like vanishing or exploding gradients, which make it difficult for them to learn long-range dependencies.
# To address these limitations, more advanced variants of RNNs, such as Long Short-Term Memory (LSTM) networks and Gated Recurrent Units (GRUs), have been developed. These architectures introduce specialized gating mechanisms that help to control the flow of information, making it easier to capture and learn from long-range dependencies in the data.
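# Before moving to the Keras models, the recurrence described above can be made concrete. The function below is a minimal NumPy sketch of a single vanilla-RNN forward pass (illustrative only, not part of the models trained here); the weight shapes, zero initial state, and tanh activation are assumptions chosen for clarity.
def vanilla_rnn_forward(x_seq, W_x, W_h, b):
    """x_seq: (timesteps, input_dim); W_x: (hidden, input_dim); W_h: (hidden, hidden)."""
    h = np.zeros(W_h.shape[0])  # initial hidden state
    for x_t in x_seq:
        # h_t = tanh(W_x x_t + W_h h_{t-1} + b): the hidden state carries
        # information from earlier timesteps forward through the sequence
        h = np.tanh(W_x @ x_t + W_h @ h + b)
    return h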
# # LSTM
model_lstm = Sequential()
model_lstm.add(LSTM(units=125, activation="tanh", input_shape=(window_size, features)))
model_lstm.add(Dense(25))
model_lstm.add(Dense(units=1))
model_lstm.compile(optimizer="adam", loss="mse")
model_lstm.summary()
model_lstm.fit(X_train, y_train, epochs=15, batch_size=32)
dataset_total = dataset.loc[:, "High"]
inputs = dataset_total[len(dataset_total) - len(test_set) - window_size :].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs)
X_test, y_test = split_sequence(inputs, window_size)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], features)
predicted_stock_price = model_lstm.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
y_test = sc.inverse_transform(y_test)
def plot_predictions(test, predicted):
plt.plot(test, color="gray", label="Real")
plt.plot(predicted, color="red", label="Predicted")
plt.title("Stock Price Prediction")
plt.xlabel("Time")
plt.ylabel("Stock Price")
plt.legend()
plt.show()
def return_rmse(test, predicted):
rmse = np.sqrt(mean_squared_error(test, predicted))
print("The root mean squared error is {:.2f}.".format(rmse))
plot_predictions(y_test, predicted_stock_price)
return_rmse(y_test, predicted_stock_price)
# # GRU
model_gru = Sequential()
model_gru.add(GRU(units=125, activation="tanh", input_shape=(window_size, features)))
model_gru.add(Dense(25))
model_gru.add(Dense(units=1))
# Compiling the model
model_gru.compile(optimizer="adam", loss="mse")
model_gru.summary()
model_gru.fit(X_train, y_train, epochs=10, batch_size=32)
GRU_predicted_stock_price = model_gru.predict(X_test)
GRU_predicted_stock_price = sc.inverse_transform(GRU_predicted_stock_price)
plot_predictions(y_test, GRU_predicted_stock_price)
return_rmse(y_test, GRU_predicted_stock_price)
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
from sklearn.metrics import (
accuracy_score,
roc_auc_score,
classification_report,
confusion_matrix,
f1_score,
roc_curve,
precision_score,
recall_score,
roc_auc_score,
)
from sklearn import linear_model, tree, ensemble
from sklearn.model_selection import train_test_split
import warnings
warnings.simplefilter(action="ignore")
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv", index_col=[0])
df_test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv", index_col=[0])
original = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
train["is_generated"] = 1
df_test["is_generated"] = 1
original["is_generated"] = 0
original = original.reset_index()
original["id"] = original["index"] + df_test.index[-1] + 1
original = original.drop(columns=["index"]).set_index("id")
df_train = pd.concat([train, original])
class Preprocessing:
def __init__(self, df, is_test=False):
self.df = df
self.is_test = is_test
def shape(self):
print(f"shape: {self.df.shape}")
def dtypes(self, pr=False):
print("Types")
if pr:
print(self.df.dtypes)
def isNaN(self, pr=False):
if pr:
print("Contain NaN")
print(self.df.isnull().sum())
else:
return self.df.columns[self.df.isna().any()].tolist()
def isObject(self):
return [
column for column in self.df.columns if self.df[column].dtype == "object"
]
def check_dataframe(self):
self.shape()
self.dtypes(True)
self.isNaN(True)
# Thanks to https://www.kaggle.com/code/lusfernandotorres/s03e12-stacking-tuned-models
# Also: https://www.kaggle.com/code/tetsutani/ps3e12-eda-ensemble-baseline#Pre-Processing
def feature_engineering(self):
print("Feature Engineering")
self.df["pH_cat"] = pd.cut(
self.df["ph"], bins=[0, 6, 8, 14], labels=["Acid", "Normal", "Base"]
)
self.df["osmo-to-urea-ratio"] = self.df["osmo"] / self.df["urea"]
self.df["osmo-to-cond-diff"] = self.df["osmo"] - self.df["cond"]
self.df["calc-to-ph-ratio"] = self.df["calc"] / self.df["osmo"]
self.df["osmo-to-urea-diff"] = self.df["osmo"] - self.df["urea"]
self.df["ion_product"] = self.df["calc"] * self.df["urea"]
self.df["calcium_to_urea_ratio"] = self.df["calc"] / self.df["urea"]
self.df["electrolyte_balance"] = self.df["cond"] / (10 ** (-self.df["ph"]))
self.df["osmolality_to_sg_ratio"] = self.df["osmo"] / self.df["gravity"]
self.df["osmo_density"] = self.df["osmo"] * self.df["gravity"]
def get_df(self):
self.feature_engineering()
return self.df
def split_target(self):
print("Split Target")
if not self.is_test:
self.feature_engineering()
self.X = self.df.drop("target", axis=1)
self.y = self.df["target"].astype(int).to_numpy()
else:
self.feature_engineering()
self.X = self.df
# def get_X(self):
# self.split_target()
# return self.X
def find_enc_method(self):
print("Find Encoding Method")
self.split_target()
one_hot_cols = [
column
for column in self.X.columns
if self.X[column].dtype == "category" or self.X[column].dtype == "object"
]
return one_hot_cols
def encoding(self):
print("Encoding")
one_hot_cols = self.find_enc_method()
num_cols = [col for col in self.X.columns if col not in one_hot_cols]
print(one_hot_cols, num_cols)
X_OHE, X_NUM = self.X[one_hot_cols].copy(), self.X[num_cols].copy()
self.OHE = OneHotEncoder(drop="first", handle_unknown="error")
X_OHE = self.OHE.fit_transform(X_OHE).toarray()
return X_OHE, X_NUM.to_numpy()
def scaling(self):
print("Scaling")
X_OHE, X_num = self.encoding()
self.SS = StandardScaler()
X_num = self.SS.fit_transform(X_num)
self.X_total = np.concatenate((X_OHE, X_num), axis=1)
def get_encoders(self):
return self.OHE
def get_scaler(self):
return self.SS
def get_Xy(self):
if not self.is_test:
self.scaling()
return self.X_total, self.y
else:
self.split_target()
return self.X
pre_test = Preprocessing(df_test, True)
X_test = pre_test.get_Xy()
print("-" * 50)
pre_train = Preprocessing(df_train)
X, y = pre_train.get_Xy()
SS = pre_train.get_scaler()
OHE = pre_train.get_encoders()
one_hot_cols = [
column
for column in X_test.columns
if X_test[column].dtype == "category" or X_test[column].dtype == "object"
]
num_cols = [col for col in X_test.columns if col not in one_hot_cols]
X_OHE, X_NUM = X_test[one_hot_cols].copy(), X_test[num_cols].copy()
X_OHE = OHE.transform(X_OHE).toarray()
X_NUM = SS.transform(X_NUM)
X_test = np.concatenate((X_OHE, X_NUM), axis=1)
unique, counts = np.unique(y, return_counts=True)
unique, counts
from imblearn.over_sampling import SMOTE
def apply_smote(X, y, random_state=None):
"""
Applies SMOTE to the input features (X) and target variable (y) to balance the dataset.
Parameters:
X: numpy array or pandas DataFrame with the input features
y: numpy array or pandas Series with the target variable
random_state: int, default=None, controls the randomness of the SMOTE algorithm
Returns:
X_resampled: numpy array with the resampled input features
y_resampled: numpy array with the resampled target variable
"""
smote = SMOTE(random_state=random_state)
X_resampled, y_resampled = smote.fit_resample(X, y)
return X_resampled, y_resampled
X_resampled, y_resampled = apply_smote(X, y, random_state=42)
print(X_resampled.shape)
print(y_resampled.shape)
import optuna
from lightgbm import LGBMClassifier
def objective_lgbm(trial):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
param = {
"random_state": 42,
"n_estimators": trial.suggest_categorical(
"n_estimators", [150, 200, 300, 3000]
),
"reg_alpha": trial.suggest_loguniform("reg_alpha", 1e-3, 10.0),
"reg_lambda": trial.suggest_loguniform("reg_lambda", 1e-3, 10.0),
"colsample_bytree": trial.suggest_categorical(
"colsample_bytree", [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
),
"subsample": trial.suggest_categorical(
"subsample", [0.4, 0.5, 0.6, 0.7, 0.8, 1.0]
),
"learning_rate": trial.suggest_categorical(
"learning_rate", [0.001, 0.006, 0.008, 0.01, 0.014, 0.017, 0.02]
),
"max_depth": trial.suggest_categorical("max_depth", [10, 20, 100]),
"num_leaves": trial.suggest_int("num_leaves", 1, 1000),
"min_child_samples": trial.suggest_int("min_child_samples", 1, 300),
"cat_smooth": trial.suggest_int("min_data_per_groups", 1, 100),
}
model = LGBMClassifier(**param)
model.fit(
X_train,
y_train,
early_stopping_rounds=100,
eval_set=[(X_test, y_test)],
verbose=False,
)
preds = model.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, preds)
return auc_score
# study = optuna.create_study(direction='maximize')
# study.optimize(objective_lgbm, n_trials=50)
# params_lgbm = study.best_trial.params
# print('Number of finished trials:', len(study.trials))
# print('Best trial:', params_lgbm)
# With a higher number of features, the performance of LGBM decreases
params = {
"n_estimators": 150,
"reg_alpha": 0.0019653343724204955,
"reg_lambda": 0.08634203265588371,
"colsample_bytree": 0.7,
"subsample": 0.4,
"learning_rate": 0.02,
"max_depth": 100,
"num_leaves": 822,
"min_child_samples": 26,
"min_data_per_groups": 44,
}
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.2, random_state=42
)
lgbm_model = LGBMClassifier(**params)
lgbm_model.fit(
X_train,
y_train,
early_stopping_rounds=50,
eval_set=[(X_valid, y_valid)],
verbose=False,
)
y_pred = lgbm_model.predict_proba(X_valid)[:, 1]
print(f"Auc: {round(roc_auc_score(y_valid,y_pred), 2)}")
import time
def cross_validate_model(
model,
X_test,
n_splits=10,
):
KF = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
aucs = []
y_test = []
oof_preds = []
test_preds = []
for index, (train_index, val_index) in enumerate(KF.split(X, y)):
print(f"Fold {index+1} out of {n_splits}")
start = time.time()
X_train, X_val = X[train_index], X[val_index]
y_train, y_val = y[train_index], y[val_index]
model.fit(
X_train,
y_train,
early_stopping_rounds=50,
eval_set=[(X_val, y_val)],
verbose=False,
)
y_pred = model.predict_proba(X_val)[:, 1]
y_test = model.predict_proba(X_test)[:, 1]
oof_preds.append(y_pred)
test_preds.append(y_test)
auc = roc_auc_score(y_val, y_pred)
print(f"Auc: {round(roc_auc_score(y_val, y_pred), 2)}")
aucs.append(auc)
end = time.time()
print(f"This Fold {index+1}, took {end - start} seconds.")
    return aucs, np.mean(test_preds, axis=0)  # average the per-fold predictions on the test set
aucs, y_test = cross_validate_model(LGBMClassifier(**params), X_test=X_test)
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
submission["target"] = y_test
submission.to_csv("submission.csv", index=False)
import optuna
from xgboost import XGBClassifier
def objective_xgb(trial):
X_train, X_test, y_train, y_test = train_test_split(
X_resampled, y_resampled, test_size=0.2, random_state=42
)
param = {
"lambda": trial.suggest_loguniform("lambda", 1e-3, 10.0),
"alpha": trial.suggest_loguniform("alpha", 1e-3, 10.0),
"colsample_bytree": trial.suggest_categorical(
"colsample_bytree", [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
),
"subsample": trial.suggest_categorical("subsample", [0.6, 0.7, 0.8, 1.0]),
"learning_rate": trial.suggest_categorical(
"learning_rate", [0.008, 0.009, 0.01, 0.012, 0.014, 0.016, 0.018, 0.02]
),
"n_estimators": trial.suggest_categorical(
"n_estimators", [150, 200, 300, 3000]
),
"max_depth": trial.suggest_categorical(
"max_depth", [4, 5, 7, 9, 11, 13, 15, 17]
),
"random_state": 42,
"min_child_weight": trial.suggest_int("min_child_weight", 1, 300),
}
model = XGBClassifier(**param)
model.fit(
X_train,
y_train,
early_stopping_rounds=100,
eval_set=[(X_test, y_test)],
verbose=False,
)
preds = model.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, preds)
return auc_score
# study = optuna.create_study(direction='maximize')
# study.optimize(objective_xgb, n_trials=50)
# params_xgb = study.best_trial.params
# print('Number of finished trials:', len(study.trials))
# print('Best trial:', params_xgb)
# XGB accuracy increases with more features
X_train, X_valid, y_train, y_valid = train_test_split(
X_resampled, y_resampled, test_size=0.2, random_state=42
)
# NOTE: params_xgb is produced by the Optuna study above, which is currently
# commented out; run that study first (or supply a parameter dict) before fitting.
model = XGBClassifier(**params_xgb)
model.fit(
X_train,
y_train,
early_stopping_rounds=50,
eval_set=[(X_valid, y_valid)],
verbose=False,
)
y_pred = model.predict_proba(X_valid)[:, 1]
print(f"Auc: {round(roc_auc_score(y_valid,y_pred), 2)}")
y_pred = lgbm_model.predict_proba(X_test)[:, 1]
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
submission["target"] = y_pred
submission.to_csv("submission.csv", index=False)
|
import os
os.environ["WANDB_DISABLED"] = "true"
import torch
import pandas as pd
from torch import nn
from datasets import Dataset, DatasetDict
from transformers import (
BertTokenizerFast,
TrainingArguments,
Trainer,
BertForSequenceClassification,
)
from sklearn.model_selection import train_test_split
pd.set_option("max_colwidth", 300)
SEED = 0
BATCH_SIZE = 38
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ### Choosing the model and the tokenizer
model_name = "bert-base-cased"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
bert_model = BertForSequenceClassification.from_pretrained(
model_name, num_labels=1, ignore_mismatched_sizes=True
)
bert_model.to(DEVICE)
# ### Reading and preprocessing the dataset
train_df = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/train.csv", usecols=[3, 4]
)
train_df = train_df.rename(columns={"excerpt": "text", "target": "labels"})
train_df, eval_df = train_test_split(
train_df, test_size=0.1, stratify=pd.cut(train_df["labels"], 5), random_state=SEED
)
train_df.head()
# ### Converting the dataset into transformer-friendly format
def create_dataset(train, evaluation):
dataset = DatasetDict()
dataset["train"] = Dataset.from_dict(train.to_dict(orient="list"), split="train")
dataset["eval"] = Dataset.from_dict(evaluation.to_dict(orient="list"), split="eval")
return dataset
dataset = create_dataset(train_df, eval_df)
# ### Preparing the dataset for feeding into the model
def tokenize(examples):
return tokenizer(
examples["text"],
padding="max_length",
truncation=True,
max_length=300,
return_tensors="pt",
return_attention_mask=True,
)
tokenized_dataset = dataset.map(
tokenize, batched=True, batch_size=64, remove_columns="text"
)
tokenized_dataset.set_format(
"torch", columns=["input_ids", "attention_mask"], output_all_columns=True
)
# ### Setting all training params
training_args = TrainingArguments(
report_to=None,
output_dir="trainer",
evaluation_strategy="epoch",
per_device_train_batch_size=BATCH_SIZE,
per_device_eval_batch_size=BATCH_SIZE,
num_train_epochs=3.0,
learning_rate=1e-5,
optim="adamw_torch",
)
# ### Calculating RMSE metrics
def compute_metrics(pred):
labels = torch.from_numpy(pred.label_ids)
preds = torch.from_numpy(pred.predictions).squeeze()
mse = torch.mean((preds - labels) ** 2)
rmse = torch.sqrt(mse)
return {
"rmse": rmse,
}
# ### Creating custom trainer
# We need it for calculating RMSE loss
class BertRegressorTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
labels = inputs["labels"]
outputs = model(**inputs)
loss = torch.sqrt(nn.functional.mse_loss(outputs["logits"].squeeze(), labels))
return (loss, outputs) if return_outputs else loss
trainer = BertRegressorTrainer(
model=bert_model,
args=training_args,
train_dataset=tokenized_dataset["train"].shuffle(seed=SEED),
eval_dataset=tokenized_dataset["eval"].shuffle(seed=SEED),
compute_metrics=compute_metrics,
)
trainer.train()
trainer.save_model("/model/")
tokenizer.save_pretrained("/tokenizer/")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
imagelist = dict()
subfolders = [
f.path
for f in os.scandir("/kaggle/input/food11-image-dataset/training")
if f.is_dir()
]
for folder in subfolders:
classname = folder.split("/")[5]
imagelist[classname] = list()
img_folder = "data/"
if not os.path.exists(img_folder) or len(os.listdir(img_folder)) == 0:
os.makedirs(img_folder, exist_ok=True)
import shutil
import os
import csv
filename = "/kaggle/working/file.csv" # enter your filename with file path
file = open(filename, "w")
file.write(str("class") + "," + str("name") + "\n")
dest = "/kaggle/working/data"
unique_class = set()
for dirname in list(subfolders):
src_files = os.listdir(dirname)
for file_name in src_files:
classname = dirname.split("/")[5]
newfile = classname + file_name
unique_class.add(classname)
shutil.copy(dirname + "/" + file_name, dest + "/" + newfile)
file.write(str(classname) + "," + str("/kaggle/working/data/" + newfile) + "\n")
imagelist[classname].append("/kaggle/working/data/" + newfile)
file.close()
item_df = pd.read_csv(filename, on_bad_lines="skip")
item_df.head(1)
item_df.shape
itemid = "name"
item_df["image"] = item_df[itemid]
item_df.head(1)
from sklearn.model_selection import train_test_split
item_df, _ = train_test_split(item_df, test_size=0.2, stratify=item_df["class"])
for filename in os.listdir("/kaggle/working/data"):
f = os.path.join("/kaggle/working/data", filename)
# checking if it is a file
if os.path.isfile(f):
if item_df[item_df["name"] == str(f)].shape[0] == 0:
os.remove(f)
item_df.shape
# # Search with FAISS GPU accuracy
#
import torch
import faiss
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("clip-ViT-B-32")
device = torch.device("cuda")
model.to(device)
import torch
torch.cuda.is_available()
import glob
images_list = list(glob.glob("/kaggle/working/data/*.jpg"))
import PIL
print("Images:", len(images_list))
img_emb = model.encode(
[PIL.Image.open(filepath) for filepath in images_list],
batch_size=512,
convert_to_tensor=False,
show_progress_bar=True,
)
img_emb.shape[1]
image_eval_list = dict()
subfolders = [
f.path
for f in os.scandir("/kaggle/input/food11-image-dataset/evaluation")
if f.is_dir()
]
for folder in subfolders:
classname = folder.split("/")[5]
image_eval_list[classname] = list()
img_folder = "data/"
if not os.path.exists(img_folder) or len(os.listdir(img_folder)) == 0:
os.makedirs(img_folder, exist_ok=True)
import shutil
import os
import csv
filename = "/kaggle/working/file_eval.csv" # enter your filename with file path
file = open(filename, "w")
file.write(str("class") + "," + str("name") + "\n")
dest = "/kaggle/working/data"
unique_class = set()
for dirname in list(subfolders):
src_files = os.listdir(dirname)
for file_name in src_files:
classname = dirname.split("/")[5]
newfile = classname + file_name
unique_class.add(classname)
file.write(str(classname) + "," + str(dirname + "/" + file_name) + "\n")
# image_eval_list[classname].append("/kaggle/working/data/"+newfile)
file.close()
eval_df = pd.read_csv(filename, on_bad_lines="skip")
eval_df.head(1)
# encoded_data = np.asarray(img_emb)
index = faiss.IndexIDMap(faiss.IndexFlatIP(img_emb.shape[1]))
index.add_with_ids(img_emb, np.array(range(0, len(images_list))))
faiss.write_index(index, "item.index")
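# Side note (not what is done above): IndexFlatIP scores by raw inner product on
# the unnormalised CLIP embeddings. If cosine similarity is preferred, the
# embeddings and the queries can be L2-normalised first. A minimal sketch, kept
# commented out; img_emb_cos and index_cos are illustrative names.
# img_emb_cos = np.ascontiguousarray(img_emb, dtype="float32")
# faiss.normalize_L2(img_emb_cos)  # in-place L2 normalisation
# index_cos = faiss.IndexIDMap(faiss.IndexFlatIP(img_emb_cos.shape[1]))
# index_cos.add_with_ids(img_emb_cos, np.arange(len(images_list)))
# # (queries would need the same normalisation before searching index_cos)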
from IPython.display import display, Image  # needed to render the retrieved images below
### fetch and display the top-k images matching a query
def searchfaiss(query, k):
qv = model.encode([query])
top_k = index.search(qv, k)
top_k_ids = top_k[1].tolist()[0]
top_k_ids = list(np.unique(top_k_ids))
results = [images_list[idx] for idx in top_k_ids]
for i in results:
display(Image(filename=i))
return results
eval_df = eval_df.head(1000)
# ## FAISS
## Evaluate
import time
def searchfaiss_id(query, k):
qv = model.encode([query], show_progress_bar=False)
top_k = index.search(qv, k)
top_k_ids = top_k[1].tolist()[0]
top_k_ids = list(np.unique(top_k_ids))
results = [images_list[idx] for idx in top_k_ids]
return results
results_clip_faiss = dict()
curr_time = round(time.time() * 1000)
count = 0
for i in range(len(eval_df)):
results_clip_faiss[eval_df.loc[i, "name"]] = searchfaiss_id(
PIL.Image.open(eval_df.loc[i, "name"]), k=10
)
count += 1
end_time = round(time.time() * 1000)
print("Total images:", count)
print("Total search time (ms):", end_time - curr_time)
print("Average time per query (ms):", (end_time - curr_time) / count)
total_hit = 0
for key, lst in results_clip_faiss.items():
label = list(eval_df[eval_df["name"] == key]["class"])[0]
# print(key)
# print(label)
# print("-------------")
for i in lst:
# print(i)
res_label = list(item_df[item_df["image"] == i]["class"])[0]
if res_label == label:
total_hit += 1
print(total_hit)
# break
print("Precision@10: " + str(total_hit / (count * 10)))
# # hnswlib
# img_emb
import hnswlib
import torch
embedding_size = img_emb.shape[1]
index_path = "hnswlib"
index = hnswlib.Index(space="cosine", dim=embedding_size)
index.init_index(max_elements=len(img_emb), ef_construction=400, M=64)
index.add_items(img_emb, list(range(len(img_emb))))
index.save_index(index_path)
def searchhnswlib_id(query, k=10):
qv = model.encode([query], show_progress_bar=False)
corpus_ids, distances = index.knn_query(qv, k)
hits = [
{"corpus_id": id, "score": 1 - score}
for id, score in zip(corpus_ids[0], distances[0])
]
hits = sorted(hits, key=lambda x: x["score"], reverse=True)
results = []
for hit in hits:
results.append(images_list[hit["corpus_id"]])
return results
results_hns = dict()
curr_time = round(time.time() * 1000)
count = 0
for i in range(len(eval_df)):
results_hns[eval_df.loc[i, "name"]] = searchhnswlib_id(
PIL.Image.open(eval_df.loc[i, "name"]), k=10
)
count += 1
end_time = round(time.time() * 1000)
print("Total images:", count)
print("Total search time (ms):", end_time - curr_time)
print("Average time per query (ms):", (end_time - curr_time) / count)
total_hit = 0
for key, lst in results_hns.items():
label = list(eval_df[eval_df["name"] == key]["class"])[0]
# print(key)
# print(label)
# print("-------------")
for i in lst:
# print(i)
res_label = list(item_df[item_df["image"] == i]["class"])[0]
if res_label == label:
total_hit += 1
print(total_hit)
# break
print("Precision@10: " + str(total_hit / (count * 10)))
|
# # The Fractal Time Series Dataset for Evaluating Probabilistic Forecasting: fractional Brownian motion data generator
# # Dataset that was created with the notebook:
# # https://www.kaggle.com/datasets/unfriendlyai/fractal-brownian-motion
import numpy as np
import gc
len_left = 1024 # Time series length
len_right = 1024 # Future quantiles length
hurst = 0.45 # Hurst exponent
# ## Hosking method for 30001 future variants of the common part of the TS
def hosking_commonstart(N=200, H=0.7, M=1, n_start=1):
"""
    Generates sample paths of fractional Brownian motion (fBm) using the Hosking method.
    Source: https://github.com/732jhy/Fractional-Brownian-Motion
    args:
        N: number of future time steps to generate (the full series has n_start + N steps)
        H: Hurst parameter of the fBm series
        M: number of generated variants (sample paths)
        n_start: number of common starting values shared by all M variants
    output:
        X: array of shape [n_start + N, M] whose first n_start rows are identical
           across all M columns; the remaining rows are M variants of the probable future
"""
warm_up = 0 # number of previous time steps that will be ignored in output
n_start += warm_up
N += n_start
gam0 = np.arange(0, N + 2) ** (2 * H)
c = np.array(
[0.5 * (gam0[k - 1] - 2 * gam0[k] + gam0[k + 1]) for k in range(1, N + 1)]
)
X = np.random.standard_normal((N, M))
X[:n_start, :] = X[:n_start, 0:1]
mu = c[0] * X[0, :]
tau = [c[0] ** 2]
sigsq = [1 - tau[0]]
d = np.array([c[0]])
for n in range(1, N):
# sigma(n+1)**2
s = sigsq[n - 1] - ((c[n] - tau[n - 1]) ** 2) / sigsq[n - 1]
# d(n+1)
phi = (c[n] - tau[n - 1]) / sigsq[n - 1]
d = d - phi * d[::-1]
d = np.append(d, phi)
# mu(n+1) and tau(n+1)
X[n] = mu + sigsq[n - 1] * X[n]
sigsq.append(s)
mu = d @ X[: n + 1][::-1]
tau.append(c[: n + 1][::-1] @ d)
X = X[warm_up:, :] / X[n_start:, :].std()
return X
fbm_data = {"H": hurst, "ts": [], "quantiles": []}
for _ in range(100):
X = hosking_commonstart(len_right, hurst, 30001, n_start=len_left).transpose()
X_cum = np.cumsum(X, axis=1)
ts = X_cum[0, :len_left]
qu = np.sort(X_cum[:, len_left:], axis=0)[::300, :]
fbm_data["ts"].append(ts)
fbm_data["quantiles"].append(qu)
del X_cum
del qu
gc.collect()
np.save(f"fbm{len_left}_h{round(hurst*100):02d}", fbm_data)
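# Loading sketch for downstream use (illustrative, not executed here): because
# fbm_data is a Python dict, np.save stores it as a pickled object array, so it
# has to be read back with allow_pickle=True and unwrapped with .item().
# loaded = np.load(f"fbm{len_left}_h{round(hurst*100):02d}.npy", allow_pickle=True).item()
# ts_0, quantiles_0 = loaded["ts"][0], loaded["quantiles"][0]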
|
# # 📲 **Imports.**
# This cell contains a list of Python libraries and modules that are being imported for use in the notebook. These imports include both standard Python libraries (such as 📁 **io**, 📁 **os**, 📁 **csv**, ➗ **math**, and 🌊 **logging**) and popular third-party libraries (such as 📷 **cv2**, 🧮 **numpy**, 🐼 **pandas**, 🌊 **seaborn**, 🔥 **torch**, 🤖 **sklearn**, 🤖 **models**, 🤖 **transforms**, 🧑🔬 **optim**, and 🧠 **nn**).
# Some of the libraries are used for data manipulation and visualization (such as 🧮 **numpy**, 🐼 **pandas**, and 🌊 **seaborn**), while others are used for machine learning and deep learning tasks (such as 🔥 **torch**, 🤖 **sklearn**, 🤖 **models**, 🤖 **transforms**, 🧑🔬 **optim**, and 🧠 **nn**). The 📷 **cv2** library is specifically used for computer vision tasks, while the 🌊 **logging** library is used for logging messages during the execution of the code.
import io
import os
import cv2
import csv
import time
import copy
import math
import torch
import shutil
import logging
import argparse
import numpy as np
import torchvision
import pandas as pd
import seaborn as sb
import torch.nn as nn
from PIL import Image
from tqdm import tqdm
import torch.optim as optim
from sklearn import datasets
import matplotlib.pyplot as plt
from tqdm.notebook import trange
from statistics import mean, stdev
from torchvision.utils import make_grid
import torch.utils.model_zoo as model_zoo
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from sklearn.model_selection import (
train_test_split,
StratifiedKFold,
StratifiedShuffleSplit,
KFold,
)
# # ✔ **Checking Pytorch and Torchvision Versions.**
# This cell 🔍 prints out the current versions of the 🔥 PyTorch and 🔍 Torchvision libraries that are installed in the notebook's environment. 🔥 PyTorch is a popular machine learning library, while 🔍 Torchvision is a package that provides image and video datasets and models for 🔥 PyTorch.
# Checking the versions of these libraries is important for ensuring that the notebook is using the correct and up-to-date versions of the libraries, which can affect the performance and compatibility of the code. The print function is used to display the 🔥 **PyTorch** and 🔍 **Torchvision** versions, and the 🔥 **torch** and 🔍 **torchvision** modules are imported at the beginning of the notebook to access their version numbers. 🔬
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
# # ⌨ **Defining some parameters for the model.**
# This cell sets several important parameters that will be used to configure and train a machine learning model. The **num_classes**, **batch_size**, and **num_epochs** variables specify the number of classes in the dataset, the batch size for training, and the number of epochs to train for, respectively. These are common parameters in machine learning tasks and can affect the performance and training time of the model.
# The model_choice variable specifies the choice of the machine learning model to be used in the notebook, with options such as "**Resnet152**", "**Resnet34**", and "**ViT-L**". The **learning_rate**, **SGD_momentum**, and **SGD_weight_decay** variables define the hyperparameters used for training the model.
# Lastly, the **feature_extract** variable is a boolean that determines whether to fine-tune all layers of the model or just the last layer. This can be important in transfer learning scenarios where a pre-trained model is used.
# This cell provides a convenient and organized way to set these important parameters for the machine learning model.
num_classes = 100
batch_size = 128
num_epochs = 40
model_choice = "Resnet152"
learning_rate = 0.001
SGD_momentum = 0.9
SGD_weight_decay = 4e-4
feature_extract = False
# # 🔧 **Transformation Configurations.**
# This cell discusses how to configure PyTorch transforms for image preprocessing 🖼️, which is an important step in developing deep learning models for image classification tasks 🤖. The code shows how to define different sets of transforms for training and testing data 🔧, including random horizontal flipping 🔁, random rotation 🔄, random adjustment of sharpness 🔍, color jitter 🎨, normalization 📊, and random erasing ❌. The use of these transforms can improve the model's performance by increasing the variability and diversity of the training data 💪.
transform_train = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(0.1),
transforms.RandomRotation(20),
transforms.ToTensor(),
transforms.RandomAdjustSharpness(sharpness_factor=2, p=0.1),
transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
transforms.RandomErasing(p=0.75, scale=(0.02, 0.1), value=1.0, inplace=False),
]
)
transform_train_without_transformers = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Resize((224, 224)),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
)
# # 🖨 **Datasets and Dataloaders.**
# This cell deals with the configuration of data for training and validation of a model 🧑🔬. The code in the cell sets up the data in three different ways, with **transformers** 🔧, **without transformers** ❌, and for **validation** 🔍. The **torchvision.datasets.CIFAR100** function is used to load the data 🔍, and **torch.utils.data.DataLoader** is used to create dataloaders for the data 🔢. The dataloaders are grouped into a dictionary dataloaders_dict for convenience 📚.
""" Training Dataset & Dataloaders with Transformers
"""
train_set = torchvision.datasets.CIFAR100(
root="./data", train=True, download=True, transform=transform_train
)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=batch_size, shuffle=True, num_workers=1
)
""" Training Dataset & Dataloaders without Transformers
"""
train_set_without_transformers = torchvision.datasets.CIFAR100(
root="./data",
train=True,
download=True,
transform=transform_train_without_transformers,
)
train_loader_without_transformers = torch.utils.data.DataLoader(
train_set_without_transformers, batch_size=batch_size, shuffle=True, num_workers=0
)
""" Validation Dataset & Dataoaders
"""
validation_set = torchvision.datasets.CIFAR100(
root="./data", train=False, download=True, transform=transform_test
)
validation_loader = torch.utils.data.DataLoader(
validation_set, batch_size=batch_size, shuffle=False, num_workers=0
)
dataloaders_dict = {}
dataloaders_dict["Train"] = train_loader
dataloaders_dict["Validation"] = validation_loader
# # 📷 **Function for showing batch of the images.**
# In machine learning projects, it is often necessary to visualize a batch of images during training to check if the data is being processed correctly 👨💻👀📷. This is where the "**show_batch**" function comes in handy 🤖. This function takes in a batch of data and displays a grid of images with their corresponding labels using **Matplotlib** 📊.
# The "**show_batch**" function is defined with a single parameter, "**data**", which represents the batch of images and their labels 🔍. Within the function, a for loop is used to iterate over each batch in the data 🔁. The function then creates a figure with a large size using "**plt.subplots(figsize=(30, 30))**" 📈. The axes are set to remove ticks and labels using "**ax.set_xticks([]); ax.set_yticks([])**" ❌, and the images are displayed in a grid format using "**ax.imshow(make_grid(images, nrow=16).permute(1, 2, 0))**" 📷. The "**break**" statement is used to display only the first batch of images and their corresponding labels ⏭️.
def show_batch(data):
for images, labels in data:
fig, ax = plt.subplots(figsize=(30, 30))
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(make_grid(images, nrow=16).permute(1, 2, 0))
break
# # 📸 **Showing Batch of The Images Before Applying Transformers.**
# The code in the cell below this topic simply calls the **show_batch**() function with the **train_loader_without_transformers** parameter, which displays the first batch of images in the loader 📷👀. This can be useful for inspecting the raw data and checking for any issues or inconsistencies before applying transformations 🔧❌.
show_batch(train_loader_without_transformers)
# # 📸 **Showing Batch of The Images After Applying Transformers.**
# The code in the cell uses the **show_batch**() function to display a batch of images from the **train_loader** object, which contains the transformed images 📷🔍. The resulting images should appear different from the original images in terms of their **orientation** 🔄, **brightness** 🌞, **contrast** 🌑, and **sharpness** 🔪. The purpose of this display is to visualize the effect of the transformations on the images 👀🌟, which can help the developer to assess the quality of the transformations and their suitability for the task at hand 💻.
show_batch(train_loader)
# # 📜 **Function to get the learning rate to view it in every iteration.**
# This cell focuses on a function that retrieves the learning rate of the optimizer at each training iteration 🔎🔢. The function takes in an **optimizer** as an argument and returns the current learning rate value 💡. This is useful for monitoring the learning rate as it changes throughout the training process and can help in adjusting the learning rate to optimize the model's performance 📈📉.
# To use this function, you can simply pass your **optimizer** object as an argument and call the function during training iterations to view the current learning rate 💻🔄. It's a helpful tool for those who want to fine-tune their model's performance and achieve better results in their machine learning projects 🎓🚀.
def Learning_Rate(optimizer):
for param_group in optimizer.param_groups:
return param_group["lr"]
# # 📝 **Training Loop (The *definition* of the function).**
# The following code defines a Python function called "train_model" that implements the training loop for a given model architecture, dataset, loss function, optimizer, and learning rate scheduler.
# The function takes five required input arguments, plus an optional "num_epochs":
# "**model**" refers to the model architecture that needs to be trained.
# "dataloaders" refers to a dictionary containing the training and validation data loaders.
# "**criterion**" refers to the loss function that needs to be optimized.
# "**optimizer**" refers to the optimization algorithm that updates the model parameters.
# "**scheduler**" refers to the learning rate scheduler that dynamically adjusts the learning rate during training.
# The "**num_epochs**" parameter specifies the number of times the model will iterate over the entire dataset during training.
# During each epoch, the training loop iterates over the training and validation data loaders, computing the loss and accuracy on each batch of data. The model parameters are updated based on the gradients of the loss function with respect to the model parameters. The learning rate scheduler adjusts the learning rate at each epoch based on a predefined schedule.
# The function also saves the model with the best validation accuracy and the final model parameters in two different files for later use.
# The training loop's progress is printed out for each epoch, including the loss, accuracy, and learning rate for both the training and validation phases.
# The function returns the trained model, along with four lists that contain the **training** and **validation** **accuracy** and **loss** history.
def train_model(model, dataloaders, criterion, optimizer, scheduler, num_epochs=10):
since = time.time()
training_accuracy_history = []
training_loss_history = []
validation_accuracy_history = []
validation_loss_history = []
best_acc = 0.0
for epoch in trange(
num_epochs,
desc=f"Model: {model_choice}, Number of Epochs: {num_epochs}, Batch Size: {batch_size}, Learning Rate: {(Learning_Rate(optimizer)):.9f} ",
):
print("Epoch {}/{}".format(epoch + 1, num_epochs))
print("-" * 10)
# Each epoch has a training and validation phase
for phase in ["Train", "Validation"]:
if phase == "Train":
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "Train"):
outputs = model_ft(inputs.to(device))
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
if phase == "Train":
loss.backward()
def closure():
outputs = model_ft(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
loss.backward()
return loss
optimizer.step(closure)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print(
f"{phase} Loss: {epoch_loss:.9f}, Accuracy: {(epoch_acc * 100):.9f}%, Learning Rate: {(Learning_Rate(optimizer)):.9f}"
)
if phase == "Validation" and epoch_acc > best_acc:
best_acc = epoch_acc
# best_model_wts = copy.deepcopy(model.state_dict())
torch.save(model_ft.state_dict(), "./Best_Checkpoint.pth")
if phase == "Train":
training_accuracy_history.append(epoch_acc.item() * 100)
training_loss_history.append(epoch_loss)
if phase == "Validation":
validation_accuracy_history.append(epoch_acc.item() * 100)
validation_loss_history.append(epoch_loss)
torch.save(model_ft.state_dict(), "./Last_Checkpoint.pth")
scheduler.step()
print()
time_elapsed = time.time() - since
print(
"Training completed in {:.0f}h {:.0f}m {:.0f}s".format(
time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60
)
)
    print("Best Validation Accuracy: {:.4f}%".format(best_acc * 100))
model.load_state_dict(torch.load("./Best_Checkpoint.pth"))
return (
model,
validation_accuracy_history,
training_accuracy_history,
validation_loss_history,
training_loss_history,
)
# # 📝 **Choosing an Architecture (The *definition* of the function).**
# The code defines a function called "**initialize_model**" that takes in parameters such as the **model name**, **number of classes**, and whether to use **pre-trained weights or not**.
# The function then checks which model was selected and initializes it accordingly. For example, if "**Resnet34**" was selected, the function initializes a **Resnet34** model and replaces its final fully connected layer with a new one that has the specified **number of classes**. The function also allows for **fine-tuning** of the pre-trained models by setting some layers to require **gradient updates** while leaving others **frozen**.
# Other models included in the function are **Resnet18**, **Resnet101**, **Resnext101**, **Resnet152**, **Alexnet**, **VGG11**, **Squeezenet**, **Densenet121**, *ViT-H*, and **ViT-L**. The **ViT models** are **Vision Transformers**, which are a type of attention-based neural network that has shown promising results in image classification tasks.
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
model_ft = None
if model_name == "Resnet34":
"""Resnet34"""
model_ft = models.resnet34(models.ResNet34_Weights.DEFAULT)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Dropout(0.1), nn.Linear(num_ftrs, num_classes))
elif model_name == "Resnet18":
"""Resnet18"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Sequential(nn.Dropout(0.1), nn.Linear(num_ftrs, num_classes))
        torch.nn.init.xavier_uniform_(model_ft.fc[1].weight)  # initialise the new head, not the discarded one
elif model_name == "Resnet101":
"""Resnet101"""
model_ft = models.resnet101(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Dropout(0.1), nn.Linear(num_ftrs, num_classes))
elif model_name == "Resnext101":
"""Resnext101"""
model_ft = models.resnext101_32x8d(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Dropout(0.1), nn.Linear(num_ftrs, num_classes))
elif model_name == "Resnet152":
"""Resnet152"""
model_ft = models.resnet152(models.ResNet152_Weights.DEFAULT)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Dropout(0.1), nn.Linear(num_ftrs, num_classes))
elif model_name == "Alexnet":
"""Alexnet"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
elif model_name == "VGG11":
"""VGG11"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
elif model_name == "Squeezenet":
"""Squeezenet"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(
            512, num_classes, kernel_size=(1, 1), stride=(1, 1)
        )
        model_ft.num_classes = num_classes
elif model_name == "Densenet121":
"""Densenet121"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
elif model_name == "ViT-H":
"""Vision Transform - H"""
model_ft = torchvision.models.vit_h_14(weights="DEFAULT")
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.heads.head.in_features
model_ft.heads.head = nn.Linear(num_ftrs, num_classes)
elif model_name == "ViT-L":
"""Vision Transform - L"""
model_ft = torchvision.models.vit_l_16(weights="DEFAULT")
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.heads.head.in_features
model_ft.heads.head = nn.Linear(num_ftrs, num_classes)
else:
print("Invalid model name, exiting...")
exit()
return model_ft
# # 📝 **PyramidNet Model Initialization.**
# This code defines the PyramidNet model, which is a type of deep convolutional neural network (CNN) for image classification. It consists of several layers of basic or bottleneck blocks that process the input image at different scales or resolutions to extract more informative features.
# The **conv3x3** function returns a 2D convolutional layer with a 3x3 kernel size and padding of 1. The **BasicBlock** and **Bottleneck** classes define the basic and bottleneck blocks, respectively, used in the PyramidNet architecture. The basic block consists of two convolutional layers, each followed by a batch normalization and ReLU activation function, and a residual connection. The bottleneck block consists of three convolutional layers, each followed by batch normalization and ReLU, and a residual connection.
# The **PyramidNet** class defines the entire model architecture by stacking several layers of basic or bottleneck blocks with different output feature map sizes. The **pyramidal_make_layer** function creates a layer of blocks with a stride of 1 or 2, depending on the given **stride** parameter. The model ends with a global average pooling layer followed by a fully connected layer; the softmax is applied implicitly by the cross-entropy loss during training.
# The **for** loop at the end initializes the weights of the convolutional layers using the He normal initialization method.
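# Worked example of the additive widening schedule (illustrative numbers only),
# using the configuration from the commented-out PyramidNet instantiation further
# down in this notebook (depth=220, alpha=96, bottleneck blocks): each block adds
# alpha / (3n) channels on top of the initial 16.
depth_example, alpha_example = 220, 96
n_example = (depth_example - 2) // 9  # blocks per stage with bottleneck blocks -> 24
addrate_example = alpha_example / (3 * n_example)  # ~1.33 extra channels per block
widths_example = [
    int(round(16 + addrate_example * k)) for k in range(1, 3 * n_example + 1)
]
# The first block widens 16 -> 17 channels, the last reaches 112; with the
# bottleneck expansion factor of 4 the final feature dimension is 112 * 4 = 448.
print(widths_example[0], widths_example[-1], widths_example[-1] * 4)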
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
)
class BasicBlock(nn.Module):
outchannel_ratio = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn3 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
featuremap_size = shortcut.size()[2:4]
else:
shortcut = x
featuremap_size = out.size()[2:4]
batch_size = out.size()[0]
residual_channel = out.size()[1]
shortcut_channel = shortcut.size()[1]
if residual_channel != shortcut_channel:
padding = torch.autograd.Variable(
torch.cuda.FloatTensor(
batch_size,
residual_channel - shortcut_channel,
featuremap_size[0],
featuremap_size[1],
).fill_(0)
)
out += torch.cat((shortcut, padding), 1)
else:
out += shortcut
return out
class Bottleneck(nn.Module):
outchannel_ratio = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, (planes * 1), kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn3 = nn.BatchNorm2d((planes * 1))
self.conv3 = nn.Conv2d(
(planes * 1),
planes * Bottleneck.outchannel_ratio,
kernel_size=1,
bias=False,
)
self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn4(out)
if self.downsample is not None:
shortcut = self.downsample(x)
featuremap_size = shortcut.size()[2:4]
else:
shortcut = x
featuremap_size = out.size()[2:4]
batch_size = out.size()[0]
residual_channel = out.size()[1]
shortcut_channel = shortcut.size()[1]
if residual_channel != shortcut_channel:
padding = torch.autograd.Variable(
torch.cuda.FloatTensor(
batch_size,
residual_channel - shortcut_channel,
featuremap_size[0],
featuremap_size[1],
).fill_(0)
)
out += torch.cat((shortcut, padding), 1)
else:
out += shortcut
return out
class PyramidNet(nn.Module):
def __init__(self, dataset, depth, alpha, num_classes, bottleneck=False):
super(PyramidNet, self).__init__()
self.dataset = dataset
self.inplanes = 16
if bottleneck == True:
n = int((depth - 2) / 9)
block = Bottleneck
else:
n = int((depth - 2) / 6)
block = BasicBlock
self.addrate = alpha / (3 * n * 1.0)
self.input_featuremap_dim = self.inplanes
self.conv1 = nn.Conv2d(
3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
self.featuremap_dim = self.input_featuremap_dim
self.layer1 = self.pyramidal_make_layer(block, n)
self.layer2 = self.pyramidal_make_layer(block, n, stride=2)
self.layer3 = self.pyramidal_make_layer(block, n, stride=2)
self.final_featuremap_dim = self.input_featuremap_dim
self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
self.relu_final = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def pyramidal_make_layer(self, block, block_depth, stride=1):
downsample = None
if (
stride != 1
): # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio:
downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)
layers = []
self.featuremap_dim = self.featuremap_dim + self.addrate
layers.append(
block(
self.input_featuremap_dim,
int(round(self.featuremap_dim)),
stride,
downsample,
)
)
for i in range(1, block_depth):
temp_featuremap_dim = self.featuremap_dim + self.addrate
layers.append(
block(
int(round(self.featuremap_dim)) * block.outchannel_ratio,
int(round(temp_featuremap_dim)),
1,
)
)
self.featuremap_dim = temp_featuremap_dim
self.input_featuremap_dim = (
int(round(self.featuremap_dim)) * block.outchannel_ratio
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.bn_final(x)
x = self.relu_final(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
# # 📜 **Function that changes *grad* value in the model.**
# The code snippet below demonstrates a useful function that can be used to set the "**requires_grad**" attribute of model parameters to False, effectively freezing the corresponding layers during training.
# This function takes as input a PyTorch model and a boolean value indicating whether to freeze the feature extraction layers or not. If set to True, all parameters in the model will have their requires_grad attribute set to False, effectively freezing them during training.
# By using this function, you can easily customize the training process of your machine learning models by selectively freezing certain layers and fine-tuning others, resulting in improved accuracy and reduced overfitting.
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
# # ⚙ **Choosing an Architecture (The *call* of the function).**
# The code snippet below shows how the initialize_model function is called to create a model with the specified **model_choice**, **num_classes**, **feature_extract**, and **use_pretrained** parameters.
# By calling this function, the user can easily initialize a pre-trained model or train a new model from scratch with the desired architecture. The **model_ft** variable then holds the initialized model object which can be further customized and trained using various techniques.
model_ft = initialize_model(
model_choice, num_classes, feature_extract, use_pretrained=True
)
# # ⚙ **Choosing PyramidNet as Architecture.**
# The code below is an example of instantiating a **PyramidNet** architecture with the specified hyperparameters. Here, the **dataset** parameter specifies the dataset to be used, **depth** specifies the depth of the network, **alpha** controls the width of the network, **num_classes** specifies the number of classes in the classification task, and **bottleneck** determines whether to use bottleneck blocks or not. By experimenting with different architectures and hyperparameters, we can find the optimal network that best suits our task.
# model_ft = PyramidNet(dataset = train_set, depth = 220, alpha = 96, num_classes = num_classes, bottleneck = True)
# # 📠 **Checking if we want to extract the features or not.**
# The **params_to_update** variable is initialized with all the model's parameters. If **feature_extract** is set to True, the code creates an empty list and iterates through all the named parameters of the model. For each parameter whose requires_grad attribute is True, it appends the parameter to the **params_to_update** list. This means that only the parameters for the selected layers will be updated during the fine-tuning process. Otherwise, if **feature_extract** is False, all parameters with requires_grad set to True will be updated.
# This code allows for more flexibility in fine-tuning models, as it allows us to choose which layers to train and which ones to freeze.
params_to_update = model_ft.parameters()
# print("Params to learn:")
if feature_extract:
params_to_update = []
for name, param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
pass
# print("\t",name)
else:
for name, param in model_ft.named_parameters():
if param.requires_grad == True:
pass
# print("\t",name)
# # 🖥 **Transferring the model to the GPU to make the training cycle faster and more efficient.**
# The process of transferring a model to the GPU can significantly speed up the training cycle and increase its efficiency. In this code snippet, the model is transferred to the GPU using the PyTorch framework. First, the device is set to the GPU using **torch.device("cuda:0")**. Then, if there are multiple GPUs available, the model is wrapped in a **nn.DataParallel** module to utilize all available GPUs. Finally, the model is moved to the GPU using the **to()** method.
device = torch.device("cuda:0")
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model_ft = nn.DataParallel(model_ft)
model_ft.to(device)
# # ⌨ **Define the SAM optimizer class.**
# The code provided in the cell defines the SAM optimizer in PyTorch. The SAM optimizer takes as input a base optimizer (such as SGD or Adam) and adds a sharpness-aware step to the optimization process. The sharpness-aware step involves first computing an estimate of the local maximum of the loss surface in the vicinity of the current parameters, and then taking a step in that direction.
# The SAM optimizer also introduces a new hyperparameter called **rho**, which controls the step size in the sharpness-aware direction. If **rho** is set to zero, the optimizer reduces to the base optimizer.
# The code defines the SAM optimizer as a subclass of the PyTorch Optimizer class. It overrides the step method, which is called by PyTorch during the optimization process, to perform the two-step sharpness-aware update. It also implements the **_grad_norm** method, which computes the norm of the gradient used in the sharpness-aware update. Finally, it overrides the **load_state_dict** method to ensure that the state of the base optimizer is properly loaded when the state of the SAM optimizer is loaded.
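# For reference, a minimal sketch of the explicit two-step way to drive SAM per
# batch (defined here but not called anywhere in this notebook; the training loop
# above passes a closure to step() instead). The model, loader, and loss_fn
# arguments are placeholders supplied by the caller.
def sam_two_step_epoch(model, loader, loss_fn, sam_optimizer, device):
    model.train()
    for inputs, labels in loader:
        inputs, labels = inputs.to(device), labels.to(device)
        # First forward/backward pass: gradients at w, then climb to w + e(w)
        loss_fn(model(inputs), labels).backward()
        sam_optimizer.first_step(zero_grad=True)
        # Second forward/backward pass: gradients at the perturbed weights drive
        # the base optimizer's update applied back at w
        loss_fn(model(inputs), labels).backward()
        sam_optimizer.second_step(zero_grad=True)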
class SAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):
assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
super(SAM, self).__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
self.defaults.update(self.base_optimizer.defaults)
@torch.no_grad()
def first_step(self, zero_grad=False):
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = group["rho"] / (grad_norm + 1e-12)
for p in group["params"]:
if p.grad is None:
continue
self.state[p]["old_p"] = p.data.clone()
e_w = (
(torch.pow(p, 2) if group["adaptive"] else 1.0)
* p.grad
* scale.to(p)
)
p.add_(e_w) # climb to the local maximum "w + e(w)"
if zero_grad:
self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad=False):
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
p.data = self.state[p]["old_p"] # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
if zero_grad:
self.zero_grad()
@torch.no_grad()
def step(self, closure=None):
assert (
closure is not None
), "Sharpness Aware Minimization requires closure, but it was not provided"
closure = torch.enable_grad()(
closure
) # the closure should do a full forward-backward pass
self.first_step(zero_grad=True)
closure()
self.second_step()
def _grad_norm(self):
shared_device = self.param_groups[0]["params"][
0
].device # put everything on the same device, in case of model parallelism
norm = torch.norm(
torch.stack(
[
((torch.abs(p) if group["adaptive"] else 1.0) * p.grad)
.norm(p=2)
.to(shared_device)
for group in self.param_groups
for p in group["params"]
if p.grad is not None
]
),
p=2,
)
return norm
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
# # ⌨ **Defining the *loss function*, *optimizer* and the *scheduler.***
# The code in the cell below shows how to define a cross-entropy loss function and an optimizer using the **SAM** (Sharpness-Aware Minimization) algorithm, which helps to avoid sharp minima and improve generalization. The learning rate, momentum, and weight decay values are also specified for the optimizer. Finally, a scheduler is defined using the **CosineAnnealingLR** algorithm to adjust the learning rate over epochs.
criterion = nn.CrossEntropyLoss().to(device)
# optimizer = optim.SGD(params_to_update, lr = learning_rate, momentum = SGD_momentum, weight_decay = SGD_weight_decay)
base_optimizer = torch.optim.SGD
optimizer = SAM(
model_ft.parameters(),
base_optimizer,
adaptive=True,
lr=learning_rate,
momentum=SGD_momentum,
weight_decay=SGD_weight_decay,
)
# optimizer = optim.Adam(params_to_update, lr=1e-3)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode = 'min', factor = 0.001, patience = 5, threshold = 0.0001, threshold_mode='abs')
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
# # ⚙ **Calling the training loop function.**
# The code shown in the cell below calls the train_model function with appropriate arguments. The train_model function takes in the pre-trained model, data loaders, loss function, optimizer, scheduler, and number of epochs as arguments, and performs the training process.
# The function returns the trained model and the training and validation history for accuracy and loss, which can be used for analysis and visualization.
(
model_ft,
validation_accuracy_history,
training_accuracy_history,
validation_loss_history,
training_loss_history,
) = train_model(
model_ft, dataloaders_dict, criterion, optimizer, scheduler, num_epochs=num_epochs
)
# # 📈 **Plotting the Training and Validation Accuracies.**
# The code provided plots the training and validation accuracies curves using Matplotlib. The **training_accuracy_history** and **validation_accuracy_history** are arrays that store the training and validation accuracies respectively, for each epoch during the model training process.
# The **plt.plot()** function is used to plot the accuracy history arrays against the epochs. The **black** and **blue** colors are used to distinguish between the training and validation accuracies respectively. The **plt.legend()** function is used to add a legend to the plot, which helps to distinguish between the two curves. The **plt.xlabel()** and **plt.ylabel()** functions are used to label the x-axis and y-axis respectively. Finally, the **plt.title()** function is used to add a title to the plot.
# By visualizing the accuracy curves, it becomes easier to track the progress of the model during training and to identify whether overfitting or underfitting is occurring.
plt.figure(figsize=[6, 4])
plt.plot(training_accuracy_history, "black", linewidth=2.0)
plt.plot(validation_accuracy_history, "blue", linewidth=2.0)
plt.legend(["Training Accuracy", "Validation Accuracy"], fontsize=14)
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Accuracy", fontsize=10)
plt.title("Accuracy Curves", fontsize=12)
# # 📉 **Plotting the Training and Validation Losses.**
# The code provided plots the training and validation loss curves using Matplotlib. The **training_loss_history** and **validation_loss_history** are arrays that store the training and validation losses respectively, for each epoch during the model training process.
# The **plt.plot()** function is used to plot the loss history arrays against the epochs. The **black** and **green** colors are used to distinguish between the training and validation losses respectively. The **plt.legend()** function is used to add a legend to the plot, which helps to distinguish between the two curves. The **plt.xlabel()** and **plt.ylabel()** functions are used to label the x-axis and y-axis respectively. Finally, the **plt.title()** function is used to add a title to the plot.
# By visualizing the loss curves, it becomes easier to track the progress of the model during training and to identify if there is overfitting or underfitting occurring.
plt.figure(figsize=[6, 4])
plt.plot(training_loss_history, "black", linewidth=2.0)
plt.plot(validation_loss_history, "green", linewidth=2.0)
plt.legend(["Training Loss", "Validation Loss"], fontsize=14)
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Loss", fontsize=10)
plt.title("Loss Curves", fontsize=12)
# # ➕➖ **Calculating the inference time for a single image.** ➗✖
# This cell covers how to calculate the inference time for a single image using a pre-trained model. The code uses a validation dataset to load a single image and its corresponding label. The model is then loaded onto the device, and the inference time is calculated using the time library. The time taken for the model to output the predictions for the image is recorded and printed to the console.
# First Iteration
inference_data_loader = torch.utils.data.DataLoader(
validation_set, batch_size=1, shuffle=False, num_workers=2
)
images, labels = next(iter(inference_data_loader))
labels = labels.to(device)
images = images.to(device)
model_ft = model_ft.to(device)
start = time.time()
outputs = model_ft(images)
end = time.time()
inference_time = end - start
print(f"The inference time is: {inference_time}")
# Second Iteration
inference_data_loader = torch.utils.data.DataLoader(
validation_set, batch_size=1, shuffle=False, num_workers=2
)
images, labels = next(iter(inference_data_loader))
labels = labels.to(device)
images = images.to(device)
model_ft = model_ft.to(device)
start = time.time()
outputs = model_ft(images)
end = time.time()
inference_time = end - start
print(f"The inference time is: {inference_time}")
# # 📑 **Creating the prediction file.**
# The code in the cell uses the trained model to generate predictions for the validation data and stores them in a list. Then, the list is transformed into a **CSV** format that can be submitted to Kaggle for evaluation.
# The first part of the code uses a for loop to iterate over the validation data and generate predictions for each image using the trained model. The predictions are stored in a list called "**predictions**".
# The second part of the code transforms the list of predictions into a format that can be submitted to Kaggle. It uses the csv library to create a **CSV** file named "**submission.csv**". The file has two columns: "**ID**" and "**Label**". For each prediction in the list, it writes a new row in the CSV file with the image ID and the predicted label.
# By creating this prediction file, we can submit our predictions to Kaggle and evaluate our model's performance in the competition.
predictions = []
with torch.no_grad():
for data in validation_loader:
        images, labels = data
        images = images.to(device)  # move the batch to the same device as the model
        outputs = model_ft(images)
_, predicted = torch.max(outputs.data, 1)
predictions.append(predicted)
predictions_transformed = [x.item() for x in torch.cat(predictions)]
with open("submission.csv", "w", encoding="utf-8", newline="") as out:
writer = csv.writer(out)
writer.writerow(["ID", "Label"])
for ID, Label in enumerate(predictions_transformed):
writer.writerow([ID, Label])
|
# # Kidney Stone Prediction
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import lightgbm as lgbm
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("../input/playground-series-s3e12/train.csv")
test = pd.read_csv("../input/playground-series-s3e12/test.csv")
original = pd.read_csv(
"../input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
submission = pd.read_csv("../input/playground-series-s3e12/sample_submission.csv")
# # Key Notes about Dataset
# # Understanding Dataset 1
# # Understanding Dataset 2
# inspecting the dataset
print("Train Dataset\n", train.head())
print("\n Test Dataset\n", test.head())
print("\n Original Dataset \n", original.head())
# checking for missing values
print("Train Dataset\n", train.isnull().sum())
print("\n Test Dataset\n", test.isnull().sum())
print("\n Original Dataset \n", original.isnull().sum())
print(
"Train dataset descriptive stat \n", train.drop(["id", "target"], axis=1).describe()
)
print(
"\n Original dataset descriptive stat \n",
original.drop(["target"], axis=1).describe(),
)
print("Train balanced dataset check \n", train["target"].value_counts())
print("\nOriginal balanced dataset check \n", original["target"].value_counts())
print("Train info \n", train.info())
print("\nTest info \n", test.info())
print("\nOriginal info \n", original.info())
# checking for duplicates
print("Duplication in train \n", train[train.duplicated()])
print("\nDuplication in original \n", original[original.duplicated()])
# # Dataset Summary
# # Dataset cleaning
# the only cleaning identified at this point is removing the id feature from both the train and test datasets.
train.drop("id", axis=1, inplace=True)
test.drop("id", axis=1, inplace=True)
# # Exploratory Data Analysis (EDA)
columns = test.columns.tolist()
fig, ax = plt.subplots(3, 2, figsize=(10, 10), dpi=300)
ax = ax.flatten()
for i in columns:
sns.kdeplot(train[i], ax=ax[columns.index(i)])
sns.kdeplot(test[i], ax=ax[columns.index(i)])
sns.kdeplot(original[i], ax=ax[columns.index(i)])
ax[columns.index(i)].set_title(f"{i} Distribution")
fig.suptitle("Distribution of Features per Dataset", fontsize=12, fontweight="bold")
fig.legend(["Train", "Test", "Original"])
plt.tight_layout()
def cor(data, title):
corr = data.corr()
sns.heatmap(corr, annot=True)
plt.title(f"{title} Correlation Matrix")
    plt.show()
cor(train, "Train")
cor(test, "Test")
cor(original, "Original")
columns = test.columns.tolist()
fig, ax = plt.subplots(3, 2, figsize=(8, 5), dpi=150)
ax = ax.flatten()
for i in columns:
sns.boxplot(train[i], ax=ax[columns.index(i)])
ax[columns.index(i)].set_title(f"{i} Boxplot")
plt.tight_layout()
# # EDA Observation/conclusion
# # Feature Eng and Model training
original_train = pd.concat([original, train])
original_train[original_train.duplicated()]
x = original_train.drop("target", axis=1)
y = original_train["target"]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=30, random_state=1)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
lg = lgbm.LGBMClassifier()
lg.fit(X_train, y_train)
lgResult = lg.predict(X_test)
print(metrics.accuracy_score(y_test, lgResult))
from xgboost import XGBClassifier
xb = XGBClassifier()
xb.fit(X_train, y_train)
xbResult = xb.predict(X_test)
print(metrics.accuracy_score(y_test, xbResult))
x_kaggle = original_train.drop("target", axis=1)
y_kaggle = original_train["target"]
xb = XGBClassifier()
xb.fit(x_kaggle, y_kaggle)
xbResult = xb.predict(test)
submission["target"] = xbResult
submission.to_csv("submission.csv", index=False)
lg = lgbm.LGBMClassifier()
lg.fit(x_kaggle, y_kaggle)
lgResult = lg.predict(test)
submission["target"] = lgResult
submission.to_csv("submission1.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Data
# The process of loading and reading data into Python from various resources.
#
# Two important properties:
# ### Format (various formats such as csv, json, xlsx, etc.; a short JSON example follows this overview)
# ### File path of the data set
#
# * Computer/Desktop/Folder
# * Internet: URL/API
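# A short, self-contained illustration of the JSON format mentioned above (the records here
# are made up and do not come from this notebook's input files):
import io
import pandas as pd

json_records = '[{"Bedroom": 2, "Price($)": 150000}, {"Bedroom": 3, "Price($)": 210000}]'
df_json = pd.read_json(io.StringIO(json_records))
df_json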
# # Reading Datasets
# How to read data from different sources
# * Read data from a CSV
# * Read data from a CSV where headers are missing
# * Read data from a CSV where delimiters/separators are not commas
# * How to bypass given headers with your own?
# * Skip initial rows
# * Reading from an Excel file
# * Reading from an Excel file - how to use `sheet_name`
# * A general delimited text file can be read the same way as a CSV
# * Reading data from a URL
# ## Importing Packages:
import numpy as np
import pandas as pd
df1 = pd.read_csv("../input/dummy-data-for-reading-files/CSV_EX_1.csv")
df1
# ### Read data from a CSV where headers are missing
df2 = pd.read_csv("../input/dummy-data-for-reading-files/CSV_EX_2.csv")
df2
# ignore headers while reading data file
df2 = pd.read_csv("../input/dummy-data-for-reading-files/CSV_EX_2.csv", header=None)
df2
# bypass given headers with your own
df2 = pd.read_csv(
"../input/dummy-data-for-reading-files/CSV_EX_2.csv",
header=None,
names=["Bedroom", "Sq.ft", "Locality", "Price($)"],
)
df2
# ### Read data from a CSV where delimiters/separators are not commas
df3 = pd.read_csv("../input/dummy-data-for-reading-files/CSV_EX_3.csv")
df3
# using a semicolon as the separator
df3 = pd.read_csv("../input/dummy-data-for-reading-files/CSV_EX_3.csv", sep=";")
df3
# ### How to bypass given headers with your own?
df4 = pd.read_csv(
"../input/dummy-data-for-reading-files/CSV_EX_1.csv", names=["A", "B", "C", "D"]
)
df4
df4 = pd.read_csv(
"../input/dummy-data-for-reading-files/CSV_EX_1.csv",
header=0,
names=["A", "B", "C", "D"],
)
df4
# ### Skip initial rows
df5 = pd.read_csv("../input/dummy-data-for-reading-files/CSV_EX_skiprows.csv")
df5
df5 = pd.read_csv(
"../input/dummy-data-for-reading-files/CSV_EX_skiprows.csv", skiprows=2
)
df5
# ### Reading from an Excel file
df_excel = pd.read_excel(
"../input/dummy-data-for-reading-files/Housing_data-single.xlsx"
)
df_excel
df_excel.shape
# ### Reading from an Excel file - how to use `sheet_name`
df11_1 = pd.read_excel(
"../input/dummy-data-for-reading-files/Housing_data.xlsx", sheet_name="Data_Tab_1"
)
df11_2 = pd.read_excel(
"../input/dummy-data-for-reading-files/Housing_data.xlsx", sheet_name="Data_Tab_2"
)
df11_3 = pd.read_excel(
"../input/dummy-data-for-reading-files/Housing_data.xlsx", sheet_name="Data_Tab_3"
)
df11_1.shape
df11_2.shape
df11_3.shape
# ### A general delimited text file can be read the same way as a CSV
df_txt = pd.read_table("../input/dummy-data-for-reading-files/Table_EX_1.txt")
df_txt
df_txt = pd.read_table("../input/dummy-data-for-reading-files/Table_EX_1.txt", sep=",")
df_txt
# ### Reading data from a URL
import pandas as pd
url = "https://raw.githubusercontent.com/cs109/2014_data/master/countries.csv"
df_url = pd.read_csv(url)
df_url
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings("ignore")
import plotly.express as px
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
ROOT_PATH = "/kaggle/input/store-sales-time-series-forecasting"
stores = pd.read_csv(ROOT_PATH + "/stores.csv")
train = pd.read_csv(ROOT_PATH + "/train.csv")
transactions = pd.read_csv(ROOT_PATH + "/transactions.csv")
## https://www.kaggle.com/code/arjanso/reducing-dataframe-memory-size-by-65
def reduce_memory_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype.name
if (col_type != "datetime64[ns]") & (col_type != "category"):
if col_type != "object":
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min > np.iinfo(np.int16).min
and c_max < np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.int32).min
and c_max < np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.int64).min
and c_max < np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
pass
else:
df[col] = df[col].astype("category")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage became: ", mem_usg, " MB")
return df
def NumPyRMSLE(y_true: list, y_pred: list) -> float:
n = len(y_true)
rmsle = np.sqrt(np.mean(np.square(np.log1p(y_pred) - np.log1p(y_true))))
return rmsle
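# Quick sanity check of the metric above (illustrative values only): RMSLE compares the
# log1p-transformed predictions and targets, so both arrays must be non-negative.
y_true_demo = np.array([3.0, 5.0, 2.5, 7.0])
y_pred_demo = np.array([2.5, 5.0, 4.0, 8.0])
print("Demo RMSLE:", NumPyRMSLE(y_true_demo, y_pred_demo))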
train = reduce_memory_usage(train)
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("stores", stores)
summary("train", train)
summary("transactions", transactions)
train = pd.merge(train, stores, how="left", on="store_nbr")
train.head()
oil = pd.read_csv(ROOT_PATH + "/oil.csv")
oil["date"] = pd.to_datetime(oil["date"])
dates = pd.DataFrame(train.date.unique(), columns=["date"])
dates["date"] = pd.to_datetime(dates["date"])
oil = pd.merge(dates, oil, how="left", on="date")
oil = oil.set_index("date")
oil.shape
print(oil.query("dcoilwtico != dcoilwtico"))
print(oil.isnull().sum())
fig = px.scatter(
oil, x=oil.index, y="dcoilwtico", title="Oil price change over the year"
)
fig.update_xaxes(
rangeslider_visible=False,
)
fig.show()
oil_na = oil.copy()
oil_na = oil_na.dropna()
pd.plotting.lag_plot(oil["dcoilwtico"], lag=1)
train
train["date"] = pd.to_datetime(train["date"])
train = train.set_index("date")
train.groupby(["city"]).sales.median().plot(kind="line")
train.groupby(["family"]).sales.median().plot(kind="bar")
train = pd.merge(train, oil, how="left", on="date")
data_group = train.groupby(["date"]).agg({"dcoilwtico": "median", "sales": "mean"})
fig = px.scatter(data_group, x=data_group.index, y=["sales", "dcoilwtico"])
fig.update_layout(title="Sale Vs Date ")
fig.show()
del data_group
train.head(5)
holidays = pd.read_csv(ROOT_PATH + "/holidays_events.csv")
holidays.head(5)
|
# 
# image from [link](https://novascriptscentral.org/diabetes/)
# * #### Feature engineering is the process of working with variables to create new ones or enhance existing ones.
# * #### Feature engineering is the process of creating new variables from raw data (a small illustration follows this list).
# * #### Most of the work in a machine learning project is data preprocessing; the modeling itself is the smaller part.
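# A tiny, self-contained illustration of feature engineering (the values below are made up
# and are not taken from the dataset loaded later): deriving categorical bands from raw
# numeric columns creates new variables that a model can use.
import pandas as pd

_demo = pd.DataFrame({"BMI": [21.5, 27.3, 33.1], "Age": [25, 47, 63]})
_demo["BMI_BAND"] = pd.cut(
    _demo["BMI"],
    bins=[0, 18.5, 25, 30, 100],
    labels=["underweight", "healthy", "overweight", "obese"],
)
_demo["AGE_GROUP"] = pd.cut(
    _demo["Age"], bins=[0, 45, 60, 120], labels=["mature", "senior", "elder"]
)
print(_demo)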
# ### Dataset Details
# Pregnancies: Number of times pregnant
# Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test
# BloodPressure: Diastolic blood pressure (mm Hg)
# SkinThickness: Triceps skin fold thickness (mm)
# Insulin: 2-Hour serum insulin (mu U/ml)
# BMI: Body mass index (weight in kg/(height in m)^2)
# DiabetesPedigreeFunction: Diabetes pedigree function
# Age: Age (years)
# Outcome: Class variable (0 or 1)
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import missingno as msno
from datetime import date
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import (
MinMaxScaler,
LabelEncoder,
StandardScaler,
RobustScaler,
)
pd.set_option("display.max_columns", None)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
pd.set_option("display.width", 500)
df = pd.read_csv("/kaggle/input/pima-indians-diabetes-database/diabetes.csv")
def check_df(dataframe, head=5):
print("#################### Head ####################")
print(dataframe.head(head))
print("################### Shape ####################")
print(dataframe.shape)
print("#################### Info #####################")
print(dataframe.info())
print("################### Nunique ###################")
print(dataframe.nunique())
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("################## Quantiles #################")
print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
print("################# Duplicated ###################")
print(dataframe.duplicated().sum())
check_df(df)
# First, we need to identify the numerical and categorical variables in the data.
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("##########################################")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show(block=True)
# We are analyzing the numeric variables (a numeric-oriented summary helper is also sketched below).
for col in num_cols:
cat_summary(df, col, plot=True)
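# A numeric-oriented alternative (a sketch, not part of the original notebook): for continuous
# columns, quantiles and a histogram are usually more informative than raw value counts.
def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

for col in num_cols:
    num_summary(df, col, plot=True)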
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import Libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
import warnings
warnings.filterwarnings("ignore")
# # Load Dataset
data = pd.read_csv("/kaggle/input/iris-dataset/IRIS - IRIS.csv")
data.head()
data.describe()
data.info()
data["species"].unique()
sns.pairplot(data, hue="species")
# # Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
data.drop("species", axis=1), data["species"], test_size=0.30, random_state=101
)
# # KNN Model
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
# # Evaluations
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
# # Find The K Value
error_rate = []
for i in range(1, 10):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
predict = knn.predict(X_test)
error_rate.append(np.mean(predict != y_test))
plt.figure(figsize=(10, 6))
plt.plot(
range(1, 10),
error_rate,
color="blue",
linestyle="dashed",
marker="o",
markerfacecolor="red",
markersize=10,
)
plt.title("Error Rate vs. K Value")
plt.xlabel("K")
plt.ylabel("Error Rate")
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import ClassifierMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
classification_report,
confusion_matrix,
ConfusionMatrixDisplay,
precision_score,
recall_score,
accuracy_score,
f1_score,
RocCurveDisplay,
make_scorer,
)
from sklearn.preprocessing import LabelBinarizer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import StratifiedKFold
# ## Preparing the Dataset: [Non verbal tourists data](https://archive.ics.uci.edu/ml/datasets/Non+verbal+tourists+data)
# This dataset is also available on Kaggle: https://www.kaggle.com/datasets/surajjha101/nonverbal-tourists-data
# About the Dataset:
# > A total of 73 customers, aged between 24 and 81 years old, were surveyed. Of the customers surveyed, 38 were returning customers, and 35 were new customers. The variables chosen are the essential ones that make up the non-verbal communication system. In addition to being the most feasible to evaluate in clients. The non-verbal system is made up of subsystems such as kinesic, paralanguage, proxemic, chronic, and others. In the design of the questionnaire, the indicators that make up these subsystems were taken into account to be explored as part of the client's communication preferences, as well as being feasible to evaluate in clients. The 22 variables analyzed were considered feasible to evaluate by the hotel's clientele.
# The **data dictionary** is available in the attribute information section at: https://archive.ics.uci.edu/ml/datasets/Non+verbal+tourists+data
dataset_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00620/non-verbal%20tourist%20data.csv"
df = pd.read_csv(dataset_url)
df.head()
df.info()
# ## Data Cleaning
# ### Sex
df["sex"].unique()
le = preprocessing.LabelEncoder()
le.fit(df["sex"])
df["sex"] = le.transform(df["sex"])
sns.countplot(x=df["sex"])
plt.show()
# ### Age
df["age"].unique()
sns.boxplot(df, x="age")
plt.show()
# ### Country
df["country"].unique()
sns.countplot(df, y="country")
plt.show()
df = pd.get_dummies(df, columns=["country"], drop_first=True)
df.info()
# ### Returning
df["returning"].unique()
le = preprocessing.LabelEncoder()
le.fit(df["returning"])
df["returning"] = le.transform(df["returning"])
sns.countplot(df, x="returning")
plt.show()
# ### Non Verbal Postures (Nominal values)
# The Proxemics and TAudio1 features had their values swapped. We can adjust this.
df[["TAudio1", "Proxemics"]].head()
df[["TAudio1", "Proxemics"]] = df[["Proxemics", "TAudio1"]]
df[["TAudio1", "Proxemics"]].head()
nominal_features = [
"GImg1",
"GImg2",
"GImg3",
"PImg1",
"PImg2",
"PImg3",
"PImg4",
"PImg5",
"TAudio1",
"TAudio2",
"TAudio3",
"QAudio1",
"QAudio2",
"QAudio3",
]
df[nominal_features].head()
pd.crosstab(**df[nominal_features].melt(var_name="columns", value_name="index"))
# drop the rows with the '?' character
df.drop(df[df["GImg3"] == "?"].index, inplace=True)
df.drop(df[df["PImg5"] == "?"].index, inplace=True)
value_counts = pd.crosstab(
**df[nominal_features].melt(var_name="columns", value_name="index")
)
value_counts
ax = sns.heatmap(value_counts.transpose(), annot=True, cmap="Blues")
ax.set(ylabel="values")
plt.show()
df = pd.get_dummies(df, columns=nominal_features, drop_first=True)
df.head()
df.info()
# ### Non Verbal Postures (Ordinal values)
df[
["Tense - relaxed", "Authoritative -anarchic ", "Hostile - friendly", "Proxemics"]
].head()
df["Proxemics"].unique()
le = preprocessing.LabelEncoder()
le.fit(df["Proxemics"])
df["Proxemics"] = le.transform(df["Proxemics"])
sns.countplot(df, x="Proxemics")
plt.show()
# one of them has object type (Authoritative -anarchic), possibly a '?' character
df.drop(df[df["Authoritative -anarchic "] == "?"].index, inplace=True)
df["Authoritative -anarchic "] = df["Authoritative -anarchic "].astype(int)
df.info()
## seems like relaxed and friendly environments are correlated
sns.heatmap(
df[["Tense - relaxed", "Authoritative -anarchic ", "Hostile - friendly"]].corr(),
annot=True,
cmap="Blues",
)
plt.show()
# ### Target: Type of Client
df["Type of Client"].unique()
sns.histplot(df, x="Type of Client")
plt.show()
# ## Data Holdout and StratifiedKFold
X = df.drop(columns=["Type of Client"])
y = df["Type of Client"]
# using stratify to keep the same proportion as the target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, stratify=y, random_state=111
)
def cross_validate_stratify(
X: pd.DataFrame, y: pd.Series, pipeline: Pipeline, cv: int
) -> dict:
accuracy = []
precision = []
recall = []
kfold = StratifiedKFold(n_splits=cv, shuffle=True, random_state=111)
for train_index, test_index in kfold.split(X, y):
X_train_fold, X_test_fold = X.iloc[train_index], X.iloc[test_index]
y_train_fold, y_test_fold = y.iloc[train_index], y.iloc[test_index]
pipeline.fit(X_train_fold, y_train_fold)
y_pred_fold = pipeline.predict(X_test_fold)
        # weighted average to take class imbalance into account
        # zero_division=0 because some classes may never be predicted
accuracy.append(accuracy_score(y_test_fold, y_pred_fold))
precision.append(
precision_score(
y_test_fold, y_pred_fold, average="weighted", zero_division=0
)
)
recall.append(
recall_score(y_test_fold, y_pred_fold, average="weighted", zero_division=0)
)
return dict(accuracy=accuracy, precision=precision, recall=recall)
# ## Data Normalization and Pipeline
features_to_normalize = [
"age",
"Tense - relaxed",
"Authoritative -anarchic ",
"Hostile - friendly",
"Proxemics",
]
def get_normalizer():
return ColumnTransformer(
[("std_scaler", preprocessing.StandardScaler(), features_to_normalize)],
remainder="passthrough",
)
def get_pipeline(classifier: ClassifierMixin) -> Pipeline:
column_transformer = get_normalizer()
return Pipeline([("scaler", column_transformer), ("classifier", classifier)])
normalizer = get_normalizer()
normalizer.fit(X_train)
X_train_sc = normalizer.transform(X_train)
X_test_sc = normalizer.transform(X_test)
X_train_sc[0]
X_test_sc[0]
# ## KNN
# Evaluation metrics criteria:
# > Besides accuracy, I'm interested in precision and recall (or F1-score), because we have an imbalanced dataset and we need to know how relevant the predictions are for each class individually (to see how often one class is mistaken for another).
precision_list = []
recall_list = []
accuracy_list = []
k_values = range(1, 31)
for k in k_values:
pipeline = get_pipeline(KNeighborsClassifier(n_neighbors=k))
# cv=3 because the least populated class in y has only 3 members
# using the whole dataset because we have few samples
scores = cross_validate_stratify(X, y, pipeline, cv=3)
accuracy = np.mean(scores["accuracy"])
precision = np.mean(scores["precision"])
recall = np.mean(scores["recall"])
accuracy_list.append(accuracy)
precision_list.append(precision)
recall_list.append(recall)
print(
"K = %d | Accuracy = %.2f | Precision = %.2f | Recall = %.2f"
% (k, accuracy, precision, recall)
)
def plot_line(x: np.array, y: np.array, ylabel: str, title: str) -> None:
ax = sns.lineplot(x=x, y=y, marker="o", markerfacecolor="red", markersize=10)
ax.set(title=title, xlabel="K", ylabel=ylabel)
plt.show()
plot_line(k_values, accuracy_list, ylabel="Accuracy", title="Accuracy x K")
plot_line(k_values, precision_list, ylabel="Precision", title="Precision x K")
plot_line(k_values, recall_list, ylabel="Recall", title="Recall x K")
# ### Using K = 3
n_neighbors = 3
knn = get_pipeline(KNeighborsClassifier(n_neighbors=n_neighbors))
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
def plot_metrics(y_test: pd.Series, y_pred: np.array) -> None:
print(classification_report(y_test, y_pred, zero_division=0))
ConfusionMatrixDisplay.from_predictions(y_test, y_pred, cmap=plt.cm.Blues)
plt.show()
def plot_roc_curve(
classifier: ClassifierMixin,
X_test: pd.DataFrame,
y_test: pd.Series,
y_train: pd.Series,
) -> None:
fig, ax = plt.subplots(figsize=(6, 6))
y_pred = classifier.predict_proba(X_test)
label_binarizer = LabelBinarizer().fit(y_train)
y_onehot_test = label_binarizer.transform(y_test)
colors = ["red", "orange", "blue", "green", "purple", "yellow"]
for class_id, color in zip(classifier.classes_, colors):
display = RocCurveDisplay.from_predictions(
y_onehot_test[:, class_id],
y_pred[:, class_id],
name=f"ROC curve for {class_id}",
color=color,
ax=ax,
)
plt.plot([0, 1], [0, 1], "k--", label="ROC curve for chance level (AUC = 0.5)")
plt.axis("square")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve: One-vs-Rest multiclass")
plt.legend()
plt.show()
def evaluate_model(y_test: pd.Series, y_pred: np.array) -> None:
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average="weighted", zero_division=0)
recall = recall_score(y_test, y_pred, average="weighted", zero_division=0)
print("Accuracy: %.2f" % accuracy)
print("Precision: %.2f" % precision)
print("Recall: %.2f" % recall)
return accuracy, precision, recall
base_accuracy, base_precision, base_recall = evaluate_model(y_test, y_pred)
plot_metrics(y_test, y_pred)
plot_roc_curve(knn, X_test, y_test, y_train)
# ## Random Forest
random_forest = get_pipeline(RandomForestClassifier())
random_forest.fit(X_train, y_train)
y_pred = random_forest.predict(X_test)
base_accuracy, base_precision, base_recall = evaluate_model(y_test, y_pred)
plot_metrics(y_test, y_pred)
plot_roc_curve(random_forest, X_test, y_test, y_train)
# ### Hyperparameter tuning: Random Search
# Reference: https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# Number of features to consider at every split
max_features = ["auto", "sqrt"]
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
random_grid = {
"classifier__n_estimators": n_estimators,
"classifier__max_features": max_features,
"classifier__max_depth": max_depth,
"classifier__min_samples_split": min_samples_split,
"classifier__min_samples_leaf": min_samples_leaf,
"classifier__bootstrap": bootstrap,
}
random_grid
rf_random = RandomizedSearchCV(
estimator=get_pipeline(RandomForestClassifier()),
param_distributions=random_grid,
n_iter=100,
cv=StratifiedKFold(n_splits=3),
random_state=111,
n_jobs=-1,
)
rf_random.fit(X, y)
random_search_params = rf_random.best_params_
random_search_params
def parse_params(params: dict) -> dict:
return {key.replace("classifier__", ""): value for key, value in params.items()}
def measure_improvement(
params: dict,
X_train: pd.DataFrame,
y_train: pd.Series,
X_test: pd.DataFrame,
y_test: pd.Series,
previous_metrics: dict,
) -> None:
random_forest = get_pipeline(RandomForestClassifier(**parse_params(params)))
random_forest.fit(X_train, y_train)
y_pred = random_forest.predict(X_test)
accuracy, precision, recall = evaluate_model(y_test, y_pred)
print(
"Accuracy Improvement: %0.2f%%"
% (
100
* (accuracy - previous_metrics["accuracy"])
/ previous_metrics["accuracy"]
)
)
print(
"Precision Improvement: %0.2f%%."
% (
100
* (precision - previous_metrics["precision"])
/ previous_metrics["precision"]
)
)
print(
"Recall Improvement: %0.2f%%"
% (100 * (recall - previous_metrics["recall"]) / previous_metrics["recall"])
)
measure_improvement(
random_search_params,
X_train,
y_train,
X_test,
y_test,
dict(accuracy=base_accuracy, precision=base_precision, recall=base_recall),
)
n_iter = range(1, 101)
mean_test_score = rf_random.cv_results_["mean_test_score"]
ax = sns.lineplot(x=n_iter, y=mean_test_score)
ax.set(title="Mean Test Score", xlabel="Iteration", ylabel="Score")
plt.show()
print("Mean Test Score: %.2f" % (np.mean(mean_test_score)))
# ### Hyperparameter tuning: Grid Search
#
scoring = {
"f1-score": make_scorer(f1_score, average="weighted"),
"accuracy": make_scorer(accuracy_score),
}
param_grid = {
**dict((param, [random_search_params[param]]) for param in random_search_params),
"classifier__max_depth": max_depth,
}
param_grid
grid_search = GridSearchCV(
estimator=get_pipeline(RandomForestClassifier()),
param_grid=param_grid,
cv=3,
scoring=scoring,
n_jobs=-1,
refit="f1-score",
)
grid_search.fit(X, y)
grid_search_params = grid_search.best_params_
grid_search_params
measure_improvement(
grid_search_params,
X_train,
y_train,
X_test,
y_test,
dict(accuracy=base_accuracy, precision=base_precision, recall=base_recall),
)
sns.lineplot(
x=max_depth, y=grid_search.cv_results_["mean_test_accuracy"], label="Accuracy"
)
ax = sns.lineplot(
x=max_depth, y=grid_search.cv_results_["mean_test_f1-score"], label="F1-Score"
)
ax.set(title="Grid Search Metrics x Tree Max Depth", xlabel="Max Depth", ylabel="Score")
plt.show()
print(
"Mean Test Accuracy: %.2f"
% (np.mean(grid_search.cv_results_["mean_test_accuracy"]))
)
print(
"Mean Test F1-Score: %.2f"
% (np.mean(grid_search.cv_results_["mean_test_f1-score"]))
)
# ### Random forest with optimized params
optimized_params = grid_search.best_params_
optimized_params
random_forest = get_pipeline(RandomForestClassifier(**parse_params(optimized_params)))
random_forest.fit(X_train, y_train)
y_pred = random_forest.predict(X_test)
accuracy, precision, recall = evaluate_model(y_test, y_pred)
plot_metrics(y_test, y_pred)
plot_roc_curve(random_forest, X_test, y_test, y_train)
|
# # Config
import os
data_dir = "/kaggle/input/cbc-data"
# Hyper-parameter
VERSION = 1
DEBUG = False
CENTRAL_BANK = ["ECB"]
# Possible values: 'Riksbank', 'Finlands Bank', 'ECB'
FROM_DATE = "2002-04-02"
TO_DATE = "2023-04-01"
TRAIN_TO_DATE = "2020-01-01"
# Period to predict and periods used as features
PREDICT_PERIOD = 90
PAST_PERIOD = [90, 180]
PREDICT_PERIOD_VARIABLE = f"predict_rate_change_{PREDICT_PERIOD}D"
PAST_PERIOD_VARIABLE = [f"past_rate_change_{d}D" for d in PAST_PERIOD]
print("Past rate change features: ", PAST_PERIOD_VARIABLE)
print("Target: ", PREDICT_PERIOD_VARIABLE)
# Model parameter
PRETRAINED_MODEL = "microsoft/deberta-v3-base"
MAX_LEN = 128
MODEL_NAME = f'{PRETRAINED_MODEL.split("/",1)[1]}_v{VERSION}'
LOAD_MODEL_FROM = os.path.join(data_dir, MODEL_NAME)
print(MODEL_NAME)
print(LOAD_MODEL_FROM)
LOAD_LDA_FROM = "/kaggle/input/cbc-lda"
# Import useful libraries
import pandas as pd
import numpy as np
import re
import random
import pickle
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import seaborn as sns
sns.set_style("darkgrid")
sns.set(rc={"figure.figsize": (10, 4)})
# Set up device: TPU or GPU
from transformers import *
import tensorflow as tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("TPU strategy")
except:
if len(tf.config.list_physical_devices("GPU")) >= 2:
strategy = tf.distribute.MirroredStrategy()
print("DEVICES AVAILABLE: {}".format(strategy.num_replicas_in_sync))
else:
strategy = tf.distribute.get_strategy()
print("single strategy")
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
print("Mixed precision enabled")
# # Get data
# ## Speeches
# ### Riksbank, Finlands Bank
# Helper function to get corpus dataset
def get_corpus(data_dir, from_date=FROM_DATE, sep=","):
data = pd.read_csv(data_dir, sep=sep)
data["date"] = pd.to_datetime(data["date"])
return data[data["date"] >= from_date]
corpus_df = get_corpus(os.path.join(data_dir, "all_corpus.csv"))
corpus_df["article_len"] = corpus_df["article"].str.len()
corpus_df = corpus_df[corpus_df["article_len"] > 100]
print("df shape: ", corpus_df.shape)
corpus_df.head()
# ### ECB
# Get speeches from file downloaded from ECB website
ecb_corpus_df = get_corpus(os.path.join(data_dir, "all_ECB_speeches.csv"), sep="|")
ecb_corpus_df["speaker_title"] = ecb_corpus_df["subtitle"].str.split(",").str[1]
ecb_corpus_df["article_len"] = ecb_corpus_df["contents"].str.len()
ecb_corpus_df = ecb_corpus_df[ecb_corpus_df["article_len"] > 100]
ecb_corpus_df["id"] = (
"ecb"
+ ecb_corpus_df["date"].dt.strftime("%m%d%Y")
+ ecb_corpus_df["speakers"].str[:2].str.lower()
+ ecb_corpus_df["article_len"].astype(str)
)
ecb_corpus_df["bank"] = "ECB"
ecb_corpus_df["policy_rate_control"] = "ECB"
ecb_corpus_df["article"] = ecb_corpus_df["contents"].apply(
lambda s: re.split(r"\s{2,}", s)
)
ecb_corpus_df = ecb_corpus_df[
[
"bank",
"policy_rate_control",
"id",
"date",
"speakers",
"speaker_title",
"article",
]
]
ecb_corpus_df.rename(columns={"speakers": "speaker"}, inplace=True)
print("df shape: ", ecb_corpus_df.shape)
print("Number of unique ids: ", len(ecb_corpus_df["id"].unique()))
ecb_corpus_df.head()
# ### All speeches
# Join speech tables
full_corpus_df = pd.concat([corpus_df, ecb_corpus_df])
full_corpus_df = full_corpus_df[full_corpus_df["bank"].isin(CENTRAL_BANK)]
full_corpus_df.shape
# Split each long article into smaller paragraphs
full_corpus_df = full_corpus_df.explode("article")
full_corpus_df["article_len"] = (
full_corpus_df["article"].str.strip().str.split().apply(len)
)
full_corpus_df = full_corpus_df[full_corpus_df["article_len"] > 10]
print("df shape: ", full_corpus_df.shape)
full_corpus_df.head()
# Article length distribution
sns.displot(
data=full_corpus_df, x="article_len", hue="bank", kind="kde", height=4, aspect=7 / 4
)
plt.show()
# Number of article per month
data = (
full_corpus_df.groupby(
[full_corpus_df["bank"], full_corpus_df["date"].dt.to_period("M")]
)
.agg({"id": "nunique"})
.reset_index()
)
data = data[data["date"] >= "2020-01-01"]
sns.barplot(data, x="date", y="id", hue="bank")
plt.xticks(rotation=90)
plt.show()
# ## Policy rate
# ### Riksbank
# Helper function to get and process policy rate csv files
def get_policy_rate(
data_dir,
skiprows=None,
policy_rate_control="Riksbank",
date_col="Period",
format_date="%d/%m/%Y",
value_col="Value",
from_date=FROM_DATE,
to_date=TO_DATE,
convert_value_col=False,
sep=",",
):
data = pd.read_csv(data_dir, sep=sep, skiprows=skiprows)
data["policy_rate_control"] = policy_rate_control
data[date_col] = pd.to_datetime(data[date_col], format=format_date)
data = data[["policy_rate_control", date_col, value_col]]
data.columns = ["policy_rate_control", "date", "policy_rate"]
data = data[data["date"] >= from_date]
# Fill rate when data is not available
date_df = pd.DataFrame({"date": pd.date_range(start=from_date, end=to_date)})
data = date_df.merge(data, on="date", how="left")
data = data.fillna(method="ffill")
# If rate format is not appropriate, fix the column
if convert_value_col:
data["policy_rate"] = data["policy_rate"].str.replace(",", ".").astype(float)
# Create past rate change and future rate change columns
data[PREDICT_PERIOD_VARIABLE] = (
data["policy_rate"].shift(-PREDICT_PERIOD) - data["policy_rate"]
)
for i in range(len(PAST_PERIOD_VARIABLE)):
data[PAST_PERIOD_VARIABLE[i]] = data["policy_rate"] - data["policy_rate"].shift(
PAST_PERIOD[i]
)
for col in PAST_PERIOD_VARIABLE + [PREDICT_PERIOD_VARIABLE]:
conditions = [data[col] > 0, data[col] == 0, data[col] < 0]
choices = ["Increase", "Same", "Decrease"]
data[f"{col}_class"] = np.select(conditions, choices, default=np.nan)
return data
riks_rate_df = get_policy_rate(
data_dir=os.path.join(data_dir, "riksbank_policy_rate.csv"),
convert_value_col=True,
sep=";",
)
print("df shape: ", riks_rate_df.shape)
riks_rate_df.dropna().head()
# ### ECB official interest rate
# Get data for ECB
ecb_rate_df = get_policy_rate(
data_dir=os.path.join(data_dir, "ecb_interest_rate.csv"),
skiprows=[0, 1, 3, 4],
policy_rate_control="ECB",
date_col="Unnamed: 0",
format_date="%Y-%m-%d",
value_col="Daily, ECB Deposit facility - date of changes (raw data), Level",
)
print(ecb_rate_df.shape)
ecb_rate_df.dropna().head()
# ### All policy rate
# Join all policy rate tables
full_rate_df = pd.concat([riks_rate_df, ecb_rate_df])
print("df shape: ", full_rate_df.shape)
full_rate_df.dropna().head()
# Compare policy rate
sns.lineplot(data=full_rate_df, x="date", y="policy_rate", hue="policy_rate_control")
plt.xticks(rotation=90)
plt.show()
# # Prepare dataset
# Combine speeches and policy rate tables
full_df = full_corpus_df.merge(
full_rate_df, on=["date", "policy_rate_control"], how="left"
)
full_df = full_df.dropna()
print("df shape: ", full_df.shape)
full_df.head()
# ## Train test split
train_df = full_df[full_df["date"] <= TRAIN_TO_DATE]
test_df = full_df[full_df["date"] > TRAIN_TO_DATE]
print("Train df shape: ", train_df.shape)
print("Test df shape: ", test_df.shape)
# ## Classify topics
import gensim
from pprint import pprint
from nltk.corpus import stopwords
stop_words = stopwords.words("english")
import spacy
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
def preprocess(article):
result = gensim.utils.simple_preprocess(article, deacc=True) # lowercase, deaccent
result = [w for w in result if w not in stop_words] # remove stop words
result = [w.lemma_ for w in nlp(" ".join(result))] # lemmatize
return result
if os.path.exists(LOAD_LDA_FROM):
id2word = pickle.load(open("/kaggle/input/cbc-lda/id2word.lda", "rb"))
corpus = pickle.load(open("/kaggle/input/cbc-lda/corpus.lda", "rb"))
else:
articles = train_df["article"].tolist()
articles = [preprocess(s) for s in articles]
id2word = gensim.corpora.Dictionary(articles)
corpus = [id2word.doc2bow(s) for s in articles]
# Train LDA model to detect topic of each paragraph
if os.path.exists(LOAD_LDA_FROM):
lda_model = pickle.load(open("/kaggle/input/cbc-lda/lda_model.lda", "rb"))
else:
lda_model = gensim.models.ldamodel.LdaModel(
corpus=corpus,
id2word=id2word,
num_topics=10,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha="auto",
per_word_topics=True,
)
pickle.dump(lda_model, open("/kaggle/working/lda_model.lda", "wb"))
pickle.dump(corpus, open("/kaggle/working/corpus.lda", "wb"))
pickle.dump(id2word, open("/kaggle/working/id2word.lda", "wb"))
# Print the keyword of topics
pprint(lda_model.print_topics())
TOPIC = [1, 2, 3]
# Create the topic column
from operator import itemgetter
def get_topic(article):
s = preprocess(article)
corp = id2word.doc2bow(s)
return max(lda_model.get_document_topics(corp), key=itemgetter(1))[0]
train_df["topic"] = [
max(lda_model.get_document_topics(c), key=itemgetter(1))[0] for c in corpus
]
test_df["topic"] = test_df["article"].apply(lambda s: get_topic(s))
# Only train and predict for articles that are relevant in predicting rate change
train_df = train_df[train_df["topic"].isin(TOPIC)]
test_df = test_df[test_df["topic"].isin(TOPIC)]
print("Train df shape: ", train_df.shape)
print("Test df shape: ", test_df.shape)
# ## Oversampling/ Undersampling
# Implement undersampling to make the train dataset more balanced
train_increase_df = train_df[train_df[f"{PREDICT_PERIOD_VARIABLE}_class"] == "Increase"]
train_decrease_df = train_df[train_df[f"{PREDICT_PERIOD_VARIABLE}_class"] == "Decrease"]
train_same_df = train_df[train_df[f"{PREDICT_PERIOD_VARIABLE}_class"] == "Same"]
max_size = max(train_increase_df.shape[0], train_decrease_df.shape[0])
train_same_df = train_same_df.sample(int(max_size * 1.2))
# New balanced train_df
train_df = pd.concat([train_increase_df, train_decrease_df, train_same_df])
train_df = shuffle(train_df)
# Check if train_df is balanced
data = (
train_df.groupby(f"{PREDICT_PERIOD_VARIABLE}_class")
.agg({"id": "count"})
.reset_index()
)
sns.barplot(data, x=f"{PREDICT_PERIOD_VARIABLE}_class", y="id")
plt.show()
# ## Token, attention, output
# Check if we already have trained parameters
if os.path.exists(LOAD_MODEL_FROM):
tokenizer = AutoTokenizer.from_pretrained(f"{LOAD_MODEL_FROM}")
else:
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
# Define function to get token from pretrained tokenizer
def get_token(df):
nrows = df.shape[0]
input_ids = np.zeros((nrows, MAX_LEN), dtype="int32")
attention_mask = np.zeros((nrows, MAX_LEN), dtype="int32")
for i in range(nrows):
txt = df.iloc[i]["article"]
tokens = tokenizer.encode_plus(
txt, max_length=MAX_LEN, padding="max_length", truncation=True
)
input_ids[i,] = tokens["input_ids"]
attention_mask[i,] = tokens["attention_mask"]
return input_ids, attention_mask
# Get tokens
train_input_ids, train_attention_mask = get_token(train_df)
test_input_ids, test_attention_mask = get_token(test_df)
print("Train set:")
print("Input_ids shape: ", train_input_ids.shape)
print("Attention mask shape: ", train_attention_mask.shape)
print("\n")
print("Test set:")
print("Input_ids shape: ", test_input_ids.shape)
print("Attention mask shape: ", test_attention_mask.shape)
# Encode target variables so we can feed them to our model
enc = LabelEncoder()
enc.fit(train_df[f"{PREDICT_PERIOD_VARIABLE}_class"])
print("Classes: ", enc.classes_)
# Encode targets
train_target = enc.transform(train_df[f"{PREDICT_PERIOD_VARIABLE}_class"])
test_target = enc.transform(test_df[f"{PREDICT_PERIOD_VARIABLE}_class"])
print("Train targets shape: ", train_target.shape)
print("Test targets shape: ", test_target.shape)
# # Build model
def build_model():
# Speech feature
## Load pre-trained model or fine-tuned model
if os.path.exists(LOAD_MODEL_FROM):
config = AutoConfig.from_pretrained(f"{LOAD_MODEL_FROM}/config.json")
backbone = TFAutoModel.from_pretrained(f"{LOAD_MODEL_FROM}/tf_model.h5")
else:
config = AutoConfig.from_pretrained(PRETRAINED_MODEL, output_hidden_states=True)
backbone = TFAutoModel.from_pretrained(PRETRAINED_MODEL, config=config)
## Get embeddings from pretrained model
tokens = tf.keras.layers.Input(shape=(MAX_LEN,), name="tokens", dtype=tf.int32)
attention = tf.keras.layers.Input(
shape=(MAX_LEN,), name="attention", dtype=tf.int32
)
## Get the last hidden state from pretrained model
outputs = backbone(tokens, attention_mask=attention)
hidden_state = outputs[1][-1]
## Calculate mean embeddings from the last hidden state
input_mask_expanded = tf.broadcast_to(
tf.expand_dims(attention, -1), tf.shape(hidden_state)
)
input_mask_expanded = tf.cast(input_mask_expanded, tf.float32)
sum_embeddings = tf.reduce_sum(hidden_state * input_mask_expanded, axis=1)
sum_mask = tf.reduce_sum(input_mask_expanded, axis=1)
sum_mask = tf.clip_by_value(
sum_mask, clip_value_min=1e-9, clip_value_max=tf.float32.max
)
mean_embeddings = sum_embeddings / sum_mask
speech_feature = tf.keras.layers.Dense(
4,
activation="relu",
dtype="float32",
kernel_initializer=tf.keras.initializers.Orthogonal(seed=42),
)(mean_embeddings)
# Other features
rate_change_input = tf.keras.layers.Input(
shape=len(PAST_PERIOD_VARIABLE), name="past rate change", dtype=tf.float32
)
# Combine input
all_feature = tf.concat([speech_feature, rate_change_input], axis=1)
# Final layer with softmax activation
logits = tf.keras.layers.Dense(
len(enc.classes_),
activation="softmax",
dtype="float32",
kernel_initializer=tf.keras.initializers.Orthogonal(seed=42),
)(all_feature)
# Compile model
model = tf.keras.Model(
inputs=[tokens, attention, rate_change_input], outputs=logits
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
return model
with strategy.scope():
model = build_model()
if DEBUG:
model.summary()
# This chunk is for debug only
if DEBUG:
config = AutoConfig.from_pretrained(PRETRAINED_MODEL, output_hidden_states=True)
backbone = TFAutoModel.from_pretrained(PRETRAINED_MODEL, config=config)
outputs = backbone(
train_input_ids[0:2,],
attention_mask=train_attention_mask[0:2,],
output_hidden_states=True,
)
hidden_state = outputs[0]
print(tf.shape(hidden_state))
print(tf.shape(train_attention_mask[0:2,]))
# Fit model
EPOCHS = 3
BATCH_SIZE = 16
if os.path.exists(LOAD_MODEL_FROM):
model.load_weights(f"{LOAD_MODEL_FROM}/{MODEL_NAME}")
else:
model.fit(
x=[train_input_ids, train_attention_mask, train_df[PAST_PERIOD_VARIABLE]],
y=train_target,
validation_data=(
[test_input_ids, test_attention_mask, test_df[PAST_PERIOD_VARIABLE]],
test_target,
),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
verbose=1,
)
# # Analyze results
# What the predictions look like
preds = model.predict(
[test_input_ids, test_attention_mask, test_df[PAST_PERIOD_VARIABLE]],
batch_size=BATCH_SIZE,
verbose=1,
)
preds = np.argmax(preds, axis=1)
preds
# What the targets look like
test_target
# Confusion matrix
cf_matrix = confusion_matrix(
enc.inverse_transform(test_target),
enc.inverse_transform(preds),
labels=enc.classes_.tolist(),
)
sns.heatmap(cf_matrix, cmap="Blues")
plt.show()
|
import numpy as np
import pandas as pd
# pd.read_csv takes the path of the CSV file as a string argument
df = pd.read_csv("/kaggle/input/iriscsv/Iris.csv")
df
df.head(10)
df.tail()
# It provides some statistical information about all the numerical columns
df.describe()
df["SepalLengthCm"]
projected_columns = ["SepalLengthCm", "SepalWidthCm"]
df[projected_columns]
df["Species"].value_counts()
df.mean(numeric_only=True)
df.median(numeric_only=True)
data = {
"Id": [1, np.nan, 3, 5, 2, np.nan],
"Age": [23, 12, np.nan, 24, 45, 50],
"Gender": ["M", "F", "M", "F", np.nan, "M"],
}
df = pd.DataFrame(data)
df
df.mean(numeric_only=True)
df.isnull().sum()
|
import numpy as np
import pandas as pd
import plotly.express as px
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("../input/covid-world-vaccination-progress/country_vaccinations.csv")
def plot_vaccination(date="today"):
df = pd.read_csv(
"../input/covid-world-vaccination-progress/country_vaccinations.csv"
)
df[["total_vaccinations", "people_fully_vaccinated"]] = df[
["total_vaccinations", "people_fully_vaccinated"]
].fillna(-1)
country = df["country"].unique()
total_vax = []
vax_completed = []
dates = []
kind = []
for i in range(0, len(country)):
total_vax.append(
df["total_vaccinations"][df["country"] == country[i]].iloc[-1,]
)
vax_completed.append(
df["people_fully_vaccinated"][df["country"] == country[i]].iloc[-1,]
)
dates.append(df["date"][df["country"] == country[i]].iloc[-1,])
kind.append(df["vaccines"][df["country"] == country[i]].iloc[-1,])
dataframe = pd.DataFrame(
{
"total vax": total_vax,
"vax completed": vax_completed,
"date": dates,
"vax": kind,
},
index=country,
)
wales_total = dataframe[dataframe.index == "Wales"].iloc[0, 0]
wales_completed = dataframe[dataframe.index == "Wales"].iloc[0, 1]
dataframe["total vax"][dataframe.index == "United Kingdom"] += wales_total
dataframe["vax completed"][dataframe.index == "United Kingdom"] += wales_completed
Man_total = dataframe[dataframe.index == "Isle of Man"].iloc[0, 0]
Man_completed = dataframe[dataframe.index == "Isle of Man"].iloc[0, 1]
dataframe["total vax"][dataframe.index == "United Kingdom"] += Man_total
dataframe["vax completed"][dataframe.index == "United Kingdom"] += Man_completed
dataframe["total vax"] = dataframe["total vax"].astype(int)
dataframe["vax completed"] = dataframe["vax completed"].astype(int)
fig = px.choropleth(
dataframe,
locations=dataframe.index,
color_continuous_scale="Viridis",
locationmode="country names",
color=dataframe.iloc[:, 0].values,
# color_continuous_scale=px.colors.sequential.OrRd,
width=1050,
height=600,
labels={"index": "State", "color": "Vacinations"},
)
fig.update_layout(title="Vacinations around the world")
fig.show()
return dataframe
prova = plot_vaccination()
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
# Importing Train dataset
a = pd.read_csv("/kaggle/input/train-datsset/train.csv")
a
# Importing test dataset
a1 = pd.read_csv("/kaggle/input/test-dataset/test.csv")
a1
# read the first five rows of the dataset
a.head()
# read the last five rows of the dataset
a.tail()
# to read the column names
a.columns
# check whether there are any missing values in the dataset
a.isna().sum()
a.dtypes
a["Survived"].value_counts().plot(kind="pie")
a["Survived"].value_counts()
a["Pclass"].value_counts().plot(kind="pie")
a["Pclass"].value_counts()
a["Embarked"].value_counts().plot(kind="pie")
a["Embarked"].value_counts()
a["Sex"].value_counts().plot(kind="pie")
a["Sex"].value_counts()
a["SibSp"].value_counts().plot(kind="bar")
a["SibSp"].value_counts()
a["Parch"].value_counts().plot(kind="bar")
a["Parch"].value_counts()
a.columns
a.isna().sum()
a.dtypes
from sklearn.preprocessing import LabelEncoder
la = LabelEncoder()
cols = ["Name", "Sex", "Ticket", "Cabin", "Embarked"]
a[cols] = a[cols].apply(la.fit_transform)
a.dtypes
a.isna().sum()
# missing values in 'Age' are replaced with the mean
a["Age"] = a["Age"].fillna(a["Age"].mean())
a.isna().sum()
fig, axs = plt.subplots(4, 2, figsize=(20, 20))
sns.histplot(data=a["Pclass"], kde=True, color="skyblue", ax=axs[0, 0])
sns.histplot(data=a["Age"], kde=True, color="olive", ax=axs[0, 1])
sns.histplot(data=a["PassengerId"], kde=True, color="gold", ax=axs[1, 0])
sns.histplot(data=a["Fare"], kde=True, color="green", ax=axs[1, 1])
sns.histplot(data=a["Cabin"], kde=True, color="green", ax=axs[2, 0])
sns.countplot(x=a["Survived"], ax=axs[2, 1])
sns.countplot(x=a["Embarked"], ax=axs[3, 0])
sns.countplot(x=a["Sex"], ax=axs[3, 1])
a = a.drop(["PassengerId"], axis=1)
a
# splitting dataset x(inputs) & y(output)
x = a.drop(["Survived"], axis=1)
y = a["Survived"]
y
x
# implementing train test split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.30, random_state=42
)
x_train
y_train
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
scalar.fit(x_train)
x_train = scalar.transform(x_train)
x_test = scalar.transform(x_test)
# Since this is a classification problem, we implement all 5 classification algorithms
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
model1 = KNeighborsClassifier(n_neighbors=9, weights="uniform")
model2 = GaussianNB()
model3 = SVC()
model4 = DecisionTreeClassifier(criterion="entropy")
model5 = RandomForestClassifier(n_estimators=100)
modellist = [model1, model2, model3, model4, model5]
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
for i in modellist:
i.fit(x_train, y_train)
y_pred = i.predict(x_test)
print("the classification details of model", i, "is below")
print("the confusion matrix of ", i, "is")
print(confusion_matrix(y_test, y_pred))
print("accuracy score of", i, "is")
print(accuracy_score(y_test, y_pred))
print("the classification report of", i, "is")
print(classification_report(y_test, y_pred))
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
result = confusion_matrix(y_test, y_pred)
cm = ["1", "0"]
cmd = ConfusionMatrixDisplay(result, display_labels=cm)
cmd.plot()
sns.heatmap(a.corr(), annot=True, cmap="viridis")
# Now we implement on testing data
a1 = a1.drop(["PassengerId"], axis=1)
a1
a1.dtypes
from sklearn.preprocessing import LabelEncoder
la = LabelEncoder()
cols = ["Name", "Sex", "Ticket", "Cabin", "Embarked"]
a1[cols] = a1[cols].apply(la.fit_transform)
a["Embarked"].value_counts().plot(kind="bar")
a["Embarked"].value_counts()
a1.columns
a1.isna().sum()
# missing values in 'Age' and 'Fare' are replaced with mean()
a1["Age"] = a1["Age"].fillna(a1["Age"].mean())
a1["Fare"] = a1["Fare"].fillna(a1["Fare"].mean())
fig, axs = plt.subplots(4, 2, figsize=(20, 20))
sns.histplot(data=a1["Pclass"], kde=True, color="skyblue", ax=axs[0, 0])
sns.histplot(data=a1["Age"], kde=True, color="olive", ax=axs[0, 1])
sns.histplot(data=a1["Ticket"], kde=True, color="gold", ax=axs[1, 0])
sns.histplot(data=a1["Fare"], kde=True, color="green", ax=axs[1, 1])
sns.histplot(data=a1["Cabin"], kde=True, color="black", ax=axs[2, 0])
sns.histplot(data=a1["Parch"], kde=True, color="orange", ax=axs[2, 1])
sns.countplot(x=a1["SibSp"], ax=axs[3, 0])
sns.countplot(x=a1["Sex"], ax=axs[3, 1])
# splitting dataset x(inputs) & y(output)
x = a1.drop(["Embarked"], axis=1)
y = a1["Embarked"]
y
x
# Implementing Training and Testing
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.30, random_state=42
)
x_train
y_train
# Preprocessing Steps
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
scalar.fit(x_train)
x_train = scalar.transform(x_train)
x_test = scalar.transform(x_test)
# Here we implement 5 classification models
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
model11 = KNeighborsClassifier(n_neighbors=9, weights="uniform")
model22 = GaussianNB()
model33 = SVC()
model44 = DecisionTreeClassifier(criterion="entropy")
model55 = RandomForestClassifier(n_estimators=100)
modellist = [model11, model22, model33, model44, model55]
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
for i in modellist:
i.fit(x_train, y_train)
y_pred = i.predict(x_test)
print("the classification details of model", i, "is below")
print("the confusion matrix of ", i, "is")
print(confusion_matrix(y_test, y_pred))
print("accuracy score of", i, "is")
print(accuracy_score(y_test, y_pred))
print("the classification report of", i, "is")
print(classification_report(y_test, y_pred))
y_train.head(10)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
result = confusion_matrix(y_test, y_pred)
cm = ["0", "1", "2"]
cmd = ConfusionMatrixDisplay(result, display_labels=cm)
cmd.plot()
sns.heatmap(a1.corr(), annot=True, cmap="viridis")
|
# Import the pandas module with the alias "pd"
import pandas as pd
# ### TASK:
# Create a Series that looks like this (the left column is the index):
# Punkte Kandidat 1    17
# Punkte Kandidat 2    19
# Punkte Kandidat 3    12
#
pd.Series(
[17, 19, 12],
index=["Punkte Kanditat 1", "Punkte Kandidat 2", "Punkte Kanditat 3"],
name="Bewertung",
)
# ### TASK:
# Create a DataFrame that looks like this:
# The left column and the top row are each a label index.
# The values of the middle column have the format "String".
# The values of the right column have the format "Integer".
#
#              KanzlerIn          Dauer in Jahren
# 1949–1963    Konrad Adenauer    14
# 1963–1966    Ludwig Erhard      3
# 1969–1974    Willy Brandt       5
# ...
# 2005–2021    Angela Merkel      16
#
pd.DataFrame(
{
"KanzlerIn": [
"Konrad Adenauer",
"Ludwig Erhard",
"Willy Brandt",
"Angela Merkel",
],
"Dauer in Jahren": ["14", "3", "5", "16"],
},
index=["1949-1963", "1963-1966", "1969-1974", "2005-2021"],
)
# Read a CSV file from Kaggle
df = pd.read_csv(
"/kaggle/input/students-performance-in-exams/StudentsPerformance.csv", index_col=0
)
pd.read_csv("/kaggle/input/students-performance-in-exams/StudentsPerformance.csv")
# The metric dependent variable is the math score; the independent variables are e.g. gender, ethnicity, parental level of education
# Determine the extent of the dataset (number of observations, number of variables)
# Show the top 5 rows of the dataset
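# The two steps described above, written out (a minimal illustration added here;
# the original notebook only states them as comments).
print(df.shape)  # (number of observations, number of variables)
df.head()  # top 5 rows of the dataset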
# By gender
df.groupby("gender")["math score"].mean()
# by ethnicity
# on average, male students achieve better math exam results than female students
df.groupby("race/ethnicity")["math score"].mean()
# by parental level of education
df.groupby("parental level of education")["math score"].mean()
# students whose parents have a higher level of education perform better
|
# # GAN ON MNIST
# 
# # Importing the packages
import keras
import numpy as np
import keras.layers as L
import keras.models as M
import tensorflow as tf
from keras.datasets.mnist import load_data
import imageio
import matplotlib.pyplot as pyplot
import os
# # Making discriminator model
def make_d_model(in_shape=(28, 28, 1)):
model = M.Sequential()
model.add(
L.Conv2D(
filters=64,
kernel_size=(3, 3),
activation=L.LeakyReLU(alpha=0.2),
input_shape=in_shape,
padding="same",
)
)
model.add(L.Dropout(0.4))
model.add(
L.Conv2D(
filters=64,
kernel_size=(3, 3),
activation=L.LeakyReLU(alpha=0.2),
input_shape=in_shape,
padding="same",
)
)
model.add(L.Dropout(0.4))
model.add(L.Flatten())
model.add(L.Dense(1, "sigmoid"))
    opt = keras.optimizers.Adam(learning_rate=0.0001, beta_1=0.5)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
return model
# # Making function to get MNIST data
def get_data():
train, test = load_data()
X = train[0]
X = np.expand_dims(X, axis=-1)
X = X.astype("float32")
X = X / 255
return X
# # Making function to get real sample data
def make_real_samples(dataset, n_samples):
ix = np.random.randint(0, dataset.shape[0], n_samples)
X = dataset[ix]
y = np.ones((n_samples, 1))
return X, y
# # Function to make random latent points
def make_latent_points(latent_dim, n_samples):
latent = np.random.randn(latent_dim * n_samples)
latent = latent.reshape(n_samples, latent_dim)
return latent
# # Making Generator model
# Conv2DTranspose upsampling: with padding="same" the output size is input * stride; with padding="valid" it would be (input - 1) * stride + kernel_size
def make_generator_model(latent_dim):
model = M.Sequential()
model.add(
L.Dense(128 * 7 * 7, input_dim=latent_dim, activation=L.LeakyReLU(alpha=0.2))
)
model.add(L.Reshape((7, 7, 128)))
model.add(
L.Conv2DTranspose(
128,
kernel_size=(4, 4),
strides=(2, 2),
padding="same",
activation=L.LeakyReLU(alpha=0.2),
)
)
model.add(
L.Conv2DTranspose(
128,
kernel_size=(4, 4),
strides=(2, 2),
padding="same",
activation=L.LeakyReLU(alpha=0.2),
)
)
model.add(L.Conv2D(1, kernel_size=(7, 7), activation="sigmoid", padding="same"))
return model
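# Quick shape sanity check (illustrative addition, not in the original notebook):
# with padding="same" each Conv2DTranspose doubles the spatial size, so the
# generator maps a latent vector to a 28x28x1 image (7x7 -> 14x14 -> 28x28).
_gen_check = make_generator_model(latent_dim=100)
print(_gen_check.output_shape)  # expected: (None, 28, 28, 1)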
# # Making Function to create fake sample using generator and Latent points
def make_fake_samples(generator, latent_dim, n_samples):
X = make_latent_points(latent_dim, n_samples)
predictions = generator.predict(X)
y = np.zeros((n_samples, 1))
return predictions, y
# # Making GAN Model
# Define the gan model
def make_gan_model(g_model, d_model):
d_model.trainable = False
model = M.Sequential()
model.add(g_model)
model.add(d_model)
    opt = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
return model
# # Function to show result after certain no. of epochs
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
X_real, y_real = make_real_samples(dataset, n_samples)
_, real_acc = d_model.evaluate(X_real, y_real, verbose=0)
X_fake, y_fake = make_fake_samples(g_model, latent_dim, n_samples)
_, fake_acc = d_model.evaluate(X_fake, y_fake, verbose=0)
print(
"For Epoch:", epoch, " Real Accuracy:", real_acc, " Fake Accuracy:", fake_acc
)
save_plot(X_fake, epoch)
# # Function to train GAN
def train_gan(
g_model,
d_model,
gan_model,
dataset,
epochs=100,
batch_size=256,
latent_dim=100,
n_samples=100,
n_timer=10,
):
size = batch_size // 2
for i in range(epochs):
X_real, y_real = make_real_samples(dataset, n_samples)
X_fake, y_fake = make_fake_samples(g_model, latent_dim, n_samples)
X, y = np.vstack((X_real, X_fake)), np.vstack((y_real, y_fake))
d_loss, _ = d_model.train_on_batch(X, y)
X_gan = make_latent_points(latent_dim, batch_size)
y_gan = np.ones((batch_size, 1))
g_loss, _ = gan_model.train_on_batch(X_gan, y_gan)
print("Epoch :", i, "d_loss:", d_loss, " gan loss:", g_loss)
if i % n_timer == 0:
summarize_performance(i, g_model, d_model, dataset, latent_dim, n_samples)
# # Function to save result of generator
# create and save a plot of generated images (reversed grayscale)
def save_plot(examples, epoch, n=8):
# plot images
for i in range(n * n):
# define subplot
pyplot.subplot(n, n, 1 + i)
# turn off axis
pyplot.axis("off")
# plot raw pixel data
pyplot.imshow(examples[i, :, :, 0], cmap="gray_r")
# save plot to file
filename = "generated_plot_e%03d.png" % (epoch + 1)
pyplot.savefig(filename, dpi=100)
pyplot.close()
# # Calling Functions
latent_dim = 100
d_model = make_d_model()
g_model = make_generator_model(latent_dim)
gan_model = make_gan_model(g_model, d_model)
dataset = get_data()
train_gan(g_model, d_model, gan_model, dataset, epochs=1000)
path = "./"
import glob
from PIL import Image
# filepaths
fp_in = "./*.png"
fp_out = "./imag2.gif"
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#gif
img, *imgs = [Image.open(f) for f in sorted(glob.glob(fp_in))]
img.save(
fp=fp_out, format="GIF", append_images=imgs, save_all=True, duration=100, loop=0
)
|
# # Bank Central Asia Stock Forecasting using Sentiment Analysis
# BCA (Bank Central Asia) is one of the largest banks in Indonesia, and as with many publicly traded companies, its stock price is subject to a wide range of market forces, including news articles, social media mentions, and other factors that can affect investor sentiment. Sentiment analysis is a powerful tool for analyzing such market data, allowing investors and analysts to better understand the market sentiment around a particular stock. In this analysis, we will examine the use of sentiment analysis to forecast the stock price of BCA and explore how it can provide valuable insights into the underlying market sentiment and potential future trends.
# "This analysis is performed to compare the use of sentiment analysis feature in LSTM forecasting models"
# Note (10 April 2023): This notebook is part of an assignment for the Master of Information Technology - Universitas Indonesia (2021SB)
# ### Libraries
import re
import time
import math
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import yfinance as yf
import tensorflow as tf
from tensorflow import keras
from textblob import TextBlob
from scipy.stats import shapiro
import matplotlib.pyplot as plt
from scipy.stats import wilcoxon
from keras.optimizers import Adam
from keras.models import Sequential
from tensorflow.keras import layers
from sklearn.metrics import r2_score
from yahoofinancials import YahooFinancials
from deep_translator import GoogleTranslator
from keras.layers import LSTM, Dense, Dropout
from sklearn.preprocessing import StandardScaler
sns.set_style("darkgrid")
# ### Functions
def remove_links(text):
cleaned_text = re.sub(r"http\S+", "", text)
return cleaned_text
def clean_hashtags(tweet):
cleaned_tweet = re.sub(r"#(\w+)", "", tweet)
return cleaned_tweet
def remove_usernames(text):
cleaned_text = re.sub(r"@\w+", "", text)
return cleaned_text
def remove_emoticons(text):
pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+",
flags=re.UNICODE,
)
return pattern.sub(r"", text)
def windowing(data, window):
data_w = []
for i in range(window, len(data)):
data_w.append(data[i - window : i])
data_w = np.reshape(
data_w, (np.shape(data_w)[0], np.shape(data_w)[2], np.shape(data_w)[1])
)
data_w = np.array(data_w)
return data_w
# ## 1. Get BBCA Stock Prices from Yahoo Finance
yahoo_financials = YahooFinancials("BBCA.JK")
data = yahoo_financials.get_historical_price_data(
start_date="2021-11-29", end_date="2022-11-29", time_interval="daily"
)
stock = pd.DataFrame(data["BBCA.JK"]["prices"])
stock = stock.drop("date", axis=1).set_index("formatted_date")
stock["date"] = stock.index
stock["date"] = stock["date"].astype("datetime64[ns]")
stock = stock.reset_index(drop=True)
# ## 2. Generate Relevant Stock Features
stock["close_yesterday"] = stock.close.shift(1)
stock["open_yesterday"] = stock.open.shift(1)
stock["volume_yesterday"] = stock.volume.shift(1)
stock["low_yesterday"] = stock.low.shift(1)
stock["price_diff_yesterday"] = stock["close"] - stock["close_yesterday"]
stock["price_limit_yesterday"] = (stock["close"] - stock["close_yesterday"]) / stock[
"close_yesterday"
]
stock["volume_diff_yesterday"] = stock["volume"] - stock["volume_yesterday"]
stock["volume_limit_yesterday"] = (stock["volume"] - stock["volume_yesterday"]) / stock[
"volume_yesterday"
]
stock["amplitude_yesterday"] = (stock["high"] - stock["low_yesterday"]) / stock[
"close_yesterday"
]
stock["difference_yesterday"] = (stock["close"] - stock["open"]) / stock[
"close_yesterday"
]
# ## 3. Technical Columns
technical_columns = [
"date",
"open",
"close",
"high",
"low",
"volume",
"price_diff_yesterday",
"price_limit_yesterday",
"volume_diff_yesterday",
"volume_limit_yesterday",
"amplitude_yesterday",
"difference_yesterday",
]
stock = stock[technical_columns]
# ## 4. Exploratory Data Analysis
print("Data Shape :", np.shape(stock))
stock.info()
# There is no NULL value in the dataset
fig, ax = plt.subplots(6, 1, figsize=(16, 16))
sns.lineplot(data=stock, x="date", y="open", ax=ax[0], label="open", color="orange")
sns.lineplot(data=stock, x="date", y="close", ax=ax[0], label="close", color="navy")
sns.lineplot(data=stock, x="date", y="high", ax=ax[0], label="high", color="green")
sns.lineplot(data=stock, x="date", y="low", ax=ax[0], label="low", color="darkred")
ax[0].set_title("BBCA Stock Price")
sns.lineplot(data=stock, x="date", y="price_diff_yesterday", ax=ax[1], color="navy")
ax[1].set_title("BBCA 1 Day Lag Price Diff")
sns.lineplot(data=stock, x="date", y="price_limit_yesterday", ax=ax[2], color="red")
ax[2].set_title("BBCA 1 Day Lag Price Limit")
sns.lineplot(data=stock, x="date", y="volume_diff_yesterday", ax=ax[3], color="orange")
ax[3].set_title("BBCA 1 Day Lag Volume Diff")
sns.lineplot(data=stock, x="date", y="amplitude_yesterday", ax=ax[4], color="navy")
ax[4].set_title("BBCA 1 Day Lag Amplitude Diff")
sns.lineplot(data=stock, x="date", y="difference_yesterday", ax=ax[5], color="red")
ax[5].set_title("BBCA 1 Day Lag Open and Close")
fig.tight_layout()
plt.show()
# ## 5. Get Tweet Data
bca_sentiment = pd.read_excel("/kaggle/input/bank-central-asia-tweet/saham_bca.xlsx")[
["id", "permalink", "username", "text", "date"]
]
bca_sentiment.head()
# ## 6. Processing Tweet Data
# ### 6.1 Initial Punctuation and Character Cleansing
bca_sentiment["text_cleansed"] = bca_sentiment.text.apply(
lambda x: re.sub(r"\r?\n", "", x)
)
bca_sentiment["text_cleansed"] = bca_sentiment.text.apply(
lambda x: remove_emoticons(
remove_usernames(remove_links(clean_hashtags(x)))
).rstrip()
)
bca_sentiment.head()
# ### 6.2 Translate to English
bca_sentiment["text_eng"] = None
translation_eng = []
c = 0
for tx in bca_sentiment.text_cleansed:
try:
translate_text = GoogleTranslator(source="id", target="en").translate(tx)
translation_eng.append(translate_text)
except:
c = c + 1
translation_eng.append("Google Translate Error")
bca_sentiment["text_eng"] = translation_eng
print("error num : ", c)
bca_sentiment.head()
# ### 6.3 Sentiment Scoring using TextBlob
score_list = []
for te in bca_sentiment["text_eng"]:
blob = TextBlob(te)
sentiment_score = blob.sentiment.polarity
score_list.append(sentiment_score)
bca_sentiment["sentiment_score"] = score_list
bca_sentiment.head()
# ### 6.4 Daily Sentiment Average
daily_sentiment_avg = (
bca_sentiment.groupby("date").mean()[["sentiment_score"]].reset_index()
)
fig, ax = plt.subplots(1, 2, figsize=(16, 4))
sns.lineplot(data=daily_sentiment_avg, x="date", y="sentiment_score", ax=ax[0])
sns.histplot(bca_sentiment.sentiment_score, kde=True, ax=ax[1])
ax[0].set_title("Daily Sentiment Score Average")
ax[1].set_title("Sentiment Score Distribution")
# ## 7. Modelling
# ### 7.1 Dataset Preparation
stock_dt = stock
stock_dt["target"] = stock_dt.close.shift(-1)
stock_dt = stock_dt.iloc[1:]
target = stock_dt.target
dataset = pd.merge(
stock_dt, daily_sentiment_avg, left_on="date", right_on="date", how="inner"
)
# Repositioning Target Columns
target = dataset.target
dataset = dataset.drop("target", axis=1)
dataset["target"] = target
dataset.head()
dataset.tail()
# ### 7.2 Dataset Normalization
scaler1 = StandardScaler()
scaler2 = StandardScaler()
scaled_data1 = scaler1.fit_transform(
dataset.drop(["date", "sentiment_score"], axis=1).iloc[:-1, :]
)
scaled_data2 = scaler2.fit_transform(dataset.drop(["date"], axis=1).iloc[:-1, :])
scaled_data1_test = scaled_data1[:, -1]
scaled_data1_train = scaled_data1[:, :-1]
scaled_data2_test = scaled_data2[:, -1]
scaled_data2_train = scaled_data2[:, :-1]
scaled_data1_train = np.reshape(
scaled_data1_train,
(np.shape(scaled_data1_train)[0], np.shape(scaled_data1_train)[1]),
)
scaled_data2_train = np.reshape(
scaled_data2_train,
(np.shape(scaled_data2_train)[0], np.shape(scaled_data2_train)[1]),
)
print("Scaled data 1 train shape : ", np.shape(scaled_data1_train))
print("Scaled data 1 Test shape : ", np.shape(scaled_data1_test))
print("Scaled data 2 train shape : ", np.shape(scaled_data2_train))
print("Scaled data 2 Test shape : ", np.shape(scaled_data2_test))
# ### 7.3 Train Test Split
training_data_len = int(0.8 * len(dataset))
x_train1 = scaled_data1_train[:training_data_len]
y_train1 = scaled_data1_test[:training_data_len]
print("X Training 1 Shape : ", np.shape(x_train1))
print("Y Training 1 Shape : ", np.shape(y_train1))
x_test1 = scaled_data1_train[training_data_len:]
y_test1 = scaled_data1_test[training_data_len:]
print("X Test 1 Shape : ", np.shape(x_test1))
print("Y Test 1 Shape : ", np.shape(y_test1))
x_train2 = scaled_data2_train[:training_data_len]
y_train2 = scaled_data2_test[:training_data_len]
print("X Training 2 Shape : ", np.shape(x_train2))
print("Y Training 2 Shape : ", np.shape(y_train2))
x_test2 = scaled_data2_train[training_data_len:]
y_test2 = scaled_data2_test[training_data_len:]
print("X Test 2 Shape : ", np.shape(x_test2))
print("Y Test 2 Shape : ", np.shape(y_test2))
# ### 7.2 LSTM only with stocks data
model1 = keras.Sequential()
model1.add(
layers.LSTM(
11,
return_sequences=False,
input_shape=(x_train1.shape[1], 1),
activation="relu",
)
)
model1.add(Dense(units=1))
model1.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=3)
model1.compile(optimizer="adam", loss="mean_squared_error")
model1.fit(x_train1, y_train1, batch_size=1, epochs=100, callbacks=[callback])
predictions1 = model1.predict(x_test1)
pred_df1 = pd.DataFrame(
scaled_data1, columns=dataset.drop(["date", "sentiment_score"], axis=1).columns
)
pred_df1 = pred_df1.iloc[training_data_len:]
pred_df1 = pred_df1.drop("target", axis=1)
pred_df1["predictions"] = predictions1
pred_df1 = pd.DataFrame(scaler1.inverse_transform(pred_df1), columns=pred_df1.columns)
pred_df1.head()
actual_df1 = pd.DataFrame(
scaled_data1, columns=dataset.drop(["date", "sentiment_score"], axis=1).columns
)
actual_df1 = actual_df1.iloc[training_data_len:]
actual_df1 = pd.DataFrame(
scaler1.inverse_transform(actual_df1), columns=actual_df1.columns
)
actual_df1.head()
fig, ax = plt.subplots(figsize=(16, 4))
sns.lineplot(actual_df1.target, ax=ax, label="target")
sns.scatterplot(actual_df1.target, ax=ax)
sns.lineplot(pred_df1.predictions, ax=ax, label="prediction")
sns.scatterplot(pred_df1.predictions, ax=ax)
ax.set_title("Model 1 Target vs Predictions")
fig.tight_layout()
# ### 7.2.1 Model 1 R2 Score
# Evaluate your model on test data
y_true = actual_df1.target.values # true test labels
y_pred = pred_df1.predictions.values # predicted test labels
# Calculate R2 value
r2 = r2_score(y_true, y_pred)
# Print R2 value
print("Model 1 R2 value: {:.2f}".format(r2))
# ### 7.2.2 Save Model
model1.save("bca_lstm_model_non_sentiment.h5")
# ### 7.3 LSTM with Stocks and Sentiment Data
model2 = keras.Sequential()
model2.add(
layers.LSTM(
11,
return_sequences=False,
input_shape=(x_train2.shape[1], 1),
activation="relu",
)
)
model2.add(Dense(units=1))
model2.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=3)
model2.compile(optimizer="adam", loss="mean_squared_error")
model2.fit(x_train2, y_train2, batch_size=1, epochs=100, callbacks=[callback])
predictions2 = model2.predict(x_test2)
pred_df2 = pd.DataFrame(scaled_data2, columns=dataset.drop(["date"], axis=1).columns)
pred_df2 = pred_df2.iloc[training_data_len:]
pred_df2 = pred_df2.drop("target", axis=1)
pred_df2["predictions"] = predictions2
pred_df2 = pd.DataFrame(scaler2.inverse_transform(pred_df2), columns=pred_df2.columns)
pred_df2.head()
actual_df2 = pd.DataFrame(scaled_data2, columns=dataset.drop(["date"], axis=1).columns)
actual_df2 = actual_df2.iloc[training_data_len:]
actual_df2 = pd.DataFrame(
scaler2.inverse_transform(actual_df2), columns=actual_df2.columns
)
actual_df2.head()
fig, ax = plt.subplots(figsize=(16, 4))
sns.lineplot(actual_df2.target, ax=ax, label="target")
sns.scatterplot(actual_df2.target, ax=ax)
sns.lineplot(pred_df2.predictions, ax=ax, label="prediction")
sns.scatterplot(pred_df2.predictions, ax=ax)
ax.set_title("Model 2 Target vs Predictions")
fig.tight_layout()
# ### 7.3.1 Model 2 R2 Score
# Evaluate your model on test data
y_true2 = actual_df2.target.values # true test labels
y_pred2 = pred_df2.predictions.values # predicted test labels
# Calculate R2 value
r2_2 = r2_score(y_true2, y_pred2)
# Print R2 value
print("Model 2 R2 value: {:.2f}".format(r2_2))
# ### 7.3.2 Save Model 2
model2.save("bca_lstm_model_with_sentiment.h5")
# ## 8. Model Result Comparison
fig, ax = plt.subplots(figsize=(10, 4))
sns.histplot(
pred_df1.predictions, bins=20, kde=True, label="Model 1", ax=ax, color="red"
)
sns.histplot(
pred_df2.predictions, bins=20, kde=True, label="Model 2", ax=ax, color="navy"
)
plt.legend()
ax.set_title("Model 1 and Model 2 Prediction Distributions")
# ### 8.1 Distribution Normality Test
def shapiro_test(data, alpha=0.05):
stat, p = shapiro(data)
if p > alpha:
print("Sample looks Gaussian (Normal) (fail to reject H0)")
else:
print("Sample does not look Gaussian (Nor Normal) (reject H0)")
return stat, p
print("Model 1 : ")
stat1, p1 = shapiro_test(pred_df1.predictions.values)
print("Model 2 : ")
stat2, p2 = shapiro_test(pred_df2.predictions.values)
# ### 8.2 Wilcoxon Signed Rank Test (Since the distribution is not normal)
data1 = pred_df1.predictions.values ##### NON SENTIMENT
data2 = pred_df2.predictions.values #### WITH SENTIMENT
# perform Wilcoxon signed-rank test
stat, p = wilcoxon(data1, data2)
# interpret the test results
alpha = 0.05
if p > alpha:
print(
"The difference between the two related samples is not statistically significant (fail to reject H0)"
)
else:
print(
"The difference between the two related samples is statistically significant (reject H0)"
)
# # Code Archive
# import nltk
# from nltk.corpus import wordnet
# from nltk.corpus import stopwords
# from nltk.tokenize import word_tokenize
# from nltk.stem import PorterStemmer, WordNetLemmatizer
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('averaged_perceptron_tagger')
# ### Tokenizing
# token_list = []
# for te in bca_sentiment["text_eng"]:
# tokens = word_tokenize(te)
# token_list.append(tokens)
# bca_sentiment["tokenized"] = token_list
# bca_sentiment.head()
# ### POS Tagging
# pt_list = []
# for tokens in bca_sentiment.tokenized:
# pos_tags = nltk.pos_tag(tokens)
# pt_list.append(pos_tags)
# bca_sentiment["postag"] = pt_list
# bca_sentiment.head()
# ### Stopword Removal
# # Load the stopwords
# sr_list = []
# stop_words = set(stopwords.words('english'))
# for tokens in bca_sentiment.postag:
# sr_list_temp = []
# for word in tokens:
# if word[0].lower() not in stop_words:
# sr_list_temp.append(word)
# sr_list.append(sr_list_temp)
# bca_sentiment["stopremov"] = sr_list
# bca_sentiment.head()
# ### Lemmatization
# # FIX NLTK WORDNET ERROR - RUN ONLY IF WORDNET UNZIP ERROR
# import os
# # Create the directory if it does not already exist
# if not os.path.exists("my_nltk_data"):
# os.mkdir("my_nltk_data")
# # Add the directory to the list of NLTK data directories
# nltk.data.path.append(os.path.abspath("my_nltk_data"))
# # Download the WordNet corpus to the specified directory
# nltk.download("wordnet", download_dir="my_nltk_data")
# !unzip -o my_nltk_data/corpora/wordnet.zip -d my_nltk_data/corpora/
# def get_wordnet_pos(tag):
# if tag.startswith('N'):
# return wordnet.NOUN
# elif tag.startswith('V'):
# return wordnet.VERB
# elif tag.startswith('J'):
# return wordnet.ADJ
# elif tag.startswith('R'):
# return wordnet.ADV
# else:
# return wordnet.NOUN # default to noun if tag is not recognized
# lemmatizer = WordNetLemmatizer()
# lemmatized = []
# for li in bca_sentiment.stopremov:
# lemmatized_words = [lemmatizer.lemmatize(word, pos=get_wordnet_pos(pos)) for word, pos in li] # add pos tag for verbs
# full_text = ' '.join(lemmatized_words)
# lemmatized.append(full_text)
# bca_sentiment["lemmatized"] = lemmatized
# bca_sentiment.head()
# ### 7.4 Windowing
# x_train_w1 = windowing(x_train1,7)
# x_test_w1 = windowing(x_test1,7)
# x_train_w2 = windowing(x_train2,7)
# x_test_w2 = windowing(x_test2,7)
# print(np.shape(x_train_w1))
# print(np.shape(x_test_w1))
# print(np.shape(x_train_w2))
# print(np.shape(x_test_w2))
|
# # **Setup**
# **Requirements**
# Packages that need to be installed.
# **Imports**
# Base libraries, Keras, TensorFlow and Segmentation Models.
import numpy as np
import pandas as pd
import random
import os
import gc
import cv2
from glob import glob
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import matplotlib.patches as mpatches
from sklearn.model_selection import KFold
import tensorflow as tf
from tensorflow import keras
import keras
from keras import backend as K
from keras.models import Model
from keras.losses import binary_crossentropy
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.models import load_model, save_model
from keras_unet.utils import get_augmented
from keras.optimizers import Adam
import segmentation_models as sm
from segmentation_models import Unet
# **Config**
# Useful configuration parameters.
IM_HEIGHT = 320
IM_WIDTH = 320
BATCH_SIZE = 8
LR = 0.0001 # 0.00001, 0.001
EPOCHS = 100 # 50
N_SPLITS = 10
# **Seeds**
# Random seeds for reproducibility.
# set numpy, random and TensorFlow seeds
def set_seeds(seed=0):
np.random.seed(seed)
random.seed(seed)
tf.random.set_seed(seed)
set_seeds()
# **Run-Length Encoding (RLE)**
# Functions for RLE encoding and decoding.
# RLE is a lossless data compression format in which runs of repeated values are replaced with the value and a count.
# It is particularly useful for storing segmentation masks.
def rle_encode(img):
pixels = img.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return " ".join(str(x) for x in runs)
def rle_decode(mask_rle, shape, color=1):
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros((shape[0] * shape[1], shape[2]), dtype=np.float32)
for lo, hi in zip(starts, ends):
img[lo:hi] = color
return img.reshape(shape)
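# Minimal round-trip sanity check (illustrative addition, not part of the original
# pipeline): encode a tiny binary mask to RLE and decode it back to verify that
# rle_encode and rle_decode are inverses of each other.
_toy_mask = np.zeros((4, 4, 1), dtype=np.float32)
_toy_mask[1:3, 1:3, 0] = 1.0
_toy_rle = rle_encode(_toy_mask)
print("RLE string:", _toy_rle)
print("round-trip ok:", np.array_equal(rle_decode(_toy_rle, (4, 4, 1)), _toy_mask))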
# # **Preprocessing**
# **Data Loading**
# Load the CSV containing the dataset.
# load dataset csv as dataframe
df = pd.read_csv("/kaggle/input/uw-madison-gi-tract-image-segmentation/train.csv")
print("data_len:", len(df))
df.head()
# **Data Transformation**
# Transform the dataset into a format more convenient for processing.
# transform dataframe
def transform(df):
# split id in case, day, slice
df["case"] = df["id"].apply(lambda x: int(x.split("_")[0].replace("case", "")))
df["day"] = df["id"].apply(lambda x: int(x.split("_")[1].replace("day", "")))
df["slice"] = df["id"].apply(lambda x: x.split("_")[3])
# get path, width and height of dataset images
DIR = "/kaggle/input/uw-madison-gi-tract-image-segmentation/train"
all_images = glob(os.path.join(DIR, "**", "*.png"), recursive=True)
x = all_images[0].rsplit("/", 4)[0]
path_partial_list = []
for i in range(0, df.shape[0]):
path_partial_list.append(
os.path.join(
x,
"case" + str(df["case"].values[i]),
"case"
+ str(df["case"].values[i])
+ "_"
+ "day"
+ str(df["day"].values[i]),
"scans",
"slice_" + str(df["slice"].values[i]),
)
)
df["path_partial"] = path_partial_list
path_partial_list = []
for i in range(0, len(all_images)):
path_partial_list.append(str(all_images[i].rsplit("_", 4)[0]))
tmp_df = pd.DataFrame()
tmp_df["path_partial"] = path_partial_list
tmp_df["path"] = all_images
df = df.merge(tmp_df, on="path_partial").drop(columns=["path_partial"])
df["width"] = df["path"].apply(lambda x: int(x[:-4].rsplit("_", 4)[1]))
df["height"] = df["path"].apply(lambda x: int(x[:-4].rsplit("_", 4)[2]))
del x, path_partial_list, tmp_df
# create new dataframe
    # id, segmentation classes (large_bowel, small_bowel, stomach), path, case, day, slice, width, height
df_out = pd.DataFrame({"id": df["id"][::3]})
df_out["large_bowel"] = df["segmentation"][::3].values
df_out["small_bowel"] = df["segmentation"][1::3].values
df_out["stomach"] = df["segmentation"][2::3].values
df_out["path"] = df["path"][::3].values
df_out["case"] = df["case"][::3].values
df_out["day"] = df["day"][::3].values
df_out["slice"] = df["slice"][::3].values
df_out["width"] = df["width"][::3].values
df_out["height"] = df["height"][::3].values
# reorder indexes and fill NaN values with empty values
df_out = df_out.reset_index(drop=True)
df_out = df_out.fillna("")
return df_out
df = transform(df)
print("data_len:", len(df))
df.head()
# **Data Statistics - Per Class Segmentation Distribution**
# Distribution of segmented images for each segmentation class.
# plot per class segmentation distribution
def plot_segm_class(df):
bar = plt.bar([1, 2, 3], 100 * np.mean(df.iloc[:, 1:4] != "", axis=0))
plt.title("Per Class Segmentation Distribution", fontsize=16)
plt.xlabel("Class")
plt.ylabel("Segmented Images %")
plt.xticks([1, 2, 3])
labels = ["large bowel", "small bowel", "stomach"]
for rect, lbl in zip(bar, labels):
height = rect.get_height()
plt.text(
rect.get_x() + rect.get_width() / 2,
height,
lbl + "\n%.1f %%" % height,
ha="center",
va="bottom",
fontsize=12,
)
plt.ylim((0, 50))
plt.show()
plot_segm_class(df)
# **Data Statistics - Per Case Full Segmentation Distribution**
# Distribution of fully segmented images for each case under consideration.
# plot per case full segmentation distribution
def plot_segm_case(df):
cases = df["case"].unique()
n_labels = len(cases)
means = np.empty(shape=n_labels)
for i in range(n_labels):
means[i] = len(
df[
(df["case"] == cases[i])
& (df.iloc[:, 1] != "")
& (df.iloc[:, 2] != "")
& (df.iloc[:, 3] != "")
]
) / len(df)
plt.figure(figsize=(12, 6))
bar = plt.bar(np.arange(1, n_labels + 1), 100 * means)
plt.title("Per Case Full Segmentation Distribution", fontsize=16)
plt.ylabel("Fully Segmented Images %")
plt.xlabel("Case")
plt.xticks([])
plt.show()
plot_segm_case(df)
# **Data Statistics - Full Segmentation Distribution**
# Overall distribution of fully segmented images.
# plot overall full segmentation distribution
def plot_segm(df):
bar = plt.bar(
[1],
100
* np.mean(
(df.iloc[:, 1] != "") & (df.iloc[:, 2] != "") & (df.iloc[:, 3] != ""),
axis=0,
),
)
plt.title("Full Segmentation Distribution", fontsize=16)
plt.ylabel("Fully Segmented Images %")
plt.xlabel("Images")
plt.xticks([])
for rect in bar:
height = rect.get_height()
plt.text(
rect.get_x() + rect.get_width() / 2,
height,
"%.1f %%" % height,
ha="center",
va="bottom",
fontsize=12,
)
plt.ylim((0, 25))
plt.show()
plot_segm(df)
# **Data Cleaning**
# Removal of all data records belonging to images that are not fully segmented.
# filter data containing NaN values and reorder indexes
def filter_data(df):
df["large_bowel"].replace("", np.nan, inplace=True)
df["small_bowel"].replace("", np.nan, inplace=True)
df["stomach"].replace("", np.nan, inplace=True)
df = df.dropna()
df = df.reset_index(drop=True)
return df
df = filter_data(df)
print("data_len:", len(df))
df.head()
# **Data Saving**
# Saving the new dataset as CSV together with the corresponding masks.
# save decoded masks of new dataset
def save_masks(df):
for index, row in df.iterrows():
case = str(row["case"])
day = str(row["day"])
mask_lb = row["large_bowel"]
mask_sb = row["small_bowel"]
mask_s = row["stomach"]
h = row["height"]
w = row["width"]
path = "/kaggle/working/data/processed/masks/" + case + "/" + day + "/"
if not os.path.exists(path):
os.makedirs(path)
img_lb = np.uint8(rle_decode(mask_lb, (h, w, 1)))
img_lb = img_lb.astype(np.float32) * 255.0
cv2.imwrite(path + "large_bowel.png", img_lb)
img_sb = np.uint8(rle_decode(mask_sb, (h, w, 1)))
img_sb = img_sb.astype(np.float32) * 255.0
cv2.imwrite(path + "small_bowel.png", img_sb)
img_s = np.uint8(rle_decode(mask_s, (h, w, 1)))
img_s = img_s.astype(np.float32) * 255.0
cv2.imwrite(path + "stomach.png", img_s)
# save new dataset as csv
# df.to_csv("/kaggle/working/data/processed/data.csv", index=False)
# save_masks(df)
# **Data Generation**
# The Keras DataGenerator (Sequence) class is used to generate data in batches, which saves the memory that would otherwise be needed to load the whole image dataset.
# Two data generation cases are distinguished (see the arithmetic sketch after this list):
# - default/train -> a batch (size 8) is generated from two minibatches:
#
#   - train minibatch (size 6): existing images from the train set.
#
#   - augmented minibatch (size 2): augmented images obtained from the train minibatch.
#
# The data generated for training is thus increased by roughly 33%,
# giving an overall train set with about 25% augmented images.
#
# - validation/test -> a batch (size 8) is generated directly from the validation/test set.
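# Illustrative arithmetic for the batch composition described above (these lines
# only print the derived sizes using the BATCH_SIZE configured earlier; they are
# not used by the generator itself).
_augm_minibatch = int(0.25 * BATCH_SIZE)  # 2 augmented samples per batch
_train_minibatch = BATCH_SIZE - _augm_minibatch  # 6 original samples per batch
print("train minibatch:", _train_minibatch, "| augmented minibatch:", _augm_minibatch)
print("training data increase: ~%.0f%%" % (100 * _augm_minibatch / _train_minibatch))
print("augmented share of each batch: ~%.0f%%" % (100 * _augm_minibatch / BATCH_SIZE))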
# DataGenerator class
class DataGenerator(tf.keras.utils.Sequence):
# DataGenerator initialization
def __init__(self, df, batch_size=BATCH_SIZE, shuffle=False, subset="train"):
super().__init__()
self.df = df
self.shuffle = shuffle
self.subset = subset
self.batch_size = (int)(batch_size)
self.indexes = np.arange(len(df))
self.on_epoch_end()
if self.subset == "train":
self.augm_minibatch_size = int(np.floor((int)(0.25 * self.batch_size)))
self.batch_len = int(
np.floor(len(self.df) / (self.batch_size - self.augm_minibatch_size))
)
self.augm_len = int(np.floor(self.augm_minibatch_size * self.batch_len))
else:
self.batch_len = int(np.floor(len(self.df) / (int)(self.batch_size)))
# BATCH_LEN
def __len__(self):
return self.batch_len
# epoch end event trigger
def on_epoch_end(self):
if self.shuffle == True:
np.random.shuffle(self.indexes)
# image and mask batch generation
def __getitem__(self, index):
# default/train batch case
if self.subset == "train":
# create minibatch with 3/4 BATCH_SIZE original samples
X = np.empty(
((self.batch_size - self.augm_minibatch_size), IM_HEIGHT, IM_WIDTH, 3)
)
y = np.empty(
(((self.batch_size - self.augm_minibatch_size), IM_HEIGHT, IM_WIDTH, 3))
)
indexes = self.indexes[
index
* ((self.batch_size - self.augm_minibatch_size)) : (index + 1)
* ((self.batch_size - self.augm_minibatch_size))
]
for i, img_path in enumerate(self.df["path"].iloc[indexes]):
w = self.df["width"].iloc[indexes[i]]
h = self.df["height"].iloc[indexes[i]]
img = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH)
dsize = (IM_HEIGHT, IM_WIDTH)
img = cv2.resize(img, dsize)
img = img.astype(np.float32) / 255.0
img = np.expand_dims(img, axis=-1)
X[i,] = img
for k, j in enumerate(["large_bowel", "small_bowel", "stomach"]):
rles = self.df[j].iloc[indexes[i]]
mask = rle_decode(rles, shape=(h, w, 1))
mask = cv2.resize(mask, (IM_HEIGHT, IM_WIDTH))
y[i, :, :, k] = mask
# create minibatch with 1/4 BATCH_SIZE augmented samples
X_augm, y_augm = self.augment_data(X, y)
# concat minibatches in a batch with BATCH_SIZE samples
X = tf.concat([X, X_augm], axis=0)
y = tf.concat([y, y_augm], axis=0)
return X, y
# validation/test batch case
else:
# create batch with BATCH_SIZE original samples
X = np.empty((self.batch_size, IM_HEIGHT, IM_WIDTH, 3))
y = np.empty((self.batch_size, IM_HEIGHT, IM_WIDTH, 3))
indexes = self.indexes[
index * (self.batch_size) : (index + 1) * (self.batch_size)
]
for i, img_path in enumerate(self.df["path"].iloc[indexes]):
w = self.df["width"].iloc[indexes[i]]
h = self.df["height"].iloc[indexes[i]]
img = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH)
dsize = (IM_HEIGHT, IM_WIDTH)
img = cv2.resize(img, dsize)
img = img.astype(np.float32) / 255.0
img = np.expand_dims(img, axis=-1)
X[i,] = img
for k, j in enumerate(["large_bowel", "small_bowel", "stomach"]):
rles = self.df[j].iloc[indexes[i]]
mask = rle_decode(rles, shape=(h, w, 1))
mask = cv2.resize(mask, (IM_HEIGHT, IM_WIDTH))
y[i, :, :, k] = mask
return X, y
# data augmentation
# augmented data -> 1/3 of train existing samples
def augment_data(self, X, y):
augm_data = get_augmented(
X,
y,
batch_size=self.augm_minibatch_size,
data_gen_args=dict(
rotation_range=180,
width_shift_range=0.02,
height_shift_range=0.02,
shear_range=10,
zoom_range=0.2,
horizontal_flip=False,
vertical_flip=True,
fill_mode="constant",
),
)
X_augm, y_augm = next(augm_data)
return X_augm, y_augm
# **Data Visualization**
# Visualization of a sample batch of images and their segmentations.
# sample plot
def plot_segm(img, mask):
# labels
labels = ["Large Bowel", "Small Bowel", "Stomach"]
# label colors
colors = ["yellow", "green", "red"]
# color maps
cmap1 = mpl.colors.ListedColormap(colors[0])
cmap2 = mpl.colors.ListedColormap(colors[1])
cmap3 = mpl.colors.ListedColormap(colors[2])
# patches
patches = [
mpatches.Patch(color=colors[i], label=f"{labels[i]}")
for i in range(len(labels))
]
# grid figure
fig = plt.figure(figsize=(12, 28)) # figsize=(25,200) for batch_size = 32
grid = gridspec.GridSpec(nrows=BATCH_SIZE, ncols=2)
plt.legend(handles=patches, fontsize=6, loc=1, title="Masks", title_fontsize=8)
plt.xticks([])
plt.yticks([])
# plot images and segmentations
for i in range(BATCH_SIZE):
ax0 = fig.add_subplot(grid[i, 0])
ax0.imshow(img[i, :, :, 0], cmap="bone")
mask_lb = mask[i, :, :, 0]
mask_sb = mask[i, :, :, 1]
mask_s = mask[i, :, :, 2]
ax1 = fig.add_subplot(grid[i, 1])
ax1.imshow(img[i, :, :, 0], cmap="bone")
im1 = ax1.imshow(
np.ma.masked_where(mask_lb == False, mask_lb), cmap=cmap1, alpha=1
)
im2 = ax1.imshow(
np.ma.masked_where(mask_sb == False, mask_sb), cmap=cmap2, alpha=1
)
im3 = ax1.imshow(
np.ma.masked_where(mask_s == False, mask_s), cmap=cmap3, alpha=1
)
for ax in [ax0, ax1]:
ax.set_axis_off()
for im in [im1, im2, im3]:
im.cmap(im.norm(1))
if i == 0:
ax0.set_title("Image", fontsize=18, weight="bold")
ax1.set_title("Segmentation", fontsize=18, weight="bold")
# data generator instance
data_gen = DataGenerator(df, shuffle=True)
# get batch sample
img, mask = data_gen[0]
plot_segm(img, mask)
# **Data Splitting**
# Considering the dataset obtained after the cleaning phase, an **80% train, 10% validation, 10% test** split was chosen.
# (The generation of augmented data for the train set is excluded at this stage)
df_train = df.sample(frac=0.8, random_state=0)
df_val_test = df.drop(df_train.index)
df_val = df_val_test.sample(frac=0.5, random_state=0)
df_test = df_val_test.drop(df_val.index)
print("train_len:", len(df_train))
print("validation_len:", len(df_val))
print("test_len:", len(df_test))
# # **Model Selection**
# **Metrics**
# Two similarity metrics were selected to evaluate the accuracy of the model (a toy numeric check follows the two implementations below):
# - **Dice** coefficient -> 2 |A∩B| / (|A|+|B|)
#
#   Tends to reflect segmentation accuracy in the average case.
#
# - Intersection over Union (**IoU**) coefficient -> |A∩B| / |A∪B|
#
#   Tends to reflect segmentation accuracy in the worst case.
# Dice coefficent
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
# Intersection over Union (IoU) coefficent
def iou_coef(y_true, y_pred, smooth=1):
intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])
union = K.sum(y_true, [1, 2, 3]) + K.sum(y_pred, [1, 2, 3]) - intersection
return K.mean((intersection + smooth) / (union + smooth), axis=0)
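# Toy numeric check of the two metrics above (illustrative addition; the masks are
# made up, not from the dataset). Two 2x2x1 masks overlapping on a single pixel give
# exact Dice = 0.5 and IoU = 1/3; the +1 smoothing terms shift these to 0.6 and 0.5.
_toy_true = tf.constant([[[[1.0], [1.0]], [[0.0], [0.0]]]])  # shape (1, 2, 2, 1)
_toy_pred = tf.constant([[[[1.0], [0.0]], [[1.0], [0.0]]]])
print("toy dice_coef:", float(dice_coef(_toy_true, _toy_pred)))
print("toy iou_coef :", float(iou_coef(_toy_true, _toy_pred)))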
# **Loss**
# A loss function was selected that combines the Binary Cross Entropy (BCE) loss and the Dice loss:
# - BCE loss -> information similarity between the probability distributions of the two sets.
# - Dice loss -> bitwise similarity between the two sets.
#
# **BCE Dice loss** -> BCE loss + Dice loss
#
# N.B. The IoU metric cannot be used as the loss function because it is not differentiable.
# Dice loss
def dice_loss(y_true, y_pred):
smooth = 1.0
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = y_true_f * y_pred_f
score = (2.0 * K.sum(intersection) + smooth) / (
K.sum(y_true_f) + K.sum(y_pred_f) + smooth
)
return 1.0 - score
# Binary cross-entropy (BCE) dice loss
def bce_dice_loss(y_true, y_pred):
return binary_crossentropy(tf.cast(y_true, tf.float32), y_pred) + dice_loss(
tf.cast(y_true, tf.float32), y_pred
)
# **Definition**
# **U-Net** was selected as the deep learning model because of its performance in medical image segmentation.
# For the U-Net encoder, two convolutional neural network (CNN) architectures pretrained on ImageNet were compared:
# - VGG16: CNN with 13 convolutional layers and 3 fully connected layers.
#
# - ResNet34: residual CNN with 33 convolutional layers and 1 fully connected layer.
# **ResNet34** achieved slightly better performance and was therefore selected.
#
# Both trained versions, with their learning histories, are available in the "kaggle/working/models" directory.
# Multiclass segmentation -> output with three per-pixel segmentation probability maps.
# Metrics -> Dice and IoU coefficients.
# Loss -> BCE Dice loss.
# Optimizer -> adaptive Adam algorithm with an initial learning rate of 10^-4.
# enable multiple GPUs use
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
"""""" """
# *** U-Net_vgg16 ***
# define multiclass U-Net model with VGG16 as backbone
model = Unet(
backbone_name='vgg16',
input_shape=(IM_HEIGHT, IM_WIDTH, 3),
classes=3,
activation='sigmoid',
encoder_weights='imagenet')
""" """"""
# *** U-Net_resnet34 ***
# define multiclass U-Net model with ResNet34 as backbone
model = Unet(
backbone_name="resnet34",
input_shape=(IM_HEIGHT, IM_WIDTH, 3),
classes=3,
activation="sigmoid",
encoder_weights="imagenet",
)
# define custom optimizer, metrics and loss
optimizer = Adam(LR)
model.compile(
optimizer=optimizer, metrics=[dice_coef, iou_coef], loss=bce_dice_loss
)
# show model info
model.summary()
# **K-Fold Cross-validation**
# Model evaluation via K-Fold Cross-validation with 10 splits.
# (It was not possible to run this due to lack of resources)
"""""" """
# 10 splits
cv = KFold(n_splits=N_SPLITS)
# model, loss, dice and iou arrays
models = np.empty(shape=N_SPLITS)
val_losses = np.empty(shape=N_SPLITS)
val_dice_coefs = np.empty(shape=N_SPLITS)
val_iou_coefs = np.empty(shape=N_SPLITS)
for i, (train_ids, val_ids) in enumerate (cv.split(df_train)):
# create train and validation folds
df_train_fold = df_train.filter(items = train_ids, axis=0)
df_val_fold = df_train.filter(items = val_ids, axis=0)
# train and validation generators instance
train_gen = DataGenerator(df_train_fold, shuffle=True)
val_gen = DataGenerator(df_val_fold, shuffle=True, subset="val")
models[i] = model
# fit model
models[i].fit(
x=train_gen,
validation_data=val_gen,
epochs=EPOCHS)
# evaluate model on validation set
scores = model[i].evaluate(x=val_gen)
# read loss, dice and iou
val_losses.append(scores['val_loss'])
val_dice_coefs.append(scores['val_dice_coef'])
val_iou_coefs.append(scores['val_iou_coef'])
# compute loss, dice and iou means as cross-validation scores
cv_loss = np.mean(val_losses)
cv_dice_coef = np.mean(val_dice_coefs)
cv_iou_coef = np.mean(val_iou_coefs)
print("val_dice scores: ", val_dice_coefs)
print("val_iou scores: ", val_iou_coefs)
print("val_loss scores: ", val_losses)
print("cv_dice_coef score: ", cv_dice_coef)
print("cv_iou_coef score: ", cv_iou_coef)
print("cv_loss score: ", cv_loss)
""" """"""
print("Not enough resources to run cross-validation...")
# # **Model Training**
# **Callbacks**
# The following callbacks are used while fitting the model:
# - **ModelCheckpoint**: saves the best model according to a chosen score.
#
# - **ReduceLROnPlateau**: decays the learning rate if the model does not improve on a chosen score for a fixed number of epochs.
#
# For both callbacks the monitored score is the validation loss.
# ModelCheckpoint callback
model_checkpoint = ModelCheckpoint(
filepath="/kaggle/working/models/U-Net_resnet34/model.h5",
monitor="val_loss",
verbose=1,
save_best_only=True,
save_weights_only=False,
mode="min",
)
# ReduceLROnPlateau callback
reduce_lr_on_plateau = ReduceLROnPlateau(
monitor="val_loss", factor=0.1, patience=10, verbose=1, min_delta=0.0001 # 5
)
# **Fitting**
# Fitting the model for 100 epochs.
"""""" """
# train and validation data generators instance
train_gen = DataGenerator(df_train, shuffle=True, subset="train")
val_gen = DataGenerator(df_val, shuffle=True, subset="validation")
# garbage collection
gc.collect()
# fit model
fit_result = model.fit(
x=train_gen,
validation_data=val_gen,
epochs=EPOCHS,
callbacks=[reduce_lr_on_plateau, model_checkpoint])
""" """"""
# load pretrained model
model = load_model(
filepath="/kaggle/working/models/U-Net_resnet34/model.h5", # ".../U-Net_vgg16/model.h5"
custom_objects={
"dice_coef": dice_coef,
"iou_coef": iou_coef,
"bce_dice_loss": bce_dice_loss,
},
)
# **Learning Curves**
# Learning curves for train and validation with respect to the loss and the metrics.
# Number of epochs taken to reach the best model and the corresponding scores.
# plot learning curves
def learning_plot(history, epochs):
plt.figure(figsize=(14, 21))
# train/validation loss subplot
plt.subplot(3, 1, 1)
plt.plot(range(1, epochs + 1), history["loss"][0:epochs], label="Train_Loss")
plt.plot(range(1, epochs + 1), history["val_loss"][0:epochs], label="Val_loss")
plt.title("LOSS")
plt.xlabel("Epoch")
plt.ylabel("loss")
plt.legend()
# train/validation dice subplot
plt.subplot(3, 1, 2)
plt.plot(
range(1, epochs + 1), history["dice_coef"][0:epochs], label="Train_dice_coef"
)
plt.plot(
range(1, epochs + 1), history["val_dice_coef"][0:epochs], label="Val_dice_coef"
)
plt.title("DICE")
plt.xlabel("Epoch")
plt.ylabel("dice_coef")
plt.legend()
# train/validation iou subplot
plt.subplot(3, 1, 3)
plt.plot(
range(1, epochs + 1), history["iou_coef"][0:epochs], label="Train_iou_coef"
)
plt.plot(
range(1, epochs + 1), history["val_iou_coef"][0:epochs], label="Val_iou_coef"
)
plt.title("IOU")
plt.xlabel("Epoch")
plt.ylabel("iou_coef")
plt.legend()
plt.show()
"""""" """
# history dataframe
history_df = pd.DataFrame(fit_result.history)
# save history dataframe as csv
history_df.to_csv("/kaggle/working/models/U-Net_resnet34/history.csv") # ".../U-Net_vgg16/history.csv"
""" """"""
# load history csv as dataframe
history_df = pd.read_csv(
"/kaggle/working/models/U-Net_resnet34/history.csv"
) # ".../U-Net_vgg16/history.csv"
# MODEL CHECKPOINT EPOCHS
mcp_epochs = history_df["val_loss"].idxmin() + 1
print("MODEL CHECKPOINT EPOCHS:", mcp_epochs)
print()
# *** TRAIN SCORES ***
print("TRAIN SCORES")
print("train_loss: %.4f" % history_df["loss"][mcp_epochs - 1])
print("train_dice_coef: %.4f" % history_df["dice_coef"][mcp_epochs - 1])
print("train_iou_coef: %.4f" % history_df["iou_coef"][mcp_epochs])
print()
# *** VALIDATION SCORES ***
print("VALIDATION SCORES")
print("val_loss: %.4f" % history_df["val_loss"][mcp_epochs - 1])
print("val_dice_coef: %.4f" % history_df["val_dice_coef"][mcp_epochs - 1])
print("val_iou_coef: %.4f" % history_df["val_iou_coef"][mcp_epochs - 1])
print()
learning_plot(history_df, mcp_epochs)
# # **Model Testing**
# **Prediction**
# Compute predictions on the test set and convert the probability maps to binary images.
# Visualization of a sample batch of ground truth and predicted segmentations.
# prediction plot
def plot_pred_segm(img, mask, pred):
# labels
labels = ["Large Bowel", "Small Bowel", "Stomach"]
# label colors
colors = ["yellow", "green", "red"]
# color maps
cmap1 = mpl.colors.ListedColormap(colors[0])
cmap2 = mpl.colors.ListedColormap(colors[1])
cmap3 = mpl.colors.ListedColormap(colors[2])
# patches
patches = [
mpatches.Patch(color=colors[i], label=f"{labels[i]}")
for i in range(len(labels))
]
# grid figure
fig = plt.figure(figsize=(12, 28))
grid = gridspec.GridSpec(nrows=BATCH_SIZE, ncols=2)
plt.legend(handles=patches, fontsize=6, loc=1, title="Masks", title_fontsize=8)
plt.xticks([])
plt.yticks([])
# plot ground truth and predicted segmentations
for i in range(BATCH_SIZE):
mask_lb = mask[i, :, :, 0]
mask_sb = mask[i, :, :, 1]
mask_s = mask[i, :, :, 2]
ax0 = fig.add_subplot(grid[i, 0])
ax0.imshow(img[i, :, :, 0], cmap="bone")
im1 = ax0.imshow(
np.ma.masked_where(mask_lb == False, mask_lb), cmap=cmap1, alpha=1
)
im2 = ax0.imshow(
np.ma.masked_where(mask_sb == False, mask_sb), cmap=cmap2, alpha=1
)
im3 = ax0.imshow(
np.ma.masked_where(mask_s == False, mask_s), cmap=cmap3, alpha=1
)
pred_lb = pred[i, :, :, 0]
pred_sb = pred[i, :, :, 1]
pred_s = pred[i, :, :, 2]
ax1 = fig.add_subplot(grid[i, 1])
ax1.imshow(img[i, :, :, 0], cmap="bone")
im4 = ax1.imshow(
np.ma.masked_where(pred_lb == False, pred_lb), cmap=cmap1, alpha=1
)
im5 = ax1.imshow(
np.ma.masked_where(pred_sb == False, pred_sb), cmap=cmap2, alpha=1
)
im6 = ax1.imshow(
np.ma.masked_where(pred_s == False, pred_s), cmap=cmap3, alpha=1
)
for ax in [ax0, ax1]:
ax.set_axis_off()
for im in [im1, im2, im3, im4, im5, im6]:
im.cmap(im.norm(1))
if i == 0:
ax0.set_title("Ground Truth", fontsize=18, weight="bold")
ax1.set_title("Prediction", fontsize=18, weight="bold")
# test data generator instance
test_gen = DataGenerator(df_test, shuffle=True, subset="test")
# get test batch sample
img, mask = test_gen[0]
# predict on test set
preds = model.predict(x=test_gen, verbose=1)
# convert pixel probability maps to binary image
preds = (preds > 0.5).astype(np.float32)
# prediction batch sample
pred = preds[0:BATCH_SIZE]
plot_pred_segm(img, mask, pred)
# **Performance**
# Test scores for evaluating the performance of the model.
# evaluate model on test set
eval_result = model.evaluate(x=test_gen, verbose=1)
# *** TEST SCORES ***
print("TEST SCORES")
print("test_loss: %.4f" % eval_result[0])
print("test_dice_coef: %.4f" % eval_result[1])
print("test_iou_coef: %.4f" % eval_result[2])
# **Submission**
# Submission of the CSV containing the predictions computed on the test set.
# create, fill and save submission with predictions
def pred_submit(ids, preds):
# load submission template csv as dataframe
submission_df = pd.read_csv(
"/kaggle/input/uw-madison-gi-tract-image-segmentation/sample_submission.csv"
)
# split predictions per classes
preds_lb = preds[:, :, :, 0]
preds_sb = preds[:, :, :, 1]
preds_s = preds[:, :, :, 2]
for i in range(len(preds)):
# create rows
row_lb = [ids.iloc[i], "large_bowel", rle_encode(preds_lb[i])]
row_sb = [ids.iloc[i], "small_bowel", rle_encode(preds_sb[i])]
row_s = [ids.iloc[i], "stomach", rle_encode(preds_s[i])]
# append rows to dataframe
submission_df.loc[len(submission_df)] = row_lb
submission_df.loc[len(submission_df)] = row_sb
submission_df.loc[len(submission_df)] = row_s
# save dataframe as csv
submission_df.to_csv("/kaggle/working/sample_submission.csv")
# pred_submit(df_test['id'], preds)
# load submission csv as dataframe
submission_df = pd.read_csv("/kaggle/working/sample_submission.csv")
print("submission_len:", len(submission_df))
submission_df[["id", "class", "predicted"]].head()
|
import os
import numpy as np
import plotly.graph_objects as go
import polars as pl
import panel as pn
user_followers_file_path = "/kaggle/input/meta-kaggle/UserFollowers.csv"
users_file_path = "/kaggle/input/meta-kaggle/Users.csv"
# Loading data into memory
follower_df = pl.read_csv(user_followers_file_path)
users_df = pl.read_csv(users_file_path)
# Create subset dataframes based on total followers and total following
followers_merged = follower_df.join(users_df, left_on="FollowingUserId", right_on="Id")
following_merged = follower_df.join(users_df, left_on="UserId", right_on="Id")
most_followers = (
    followers_merged.groupby("UserName").count().sort("count", descending=True)[:250]
)
most_followers = most_followers.rename({"UserName": "User Name"})
most_followers = most_followers.with_row_count(name="Rank", offset=1)
most_following = (
following_merged.groupby("UserName").count().sort("count", descending=True)[:250]
)
most_following = most_following.with_row_count(name="Rank", offset=1)
test = (
followers_merged.groupby(["UserName", "CreationDate"])
.count()
.sort("count", descending=True)[0:20]
)
test = test.unique(subset=["UserName"], keep="first", maintain_order=True)
test
# # 👑 Top 250 Kaggle Users Ranked by Follower Count
pn.extension("tabulator")
follower_table_widget = pn.widgets.Tabulator(
most_followers.to_pandas(), show_index=False, pagination="local", page_size=10
)
follower_table_widget
# # 👑 Top 250 Kaggle Users Ranked by Following Count
pn.extension("tabulator")
following_table_widget = pn.widgets.Tabulator(
most_following.to_pandas(), show_index=False, pagination="local", page_size=10
)
following_table_widget
|
# Space missions data understanding, visualization and analysis
# Quick navigation
# * [1. General dataset overview](#1)
# * [2. Geo analysis](#2)
# * [3. Other interesting questions](#3)
# * [4. USA vs USSR analysis](#4)
# * [5. Best every year](#5)
# * [6. Time series decomposition](#6)
# * [7. Where India stands](#7)
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from iso3166 import countries
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from collections import OrderedDict
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
#
# 1. Let's see what the data in our dataset looks like
df = pd.read_csv("/kaggle/input/all-space-missions-from-1957/Space_Corrected.csv")
df.columns = [
"Unnamed: 0",
"Unnamed: 0.1",
"Company Name",
"Location",
"Datum",
"Detail",
"Status Rocket",
"Rocket",
"Status Mission",
]
df = df.drop(["Unnamed: 0", "Unnamed: 0.1"], axis=1)
# Let's see percent of NaNs for every column.
missed = pd.DataFrame()
missed["column"] = df.columns
percent = list()
for col in df.columns:
percent.append(round(100 * df[col].isnull().sum() / len(df), 2))
missed["percent"] = percent
missed = missed.sort_values("percent")
missed = missed[missed["percent"] > 0]
fig = px.bar(
missed,
x="percent",
y="column",
orientation="h",
title="Missed values percent for every column (percent > 0)",
width=600,
height=200,
)
fig.show()
# From this short chart we can see that only one column contains missing values: the `Rocket` column, with about 80% of its values missing.
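# A quick check of the figure quoted above (a sketch using the already loaded dataframe):
print("Rocket missing: %.1f%%" % (100 * df["Rocket"].isnull().mean()))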
# Now let's see how many launches each company in this dataset has made.
ds = df["Company Name"].value_counts().reset_index()
ds.columns = ["Company", "Number of Launches"]
ds = ds.sort_values(["Number of Launches"], ascending=False)
fig = px.bar(
ds,
x="Number of Launches",
y="Company",
orientation="h",
title="Number of Launches by Every Company",
labels={"Number of Launches": "Number of Launches", "Company": "Company Name"},
height=600,
width=800,
template="simple_white",
)
fig.update_layout(
plot_bgcolor="#FFFFFF",
xaxis=dict(title="", showgrid=True, gridcolor="lightgray", gridwidth=0.1),
yaxis=dict(
title="",
showgrid=True,
gridcolor="lightgray",
gridwidth=0.1,
tickfont=dict(size=12),
automargin=True,
),
font=dict(family="Arial", size=14, color="black"),
)
fig.update_traces(texttemplate="%{x}", textposition="outside")
fig.show()
ds = df["Company Name"].value_counts().reset_index()
ds.columns = ["Company", "Number of Launches"]
trace = go.Bar(
x=ds["Number of Launches"],
y=ds["Company"],
orientation="h",
text=ds["Number of Launches"],
textposition="outside",
marker=dict(color="#1f77b4"),
)
layout = go.Layout(
title="Number of Launches by Every Company",
xaxis=dict(
title="Number of Launches",
tickfont=dict(size=12),
showgrid=True,
gridcolor="lightgray",
gridwidth=0.1,
),
yaxis=dict(title="Company Name", tickfont=dict(size=12), automargin=True),
font=dict(family="Arial", size=14, color="black"),
plot_bgcolor="#FFFFFF",
)
fig = go.Figure(data=[trace], layout=layout)
fig.show()
# What about rocket's status?
ds = df["Status Rocket"].value_counts().reset_index()
ds.columns = ["status", "count"]
ds = ds.sort_values("count", ascending=False)
colors = ["rgb(75, 109, 153)", "rgb(232, 114, 114)"]
fig = go.Figure(
go.Pie(
labels=ds["status"],
values=ds["count"],
hole=0.5,
marker=dict(colors=colors),
textfont=dict(size=14, color="black"),
hoverinfo="label+percent",
textinfo="label+percent",
)
)
fig.update_layout(
title=dict(text="Rocket Status", font=dict(size=24)),
margin=dict(l=0, r=0, t=80, b=0),
font=dict(family="Arial", size=16, color="black"),
)
fig.show()
# Now we will check status distribution for all missions.
ds = df["Status Mission"].value_counts().reset_index()
ds.columns = ["mission_status", "count"]
ds = ds.sort_values("count", ascending=False)
colors = ["#FFC300", "#FF5733", "#C70039", "#900C3F", "#581845"]
fig = px.bar(
ds,
x="mission_status",
y="count",
title="Mission Status Distribution",
color="mission_status",
color_discrete_sequence=colors,
height=600,
width=800,
)
fig.update_layout(
plot_bgcolor="#FFFFFF",
xaxis=dict(
title="",
showgrid=True,
gridcolor="lightgray",
gridwidth=0.1,
tickfont=dict(size=12),
),
yaxis=dict(
title="Count",
showgrid=True,
gridcolor="lightgray",
gridwidth=0.1,
tickfont=dict(size=12),
automargin=True,
),
font=dict(family="Arial", size=14, color="black"),
)
fig.show()
# As the last part of this section, let's look at the value distribution for `Rocket`. Many of these values are missing, so we set `0` for those samples.
df["Rocket"] = df["Rocket"].fillna(0.0).str.replace(",", "")
df["Rocket"] = df["Rocket"].astype(np.float64).fillna(0.0)
df["Rocket"] = df["Rocket"] * 1000000
colorscale = [[0, "#FFFFFF"], [0.5, "#72A1E5"], [1, "#153E75"]]
fig = go.Figure(
go.Histogram(
x=df["Rocket"],
nbinsx=50,
marker=dict(color="#72A1E5"),
opacity=0.7,
hovertemplate="Count: %{y}",
name="Rocket Value",
)
)
fig.update_layout(
title=dict(text="Rocket Value Distribution", font=dict(size=24)),
xaxis=dict(
title="Rocket Value (USD)", showgrid=True, gridcolor="lightgray", gridwidth=0.1
),
yaxis=dict(
title="Count",
showgrid=True,
gridcolor="lightgray",
gridwidth=0.1,
tickfont=dict(size=12),
automargin=True,
),
font=dict(family="Arial", size=16, color="black"),
)
fig.data[0].update({"marker": {"colorscale": colorscale, "showscale": True}})
fig.show()
#
# 2. Geo analysis
# First we extract the `country` feature and fix some country names.
countries_dict = {
"Russia": "Russian Federation",
"New Mexico": "USA",
"Yellow Sea": "China",
"Shahrud Missile Test Site": "Iran",
"Pacific Missile Range Facility": "USA",
"Barents Sea": "Russian Federation",
"Gran Canaria": "USA",
}
df["country"] = df["Location"].str.split(", ").str[-1].replace(countries_dict)
# Let's see a sunburst chart for all countries.
sun = (
df.groupby(["country", "Company Name", "Status Mission"])["Datum"]
.count()
.reset_index()
)
sun.columns = ["country", "company", "status", "count"]
fig = px.sunburst(
sun,
path=["country", "company", "status"],
values="count",
title="Sunburst chart for all countries",
width=800, # increase the width to 800
height=800, # increase the height to 800
)
fig.show()
# Now let's convert country names to Alpha3 format using the iso3166 package.
country_dict = dict()
for c in countries:
country_dict[c.name] = c.alpha3
df["alpha3"] = df["country"]
df = df.replace({"alpha3": country_dict})
df.loc[df["country"] == "North Korea", "alpha3"] = "PRK"
df.loc[df["country"] == "South Korea", "alpha3"] = "KOR"
df
# How many launches does every country have?
def plot_map(
dataframe, target_column, title, width=800, height=600, color_scale="Viridis"
):
mapdf = (
dataframe.groupby(["country", "alpha3"])[target_column].count().reset_index()
)
fig = px.choropleth(
mapdf,
locations="alpha3",
hover_name="country",
color=target_column,
projection="natural earth",
width=width,
height=height,
color_continuous_scale=color_scale,
range_color=[0, mapdf[target_column].max()],
title=title,
template="plotly_dark",
)
fig.update_geos(
showcountries=True,
countrycolor="white",
showocean=True,
oceancolor="MidnightBlue",
showcoastlines=True,
coastlinecolor="white",
showland=True,
landcolor="LightGrey",
)
fig.show()
plot_map(
dataframe=df,
target_column="Status Mission",
title="Number of launches per country",
color_scale="YlOrRd",
)
# How many failures does every country have?
fail_df = df[df["Status Mission"] == "Failure"]
plot_map(
dataframe=fail_df,
target_column="Status Mission",
title="Number of Fails per country",
color_scale="YlOrRd",
)
#
# 3. Other interesting questions
# How much money did companies spend on missions?
data = df.groupby(["Company Name"])["Rocket"].sum().reset_index()
data = data[data["Rocket"] > 0]
data.columns = ["company", "money"]
fig = px.bar(
data,
x="company",
y="money",
orientation="v",
title="Total money spent on missions",
width=800,
height=600,
color="money",
color_continuous_scale=px.colors.sequential.YlOrRd,
color_continuous_midpoint=data["money"].median(),
)
fig.update_yaxes(title="", showticklabels=False)
fig.show()
money = df.groupby(["Company Name"])["Rocket"].sum()
starts = df["Company Name"].value_counts().reset_index()
starts.columns = ["Company Name", "count"]
av_money_df = pd.merge(money, starts, on="Company Name")
av_money_df["avg"] = av_money_df["Rocket"] / av_money_df["count"]
av_money_df = av_money_df[av_money_df["avg"] > 0]
av_money_df = av_money_df.reset_index()
fig = px.bar(
av_money_df,
x="Company Name",
y="avg",
orientation="v",
title="Average money per one launch",
width=800,
height=600,
color="avg",
color_continuous_scale=px.colors.sequential.YlOrRd,
color_continuous_midpoint=av_money_df["avg"].median(),
)
fig.update_yaxes(title="", showticklabels=False)
fig.show()
# Let's create the usual date-based features (year, month, weekday).
df["date"] = pd.to_datetime(df["Datum"], infer_datetime_format=True)
df["year"] = df["date"].apply(lambda datetime: datetime.year)
df["month"] = df["date"].apply(lambda datetime: datetime.month)
df["weekday"] = df["date"].apply(lambda datetime: datetime.weekday())
# How many launches were there every year?
ds = df["year"].value_counts().reset_index()
ds.columns = ["year", "count"]
colors = ["#3c7ebf"] * len(ds)
colors[0] = "#00bfff"
bar = go.Bar(
x=ds["year"],
y=ds["count"],
marker=dict(color=colors, line=dict(color="#000000", width=1)),
)
layout = go.Layout(
title="Missions number by year",
xaxis=dict(title="year", tickmode="linear", tick0=min(ds["year"]), dtick=1),
yaxis=dict(
title="Number of Missions",
showgrid=True,
gridwidth=0.5,
gridcolor="#c0c0c0",
tickmode="linear",
tick0=0,
dtick=100,
),
plot_bgcolor="#f9f9f9",
)
fig = go.Figure(data=[bar], layout=layout)
fig.show()
# What is the distribution by month?
ds = df["month"].value_counts().reset_index()
ds.columns = ["month", "count"]
fig = px.bar(
ds,
x="month",
y="count",
orientation="v",
title="Missions number by month",
width=800,
)
fig.show()
# What is the distribution by weekday?
ds = df["weekday"].value_counts().reset_index()
ds.columns = ["weekday", "count"]
fig = px.bar(
ds,
x="weekday",
y="count",
orientation="v",
title="Missions number by weekday",
width=800,
)
fig.show()
# How many years ago did each company perform its last rocket launch?
res = list()
for group in df.groupby(["Company Name"]):
res.append(group[1][["Company Name", "year"]].head(1))
data = pd.concat(res)
data = data.sort_values("year")
data["year"] = 2020 - data["year"]
fig = px.bar(
data,
x="year",
y="Company Name",
orientation="h",
title="Years from last start",
width=900,
height=1000,
)
fig.show()
res = list()
for group in df.groupby(["Company Name"]):
res.append(group[1][["Company Name", "year"]].head(1))
data = pd.concat(res)
data = data.sort_values("year")
data["year"] = 2020 - data["year"]
fig = go.Figure(
go.Bar(
x=data["year"],
y=data["Company Name"],
orientation="h",
marker=dict(color=data["year"], coloraxis="coloraxis"),
text=data["year"],
textposition="inside",
hovertemplate="<b>%{y}</b><br>"
+ "Years since last start: %{x}<br>"
+ "<extra></extra>",
)
)
fig.update_layout(
title="Years since 2020",
title_x=0.5,
font=dict(size=12),
width=900,
height=1000,
xaxis=dict(title="Years"),
yaxis=dict(title="Company Name"),
coloraxis=dict(
colorscale="RdYlGn",
colorbar=dict(
title="Years since last start",
titleside="right",
ticks="outside",
ticklen=5,
showticklabels=True,
),
),
plot_bgcolor="rgba(0,0,0,0)",
)
fig.show()
# What is the average price per launch for every year?
money = df[df["Rocket"] > 0]
money = money.groupby(["year"])["Rocket"].mean().reset_index()
fig = px.line(
money,
x="year",
y="Rocket",
title="Average Money Spent by Year",
labels={"year": "Year", "Rocket": "Average Money Spent (USD)"},
width=800,
height=500,
template="simple_white",
)
fig.update_layout(
title={"font": {"size": 24, "family": "Arial"}, "x": 0.5, "xanchor": "center"},
xaxis={
"title": {
"font": {"size": 16, "family": "Arial"},
},
"tickfont": {"size": 14, "family": "Arial"},
"showgrid": False,
"linecolor": "black",
"linewidth": 1,
"mirror": True,
"ticks": "outside",
"ticklen": 10,
},
yaxis={
"title": {
"font": {"size": 16, "family": "Arial"},
},
"tickfont": {"size": 14, "family": "Arial"},
"showgrid": False,
"linecolor": "black",
"linewidth": 1,
"mirror": True,
"ticks": "outside",
"ticklen": 10,
},
plot_bgcolor="white",
legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
margin=dict(l=50, r=50, t=80, b=50),
)
fig.show()
# Which companies are most experienced (years)?
ds = df.groupby(["Company Name"])["year"].nunique().reset_index()
ds.columns = ["company", "count"]
fig = px.bar(
ds,
x="company",
y="count",
title="Most experienced companies (years of launches)",
color_discrete_sequence=["#1f77b4"],
)
fig.show()
# Dynamics for some companies.
data = df.groupby(["Company Name", "year"])["Status Mission"].count().reset_index()
data.columns = ["company", "year", "starts"]
top5 = (
data.groupby(["company"])["starts"]
.sum()
.reset_index()
.sort_values("starts", ascending=False)
.head(5)["company"]
.tolist()
)
data = data[data["company"].isin(top5)]
fig = px.line(
data,
x="year",
y="starts",
title="Dynamic of top 5 companies by number of starts",
color="company",
)
fig.show()
data = data[data["company"].isin(top5)]
fig = px.line(
data,
x="year",
y="starts",
title="Dynamic of top 5 companies by number of starts",
color="company",
width=900,
height=500,
)
fig.update_layout(
xaxis=dict(title="Year", tickangle=-45),
yaxis=dict(title="Number of starts", tickfont=dict(size=12)),
font=dict(size=16),
)
fig.show()
# Number of starts for 2020.
data = df.groupby(["Company Name", "year"])["Status Mission"].count().reset_index()
data.columns = ["company", "year", "starts"]
data = data[data["year"] == 2020]
fig = px.bar(
data, x="company", y="starts", title="Number of starts for 2020", width=800
)
fig.show()
data = df[df["Status Mission"] == "Failure"]
data = data.groupby(["Company Name", "year"])["Status Mission"].count().reset_index()
data.columns = ["company", "year", "starts"]
data = data[data["year"] == 2020]
fig = px.bar(data, x="company", y="starts", title="Failures in 2020", width=600)
fig.show()
#
# 4. USA vs USSR analysis
# The space race was an important part of the Cold War between the USA and the USSR, so let's check its results.
cold = df[df["year"] <= 1991]
cold["country"].unique()
cold.loc[cold["country"] == "Kazakhstan", "country"] = "USSR"
cold.loc[cold["country"] == "Russian Federation", "country"] = "USSR"
cold = cold[(cold["country"] == "USSR") | (cold["country"] == "USA")]
# Total number of launches.
ds = cold["country"].value_counts().reset_index()
ds.columns = ["country", "count"]
colors = px.colors.qualitative.Dark24
title_font = dict(size=20, family="Arial")
fig = px.pie(
ds,
names="country",
values="count",
title="Number of Launches by Country",
hole=0.5, # Change hole size
color_discrete_sequence=colors, # Assign custom colors
labels={"country": "Country", "count": "Number of Launches"}, # Rename labels
width=700,
height=500,
)
fig.update_traces(textposition="inside", textinfo="percent+label")
fig.update_layout(title_font=title_font)
fig.show()
# Launches year by year.
ds = cold.groupby(["year", "country"])["alpha3"].count().reset_index()
ds.columns = ["Year", "Country", "Launches"]
colors = ["rgb(53, 83, 255)", "rgb(255, 128, 0)"]
fig = px.bar(
ds,
x="Year",
y="Launches",
color="Country",
title="USA vs USSR: Launches Year by Year",
color_discrete_sequence=colors, # Set custom color palette
labels={
"Year": "Year",
"Launches": "Number of Launches",
"Country": "Country",
}, # Rename labels
height=500,
width=800,
)
fig.update_xaxes(tickangle=45, tickfont=dict(size=10))
fig.update_layout(
legend=dict(
title=None,
orientation="h",
yanchor="top",
y=1.1,
xanchor="left",
x=0.15,
font=dict(size=12),
)
)
fig.show()
# Number of companies year by year.
ds = cold.groupby(["year", "country"])["Company Name"].nunique().reset_index()
ds.columns = ["Year", "Country", "Companies"]
colors = ["rgb(53, 83, 255)", "rgb(255, 128, 0)"]
fig = px.bar(
ds,
x="Year",
y="Companies",
color="Country",
color_discrete_sequence=colors,
title="USA vs USSR: Number of Companies Year by Year",
labels={"Year": "Year", "Companies": "Number of Companies", "Country": "Country"},
height=500,
width=800,
)
fig.update_xaxes(tickangle=45, tickfont=dict(size=10))
fig.update_layout(
legend=dict(
title=None,
orientation="h",
yanchor="top",
y=1.1,
xanchor="left",
x=0.15,
font=dict(size=12),
),
font=dict(size=14),
)
fig.show()
# Number of failures year by year.
ds = cold[cold["Status Mission"] == "Failure"]
ds = ds.groupby(["year", "country"])["alpha3"].count().reset_index()
ds.columns = ["Year", "Country", "Failures"]
colors = ["rgb(53, 83, 255)", "rgb(255, 128, 0)"]
fig = px.bar(
ds,
x="Year",
y="Failures",
color="Country",
title="USA vs USSR: Failures Year by Year",
color_discrete_sequence=colors, # Set custom color palette
labels={
"Year": "Year",
"Failures": "Number of Failures",
"Country": "Country",
}, # Rename labels
height=500,
width=800,
)
fig.update_xaxes(tickangle=45, tickfont=dict(size=10))
fig.update_layout(
legend=dict(
title=None,
orientation="h",
yanchor="top",
y=1.1,
xanchor="left",
x=0.15,
font=dict(size=12),
)
)
fig.show()
#
# 5. Best every year
# Let's see which countries and companies were the best every year.
ds = (
df.groupby(["year", "country"])["Status Mission"]
.count()
.reset_index()
.sort_values(["year", "Status Mission"], ascending=False)
)
ds = pd.concat([group[1].head(1) for group in ds.groupby(["year"])])
ds.columns = ["year", "country", "launches"]
fig = px.bar(
ds,
x="year",
y="launches",
color="country",
title="Leaders by launches for every year (countries)",
)
fig.show()
ds = df[df["Status Mission"] == "Success"]
ds = (
ds.groupby(["year", "country"])["Status Mission"]
.count()
.reset_index()
.sort_values(["year", "Status Mission"], ascending=False)
)
ds = pd.concat([group[1].head(1) for group in ds.groupby(["year"])])
ds.columns = ["year", "country", "launches"]
fig = px.bar(
ds,
x="year",
y="launches",
color="country",
title="Leaders by success launches for every year (countries)",
width=800,
)
fig.show()
ds = (
df.groupby(["year", "Company Name"])["Status Mission"]
.count()
.reset_index()
.sort_values(["year", "Status Mission"], ascending=False)
)
ds = pd.concat([group[1].head(1) for group in ds.groupby(["year"])])
ds.columns = ["year", "company", "launches"]
fig = px.bar(
ds,
x="year",
y="launches",
color="company",
title="Leaders by launches for every year (companies)",
width=800,
)
fig.show()
ds = df[df["Status Mission"] == "Success"]
ds = (
ds.groupby(["year", "Company Name"])["Status Mission"]
.count()
.reset_index()
.sort_values(["year", "Status Mission"], ascending=False)
)
ds = pd.concat([group[1].head(1) for group in ds.groupby(["year"])])
ds.columns = ["year", "company", "launches"]
fig = px.bar(
ds,
x="year",
y="launches",
color="company",
title="Leaders by success launches for every year (companies)",
width=800,
)
fig.show()
#
# 6. Time series decomposition
df["month_year"] = df["year"].astype(str) + "-" + df["month"].astype(str)
df["month_year"] = pd.to_datetime(df["month_year"]).dt.to_period("M")
ds = df.groupby(["month_year"])["alpha3"].count().reset_index()
ds.columns = ["month_year", "count"]
ds["month_year"] = ds["month_year"].astype(str)
fig = px.line(
ds, x="month_year", y="count", orientation="v", title="Launches by months"
)
fig.show()
# December 1971 is the most active month for launch attempts.
# Let's decompose the time series of the number of launches per month.
dates = ["1957-10-01", "2020-08-02"]
start, end = [datetime.strptime(_, "%Y-%m-%d") for _ in dates]
dd = pd.DataFrame(
list(
OrderedDict(
((start + timedelta(_)).strftime(r"%Y-%m"), None)
for _ in range((end - start).days)
).keys()
),
columns=["date"],
)
dd["date"] = pd.to_datetime(dd["date"])
ds["month_year"] = pd.to_datetime(ds["month_year"])
res = pd.merge(ds, dd, how="outer", left_on="month_year", right_on="date")
res = res.sort_values("date")[["date", "count"]]
res = res.fillna(0).set_index("date")
result = seasonal_decompose(res, model="additive", period=12)
fig = result.plot()
matplotlib.rcParams["figure.figsize"] = [20, 15]
plt.show()
# Let's drop the noise from the time series (keep the trend + seasonal components)
ts = (result.trend + result.seasonal).reset_index()
ts.columns = ["date", "count"]
ts["origin"] = "cleaned"
dres = res.reset_index()
dres["origin"] = "original"
data = pd.concat([dres, ts])
fig = px.line(
data,
x="date",
y="count",
color="origin",
orientation="v",
title="Original and cleaned time series",
width=800,
)
fig.show()
# Simple modeling
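# ARIMA order=(p, d, q): here p=10 autoregressive lags, d=1 order of differencing, q=2 moving-average terms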
model = ARIMA(ds["count"], order=(10, 1, 2))
model_fit = model.fit()
pred = model_fit.predict(dynamic=False)
plt.plot(ds.index, ds["count"], label="Actual")
plt.plot(ds.index, pred, label="Predicted")
plt.title("Actual vs Predicted values")
plt.xlabel("Date")
plt.ylabel("Count")
plt.legend()
plt.show()
preds = model_fit.forecast(16)
preds = preds.tolist()
preds = [int(item) for item in preds]
months = [
"2020-09-01",
"2020-10-01",
"2020-11-01",
"2020-12-01",
"2021-01-01",
"2021-02-01",
"2021-03-01",
"2021-04-01",
"2021-05-01",
"2021-06-01",
"2021-07-01",
"2021-08-01",
"2021-09-01",
"2021-10-01",
"2021-11-01",
"2021-12-01",
]
new_df = pd.DataFrame()
new_df["month_year"] = months
new_df["count"] = preds
data = pd.concat([ds, new_df])
fig = px.line(data, x="month_year", y="count", title="Launches per month prediction")
fig.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# # import data
dataset_raw = pd.read_csv(
"/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv"
)
print(dataset_raw.shape)
dataset_raw.head().T
# # data description
# > Marital status - The marital status of the student. (Categorical)
# > Application mode - The method of application used by the student. (Categorical)
# > Application order - The order in which the student applied. (Numerical)
# > Course - The course taken by the student. (Categorical)
# > Daytime/evening attendance - Whether the student attends classes during the day or in the evening. (Categorical)
# > Previous qualification - The qualification obtained by the student before enrolling in higher education. (Categorical)
# > Nacionality - The nationality of the student. (Categorical)
# > Mother's qualification - The qualification of the student's mother. (Categorical)
# > Father's qualification - The qualification of the student's father. (Categorical)
# > Mother's occupation - The occupation of the student's mother. (Categorical)
# > Father's occupation - The occupation of the student's father. (Categorical)
# > Displaced - Whether the student is a displaced person. (Categorical)
# > Educational special needs - Whether the student has any special educational needs. (Categorical)
# > Debtor - Whether the student is a debtor. (Categorical)
# > Tuition fees up to date - Whether the student's tuition fees are up to date. (Categorical)
# > Gender - The gender of the student. (Categorical)
# > Scholarship holder - Whether the student is a scholarship holder. (Categorical)
# > Age at enrollment - The age of the student at the time of enrollment. (Numerical)
# > International - Whether the student is an international student. (Categorical)
# > Curricular units 1st sem (credited) - The number of curricular units credited by the student in the first semester. (Numerical)
# > Curricular units 1st sem (enrolled) - The number of curricular units enrolled by the student in the first semester. (Numerical)
# > Curricular units 1st sem (evaluations) - The number of curricular units evaluated by the student in the first semester. (Numerical)
# > Curricular units 1st sem (approved) - The number of curricular units approved by the student in the first semester. (Numerical)
dataset_raw.info()
dataset_raw.describe(include="all").T
dataset = dataset_raw.copy()
dataset.shape
dataset["Target"].value_counts()
# # target and features
target = dataset["Target"]
features = dataset.drop(["Target"], axis=1)
target.shape, features.shape
# Standardization
from sklearn.preprocessing import StandardScaler
features_std = StandardScaler().fit_transform(features)
features_std = pd.DataFrame(features_std, columns=features.columns)
features_std.describe()
from sklearn.model_selection import train_test_split, KFold
kf = KFold(n_splits=30, shuffle=True, random_state=2304)
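# note: this loop overwrites the split on every iteration, so only the last of the 30 folds is actually used below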
for tr_idx, te_idx in kf.split(features_std):
X_train, X_test = features_std.iloc[tr_idx], features_std.iloc[te_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[te_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
print(y_train)
y_test = le.transform(y_test)  # reuse the mapping learned on the training labels
y_test
# 0: Dropout, 1: Enrolled, 2: Graduate
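# To make the mapping above explicit (a minimal check using the fitted encoder):
print(dict(zip(le.classes_, le.transform(le.classes_))))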
# # modeling
from xgboost import XGBClassifier
model = XGBClassifier(
n_estimators=100, random_state=2304, eval_metric="mlogloss"
) # use_label_encoder=False
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
pred_proba = model.predict_proba(X_test)[:, 1]
pred_proba[:10]
pred_label = model.predict(X_test)
pred_label[:100]
y_test[:100]
classes = np.unique(y_train)
classes
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(model, X_train, y_train, X_test, y_test, classes=classes)
plt.show()
import seaborn as sns
XGBClassifier_importances_values = model.feature_importances_
XGBClassifier_importances = pd.Series(
XGBClassifier_importances_values, index=X_train.columns
)
XGBClassifier_top34 = XGBClassifier_importances.sort_values(ascending=False)[:34]
plt.figure(figsize=(8, 6))
plt.title("Feature importances Top 34")
sns.barplot(x=XGBClassifier_top34, y=XGBClassifier_top34.index)
plt.show()
XGBClassifier_top34[:10]
dataset_important = dataset[
[
"Target",
"Curricular units 2nd sem (approved)",
"Tuition fees up to date",
"Curricular units 1st sem (enrolled)",
"Curricular units 2nd sem (enrolled)",
"Curricular units 1st sem (approved)",
"Scholarship holder",
"Curricular units 1st sem (evaluations)",
"Debtor",
"Curricular units 2nd sem (evaluations)",
]
]
dataset_important.head()
g = sns.pairplot(dataset_important, hue="Target")
plt.show()
# # Updates to increase accuracy
# ## Feature Selection - Dropping Highly Correlated Columns
corr_rate_threshold = 0.85
cor_matrix = features.corr().abs()
cor_matrix
# remove mirror and diagonal values
upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
upper_tri
# Drop columns with correlation higher than corr_rate_threshold
to_drop = [
column
for column in upper_tri.columns
if any(upper_tri[column] >= corr_rate_threshold)
]
print(to_drop)
selected_features = features.drop(features[to_drop], axis=1)
selected_features.head()
features = selected_features.copy()
kf = KFold(n_splits=30, shuffle=True, random_state=2304)
for tr_idx, te_idx in kf.split(features):
X_train, X_test = features.iloc[tr_idx], features.iloc[te_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[te_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
print(y_train)
y_test = le.transform(y_test)
y_test
# 0: Dropout, 1: Enrolled, 2: Graduate
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
pred_proba = model.predict_proba(X_test)[:, 1]
pred_proba[:10]
pred_label = model.predict(X_test)
pred_label[:100]
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(model, X_train, y_train, X_test, y_test, classes=classes)
plt.show()
# Standardization
from sklearn.preprocessing import StandardScaler
features_std = StandardScaler().fit_transform(features)
features_std = pd.DataFrame(features_std, columns=features.columns)
features_std.describe()
# ## Feature Extraction - through PCA
from sklearn.decomposition import PCA
# PCA retaining 95% of the variance
pca = PCA(n_components=0.95, whiten=True)
pca
features_pca = pca.fit_transform(features_std)
features_pca.shape
from sklearn.model_selection import train_test_split, KFold
kf = KFold(n_splits=30, shuffle=True, random_state=2304)
for tr_idx, te_idx in kf.split(features_pca):
X_train, X_test = features_pca[tr_idx], features_pca[te_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[te_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
print(y_train)
y_test = le.transform(y_test)
y_test
from xgboost import XGBClassifier
model = XGBClassifier(
n_estimators=100, random_state=2304, eval_metric="mlogloss"
) # use_label_encoder=False
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
pred_proba = model.predict_proba(X_test)[:, 1]
pred_proba[:10]
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(model, X_train, y_train, X_test, y_test, classes=classes)
plt.show()
# ## Feature Selection - by Variance criterion
# Select columns with variance higher than 0.5
from sklearn.feature_selection import VarianceThreshold
thresholder = VarianceThreshold(threshold=0.5)
features_higher_variance = thresholder.fit_transform(features_std)
features_higher_variance
from sklearn.model_selection import train_test_split, KFold
kf = KFold(n_splits=30, shuffle=True, random_state=2304)
for tr_idx, te_idx in kf.split(features_higher_variance):
X_train, X_test = features_higher_variance[tr_idx], features_higher_variance[te_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[te_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
print(y_train)
y_test = le.transform(y_test)
y_test
from xgboost import XGBClassifier
model = XGBClassifier(
n_estimators=100, random_state=2304, eval_metric="mlogloss"
) # use_label_encoder=False
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
pred_proba = model.predict_proba(X_test)[:, 1]
pred_proba[:10]
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(model, X_train, y_train, X_test, y_test, classes=classes)
plt.show()
|
# # Advanced topics in computer science
# #### An assignment analysing communication services in Oman.
# #### The dataset comes from https://data.gov.om/.
# #### The purpose of this assignment is to learn how to analyse data using data science techniques.
# The dataset is a pandas DataFrame with four columns: "region", "Service or indicator", "Date", and "Value". Each row represents a specific observation of a service or indicator in a particular region and year.
# The "region" column contains the name of the region where the observation was made, such as "Muscat". The "Service or indicator" column contains the name of the service or indicator being measured, such as "Analogue Fixed telephone lines". The "Date" column contains the year in which the observation was made, such as 2012. Finally, the "Value" column contains the actual value of the service or indicator in the given region and year, such as 143845.
# With this DataFrame, we can perform a wide range of data analysis tasks, such as filtering, aggregating, and visualizing the data to gain insights into the trends and patterns of the services and indicators in Oman.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# ## Reading the file
df = pd.read_csv("/kaggle/input/communicationinoman/Main.csv")
# The df.head() method is used to display the first few rows of a pandas DataFrame named df. By default, it displays the first 5 rows of the DataFrame, but this can be adjusted by passing a different integer value as a parameter.
df.head()
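# A quick illustration of the filtering and aggregation mentioned above
# (a sketch; the region value "Muscat" comes from the column description).
df[df["region"] == "Muscat"].groupby("Date")["Value"].sum().head()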
# The df.info() method is used to display information about the structure and contents of a pandas DataFrame named df.
# It provides a summary of the DataFrame, including the number of rows and columns, the data type of each column, and the number of non-null values in each column. It also displays the memory usage of the DataFrame.
# This method is useful for quickly checking the data types and missing values in the DataFrame, and can help identify any potential issues or data quality problems.
# ## Data cleansing and improvement (making the information more useful)
df.info()
df1 = df.copy()
df.tail()
df1 = df.copy()
df.info()
# This code defines a function named change_to_numeric that takes two parameters: a string x, and a pandas DataFrame df1.
# The purpose of the function is to convert a categorical variable in the DataFrame (x) into a set of numeric binary variables, and then add these new variables to the DataFrame.
# The pd.get_dummies function is used to convert the categorical variable into a set of binary variables. Each possible value of the categorical variable is transformed into a new binary variable, which takes the value 1 if the original variable has that value, and 0 otherwise.
# The pd.concat function is used to concatenate the new binary variables to the original DataFrame, creating a new DataFrame with the binary variables included.
# Finally, the original categorical variable is dropped from the DataFrame using the drop method of pandas.
def change_to_numeric(x, df1):
temp = pd.get_dummies(df1[x])
df1 = pd.concat([df1, temp], axis=1)
df1.drop([x], axis=1, inplace=True)
return df1
df2 = change_to_numeric("Service or indicator", df1)
df2.head()
df2.info()
df2["Calls Fixed Telephone"].unique()
df3 = df2.copy()
def change_to_numeric(x, df3):
temp = pd.get_dummies(df3[x])
df3 = pd.concat([df3, temp], axis=1)
df3.drop([x], axis=1, inplace=True)
return df3
df4 = change_to_numeric("region", df3)
df4.head()
df4.info()
df4["Muscat"].unique()
df4.isna().sum()
df4.duplicated().sum()
df4["Date"].unique()
# This code snippet retrieves the unique values of the "Muscat" column of the DataFrame df4 using the unique() method.
# The unique() method returns a numpy array containing the unique values of a column, in the order in which they appear in the DataFrame.
# In this case, "Muscat" is one of the dummy (one-hot) columns created from "region", so the expected unique values are 0 and 1.
# The resulting array can be used to confirm the encoding before further analysis or visualization.
df4["Muscat"].unique()
df4.rename(columns={"Date": "Year"}, inplace=True)
df4.head()
df.isna().sum()
df4.rename(columns={"region": "Region"}, inplace=True)
df4.head()
# This code snippet assumes that there is a pandas DataFrame named df4, and it generates a styled HTML table of descriptive statistics for the DataFrame using the describe() method, along with a background gradient applied using the style.background_gradient() method.
# The describe() method is used to generate a set of summary statistics for the DataFrame, including count, mean, standard deviation, minimum, maximum, and quartile information.
# The resulting DataFrame is transposed using the .T attribute, which switches the rows and columns so that the statistics are displayed as columns and the variables are displayed as rows.
# The .style.background_gradient(cmap = "magma") method is then used to apply a background gradient to the table, with the "magma" colormap specified as the color scheme. This results in a visually appealing table where the background color of each cell is shaded according to its value.
# The resulting styled HTML table can be displayed in a Jupyter notebook or exported to an HTML file for further use
df4.describe().T.style.background_gradient(cmap="magma")
# ## Checking if there's any duplicated value
# To check for duplicated values in a pandas dataframe, you can use the duplicated() method. This method returns a boolean series indicating whether each row is a duplicate of a previous row.
df4.duplicated().sum()
# ## Checking values of the Year column
# This code assumes that there is a pandas DataFrame named df4, and it retrieves the unique values of the "Year" column in the DataFrame using the unique() method.
# The unique() method is used to retrieve the unique values of a column in a pandas DataFrame. It returns a numpy array containing the unique values of the column, in the order in which they appear in the DataFrame.
# In this case, the code assumes that the "Year" column contains a set of values representing years (e.g., 2010, 2011, etc.), and it retrieves the unique values of this column.
# The resulting numpy array contains a list of the unique years that appear in the DataFrame, which can be used for further analysis or visualization.
df4["Year"].unique()
temp = df[df["Value"].isna()]
temp
df.head()
df.isna().sum()
# This code snippet assumes that there is a pandas DataFrame named df, and it fills the missing values in the "region" column with the value 1 using the fillna() method.
# The fillna() method is used to fill missing (i.e., NaN) values in a pandas DataFrame or Series with a specified value. In this case, the fillna() method is called on the "region" column of the DataFrame df, and the missing values in this column are filled with the value 1.
# The inplace = True argument specifies that the changes should be made to the DataFrame in place (i.e., modify the existing DataFrame rather than creating a new one).
# The resulting DataFrame df will have the missing values in the "region" column replaced with the value 1.
df1 = df[df["region"] == "Year"].copy()
df1["region"].unique()
df["region"].fillna(1, inplace=True)
df
df.head()
am = df["Value"].sum()
am
df.head(5)
# ## Finding the correlations in the data:
df4.corr()
df.head()
# ## Visualization
# ## Presenting the relation in a bar chart
import matplotlib.pyplot as plt
import seaborn as sns
# This code snippet creates a histogram using the seaborn library to visualize the distribution of values in the "Date" column of the pandas DataFrame df.
# The plt.figure(figsize=(20,4)) command creates a figure with a size of 20 inches in width and 4 inches in height.
# The plt.subplot(1,2,1) command sets up a grid of subplots, with one row and two columns, and selects the first subplot.
# The plt.title('Analogue Fixed telephone lines') command sets the title of the subplot.
# The sns.histplot(df.Date) command creates a histogram of the values in the "Date" column of the DataFrame df using seaborn. The resulting plot shows the frequency of each value in the "Date" column.
# The plt.show() command displays the plot.
# Note that the second subplot is not specified in the code, so it will not be displayed. If you want to display two subplots side-by-side, you will need to add additional commands to create the second subplot and plot data in it.
plt.figure(figsize=(20, 4))
plt.subplot(1, 2, 1)
plt.title("Analogue Fixed telephone lines")
sns.histplot(df.Date)
plt.show()
x = df4.corr()["Fixed Post Paid"]
x
# This code snippet assumes that x is a pandas Series, and it creates a bar chart to visualize the values in x.
# The abs() function is used to take the absolute value of the values in x, which ensures that all values are positive.
# The sort_values(ascending=False) method is used to sort the values in x in descending order, so that the largest values appear first.
# The plot(kind='bar') method is used to create a vertical bar chart of the values in x, with the height of each bar proportional to the value it represents.
# The resulting plot shows the correlations with "Fixed Post Paid" sorted in descending order, with the largest values appearing first.
x = abs(x)
x = x.sort_values(ascending=False)
x.plot(kind="bar")
df1 = df.copy()
df1 = df1.drop(["Value"], axis=1)
df1.head()
categ = df.select_dtypes(exclude="number")
categ.head()
df1 = df.copy()
df1 = df1.drop(["region", "Service or indicator", "Date"], axis=1)
df1.head()
# This code snippet performs some data cleaning on the 'Date' column of the pandas DataFrame df.
# The first two lines use the replace method to replace specific values in the 'Date' column with new values: any occurrence of the string '200,000' becomes '200,000 - 200,000', and any occurrence of the string '0' becomes '0 - 0'.
# These replacements standardize the format of the values in the 'Date' column so that every entry follows the same 'start - end' pattern.
# The third line creates a list of the unique values in the 'Date' column using the unique method and stores it in a variable named f.
# This list of unique values is useful for spotting missing values, duplicates, or outliers in the data, and for visualizing the distribution of values with a histogram, bar chart, or other graphical representation.
df["Date"] = df["Date"].replace("200,000", "200,000 - 200,000")
df["Date"] = df["Date"].replace("0", "0 - 0")
f = list(df["Date"].unique())
f
# ### Univariate Analysis
# Univariate analysis is a statistical analysis technique that involves analyzing a single variable at a time. In other words, it is the analysis of a single variable in isolation, without considering its relationship with other variables.
plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1)
Date = df["Date"].value_counts().head(15).reset_index()
Date.columns = ["Date", "count"]
sns.barplot(data=Date, x="Date", y="count")
plt.xticks(rotation=90)
plt.subplot(1, 3, 2)
region = df["region"].value_counts().head(15).reset_index()
region.columns = ["region", "count"]
sns.barplot(data=region, x="region", y="count")
plt.xticks(rotation=90)
plt.subplot(1, 3, 3)
engine = df["Service or indicator"].value_counts().head(15).reset_index()
engine.columns = ["Service or indicator", "count"]
sns.barplot(data=engine, x="Service or indicator", y="count")
plt.xticks(rotation=90)
plt.show()
# ## Calculating the mean, median and mode
cft = df4[df4["Calls Fixed Telephone"] == 1]
cft.head()
year1 = []
mean1 = []
median1 = []
mode1 = []
for i in range(2012, 2023):
    year1.append(i)
    # restrict the statistics to the rows of the current year
    values = cft[cft["Year"].astype(str) == str(i)]["Value"]
    mean1.append(values.mean())
    median1.append(values.median())
    m = values.mode()
    mode1.append(m.iloc[0] if not m.empty else np.nan)
print(" year\t", year1, "\n", "Mean\t", mean1, "\n", "Median\t", median1, "\n", "Mode\t", mode1)
# ## Showing the median per year in a bar chart
x = year1
y = median1
plt.bar(x, y)
# ## Machine learning
# This code snippet imports the linear_model module from the sklearn (Scikit-learn) library. The linear_model module provides a variety of linear regression models, such as LinearRegression, Lasso, Ridge, and ElasticNet, among others.
from sklearn import linear_model
df4.tail()
# This code snippet uses the scikit-learn library to perform linear regression on the DataFrame df4.
# The linear_model.LinearRegression() command creates a new instance of the LinearRegression class, which is used to perform linear regression.
# The allfeatures = df4.columns.tolist() command creates a list of all the column names in the DataFrame df4.
# The allfeatures.remove("Value") command removes the "Value" column name from the list of feature column names, since this is the target variable that we are trying to predict.
# The X = df4[allfeatures] command creates a new DataFrame X that contains only the feature columns, with the "Value" column removed.
# The y = df4["Value"] command creates a new Series y that contains the target variable column, which is the "Value" column.
# The command.fit(X,y) command fits a linear regression model to the feature and target variables in X and y, respectively.
# The print("Done") command simply prints the string "Done" to indicate that the linear regression has been completed.
from sklearn import linear_model
import pandas as pd
# Convert categorical columns to numerical using one-hot encoding
df4 = pd.get_dummies(df4)
# Create linear regression model
command = linear_model.LinearRegression()
# Define features and target
allfeatures = df4.columns.tolist()
allfeatures.remove("Value")
X = df4[allfeatures]
y = df4["Value"]
# Fit the model
command.fit(X, y)
print("Done")
print(allfeatures)
print(len(allfeatures))
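# As a quick sanity check of the fitted model (a minimal sketch), look at the R^2
# score on the training data; note this only measures in-sample fit.
print("R^2 on training data:", command.score(X, y))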
# This is a Python code snippet that demonstrates how to fit a linear regression model using the LinearRegression class from the sklearn (Scikit-learn) library.
# The first line imports the linear_model module from sklearn.
# The second line creates an instance of the LinearRegression class and assigns it to the variable reg.
# The third line calls the fit method of the LinearRegression object reg. The fit method takes two arguments:
# A list of lists [[0, 0], [1, 1], [2, 2]] that represents the independent variables (also called features or predictors) of the model. In this case, there are two independent variables, each with three observations.
# A list [0, 1, 2] that represents the dependent variable (also called the response variable or target) of the model. In this case, there is only one dependent variable, which has three observations.
# The fit method fits the linear regression model to the data and learns the coefficients of the regression line.
# The fourth line prints the coefficients of the regression line, which are stored in the coef_ attribute of the LinearRegression object. In this case, the output will be [0.5 0.5], which means that the coefficients of the regression line are 0.5 for both independent variables.
# Note that this is a very simple example with only two independent variables and three observations, and is only intended to demonstrate the basic usage of the LinearRegression class. In practice, you would typically work with much larger datasets and more complex models.
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
print(reg.coef_)
# This is a Python code snippet that creates a heatmap visualization of the correlation matrix of a DataFrame df4.
# The plt.subplots(figsize=(28, 24)) line creates a figure with a size of 28 inches in width and 24 inches in height.
# The colormap variable defines a diverging color palette (between hues 220 and 10) that will be used for the heatmap.
# The sns.heatmap() function is then called with the following arguments:
# df4.corr(): this selects the correlation matrix of the DataFrame df4.
# cmap=colormap: this sets the color map for the heatmap.
# square=True: this sets the aspect ratio of the heatmap to be square.
# cbar_kws={'shrink':0.9 }: this sets the size of the color bar to be 90% of its default size.
# ax=ax: this specifies the Axes object where the heatmap will be drawn.
# annot=True: this enables the annotation of the values in the heatmap.
# linewidths=0.1: this sets the width of the lines between the cells of the heatmap.
# vmax=1.0: this sets the maximum value for the color scale of the heatmap to be 1.0.
# linecolor='white': this sets the color of the lines between the cells to be white.
# annot_kws={'fontsize':12}: this sets the font size of the annotations to be 12.
# Finally, the plt.title() function sets the title of the plot to "Pearson Correlation of Features", with a font size of 15, and a vertical position of 1.05 (slightly above the top of the plot).
#
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
_, ax = plt.subplots(figsize=(28, 24))
colormap = sns.diverging_palette(220, 10, as_cmap=True)
_ = sns.heatmap(
df4.corr(),
cmap=colormap,
square=True,
cbar_kws={"shrink": 0.9},
ax=ax,
annot=True,
linewidths=0.1,
vmax=1.0,
linecolor="white",
annot_kws={"fontsize": 12},
)
plt.title("Pearson Correlation of Features", y=1.05, size=15)
|
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, train_test_split as tts
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
df.drop("Id", axis=1, inplace=True)
df.head()
df.Species.value_counts()
df.replace({"Iris-setosa": 0, "Iris-versicolor": 1, "Iris-virginica": 2}, inplace=True)
df.head()
x, y = df.iloc[:, :-1], df.iloc[:, -1]
x_train, x_test, y_train, y_test = tts(x, y, test_size=0.2)
from sklearn.base import BaseEstimator
class ClfSwitcher(BaseEstimator):
def __init__(self, estimator=LogisticRegression()):
"""
A Custom BaseEstimator that can switch between classifiers.
:param estimator: sklearn object - The classifier
"""
self.estimator = estimator
def fit(self, X, y=None, **kwargs):
self.estimator.fit(X, y)
return self
def predict(self, X, y=None):
return self.estimator.predict(X)
def predict_proba(self, X):
return self.estimator.predict_proba(X)
def score(self, X, y):
return self.estimator.score(X, y)
pipeline = Pipeline([("clf", ClfSwitcher())])
parameters = [
{
"clf__estimator": [LogisticRegression()],
"clf__estimator__max_iter": [50, 80, 100],
},
{
"clf__estimator": [RandomForestClassifier()],
"clf__estimator__n_estimators": [100, 250, 500],
"clf__estimator__max_depth": [3, 5, 7],
"clf__estimator__min_samples_split": [2, 3],
},
]
gscv = GridSearchCV(pipeline, parameters, cv=3, n_jobs=-1, verbose=2)
gscv.fit(x_train, y_train)
gscv.best_params_
gscv.best_estimator_
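# As a possible next step (a minimal sketch), evaluate the tuned pipeline on the
# held-out split created earlier; GridSearchCV refits best_estimator_ on the full
# training data, so scoring on x_test / y_test is a fair check.
print("Test accuracy:", gscv.score(x_test, y_test))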
|
# A comparative study of different optimization metaheuristics
# Problem statement:
# The objective of this work is to optimize (minimize) the total cost (including the cost of material, forming and welding) and to compare different optimization metaheuristics. It is a real-world, non-convex, bounded, constrained optimization problem with four design variables (x1, x2, x3, x4):
# z1: the thickness of the shell
# z2: the thickness of the head
# x3: the inner radius
# x4: the length of the vessel excluding the head
# where z1 = 0.625*x1 and z2 = 0.625*x2.
# The variables have the following bounds: x1 ≤ 99, x2 ≥ 1, x3 ≤ 200, x4 ≥ 10
# The problem must satisfy the following constraints:
# g1(x) = 0.00954*x3 ≤ z2
# g2(x) = 0.0193*x3 ≤ z1
# g3(x) = x4 ≤ 240
# g4(x) = -π*x3^2*x4 - (4/3)*π*x3^3 ≤ -1296000
# It can be formulated mathematically as follows:
# f(x) = 1.7781*z2*x3^2 + 0.6224*z1*x3*x4 + 3.1661*z1^2*x4 + 19.84*z1^2*x3
# Solving the problem:
# Reformulating the problem to simplify the constraints, the bounds and the complexity of the objective function:
# Objective function:
def objective(x):
    if (0 <= x[0] < 99.0 and 1 <= x[1] <= 100) and (
        0 <= x[2] <= 200.0 and 10.0 <= x[3] <= 300.0
    ):
        return (
            (1.7781 * 0.625 * x[1] * (x[2] ** 2))
            + (
                0.6224 * 0.625 * x[0] * x[2] * x[3]
                + 3.1661 * x[3] * (0.625 * x[0]) ** 2
            )
            + (7.75 * x[2] * (x[0]) ** 2)
        )
    # out-of-bounds candidates get a large finite penalty so that the comparisons
    # in the search loops below always work
    return 1e12
# Note:
# Since the problem is a real-world one, it must not give us an infinite value.
# That is why we only accept positive values and limit ourselves to, for example, 300 as a maximum value.
# The constraints:
def constraint1(x):
    # g1(x) = 0.00954*x3 - z2, with z2 = 0.625*x2
    return 0.00954 * x[2] - 0.625 * x[1]
def constraint2(x):
    # g2(x) = 0.0193*x3 - z1, with z1 = 0.625*x1
    return 0.0193 * x[2] - 0.625 * x[0]
def constraint3(x):
return x[3] - 240
def constraint4(x):
return -3.14 * x[3] * (x[2] ** 2) - (4 / 3) * 3.14 * (x[2] ** 3) + 1296000
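# Quick sanity check of the objective and the constraints at an arbitrary in-bounds
# point (a sketch; the point below is chosen only for illustration).
x_demo = [10.0, 10.0, 50.0, 100.0]
print("f(x_demo) =", objective(x_demo))
print("g1..g4 =", constraint1(x_demo), constraint2(x_demo), constraint3(x_demo), constraint4(x_demo))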
# Random search:
import numpy as np
def Random_search(
objective, constraint1, constraint2, constraint3, constraint4, max_iter=50
):
    # create random values within the bounds
x1 = np.random.uniform(size=1, low=0, high=99)[0]
x2 = np.random.uniform(size=1, low=1, high=100)[0]
x3 = np.random.uniform(size=1, low=0, high=200)[0]
x4 = np.random.uniform(size=1, low=10, high=300)[0]
solution = []
solution.append(x1)
solution.append(x2)
solution.append(x3)
solution.append(x4)
for i in range(max_iter):
solution_objective = objective(solution)
solution_constraint1 = constraint1(solution)
solution_constraint2 = constraint2(solution)
solution_constraint3 = constraint3(solution)
solution_constraint4 = constraint4(solution)
x1 = np.random.uniform(size=1, low=0, high=99)[0]
x2 = np.random.uniform(size=1, low=1, high=100)[0]
x3 = np.random.uniform(size=1, low=0, high=200)[0]
x4 = np.random.uniform(size=1, low=10, high=300)[0]
new_solution = [x1, x2, x3, x4]
solution_candidate = objective(new_solution)
solution_candidate1 = constraint1(new_solution)
solution_candidate2 = constraint2(new_solution)
solution_candidate3 = constraint3(new_solution)
solution_candidate4 = constraint4(new_solution)
        if (
            solution_objective > solution_candidate
            and solution_constraint1 > solution_candidate1
            and solution_constraint2 > solution_candidate2
            and solution_constraint3 > solution_candidate3
            and solution_constraint4 > solution_candidate4
        ):
            best_solution = new_solution
        else:
            best_solution = solution
        # carry the best point found so far into the next iteration
        solution = best_solution
best_objective = objective(best_solution)
    best_constraint1 = constraint1(best_solution)
    best_constraint2 = constraint2(best_solution)
best_constraint3 = constraint3(best_solution)
best_constraint4 = constraint4(best_solution)
print("La meillleure solution est: ")
print(" f(%s) = %.5f" % (best_solution, best_objective))
print(" g1(%s) = %.5g1" % (best_solution, best_constraint1))
print(" g2(%s) = %.5g2" % (best_solution, best_constraint2))
print(" g3(%s) = %.5g3" % (best_solution, best_constraint3))
print(" g4(%s) = %.5g4" % (best_solution, best_constraint4))
return best_objective, best_solution
n_iterations = 50
best, sol = Random_search(objective, constraint1, constraint2, constraint3, constraint4)
# Hill-Climbing:
from numpy.random import randn
from numpy.random import seed
import numpy as np
from numpy import asarray
def hillclimbing(
objective,
bounds,
n_iterations,
step_size,
constraint1,
constraint2,
constraint3,
constraint4,
):
    # Generate the initial points:
x1 = np.random.uniform(size=1, low=0, high=99)[0]
print("val x1", x1)
x2 = np.random.uniform(size=1, low=1, high=100)[0]
print("val x2", x2)
x3 = np.random.uniform(size=1, low=0, high=200)[0]
print("val x3", x3)
x4 = np.random.uniform(size=1, low=10, high=300)[0]
print("val x4", x4)
solution = []
solution.append(x1)
solution.append(x2)
solution.append(x3)
solution.append(x4)
    # Evaluate the points:
k = list()
solution_Objective = objective(solution)
k.append(solution_Objective)
    solution_1 = constraint1(solution)
    solution_2 = constraint2(solution)
solution_3 = constraint3(solution)
solution_4 = constraint4(solution)
# run hill climbing:
for i in range(n_iterations):
        # Take a step:
candidate = solution + randn(len(bounds)) * step_size
        # evaluate the candidate point:
candidte_objective = objective(candidate)
        candidate_1 = constraint1(candidate)
        candidate_2 = constraint2(candidate)
candidate_3 = constraint3(candidate)
candidate_4 = constraint4(candidate)
        # check whether we should keep the new point:
if candidte_objective <= solution_Objective:
            # store the new point
solution, solution_Objective = candidate, candidte_objective
print(">%d f(%s) = %.5f" % (i, solution, solution_Objective))
k.append(solution_Objective)
if candidate_1 <= solution_1:
            # store the new point
            solution, solution_1 = candidate, candidate_1
            print(" g1(%s) = %.5f" % (solution, solution_1))
if candidate_2 <= solution_2:
            # store the new point
            solution, solution_2 = candidate, candidate_2
            print(" g2(%s) = %.5f" % (solution, solution_2))
if candidate_3 <= solution_3:
            # store the new point
            solution, solution_3 = candidate, candidate_3
            print(" g3(%s) = %.5f" % (solution, solution_3))
if candidate_4 <= solution_4:
            # store the new point
            solution, solution_4 = candidate, candidate_4
            print(" g4(%s) = %.5f" % (solution, solution_4))
return [
solution,
solution_Objective,
k,
solution_1,
solution_2,
solution_3,
solution_4,
]
# seed the pseudo-random number generator
seed(5)
bounds = asarray([[0.0, 99.0], [1.0, 100.0], [0.0, 200.0], [10.0, 300.0]])
n_iterations = 50
step_size = 0.1
best, sol, k, s1, s2, s3, s4 = hillclimbing(
objective,
bounds,
n_iterations,
step_size,
constraint1,
constraint2,
constraint3,
constraint4,
)
print("La meilleure solution est :")
print("f(%s) = %f" % (best, sol))
print("g1(%s) = %g1" % (best, s1))
print("g2(%s) = %g2" % (best, s2))
print("g3(%s) = %g3" % (best, s3))
print("g4(%s) = %g4" % (best, s4))
from matplotlib import pyplot
pyplot.plot(k, ".-")
pyplot.xlabel("Amelioration")
pyplot.ylabel("f(x)")
pyplot.show()
# Simulated Annealing
from numpy.random import randn
import numpy as np
from math import exp
from numpy import asarray
from numpy import exp
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed
def simulated_annealing(
objective,
bounds,
n_iterations,
step_size,
temp,
constraint1,
constraint2,
constraint3,
constraint4,
):
    # generate the initial points:
x1 = np.random.uniform(size=1, low=0, high=99)[0]
x2 = np.random.uniform(size=1, low=1, high=100)[0]
x3 = np.random.uniform(size=1, low=0, high=200)[0]
x4 = np.random.uniform(size=1, low=10, high=300)[0]
solution = []
solution.append(x1)
solution.append(x2)
solution.append(x3)
solution.append(x4)
    # Evaluate the initial point:
solution_eval = objective(solution)
solution_1 = constraint1(solution)[0]
solution_2 = constraint2(solution)[0]
solution_3 = constraint3(solution)
solution_4 = constraint4(solution)
curr, curr_eval, curr_1, curr_2, curr_3, curr_4 = (
solution,
solution_eval,
solution_1,
solution_2,
solution_3,
solution_4,
)
scores = list()
for i in range(n_iterations):
        # Take a step from the current point of the annealing walk:
        candidate = curr + randn(len(bounds)) * step_size
        # Evaluate the candidate point:
candidate_eval = objective(candidate)
candidate_1 = constraint1(candidate)[0]
candidate_2 = constraint2(candidate)[0]
candidate_3 = constraint3(candidate)
candidate_4 = constraint4(candidate)
        # Keep track of the best objective value found so far:
        if candidate_eval < solution_eval:
            solution, solution_eval = candidate, candidate_eval
            scores.append(solution_eval)
            print(">%d f(%s) = %.5f" % (i, solution, solution_eval))
        if candidate_1 < solution_1:
            solution_1 = candidate_1
            print(" g1(%s) = %.5f" % (candidate, solution_1))
        if candidate_2 < solution_2:
            solution_2 = candidate_2
            print(" g2(%s) = %.5f" % (candidate, solution_2))
        if candidate_3 < solution_3:
            solution_3 = candidate_3
            print(" g3(%s) = %.5f" % (candidate, solution_3))
        if candidate_4 < solution_4:
            solution_4 = candidate_4
            print(" g4(%s) = %.5f" % (candidate, solution_4))
        # Difference between the candidate and the current point:
        diff = candidate_eval - curr_eval
        # Temperature schedule (decays with the iteration count):
        t = temp / float(i + 1)
        # Metropolis acceptance criterion: always accept improvements (diff < 0) and
        # accept worse moves with probability exp(-diff / t), which shrinks as t decays.
        metropolis = exp(-diff / t)
        if diff < 0 or rand() < metropolis:
            curr, curr_eval = candidate, candidate_eval
return [
solution,
solution_eval,
solution_1,
solution_2,
solution_3,
solution_4,
scores,
]
seed(5)
bounds = asarray([[0.0, 99.0], [1.0, 100.0], [0.0, 200.0], [10.0, 300.0]])
n_iterations = 50
step_size = 0.1
temp = 10
# perform the simulated annealing search
best, score, s1, s2, s3, s4, scores = simulated_annealing(
    objective,
    bounds,
    n_iterations,
    step_size,
    temp,
    constraint1,
    constraint2,
    constraint3,
    constraint4,
)
print("The best solution is:")
print("f(%s) = %f" % (best, score))
print("g1(%s) = %g" % (best, s1))
print("g2(%s) = %g" % (best, s2))
print("g3(%s) = %g" % (best, s3))
print("g4(%s) = %g" % (best, s4))
|
import spacy
import re
import nltk
import string
import sklearn
import neattext as nt
import neattext.functions as nfx
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
nltk.download("stopwords")
from nltk.corpus import stopwords
from collections import Counter
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Dense
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.initializers import TruncatedNormal
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.sequence import pad_sequences
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
bert = TFBertModel.from_pretrained("bert-base-uncased")
data = pd.read_csv(
"/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv"
)
data.head()
data.groupby("cyberbullying_type").describe()
data["cyberbullying_type"].value_counts()
data["cyberbullying_type"].nunique()
data["cyberbullying_type"].count()
data.info()
data.shape
data.tail()
data.isnull().sum()
data.drop_duplicates(inplace=True)
data.shape
# shuffle and split data
data_train, data_test = train_test_split(
data, test_size=0.3, random_state=42, shuffle=True, stratify=data.cyberbullying_type
)
# preprocessing
data_train["tweet_text"] = data_train["tweet_text"].apply(nfx.remove_hashtags)
data_train.head()
data_train["tweet_text"] = data_train["tweet_text"].apply(nfx.remove_userhandles)
data_train.tail()
data_train["tweet_text"] = data_train["tweet_text"].apply(nfx.remove_multiple_spaces)
data_train.head()
data_train.tail()
data_train["tweet_text"] = data_train["tweet_text"].apply(nfx.remove_stopwords)
data_train.head()
data_train["tweet_text"] = data_train["tweet_text"].apply(nfx.remove_urls)
data_train.head()
data_train["cyberbullying_type"].unique()
data_test["cyberbullying_type"].unique()
# encoding
label_enc = LabelEncoder()
data_train["cyberbullying_type"] = label_enc.fit_transform(
data_train["cyberbullying_type"]
)
data_train.head()
data_test["cyberbullying_type"] = label_enc.transform(data_test["cyberbullying_type"])
data_test["cyberbullying_type"].unique()
data_train.dtypes
data_train["cyberbullying_type"].unique()
data_train["cyberbullying_type"].value_counts()
x_train = tokenizer(
text=data_train["tweet_text"].tolist(),
add_special_tokens=True,
max_length=100,
truncation=True,
padding=True,
return_tensors="tf",
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
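# The tokenizer output is a dict of TensorFlow tensors: "input_ids" holds the token ids
# (truncated to max_length=100 and padded), and "attention_mask" marks real tokens with 1
# and padding with 0; both are fed to the model below.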
x_train["input_ids"]
max_len = 100
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="attention_mask")
bert = TFBertModel.from_pretrained("bert-base-uncased")
embeddings = bert(input_ids, attention_mask=input_mask)[0]
out = tf.keras.layers.GlobalMaxPool1D()(embeddings)
out = Dense(128, activation="relu")(out)
out = tf.keras.layers.Dropout(0.1)(out)
out = Dense(32, activation="relu")(out)
y = Dense(6, activation=None)(out)  # raw logits for the 6 classes; the loss below uses from_logits=True
model = tf.keras.Model(inputs=[input_ids, input_mask], outputs=y)
model.layers[2].trainable = True
optimizer = Adam(learning_rate=5e-05, epsilon=1e-08, clipnorm=1.0)
loss = CategoricalCrossentropy(from_logits=True)
metric = CategoricalAccuracy("balanced_accuracy")  # plain categorical accuracy, just logged under this name
model.compile(optimizer=optimizer, loss=loss, metrics=metric)
model.summary()
from keras.utils.vis_utils import plot_model
plot_model(model, to_file="model_plot.png", show_layer_names=True)
x_test = tokenizer(
text=data_test["tweet_text"].tolist(),
add_special_tokens=True,
max_length=100,
truncation=True,
padding=True,
return_tensors="tf",
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
bert_train = model.fit(
x={"input_ids": x_train["input_ids"], "attention_mask": x_train["attention_mask"]},
y=to_categorical(data_train.cyberbullying_type),
validation_data=(
{"input_ids": x_test["input_ids"], "attention_mask": x_test["attention_mask"]},
to_categorical(data_test.cyberbullying_type),
),
epochs=3,
batch_size=36,
)
# the metric logged as "balanced_accuracy" (plain categorical accuracy) reaches about 88.52% on train and 84.07% on validation over 3 epochs
pred_raw = model.predict(
{"input_ids": x_test["input_ids"], "attention_mask": x_test["attention_mask"]}
)
pred_raw[0]
y_pred = np.argmax(pred_raw, axis=1)
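# argmax over the 6 class scores gives the predicted label index, which matches the integer
# encoding produced by the LabelEncoder above, so it can be compared directly with cyberbullying_type.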
data_test.cyberbullying_type
print(classification_report(data_test.cyberbullying_type, y_pred))
|
#
# ---
# # **Final Project - Python for Data Analysis** | Credit data
#
# Instructor [André Perez](https://www.linkedin.com/in/andremarcosperez/)
# Student Júlio Cidade
# ---
# ## 1\. Problem description
# The exercise starts from a credit database of a fictitious bank. It contains a set of attributes for each client, as well as the client's default status, indicated by the "*default*" column.
# The goal of the task is to analyze the provided data in order to determine which attributes are related to default, so as to build a profile of the clients most likely to be in default or to enter that category.
# ## 2\. Library imports
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# ## 3\. Data download/loading/generation
# The provided data are in CSV format and can be found [here](https://raw.githubusercontent.com/andre-marcos-perez/ebac-course-utils/develop/dataset/credito.csv).
# The first step is to load the database into a *dataframe*
df = pd.read_csv(
"/kaggle/input/python-m10-supportmaterial/Python_M10_support material.csv",
na_values="na",
)
# ## 4\. Exploration
# Below you can see how the *dataframe* is structured. The *default* column indicates whether a client is current on payments (`default = 0`) or in default (`default = 1`)
df.head(n=10)
# ### **4.1. Structure**
# The code sequence below determines the total number of clients, as well as how many of them are current and how many are in default.
#
df.shape
df[df["default"] == 0].shape
df[df["default"] == 1].shape
qtd_total, _ = df.shape
qtd_adimplentes, _ = df[df["default"] == 0].shape
qtd_inadimplentes, _ = df[df["default"] == 1].shape
print(
f"A proporcão clientes adimplentes é de {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
f"A proporcão clientes inadimplentes é de {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
# As shown above, the total number of clients is 10127, of which 8500 are current and 1627 are in default.
# ### **4.2. Schema**
df.head(n=5)
# #### **4.2.1 Columns and their data types**
df.dtypes
# #### **4.2.2 Categorical attributes**
df.select_dtypes("object").describe().transpose()
# #### **4.2.3 Numerical attributes**
df.drop("id", axis=1).select_dtypes("number").describe().transpose()
# ### **4.3. Missing data**
# Missing data can appear in several forms; in this exercise it was specified at import time that missing values are identified as "na" in the CSV file
# Using the code below we can see that some clients are missing education, marital status and annual salary data.
df.isna().any()
# The function below gathers some statistics about the columns with missing data.
def stats_dados_faltantes(df: pd.DataFrame) -> None:
stats_dados_faltantes = []
for col in df.columns:
if df[col].isna().any():
qtd, _ = df[df[col].isna()].shape
total, _ = df.shape
dict_dados_faltantes = {
col: {"quantidade": qtd, "porcentagem": round(100 * qtd / total, 2)}
}
stats_dados_faltantes.append(dict_dados_faltantes)
for stat in stats_dados_faltantes:
print(stat)
stats_dados_faltantes(df=df)
stats_dados_faltantes(df=df[df["default"] == 0])
stats_dados_faltantes(df=df[df["default"] == 1])
# ## 5\. Data transformation and cleaning
# Data transformation and cleaning will address two distinct problems:
# - Fixing the *schema* of our columns;
# - Removing the missing data.
# ### **5.1. Schema correction**
# During exploration we saw that the **limite_credito** and **valor_transacoes_12m** columns were being interpreted as categorical (`dtype = object`) when they should actually be numerical attributes (`dtype = float64`). This happens because the database uses the Brazilian number format, with "." as the thousands separator and "," as the decimal separator.
# This issue can be seen below:
df[["limite_credito", "valor_transacoes_12m"]].dtypes
df[["limite_credito", "valor_transacoes_12m"]].head(n=5)
# A `lambda` function applied to the columns of interest will be used to clean the data.
fn = lambda valor: float(valor.replace(".", "").replace(",", "."))
df["valor_transacoes_12m"] = df["valor_transacoes_12m"].apply(fn)
df["limite_credito"] = df["limite_credito"].apply(fn)
# Describing the *schema* again, we can see that the data types have been corrected:
df.dtypes
# - **Categorical** attributes.
df.select_dtypes("object").describe().transpose()
# - **Numerical** attributes.
df.drop("id", axis=1).select_dtypes("number").describe().transpose()
# ### **5.2. Removing missing data**
# As mentioned earlier, pandas was told at import time what counts as a missing value, which makes this step very simple.
# It is important to remember that we can only drop the missing data without compromising the analysis because the education, marital status and annual salary attributes show very similar percentages of missing values overall and among current and defaulting clients, as the code below recalls:
print("Base de dados completa")
stats_dados_faltantes(df=df)
print("Base de dados adimplentes")
stats_dados_faltantes(df=df[df["default"] == 0])
print("Base de dados inadimplentes")
stats_dados_faltantes(df=df[df["default"] == 1])
# Removing the missing data:
df.dropna(inplace=True)
# Analyzing the new data structure:
df.shape
df[df["default"] == 0].shape
df[df["default"] == 1].shape
qtd_total_novo, _ = df.shape
qtd_adimplentes_novo, _ = df[df["default"] == 0].shape
qtd_inadimplentes_novo, _ = df[df["default"] == 1].shape
print(
f"A proporcão adimplentes ativos é de {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
f"A nova proporcão de clientes adimplentes é de {round(100 * qtd_adimplentes_novo / qtd_total_novo, 2)}%"
)
print("")
print(
f"A proporcão clientes inadimplentes é de {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
print(
f"A nova proporcão de clientes inadimplentes é de {round(100 * qtd_inadimplentes_novo / qtd_total_novo, 2)}%"
)
# ## 6\. Analysis
# With the data ready, several visualizations will be created to relate the explanatory variables to the response variable, in order to understand which factors lead a client to default.
# To do this, the base with all clients will be compared with the bases of current and defaulting clients.
# Separating current clients from defaulting clients and configuring the visualization package:
sns.set_style("whitegrid")
df_adimplente = df[df["default"] == 0]
df_inadimplente = df[df["default"] == 1]
# ### **6.1. Categorical visualizations**
# In this stage of the analysis, a possible relationship between the response variable **default** and the categorical attributes is visualized.
df.select_dtypes("object").head(n=5)
# #### **6.1.1 Education (escolaridade)**
coluna = "escolaridade"
titulos = [
"Escolaridade dos Clientes",
"Escolaridade dos Clientes Adimplentes",
"Escolaridade dos Clientes Inadimplentes",
]
eixo = 0
max_y = 0
max = df.select_dtypes("object").describe()[coluna]["freq"] * 1.1
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
df_to_plot.sort_values(by=[coluna], inplace=True)
df_to_plot.sort_values(by=[coluna])
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# As the images above show, the proportion of the different education levels stays the same across all clients, current clients and defaulting clients. This is a strong indication that education is not a relevant factor for determining whether a client may default.
# #### **6.1.2 Annual salary (salario_anual)**
coluna = "salario_anual"
titulos = [
"Salário Anual dos Clientes",
"Salário Anual dos Clientes Adimplentes",
"Salário Anual dos Clientes Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
df_to_plot.reset_index(inplace=True, drop=True)
df_to_plot.sort_values(by=[coluna], inplace=True)
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# The analysis of the images above is analogous to the one made for education. The proportion of the different annual salary levels stays the same across all clients, current clients and defaulting clients. This is a strong indication that annual salary is not a relevant factor for determining whether a client may default.
# ### **6.2. Numerical visualizations**
# In this stage of the analysis, a possible relationship between the response variable **default** and the numerical attributes is visualized.
df.select_dtypes("number").head(n=5)
# #### **6.2.1 Number of transactions in the last 12 months**
coluna = "qtd_transacoes_12m"
titulos = [
"Qtd. de Transações no Último Ano",
"Qtd. de Transações no Último Ano de Adimplentes",
"Qtd. de Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# The image above shows that the number of transactions does not behave the same way for the three groups. The defaulting group has a strong concentration around 40 transactions per year, which is not observed among current clients.
# This attribute can therefore be used to define an average profile of clients who may come to default.
# #### **6.2.2 Value of transactions in the last 12 months**
coluna = "valor_transacoes_12m"
titulos = [
"Valor das Transações no Último Ano",
"Valor das Transações no Último Ano de Adimplentes",
"Valor das Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# As with the number of transactions, the value of the transactions does not behave the same way for the three groups. The image above shows that defaulting clients have a strong concentration around a yearly value of 2500.
# This attribute can therefore also be used to define an average profile of clients who may come to default.
# #### **6.2.3 Value of transactions in the last 12 months vs number of transactions in the last 12 months**
# For a better view of the data, a chart comparing the value of transactions in the last 12 months with the number of transactions in the last 12 months will be created.
f = sns.relplot(
x="valor_transacoes_12m", y="qtd_transacoes_12m", data=df, hue="default"
)
_ = f.set(
title="Relação entre Valor e Quantidade de Transações no Último Ano",
xlabel="Valor das Transações no Último Ano",
ylabel="Quantidade das Transações no Último Ano",
)
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/diabetes-dataset/diabetes.csv")
df.head()
df.shape
df.dtypes
df.isnull().sum()
df.duplicated().sum()
df.describe().T
features = df.drop("Outcome", axis=1)
target = df["Outcome"].values
class LogisticRegression:
def __init__(self, learning_rate=0.01, n_iters=1000):
self.learning_rate = learning_rate
self.n_iters = n_iters
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def fit(self, X_train, y_train):
rows, columns = X_train.shape
self.weights = np.random.random(columns)
self.bias = 0
for i in range(1, self.n_iters):
linear_prediction = np.matmul(X_train, self.weights) + self.bias
prediction = self.sigmoid(linear_prediction)
error = prediction - y_train
            # Gradient of the log-loss with respect to the weights and bias
            derivative_weights = np.dot(X_train.T, error) / rows
            derivative_bias = np.sum(error) / rows
            # Gradient descent update
            self.weights = self.weights - (self.learning_rate * derivative_weights)
            self.bias = self.bias - (self.learning_rate * derivative_bias)
if i % 100 == 0:
print(f" epoch : {i} error: {np.mean(error)}")
def predict(self, X_test):
linear_prediction = np.matmul(X_test, self.weights) + self.bias
prediction = self.sigmoid(linear_prediction)
return np.round(prediction)
def score(self, X_test, y_test):
preds = self.predict(X_test)
accuracy = sum(preds == y_test) / len(y_test)
return accuracy
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
features, target, test_size=0.2, random_state=42
)
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
# scaler = MinMaxScaler()
# scaler.fit(X_train)
# X_train = scaler.transform(X_train)
# X_test = scaler.transform(X_test)
# # sklearn implementation
lr = LogisticRegression(max_iter=1000, random_state=42)
lr.fit(X_train, y_train)
lr.score(X_test, y_test)
preds = lr.predict(X_test)
cm = metrics.confusion_matrix(y_test, preds)
sns.heatmap(cm, annot=True)
plt.show()
# precision_pos = 84 / (84+15)
# precision_neg = 32 / 55
# recall = 84 / (84 + 23)
# recall
prec = metrics.precision_score(y_test, preds)
rec = metrics.recall_score(y_test, preds)
f1_score = metrics.f1_score(y_test, preds)
f1_score
print(metrics.classification_report(y_test, preds))
# # Cross Validation
from sklearn.model_selection import cross_val_score
cross_v_score = cross_val_score(lr, features, target, cv=10)
print(cross_v_score)
mean_score = np.mean(cross_v_score)
print(mean_score)
# # Hyperparameter Tuning
# # Pipeline
steps = [
("scaler", MinMaxScaler()),
("pca", PCA(n_components=7)),
("model", LogisticRegression(max_iter=1000)),
]
pipe = Pipeline(steps=steps)
pipe.fit(X_train, y_train)
sum(pipe.predict(X_test) == y_test) / len(y_test)
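# The manual accuracy above should match the pipeline's built-in score, which reports accuracy
# for classifiers; shown here only as a cross-check:
pipe.score(X_test, y_test)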
|
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
import matplotlib.pyplot as plt
from pandas_profiling import ProfileReport
import seaborn as sns
from sklearn import metrics
from scipy import stats
from copy import deepcopy
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
import optuna
from optuna import Trial, visualization
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.metrics import accuracy_score, mean_squared_error
import pandas as pd
import numpy as np
train_df = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv")
test_df = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv")
sub_df = pd.read_csv(
"/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv"
)
train_df.head()
feature_cols = train_df.drop(["id", "target"], axis=1).columns
x = train_df[feature_cols]
y = train_df["target"]
print(x.shape, y.shape)
## Join train and test datasets in order to obtain the same number of features during categorical conversion
train_indexs = train_df.index
test_indexs = test_df.index
df = pd.concat(objs=[train_df, test_df], axis=0).reset_index(drop=True)
df = df.drop("id", axis=1)
len(train_indexs), len(test_indexs)
def objective(trial, data=x, target=y):
train_x, test_x, train_y, test_y = train_test_split(
data, target, test_size=0.15, random_state=42
)
# To select which parameters to optimize, please look at the XGBoost documentation:
# https://xgboost.readthedocs.io/en/latest/parameter.html
param = {
"tree_method": "gpu_hist", # Use GPU acceleration
"lambda": trial.suggest_loguniform("lambda", 1e-3, 10.0),
"alpha": trial.suggest_loguniform("alpha", 1e-3, 10.0),
"colsample_bytree": trial.suggest_categorical(
"colsample_bytree", [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
),
"subsample": trial.suggest_categorical("subsample", [0.6, 0.7, 0.8, 1.0]),
"learning_rate": trial.suggest_categorical(
"learning_rate", [0.008, 0.009, 0.01, 0.012, 0.014, 0.016, 0.018, 0.02]
),
"n_estimators": trial.suggest_categorical(
"n_estimators", [150, 200, 300, 3000]
),
"max_depth": trial.suggest_categorical(
"max_depth", [4, 5, 7, 9, 11, 13, 15, 17]
),
"random_state": 42,
"min_child_weight": trial.suggest_int("min_child_weight", 1, 300),
}
model = XGBRegressor(**param)
model.fit(
train_x,
train_y,
eval_set=[(test_x, test_y)],
early_stopping_rounds=100,
verbose=False,
)
preds = model.predict(test_x)
rmse = mean_squared_error(test_y, preds, squared=False)
return rmse
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=25)
print("Number of finished trials:", len(study.trials))
print("Best trial:", study.best_trial.params)
# finally plot best parameters
study.best_params
best_params = study.best_params
best_params["tree_method"] = "gpu_hist"
best_params["random_state"] = 42
clf = XGBRegressor(**(best_params))
clf.fit(x, y)
preds = pd.Series(clf.predict(test_df.drop("id", axis=1)), name="target")
preds = pd.concat([test_df["id"], preds], axis=1)
preds.head()
preds.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
tf.__version__
# Avoid OOM errors by setting GPU Memory Consumption Growth
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
tf.config.list_physical_devices("GPU")
data_dir = "../input/new-plant-diseases-dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)"
train_dir = data_dir + "/train"
valid_dir = data_dir + "/valid"
diseases = os.listdir(train_dir)
print("Total disease classes are: {}".format(len(diseases)))
plants = []
NumberOfDiseases = 0
for plant in diseases:
if plant.split("___")[0] not in plants:
plants.append(plant.split("___")[0])
if plant.split("___")[1] != "healthy":
NumberOfDiseases += 1
# unique plants in the dataset
print(f"Unique Plants are: \n{plants}")
print(
    f"Total number of plants is {len(plants)} and number of diseases is {NumberOfDiseases}"
)
# Number of images for each disease
nums = {}
for disease in diseases:
nums[disease] = len(os.listdir(train_dir + "/" + disease))
# converting the nums dictionary to pandas dataframe passing index as plant name and number of images as column
img_per_class = pd.DataFrame(
nums.values(), index=nums.keys(), columns=["no. of images"]
)
img_per_class
# plotting number of images available for each disease
index = [n for n in range(38)]
plt.figure(figsize=(20, 5))
plt.bar(index, [n for n in nums.values()], width=0.3)
plt.xlabel("Plants/Diseases", fontsize=10)
plt.ylabel("No of images available", fontsize=10)
plt.xticks(index, diseases, fontsize=5, rotation=90)
plt.title("Images per each class of plant disease")
# ## preparing the data for traininig using Tensorflow
import imghdr
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.metrics import Precision, Recall, BinaryAccuracy
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import SparseCategoricalCrossentropy
# train_data = tf.keras.utils.image_dataset_from_directory('train')
data_dir = "../input/new-plant-diseases-dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)"
train_dir = data_dir + "/train"
valid_dir = data_dir + "/valid"
# Keras automatically walks through all the class folders and reads the image data, which is quite convenient
train_data = tf.keras.utils.image_dataset_from_directory(
"../input/new-plant-diseases-dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)/train"
)
val_data = tf.keras.utils.image_dataset_from_directory(
"../input/new-plant-diseases-dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)/valid"
)
test_data = tf.keras.utils.image_dataset_from_directory(
"../input/new-plant-diseases-dataset/test"
)
classes = 38
train_data  # we have a data iterator
train_data_iterator = train_data.as_numpy_iterator()
train_batch = train_data_iterator.next()
len(train_batch)  # there are two parts to each batch: images and labels
train_batch[0].shape
# 32- images per batch
# 256,256 - rows by columns size
# 3 - layers of r g b
train_batch[1] # these are labels
# checking which lable is for which image
fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
for idx, img in enumerate(train_batch[0][:4]):
ax[idx].imshow(img.astype(int))
ax[idx].title.set_text(train_batch[1][idx])
train_data = train_data.map(lambda x, y: (x / 255, y))
val_data = val_data.map(lambda x, y: (x / 255, y))
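# Pixel values are scaled from [0, 255] to [0, 1]; the min/max checks below confirm the new range.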
val_data.as_numpy_iterator().next()[0].min()
train_data.as_numpy_iterator().next()[0].max()
# we have to form an architecture in the tensorflow
model = Sequential()
""" adding each value to sequential model first layer would be conv and it should be input layer
relu - rectified linear unit activation function
sigmoid - sigma activation function for clear convergence, then densing to single layer
"""
model.add(Conv2D(16, (3, 3), 1, activation="relu", input_shape=(256, 256, 3)))
model.add(MaxPooling2D())
model.add(Conv2D(32, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(16, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dense(38, activation="linear"))
# Adam optimizer with sparse categorical cross-entropy computed on the logits
model.compile(
"adam", loss=SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"]
)
model.summary() # 166,945 if we add extra commented maxpooling with conv2d
# training our neural network
with open("logs", "w") as log:
logdir = os.path.dirname(
"../kaggle/working/logs"
) # creating a pointer for our log directory to save callbacks
import os
tensorboard_callbacks = tf.keras.callbacks.TensorBoard(log_dir=logdir)
hist = model.fit(
train_data, epochs=5, validation_data=val_data, callbacks=[tensorboard_callbacks]
)
fig = plt.figure()
plt.plot(hist.history["loss"], color="teal", label="loss")
plt.plot(hist.history["val_loss"], color="orange", label="val_loss")
fig.suptitle("Loss", fontsize=20)
plt.legend(loc="upper left")
plt.show()
fig = plt.figure()
plt.plot(hist.history["accuracy"], color="teal", label="accuracy")
plt.plot(hist.history["val_accuracy"], color="orange", label="val_accuracy")
fig.suptitle("Accuracy", fontsize=20)
plt.legend(loc="upper left")
plt.show()
# The model is multiclass (38 outputs), so binary Precision/Recall/BinaryAccuracy are not
# meaningful here; sparse categorical accuracy on the normalized test batches is used instead.
# Note that the labels in the test folder may not line up with the 38 training classes.
acc = tf.keras.metrics.SparseCategoricalAccuracy()
for batch in test_data.as_numpy_iterator():
    X, y = batch
    yhat = model.predict(X / 255)
    acc.update_state(y, yhat)
print(acc.result())
# test_img = open("/kaggle/input/new-plant-diseases-dataset/test/test/AppleScab1.JPG",'rb')
from tensorflow.keras.preprocessing import image  # import before use
img_path = "/kaggle/input/new-plant-diseases-dataset/test/test/AppleScab1.JPG"
img = image.load_img(img_path, target_size=(256, 256))  # resize to the model's 256x256 input size
plt.imshow(img)
plt.show()
model.save("imageclassifier.h5")
import pickle
file = open("model.pkl", "wb")
pickle.dump(model, file)
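# The saved HDF5 file can be reloaded later with load_model (already imported above); this is the
# usual way to restore a Keras model, shown here as a small usage example:
restored_model = load_model("imageclassifier.h5")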
|
# Published on April 15, 2023. By Marília Prata, mpwolke.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from geopy.geocoders import Nominatim
import folium
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# https://ontheworldmap.com/slovakia/
df = pd.read_csv("/kaggle/input/slovakia-towns-2021/slovakia_towns_coordinates.csv")
# df = df.set_index(['dep', 'type', 'idx'])
df.head()
# #Missing values
df.isnull().sum()
bra = df[(df["Region"] == "Bratislava Region")].reset_index(drop=True)
bra.head()
# #Bratislava Region Map
# By Saldenisov https://www.kaggle.com/code/saldenisov/prisons-in-france
from geopy import Point
from geopy.distance import distance
# Create a map centered on the coordinates of the first town in the dataframe
m = folium.Map(location=[df.iloc[0]["Lat"], df.iloc[0]["Lon"]], zoom_start=4)
coordinates = set()
# Add markers for each town (loop adapted from the prisons notebook credited above)
for i, row in df.iterrows():
lat = row["Lat"]
long = row["Lon"]
if (lat, long) in coordinates:
# Define the original coordinate
original_coord = Point(lat, long)
# Calculate the new coordinate
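        # The two destination() calls below shift the marker roughly 25 m north (bearing 0) and
        # then 25 m east (bearing 90), so that towns sharing identical coordinates do not overlap.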
new_coord = distance(meters=25).destination(original_coord, 0)
new_coord = distance(meters=25).destination(new_coord, 90)
lat, long = new_coord.latitude, new_coord.longitude
folium.Marker(location=[lat, long], tooltip=row["Region"]).add_to(m)
coordinates.add((lat, long))
# Display the map
m
# Plot of a numeric column (only numeric data can be plotted directly).
df["Population 2019"].plot()
plt.title("Population 2019", fontsize=20)
# Plot of a numeric column (only numeric data can be plotted directly).
df["Population 2001"].plot()
plt.title("Population 2001", fontsize=20)
# by Gabriel Preda https://www.kaggle.com/code/gpreda/electronic-health-records-ehrs-data-exploration
def plot_count(feature, title, df, size=1, ordered=True):
sns.set_theme(style="whitegrid")
f, ax = plt.subplots(1, 1, figsize=(4 * size, 4))
total = float(len(df))
if ordered:
g = sns.countplot(
x=feature,
data=df,
order=df[feature].value_counts().index[:20],
palette="Set3",
)
else:
g = sns.countplot(x=feature, data=df, palette="Set3")
g.set_title("Number and percentage of {}".format(title))
if size > 2:
plt.xticks(rotation=90, size=8)
for p in ax.patches:
height = p.get_height()
ax.text(
p.get_x() + p.get_width() / 2.0,
height + 3,
"{:1.2f}%".format(100 * height / total),
ha="center",
)
plt.show()
plot_count("Region", "Slovakia Regions", df, 4)
# plt.style.use('dark_background')
plt.figure(figsize=(20, 7))
plt.plot(df["Population 2019"], label="Population 2019")
plt.plot(df["Population 2001"], label="Population 2001")
plt.plot(df["Population Change"], label="Population Change")
plt.legend()
# plt.grid()
plt.title("Slovakia Towns")
plt.xticks(df.index, rotation=90)
plt.xlabel("Town index")
plt.ylabel("Population")
plt.show()
|
import numpy as np
import matplotlib.pylab as plt
import matplotlib
import pandas as pd
import seaborn as sns
drug_data = pd.read_csv(
"/kaggle/input/national-survey-on-drug-use-and-health-2021/NSDUH_2021_Tab.txt",
sep="\t",
)
print(drug_data.info())
drug_data.head(10)
# **Choosing the required variables**
filtered_drug_data = drug_data[
[
"GOVTPROG",
"INCOME",
"POVERTY3",
"COUTYP4",
"WRKDPSTYR",
"WRKHADJOB",
"IRSEX",
"IRMARIT",
"CATAG7",
"HEALTH2",
"NEWRACE2",
]
]
filtered_drug_data.head(10)
# **Plotting of Data**
fig, ax = plt.subplots()
age_dictionary = {
"1": "12-13",
"2": "14-15",
"3": "16-17",
"4": "18-20",
"5": "21-25",
"6": "26-34",
"7": "35-Older",
}
ax = (
filtered_drug_data["CATAG7"]
.value_counts()
.sort_index()
.plot(ax=ax, kind="bar", title="Data by Age Group")
)
ax.set_xticklabels(age_dictionary.values(), rotation=360)
ax.set_xlabel("Age Groups")
ax.set_ylabel("Frequency")
fig, ax = plt.subplots()
race_dictionary = {
"1": "White",
"2": "African American",
"3": "Am/AK Native",
"4": "Native HI/Other Pac Isl",
"5": "Asian",
"6": "NonHisp more than one race",
"7": "Hispanic",
}
ax = (
filtered_drug_data["NEWRACE2"]
.value_counts()
.sort_index()
.plot(ax=ax, kind="bar", title="Data by Race")
)
ax.set_xticklabels(race_dictionary.values(), rotation=80)
ax.set_xlabel("Race")
ax.set_ylabel("Frequency")
# **Drug Use Analysis**
filtered_drug_use_data = drug_data[
[
"CIGEVER",
"CIGTRY",
"METHAMEVR",
"METHAMAGE",
"ALCEVER",
"ALCTRY",
"MJEVER",
"MJAGE",
"COCEVER",
"COCAGE",
"CRKEVER",
"CRKAGE",
"HEREVER",
"HERAGE",
"METHAMEVR",
"METHAMAGE",
]
]
filtered_drug_use_data.head(10)
filtered_drug_use_data.info()
# Find how many have used any of the substances mentioned in data set
# confirmed_drug_used = filtered_drug_use_data.query("CIGEVER = 1")
drug_used_correlation = drug_data[
["CIGEVER", "METHAMEVR", "ALCEVER", "MJEVER", "COCEVER", "CRKEVER", "HEREVER"]
]
# change garbage values to zero
for a in drug_used_correlation.columns:
drug_used_correlation.loc[drug_used_correlation[a] > 1, a] = 0
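# Every value other than 1 is recoded to 0, so each *EVER column becomes a binary indicator
# (1 = respondent reported ever using the substance, 0 = otherwise) before computing correlations.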
# # Correlation of values
corr = drug_used_correlation.corr(method="kendall")
corr.style.background_gradient(cmap="coolwarm")
# # People who start drugs when they were minors
# fig, ax = plt.subplots()
first_drug_used_date = drug_data[
["CIGTRY", "METHAMAGE", "ALCTRY", "MJAGE", "COCAGE", "CRKAGE", "HERAGE"]
]
first_drug_used_date.head(10)
under_age = first_drug_used_date.query(
"CIGTRY < 18 | METHAMAGE < 18 | ALCTRY < 18 | MJAGE < 18 | COCAGE < 18 | CRKAGE < 18 | HERAGE < 18"
)
under_age_dictionary = {}
minor_dataFrame = pd.DataFrame(under_age.columns, columns=["Substance"])
minor_count = []
# count the under age cells
for a in under_age.columns:
all_under_age = under_age.filter(items=[a]).query(a + "< 18")
    rows, columns = all_under_age.shape
    minor_count.append(rows)
    # under_age_dictionary['substance'] = a
    # under_age_dictionary['count'] = rows
minor_dataFrame["Count"] = minor_count
# sort the data for better visualization
minor_dataFrame = minor_dataFrame.sort_values(by=["Count"], ascending=False)
print(minor_dataFrame)
fig, ax = plt.subplots()
ax = minor_dataFrame.plot(ax=ax, kind="bar", title="Substance Users as a Minor")
ax.set_xticklabels(minor_dataFrame["Substance"], rotation=80)
ax.set_xlabel("Substance")
ax.set_ylabel("Count")
ax.xaxis.label.set_color("blue")  # set the X-axis label color to blue
ax.yaxis.label.set_color("blue")
# # Education
# By 'school' we mean elementary school, junior high or middle school, high school, or a college or university.
# Please include home schooling as well. Do you go to school?
education_data = drug_data[
["ENRLCOLLST2", "EDUSCHLGO", "EDUSCHGRD2", "EDUFULPAR", "CATAGE"]
]
education_data.head(10)
# plotting data
print(type(education_data))
school_data = education_data[["EDUSCHLGO", "CATAGE"]].query(
"EDUSCHLGO != 85 & EDUSCHLGO != 94 & CATAGE == 3"
)
plot_data = school_data.value_counts()
plot_data = school_data.groupby(["EDUSCHLGO"]).size().reset_index(name="count")
# sort values for graph
plot_order = plot_data.sort_values(by="count", ascending=False).EDUSCHLGO.values
# sort values for dataframe(since seaborn plot dont follow the order of the dataframe)
plot_data = plot_data.sort_values(by=["count"], ascending=False)
print(plot_data.head())
# want to highlight people who are going to school ( 1== yes)
colors = ["cadetblue" if (x == 1) else "orange" for x in plot_data.EDUSCHLGO]
print(colors)
plot = sns.barplot(
x=plot_data["EDUSCHLGO"], y=plot_data["count"], palette=colors, order=plot_order
)
plot.set_xticklabels(["No", "Yes", "Uncertain", "Refused", "NO ANSWER"], rotation=0)
plot.set_title("Which underaged (12-17) are going to School")
# Need to figure out how education was affected by recent drug use
#
# Finding people aged 12-17 who are not going to school and used at least one substance in the past 30 days
# IRMJFM - marijuana, IRALCFM - alcohol, IRCRKFM - crack, IRCOCFM - cocaine, IRHALLUC30N - hallucinogens, IRINHAL30N - inhalants, IRMETHAM30N - meth
# IRTRQNM30FQ - tranquilizers, IRPNRNM30FQ - pain relievers, IRSEDNM30FQ - sedatives, IRSTMNM30FQ - stimulants
# Removed some school-attendance status codes due to outliers
minor_recent_drug_used = drug_data[
[
"IRMJFM",
"IRALCFM",
"IRCRKFM",
"IRHALLUC30N",
"IRINHAL30N",
"IRMETHAM30N",
"IRTRQNM30FQ",
"IRPNRNM30FQ",
"IRSEDNM30FQ",
"IRSTMNM30FQ",
"EDUSCHLGO",
"CATAGE",
"IRCOCFM",
]
].query(
"(EDUSCHLGO == 1 | EDUSCHLGO == 2 | EDUSCHLGO == 98) & CATAGE == 1 & (IRMJFM <= 30 | IRALCFM <= 30 | IRCOCFM <= 30 | IRCRKFM <= 30 | IRHALLUC30N <= 30 | IRINHAL30N <= 30 | IRMETHAM30N <= 30 | IRTRQNM30FQ <= 30 | IRPNRNM30FQ <= 30 | IRSEDNM30FQ <= 30 | IRSTMNM30FQ <= 30)"
)
minor_recent_drug_used = minor_recent_drug_used.drop(["CATAGE"], axis=1)
print(type(minor_recent_drug_used))
# find recently used
print(minor_recent_drug_used)
def findSubstanceFrequency(drugs_array):
for drug in drugs_array:
if drug <= 30:
return drug
# get the index from matching element in the series
def findSubstanceName(drugs_array):
for drug in drugs_array:
if drug <= 30:
return drugs_array[drugs_array == drug].index[0]
# Creating two columns to summarize the substance use in last 30 days
minor_recent_drug_used["used_frquency"] = minor_recent_drug_used.apply(
lambda x: findSubstanceFrequency(x), axis=1
)
minor_recent_drug_used["used_drug"] = minor_recent_drug_used.apply(
lambda x: findSubstanceName(x), axis=1
)
print(minor_recent_drug_used.head(10))
# drop unwanted columns
thisFilter = minor_recent_drug_used.filter(
[
"IRMJFM",
"IRALCFM",
"IRCRKFM",
"IRHALLUC30N",
"IRINHAL30N",
"IRMETHAM30N",
"IRTRQNM30FQ",
"IRPNRNM30FQ",
"IRSEDNM30FQ",
"IRSTMNM30FQ",
"IRCOCFM",
]
)
minor_recent_drug_used.drop(thisFilter, inplace=True, axis=1)
# minor_recent_drug_used = minor_recent_drug_used.drop(, axis = 1)
print(minor_recent_drug_used.head(10))
# WRKDPSTWK Len : 2 WORK AT JOB LAST WEEK
# PDEN10 Len : 1 POPULATION DENSITY 2010 - THREE LEVELS
# group_by = minor_recent_drug_used.groupby(['EDUSCHLGO', 'IRMJFM', 'IRALCFM', 'IRCRKFM', 'IRHALLUC30N', 'IRINHAL30N', 'IRMETHAM30N',
# 'IRTRQNM30FQ','IRPNRNM30FQ','IRSEDNM30FQ', 'IRSTMNM30FQ', 'IRCOCFM']).size().reset_index(name='count')
# Group by
print(minor_recent_drug_used.info())
minor_recent_drug_used = (
minor_recent_drug_used.groupby(["EDUSCHLGO", "used_drug"])
.size()
.reset_index(name="count")
)
print(minor_recent_drug_used.head(10))
# create pivot table
pivot = pd.pivot_table(
data=minor_recent_drug_used,
index=["used_drug"],
columns=["EDUSCHLGO"],
values="count",
)
# renaming column names
pivot.columns = ["Attending", "Not Attending", "No Answer"]
print(pivot)
# pivot.columns = ['Population > Million', 'Popultion < Million', 'Not in a Segment']
# print(pivot.columns)
ax = pivot.plot(
kind="bar", title="Last 30 days substance use of minors Vs attending school"
)
ax.set_xticklabels(
[
"Alcohol",
"Hallucination",
"Inhalents",
"Meth",
"Marujana",
"PAIN RELIEVERS",
"SEDATIVES",
"STIMULANTS",
"TRANQUILIZERS",
],
rotation=75,
)
ax.set_xlabel("substance used")
ax.set_ylabel("Count")
# # Employment Vs GeoGraphic
employment_data = drug_data[
[
"WRKSTATWK2",
"WRKDPSTWK",
"WRKHADJOB",
"COUTYP4",
"WRKDPSTYR",
"WRKHADJOB",
"IRSEX",
"IRMARIT",
"CATAG7",
"HEALTH2",
"NEWRACE2",
]
]
employment_data_plot = drug_data[["WRKDPSTWK", "PDEN10"]].query(
"WRKDPSTWK == 1 | WRKDPSTWK == 2 | WRKDPSTWK == 98 | WRKDPSTWK == 99"
)
print(employment_data_plot.head(10))
# employment_data_plot['WRKDPSTWK'].unique()
# employment_data_plot['PDEN10'].unique()
group_by = (
employment_data_plot.groupby(["WRKDPSTWK", "PDEN10"])
.size()
.reset_index(name="count")
)
print(group_by.head(10))
pivot = pd.pivot_table(
data=group_by, index=["WRKDPSTWK"], columns=["PDEN10"], values="count"
)
print(pivot)
pivot.columns = ["Population > Million", "Popultion < Million", "Not in a Segment"]
print(pivot.columns)
# WRKDPSTWK Len : 2 WORK AT JOB LAST WEEK
# PDEN10 Len : 1 POPULATION DENSITY 2010 - THREE LEVELS
ax = pivot.plot(kind="bar", title="Work At Last Week")
ax.set_xticklabels(["Yes", "No", "NO ANSWER", "SKIP"], rotation=0)
ax.set_xlabel("Work At Last Week")
ax.set_ylabel("Count")
# # Social Environment
#
# religious data
# SNRLGSVC1 Len : 2 PAST 12 MOS, HOW MANY RELIG. SERVICES
mental_health_data = drug_data[["SNRLGSVC"]]
# # Mental Health
mental_health_data = drug_data[["SNRLGSVC"]]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Importing Libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
log_loss,
roc_auc_score,
precision_score,
f1_score,
recall_score,
roc_curve,
auc,
)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
fbeta_score,
matthews_corrcoef,
)
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
# machine learning algorithms
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (
RandomForestClassifier,
VotingClassifier,
AdaBoostClassifier,
GradientBoostingClassifier,
RandomForestClassifier,
ExtraTreesClassifier,
)
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("/kaggle/input/heart-disease-statlog/Heart_disease_statlog.csv")
df.info()
df.describe()
df.columns
df.columns = [
"age",
"sex",
"chest_pain_type",
"resting_blood_pressure",
"cholesterol",
"fasting_blood_sugar",
"rest_ecg",
"max_heart_rate_achieved",
"exercise_induced_angina",
"st_depression",
"st_slope",
"ca",
"thalassemia",
"target",
]
df.head()
df["chest_pain_type"][df["chest_pain_type"] == 0] = "typical angina "
df["chest_pain_type"][df["chest_pain_type"] == 1] = "atypical angina"
df["chest_pain_type"][df["chest_pain_type"] == 2] = "non-angina pain"
df["chest_pain_type"][df["chest_pain_type"] == 3] = "asymptomatic"
df["rest_ecg"][df["rest_ecg"] == 0] = "normal"
df["rest_ecg"][df["rest_ecg"] == 1] = "Abnormality in ST-T wave"
df["rest_ecg"][df["rest_ecg"] == 2] = "left ventricular hypertrophy"
df["st_slope"][df["st_slope"] == 0] = "upsloping"
df["st_slope"][df["st_slope"] == 1] = "flat"
df["st_slope"][df["st_slope"] == 2] = "downsloping"
df["thalassemia"][df["thalassemia"] == 0] = "null"
df["thalassemia"][df["thalassemia"] == 1] = "fixed defect"
df["thalassemia"][df["thalassemia"] == 2] = "normal blood flow"
df["thalassemia"][df["thalassemia"] == 3] = "reversible defect"
df["sex"] = df.sex.apply(lambda x: "male" if x == 1 else "female")
df.head()
df["rest_ecg"].value_counts()
df["chest_pain_type"].value_counts()
df["thalassemia"].value_counts()
df.isna().sum()
sns.pairplot(df, hue="target", palette="mako")
df["target"].value_counts().plot.pie(
x="Heart disease",
y="no.of patients",
autopct="%1.0f%%",
labels=["Normal", "Heart Disease"],
startangle=60,
colors=sns.color_palette("crest"),
)
plt.bar(df["sex"], df["target"], color="green")
plt.bar(df["chest_pain_type"], df["target"], color="red")
plt.bar(df["rest_ecg"], df["target"], color="yellow")
plt.pie(df["ca"])
plt.show()
sns.pairplot(df, hue="target", palette="mako")
sns.scatterplot(x="resting_blood_pressure", y="cholesterol", hue="target", data=df)
X = df.drop(["target"], axis=1)
y = df["target"]
X.corrwith(y).plot.bar(
figsize=(12, 4), title="Correlation with Diabetes", fontsize=12, rot=90, grid=True
)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=9)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train[
[
"age",
"resting_blood_pressure",
"cholesterol",
"max_heart_rate_achieved",
"st_depression",
]
] = scaler.fit_transform(
X_train[
[
"age",
"resting_blood_pressure",
"cholesterol",
"max_heart_rate_achieved",
"st_depression",
]
]
)
X_train.head()
X_test[
[
"age",
"resting_blood_pressure",
"cholesterol",
"max_heart_rate_achieved",
"st_depression",
]
] = scaler.transform(
X_test[
[
"age",
"resting_blood_pressure",
"cholesterol",
"max_heart_rate_achieved",
"st_depression",
]
]
)
X_test.head()
from sklearn import model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
# function initializing baseline machine learning models
def GetBasedModel():
basedModels = []
basedModels.append(("LR_L2", LogisticRegression(penalty="l2")))
basedModels.append(("KNN7", KNeighborsClassifier(7)))
basedModels.append(("KNN5", KNeighborsClassifier(5)))
basedModels.append(("KNN9", KNeighborsClassifier(9)))
basedModels.append(("KNN11", KNeighborsClassifier(11)))
basedModels.append(("CART", DecisionTreeClassifier()))
basedModels.append(
("SVM Linear", SVC(kernel="linear", gamma="auto", probability=True))
)
basedModels.append(("SVM RBF", SVC(kernel="rbf", gamma="auto", probability=True)))
basedModels.append(
("RF_Ent100", RandomForestClassifier(criterion="entropy", n_estimators=100))
)
basedModels.append(("ET100", ExtraTreesClassifier(n_estimators=100)))
basedModels.append(("MLP", MLPClassifier()))
basedModels.append(("SGD3000", SGDClassifier(max_iter=1000, tol=1e-4)))
return basedModels
# function for performing 10-fold cross validation of all the baseline models
def BasedLine2(X_train, y_train, models):
# Test options and evaluation metric
num_folds = 10
scoring = "accuracy"
seed = 7
results = []
names = []
for name, model in models:
kfold = model_selection.KFold(n_splits=10)
cv_results = model_selection.cross_val_score(
model, X_train, y_train, cv=kfold, scoring=scoring
)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
    return names, results
models = GetBasedModel()
names, results = BasedLine2(X_train, y_train, models)
|
# In this competition I finished in 49th place (score 1.7061886202); the full solution is available on GitHub: https://github.com/SorokinMaksimArtemovich/MTS-ML-CUP
# In this notebook I preprocess the initial data, namely:
# - region_name
# - city_name
# - cpe_manufacturer_name
# - cpe_model_name
# - cpe_type_cd
# - cpe_model_os_type
# - date
# - part_of_day
import sys
import os
import warnings
os.environ["OPENBLAS_NUM_THREADS"] = "1"
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import gc
import time
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
import pickle
import scipy
import implicit
import bisect
import sklearn.metrics as m
from catboost import CatBoostClassifier, CatBoostRegressor, Pool
from sklearn.model_selection import train_test_split
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
import implicit
data = pd.read_feather("/kaggle/input/mts-ml-cookies/dataset_full.feather")
data = pa.Table.from_pandas(data)
# # Baseline
# Baseline provided by the competition organizers
data_agg = (
data.select(["user_id", "url_host", "request_cnt"])
.group_by(["user_id", "url_host"])
.aggregate([("request_cnt", "sum")])
)
url_set = set(data_agg.select(["url_host"]).to_pandas()["url_host"])
print(f"{len(url_set)} urls")
url_dict = {url: idurl for url, idurl in zip(url_set, range(len(url_set)))}
usr_set = set(data_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
values = np.array(data_agg.select(["request_cnt_sum"]).to_pandas()["request_cnt_sum"])
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(data_agg.select(["url_host"]).to_pandas()["url_host"].map(url_dict))
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
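# Rows of the sparse matrix are users and columns are url hosts, with summed request counts as
# values; the ALS item factors extracted below are indexed by these rows, which is why the
# embedding index is mapped back to user_id before saving.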
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=100,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
nlist=400,
nprobe=20,
)
als.fit(mat)
d_factors = als.item_factors
u_factors = als.user_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
bace_emb = pd.DataFrame(d_factors)
bace_emb["user_id"] = bace_emb.index.map(inv_usr_map)
bace_emb.to_csv("df_bace.csv", index=False)
del data_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del bace_emb
gc.collect()
def save(obj, path, verbose=True):
if verbose:
print("Saving object to {}".format(path))
with open(path, "wb") as obj_file:
pickle.dump(obj, obj_file, protocol=pickle.HIGHEST_PROTOCOL)
if verbose:
print("Object saved to {}".format(path))
pass
url_factor_features_dict = {}
url_factor_features_dict["feature_names"] = []
for i in range(len(u_factors[0])):
url_factor_features_dict["feature_names"].append("url_factor_{}".format(i))
uniq_urls = list(
sorted(list(set(data.select(["url_host"]).to_pandas()["url_host"].values)))
)
url_id_dict = {}
for i in tqdm(range(len(uniq_urls)), desc="Building url --> id mapping"):
url_id_dict[uniq_urls[i]] = i
inverted_url_id_dict = {v: k for k, v in url_id_dict.items()}
for i in tqdm(range(len(u_factors)), desc="Building url factor features dict"):
url_id = inverted_url_id_dict[i]
url_factor_features = u_factors[i]
url_factor_features_dict[url_id] = url_factor_features
save(url_factor_features_dict, "url_only_factor_features_dict.pkl")
# # ALS
# ## Number of distinct days each site was visited
data_agg = (
pa.Table.from_pandas(
data.select(["user_id", "url_host", "date"]).to_pandas().drop_duplicates()
)
.group_by(["user_id", "url_host"])
.aggregate([("date", "count")])
)
values = np.array(data_agg.select(["date_count"]).to_pandas()["date_count"])
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(data_agg.select(["url_host"]).to_pandas()["url_host"].map(url_dict))
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
date_emb = pd.DataFrame(d_factors)
date_emb["user_id"] = date_emb.index.map(inv_usr_map)
date_emb.to_csv("date_emb.csv", index=False)
del data_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del date_emb
del url_set
del url_dict
del usr_set
del usr_dict
gc.collect()
# ## Region
region_agg = (
data.select(["user_id", "region_name", "request_cnt"])
.group_by(["user_id", "region_name"])
.aggregate([("request_cnt", "count")])
)
usr_set = set(region_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
region_set = set(region_agg.select(["region_name"]).to_pandas()["region_name"])
print(f"{len(region_set)} regions")
region_dict = {url: idurl for url, idurl in zip(region_set, range(len(region_set)))}
values = np.array(
region_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(region_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
region_agg.select(["region_name"]).to_pandas()["region_name"].map(region_dict)
)
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
region_emb = pd.DataFrame(d_factors)
region_emb["user_id"] = region_emb.index.map(inv_usr_map)
region_emb.to_csv("region_emb.csv", index=False)
del region_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del region_emb
del region_set
del region_dict
gc.collect()
# ## City
city_agg = (
data.select(["user_id", "region_name", "city_name", "request_cnt"])
.group_by(["user_id", "region_name", "city_name"])
.aggregate([("request_cnt", "count")])
)
city_agg = city_agg.to_pandas()
city_agg["city_name"] = (
city_agg["region_name"].astype("string")
+ " "
+ city_agg["city_name"].astype("string")
)
city_agg = city_agg.drop("region_name", axis=1)
city_agg
city_agg = pa.Table.from_pandas(city_agg)
city_set = set(city_agg.select(["city_name"]).to_pandas()["city_name"])
print(f"{len(city_set)} cities")
city_dict = {url: idurl for url, idurl in zip(city_set, range(len(city_set)))}
values = np.array(
city_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(city_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(city_agg.select(["city_name"]).to_pandas()["city_name"].map(city_dict))
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
city_emb = pd.DataFrame(d_factors)
city_emb["user_id"] = city_emb.index.map(inv_usr_map)
city_emb.to_csv("city_emb.csv", index=False)
del city_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del city_emb
del city_set
del city_dict
gc.collect()
# ## Model
model_agg = (
data.select(["user_id", "cpe_model_name", "request_cnt"])
.group_by(["user_id", "cpe_model_name"])
.aggregate([("request_cnt", "count")])
)
model_set = set(model_agg.select(["cpe_model_name"]).to_pandas()["cpe_model_name"])
print(f"{len(model_set)} cities")
model_dict = {url: idurl for url, idurl in zip(model_set, range(len(model_set)))}
values = np.array(
model_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(model_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
model_agg.select(["cpe_model_name"]).to_pandas()["cpe_model_name"].map(model_dict)
)
mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
)
als = implicit.approximate_als.FaissAlternatingLeastSquares(
factors=50,
iterations=30,
use_gpu=False,
calculate_training_loss=False,
regularization=0.1,
)
als.fit(mat)
u_factors = als.user_factors
d_factors = als.item_factors
inv_usr_map = {v: k for k, v in usr_dict.items()}
model_emb = pd.DataFrame(d_factors)
model_emb["user_id"] = model_emb.index.map(inv_usr_map)
model_emb.to_csv("model_emb.csv", index=False)
del model_agg
del values
del rows
del cols
del mat
del als
del d_factors
del inv_usr_map
del model_emb
del model_set
del model_dict
del usr_set
del usr_dict
gc.collect()
# # Matrix
# ## PartOfDay
data_agg = (
data.select(["user_id", "part_of_day", "request_cnt"])
.group_by(["user_id", "part_of_day"])
.aggregate([("request_cnt", "sum"), ("request_cnt", "count")])
)
usr_set = set(data_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
region_set = set(data_agg.select(["part_of_day"]).to_pandas()["part_of_day"])
print(f"{len(region_set)} part_of_days")
region_dict = {url: idurl for url, idurl in zip(region_set, range(len(region_set)))}
values = np.array(
data_agg.select(["request_cnt_count"]).to_pandas()["request_cnt_count"]
)
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
data_agg.select(["part_of_day"]).to_pandas()["part_of_day"].map(region_dict)
)
count_mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
).toarray()
usr_set = set(data_agg.select(["user_id"]).to_pandas()["user_id"])
print(f"{len(usr_set)} users")
usr_dict = {usr: user_id for usr, user_id in zip(usr_set, range(len(usr_set)))}
region_set = set(data_agg.select(["part_of_day"]).to_pandas()["part_of_day"])
print(f"{len(region_set)} part_of_days")
region_dict = {url: idurl for url, idurl in zip(region_set, range(len(region_set)))}
values = np.array(data_agg.select(["request_cnt_sum"]).to_pandas()["request_cnt_sum"])
rows = np.array(data_agg.select(["user_id"]).to_pandas()["user_id"].map(usr_dict))
cols = np.array(
data_agg.select(["part_of_day"]).to_pandas()["part_of_day"].map(region_dict)
)
sum_mat = scipy.sparse.coo_matrix(
(values, (rows, cols)), shape=(rows.max() + 1, cols.max() + 1)
).toarray()
inv_usr_map = {v: k for k, v in usr_dict.items()}
count_mat = pd.DataFrame(count_mat)
count_mat["user_id"] = count_mat.index.map(inv_usr_map)
sum_mat = pd.DataFrame(sum_mat)
sum_mat["user_id"] = sum_mat.index.map(inv_usr_map)
count_mat = count_mat.merge(
sum_mat, on="user_id", how="inner", suffixes=("count", "sum")
)
count_mat.to_csv("part_of_day.csv", index=False)
del data_agg
del values
del rows
del cols
del sum_mat
del count_mat
del inv_usr_map
del region_set
del region_dict
del usr_set
del usr_dict
gc.collect()
# # AGG
# ### date
date_agg = (
data.select(["user_id", "date", "request_cnt"])
.group_by(["user_id", "date"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("date", "count"),
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
date_agg.to_pandas().to_csv("date_agg.csv", index=False)
del date_agg
gc.collect()
# ### url_host
url_agg = (
data.select(["user_id", "url_host", "request_cnt"])
.group_by(["user_id", "url_host"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
url_agg.to_pandas().to_csv("url_agg.csv", index=False)
del url_agg
gc.collect()
# ### region
region_agg = (
data.select(["user_id", "region_name", "request_cnt"])
.group_by(["user_id", "region_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
region_agg.to_pandas().to_csv("region_agg.csv", index=False)
del region_agg
gc.collect()
# ### city
city_agg = (
data.select(["user_id", "region_name", "city_name", "request_cnt"])
.group_by(["user_id", "region_name", "city_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
city_agg.to_pandas().to_csv("city_agg.csv", index=False)
del city_agg
gc.collect()
# ### model
model_agg = (
data.select(["user_id", "cpe_manufacturer_name", "cpe_model_name", "request_cnt"])
.group_by(["user_id", "cpe_manufacturer_name", "cpe_model_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
model_agg.to_pandas().to_csv("model_agg.csv", index=False)
del model_agg
gc.collect()
# ### manufacturer
manuf_agg = (
data.select(["user_id", "cpe_manufacturer_name", "request_cnt"])
.group_by(["user_id", "cpe_manufacturer_name"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
manuf_agg.to_pandas().to_csv("manuf_agg.csv", index=False)
del manuf_agg
gc.collect()
# ### cpe_type
cpe_agg = (
data.select(["user_id", "cpe_type_cd", "request_cnt"])
.group_by(["user_id", "cpe_type_cd"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
cpe_agg.to_pandas().to_csv("cpe_agg.csv", index=False)
del cpe_agg
gc.collect()
# ### os_type
os_agg = (
data.select(["user_id", "cpe_model_os_type", "request_cnt"])
.group_by(["user_id", "cpe_model_os_type"])
.aggregate([("request_cnt", "count"), ("request_cnt", "sum")])
.group_by(["user_id"])
.aggregate(
[
("request_cnt_count", "mean"),
("request_cnt_count", "approximate_median"),
("request_cnt_count", "min"),
("request_cnt_count", "max"),
("request_cnt_count", "stddev"),
("request_cnt_sum", "mean"),
("request_cnt_sum", "approximate_median"),
("request_cnt_sum", "min"),
("request_cnt_sum", "max"),
("request_cnt_sum", "stddev"),
]
)
)
os_agg.to_pandas().to_csv("os_agg.csv", index=False)
del os_agg
gc.collect()
|
# ### Introduction
# - Making plots and static or interactive visualizations is one of the most important tasks in data analysis. It may be part of the exploratory process; for example, helping identify outliers, finding needed data transformations, or coming up with ideas for models.
# - Matplotlib is the most extensively used Python library for data visualization due to the high flexibility and extensive functionality it provides.
# ### Table of Contents
# 1. Setting up
# - Importing matplotlib
# - Matplotlib for Jupyter notebook
# - Dataset
# - Documentation
# 2. Matplotlib basics
# - Make a simple plot
# - Labels, and Legends
# - Size, Colors, Markers, and Line Styles
# - Figures and subplots
# 3. Line Chart
# 4. Bar Chart
# 5. Histogram
# 6. Box plot
# 7. Violin plot
# 8. Scatter plot
# 9. Bubble plot
# ### 1. Setting up
# #### Importing matplotlib
# Just as we use the `np` shorthand for NumPy and the `pd` shorthand for Pandas, we will use standard shorthands for Matplotlib import:
# ```python
# import matplotlib.pyplot as plt
# ```
# We import the **pyplot** interface of matplotlib with a shorthand of `plt` and we will be using it like this in the entire notebook.
# #### Matplotlib for Jupyter notebook
# You can directly use matplotlib with this notebook to create different visualizations in the notebook itself. In order to do that, the following command is used:
# ```python
# %matplotlib inline
# ```
# #### Documentation
# All the functions covered in this notebook and their detail description can be found in the [official matplotlib documentation](https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.html).
# importing required libraries
import numpy as np
import pandas as pd
# importing matplotlib
import matplotlib.pyplot as plt
# display plots in the notebook itself (this is where the %matplotlib inline magic goes in the notebook)
# ### 2. Matplotlib basics
# #### Make a simple plot
# Let's create a basic plot to start working with!
height = [150, 160, 165, 185]
weight = [70, 80, 90, 100]
# draw the plot
plt.plot(height, weight)
# We pass two arrays as input arguments to the **plot()** method to produce the required plot. Note that the first array appears on the x-axis and the second array appears on the y-axis of the plot.
# #### Title, Labels, and Legends
# - Now that our first plot is ready, let us add a title and name the x-axis and y-axis using the title(), xlabel() and ylabel() methods respectively.
#
# draw the plot
plt.plot(height, weight)
# add title
plt.title("Relationship between height and weight")
# label x axis
plt.xlabel("Height")
# label y axis
plt.ylabel("Weight")
calories_burnt = [65, 75, 95, 99]
# draw the plot for calories burnt
plt.plot(calories_burnt)
# draw the plot for weight
plt.plot(weight)
# - Adding **legends** is also simple in matplotlib: you can use `legend()`, which takes **labels** (the label names) and **loc** (the location of the legend in the figure) as parameters.
# draw the plot for calories burnt
plt.plot(calories_burnt)
# draw the plot for weight
plt.plot(weight)
# add legend in the lower right part of the figure
plt.legend(labels=["Calories Burnt", "Weight"], loc="lower right")
# - Notice that in the previous plot, we are not able to tell that each of these values belongs to a different person.
# - Look at the X axis: can we add labels to show that each value belongs to a different person?
# - The labeled values on an axis are known as **ticks**.
# - You can use `xticks` to change both the location of each tick and its label. Let's see this in an example
# draw the plot
plt.plot(calories_burnt)
plt.plot(weight)
# add legend in the lower right part of the figure
plt.legend(labels=["Calories Burnt", "Weight"], loc="lower right")
# set labels for each of these persons
plt.xticks(ticks=[0, 1, 2, 3], labels=["p1", "p2", "p3", "p4"])
# #### Size, Colors, Markers and Line styles
# - You can also specify the size of the figure using the `figure()` method and passing the width and height as a tuple to the figsize argument.
# - These values are interpreted in **inches**.
# figure size in inches
plt.figure(figsize=(15, 5))
# draw the plot
plt.plot(calories_burnt)
plt.plot(weight)
# add legend in the lower right part of the figure
plt.legend(labels=["Calories Burnt", "Weight"], loc="lower right")
# set labels for each of these persons
plt.xticks(ticks=[0, 1, 2, 3], labels=["p1", "p2", "p3", "p4"])
# - With every X and Y argument, you can also pass an optional third argument in the form of a string that indicates the colour and line type of the plot.
# - The default format is `b-`, which means a **solid blue line**. In the figure below we use `y--`, which means a **yellow dashed line**. Likewise, we can make many such combinations to format our plot.
# draw the plot
plt.plot(calories_burnt)
plt.plot(weight, "y--")
# add legend in the lower right part of the figure
plt.legend(labels=["Calories Burnt", "Weight"], loc="lower right")
# set labels for each of these persons
plt.xticks(ticks=[0, 1, 2, 3], labels=["p1", "p2", "p3", "p4"])
# - We can also plot multiple sets of data by passing multiple sets of X and Y arguments to the `plot()` method, as shown in the sketch below.
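# - A minimal sketch of this multi-set form, reusing the `calories_burnt` and `weight` lists defined above (the x values are just the person indices 0-3):
# x1, y1, format1, x2, y2, format2 in a single plot() call
plt.plot(range(4), calories_burnt, "g-", range(4), weight, "b--")
plt.legend(labels=["Calories Burnt", "Weight"], loc="lower right")
plt.xticks(ticks=[0, 1, 2, 3], labels=["p1", "p2", "p3", "p4"])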
# #### Figure and subplots
# - We can use the `subplots()` method to add more than one plot in a single figure.
# - The `subplots()` method takes two arguments, **nrows** and **ncols**, which indicate the number of rows and the number of columns respectively.
# - This method creates two objects: **figure** and **axes** which we store in variables `fig` and `ax`.
# - You plot each figure by specifying its position using row index and column index. Let's have a look at the below example:
# create 2 plots
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(6, 6))
# plot on 0 row and 0 column
ax[0, 0].plot(calories_burnt, "go")
# plot on 0 row and 1 column
ax[0, 1].plot(weight)
# set titles for subplots
ax[0, 0].set_title("Calories Burnt")
ax[0, 1].set_title("Weight")
# set ticks for each of these persons
ax[0, 0].set_xticks(ticks=[0, 1, 2, 3])
ax[0, 1].set_xticks(ticks=[0, 1, 2, 3])
# set labels for each of these persons
ax[0, 0].set_xticklabels(labels=["p1", "p2", "p3", "p4"])
ax[0, 1].set_xticklabels(labels=["p1", "p2", "p3", "p4"])
# - Notice that in the above figure we have two empty plots; that is because we created 4 subplots (2 rows and 2 columns) but only drew on two of them.
# - As a data scientist, there will be times when you need to have a common axis for all your subplots. You can do this by using the **sharex** and **sharey** parameters of `subplots()`.
# create 2 plots
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(6, 6), sharex=True, sharey=True)
# plot on 0 row and 0 column
ax[0].plot(calories_burnt, "go")
# plot on 0 row and 1 column
ax[1].plot(weight)
# set titles for subplots
ax[0].set_title("Calories Burnt")
ax[1].set_title("Weight")
# set ticks for each of these persons
ax[0].set_xticks(ticks=[0, 1, 2, 3])
ax[1].set_xticks(ticks=[0, 1, 2, 3])
# set labels for each of these persons
ax[0].set_xticklabels(labels=["p1", "p2", "p3", "p4"])
ax[1].set_xticklabels(labels=["p1", "p2", "p3", "p4"])
# - Notice in the above plot, now both x and y axes are only labelled once for each of the outer plots. This is because the inner plots "share" both the axes.
# - Also, there are only **two plots**, since we decreased the number of rows to 1 and columns to 2 in `subplots()`.
# - You can learn more about [subplots here](https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.subplots.html).
# ### Load dataset
# Let's load a dataset and have a look at first 5 rows.
# read the dataset
data_BM = pd.read_csv("../input/big-mart-sales/train_v9rqX0R.csv")
# drop the null values
data_BM = data_BM.dropna(how="any")
# view the top results
data_BM.head()
# ### 3. Line Chart
# - We will create a line chart to denote the **mean price per item**. Let's have a look at the code.
# - With some datasets, you may want to understand changes in one variable as a function of time, or a similarly continuous variable.
# - In matplotlib, **line chart** is the default plot when using the `plot()`.
price_by_item = data_BM.groupby("Item_Type").Item_MRP.mean()[:10]
price_by_item
# mean price based on item type
price_by_item = data_BM.groupby("Item_Type").Item_MRP.mean()[:10]
x = price_by_item.index.tolist()
y = price_by_item.values.tolist()
# set figure size
plt.figure(figsize=(14, 8))
# set title
plt.title("Mean price for each item type")
# set axis labels
plt.xlabel("Item Type")
plt.ylabel("Mean Price")
# set xticks
plt.xticks(labels=x, ticks=np.arange(len(x)))
plt.plot(x, y)
# ### 4. Bar Chart
# - Suppose we want to have a look at **the mean sales for each outlet size.**
# - A bar chart is another simple type of visualization that is used for categorical variables.
# - You can use `plt.bar()` instead of `plt.plot()` to create a bar chart.
#
# sales by outlet size
sales_by_outlet_size = data_BM.groupby("Outlet_Size").Item_Outlet_Sales.mean()
# sort by sales
sales_by_outlet_size.sort_values(inplace=True)
x = sales_by_outlet_size.index.tolist()
y = sales_by_outlet_size.values.tolist()
# set axis labels
plt.xlabel("Outlet Size")
plt.ylabel("Sales")
# set title
plt.title("Mean sales for each outlet type")
# set xticks
plt.xticks(labels=x, ticks=np.arange(len(x)))
plt.bar(x, y, color=["red", "orange", "magenta"])
# ### 5. Histogram
# - **Distribution of Item price**
# - Histograms are a very common type of plot when we are looking at data like height and weight, stock prices, waiting time for a customer, etc., which are continuous in nature.
# - In a histogram, the data is grouped into ranges (bins) and plotted against the frequency of each range.
# - Histograms occur very commonly in probability and statistics and form the basis for various distributions like the normal distribution, t-distribution, etc.
# - You can use `plt.hist()` to draw a histogram. It provides many parameters to adjust the plot, you can [explore more here](https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.hist.html).
# title
plt.title("Item MRP (price) distribution")
# xlabel
plt.xlabel("Item_MRP")
# ylabel
plt.ylabel("Frequency")
# plot histogram
plt.hist(data_BM["Item_MRP"], bins=20, color="lightblue")
# ### 6. Box Plots
# - **Distribution of sales**
# - Box plot shows the three quartile values of the distribution along with extreme values.
# - The “whiskers” extend to points that lie within 1.5 IQRs of the lower and upper quartile, and observations that fall outside this range are displayed independently (a quick numeric check of these bounds follows the boxplot below).
# - This means that each value in the boxplot corresponds to an actual observation in the data.
# - Let's try to visualize the distribution of Item_Outlet_Sales.
data = data_BM[["Item_Outlet_Sales"]]
# create outlier point shape
red_diamond = dict(markerfacecolor="r", marker="D")
# set title
plt.title("Item Sales distribution")
# make the boxplot
plt.boxplot(data.values, labels=["Item Sales"], flierprops=red_diamond)
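# - A quick numeric check of the whisker bounds described above (a minimal sketch on the same column):
q1 = data_BM["Item_Outlet_Sales"].quantile(0.25)
q3 = data_BM["Item_Outlet_Sales"].quantile(0.75)
iqr = q3 - q1
lower_whisker_limit = q1 - 1.5 * iqr
upper_whisker_limit = q3 + 1.5 * iqr
# observations outside these limits are the points drawn as red diamonds in the boxplot
n_outliers = (
    (data_BM["Item_Outlet_Sales"] < lower_whisker_limit)
    | (data_BM["Item_Outlet_Sales"] > upper_whisker_limit)
).sum()
print(lower_whisker_limit, upper_whisker_limit, n_outliers)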
# - You can also create multiple boxplots for different columns of your dataset.
# - In order to plot multiple boxplots, you can use the same `subplots()` that we saw earlier.
# - Let's see Item_Weight, Item_MRP distribution together
data = data_BM[["Item_Weight", "Item_MRP"]]
# create outlier point shape
red_diamond = dict(markerfacecolor="r", marker="D")
# generate subplots
fig, ax = plt.subplots()
# make the boxplot
plt.boxplot(
data.values, labels=["Item Weight", "Item MRP (price)"], flierprops=red_diamond
)
# ### 7. Violin Plots
# - **Density distribution of Item weights and Item price**
data = data_BM[["Item_Weight", "Item_MRP"]]
# generate subplots
fig, ax = plt.subplots()
# add labels to x axis
plt.xticks(ticks=[1, 2], labels=["Item Weight", "Item MRP"])
# make the violinplot
plt.violinplot(data.values)
# ### 8. Scatter Plots
# - **Relative distribution of item weight and its visibility**
# - It depicts the distribution of two variables using a cloud of points, where each point represents an observation in the dataset.
# - This depiction allows the eye to infer a substantial amount of information about whether there is any meaningful relationship between them.
# **NOTE : Here, we are going to use only a subset of the data for the plots.**
# set label of axes
plt.xlabel("Item_Weight")
plt.ylabel("Item_Visibility")
# plot
plt.scatter(data_BM["Item_Weight"][:200], data_BM["Item_Visibility"][:200])
# ### 9. Bubble Plots
# - **Relative distribution of sales, item price and item visibility**
# - Let's make a scatter plot of Item_Outlet_Sales and Item_MRP and make the **size** of bubbles by the column Item_Visibility.
# - Bubble plots let you understand the interdependent relations among 3 variables.
# **Note that we are only using a subset of data for the plots.**
# set label of axes
plt.xlabel("Item_MRP")
plt.ylabel("Item_Outlet_Sales")
# set title
plt.title("Item Outlet Sales vs Item MRP (price)")
# plot
plt.scatter(
data_BM["Item_MRP"][:100],
data_BM["Item_Outlet_Sales"][:100],
s=data_BM["Item_Visibility"][:100] * 1000,
c="red",
)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Import the file into a dataframe and display it
file_path = "/kaggle/input/bitcoin-historical-data/bitstampUSD_1-min_data_2012-01-01_to_2021-03-31.csv"
df = pd.read_csv(file_path)
df.head()
# Convert the timestamp to datetime, set it as the index, and resample from per-second to daily data
df["Timestamp"] = pd.to_datetime(df["Timestamp"], unit="s")
df.set_index("Timestamp", inplace=True)
daily_data = df.resample("D").mean()
daily_data.head()
# Check whether there are any missing values
daily_data.isna().sum()
# Fill in the missing values
daily_data.fillna(method="ffill", inplace=True)
daily_data.isna().sum()
# Plot of the BTC price from 2012 to 2021
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Weighted_Price"])
plt.title("Dzienna ważona średnia cena Bitcoina w latach 2012-2021")
plt.xlabel("Rok")
plt.ylabel("Cena [$]")
plt.show()
# Compute and plot daily returns
daily_data["Returns"] = daily_data["Weighted_Price"].pct_change() * 100
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Returns"])
plt.title("Dzienne zwroty Bitcoina w latach 2012-2021")
plt.xlabel("Rok")
plt.ylabel("Zwrot [%]")
plt.show()
# Compute the 30-day rolling standard deviation of returns (price volatility)
daily_data["Volatility"] = (daily_data["Returns"]).rolling(window=30).std()
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Volatility"])
plt.title("30-dniowa zmienność ceny Bitcoina w latach 2012-2021")
plt.xlabel("Rok")
plt.ylabel("Zmienność [%]")
plt.show()
# Compute moving averages of the price
daily_data["30_day_MA"] = daily_data["Weighted_Price"].rolling(window=30).mean()
daily_data["90_day_MA"] = daily_data["Weighted_Price"].rolling(window=90).mean()
daily_data["180_day_MA"] = daily_data["Weighted_Price"].rolling(window=180).mean()
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Weighted_Price"], label="średnia ważona dzienna cena")
plt.plot(daily_data["30_day_MA"], label="30-dniowa średnia krocząca")
plt.plot(daily_data["90_day_MA"], label="90-dniowa średnia krocząca")
plt.plot(daily_data["180_day_MA"], label="180-dniowa średnia krocząca")
plt.title("Średnie ruchome ceny Bitcoina w latach 2012-2021")
plt.xlabel("Rok")
plt.ylabel("Cena [$]")
plt.legend()
plt.show()
# Compute the cumulative return
daily_data["Cumulative_Returns"] = (1 + (daily_data["Returns"] / 100)).cumprod()
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Cumulative_Returns"])
plt.title("Łączny zysk w latach 2012-2021")
plt.xlabel("Rok")
plt.ylabel("Łączny zysk [$]")
plt.show()
# Compute the correlation between daily returns and trading volume
correlation = daily_data["Returns"].corr(daily_data["Volume_(Currency)"])
print(f"Correlation between daily returns and daily trading volume: {correlation:.4f}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from icrawler.builtin import BingImageCrawler, BaiduImageCrawler
from pathlib import Path
from tqdm import tqdm as tqdm
tobe_Filtered = True
licenses = [
"creativecommons",
"publicdomain",
"noncommercial",
"commercial",
"noncommercial,modify",
"commercial,modify",
]
howmany = 1000000
min_image_size = (128, 128)
ActorName = "Yogi Babu" # str(input("Enter Actor's Name:"))
Images_SavePath = "/kaggle/working/" + ActorName + "_BingCrawl"
if not os.path.exists(Images_SavePath):
os.makedirs(Images_SavePath)
for i in tqdm(licenses):
Bing_filters = dict(
type="photo", license=i
) # either photo, face, clipart, linedrawing, animated
crawler = BingImageCrawler(
parser_threads=1, downloader_threads=2, storage={"root_dir": Images_SavePath}
)
if tobe_Filtered == True:
crawler.crawl(
keyword=ActorName,
filters=Bing_filters,
max_num=howmany,
min_size=min_image_size,
)
else:
crawler.crawl(keyword=ActorName, max_num=howmany, min_size=min_image_size)
count = 0
for root_dir, cur_dir, files in os.walk(r"/kaggle/working/Yogi Babu_BingCrawl"):
count += len(files)
print("File count:", count)
|
# python3 -m pip install -U pip
# python3 -m pip install -U setuptools wheel
# Here we assume CUDA 10.1 is installed. You should change the number
# according to your own CUDA version (e.g. mxnet_cu100 for CUDA 10.0).
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from autogluon.tabular import TabularDataset, TabularPredictor
train_data = TabularDataset("../input/exhibit-art/dataset/train.csv")
subsample_size = 500 # subsample subset of data for faster demo, try setting this to much larger values
train_data = train_data.sample(n=subsample_size, random_state=0)
train_data.head()
label = "Cost"
print("Summary of class variable: \n", train_data[label].describe())
save_path = "./agModels-predictClass" # specifies folder to store trained models
predictor = TabularPredictor(label=label, path=save_path).fit(
train_data, presets="best_quality"
)
results = predictor.fit_summary()
results
test_data = TabularDataset("../input/exhibit-art/dataset/test.csv")
customer_ids = test_data["Customer Id"]
y_pred = predictor.predict(test_data)
submission = pd.read_csv("../input/exhibit-art/dataset/sample_submission.csv")
submission["Cost"] = y_pred
submission["Customer Id"] = customer_ids
submission.head()
submission.to_csv("my_submission.csv", index=False)
|
# ## S0: Introduction
# The Fashion MNIST dataset is a 'drop-in' replacement for the MNIST digits dataset. It has 70,000 grayscale images of fashion clothing.
# Each dataset row is a 28x28-pixel grayscale image associated with one of 10 label classes.
# It is more complex and diverse than the original digits dataset, which makes it significantly more challenging!
# 
# Source: https://b2524211.smushcdn.com/2524211/wp-content/uploads/2019/02/fashion_mnist_obtaining.jpg?lossy=1&strip=1&webp=1
# We'll use the Keras framework to build an Artificial Neural Network (Multilayer Perceptron) and train it on this dataset, using the Sequential API to build the NN. It is the simplest kind of Keras model: a single stack of layers connected sequentially.
# ## S1: Importing the dataset and TensorFlow library
import tensorflow as tf
import matplotlib.pyplot as plt
fashion = tf.keras.datasets.fashion_mnist.load_data() # Dataset loaded
(X, y), (X_test, y_test) = fashion
X_train, y_train = X[:55000], y[:55000]
X_train
# Validation datasets
X_val, y_val = X[55000:], y[55000:]
print(X_train.shape)
print(X_val.shape)
print(X_test.shape)
X_train.dtype
# Visualizations of the images present in the dataset
for i in range(1, 10):
plt.subplot(3, 3, i)
plt.imshow(X_train[i], cmap=plt.get_cmap("gray"))
# ## S2: Normalization of image pixel values
# When we pass an image to a DNN, computations on large raw pixel values become harder to optimize. Hence, to make the computation easier, we normalize the pixel values to lie between 0 and 1.
#
X_train, X_val, X_test = X_train / 255.0, X_val / 255.0, X_test / 255.0
X_train
classes = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
no = int(input("Enter number:"))
plt.title(classes[y_train[no]])
plt.imshow(X_train[no], cmap=plt.get_cmap("gray"))
# ## S3: Building the Neural Network
nn_model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=[28, 28]),
tf.keras.layers.Dense(300, activation="relu"),
tf.keras.layers.Dense(100, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
tf.keras.utils.plot_model(
nn_model,
to_file="neural_network_model_fashion_AbhiMango.jpg",
show_layer_names=True,
dpi=96,
layer_range=None,
show_layer_activations=True,
)
nn_model.summary()
# The first layer is a Flatten layer, which takes the 2D input image with dimensions 28 x 28 and flattens it into a 1D array of 784 values.
# The next two layers are Dense layers, which are fully connected layers that perform mathematical operations on their inputs. The first dense layer has 300 neurons, and the second has 100 neurons. Both layers use the rectified linear unit (ReLU) activation function, which sets negative inputs to 0 and passes positive inputs through unchanged.
# The final layer is another Dense layer with 10 neurons, each of which corresponds to one of the 10 classes in the Fashion MNIST dataset (such as T-shirt, dress, or sneaker). The activation function used for this layer is the softmax function, which normalizes the output so that it represents a probability distribution over the 10 classes.
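# As a small numeric illustration of the softmax normalization described above (a minimal sketch with made-up logits, not values taken from the model):
import numpy as np
logits = np.array([2.0, 1.0, 0.1])
softmax_out = np.exp(logits) / np.sum(np.exp(logits))
print(softmax_out, softmax_out.sum())  # positive values that sum to 1, i.e. a probability distribution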
nn_model.layers
weights, biases = nn_model.layers[1].get_weights()
# weights #Random weight initializations
biases
# Compilation to specify the loss functions and optimizers
nn_model.compile(
loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]
)
# **Loss function:** 'sparse_categorical_crossentropy' loss, which is appropriate for multi-class classification problems where the labels are integers (as opposed to one-hot encoded vectors).
# **Optimizer:** Stochastic Gradient Descent (SGD) is a simple and widely used optimization algorithm that updates the model's parameters based on the gradient of the loss function with respect to those parameters.
# **Metrics (extra):** Accuracy, which calculates the proportion of correctly classified images in the training or validation dataset.
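# A small illustration of the integer-label format that sparse_categorical_crossentropy expects (a minimal sketch; with one-hot targets like the second print below, plain categorical_crossentropy would be used instead):
print(y_train[:5])  # integer class ids in the range 0-9
print(tf.keras.utils.to_categorical(y_train[:5], num_classes=10))  # the equivalent one-hot encoding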
# ## S4: Training and evaluating the model
nn_history = nn_model.fit(X_train, y_train, epochs=50, validation_data=(X_val, y_val))
# ## S5: Learning Curves
import pandas as pd
import matplotlib.pyplot as plt
pd.DataFrame(nn_history.history).plot(
figsize=(12, 10),
xlim=[0, 50],
ylim=[0, 1],
grid=True,
xlabel="Epoch",
style=["g--", "r--.", "b-", "b-*"],
)
# ## S6: Testing Time!
nn_model.evaluate(X_test, y_test)
# ## S7: Predictions:
X_pred = X_test[545:563]
y_prob = nn_model.predict(X_pred)
y_prob.round(2)
import numpy as np
y_pred = y_prob.argmax(axis=-1)
y_pred
np.array(classes)[y_pred]
plt.title(classes[y_test[546]])
plt.imshow(X_test[546], cmap=plt.get_cmap("gray"))
plt.title(classes[y_test[555]])
plt.imshow(X_test[555], cmap=plt.get_cmap("gray"))
# ## S8: Saving the model
nn_model.save("Mango_Fashion_MNIST_model", save_format="tf")
|
# # Import Libraries
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
# # Import Dataset
df = pd.read_excel("/kaggle/input/online-retail/online_retail_II.xlsx")
df.head()
basket = (
df[df["Country"] == "France"]
.groupby(["InvoiceNo", "Description"])["Quantity"]
.sum()
.unstack()
.reset_index()
.fillna(0)
.set_index("InvoiceNo")
)
basket
# # Encode the Columns
def encode_units(x):
if x <= 0:
return 0
if x >= 1:
return 1
basket_sets = basket.applymap(encode_units)
basket_sets.drop("POSTAGE", inplace=True, axis=1)
basket_sets
# # Apply Apriori Algorithms
frequent_itemsets = apriori(basket_sets, min_support=0.07, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)
rules.head()
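# A small follow-up sketch (not in the original notebook): the 0.8 and 6 thresholds below are arbitrary example values for filtering the rules frame by its 'confidence' and 'lift' columns.
strong_rules = rules[(rules["confidence"] >= 0.8) & (rules["lift"] >= 6)]
strong_rules.sort_values("lift", ascending=False).head()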
|
import os
import json
from PIL import Image
from typing import Dict
import random
import cv2
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import matplotlib.patches as mpatch
from rapidfuzz.distance.Levenshtein import distance as levenshtein
class CFG:
train_img_path: str = "/kaggle/input/benetech-making-graphs-accessible/train/images"
train_ann_path: str = (
"/kaggle/input/benetech-making-graphs-accessible/train/annotations"
)
test_img_path: str = "/kaggle/input/benetech-making-graphs-accessible/test/images"
class color:
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def get_annotations():
annotations_path = (
"/kaggle/input/benetech-making-graphs-accessible/train/annotations"
)
annotations = []
for annotation_path in os.listdir(CFG.train_ann_path):
with open(f"{CFG.train_ann_path}/{annotation_path}") as annotation_f:
annotations.append(json.load(annotation_f))
return annotations
def get_chart_type_counts(annotations):
chart_type_counts = {
"dot": 0,
"line": 0,
"scatter": 0,
"vertical_bar": 0,
"horizontal_bar": 0,
}
for annotation in annotations:
chart_type_counts[annotation["chart-type"]] += 1
return chart_type_counts
def load_annotation(name: str) -> Dict:
with open(f"{CFG.train_ann_path}/{name}.json") as annotation_f:
ann_example = json.load(annotation_f)
return ann_example
def get_coords(polygon, img_height):
xs = [polygon["x0"], polygon["x1"], polygon["x2"], polygon["x3"], polygon["x0"]]
ys = [
-polygon["y0"] + img_height,
-polygon["y1"] + img_height,
-polygon["y2"] + img_height,
-polygon["y3"] + img_height,
-polygon["y0"] + img_height,
]
return xs, ys
def add_line_breaks(text: str, break_num: int = 7) -> str:
words = text.split()
new_text = ""
for i, word in enumerate(words, start=1):
new_text += word
if i % break_num == 0:
new_text += "<br>"
else:
new_text += " "
return new_text
def get_tick_value(name, data_series):
for el in data_series:
if el["x"] == name:
return el["y"]
elif el["y"] == name:
return el["x"]
def plot_annotated_image(name: str, scale_factor: float = 1.0) -> None:
img_example = Image.open(f"{CFG.train_img_path}/{name}.jpg")
ann_example = load_annotation(name)
# create figure
fig = go.Figure()
# constants
img_width = img_example.size[0]
img_height = img_example.size[1]
# add invisible scatter trace
fig.add_trace(
go.Scatter(
x=[0, img_width], y=[0, img_height], mode="markers", marker_opacity=0
)
)
# configure axes
fig.update_xaxes(visible=False, range=[0, img_width])
fig.update_yaxes(
visible=False,
range=[0, img_height],
# the scaleanchor attribute ensures that the aspect ratio stays constant
scaleanchor="x",
)
# add image
fig.add_layout_image(
dict(
x=0,
sizex=img_width,
y=img_height,
sizey=img_height,
xref="x",
yref="y",
opacity=1.0,
layer="below",
sizing="stretch",
source=img_example,
)
)
# add bounding box
fig.add_shape(
type="rect",
x0=ann_example["plot-bb"]["x0"],
y0=-ann_example["plot-bb"]["y0"] + img_height,
x1=ann_example["plot-bb"]["x0"] + ann_example["plot-bb"]["width"],
y1=-(ann_example["plot-bb"]["y0"] + ann_example["plot-bb"]["height"])
+ img_height,
line=dict(color="RoyalBlue"),
)
# add polygons
for text in ann_example["text"]:
name = text["text"]
if text["role"] == "tick_label":
tick_value = get_tick_value(name, ann_example["data-series"])
if tick_value:
name = f"Text: {name}<br>Value: {tick_value}"
xs, ys = get_coords(text["polygon"], img_height)
fig.add_trace(
go.Scatter(
x=xs,
y=ys,
fill="toself",
name=add_line_breaks(name),
hovertemplate="%{name}",
mode="lines",
)
)
# add x-axis dots
xs = [dot["tick_pt"]["x"] for dot in ann_example["axes"]["x-axis"]["ticks"]]
ys = [
-dot["tick_pt"]["y"] + img_height
for dot in ann_example["axes"]["x-axis"]["ticks"]
]
fig.add_trace(go.Scatter(x=xs, y=ys, mode="markers", name="x-axis"))
# add y-axis dots
xs = [dot["tick_pt"]["x"] for dot in ann_example["axes"]["y-axis"]["ticks"]]
ys = [
-dot["tick_pt"]["y"] + img_height
for dot in ann_example["axes"]["y-axis"]["ticks"]
]
fig.add_trace(go.Scatter(x=xs, y=ys, mode="markers", name="y-axis"))
# configure other layout
fig.update_layout(
width=img_width * scale_factor,
height=img_height * scale_factor,
margin={"l": 0, "r": 0, "t": 0, "b": 0},
showlegend=False,
)
# disable the autosize on double click because it adds unwanted margins around the image
# and finally show figure
fig.show(config={"doubleClick": "reset"})
annotations = get_annotations()
annotation_example = load_annotation("0000ae6cbdb1")
# remove some data to make output it more readable
annotation_example["text"] = [annotation_example["text"][0]]
annotation_example["axes"]["x-axis"]["ticks"] = [
annotation_example["axes"]["x-axis"]["ticks"][0]
]
annotation_example["axes"]["y-axis"]["ticks"] = [
annotation_example["axes"]["y-axis"]["ticks"][0]
]
annotation_example["data-series"] = [annotation_example["data-series"][0]]
print(json.dumps(annotation_example, indent=2))
chart_type_counts = get_chart_type_counts(annotations)
fig = px.pie(values=chart_type_counts.values(), names=chart_type_counts.keys())
fig.update_traces(textposition="inside", textfont_size=14)
fig.update_layout(
title={
"text": "Pie distribution of chart-type label",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
}
)
fig.show()
Image.open(
"/kaggle/input/benetech-making-graphs-accessible/train/images/0000ae6cbdb1.jpg"
)
vb_example_name = "0000ae6cbdb1"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{vb_example_name}.jpg"
)
plot_annotated_image(vb_example_name, scale_factor=1.5)
hb_example_name = "8b6935f7ef04"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{hb_example_name}.jpg"
)
plot_annotated_image(hb_example_name, scale_factor=1.5)
h_example_name = "00cee4e08d80"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{h_example_name}.jpg"
)
plot_annotated_image(h_example_name, scale_factor=1.5)
cd_example_name = "000917f5d829"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{cd_example_name}.jpg"
)
plot_annotated_image(cd_example_name, scale_factor=1.3)
nd_example_name = "000944919c5c"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{nd_example_name}.jpg"
)
plot_annotated_image(nd_example_name, scale_factor=1.3)
l_example_name = "0005413054c9"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{l_example_name}.jpg"
)
plot_annotated_image(l_example_name, scale_factor=1.3)
s_example_name = "0005e64fdc6e"
Image.open(
f"/kaggle/input/benetech-making-graphs-accessible/train/images/{s_example_name}.jpg"
)
plot_annotated_image(s_example_name, scale_factor=1.3)
load_annotation(s_example_name)["data-series"]
def sigmoid(x):
return 2 - (2 / (1 + np.exp(-x)))
x = np.linspace(0, 10, 1000)
fig = px.line(x=x, y=sigmoid(x))
fig.update_layout(
title={
"text": "Sigmoid transformation mapping plot",
"y": 0.98,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
margin=dict(l=0, r=0, t=35, b=0),
)
fig.show()
def rmse(y_true, y_pred):
return np.sqrt(np.mean(np.square(np.subtract(y_true, y_pred))))
def normalized_rmse(y_true, y_pred):
return sigmoid(rmse(y_true, y_pred) / rmse(y_true, np.mean(y_true)))
def normalized_levenshtein_score(y_true, y_pred):
total_distance = np.sum([levenshtein(yt, yp) for yt, yp in zip(y_true, y_pred)])
length_sum = np.sum([len(yt) for yt in y_true])
return sigmoid(total_distance / length_sum)
def score_series(y_true, y_pred):
if len(y_true) != len(y_pred):
return 0.0
if isinstance(y_true[0], str):
return normalized_levenshtein_score(y_true, y_pred)
else:
return normalized_rmse(y_true, y_pred)
def benetech_score(ground_truth: pd.DataFrame, predictions: pd.DataFrame) -> float:
"""Evaluate predictions using the metric from the Benetech - Making Graphs Accessible.
Parameters
----------
ground_truth: pd.DataFrame
Has columns `[data_series, chart_type]` and an index `id`. Values in `data_series`
should be either arrays of floats or arrays of strings.
predictions: pd.DataFrame
"""
if not ground_truth.index.equals(predictions.index):
raise ValueError(
"Must have exactly one prediction for each ground-truth instance."
)
if not ground_truth.columns.equals(predictions.columns):
raise ValueError(f"Predictions must have columns: {ground_truth.columns}.")
pairs = zip(
ground_truth.itertuples(index=False), predictions.itertuples(index=False)
)
scores = []
for (gt_series, gt_type), (pred_series, pred_type) in pairs:
if gt_type != pred_type: # Check chart_type condition
scores.append(0.0)
else: # Score with RMSE or Levenshtein as appropriate
scores.append(score_series(gt_series, pred_series))
return np.mean(scores)
test_img_1 = "000b92c3b098.jpg"
test_img_2 = "00dcf883a459.jpg"
fig, axs = plt.subplots(1, 2, figsize=(10, 30), dpi=350)
axs[0].axis("off")
axs[0].text(
0.5,
1.08,
test_img_1,
fontweight="bold",
fontsize=18,
transform=axs[0].transAxes,
horizontalalignment="center",
)
axs[0].imshow(Image.open(f"{CFG.test_img_path}/{test_img_1}"), cmap="gray")
axs[1].axis("off")
axs[1].text(
0.5,
1.08,
test_img_2,
fontweight="bold",
fontsize=18,
transform=axs[1].transAxes,
horizontalalignment="center",
)
axs[1].imshow(Image.open(f"{CFG.test_img_path}/{test_img_2}"), cmap="gray")
ground_truth = pd.DataFrame.from_dict(
{
"000b92c3b098_x": ([0, 6, 12, 18, 24], "line"),
"000b92c3b098_y": ([0, -0.8, -1.5, -2.1, -2.8], "line"),
"00dcf883a459_x": (["Group 1", "Group 2"], "vertical_bar"),
"00dcf883a459_y": ([3.6, 8.4], "vertical_bar"),
},
orient="index",
columns=["data_series", "chart_type"],
).rename_axis("id")
ground_truth
predictions = pd.DataFrame.from_dict(
{
"000b92c3b098_x": ([0, 6, 12, 18, 24], "line"),
"000b92c3b098_y": ([0, -0.9, -1.6, -2.2, -2.9], "line"),
"00dcf883a459_x": (["Group 1", "Group 2"], "vertical_bar"),
"00dcf883a459_y": ([3.0, 8.8], "vertical_bar"),
},
orient="index",
columns=["data_series", "chart_type"],
).rename_axis("id")
predictions
test_score = benetech_score(ground_truth, predictions)
print(f"Test Benetech Score: {color.BOLD}{color.CYAN}{test_score}{color.END}")
predictions.to_csv("sample_submission.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from matplotlib import pyplot as plt
import missingno as msno
from datetime import date
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import (
MinMaxScaler,
LabelEncoder,
StandardScaler,
RobustScaler,
)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
pd.set_option("display.width", 500)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/docspot/datasets_228_482_diabetes.csv")
df.head()
# # 1- Data Story
# Step 1: Whole Picture
def check_df(df):
print("*******Shape*******************")
print(df.shape)
print("*******Columns*****************")
print(df.columns)
print("*******Types*******************")
print(df.dtypes)
print("*******Head*********************")
print(df.head())
print("*******Missing Values********************")
print(df.isnull().sum())
print("*******Describe*****************")
print(df.describe([0, 0.05, 0.95, 0.99, 1]).T)
print("*******Info*****************")
print(df.info())
print("*********DuplicatedRows**************")
print(df.duplicated().sum())
check_df(df)
# # 2- Determine Numerical and Categorical Variables
# Step 2: Grab numerical and categorical columns
def catch_col(df, cat_thr=20, car_thr=20):
# type = Categorical
cat_cols = [col for col in df.columns if df[col].dtypes == "O"]
    # Type is numeric but nunique < cat_thr, so these columns will be treated as categorical
num_but_cat = [
col
for col in df.columns
if df[col].dtypes != "O" and df[col].nunique() < cat_thr
]
    # Type is categorical but nunique is more than car_thr, so these columns won't be treated as categorical
cat_but_car = [
col
for col in df.columns
if df[col].dtypes == "O" and df[col].nunique() > car_thr
]
# determine categorical columns
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
cat_cols = [col for col in cat_cols if "Outcome" not in col]
# numerical columns
num_cols = [col for col in df.columns if df[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {df.shape[0]}")
print(f"Variables: {df.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car, num_but_cat
cat_cols, num_cols, cat_but_car, num_but_cat = catch_col(df.iloc[:, 0:8])
# # 3- Numerical and Categorical Variable Analysis
# Step 3: Numerical and categorical variable analysis
cat_cols, num_cols, cat_but_car, num_but_cat = catch_col(df)
# numerical variable analysis
def num_summary(df, num_col, plot=False):
quantiles = [0.05, 0.25, 0.5, 0.75, 0.9, 0.95, 1.00]
print(pd.DataFrame(df[num_col].describe(quantiles).T))
if plot:
df[num_col].hist()
plt.xlabel(num_col)
plt.title(num_col)
plt.show(block=True)
print("****************************")
for col in num_cols:
num_summary(df, col, plot=True)
# categorical variable analysis
def cat_summary(df, cat_col, plot=False):
print(
pd.DataFrame(
{
cat_col: df[cat_col].value_counts(),
"Ratio": 100 * df[cat_col].value_counts() / len(df),
}
)
)
print("*****************************")
if plot:
sns.countplot(data=df, x=df[cat_col])
plt.show(block=True)
for col in cat_cols:
cat_summary(df, col, plot=True)
# # 4- Target Variable Analysis
# Step 4: target variable analysis
# target variable mean based on categorical variable
def target_mean_cat(df, cat_cols, target, plot=False):
print(df.groupby(cat_cols)[target].mean())
if plot:
sns.countplot(data=df, x=df[cat_cols])
plt.show(block=True)
for i in cat_cols:
target_mean_cat(df, i, "Outcome", plot=True)
# can a line chart of the means also be drawn on the plot together with the bars?
# numerical variable mean based on target variable
def target_vs_numeric(df, num_cols, target):
print(df.pivot_table(num_cols, target, aggfunc="mean"))
for i in num_cols:
target_vs_numeric(df, i, "Outcome")
# # 5- Outlier Analysis
# Step 5: Outlier analysis
# Creating subplot axes
fig, axes = plt.subplots(3, 3)
# Iterating through axes and names
for name, ax in zip(num_cols, axes.flatten()):
sns.boxplot(y=name, data=df, orient="v", ax=ax)
df["Insulin"].plot(kind="box")
plt.show()
# outlier function
def outlier_thresholds(df, col_name, q1=0.05, q3=0.95):
quartile1 = df[col_name].quantile(q1)
quartile3 = df[col_name].quantile(q3)
IQR = quartile3 - quartile1
up_limit = quartile3 + 1.5 * IQR
    low_limit = quartile1 - 1.5 * IQR
return low_limit, up_limit
def check_outlier(df, col_name):
low_limit, up_limit = outlier_thresholds(df, col_name)
if df[(df[col_name] < low_limit) | (df[col_name] > up_limit)].any(axis=None):
return True
else:
return False
for col in num_cols:
print(f"{col} : {check_outlier(df, col)}")
low_limit, up_limit = outlier_thresholds(df, "Glucose")
df[(df["Glucose"] > up_limit) | (df["Glucose"] < low_limit)]
# replace with thresholds for outlier values
def replace_with_thresholds(df, cols):
low_limit, up_limit = outlier_thresholds(df, cols)
df.loc[df[cols] < low_limit, cols] = low_limit
df.loc[df[cols] > up_limit, cols] = up_limit
for i in num_cols:
if check_outlier(df, i):
replace_with_thresholds(df, i)
# after replacing with thresholds
for col in num_cols:
print(f"{col} : {check_outlier(df, col)}")
# # 5.1 - Local Outlier Factor
# Local Outlier Factor
CLF = LocalOutlierFactor(n_neighbors=20)
CLF.fit_predict(df)
df_scores = CLF.negative_outlier_factor_
Scores = pd.DataFrame(np.sort(df_scores))
Scores.plot(stacked=True, xlim=[0, 20], style=".-")
threshold = np.sort(df_scores)[2]
df[df_scores < threshold]
df.describe([0.01, 0.05, 0.75, 0.90, 0.99]).T
# # 6. Missing Value Analysis
# missing value analysis
df.isnull().sum()
# there are no missing values
def missing_values_table(df, na_name=False):
na_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
n_miss = df[na_columns].isnull().sum()
ratio = df[na_columns].isnull().sum() / df.shape[0] * 100
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
missing_values_table(df, na_name=True)
def zero_values_table(df, zero_name=False):
zero_columns = [col for col in df.columns if ((df[col] == 0).sum() > 0)]
n_zeros = (df[zero_columns] == 0).sum()
ratio = (df[zero_columns] == 0).sum() / df.shape[0] * 100
zeros_df = pd.concat(
[n_zeros, np.round(ratio, 2)], axis=1, keys=["n_zeros", "ratio"]
)
print(zeros_df, end="\n")
if zero_name:
return zero_columns
zero_values_table(df, zero_name=True)
# corr analysis
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax)
plt.show()
# glucose-outcome
# age-pregnancies
# skin thickness-insulin
# bmi-skin thickness
# # 7- Feature Engineering
# Task 2: Feature Engineering
df.isnull().sum()
df.describe().T
# replace zeros in the zero-valued columns with NaN
zero_columns = [i for i in num_cols if df[i].min() == 0]
df[zero_columns] = df[zero_columns].replace(0, np.nan)
df["Insulin"] = df["Insulin"].fillna(df["Insulin"].mean())
df["SkinThickness"] = df["SkinThickness"].fillna(df["SkinThickness"].mean())
df.describe().T
df.head()
# Adding new features
# age-pregnancies
# skin thickness-insulin
# bmi-skin thickness
df["AgeCategory"] = pd.qcut(df["Age"], 4)
df.groupby("AgeCategory")["Outcome"].mean()
df["PregnancyCategory"] = pd.qcut(df["Pregnancies"], 5)
df.groupby("PregnancyCategory")["Outcome"].mean()
df["HighBMI"] = np.where(df["BMI"] >= 33, 1, 0)
df.head()
# # 8- Encoding
df.dtypes
df.nunique()
binary_cols = [
col
for col in df.columns
if df[col].dtypes not in ["int", "float"] and df[col].nunique() == 2
]
binary_cols
# there are no binary columns, we'll have a look at one hot encoding
# one hot encoding
ohe_cols = [
col
for col in df.columns
if df[col].dtypes not in ["int", "float"] and 10 >= df[col].nunique() > 2
]
ohe_cols
def one_hot_encoder(df, cols, drop_first=True):
df = pd.get_dummies(df, columns=cols, drop_first=drop_first)
return df
one_hot_encoder(df, ohe_cols).head()
for col in cat_cols:
cat_summary(df, col, plot=True)
# we can apply a rare encoder to Pregnancies values greater than or equal to 11
# rare encoding
def rare_analyser(df, target, cat_cols):
for col in cat_cols:
print(col, ":", len(df[col].value_counts()))
print(
pd.DataFrame(
{
"Count": df[col].value_counts(),
"Ratio": df[col].value_counts() / len(df) * 100,
"Target Mean": df.groupby(col)[target].mean(),
}
),
end="\n\n\n",
)
df.dtypes
df["Pregnancies2"] = df["Pregnancies"].astype("object")
df.dtypes
def rare_analyser(df, target, cat_cols):
for col in cat_cols:
print(col, ":", len(df[col].value_counts()))
print(
pd.DataFrame(
{
"COUNT": df[col].value_counts(),
"RATIO": df[col].value_counts() / len(df),
"TARGET_MEAN": df.groupby(col)[target].mean(),
}
),
end="\n\n\n",
)
def rare_encoder(df, rare_columns, rare_perc):
temp_df = df.copy()
for var in rare_columns:
tmp = temp_df[var].value_counts() / len(temp_df)
rare_labels = tmp[tmp < rare_perc].index
temp_df[var] = np.where(temp_df[var].isin(rare_labels), 999, temp_df[var])
return temp_df
rare_analyser(df, "Outcome", ["Pregnancies2"])
new_df = rare_encoder(df, ["Pregnancies2"], 0.06)
rare_analyser(new_df, "Outcome", ["Pregnancies2"])
# Scaling
df.dtypes
# # 9- Scaling
# Standard Scaler
ss = StandardScaler()
df["Age_Standard_Scaler"] = ss.fit_transform(df[["Age"]])
df.head()
# Robust Scaler
rs = RobustScaler()
df["Age_Robust_Scaler"] = rs.fit_transform(df[["Age"]])
df.head()
df.isnull().sum()
# # 10- Model
# Model
y = df["Outcome"]
x = df[["Glucose", "BMI", "SkinThickness", "Pregnancies"]]
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.30, random_state=17
)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
accuracy_score(y_pred, y_test)
|
# **REQUIRED LIBRARIES**
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import (
train_test_split,
cross_val_score,
StratifiedKFold,
KFold,
GridSearchCV,
)
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import warnings
from yellowbrick.classifier.rocauc import roc_auc
warnings.filterwarnings("ignore")
# from google.colab import files
# upload=files.upload()
file = r"C:\Users\pcd\Desktop\Masters work for 3 guys\Mr Joshua's work\dataset\REMS_Mars_Dataset.csv"
import chardet
with open(file, "rb") as rawdata:
result = chardet.detect(rawdata.read(100000))
result
# DATA IMPORTATION
df = pd.read_csv(file, encoding="ISO-8859-1")
# DATA EXPLORATION
df.head()
df.shape
df.columns
df.info()
df = df.replace("Value not available", np.nan)
df = df.astype(
{
"max_ground_temp(°C)": "float",
"min_ground_temp(°C)": "float",
"max_air_temp(°C)": "float",
"min_air_temp(°C)": "float",
"mean_pressure(Pa)": "float",
}
)
df.info()
df.isnull().sum()
plt.figure(figsize=(20, 10))
df2 = df.iloc[::200, :]
sns.lineplot(data=df2, x="sol_number", y="max_ground_temp(°C)")
plt.figure(figsize=(20, 10))
df2 = df.iloc[::200, :]
sns.lineplot(data=df2, x="sol_number", y="min_ground_temp(°C)")
plt.figure(figsize=(20, 10))
df2 = df.iloc[::200, :]
sns.lineplot(data=df2, x="sol_number", y="max_air_temp(°C)")
plt.figure(figsize=(20, 10))
df2 = df.iloc[::200, :]
sns.lineplot(data=df2, x="sol_number", y="UV_Radiation")
plt.figure(figsize=(20, 10))
df2 = df.iloc[::200, :]
sns.lineplot(data=df2, x="sol_number", y="mean_pressure(Pa)")
plt.figure(figsize=(20, 10))
df2 = df.iloc[::200, :]
sns.lineplot(data=df2, x="sol_number", y="min_air_temp(°C)")
# DATA CLEANING AND MAKING DATA READY FOR MODEL DEVELOPMENT
df.drop(
[
"wind_speed(m/h)",
"humidity(%)",
"earth_date_time",
"mars_date_time",
"sol_number",
"sunrise",
"sunset",
"weather",
],
inplace=True,
axis=1,
)
df.shape
df.info()
df.isnull().sum()
df.fillna(0.0)
df = df.dropna()
df.isnull().sum()
df.info()
plt.figure(figsize=(20, 10))
sns.lineplot(data=df, x="UV_Radiation", y="max_ground_temp(°C)")
plt.figure(figsize=(20, 10))
sns.lineplot(data=df, x="UV_Radiation", y="min_ground_temp(°C)")
plt.figure(figsize=(20, 10))
sns.lineplot(data=df, x="UV_Radiation", y="max_air_temp(°C)")
plt.figure(figsize=(20, 10))
sns.lineplot(data=df, x="UV_Radiation", y="min_air_temp(°C)")
plt.figure(figsize=(20, 10))
sns.lineplot(data=df, x="UV_Radiation", y="mean_pressure(Pa)")
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, cmap="viridis")
df["UV_Radiation"].value_counts()
labels = pd.Categorical(df["UV_Radiation"])
labels
plt.figure(figsize=(15, 7))
df["UV_Radiation"].value_counts().plot.bar()
plt.figure(figsize=(20, 80))
cols = df.columns
cols = cols[:-1]
for i in range(len(cols)):
plt.subplot(16, 1, i + 1)
sns.kdeplot(data=df, x=cols[i], hue="UV_Radiation")
sns.distplot(
df[cols[i]],
kde_kws={"color": "r", "lw": 1, "label": "KDE"},
hist_kws={"color": "g"},
)
plt.tight_layout()
# DATE PROCESSING AND SPLITTING
x = df.iloc[:, :-1]
y = df.iloc[:, -1]
scaler = MinMaxScaler(feature_range=(0, 1))
x = scaler.fit_transform(x)
lb = LabelEncoder()
lb.fit(y)
y = lb.transform(y)
classes = lb.classes_
classes
y
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=20
)
smote = SMOTE()
x_train_resampled, y_train_resampled = smote.fit_resample(x_train, y_train)
u, v = np.unique(y_train_resampled, return_counts=True)
for idx in range(len(u)):
    print(u[idx], "=", v[idx])
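# For comparison, a small sketch (an addition) of the class balance before SMOTE,
# using the same np.unique pattern as above.
u_before, v_before = np.unique(y_train, return_counts=True)
for label, count in zip(u_before, v_before):
    print("before SMOTE:", label, "=", count)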
rf_model = RandomForestClassifier()
rf_model = rf_model.fit(x_train_resampled, y_train_resampled)
y_pred_rf = rf_model.predict(x_test)
d_model = DecisionTreeClassifier()
d_model = d_model.fit(x_train_resampled, y_train_resampled)
y_pred_d = d_model.predict(x_test)
model_imp = BaggingClassifier(
base_estimator=d_model, n_estimators=10, random_state=20
).fit(x_train_resampled, y_train_resampled)
y_pred2 = model_imp.predict(x_test)
print(classification_report(y_test, y_pred_rf))
print(classification_report(y_test, y_pred_d))
print(classification_report(y_test, y_pred2))
plt.rcParams["font.family"] = "cursive"
plt.rcParams["font.size"] = 30
plt.rc("axes", titlesize=20)
plt.rc("axes", labelsize=20)
plt.rcParams["text.color"] = "black"
plt.rcParams["axes.labelcolor"] = "black"
plt.rcParams["xtick.color"] = "black"
plt.rcParams["ytick.color"] = "black"
plt.rcParams["font.weight"] = "bold"
s = {
"Random Forest": 0.82,
"Decision Tree": 0.75,
"Bagging Classifier(On decision Tree)": 0.80,
}
plt.figure(figsize=(20, 6))
plt.bar(s.keys(), s.values())
plt.xlabel("Model Used")
plt.ylabel("Accuracy Scoreobtained")
plt.title("Accuracy score for the used models")
high = [0.85, 0.77, 0.80]
low = [0.90, 0.65, 0.85]
moderate = [0.82, 0.80, 0.83]
very_high = [0.66, 0.53, 0.67]
plt.figure(figsize=(20, 6))
n = 3
ind = np.arange(n)
width = 0.2
bar1 = plt.bar(ind, high, width, color="y")
bar2 = plt.bar(ind + width, low, width, color="r")
bar3 = plt.bar(ind + width * 2, moderate, width, color="g")
bar4 = plt.bar(ind + width * 3, very_high, width, color="b")
plt.xlabel("Algorithms")
plt.ylabel("Precision Score Obtained")
plt.xticks(
ind + width, ["Random Forest", "Decision Tree", "Bagging Classifier (On DT)"]
)
plt.legend(("high", "low", "moderate", "very high"))
plt.title("Precision score for the used models")
high = [0.76, 0.72, 0.78]
low = [0.86, 0.68, 0.77]
moderate = [0.87, 0.78, 0.82]
very_high = [0.87, 0.77, 0.83]
plt.figure(figsize=(20, 6))
n = 3
ind = np.arange(n)
width = 0.2
bar1 = plt.bar(ind, high, width, color="y")
bar2 = plt.bar(ind + width, low, width, color="r")
bar3 = plt.bar(ind + width * 2, moderate, width, color="g")
bar4 = plt.bar(ind + width * 3, very_high, width, color="b")
plt.xlabel("Algorithms")
plt.ylabel("Recall Score Obtained")
plt.xticks(
ind + width, ["Random Forest", "Decision Tree", "Bagging Classifier (On DT)"]
)
plt.legend(("high", "low", "moderate", "very high"), loc="upper left")
plt.title("Recall score for the used models")
high = [0.81, 0.74, 0.79]
low = [0.88, 0.67, 0.81]
moderate = [0.84, 0.79, 0.82]
very_high = [0.75, 0.63, 0.74]
plt.figure(figsize=(20, 6))
n = 3
ind = np.arange(n)
width = 0.2
bar1 = plt.bar(ind, high, width, color="y")
bar2 = plt.bar(ind + width, low, width, color="r")
bar3 = plt.bar(ind + width * 2, moderate, width, color="g")
bar4 = plt.bar(ind + width * 3, very_high, width, color="b")
plt.xlabel("Algorithms")
plt.ylabel("F1 Score Obtained")
plt.xticks(
ind + width, ["Random Forest", "Decision Tree", "Bagging Classifier (On DT)"]
)
plt.legend(("high", "low", "moderate", "very high"), loc="upper left")
plt.title("F1 score for the used models")
plt.figure(figsize=(10, 6))
fx = sns.heatmap(
confusion_matrix(y_test, y_pred_rf), annot=True, fmt=".0f", cmap="GnBu"
)
fx.set_xlabel("PREDICTED VALUES")
fx.set_ylabel("ACTUAL VALUES")
fx.xaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
fx.yaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
plt.show()
plt.figure(figsize=(10, 6))
fx = sns.heatmap(confusion_matrix(y_test, y_pred_d), annot=True, fmt=".0f", cmap="GnBu")
fx.set_xlabel("PREDICTED VALUES")
fx.set_ylabel("ACTUAL VALUES")
fx.xaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
fx.yaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
plt.show()
plt.figure(figsize=(10, 6))
fx = sns.heatmap(confusion_matrix(y_test, y_pred2), annot=True, fmt=".0f", cmap="GnBu")
fx.set_xlabel("PREDICTED VALUES")
fx.set_ylabel("ACTUAL VALUES")
fx.xaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
fx.yaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
plt.show()
rf_p = {"n_estimators": [100, 200, 300, 400], "max_depth": [x for x in range(5, 20, 2)]}
grid_rf = GridSearchCV(rf_model, rf_p, cv=10, n_jobs=-1, verbose=True)
grid_rf.fit(x_train_resampled, y_train_resampled)
pred_rf = grid_rf.predict(x_test)
dt_p = {
    "max_depth": [1, 3, 5, 6, 10],
    "min_samples_split": [x for x in range(2, 10, 2)],
    "max_features": [x for x in range(1, 10)],
}
grid_dt = GridSearchCV(d_model, dt_p, cv=10, n_jobs=-1)
grid_dt.fit(x_train_resampled, y_train_resampled)
pred_dt = grid_dt.predict(x_test)
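# A small sketch (an addition) for inspecting what the two grid searches found;
# best_params_ and best_score_ are standard GridSearchCV attributes.
print("Random Forest best params:", grid_rf.best_params_)
print("Random Forest best CV score:", round(grid_rf.best_score_, 3))
print("Decision Tree best params:", grid_dt.best_params_)
print("Decision Tree best CV score:", round(grid_dt.best_score_, 3))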
rf = grid_rf.best_estimator_
rf.fit(x_train_resampled, y_train_resampled)
pred_rf = rf.predict(x_test)
print(classification_report(y_test, pred_rf))
plt.figure(figsize=(10, 6))
fx = sns.heatmap(confusion_matrix(y_test, pred_rf), annot=True, fmt=".0f", cmap="GnBu")
fx.set_xlabel("PREDICTED VALUES")
fx.set_ylabel("ACTUAL VALUES")
fx.xaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
fx.yaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
plt.show()
dt = grid_dt.best_estimator_
model_imp = BaggingClassifier(base_estimator=dt, n_estimators=10, random_state=20).fit(
x_train_resampled, y_train_resampled
)
y_pred2 = model_imp.predict(x_test)
print(classification_report(y_test, y_pred2))
plt.figure(figsize=(10, 6))
fx = sns.heatmap(confusion_matrix(y_test, y_pred2), annot=True, fmt=".0f", cmap="GnBu")
fx.set_xlabel("PREDICTED VALUES")
fx.set_ylabel("ACTUAL VALUES")
fx.xaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
fx.yaxis.set_ticklabels(["high", "low", "moderate", "very_high"])
plt.show()
s = {"Random Forest": 0.82, "Bagging Classifier(On decision Tree)": 0.81}
plt.figure(figsize=(20, 6))
plt.bar(s.keys(), s.values())
plt.xlabel("Model Used")
plt.ylabel("Accuracy Score obtained")
plt.title("Accuracy score for the used models (After hyper parameter fine tuning)")
high = [0.86, 0.84]
low = [0.90, 0.89]
moderate = [0.82, 0.82]
very_high = [0.67, 0.62]
plt.figure(figsize=(20, 6))
n = 2
ind = np.arange(n)
width = 0.2
bar1 = plt.bar(ind, high, width, color="y")
bar2 = plt.bar(ind + width, low, width, color="r")
bar3 = plt.bar(ind + width * 2, moderate, width, color="g")
bar4 = plt.bar(ind + width * 3, very_high, width, color="b")
plt.xlabel("Algorithms")
plt.ylabel("Precision Score Obtained")
plt.xticks(ind + width, ["Random Forest", "Bagging Classifier (On DT)"])
plt.legend(("high", "low", "moderate", "very high"))
plt.title("Precision score for the used models(After fine tuning the models)")
high = [0.76, 0.76]
low = [0.82, 0.77]
moderate = [0.89, 0.86]
very_high = [0.85, 0.83]
plt.figure(figsize=(20, 6))
n = 2
ind = np.arange(n)
width = 0.2
bar1 = plt.bar(ind, high, width, color="y")
bar2 = plt.bar(ind + width, low, width, color="r")
bar3 = plt.bar(ind + width * 2, moderate, width, color="g")
bar4 = plt.bar(ind + width * 3, very_high, width, color="b")
plt.xlabel("Algorithms")
plt.ylabel("Recall Score Obtained")
plt.xticks(ind + width, ["Random Forest", "Bagging Classifier (On DT)"])
plt.legend(("high", "low", "moderate", "very high"), loc="upper left")
plt.title("Recall score for the used models(After fine tuning models)")
high = [0.81, 0.80]
low = [0.86, 0.83]
moderate = [0.85, 0.84]
very_high = [0.75, 0.71]
plt.figure(figsize=(20, 6))
n = 2
ind = np.arange(n)
width = 0.2
bar1 = plt.bar(ind, high, width, color="y")
bar2 = plt.bar(ind + width, low, width, color="r")
bar3 = plt.bar(ind + width * 2, moderate, width, color="g")
bar4 = plt.bar(ind + width * 3, very_high, width, color="b")
plt.xlabel("Algorithms")
plt.ylabel("F1 Score Obtained")
plt.xticks(ind + width, ["Random Forest", "Bagging Classifier (On DT)"])
plt.legend(("high", "low", "moderate", "very high"), loc="upper right")
plt.title("F1 score for the used models(After fine tuning the models)")
roc_auc(
rf,
x_train_resampled,
y_train_resampled,
X_test=x_test,
y_test=y_test,
classes=["high", "low", "moderate", "very_high"],
)
roc_auc(
dt,
x_train_resampled,
y_train_resampled,
X_test=x_test,
y_test=y_test,
classes=["high", "low", "moderate", "very_high"],
)
roc_auc(
model_imp,
x_train_resampled,
y_train_resampled,
X_test=x_test,
y_test=y_test,
classes=["high", "low", "moderate", "very_high"],
)
|
import pandas as pd
inpath = "/kaggle/input/fathomnet-2023-first-glance/"
train = pd.read_csv(inpath + "train_with_labels.csv")
test = pd.read_csv(inpath + "eval_images.csv")
most_freq = train["categories"].mode()[0].replace(".0", "")
print(most_freq)
test["id"] = test["file_name"].str[:-4]
test["categories"] = most_freq
test["osd"] = 0.1
test[["id", "categories", "osd"]]
test[["id", "categories", "osd"]].to_csv("submission.csv", index=False)
|
# # **Feature Engineering and Data Pre-Processing**
# Data pre-processing involves cleaning, formatting, and transforming raw data into a format that can be used for analysis. This includes handling missing values, dealing with outliers, scaling features and encoding categorical variables.
#
#
# Feature engineering involves creating new features from existing data to improve the performance of a machine learning model. This can involve transforming existing features, combining multiple features, or extracting new features from raw data. The goal is to create a set of features that captures the underlying relationships and patterns in the data.
# Effective feature engineering and data pre-processing can improve the accuracy and efficiency of machine learning models. It is important to carefully consider the specific characteristics of the data and the goals of the analysis to determine the most appropriate techniques to use.
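# A tiny illustrative sketch (an addition, using a made-up frame) of the pre-processing steps described
# above: handling missing values, encoding a categorical column and scaling a numeric one. The column
# names here are hypothetical and unrelated to the diabetes data loaded below.
import numpy as np
import pandas as pd

_demo = pd.DataFrame({"age": [25, 40, np.nan, 31], "city": ["A", "B", "A", "C"]})
_demo["age"] = _demo["age"].fillna(_demo["age"].median())  # fill missing values with the median
_demo = pd.get_dummies(_demo, columns=["city"], drop_first=True)  # one-hot encode the categorical column
_demo["age_scaled"] = (_demo["age"] - _demo["age"].mean()) / _demo["age"].std()  # standardize the numeric column
print(_demo)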
# **About the Dataset**
# The dataset is a part of a large dataset maintained by the National Institute of Diabetes, Digestive, and Kidney Diseases in the United States.
# It includes data from a diabetes study conducted on Pima Indian women aged 21 years and above who reside in Phoenix, the fifth-largest city in the state of Arizona in the United States. The dataset comprises 768 observations and 8 numerical independent variables. The target variable is indicated as "outcome", where 1 denotes a positive diabetes test result, and 0 indicates a negative result.
# **Variables:** The target variable is specified as "outcome"; 1 indicates positive diabetes test result, 0 indicates negative
# * **Pregnancies** : Number of times the woman has been pregnant
# * **Glucose** : Plasma glucose concentration 2 hours into an oral glucose tolerance test
# * **BloodPressure** : Diastolic blood pressure (mm Hg)
# * **SkinThickness** : Triceps skin fold thickness (mm)
# * **Insulin** : 2-hour serum insulin (mu U/ml)
# * **BMI** : Body mass index (weight in kg / (height in m)^2)
# * **Age** : Age (years)
# * **DiabetesPedigreeFunction** : Scores the likelihood of diabetes based on family history
# * **Outcome** : 0 (doesn't have diabetes) or 1 (has diabetes)
# **Table of Contents**
# 1. EDA (Exploratory Data Analysis)
# 2. Base Model Installation
# 3. Missing Values and Outliers
# 4. Encoding & Scaling
# 5. Modeling
# # EDA (Exploratory Data Analysis)
# **Let's examine the overall picture.**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
)
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import warnings
warnings.simplefilter(action="ignore")
# Display settings for DataFrames
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_rows", 20)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
df = pd.read_csv("/kaggle/input/diabetes/diabetes.csv")
def check_df(dataframe, head=5):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(head))
print("##################### Tail #####################")
print(dataframe.tail(head))
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
check_df(df)
def grab_col_names(dataframe, cat_th=10, car_th=20):
"""
    It gives the names of the categorical, numeric, and categorical-but-cardinal variables in the data set.
    Note: numeric variables with few unique values are also counted as categorical (num_but_cat) and included in cat_cols.
Parameters
------
dataframe: dataframe
The dataframe from which variable names are to be retrieved
cat_th: int, optional
class threshold for numeric but categorical variables
car_th: int, optional
class threshold for categorical but cardinal variables
Returns
------
cat_cols: list
Categorical variable list
num_cols: list
Numeric variable list
        cat_but_car: list
                List of variables that look categorical but are actually cardinal
Examples
------
import seaborn as sns
df = sns.load_dataset("iris")
print(grab_col_names(df))
Notes
------
cat_cols + num_cols + cat_but_car = total number of variables
"""
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("***************************************")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show()
cat_summary(df, "Outcome")
def num_summary(dataframe, numerical_col, plot=False):
quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
print(dataframe[numerical_col].describe(quantiles).T)
if plot:
dataframe[numerical_col].hist(bins=20)
plt.xlabel(numerical_col)
plt.title(numerical_col)
plt.show(block=True)
for col in num_cols:
num_summary(df, col, plot=True)
# **Target Variable Analysis**
# ANALYSIS OF NUMERICAL VARIABLES ACCORDING TO TARGET
def target_summary_with_num(dataframe, target, numerical_col):
print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")
for col in num_cols:
target_summary_with_num(df, "Outcome", col)
# **Correlation**
# In probability theory and statistics, correlation indicates the direction and strength of the linear relationship between two random variables.
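# A minimal sketch (an addition) showing that the Pearson correlation reported by df.corr() is the
# covariance divided by the product of the standard deviations, here for Glucose vs Outcome.
glucose, outcome = df["Glucose"], df["Outcome"]
pearson_manual = ((glucose - glucose.mean()) * (outcome - outcome.mean())).mean() / (
    glucose.std(ddof=0) * outcome.std(ddof=0)
)
print(round(pearson_manual, 3), round(df["Glucose"].corr(df["Outcome"]), 3))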
df.corr()
# Correlation Matrix
f, ax = plt.subplots(figsize=[18, 13])
sns.heatmap(df.corr(), annot=True, fmt=".2f", ax=ax, cmap="magma")
ax.set_title("Correlation Matrix", fontsize=20)
plt.show()
# # Base Model Installation
y = df["Outcome"]
X = df.drop("Outcome", axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=17
)
rf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
print(f"Accuracy: {round(accuracy_score(y_pred, y_test), 2)}")
print(f"Recall: {round(recall_score(y_pred,y_test),3)}")
print(f"Precision: {round(precision_score(y_pred,y_test), 2)}")
print(f"F1: {round(f1_score(y_pred,y_test), 2)}")
print(f"Auc: {round(roc_auc_score(y_pred,y_test), 2)}")
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
plt.show()
if save:
plt.savefig("importances.png")
plot_importance(rf_model, X)
# # Missing and Outliers Value Analysis
# **Missing Value Analysis**
# It is known that, in a living person, variables other than Pregnancies and Outcome cannot be 0.
# Therefore these values need to be handled: zeros can be treated as missing and assigned NaN.
df.isnull().sum()
zero_columns = [
    col
    for col in df.columns
    if (df[col].min() == 0 and col not in ["Pregnancies", "Outcome"])
]
zero_columns
# replace the physiologically impossible zeros with NaN so they are picked up as missing values below
df[zero_columns] = df[zero_columns].replace(0, np.nan)
df.isnull().sum()
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
na_columns = missing_values_table(df, na_name=True)
# Examining the Relationship of Missing Values with the Dependent Variable
def missing_vs_target(dataframe, target, na_columns):
temp_df = dataframe.copy()
for col in na_columns:
temp_df[col + "_NA_FLAG"] = np.where(temp_df[col].isnull(), 1, 0)
na_flags = temp_df.loc[:, temp_df.columns.str.contains("_NA_")].columns
for col in na_flags:
print(
pd.DataFrame(
{
"TARGET_MEAN": temp_df.groupby(col)[target].mean(),
"Count": temp_df.groupby(col)[target].count(),
}
),
end="\n\n\n",
)
missing_vs_target(df, "Outcome", na_columns)
# Filling in Missing Values
for col in zero_columns:
df.loc[df[col].isnull(), col] = df[col].median()
df.isnull().sum()
# **Outliers Value Analysis**
def outlier_thresholds(dataframe, col_name, q1=0.05, q3=0.95):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in df.columns:
    print(col, check_outlier(df, col))
def replace_with_thresholds(dataframe, variable, q1=0.05, q3=0.95):
    low_limit, up_limit = outlier_thresholds(dataframe, variable, q1=q1, q3=q3)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
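# A minimal usage sketch (an addition, not part of the original flow): cap outliers on a copy of the
# dataframe so the analysis below is unaffected.
df_capped = df.copy()
for col in num_cols:
    if check_outlier(df_capped, col):
        replace_with_thresholds(df_capped, col)
        print(f"{col}: outliers capped at the 5%/95% quantile-based thresholds")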
# Creating a new age variable by categorizing the age variable
df.loc[(df["Age"] >= 21) & (df["Age"] < 50), "NEW_AGE_CAT"] = "mature"
df.loc[(df["Age"] >= 50), "NEW_AGE_CAT"] = "senior"
df["NEW_AGE_CAT"]
# BMI below 18.5 is underweight, between 18.5 and 24.9 is normal, between 24.9 and 29.9 is overweight and over 30 is obese
df["NEW_BMI"] = pd.cut(
x=df["BMI"],
bins=[0, 18.5, 24.9, 29.9, 100],
labels=["Underweight", "Healthy", "Overweight", "Obese"],
)
df.head()
# Convert glucose value to categorical variable
df["NEW_GLUCOSE"] = pd.cut(
x=df["Glucose"],
bins=[0, 140, 200, 300],
labels=["Normal", "Prediabetes", "Diabetes"],
)
# Creating a categorical variable by considering age and body mass index together
df.loc[
(df["BMI"] < 18.5) & ((df["Age"] >= 21) & (df["Age"] < 50)), "NEW_AGE_BMI_NOM"
] = "underweightmature"
df.loc[(df["BMI"] < 18.5) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"] = "underweightsenior"
df.loc[
((df["BMI"] >= 18.5) & (df["BMI"] < 25)) & ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_BMI_NOM",
] = "healthymature"
df.loc[
((df["BMI"] >= 18.5) & (df["BMI"] < 25)) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"
] = "healthysenior"
df.loc[
((df["BMI"] >= 25) & (df["BMI"] < 30)) & ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_BMI_NOM",
] = "overweightmature"
df.loc[
((df["BMI"] >= 25) & (df["BMI"] < 30)) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"
] = "overweightsenior"
df.loc[
    (df["BMI"] >= 30) & ((df["Age"] >= 21) & (df["Age"] < 50)), "NEW_AGE_BMI_NOM"
] = "obesemature"
df.loc[(df["BMI"] >= 30) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"] = "obesesenior"
df["NEW_AGE_BMI_NOM"]
df["NEW_AGE_BMI_NOM"]
# Derive Categorical Variable with Insulin Value
def set_insulin(dataframe, col_name="Insulin"):
if 16 <= dataframe[col_name] <= 166:
return "Normal"
else:
return "Abnormal"
df["NEW_INSULIN_SCORE"] = df.apply(set_insulin, axis=1)
df["NEW_INSULIN_SCORE"].head()
df["NEW_GLUCOSE*INSULIN"] = df["Glucose"] * df["Insulin"]
df["NEW_GLUCOSE*INSULIN"].head()
# note: rows where Pregnancies is 0 make this product feature 0 as well
df["NEW_GLUCOSE*PREGNANCIES"] = df["Glucose"] * df["Pregnancies"]
df["NEW_GLUCOSE*PREGNANCIES"].head()
# Uppercase the column names
df.columns = [col.upper() for col in df.columns]
df.head()
# # Encoding & Scaling
# **Encoding**
# Separation of variables according to their types
cat_cols, num_cols, cat_but_car = grab_col_names(df)
# LABEL ENCODING
def label_encoder(dataframe, binary_col):
labelencoder = LabelEncoder()
dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col])
return dataframe
binary_cols = [
col for col in df.columns if df[col].dtypes == "O" and df[col].nunique() == 2
]
binary_cols
for col in binary_cols:
df = label_encoder(df, col)
# One-Hot Encoding Process
# Update process of cat_cols list
cat_cols = [
col for col in cat_cols if col not in binary_cols and col not in ["OUTCOME"]
]
cat_cols
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
df = one_hot_encoder(df, cat_cols, drop_first=True)
df.head()
# **Standardization**
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
df.head()
# # Modeling
y = df["OUTCOME"]
X = df.drop("OUTCOME", axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=17
)
rf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
print(f"Accuracy: {round(accuracy_score(y_pred, y_test), 2)}")
print(f"Recall: {round(recall_score(y_pred,y_test),3)}")
print(f"Precision: {round(precision_score(y_pred,y_test), 2)}")
print(f"F1: {round(f1_score(y_pred,y_test), 2)}")
print(f"Auc: {round(roc_auc_score(y_pred,y_test), 2)}")
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
print(feature_imp.sort_values("Value", ascending=False))
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
plt.show()
if save:
plt.savefig("importances.png")
plot_importance(rf_model, X)
|
# # **Part 1: Initial environment setup
# # Run everything in this part once on first use
# # If persistent storage is enabled and the files are not deleted, there is no need to run it again**
#!pip uninstall tensorflow tensorflow-gpu -y
#!pip install llvmlite==0.31 --ignore-installed
#!cd Wav2Lip && pip install --ignore-installed -r requirements.txt
#!wget "https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth" -O "Wav2Lip/face_detection/detection/sfd/s3fd.pth"
#!ls /kaggle/working/Wav2Lip/face_detection/detection/sfd/
# # Part 2: Synthesizing the video
# # Upload the prepared video and audio files to the input folder, rename them in advance (or change the file names written in the code), then just run
# # Steps:
# # 1. In the panel on the right, click the upload button
# # 2. Enter a name for the folder
# # 3. Click "copy path"
# # 4. Paste the audio and video paths into the code (a sketch of that cell is shown below)
# # 5. Run
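# A hedged sketch of the synthesis cell these steps feed into (an addition, not the original cell):
# it assumes the standard Wav2Lip inference.py entry point and a GAN checkpoint placed under
# Wav2Lip/checkpoints/; the face/audio paths are placeholders for the files you upload.
#!cd Wav2Lip && python inference.py \
#    --checkpoint_path checkpoints/wav2lip_gan.pth \
#    --face /kaggle/input/<your-upload>/video.mp4 \
#    --audio /kaggle/input/<your-upload>/audio.wav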
# Move the output file out of the results folder into the working directory, then click download
|
import os
os.listdir("/kaggle/input/monthly-sales")
files = sorted(os.listdir("/kaggle/input/monthly-sales"))
files
import pandas as pd
all_month = pd.DataFrame()
for file in files:
df = pd.read_csv("/kaggle/input/monthly-sales/" + file)
all_month = pd.concat([all_month, df])
all_month
nan_df = all_month[all_month.isna().any(axis=1)]
nan_df
all_month = all_month.dropna(how="all")
all_month
all_month["Month"] = all_month["Order_Date"].str[0:2]
all_month = all_month[all_month["Month"] != "Or"]
all_month
all_month["Month"] = all_month["Month"].astype("int32")
all_month.head()
all_month.dtypes
all_month["Quantity_Ordered"] = pd.to_numeric(all_month["Quantity_Ordered"])
all_month["Price_Each"] = pd.to_numeric(all_month["Price_Each"])
all_month.dtypes
all_month["sales"] = all_month["Quantity_Ordered"] * all_month["Price_Each"]
all_month.head()
for_month = all_month.groupby("Month")
print(for_month.sum())
months = [month for month, something in for_month]
sales = for_month.sum()["sales"]
import matplotlib.pyplot as plt
plt.bar(months, sales)
plt.xticks(months)
plt.ticklabel_format(style="plain")
plt.show()
def get_city(x):
city = x.split(",")[1]
return city
def get_state(x):
state = x.split(",")[2].split(" ")[1]
return state
all_month["City"] = all_month["Purchase_Address"].apply(
lambda x: (get_city(x)) + " " + (get_state(x))
)
all_month.head()
for_city = all_month.groupby("City")
for_city.sum()
Cities = [city for city, something in for_city]
Sales_data = for_city.sum()["sales"]
plt.bar(Cities, Sales_data)
plt.xticks(Cities, rotation="vertical")
plt.ticklabel_format(style="plain", axis="y")
plt.show()
for_product = all_month.groupby("Product")
for_product.sum()
product_list = [product for product, something in for_product]
Quantity_sold = for_product.sum()["Quantity_Ordered"]
plt.bar(product_list, Quantity_sold)
plt.xticks(product_list, rotation="vertical")
plt.ticklabel_format(style="plain", axis="y")
plt.show()
type(product_list[0])
product_sales = for_product.sum()["sales"]
# plt.bar(product_list,product_sales)
# plt.xticks(product_list,rotation = 'vertical')
# plt.ticklabel_format(style = 'plain',axis = 'y')
# plt.show()
# for_product.sum().values[0][3]
sales_product = []
for product in product_list:
    sales_product.append(int(for_product.sum().loc[product, "sales"]))
sales_product
plt.pie(sales_product, labels=product_list, radius=3.5)
font = {"family": "normal", "weight": "regular", "size": 15}
plt.rc("font", **font)
plt.show()
|
# # **This notebook is part of a Machine Learning course project**
# **By : Ayman Moumen**
# # Iris Species Classification
#
# The dataset studied in this project is the Iris dataset. It contains information about iris species classification by sepal and petal size.
#
# A machine learning model is needed to classify the iris species based on sepal and petal measurements
#
# The models used in this notebook:
# - **K-Nearest Neighbour (KNN)**
# - **Logistic Regression**
# - **Gaussian Naive Bayes**
# - **Support Vector Machine (SVM)**
# - **Decision Tree**
# - **Random Forest**
# - **Radius Neighbors Classifier**
# - **Quadratic Discriminant Analysis**
# - **Decision Tree Classifier**
# - **Ada Boost Classifier**
# - **Extra Trees Classifier**
# - **Bagging Classifier**
# - **Gradient Boosting Classifier**
# - **XGBoost Classifier**
# # About the Dataset
# Description of the data given:
# - **Id**: Unique number for each row
# - **SepalLengthCm**: Length of the sepal (in cm)
# - **SepalWidthCm**: Width of the sepal (in cm)
# - **PetalLengthCm**: Length of the petal (in cm)
# - **PetalWidthCm**: Width of the petal (in cm)
# - **Species**: Name of the species
# # Importing the Necessary Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import graphviz
import seaborn as sns
import plotly_express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore")
# View of the Dataset
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
df
# Dataset info
# df.info()
# Looking at the statistics of our dataset
df.describe()
# Missing values
print("Total number of missing values")
print(30 * "-")
print(df.isna().sum())
print(30 * "-")
print("Total missing values are:", df.isna().sum().sum())
print(30 * "-")
# **We have no missing data. Great!**
# sns.pairplot(df, hue = 'Species', height = 5)
# plt.show()
# Drop the Id column since it duplicates the row index
df.drop(columns=["Id"], axis=1, inplace=True)
df
# Unique values of the "Species" variable:
print("The different Species in the dataset are:", df["Species"].unique())
print("The total number of unique species are:", df["Species"].nunique())
# # Exploratory Data Analysis
# Data visualization
df.groupby(["Species"]).sum().plot(kind="bar")
count_list = [
(df.Species == "Iris-setosa").sum(),
(df.Species == "Iris-versicolor").sum(),
(df.Species == "Iris-virginica").sum(),
]
label_list = list(df["Species"].unique())
plt.figure(figsize=(10, 7))
plt.pie(
count_list,
labels=label_list,
autopct="%.2f %%",
startangle=90,
explode=(0.1, 0.1, 0.0),
textprops={"fontsize": 12},
)
plt.title("Distribution of Classes", fontsize=30)
plt.show()
# The classes are equally balanced. Now, let us look at the count of each flower type in our dataset.
print("The distribution of the classes is:\n", df["Species"].value_counts(), sep="")
# # Interactive 3D Visualization!
import plotly.express as px
df2 = px.data.iris()
fig = px.scatter_3d(
df2, x="sepal_length", y="sepal_width", z="petal_width", color="species"
)
fig.show()
# Separate the Species target from the measurement features:
y = df["Species"]
X = df.drop(["Species"], axis=1)
print(X, "\n\n", y)
# # Label Encoding
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
y
# # Scaling the data
scaler = StandardScaler()
X = scaler.fit_transform(X)
# # Visualizing the correlations:
plt.figure(figsize=(15, 10))
sns.heatmap(df.corr(), cmap="Blues", square=True, annot=True)
plt.title("Visualizing Correlations", size=20)
plt.show()
# # Splitting the data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, stratify=y, random_state=0
)
# # Model evaluation
from sklearn.metrics import accuracy_score, f1_score
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
acc_knn = knn.score(X_test, y_test)
print("The accuracy for KNN is:", acc_knn * 100, "%")
print(accuracy_score(y_test, y_pred))
print(f1_score(y_test, y_pred, average="macro"))
# # Model accuracy for different values of K
store_acc = []
neighbors = [i for i in range(1, 11)]
for i in range(len(neighbors)):
knn_improved = KNeighborsClassifier(n_neighbors=neighbors[i])
knn_improved.fit(X_train, y_train)
y_pred = knn_improved.predict(X_test)
acc_knn_for_diff_values = round((knn_improved.score(X_test, y_test)) * 100, 2)
store_acc.append(acc_knn_for_diff_values)
plt.figure(figsize=(10, 7))
plt.plot(neighbors, store_acc, color="blue", marker="o")
plt.title("N Neighbors VS Score", fontsize=20)
plt.xlabel("N Neighbors", fontsize=15)
plt.ylabel("Score", fontsize=15)
plt.grid(True)
plt.show()
# # Finding the best KNN parameters with GridSearch
from sklearn.metrics import accuracy_score, f1_score
for i in [0.15, 0.2, 0.3]:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=i, stratify=y, random_state=0
)
knn = KNeighborsClassifier()
from sklearn.model_selection import GridSearchCV
k_range = list(range(1, 11))
param_grid = dict(n_neighbors=k_range)
param_grid["metric"] = ["euclidean", "manhattan"]
param_grid["weights"] = ["uniform", "distance"]
# defining parameter range
grid = GridSearchCV(
knn, param_grid, cv=4, scoring="accuracy", return_train_score=False, verbose=1
)
print("pour test_size =", i)
# fitting the model for grid search
grid_search = grid.fit(X_train, y_train)
print(grid_search.best_params_)
print(grid_search.best_score_)
print("-" * 50)
# # Second algorithm:
# # Logistic Regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
acc_lr = lr.score(X_test, y_test)
print("The accuracy for Logistic Regression is:", acc_lr * 100, "%")
print(y_pred)
# # We can see that KNN performed better at classifying the iris dataset
#
# fig = plt.figure(figsize = (15, 10))
# ax1 = fig.add_subplot(2, 2, 1)
# ax2 = fig.add_subplot(2, 2, 2)
# ax3 = fig.add_subplot(2, 2, 3)
# ax4 = fig.add_subplot(2, 2, 4)
# ax1.plot((df[df['Species'] == 'Iris-setosa'])['SepalLengthCm'], np.zeros_like((df[df['Species'] == 'Iris-setosa'])['SepalLengthCm']), 'ro')
# ax1.plot((df[df['Species'] == 'Iris-virginica'])['SepalLengthCm'], np.zeros_like((df[df['Species'] == 'Iris-virginica'])['SepalLengthCm']), 'go')
# ax1.plot((df[df['Species'] == 'Iris-versicolor'])['SepalLengthCm'], np.zeros_like((df[df['Species'] == 'Iris-versicolor'])['SepalLengthCm']), 'bo')
# ax1.set_title('Analysis of Sepal Length', size = 20)
# ax1.get_yaxis().set_visible(False)
# ax2.plot((df[df['Species'] == 'Iris-setosa'])['SepalWidthCm'], np.zeros_like((df[df['Species'] == 'Iris-setosa'])['SepalWidthCm']), 'ro')
# ax2.plot((df[df['Species'] == 'Iris-virginica'])['SepalWidthCm'], np.zeros_like((df[df['Species'] == 'Iris-virginica'])['SepalWidthCm']), 'go')
# ax2.plot((df[df['Species'] == 'Iris-versicolor'])['SepalWidthCm'], np.zeros_like((df[df['Species'] == 'Iris-versicolor'])['SepalWidthCm']), 'bo')
# ax2.set_title('Analysis of Sepal Width', size = 20)
# ax2.get_yaxis().set_visible(False)
# ax3.plot((df[df['Species'] == 'Iris-setosa'])['PetalLengthCm'], np.zeros_like((df[df['Species'] == 'Iris-setosa'])['PetalLengthCm']), 'ro')
# ax3.plot((df[df['Species'] == 'Iris-virginica'])['PetalLengthCm'], np.zeros_like((df[df['Species'] == 'Iris-virginica'])['PetalLengthCm']), 'go')
# ax3.plot((df[df['Species'] == 'Iris-versicolor'])['PetalLengthCm'], np.zeros_like((df[df['Species'] == 'Iris-versicolor'])['PetalLengthCm']), 'bo')
# ax3.set_title('Analysis of Petal Length', size = 20)
# ax3.get_yaxis().set_visible(False)
# ax4.plot((df[df['Species'] == 'Iris-setosa'])['PetalWidthCm'], np.zeros_like((df[df['Species'] == 'Iris-setosa'])['PetalWidthCm']), 'ro')
# ax4.plot((df[df['Species'] == 'Iris-virginica'])['PetalWidthCm'], np.zeros_like((df[df['Species'] == 'Iris-virginica'])['PetalWidthCm']), 'go')
# ax4.plot((df[df['Species'] == 'Iris-versicolor'])['PetalWidthCm'], np.zeros_like((df[df['Species'] == 'Iris-versicolor'])['PetalWidthCm']), 'bo')
# ax4.set_title('Analysis of Petal Width', size = 20)
# ax4.get_yaxis().set_visible(False)
# plt.subplots_adjust(left = 0.1,
# bottom = 0.1,
# right = 0.9,
# top = 0.9,
# wspace = 0.4,
# hspace = 0.4)
# plt.show()
# **Observations**:
# - `SepalLengthCm`: Iris-setosa has the smallest sepal length. We can see an outlier (red dot) in the graph. Iris-versicolor has a sepal length of about 5 cm to 7 cm. Iris-virginica has the largest sepal length (above 7 cm).
# - `SepalWidthCm`: Iris-versicolor has the smallest sepal width. The distinction between setosa and virginica is not so prominent in the range of 3.5 cm to 4 cm. However for a sepal width equal to or greater than 4 cm all the flowers belong to the Iris-setosa species.
# - `PetalLengthCm`: Iris-setosa has the smallest petal length. The length of the petals do not exceed 2 cm. For Iris-versicolor the petal length is in the range of 3 cm to 5 cm. Iris-virginica has the largest petal length (5 cm or greater).
# - `PetalWidthCm`: Iris-setosa has the smallest petal width. Iris-versicolor has a petal length from 1 cm to slightly less than 2 cm. Iris-virginica has a petal width that is approximately greater than 1.8 cm.
# # Boxplots
plt.figure(figsize=(15, 20))
def create_boxplot(feature):
sns.boxplot(data=df, x="Species", y=feature)
if feature == "SepalLengthCm":
feature = "Sepal Length"
if feature == "SepalWidthCm":
feature = "Sepal Width"
if feature == "PetalLengthCm":
feature = "Petal Length"
if feature == "PetalWidthCm":
feature = "Petal Width"
plt.title("Boxplot for " + feature, fontsize=20)
plt.xlabel("Species", fontsize=15)
plt.ylabel(feature, fontsize=15)
plt.subplot(221)
create_boxplot("SepalLengthCm")
plt.subplot(222)
create_boxplot("SepalWidthCm")
plt.subplot(223)
create_boxplot("PetalLengthCm")
plt.subplot(224)
create_boxplot("PetalWidthCm")
plt.show()
# **Observations**:
# - `SepalLengthCm`: The median value for sepal length is the least for Iris-setosa and the most for Iris-virginica.
# - `SepalWidthCm`: The median value for sepal width is the least for Iris-versicolor and the largest for Iris-setosa. Based on the sepal width of the flowers it might be tough to differentiate between the 3 species.
# - `PetalLengthCm`: Visually it is very evident that Iris-setosa has the least petal length. Even though there are a few outliers all of them are less than 2 cm. Iris-versicolor has the second largest median petal length, whereas Iris-virginica has the largest median petal length.
# - `PetalWidthCm`: Similar to the petal lengths of the species, we can see that Iris-setosa has the lowest median petal width, whereas Iris-virginica has the largest median petal width.
# # Classification Report
# > A classification report is a performance evaluation metric in machine learning. It is used to show the precision, recall, F1 Score, and support of your trained classification model. Some of the common terms associated with a classification report are as follows:
# > - **Precision**: Precision is defined as the ratio of true positives to the sum of true and false positives.
# > - **Recall**: Recall is defined as the ratio of true positives to the sum of true positives and false negatives.
# > - **F1 Score**: The F1 Score is the weighted harmonic mean of precision and recall. The closer the value of the F1 score is to 1.0, the better the expected performance of the model is.
# > - **Support**: Support is the number of actual occurrences of the classes in the dataset. It doesn’t vary between models, it just diagnoses the performance evaluation process.
# **Credits for the ML algorithms**: https://www.kaggle.com/marcovasquez/top-machine-learning-algorithms-beginner#Machine-Learning-with-Scikit-Learn--
def generate_results(model, predictions, name):
cl_rep = classification_report(y_test, predictions)
print("\nThe classification report for " + name + " is:", cl_rep, sep="\n")
cm_model = confusion_matrix(y_test, predictions)
plt.figure(figsize=(8, 6))
sns.heatmap(cm_model, annot=True, cmap="Blues", annot_kws={"size": 15}, square=True)
plt.title("Confusion Matrix for " + name, size=15)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
# # Logistic Regression
# Logistic regression, despite its name, is a classification model rather than a regression model. It models the probability of a discrete outcome given the input variables.
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
acc_lr = lr.score(X_test, y_test)
print("The accuracy for Logistic Regression is:", acc_lr * 100, "%")
generate_results(lr, y_pred, "Logistic Regression")
# # KNN
# KNN works by finding the distances between a query and all the examples in the data, selecting the specified number of examples (K) closest to the query, and then voting for the most frequent label.
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
acc_knn = knn.score(X_test, y_test)
print("The accuracy for KNN is:", acc_knn * 100, "%")
# # KNN Model Performance
# On the default KNN model we are getting a score of 96.67%. The default value for `n_neighbors` is 5 in KNN. Let's look at the score for different values of `n_neighbors` and select the value that gives us the best results.
store_acc = []
neighbors = [i for i in range(1, 11)]
for i in range(len(neighbors)):
knn_improved = KNeighborsClassifier(n_neighbors=neighbors[i])
knn_improved.fit(X_train, y_train)
y_pred = knn_improved.predict(X_test)
acc_knn_for_diff_values = round((knn_improved.score(X_test, y_test)) * 100, 2)
store_acc.append(acc_knn_for_diff_values)
plt.figure(figsize=(10, 7))
plt.plot(neighbors, store_acc, color="blue", marker="o")
plt.title("N Neighbors VS Score", fontsize=20)
plt.xlabel("N Neighbors", fontsize=15)
plt.ylabel("Score", fontsize=15)
plt.grid(True)
plt.show()
# From the graph above we can clearly observe that for all values of `n_neighbors` above 6 the accuracy is 100%. Let's select the smallest value of `n_neighbors` that gives us the highest accuracy, which is 7.
knn_improved = KNeighborsClassifier(n_neighbors=7)
knn_improved.fit(X_train, y_train)
y_pred = knn_improved.predict(X_test)
acc_knn_imp = knn_improved.score(X_test, y_test)
print("The accuracy for KNN is:", acc_knn_imp * 100, "%")
generate_results(knn_improved, y_pred, "KNN (n_neighbors = 7)")
# # Radius Neighbors Classifier
# Radius Neighbors Classifier is a classification machine learning algorithm. It is an extension to the k-nearest neighbors algorithm that makes predictions using all examples in the radius of a new example rather than the k-closest neighbors. As such, the radius-based approach to selecting neighbors is more appropriate for sparse data, preventing examples that are far away in the feature space from contributing to a prediction.
rnc = RadiusNeighborsClassifier()
rnc.fit(X_train, y_train)
y_pred = rnc.predict(X_test)
acc_rnc = rnc.score(X_test, y_test)
print("The accuracy for Radius Neighbors Classifier is:", acc_rnc * 100, "%")
generate_results(rnc, y_pred, "Radius Neighbors Classifier")
# # Gaussian Naive Bayes
# This is a variant of Naive Bayes which supports continuous values and has an assumption that each class is normally distributed.
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
acc_gnb = gnb.score(X_test, y_test)
print("The accuracy for Gaussian Naive Bayes is:", acc_gnb * 100, "%")
generate_results(gnb, y_pred, "Gaussian Naive Bayes")
# # Support Vector Classifier
# It is a supervised classification algorithm. The idea of SVM is simple. It creates a line or a hyperplane which separates the data into classes.
svc = SVC()
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
acc_svc = svc.score(X_test, y_test)
print("The accuracy for SVC is:", acc_svc * 100, "%")
generate_results(svc, y_pred, "Support Vector Classifier")
# # Quadratic Discriminant Analysis
# QDA is a variant of LDA (Linear Discriminant Analysis) in which an individual covariance matrix is estimated for every class of observations. QDA is particularly useful if there is prior knowledge that individual classes exhibit distinct covariances.
qda = QuadraticDiscriminantAnalysis()
qda.fit(X_train, y_train)
y_pred = qda.predict(X_test)
acc_qda = qda.score(X_test, y_test)
print("The accuracy for Quadratic Discriminant Analysis is:", acc_qda * 100, "%")
generate_results(qda, y_pred, "QDA")
# # Decision Tree Classifier
# A decision tree is a graphical representation of all possible solutions to a decision based on certain conditions. On each step or node of a decision tree, used for classification, we try to form a condition on the features to separate all the labels or classes contained in the dataset to the fullest purity.
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
y_pred = dtc.predict(X_test)
acc_dtc = dtc.score(X_test, y_test)
print("The accuracy of the Decision Tree Classifier is:", acc_dtc * 100, "%")
generate_results(dtc, y_pred, "Decision Tree Classifier")
# # Random Forest Classifier
# The term “Random Forest Classifier” refers to the classification algorithm made up of several decision trees. The algorithm uses randomness to build each individual tree to promote uncorrelated forests, which then uses the forest's predictive powers to make accurate decisions.
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_test)
acc_rfc = rfc.score(X_test, y_test)
print("The accuracy of the Random Forest Classifier is:", acc_rfc * 100, "%")
generate_results(rfc, y_pred, "Random Forest Classifier")
# # Ada Boost Classifier
# It combines multiple classifiers to increase the accuracy of classifiers. AdaBoost is an iterative ensemble method. AdaBoost classifier builds a strong classifier by combining multiple poorly performing classifiers so that you will get high accuracy strong classifier.
abc = AdaBoostClassifier()
abc.fit(X_train, y_train)
y_pred = abc.predict(X_test)
acc_abc = abc.score(X_test, y_test)
print("The accuracy for Ada Boost Classifier is:", acc_abc * 100, "%")
generate_results(abc, y_pred, "Ada Boost Classifier")
# # Extra Trees Classifier
# This is a type of ensemble learning technique which aggregates the results of multiple de-correlated decision trees collected in a “forest” to output it's classification result.
etc = ExtraTreesClassifier(random_state=0)
etc.fit(X_train, y_train)
y_pred = etc.predict(X_test)
acc_etc = etc.score(X_test, y_test)
print("The accuracy for Etra Trees Classifier is:", acc_etc * 100, "%")
generate_results(etc, y_pred, "Extra Tress Classifier")
# # Bagging Classifier
# Bagging classifier is an ensemble technique that fits base classifiers each on random subsets of the original dataset and then aggregates their individual predictions to form a final prediction.
bc = BaggingClassifier()
bc.fit(X_train, y_train)
y_pred = bc.predict(X_test)
acc_bc = bc.score(X_test, y_test)
print("The accuracy for Bagging Classifier is:", acc_bc * 100, "%")
generate_results(bc, y_pred, "Bagging Classifier")
# # Gradient Boosting Classifier
# Gradient boosting classifiers are a group of machine learning algorithms that combine many weak learning models together to create a strong predictive model.
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_pred = gbc.predict(X_test)
acc_gbc = gbc.score(X_test, y_test)
print("The accuracy for the Gradient Boosting Classifier is:", acc_gbc * 100, "%")
generate_results(gbc, y_pred, "Gradient Boosting Classifier")
# # XGBoost Classifier
# XGBoost is a decision-tree-based ensemble Machine Learning algorithm that uses a gradient boosting framework. XGBoost provides a highly efficient implementation of the stochastic gradient boosting algorithm and access to a suite of model hyperparameters designed to provide control over the model training process.
xgbc = XGBClassifier(n_jobs=-1, silent=True, verbosity=0)
xgbc.fit(X_train, y_train)
y_pred = xgbc.predict(X_test)
acc_xgbc = xgbc.score(X_test, y_test)
print("The accuracy for XGB Classifier is:", acc_xgbc * 100, "%")
generate_results(xgbc, y_pred, "XGB Classifier")
# # Final Results
data = {
"Logistic Regression": acc_lr * 100,
"KNN (default parameters)": acc_knn * 100,
"KNN (n_neighbors = 7)": acc_knn_imp * 100,
"Radius Neighbors Classifier": acc_rnc * 100,
"Gaussian Naive Bayes": acc_gnb * 100,
"Support Vector Classifier": acc_svc * 100,
"Quadratic Discriminant Analysis": acc_qda * 100,
"Decision Tree Classifier": acc_dtc * 100,
"Random Forest Classifier": acc_rfc * 100,
"Ada Boost Classifier": acc_abc * 100,
"Extra Trees Classifier": acc_etc * 100,
"Bagging Classifier": acc_bc * 100,
"Gradient Boosting Classifier": acc_gbc * 100,
"XGBoost Classifier": acc_xgbc * 100,
}
data = dict(sorted(data.items(), key=lambda x: x[1], reverse=True))
models = list(data.keys())
score = list(data.values())
fig = plt.figure(figsize=(15, 10))
sns.barplot(x=score, y=models)
plt.xlabel("Models Used", size=20)
plt.xticks(size=12)
plt.ylabel("Score", size=20)
plt.yticks(size=12)
plt.title("Score for Different Models", size=25)
plt.show()
|
import numpy as np
import pandas as pd
import seaborn as sns
Air = pd.read_csv("/kaggle/input/air-quality-data-set/AirQuality.csv", sep=";")
Air.head()
Air.info()
Air = Air.iloc[:, :-2]
Air.replace(to_replace=",", value=".", regex=True, inplace=True)
columns_to_convert = ["CO(GT)", "C6H6(GT)", "T", "RH", "AH"]
for column in columns_to_convert:
Air[column] = pd.to_numeric(Air[column], errors="coerce")
Air.replace(-200, np.nan, inplace=True)
Air.info()
Air.drop("NMHC(GT)", axis=1, inplace=True)
Air["Date"] = pd.to_datetime(Air["Date"], format="%d/%m/%Y")
Air["Time"] = pd.to_datetime(Air["Time"], format="%H.%M.%S").dt.time
Air.describe()
import matplotlib.pyplot as plt
numerical_columns = Air.select_dtypes(include=[np.number]).columns
for column in numerical_columns:
plt.figure(figsize=(6, 3))
sns.boxplot(x=Air[column])
plt.title(f"Boxplot of {column}")
plt.show()
for column in numerical_columns:
plt.figure(figsize=(6, 3))
    sns.histplot(x=Air[column], stat="count", color="blue", bins=15, kde=True)
plt.title(f"Histogram of {column}")
plt.xlabel(column)
plt.ylabel("Frequency")
plt.show()
sns.pairplot(Air, diag_kind="kde")
plt.show()
Air.isnull().sum()
Air = Air.apply(
lambda column: column.interpolate(method="linear")
if column.dtype != "datetime64[ns]" and column.dtype != "<m8[ns]"
else column
)
Air.info()
Air.isnull().sum()
numerical_columns = Air.select_dtypes(include=[np.number]).columns
for column in numerical_columns:
plt.figure(figsize=(6, 3))
sns.boxplot(x=Air[column])
plt.title(f"Boxplot of {column}")
plt.show()
for column in numerical_columns:
plt.figure(figsize=(6, 3))
    sns.histplot(x=Air[column], stat="count", color="blue", bins=15, kde=True)
plt.title(f"Histogram of {column}")
plt.xlabel(column)
plt.ylabel("Frequency")
plt.show()
print(f"Number of duplicated rows: {Air.duplicated().sum()}")
plt.figure(figsize=(15, 10))
sns.heatmap(Air.corr(method="pearson", min_periods=1), annot=True)
Air.corr()
pd.plotting.scatter_matrix(Air, figsize=[20, 20])
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import time
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from imblearn.ensemble import (
BalancedRandomForestClassifier,
EasyEnsembleClassifier,
RUSBoostClassifier,
)
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.model_selection import (
RepeatedStratifiedKFold,
train_test_split,
StratifiedShuffleSplit,
KFold,
StratifiedKFold,
cross_validate,
GridSearchCV,
cross_val_score,
RandomizedSearchCV,
)
from sklearn.metrics import (
precision_recall_fscore_support,
average_precision_score,
recall_score,
confusion_matrix,
precision_score,
SCORERS,
precision_recall_curve,
fbeta_score,
make_scorer,
)
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
# from skopt import BayesSearchCV
# import seaborn as sns
from xgboost import XGBClassifier
from warnings import simplefilter
from hyperopt import tpe, STATUS_OK, Trials, hp, fmin, space_eval
# ## 1. Introduction
# * Our dataset has 284 807 entries and consists of 31 columns (one of them being the target variable).
# * There are 492 frauds, which gives about 0.18% of the total transactions: a high imbalance.
# * The data was split into training and test sets with a 0.25 test ratio, which results in 379 frauds for training and 113 frauds for testing.
# * Most of the features were transformed with PCA (Principal Component Analysis) because of confidentiality issues.
# * We are going to make predictions using:
# * Simple decision tree
# * Bagging
# * Boosting
# * We will also try to take advantage of undersampling methods in each of the above
# * Some of the methods may require outlier removal in order to better reflect the real patterns
# * Average precision will serve as the main metric to rate the quality of the model
# * Precision, Recall and F-score will serve as supporting metrics to make the final choice
# ## 2. EDA
df = pd.read_csv("/kaggle/input/creditcardfraud/creditcard.csv")
df.head()
df.info()
df_train, df_test = train_test_split(df, test_size=0.25, random_state=42)
X = df.drop(["Class"], axis=1)
y = df["Class"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
print(f"Training data has {len(X_train)} records.")
print(f"Testing data has {len(X_test)} records.")
positives = len(y_train.loc[y_train == 1])
positives_test = len(y_test.loc[y_test == 1])
negatives = len(y_train.loc[y_train == 0])
print(
f"""
positive_train = {positives}
negative_train = {len(y_train) - positives}
ratio = {positives / negatives*100:.2f}%
positive_test = {positives_test}
"""
)
# For the training data we have only **379** fraud cases vs **213 226** valid transactions.
# This gives us a **0.18%** positives-to-negatives ratio.
# Also in the test data there are only **113** frauds.
# It's hard to decide how much data we are willing to set aside for evaluation, but when it comes to frauds we must provide a sufficient amount of data for training in order to help the model catch as many types of fraudulent transactions as possible.
# classes = y_train.astype('category')
classes = df["Class"].value_counts()
plt.figure(figsize=(8, 5))
plt.bar(classes.index, classes, color=["blue", "red"])
plt.show()
# There is no need for scaling the *Time* and *Amount* features, because we are going to use only **tree-based methods** in this notebook.
# In the Dataset description we are informed that:
# > Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset.
# We don't know when the first transaction occurred, so we cannot transform it into a datetime format.
df = df.sort_values(by="Time") # , ascending=False)
print(
f'Difference in hours between first and last transaction is {df["Time"].iloc[-1] / 3600:.0f}.'
)
# So the data consists of transactions collected from two days only.
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.histplot(ax=axes[0], data=df, x="Time", kde=True)
sns.histplot(ax=axes[1], data=df[df["Time"] < 24 * 3600], x="Time", kde=True)
plt.show()
# TODO: add titles and change the markers
# Clearly there is a pattern here. We can assume that the bottom parts of the graph correspond to night periods and the upper parts to daytime, since that is when the majority of transactions take place.
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.histplot(ax=axes[0], data=df, x="Amount", bins=100)
sns.histplot(ax=axes[1], data=df[df["Amount"] < 500], x="Amount", kde=True, bins=20)
plt.show()
# TODO: add titles and change the markers
# Out of **284 807** transactions, **more than half** were lower than 25 euros. That makes sense if we think about how our daily expenses are distributed with regard to amount.
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.kdeplot(ax=axes[0], data=df, x="Amount", hue="Class", common_norm=False)
axes[0].set_xlim(-100, 5000)
sns.kdeplot(ax=axes[1], data=df, x="Time", hue="Class", common_norm=False)
# axes[1].set_xlim(0, 200)
plt.show()
# These graphs are used for comparing previously analyzed features with regards to the class of transaction.
# We can see here that the peak of the *Amount* distribution among fraudulent transactions is closer to zero, which may suggest that compromised credit cards were used mainly for small amounts, perhaps so as not to raise suspicion or to trigger authentication.
# For the *Time* feature, fraudulent transactions also follow a slightly different distribution. The plots' bottom parts do not overlap, which may be the effect of timezone differences or simply of different spending patterns for frauds.
# It is important to keep in mind, however, that the number of fraudulent transactions is not sufficient to draw firm conclusions, so it's better not to cling to our suspicions and instead explore the relationships in a more reliable manner.
plt.figure(figsize=(15, 5))
df_corr = df.corr()
sns.heatmap(df_corr, cmap="coolwarm")
plt.show()
# UNDERSAMPLING
# train_set = pd.concat([X_train, y_train], axis=1)
train_set = df.sample(frac=1)
fraud_df = train_set.loc[train_set["Class"] == 1]
non_fraud_df = train_set.loc[train_set["Class"] == 0][:356]
normal_distributed_df = pd.concat([fraud_df, non_fraud_df])
new_df = normal_distributed_df.sample(frac=1, random_state=42)
plt.figure(figsize=(15, 5))
new_df_corr = new_df.corr()
sns.heatmap(new_df_corr, cmap="coolwarm", annot_kws={"size": 20})
plt.show()
# After neutralizing the class imbalance we get significantly different results. This is because the fraud cases now carry the same overall weight as the legitimate ones.
# Notice that if for 100 legitimate transactions (**0's**) some feature ranged from 50 to 100 and for 1 fraud (**1**) it was 200, the correlation between this feature and the Class variable would still be quite low, because those 100 transactions would pull it a hundred times harder towards zero than the single fraudulent case pulls it towards one.
# Conversely, if for 100 valid transactions the feature is around 100 and for 100 frauds it is around 200, the correlation should be close to 1.
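# The cell below is a minimal synthetic sketch of the effect described above (made-up numbers, not the credit-card data): the correlation between a feature and the class label jumps once the classes are balanced.
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
# 10 000 legitimate cases with the feature around 50-100, and 100 frauds around 120
legit = pd.DataFrame({"feature": rng.uniform(50, 100, 10_000), "Class": 0})
fraud = pd.DataFrame({"feature": rng.normal(120, 10, 100), "Class": 1})

imbalanced = pd.concat([legit, fraud])
balanced = pd.concat([legit.sample(100, random_state=42), fraud])

print("correlation, imbalanced:", round(imbalanced.corr().loc["feature", "Class"], 2))
print("correlation, balanced:  ", round(balanced.corr().loc["feature", "Class"], 2))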
plt.figure(figsize=(13, 6))
new_df_corr = new_df.corr().sort_values(by="Class")
# new_df_corr.sort_values(by='V1')
sns.heatmap(new_df_corr, cmap="coolwarm", annot_kws={"size": 20})
plt.show()
# Interestingly, some features have similar correlation strength with other features. It looks like it is possible to reduce the number of them without losing too much information (e.g. leaving only one from V16-V18)
fig, axes = plt.subplots(1, 2, figsize=(10, 5), sharey=True)
sns.boxplot(ax=axes[0], data=df, y="V14", x="Class")
axes[0].set_title("Before undersampling")
sns.boxplot(ax=axes[1], data=new_df, y="V14", x="Class")
axes[1].set_title("After undersampling")
# axes[1].set_ylabel(None)
plt.show()
# * Interesting - when does the true distribution of the data reveal itself?
# * Outlier removal for boosting
# * Reducing number of features
# ## 3. Data transformation
# No need for tree-based models!
# ## 4. Model building
# The function below will take the chosen classifier and perform training and 5-fold cross-validation on the whole training data. It is a computationally expensive procedure, but given the small number of fraud cases (379) we want to make sure that the way we split the data won't affect the overall evaluation. We will see that it does indeed affect the metrics, so it's better to look at how the model performs on average.
# The function is going to plot the Precision-Recall curve, which is built on... There are two different ways to compute it and [this one](https://sanchom.wordpress.com/tag/average-precision/) is the more reliable one: `average_precision_score`
# The graph will include the AU-PR value in the bottom-left corner for each CV iteration and one overall value computed from the concatenation of all of the folds. This is preferable to the averaging method because [of this](https://stats.stackexchange.com/questions/34611/meanscores-vs-scoreconcatenation-in-cross-validation)
# The supporting metrics have been added to the graph in the form of a table, which tells us which threshold yields the highest F-beta score and which values of Precision and Recall it consists of.
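# As a minimal illustration of the concatenation-versus-averaging point (random, hypothetical labels and scores, not our model's predictions):
import numpy as np
from sklearn.metrics import average_precision_score

rng = np.random.default_rng(0)
folds = []
for _ in range(5):
    y = np.zeros(200, dtype=int)
    y[:20] = 1                          # 10% positives in each hypothetical fold
    rng.shuffle(y)
    folds.append((y, rng.random(200)))  # random scores stand in for predict_proba

mean_of_folds = np.mean([average_precision_score(y, p) for y, p in folds])
y_all = np.concatenate([y for y, _ in folds])
p_all = np.concatenate([p for _, p in folds])
print(f"mean of per-fold AU-PR:      {mean_of_folds:.3f}")
print(f"AU-PR on concatenated folds: {average_precision_score(y_all, p_all):.3f}")
# With purely random scores the two numbers are close; on real, imbalanced folds they can differ, which is why the cross-validation summary below scores the concatenation.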
dt = DecisionTreeClassifier(random_state=42)
cv = StratifiedKFold(shuffle=True, random_state=42, n_splits=5)
len(X_train) / 5
def count_supp_metrics(precisions, recalls, thresholds, beta):
f2_scores = (1 + beta**2) * (
(precisions * recalls) / ((beta**2) * precisions + recalls)
)
f2_score = np.max(f2_scores)
precision = precisions[np.argmax(f2_scores)]
recall = recalls[np.argmax(f2_scores)]
threshold = thresholds[np.argmax(f2_scores)]
return f2_score, precision, recall, threshold
def cv_metrics(classifier, X, y):
y_probas = []
test_indices = []
folds_results = []
for train, test in cv.split(X, y):
classifier.fit(X.iloc[train], y.iloc[train]) # fit the data
probas = classifier.predict_proba(X.iloc[test]) # predictions
precisions, recalls, thresholds = precision_recall_curve(
y.iloc[test], probas[:, 1]
)
test_indices.append(test)
y_probas.append(probas[:, 1])
folds_results.append([precisions, recalls, thresholds])
return y_probas, test_indices, folds_results
# cv_metrics(dt, X_train, y_train)
def draw_table(y_probas, test_indices, folds_results):
for i in range(0, 5):
(
p,
r,
t,
) = folds_results[i]
metrics = count_supp_metrics(p, r, t, beta=2)
print(
f"""
AU-PR for split {i+1} = {average_precision_score(y_train.iloc[test_indices[i]], y_probas[i]):.2f}
The best F2-score = {metrics[0]:.2f} for Precision = {metrics[1]:.2f} and Recall = {metrics[2]:.2f}
for probabilities >= {metrics[3]:.2f} classified as fraud."""
)
y_probas = np.concatenate(y_probas)
test_indices = np.concatenate(test_indices)
print(
f"""\n
Total AU-PR = {average_precision_score(y_train.iloc[test_indices], y_probas):.2f}
"""
)
dt_model = cv_metrics(dt, X_train, y_train)
# It's not that long for 5-fold cross-validation and passing whole training data.
draw_table(dt_model[0], dt_model[1], dt_model[2])
def plot_pr_curves(y_probas, test_indices, folds_results):
fig, ax = plt.subplots(figsize=(5, 5))
for i in range(0, 5):
(
p,
r,
t,
) = folds_results[i]
ax.plot(
r,
p,
lw=1,
alpha=0.3,
label="PR fold %d (AUC = %0.2f)"
% (i, average_precision_score(y_train.iloc[test_indices[i]], y_probas[i])),
)
# precision, recall, thresholds = precision_recall_curve(y_real, y_probas)
y_probas = np.concatenate(y_probas)
test_indices = np.concatenate(test_indices)
precision, recall, thresholds = precision_recall_curve(
y_train.iloc[test_indices], y_probas
)
ax.plot(
recall,
precision,
color="b",
label=r"Precision-Recall (AUC = %0.2f)"
% (average_precision_score(y_train.iloc[test_indices], y_probas)),
lw=2,
alpha=0.8,
)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("PR Curve")
plt.legend(loc="lower left")
plt.grid()
plt.show()
# [WEIGHTED DECISION TREES](https://machinelearningmastery.com/cost-sensitive-decision-trees-for-imbalanced-classification/)
len(X_train)
# There is a `class_weight` parameter defined for the decision tree class that allows us to account for the imbalance between classes. Setting it to `'balanced'` is going to adjust the weights inversely proportional to the class frequencies. It follows the formula `n_samples / (n_classes * frequency)` and, citing from [SO](https://stackoverflow.com/a/30982811), *it basically means replicating the smaller class until you have as many samples as in the larger one, but in an implicit way*.
# This means that minority class cases in our example would receive weight equal to 213 605 / (2 * 376) = **284.1** and non-fraud cases would get 213 605 / (2 * 213 229) = **0.5**.
#
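# As a quick check of the formula above, here is a small sketch using sklearn's compute_class_weight helper (the same n_samples / (n_classes * frequency) rule) on the y_train defined earlier:
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

balanced_weights = compute_class_weight("balanced", classes=np.unique(y_train), y=y_train)
print(dict(zip(np.unique(y_train), np.round(balanced_weights, 1))))
# manual check of the weight assigned to the fraud class
print(round(len(y_train) / (2 * (y_train == 1).sum()), 1))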
dt_weight = DecisionTreeClassifier(class_weight="balanced", random_state=42)
dt_weight_model = cv_metrics(dt_weight, X_train, y_train)
draw_table(dt_weight_model[0], dt_weight_model[1], dt_weight_model[2])
# Adding weights not only didn't improve the results, it actually worsened them. Let's try to adjust the weights in a more subtle way and see how that changes things.
dt_weight = DecisionTreeClassifier(class_weight={0: 0.1, 1: 0.9}, random_state=42)
dt_weight_model = cv_metrics(dt_weight, X_train, y_train)
draw_table(dt_weight_model[0], dt_weight_model[1], dt_weight_model[2])
# It looks better, but we can't be sure if it's the best configuration for our case. In the next step we are going to tune this parameter along with the others like `max_depth` and `min_samples_leaf`.
dt_params = {
"max_depth": [2, 3, 4],
"min_samples_leaf": [5, 6, 7],
"class_weight": [{0: 1 - w, 1: w} for w in [0.5, 0.9, 0.99]],
} # 0.5 means equal weights
grid_dt = GridSearchCV(
DecisionTreeClassifier(random_state=42),
dt_params,
scoring="average_precision",
verbose=1,
)
grid_dt.fit(X_train, y_train)
grid_dt.cv_results_
grid_dt.cv_results_["params"][grid_dt.best_index_]
grid_dt.cv_results_["mean_fit_time"]
dt_params = {"max_depth": [4, 5, 6], "min_samples_leaf": [7, 8, 9]}
grid_dt = GridSearchCV(
DecisionTreeClassifier(random_state=42),
dt_params,
scoring="average_precision",
verbose=1,
)
grid_dt.fit(X_train, y_train)
grid_dt.cv_results_["params"][grid_dt.best_index_]
grid_dt.cv_results_["mean_fit_time"]
dt_best = DecisionTreeClassifier(
max_depth=5, min_samples_leaf=8, class_weight={0: 0.1, 1: 0.9}, random_state=42
)
dt_best_model = cv_metrics(dt_best, X_train, y_train)
draw_table(dt_best_model[0], dt_best_model[1], dt_best_model[2])
# A simple decision tree model, after a few small adjustments, provides pretty good results. Let's see where we can go from here.
# `min_weight_fraction_leaf` and `ccp_alpha`
# ## Random Forest
# Next we are going to use RandomForestClassifier with specified `max_depth` and `n_jobs` in order to speed up the computation.
rf = RandomForestClassifier(max_depth=10, n_jobs=-1, random_state=42)
rf.get_params()
rf_model = cv_metrics(rf, X_train, y_train)
draw_table(rf_model[0], rf_model[1], rf_model[2])
# COMMENT
# In contrast to the Decision Tree model, Random Forest seems to underestimate the true probability of a transaction being fraudulent. If we were to classify transactions based on the default 0.5 threshold, we would get much worse results. Maybe we should try applying a higher weight to fraudulent transactions to account for that.
# FEATURE IMPORTANCE
importances = rf.feature_importances_
importances
# importances = [tree.feature_importances_ for tree in rf.estimators_][1]
std = np.std([tree.feature_importances_ for tree in rf.estimators_], axis=0)
std
X_train.columns.tolist().index("V17")
print(importances[17])
print(std[17])
# importances = [tree.feature_importances_ for tree in rf.estimators_]
forest_importances = pd.Series(importances, index=X_train.columns)
fig, ax = plt.subplots(figsize=(10, 6))
forest_importances.plot.barh(ax=ax, xerr=std)
# ax.set_title("Feature importances using MDI")
# ax.set_ylabel("Mean decrease in impurity")
plt.grid()
fig.tight_layout()
clf = rf.estimators_[0]
n_nodes = clf.tree_.node_count
children_left = clf.tree_.children_left
children_right = clf.tree_.children_right
feature = clf.tree_.feature
threshold = clf.tree_.threshold
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, 0)] # start with the root node id (0) and its depth (0)
while len(stack) > 0:
# `pop` ensures each node is only visited once
node_id, depth = stack.pop()
node_depth[node_id] = depth
# If the left and right child of a node is not the same we have a split
# node
is_split_node = children_left[node_id] != children_right[node_id]
# If a split node, append left and right children and depth to `stack`
# so we can loop through them
if is_split_node:
stack.append((children_left[node_id], depth + 1))
stack.append((children_right[node_id], depth + 1))
else:
is_leaves[node_id] = True
print(
"The binary tree structure has {n} nodes and has "
"the following tree structure:\n".format(n=n_nodes)
)
for i in range(n_nodes):
if is_leaves[i]:
print(
"{space}node={node} is a leaf node.".format(
space=node_depth[i] * "\t", node=i
)
)
else:
print(
"{space}node={node} is a split node: "
"go to node {left} if X[:, {feature}] <= {threshold} "
"else to node {right}.".format(
space=node_depth[i] * "\t",
node=i,
left=children_left[i],
feature=feature[i],
threshold=threshold[i],
right=children_right[i],
)
)
from sklearn import tree
fig = plt.figure(figsize=(25, 20))
_ = tree.plot_tree(
clf,
feature_names=X_train.columns,
# class_names=iris.target_names,
filled=True,
)
# This tree is too big. We have 100 of these and they shouldn't be grown so much. For each tree the algorithm needs to:
# 1. create a bootstrap dataset
# 2. at each split, consider only sqrt(features) randomly chosen features
# 3. for each candidate feature, evaluate every possible split
# 4. pick the split with the highest decrease in impurity
# (A toy sketch of this loop structure follows below. Let's then try to impose some restrictions on the tree growing and see what impact it has on overall performance.)
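# A toy, self-contained sketch of that loop structure (synthetic data, a single bootstrap sample and a single node; the real algorithm then recurses on the two children):
import numpy as np

rng = np.random.default_rng(42)
X_toy = rng.normal(size=(1000, 30))
y_toy = (X_toy[:, 3] + 0.5 * rng.normal(size=1000) > 1).astype(int)

def gini(labels):
    p = np.bincount(labels, minlength=2) / len(labels)
    return 1 - np.sum(p**2)

# 1. bootstrap dataset for one tree
boot = rng.integers(0, len(X_toy), len(X_toy))
Xb, yb = X_toy[boot], y_toy[boot]

# 2. at this node, consider only sqrt(n_features) randomly chosen features
candidates = rng.choice(X_toy.shape[1], size=int(np.sqrt(X_toy.shape[1])), replace=False)

# 3./4. scan every candidate threshold and keep the largest impurity decrease
best = (None, np.nan, -np.inf)  # (feature, threshold, Gini decrease)
parent = gini(yb)
for f in candidates:
    values = np.unique(Xb[:, f])
    for thr in (values[:-1] + values[1:]) / 2:
        left, right = yb[Xb[:, f] <= thr], yb[Xb[:, f] > thr]
        decrease = parent - (len(left) * gini(left) + len(right) * gini(right)) / len(yb)
        if decrease > best[2]:
            best = (f, thr, decrease)

print(f"best split: feature {best[0]} at {best[1]:.2f}, Gini decrease {best[2]:.3f}")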
cv = StratifiedKFold(shuffle=True, random_state=42, n_splits=5)
bal_rus_rf = BalancedRandomForestClassifier(
max_depth=10, sampling_strategy=0.01, n_jobs=-1, random_state=42
)
# The `sampling_strategy` parameter stands for the desired ratio of the minority class to the majority class after resampling in each bootstrapped sample.
# Resampling in this example is done by ...
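# To see what a `sampling_strategy` of 0.01 means in terms of class counts, here is a small sketch applying RandomUnderSampler (already imported above) directly to the training data; for binary problems a float value is the desired minority/majority ratio after resampling:
from collections import Counter
from imblearn.under_sampling import RandomUnderSampler

rus = RandomUnderSampler(sampling_strategy=0.01, random_state=42)
X_res, y_res = rus.fit_resample(X_train, y_train)
print("before:", Counter(y_train))
print("after: ", Counter(y_res))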
bal_rus_rf_model = cv_metrics(bal_rus_rf, X_train, y_train)
# Default Random Forest 4min 19s
# BalancedRandomForestClassifier 51.2s! x5 faster
draw_table(bal_rus_rf_model[0], bal_rus_rf_model[1], bal_rus_rf_model[2])
# A significant decrease in time without much loss on the AU-PR metric, although there is a small decrease in the total AU-PR.
# Can we find a value for `sampling_strategy` which provides equally good results in a more reasonable time?
# (Now, thanks to the faster computation, we are able to perform a GridSearch in order to tune the rest of the parameters.)
bal_rf_params = {
"sampling_strategy": [0.1, 0.01, 0.005],
"class_weight": [{0: 1 - w, 1: w} for w in [0.5, 0.9, 0.99]],
}
grid_search_bal_rf = GridSearchCV(
BalancedRandomForestClassifier(n_jobs=-1, random_state=42),
bal_rf_params,
scoring="average_precision",
verbose=3,
)
grid_search_bal_rf.fit(X_train, y_train)
grid_search_bal_rf.cv_results_
avg_scores = grid_search_bal_rf.cv_results_["mean_test_score"]
grid_search_bal_rf.cv_results_["rank_test_score"]
print("The top 3 best combinations are:")
for i in avg_scores.argsort()[-3:][::-1]:
print(
f"""{grid_search_bal_rf.cv_results_["params"][i]} for AU-PR = {grid_search_bal_rf.cv_results_["mean_test_score"][i]:.2f} +/- {grid_search_bal_rf.cv_results_["std_test_score"][i]:.2f}
{grid_search_bal_rf.cv_results_["mean_fit_time"][i]:.2f} seconds required for fitting"""
)
print("\n")
print("The top 3 worst combinations are:")
for i in avg_scores.argsort()[:3][::-1]:
print(
f"""{grid_search_bal_rf.cv_results_["params"][i]} for AU-PR = {grid_search_bal_rf.cv_results_["mean_test_score"][i]:.2f} +/- {grid_search_bal_rf.cv_results_["std_test_score"][i]:.2f}
{grid_search_bal_rf.cv_results_["mean_fit_time"][i]:.2f} seconds required for fitting"""
)
# `sampling_strategy = 0.005` means that we want the ratio between the minority and majority class to be equal to **1:200**, so given 394 fraud cases in the training data the bootstrapped dataset will consist of only approx. **79 000** (200x394) legitimate cases instead of more than **213 000**, as it was originally. This decreases the time for a single fit from 1 min 12 s to 17.19 s, more than five times faster, and for a **1:100** ratio to 8.32 s!
rf_to_compare = RandomForestClassifier(n_jobs=-1, random_state=42)
rf_params = {"class_weight": [{0: 1 - w, 1: w} for w in [0.5, 0.9, 0.99]]}
grid_search_rf = GridSearchCV(
RandomForestClassifier(n_jobs=-1, random_state=42),
rf_params,
scoring="average_precision",
verbose=3,
)
grid_search_rf.fit(X_train, y_train)
avg_scores = grid_search_rf.cv_results_["mean_test_score"]
grid_search_rf.cv_results_["rank_test_score"]
for i in avg_scores.argsort()[-3:][::-1]:
print(
f"""{grid_search_rf.cv_results_["params"][i]} for AU-PR = {grid_search_rf.cv_results_["mean_test_score"][i]:.2f} +/- {grid_search_rf.cv_results_["std_test_score"][i]:.2f}
{grid_search_rf.cv_results_["mean_fit_time"][i]:.2f} seconds required for fitting"""
)
# It looks like introducing weights makes no difference to the model's performance.
# Since we want to focus more on correctly classifying the fraud cases, we can make use of the `min_weight_fraction_leaf` parameter, which tells the model to split a node only if the resulting nodes consist of enough **weighted** examples. This means that (for a 1:99 ratio) classifying 1 fraud case is equivalent to classifying roughly 100 legit cases in terms of satisfying this condition. We'll see in the visualization how that impacts the shape of a tree.
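# A rough numeric sketch of that condition, using hypothetical node counts and the approximate weight/sample figures quoted earlier (the exact numbers are illustrative only):
w_legit, w_fraud = 0.01, 0.99
print(w_fraud / w_legit)  # one fraud carries as much weight as 99 legitimate cases
# weighted fraction of a hypothetical node with 5 frauds and 50 legitimate cases,
# against a bootstrapped sample of roughly 79 000 legitimate and 394 fraud cases
total_weight = 79_000 * w_legit + 394 * w_fraud
node_weight = 50 * w_legit + 5 * w_fraud
print(f"weighted node fraction: {node_weight / total_weight:.4f}")
# a split is only allowed if both resulting nodes keep a fraction >= min_weight_fraction_leaf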
# the most promising
first_brfc = BalancedRandomForestClassifier(
class_weight={0: 0.1, 1: 0.9},
sampling_strategy=0.01,
# min_weight_fraction_leaf =
# min_impurity_decrease = 0.01
n_jobs=-1,
random_state=42,
)
first_brfc_params = {
"min_weight_fraction_leaf": [0, 0.01, 0.1, 0.5],
"min_impurity_decrease": [0, 0.01, 0.05, 0.1],
}
grid_search_first_brfc = GridSearchCV(
first_brfc, first_brfc_params, scoring="average_precision", verbose=3
)
grid_search_first_brfc.fit(X_train, y_train)
avg_scores = grid_search_first_brfc.cv_results_["mean_test_score"]
grid_search_first_brfc.cv_results_["rank_test_score"]
for i in avg_scores.argsort()[-3:][::-1]:
print(
f"""{grid_search_first_brfc.cv_results_["params"][i]} for AU-PR = {grid_search_first_brfc.cv_results_["mean_test_score"][i]:.2f} +/- {grid_search_first_brfc.cv_results_["std_test_score"][i]:.2f}
{grid_search_first_brfc.cv_results_["mean_fit_time"][i]:.2f} seconds required for fitting"""
)
first_brfc_model = cv_metrics(first_brfc, X_train, y_train)
draw_table(first_brfc_model[0], first_brfc_model[1], first_brfc_model[2])
clf = first_brfc.estimators_[1]
# clf = first_brfc.estimators_[1]
from sklearn import tree
fig = plt.figure(figsize=(25, 20))
_ = tree.plot_tree(
clf, feature_names=X_train.columns, class_names=["legit", "fraud"], filled=True
)
first_brfc_w = BalancedRandomForestClassifier(
class_weight={0: 0.1, 1: 0.9}, sampling_strategy=0.005, n_jobs=-1, random_state=42
)
first_brfc_model = cv_metrics(first_brfc, X_train, y_train)
# **STOP**
# for even faster computation!
second_brfc = BalancedRandomForestClassifier(
class_weight={0: 0.01, 1: 0.99},
n_estimators=200,
sampling_strategy=0.01,
max_depth=10,
# min_weight_fraction_leaf = 0.1, # to avoid overfitting
min_impurity_decrease=0.01, # to focus on frauds
n_jobs=-1,
random_state=42,
)
second_brfc_model = cv_metrics(second_brfc, X_train, y_train)
draw_table(second_brfc_model[0], second_brfc_model[1], second_brfc_model[2])
second_brfc.get_params()
clf = second_brfc.estimators_[4]
from sklearn import tree
fig = plt.figure(figsize=(10, 7))
_ = tree.plot_tree(
clf, feature_names=X_train.columns, class_names=["legit", "fraud"], filled=True
)
# bal_rf_params = {'max_depth': [3, 5, 7, 10],
# #'n_estimators': [100, 200],
# #'min_samples_split': [2, 6, 12],
# 'min_weight_fraction_leaf': [0.1, 0.05, 0.01]} # to prevent overfitting
# next_grid_search_bal_rf = GridSearchCV(BalancedRandomForestClassifier(class_weight={1: 0.99}, sampling_strategy= 0.01, n_jobs=-1, random_state=42),
# bal_rf_params, scoring='average_precision', verbose=3)
# next_grid_search_bal_rf.fit(X_train, y_train)
brfc_clf = BalancedRandomForestClassifier(
class_weight={0: 0.01, 1: 0.99},
sampling_strategy=0.01, # min_weight_fraction_leaf=0.01,
n_jobs=-1,
random_state=42,
)
brfc_clf_model = cv_metrics(brfc_clf, X_train, y_train)
draw_table(brfc_clf_model[0], brfc_clf_model[1], brfc_clf_model[2])
brfc_clf_sec = BalancedRandomForestClassifier(
class_weight={1: 0.99},
sampling_strategy=0.01,
min_weight_fraction_leaf=0.01,
n_jobs=-1,
random_state=42,
)
brfc_clf_sec.get_params()
brfc_clf_sec_model = cv_metrics(brfc_clf_sec, X_train, y_train)
draw_table(brfc_clf_sec_model[0], brfc_clf_sec_model[1], brfc_clf_sec_model[2])
clf = brfc_clf_sec.estimators_[0]
from sklearn import tree
fig = plt.figure(figsize=(25, 20))
_ = tree.plot_tree(
clf, feature_names=X_train.columns, class_names=["legit", "fraud"], filled=True
)
brfc_clf_two = BalancedRandomForestClassifier(
class_weight={1: 0.99},
sampling_strategy=0.01,
max_depth=10,
n_jobs=-1,
random_state=42,
)
brfc_clf_two_model = cv_metrics(brfc_clf_two, X_train, y_train)
draw_table(brfc_clf_two_model[0], brfc_clf_two_model[1], brfc_clf_two_model[2])
|
# Our project is a Bitcoin fraudulent transaction detection task in which we use a Graph Neural Network on the Elliptic dataset of Bitcoin transactions, preprocessed by Cynthia Li to remove unlabeled data.
# # Dataset information and Data Understanding
# ### Content
# The Elliptic Data Set maps Bitcoin transactions to real entities belonging to licit categories versus illicit ones (scams, malware, terrorist organizations, ransomware, Ponzi schemes, etc.). A node in the graph represents a transaction, an edge can be viewed as a flow of Bitcoins between one transaction and the other.
# #### Nodes and edges
# The graph is made of 203,769 nodes and 234,355 edges. Two percent (4,545) of the nodes are labelled class1 (illicit). Twenty-one percent (42,019) are labelled class2 (licit). The remaining transactions are not labelled with regard to licit versus illicit.
# #### Features
# There are 166 features associated with each node. Due to intellectual property issues, we cannot provide an exact description of all the features in the dataset. There is a time step associated to each node, representing a measure of the time when a transaction was broadcasted to the Bitcoin network. The time steps, running from 1 to 49, are evenly spaced with an interval of about two weeks. Each time step contains a single connected component of transactions that appeared on the blockchain within less than three hours between each other; there are no edges connecting the different time steps.
# The first 94 features represent local information about the transaction – including the time step described above, number of inputs/outputs, transaction fee, output volume and aggregated figures such as average BTC received (spent) by the inputs/outputs and average number of incoming (outgoing) transactions associated with the inputs/outputs. The remaining 72 features are aggregated features, obtained using transaction information one-hop backward/forward from the center node - giving the maximum, minimum, standard deviation and correlation coefficients of the neighbour transactions for the same information data (number of inputs/outputs, transaction fee, etc.).
## Import libraries
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={"axes.facecolor": "dimgrey", "grid.color": "lightgrey"})
import numpy as np
import pandas as pd
import networkx as nx
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch_scatter
from torch_geometric.data import Data
print(torch.__version__)
# # The PyG built-in GCNConv
# from torch_geometric.nn import GCNConv
from torch_geometric.nn.conv import MessagePassing
import torch_geometric.transforms as T
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax, degree
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
import scipy.sparse as scsp
from sklearn.cluster import KMeans
import copy
import time
# import and inspect edges data
df_edges = pd.read_csv("../input/ellipticdataset/edges.csv")
print(df_edges.shape)
df_edges.head()
# import and inspect nodes data
df_nodes = pd.read_csv("../input/ellipticdataset/nodes.csv")
print(df_nodes.shape)
df_nodes.head()
df_nodes.info()
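# A quick, hedged spot-check of the description above (time steps run from 1 to 49 and no edge crosses a time step), assuming the column layout used later in time_step_split_support: node id in the first column, class in the second, and the time step as the first feature column.
id_to_t = dict(zip(df_nodes.iloc[:, 0], df_nodes.iloc[:, 2]))
src_t = df_edges.iloc[:, 0].map(id_to_t)
dst_t = df_edges.iloc[:, 1].map(id_to_t)
print("time step range:", int(df_nodes.iloc[:, 2].min()), "-", int(df_nodes.iloc[:, 2].max()))
print("edges crossing time steps:", int((src_t != dst_t).sum()))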
# Inspect target distribution
print(df_nodes["class"].value_counts())
pie, ax = plt.subplots(figsize=[9, 6])
labels = ["Non-fraud", "Fraud"]
colors = ["#f9ae35", "#f64e38"]
plt.pie(
x=df_nodes["class"].value_counts(),
autopct="%.2f%%",
explode=[0.02] * 2,
labels=labels,
pctdistance=0.5,
textprops={"fontsize": 14},
colors=colors,
)
plt.title("Target distribution")
plt.show()
# Check if a GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
device
# # Data Splitting
# ### Time-Step Splitting Script
# helper function that support time_step_split and community_split_transd function
def time_step_split_support(nodes, edges):
"""
Split the graph and store node features, edges (represented by adjacency list),
and labels separately by timestamp t (from 1 to 49).
Args:
nodes A dataframe of the node features
edges A dataframe of the graph's adjacency list
Returns:
features_t A list of (|N_t|, d) feature matrices by timestamp
edge_indices A list of (2, |E_t|) adjacency list by timestamp
labels_t A list of (|N_t|) labels by timestamp
"""
features = torch.FloatTensor(nodes.iloc[:, 2:].to_numpy())
times = nodes.iloc[:, 2].to_numpy()
times = torch.LongTensor(
times.reshape(
len(times),
)
)
labels = nodes.iloc[:, 1].to_numpy()
labels = torch.LongTensor(
labels.reshape(
len(labels),
)
)
nodes_id = nodes.iloc[:, 0].to_numpy()
nodes_id = torch.LongTensor(
nodes_id.reshape(
len(nodes_id),
)
)
min_t = torch.min(times) # 1
max_t = torch.max(times) # 49
# Construct nodes of the directed graph for each time step;
# features by time step are stored in "features_t"; labels by
# time step are stored in "labels_t"
features_t = []
labels_t = []
# Create a dictionary where
# <key, value> = <node_id, <<idx, node_index_in_time_t_subgraph>, <t, time_t>>>.
id2idx = {}
for t in range(min_t, max_t + 1):
features_t.append(features[times == t, :])
labels_t.append(labels[times == t])
nodes_t = nodes_id[times == t]
for i in range(nodes_t.shape[0]):
id2idx[nodes_t[i].item()] = {}
id2idx[nodes_t[i].item()]["idx"] = i
id2idx[nodes_t[i].item()]["t"] = t
# Construct adjacency lists of the directed graph (non-symmetric) for each time step;
# adjacency lists for each time step are stored in "edge_indices".
edge_idx_t = [[] for _ in range(min_t, max_t + 1)]
for index in range(edges.shape[0]):
node1_t = id2idx[edges.iloc[index, 0]]["t"]
node1_idx = id2idx[edges.iloc[index, 0]]["idx"]
node2_t = id2idx[edges.iloc[index, 1]]["t"]
node2_idx = id2idx[edges.iloc[index, 1]]["idx"]
edge_idx_t[node1_t - 1].append(
[node1_idx, node2_idx]
) # time_step starts from 1
edge_indices = [torch.LongTensor(edge_idx_t[i]).t() for i in range(len(edge_idx_t))]
return features_t, edge_indices, labels_t
def time_step_split(nodes, edges, device, train_lt=39, test_lt=49):
"""
Create and return the training, validation, and test set, splitted by time step,
where each subgraph at time t is considered as an input of GCN model.
Args:
nodes A dataframe of the node features
edges A dataframe of the graph's adjacency list
device Computing device
train_lt The last time step index of training set
test_lt The last time step index of test set
Returns:
data A dictionary that stores training, validation, and test set,
each value is a list of Data object
graph_info A matrix where each row contains information of the time-step subgraph
[time_step, num_of_nodes, num_of_edges, num_of_illicit_nodes]
"""
features_t, edge_indices, labels_t = time_step_split_support(nodes, edges)
graph_info = np.zeros((len(labels_t), 4), dtype=np.int64)
for t in range(len(labels_t)):
graph_info[t, :] = np.array(
[
t,
features_t[t].shape[0],
edge_indices[t].shape[1],
labels_t[t][labels_t[t] == 1].shape[0],
]
)
train_idx, test_idx = [np.arange(train_lt), np.arange(train_lt, test_lt)]
train_list = [
Data(x=features_t[idx], edge_index=edge_indices[idx], y=labels_t[idx]).to(
device
)
for idx in train_idx
]
test_list = [
Data(x=features_t[idx], edge_index=edge_indices[idx], y=labels_t[idx]).to(
device
)
for idx in test_idx
]
data = {}
data["train"] = train_list
data["test"] = test_list
return data, graph_info
# Split the data by time-step split
data, graph_info = time_step_split(df_nodes, df_edges, device)
for key in data:
print(key, len(data[key]))
# ### Time-Group Splitting Script
# def time_group_split(nodes, edges, device, train_lt = 39, test_lt = 49):
# """
# Create and return the training, validation, and test set, splitted by specific
# time step intervals, where the combination of subgraphs within the time step
# interval is considered as an input of GCN model.
# Args:
# nodes A dataframe of the node features
# edges A dataframe of the graph's adjacency list
# device Computing device
# train_lt The last time step index of training set
# test_lt The last time step index of test set
# Returns:
# data A dictionary that stores training, validation, and test set
# each value is one Data object
# """
# features = torch.FloatTensor(nodes.iloc[:, 2:].to_numpy())
# times = nodes.iloc[:, 2].to_numpy()
# times = torch.LongTensor(times.reshape(len(times),))
# labels = nodes.iloc[:, 1].to_numpy()
# labels = torch.LongTensor(labels.reshape(len(labels),))
# nodes_id = nodes.iloc[:, 0].to_numpy()
# nodes_id = torch.LongTensor(nodes_id.reshape(len(nodes_id),))
# train_idx, test_idx = [np.arange(1, train_lt + 1),np.arange(train_lt + 1, test_lt + 1)]
# data_names = {'train': train_idx, 'test': test_idx}
# # Construct nodes of the directed graph for specific time step intervals.
# # Features are stored in the given dataset name (train/val/test) of a dictionary,
# # 'raw_data', with key "features"; labels are stored with key "labels".
# min_t = torch.min(times) # 1
# max_t = torch.max(times) # 49
# id2idx = {}
# raw_data = {}
# for name in data_names.keys():
# features_set = []
# labels_set = []
# Id_set = []
# set_index = data_names[name]
# for time in set_index:
# features_set.append(features[times == time, :])
# labels_set.append(labels[times == time])
# Id_set.append(nodes_id[times == time])
# features_set = torch.cat(features_set, 0)
# labels_set = torch.cat(labels_set, 0)
# Id_set = torch.cat(Id_set, 0)
# for i in range((Id_set).shape[0]):
# id2idx[Id_set[i].item()] = {}
# id2idx[Id_set[i].item()]['idx'] = i
# id2idx[Id_set[i].item()]['set_name'] = name
# raw_data[name] = {'features': features_set, 'labels': labels_set}
# # Construct adjacency lists of the directed graph (non-symmetric) for
# # specific time intervals. Adjacency lists are stored with key "edge_indices".
# edge_idx_set = {name: [] for name in data_names.keys()}
# for index in range(edges.shape[0]):
# node1_set = id2idx[edges.iloc[index, 0]]['set_name']
# node1_idx = id2idx[edges.iloc[index, 0]]['idx']
# node2_set = id2idx[edges.iloc[index, 1]]['set_name']
# node2_idx = id2idx[edges.iloc[index, 1]]['idx']
# edge_idx_set[node1_set].append([node1_idx, node2_idx]) # time_stamp starts from 1
# for name in data_names.keys():
# raw_data[name]['edge_indices'] = torch.LongTensor(edge_idx_set[name]).t()
# # Construct the training, validation, test set by 'raw_data' and store
# # in a dictionary, 'data'.
# data = {}
# for name in data_names.keys():
# data[name] = Data(x = raw_data[name]['features'],
# edge_index = raw_data[name]['edge_indices'],
# y = raw_data[name]['labels']).to(device)
# return data
# # Split the data by time-group split
# data2 = time_group_split(df_nodes, df_edges, device)
# data2
# ### Random Splitting Script
def random_split_transd(nodes, edges, train_size, device, seed=42):
"""
Create and return the training, validation, and test set by randomly splitting
the node indices to these three sets. Keep edge_index known for all sets.
Args:
nodes A dataframe of the node features
edges A dataframe of the graph's adjacency list
train_size The node size proportion in training set
device Computing device
seed Random seed for data splitting
Returns:
data A Data object that stores node features, edge_index, and labels
dict A dictionary that stores training, validation, test set node indices
"""
features = torch.FloatTensor(nodes.iloc[:, 2:].to_numpy())
labels = nodes.iloc[:, 1].to_numpy()
labels = torch.LongTensor(
labels.reshape(
len(labels),
)
)
nodes_id = nodes.iloc[:, 0].to_numpy()
# Create a dictionary that maps nodeId to index in the dataframe.
id2idx = {}
for i in range(nodes.shape[0]):
id2idx[nodes.iloc[i, 0]] = i
# Construct edge_index with same node indexing as in features and labels
edge_idx = np.zeros((2, edges.shape[0]), dtype=np.int64)
for index in range(edges.shape[0]):
node1 = id2idx[edges.iloc[index, 0]]
node2 = id2idx[edges.iloc[index, 1]]
edge_idx[:, index] = [node1, node2]
edge_index = torch.LongTensor(edge_idx)
train_index, test_index = train_test_split(
np.arange(labels.shape[0]), test_size=1 - train_size, random_state=42
)
# Construct the training, validation, test set and store
# in a dictionary, 'data'.
device = "cuda" if torch.cuda.is_available() else "cpu"
data = Data(x=features, edge_index=edge_index, y=labels).to(device)
return data, {"train": train_index, "test": test_index}
node_sum = df_nodes.shape[0]
train_node_size = np.sum(graph_info[0:39, 1]) / node_sum
data3, split_idx3 = random_split_transd(
df_nodes, df_edges, train_size=train_node_size, device=device
)
data3
train_node_size
def random_split_ind(nodes, edges, train_size, device, seed=42):
"""
Create and return the training, validation, and test set by randomly splitting
the node indices to these three sets. Keep only the node-induced edges within
each set.
Args:
nodes A dataframe of the node features
edges A dataframe of the graph's adjacency list
train_size The node size proportion in training set
device Computing device
seed Random seed for data splitting
Returns:
data A dictionary that stores training, validation, and test set
each value is one Data object
"""
## Create PyG graph separated by time (merge graphs in each set in train/val/test).
features = torch.FloatTensor(nodes.iloc[:, 2:].to_numpy())
labels = nodes.iloc[:, 1].to_numpy()
labels = torch.LongTensor(
labels.reshape(
len(labels),
)
)
nodes_id = nodes.iloc[:, 0].to_numpy()
nodes_id = torch.LongTensor(
nodes_id.reshape(
len(nodes_id),
)
)
# Create random splitting node indices.
nodes_id_train, nodes_id_test, train_idx, test_idx = train_test_split(
nodes_id, range(nodes_id.shape[0]), test_size=1 - train_size, random_state=seed
)
features_set = {"train": features[train_idx], "test": features[test_idx]}
labels_set = {"train": labels[train_idx], "test": labels[test_idx]}
# Find the induced edge indices by the given node indices.
id2idx = {}
for i in range(nodes_id_train.shape[0]):
id2idx[int(nodes_id_train[i])] = (i, "train")
for i in range(nodes_id_test.shape[0]):
id2idx[int(nodes_id_test[i])] = (i, "test")
edge_index = {"train": [], "test": []}
for i in range(edges.shape[0]):
node1 = id2idx[edges.iloc[i, 0]]
node2 = id2idx[edges.iloc[i, 1]]
if node1[-1] == "train" and node2[-1] == "train":
edge_index["train"].append([node1[0], node2[0]])
elif node1[-1] == "test" and node2[-1] == "test":
edge_index["test"].append([node1[0], node2[0]])
data = {}
for name in ["train", "test"]:
edge_index[name] = torch.LongTensor(edge_index[name]).t()
data[name] = Data(
x=features_set[name], edge_index=edge_index[name], y=labels_set[name]
).to(device)
train_data = data["train"]
test_data = data["test"]
return data
data4 = random_split_ind(df_nodes, df_edges, train_size=train_node_size, device=device)
data4
# ### Community Splitting Script
def laplacian(A, alpha=0.1):
"""
Returns the Laplacian matrix of the given adjacency matrix. For the directed
acyclic graph (not connected) with adjacency matrix A, we define a modified
Laplacian matrix as follows:
A_tilde = (1 - alpha) * (A + A^T) + alpha * 11^T
L = I - D_tilde^{-1/2} A_tilde D_tilde^{-1/2}
Args:
A Adjacency matrix of certain graph
alpha Smoothing constant that prevents isolated nodes
Returns:
L Modified Laplacian matrix of the adjacency matrix A
"""
# A is sparse, csr format
A = (1 - alpha) * (A + A.T) + alpha * scsp.csr_matrix(
np.outer(np.ones(A.shape[0]), np.ones(A.shape[0]))
)
D = scsp.diags(np.asarray(np.sum(A, axis=0)).reshape(-1) ** (-1 / 2))
L = scsp.diags(np.ones(A.shape[0])) - D @ A @ D
return L
def adj_list_to_mtx(n, edge_index):
"""
Create a csr-format adjacency matrix by the given adjacency list.
Args:
n The number of nodes in the graph
edge_index The (2, |E|) adjacency list of the graph
Returns:
a csr-format adjacency matrix
"""
edge_mtx = np.zeros((n, n))
for i in range(edge_index.shape[1]):
node1 = int(edge_index[0, i])
node2 = int(edge_index[1, i])
edge_mtx[node1, node2] = 1
return scsp.csr_matrix(edge_mtx)
def nearest_sum(arr, target):
"""
Get a combination of numbers in the given array that sums nearest to
the target number.
Args:
arr The given array
target The target number
Returns:
resid The residual between the summation and the target number
elt_idx_list The indices of the subarray for summation
"""
n = len(arr)
opt_arr = np.zeros((n + 1, target + 1))
for i in range(1, n + 1):
opt_arr[i, :] = opt_arr[i - 1, :]
for j in np.arange(target, 0, step=-1):
if opt_arr[i, j] > 0 and j + arr[i - 1] <= target:
opt_arr[i, j + arr[i - 1]] += 1
opt_arr[i, arr[i - 1]] += 1
elt_list = []
elt_idx_list = []
target_sum = target
idx = n
if opt_arr[idx, target] == 0:
while opt_arr[idx, target_sum] == 0:
print(target_sum)
target_sum -= 1
resid = target - target_sum
while idx > 0 and target_sum != 0:
if opt_arr[idx, target_sum] - opt_arr[idx - 1, target_sum] > 0:
elt_list.append(arr[idx - 1])
elt_idx_list.append(idx - 1)
target_sum -= arr[idx - 1]
idx = idx - 1
return resid, elt_idx_list
def community_split_transd(nodes, edges, train_size, device):
"""
Create and return the training, validation, and test set by merging small
clusters of the graphs. Keep edge_index known for all sets.
Args:
nodes A dataframe of the node features
edges A dataframe of the graph's adjacency list
train_size The node size proportion in training set
Returns:
data A Data object that stores node features, edge_index, and labels
dict A dictionary that stores training, validation, test set node indices
"""
cluster_num = 500
features_t, edge_indices, labels_t = time_step_split_support(nodes, edges)
# Construct the features, labels, and edge_index
features = torch.FloatTensor(nodes.iloc[:, 2:].to_numpy())
labels = nodes.iloc[:, 1].to_numpy()
labels = torch.LongTensor(
labels.reshape(
len(labels),
)
)
nodes_id = nodes.iloc[:, 0].to_numpy()
# Create a dictionary that maps nodeId to index in the dataframe.
id2idx = {}
for i in range(nodes.shape[0]):
id2idx[nodes.iloc[i, 0]] = i
# Construct edge_index with same node indexing as in features and labels
edge_idx = np.zeros((2, edges.shape[0]), dtype=np.int64)
for index in range(edges.shape[0]):
node1 = id2idx[edges.iloc[index, 0]]
node2 = id2idx[edges.iloc[index, 1]]
edge_idx[:, index] = [node1, node2]
edge_index = torch.LongTensor(edge_idx)
# Perform spectral clustering on the entire graph.
# Since the entire graph's adjacency matrix A can be written as a block
# diagonal matrix (blocked by time steps), we can recreate the eigenvalues
# and eigenvectors of A by the eigenvalues and eigenvectors of blocks A_1,
# A_2, ..., A_49 of A.
t = 0
eval_dict = {}
node_num = []
for t in range(49): # max_t = 49
n = features_t[t].shape[0]
A = adj_list_to_mtx(n, edge_indices[t])
L = laplacian(A)
evals, evecs = scsp.linalg.eigsh(L, k=n // 40, which="SM")
for i in range(evals.shape[0]):
eval_dict[evals[i]] = [t, i, evecs[:, i]]
node_num.append(n)
# 'node_blk' store node indices that mark time group separation.
node_blk = np.insert(np.cumsum(node_num), 0, 0)
# Block diagonal matrix has the first number-of-block (i.e., 49) smallest
# eigenvalues to be 0.
small_evals = np.sort(np.array([*eval_dict]))[49 : (49 + cluster_num)]
node_mtx = np.zeros((node_blk[-1], cluster_num))
for i in range(small_evals.shape[0]):
eval = small_evals[i]
t, _, evec = eval_dict[eval]
node_mtx[node_blk[t] : node_blk[t + 1], i] = evec
# Use K-means algorithm to create certain number of clusters (e.g., 500).
kmeans = KMeans(
n_clusters=cluster_num, init="random", random_state=42, n_init=3, max_iter=10
).fit(node_mtx)
comm_count = np.bincount(kmeans.labels_)
# Split and merge the clusters into three sets by the given number of nodes
# in each set.
node_num = nodes.shape[0]
    train_num = int(np.round(node_num * train_size))
train_num_resid, train_clust_idx = nearest_sum(comm_count, train_num)
test_clust_idx = np.delete(np.arange(500), train_clust_idx)
# Split node indices by their clusters into three datasets.
train_idx = []
test_idx = []
train_clust_set = set(train_clust_idx)
test_clust_set = set(test_clust_idx)
for i in range(len(kmeans.labels_)):
if kmeans.labels_[i] in train_clust_set:
train_idx.append(i)
else:
test_idx.append(i)
if train_num_resid > 0:
train_idx.append(test_idx[-train_num_resid:])
test_idx = test_idx[:-train_num_resid]
train_idx = np.array(train_idx)
test_idx = np.array(test_idx)
# Construct the training, validation, test set and store
# in a dictionary, 'data'.
data = Data(x=features, edge_index=edge_index, y=labels).to(device)
return data, {"train": train_idx, "test": test_idx}
data5, split_idx5 = community_split_transd(
df_nodes, df_edges, train_size=train_node_size, device=device
)
data5
# # GCN Model
# Building GCN Model
class GCN(torch.nn.Module):
def __init__(
self,
input_dim,
hidden_dim,
output_dim,
num_layers,
dropout,
return_embeds=False,
):
"""
Initialize a GCN model.
Args:
input_dim Input dimension of node embeddings
hidden_dim Hidden dimension of node embeddings
output_dim Output dimension of node embeddings
num_layers The number of GCN layers
dropout The dropout ratio in (0, 1]
(dropout: the probability of an element getting zeroed)
return_embeds A boolean value determining whether we skip the
classification layer and return node embeddings
"""
super(GCN, self).__init__()
# Construct all convs
self.num_layers = num_layers
self.convs = torch.nn.ModuleList(
[
GCNLayer(hidden_dim, hidden_dim, directed=False)
for i in range(self.num_layers - 1)
]
)
# Construct batch normalization
self.bns = torch.nn.ModuleList(
[torch.nn.BatchNorm1d(hidden_dim) for i in range(self.num_layers - 1)]
)
# First GCN layer
self.convs[0] = GCNLayer(input_dim, hidden_dim, directed=False)
# Last GCN layer
self.last_conv = GCNLayer(hidden_dim, output_dim, directed=False)
self.softmax = torch.nn.LogSoftmax(dim=-1)
self.dropout = dropout
self.return_embeds = return_embeds
def reset_parameters(self):
"""
Reset all learnable parameters in GCN layers and Batch Normalization
Layers.
"""
for conv in self.convs:
conv.reset_parameters()
for bn in self.bns:
bn.reset_parameters()
def forward(self, x, edge_index):
"""
Produce a forward propagation of GCN model. Before the last GCN layer,
we transform the embedding (x) in the following sequence:
x -> GCN_Layer -> Batch_Norm -> ReLU -> Dropout.
At the last GCN layer, the following sequence is applied:
x -> GCN Layer -> Softmax -> output.
Args:
x The node embedding
edge_index The adjacency list of the graph
Returns:
out The predictions of labels / the updated node embedding
"""
x = torch.clone(x.detach())
for l in range(self.num_layers - 1):
# Unweighted graph has weight 1.
            x = self.convs[l](x, edge_index, torch.ones(edge_index.shape[1], device=x.device))
x = self.bns[l](x)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.last_conv(x, edge_index, torch.ones(edge_index.shape[1], device=x.device))
if self.return_embeds:
out = x
else:
out = self.softmax(x)
return out
class GCNLayer(MessagePassing):
def __init__(
self,
in_channels,
out_channels,
bias=True,
directed=False,
self_loop=True,
**kwargs
):
"""
Initialize a GCN layer.
Args:
in_channels In-channel dimension of node embeddings
out_channels Out-channel dimension of node embeddings
bias A boolean value determining whether we add a
learnable bias term in linear transformation
directed A boolean value determining whether we use directed
message passing D^{-1}A or use symmetric normalized
adjacency matrix D^{-1/2}AD^{-1/2}
self_loop A boolean value determining whether we add a self-
loop for each node
"""
super(GCNLayer, self).__init__(**kwargs, aggr="add")
self.in_channels = in_channels
self.out_channels = out_channels
self.directed = directed
self.self_loop = self_loop
# Define the layers needed for the message and update functions below.
# self.lin is the linear transformation that we apply to the embedding.
self.lin = nn.Linear(self.in_channels, self.out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
"""
Reset all learnable parameters in the linear transformation.
"""
self.lin.reset_parameters()
def forward(self, x, edge_index, edge_weight):
"""
Produce a forward propagation of GCN layer.
Args:
x The node embedding
edge_index The (2, |E|) adjacency list of the graph
edge_weight The (|E|) vector specifying the edge weights in the graph
(for unweighted graph, edge weight is 1)
Returns:
An updated node embedding
"""
# Add self-loops to the adjacency matrix.
if self.self_loop:
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
            edge_weight = torch.cat((edge_weight, torch.ones(x.size(0), device=x.device)), dim=-1)
# Apply linear transformation on node features.
x = self.lin(x)
# Compute normalization by updated node degree.
if self.directed:
row, _ = edge_index
deg = degree(row, x.size(0), dtype=x.dtype) # only out-degree
deg_inv = deg.pow(-1)
deg_inv[deg_inv == float("inf")] = 0
norm = deg_inv[row]
else:
row, col = edge_index
deg = degree(col, x.size(0), dtype=x.dtype)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float("inf")] = 0
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
return self.propagate(edge_index, x=(x, x), norm=norm, edge_weight=edge_weight)
def message(self, x_j, edge_weight, norm):
"""
Send the message of the neighboring node (i.e., x_j) to the source node (i.e., x_i).
Args:
x_j The embedding of the neighboring node of source node x_i
edge_weight The edge weight of certain edge
norm Normalization constant determined by self.directed
Returns:
A message sending from the neighboring node to the source node
"""
return norm.view(-1, 1) * x_j * edge_weight.view(-1, 1)
# def train_ind_time_step(model, train_data, optimizer, loss_fn):
# """
# Train the model by using the given optimizer and loss_fn.
# Args:
# model The GCN model
# train_data The Data object that stores x, edge_index, and labels
# only for training set
# optimizer The optimizer
# loss_fn The loss function
# Returns
# The average prediction loss of each time step in the training set
# by the given loss function
# """
# model.train()
# loss = torch.FloatTensor([0]*len(train_data)).to(device)
# optimizer.zero_grad()
# for i, data_t in enumerate(train_data):
# train_slice = model.forward(data_t.x, data_t.edge_index)
# train_label = data_t.y
# loss[i] = loss_fn(train_slice, train_label)
# loss.mean().backward()
# optimizer.step()
# return loss.mean().item()
# def train_ind(model, train_data, optimizer, loss_fn):
# """
# Train the model by using the given optimizer and loss_fn.
# Args:
# model The GCN model
# train_data The Data object that stores x, edge_index, and labels
# only for training set
# optimizer The optimizer
# loss_fn The loss function
# Returns
# The prediction loss by the given loss function
# """
# model.train()
# loss = 0
# optimizer.zero_grad()
# train_slice = model.forward(train_data.x, train_data.edge_index)
# train_label = train_data.y
# loss = loss_fn(train_slice, train_label)
# loss.backward()
# optimizer.step()
# return loss.item()
def train_transd(model, data, train_idx, optimizer, loss_fn):
"""
Train the model by using the given optimizer and loss_fn.
Args:
model The GCN model
data The Data object that stores x, edge_index, and labels
train_idx The node indices in the training set
optimizer The optimizer
loss_fn The loss function
Returns
The prediction loss by the given loss function
"""
model.train()
loss = 0
optimizer.zero_grad()
train_slice = model.forward(data.x, data.edge_index)[train_idx]
train_label = data.y[train_idx]
loss = loss_fn(train_slice, train_label)
loss.backward()
optimizer.step()
return loss.item()
# @torch.no_grad()
# def test_ind_time_step(model, data, save_model_results=False):
# """
# Test the model by using the given splitted datasets.
# Args:
# model The GCN model
# data A dictionary of Data objects that store x, edge_index, and labels
# for three sets
# save_model_results A boolean determining whether we save the model results
# Returns
# The accuracy and auc-roc score of training, validation, and test set
# """
# model.eval()
# # The output of model on each data sets
# eval = {}
# for name in data.keys():
# data_list = data[name]
# eval_report = []
# eval_auc_roc = 0
# for i,data_i in enumerate(data_list):
# out = model.forward(data_i.x, data_i.edge_index)
# y_pred = out.argmax(dim=-1, keepdim=True)
# acc = classification_report(torch.unsqueeze(data_i.y, -1),
# y_pred,output_dict=True, zero_division=0)
# eval_report.append(acc)
# auc_roc = roc_auc_score(torch.unsqueeze(data_i.y, -1),y_pred)
# eval_auc_roc += auc_roc
# report = {}
# for key in eval_report[0].keys():
# if type(eval_report[0][key]) is dict:
# df = pd.DataFrame([sub_report[key] for sub_report in eval_report])
# report[key] = df.mean().to_dict()
# else:
# report[key] = np.mean(np.array([sub_report[key] for sub_report in eval_report]))
# eval_auc_roc /= len(data_list)
# eval[name] = {'report': pd.DataFrame(report), 'auc_roc': eval_auc_roc}
# ### TODO: what is the criterion to save the model results, the whole prediction
# ### y_pred and y_true? or only the test sets' prediction?
# if save_model_results:
# print ("Saving Model Predictions")
# data_new = {}
# data_new ['y_pred'] = y_pred.view(-1).cpu().detach().numpy()
# df = pd.DataFrame(data=data_new )
# # Save locally as csv
# df.to_csv('gcn_ind.csv', sep=',', index=False)
# return eval['train']['report'], eval['test']['report'], eval['train']['auc_roc'], eval['test']['auc_roc']
# @torch.no_grad()
# def test_ind(model, data, save_model_results=False):
# """
# Test the model by using the given splitted datasets.
# Args:
# model The GCN model
# data A dictionary of Data objects that store x, edge_index, and labels
# for three sets
# save_model_results A boolean determining whether we save the model results
# Returns
# The accuracy and auc-roc score of training, validation, and test set
# """
# model.eval()
# # The output of model on each data sets
# train_out = model.forward(data['train'].x, data['train'].edge_index)
# train_pred = train_out.argmax(dim=-1, keepdim=True)
# train_acc = classification_report(torch.unsqueeze(data['train'].y, -1),
# train_pred, zero_division=0)
# train_auc_roc = roc_auc_score(torch.unsqueeze(data['train'].y, -1),
# train_pred)
# test_out = model.forward(data['test'].x, data['test'].edge_index)
# test_pred = test_out.argmax(dim=-1, keepdim=True)
# test_acc = classification_report(torch.unsqueeze(data['test'].y, -1),
# test_pred, zero_division=0, digits = 4)
# test_auc_roc = roc_auc_score(torch.unsqueeze(data['test'].y, -1),
# test_pred)
# ### TODO: what is the criterion to save the model results, the whole prediction
# ### y_pred and y_true? or only the test sets' prediction?
# if save_model_results:
# print ("Saving Model Predictions")
# data = {}
# data['y_pred'] = y_pred.view(-1).cpu().detach().numpy()
# df = pd.DataFrame(data=data)
# # Save locally as csv
# df.to_csv('gcn_ind.csv', sep=',', index=False)
# return train_acc, test_acc, train_auc_roc, test_auc_roc
@torch.no_grad()
def test_transd(model, data, split_idx, save_model_results=False):
"""
Test the model by using the given split_idx.
Args:
model The GCN model
data The Data object that stores x, edge_index, and labels
split_idx A dictionary that stores node indices for three sets
save_model_results A boolean determining whether we save the model results
Returns
The accuracy and auc-roc score of training, validation, and test set
"""
model.eval()
# The output of model on all data
out = model.forward(data.x, data.edge_index)
train_index = split_idx["train"]
test_index = split_idx["test"]
y_pred = out.argmax(dim=-1, keepdim=True)
train_acc = classification_report(
torch.unsqueeze(data.y[train_index], -1), y_pred[train_index], zero_division=0
)
test_acc = classification_report(
torch.unsqueeze(data.y[test_index], -1),
y_pred[test_index],
zero_division=0,
digits=4,
)
train_auc_roc = roc_auc_score(
torch.unsqueeze(data.y[train_index], -1), y_pred[train_index]
)
test_auc_roc = roc_auc_score(
torch.unsqueeze(data.y[test_index], -1), y_pred[test_index]
)
if save_model_results:
print("Saving Model Predictions")
data = {}
data["y_pred"] = y_pred.view(-1).cpu().detach().numpy()
df = pd.DataFrame(data=data)
# Save locally as csv
df.to_csv("gcn_transd.csv", sep=",", index=False)
return train_acc, test_acc, train_auc_roc, test_auc_roc
args = {
"device": device,
"num_layers": 2,
"hidden_dim": 256,
"dropout": 0.5,
"lr": 0.01,
"epochs": 300,
"label_weight": torch.Tensor([0.5, 0.5]),
}
args
# # Evaluate the Model
# Implement the GCN model with the different splitting methods and evaluate them
# ### Random Split - Transductive
model = GCN(
data3.x.shape[1], args["hidden_dim"], 2, args["num_layers"], args["dropout"]
).to(device)
import copy
name = "random split"
np.random.seed(42)
model.reset_parameters()
optimizer = torch.optim.Adam(model.parameters(), lr=args["lr"])
loss_fn = torch.nn.NLLLoss(weight=args["label_weight"])
best_model = None
best_test_auc = 0
best_result = None
losses = []
test_time = {}
t0 = time.time()
for epoch in range(1, 1 + args["epochs"]):
# train with random split
loss = train_transd(model, data3, split_idx3["train"], optimizer, loss_fn)
losses.append(loss)
result = test_transd(model, data3, split_idx3)
train_acc, test_acc, train_auc, test_auc = result
if test_auc > best_test_auc:
best_test_auc = test_auc
best_model = copy.deepcopy(model)
best_result = [train_acc, test_acc, train_auc, test_auc]
if epoch % 50 == 0:
print(
"Test:\n{}\n".format(test_acc),
"Test_auc_roc: {:.4f}".format(test_auc),
"\n",
)
test_time[name] = time.time() - t0
# ### Community Split - Transductive
model = GCN(
data5.x.shape[1], args["hidden_dim"], 2, args["num_layers"], args["dropout"]
).to(device)
name = "community split"
model.reset_parameters()
optimizer = torch.optim.Adam(model.parameters(), lr=args["lr"])
loss_fn = torch.nn.NLLLoss(weight=args["label_weight"])
best_model = None
best_test_auc = 0
best_result = None
losses = []
t0 = time.time()
for epoch in range(1, 1 + args["epochs"]):
    # train with community split
loss = train_transd(model, data5, split_idx5["train"], optimizer, loss_fn)
losses.append(loss)
result = test_transd(model, data5, split_idx5)
train_acc, test_acc, train_auc, test_auc = result
if test_auc > best_test_auc:
best_test_auc = test_auc
best_model = copy.deepcopy(model)
best_result = [train_acc, test_acc, train_auc, test_auc]
if epoch % 50 == 0:
print(
"Epoch: {:02},".format(epoch),
"Loss:{:.4f}".format(loss),
"Test:\n{}\n".format(test_acc),
"Test_auc_roc: {}".format(test_auc),
"\n",
)
test_time[name] = time.time() - t0
# ### Time group split - inductive
# model = GCN(data2['train'].x.shape[1], args['hidden_dim'],
# 2, args['num_layers'], args['dropout']).to(device)
# name = "time group split"
# model.reset_parameters()
# optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'])
# loss_fn = torch.nn.NLLLoss(weight=args['label_weight'])
# best_model = None
# best_test_auc = 0
# best_result = None
# losses = []
# t0 = time.time()
# for epoch in range(1, 1 + args["epochs"]):
# # train with random split
# loss = train_ind(model, data2['train'], optimizer, loss_fn)
# losses.append(loss)
# result = test_ind(model, data2)
# train_acc, test_acc, train_auc, test_auc = result
# if test_auc > best_test_auc:
# best_test_auc = test_auc
# best_model = copy.deepcopy(model)
# best_result = [train_acc, test_acc, train_auc, test_auc]
# if epoch % 50 == 0:
# print('Epoch: {:02},'.format(epoch),
# 'Loss:{:.4f}'.format(loss),
# 'Test:\n{}\n'.format(test_acc),
# 'Test_auc_roc: {}'.format(test_auc),
# '\n'
# )
# test_time[name] = time.time()-t0
# ### Temporal step split - inductive
# model = GCN(data['train'][0].x.shape[1], args['hidden_dim'],
# 2, args['num_layers'], args['dropout']).to(device)
# name = "temporal step split"
# model.reset_parameters()
# optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'])
# loss_fn = torch.nn.NLLLoss(weight=args['label_weight'])
# best_model = None
# best_test_auc = 0
# best_result = None
# losses = []
# t0 = time.time()
# for epoch in range(1, 1 + args["epochs"]):
# # train with random split
# loss = train_ind_time_step(model, data['train'], optimizer, loss_fn)
# losses.append(loss)
# result = test_ind_time_step(model, data)
# train_acc, test_acc, train_auc, test_auc= result
# if test_auc > best_test_auc:
# best_test_auc = test_auc
# best_model = copy.deepcopy(model)
# best_result = [train_acc, test_acc, train_auc, test_auc]
# if epoch % 50 == 0:
# print('Epoch: {:02},'.format(epoch),
# 'Loss:{:.4f}'.format(loss),
# 'Test:\n{}\n'.format(test_acc),
# 'Test_auc_roc: {}'.format(test_auc),
# '\n'
# )
# test_time[name] = time.time()-t0
|