# ### **[Coconut Leaf Pest Identification](https://www.kaggle.com/datasets/shravanatirtha/coconut-leaf-dataset-for-pest-identification)** using transfer learning
# **Problem**: classify pest-infected coconut leaves so that pest detection becomes easier for agriculture
# **Solution:**
# * model used: EfficientNet_B7
# * pretrained on: ImageNet (ILSVRC-2012-CLS)
# * new dataset: [Coconut Leaf Pest Identification](https://www.kaggle.com/datasets/shravanatirtha/coconut-leaf-dataset-for-pest-identification)
# * new classes: 5
#
# standard libraries
import numpy as np
import time
import PIL.Image as Image
import matplotlib.pylab as plt
import matplotlib.image as mpimg
import datetime
from tqdm.keras import TqdmCallback
from skimage import transform
import requests
# tensorflow libraries
import tensorflow as tf
import tensorflow_hub as hub
# path variables
train_path = "/kaggle/input/coconut-leaf-dataset-for-pest-identification/archive"
# define some variables
batch_size = 32
img_height = 300 # reduced from 600 to mitigate the memory issue
img_width = 300 # reduced from 600 to mitigate the memory issue
seed_train_validation = 1
shuffle_value = True
validation_split = 0.4
# load training images
train_ds = tf.keras.utils.image_dataset_from_directory(
train_path,
validation_split=validation_split,
subset="training",
image_size=(img_height, img_width),
batch_size=batch_size,
seed=seed_train_validation,
shuffle=shuffle_value,
)
# load validation images
val_ds = tf.keras.utils.image_dataset_from_directory(
train_path,
validation_split=validation_split,
subset="validation",
image_size=(img_height, img_width),
batch_size=batch_size,
seed=seed_train_validation,
shuffle=shuffle_value,
)
# target class names
class_names = train_ds.class_names
# cleaning the class names
class_names = [x.split("_")[1] for x in class_names]
# view class names
print("the target classes are:", ", ".join(class_names))
# rescaling the images for the model
"""TensorFlow Hub's convention for image models is to expect float inputs in the [0, 1] range"""
normalization_layer = tf.keras.layers.Rescaling(1.0 / 255)
train_ds = train_ds.map(
lambda x, y: (normalization_layer(x), y)
) # Where x—images, y—labels.
val_ds = val_ds.map(
lambda x, y: (normalization_layer(x), y)
) # Where x—images, y—labels.
"""finish the input pipeline by using buffered prefetching with Dataset.prefetch, so you can yield the data from disk without I/O blocking issues."""
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# get the headless model
"""TensorFlow Hub also distributes models without the top classification layer. These can be used to easily perform transfer learning."""
# feature vector model
efficientnet_b7_fv = "https://kaggle.com/models/tensorflow/efficientnet/frameworks/TensorFlow2/variations/b7-feature-vector/versions/1"
feature_extractor_model = efficientnet_b7_fv
# feature extraction layer
"""Create the feature extractor by wrapping the pre-trained model as a Keras layer with hub.KerasLayer. Use the trainable=False argument to freeze the variables, so that the training only modifies the new classifier layer"""
feature_extractor_layer = hub.KerasLayer(
feature_extractor_model, input_shape=(img_width, img_height, 3), trainable=False
)
# add a classification layer
num_classes = len(class_names)
model = tf.keras.Sequential(
[feature_extractor_layer, tf.keras.layers.Dense(num_classes)]
)
# model summary
model.summary()
# compile the model
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=3)
# Define Epochs
NUM_EPOCHS = 5
# train the model
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=NUM_EPOCHS,
callbacks=[early_stopping, TqdmCallback(verbose=0)],
verbose=0,
)
# view model accuracy
model_acc = "{:.2%}".format(history.history["acc"][-1])
print(f"\n Model Accuracy Reached: {model_acc}")
# summarize history for accuracy and loss side by side
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history["acc"])
plt.plot(history.history["val_acc"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
# summarize history for loss
plt.subplot(1, 2, 2)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
#
# prediction on a random image from the internet
# image downloaded from : https://guyanachronicle.com/2017/04/02/narei-vigilant-against-lethal-yellowing-disease/
# expected class = yellowing
#
img_url = "https://guyanachronicle.com/wp-content/uploads/elementor/thumbs/Lethal-Yellowing-n6gryb7qqe5m4mygkxug9jtwgjuaesqvwc09a87uum.jpg"
img_data = requests.get(img_url).content
with open("/kaggle/working/random_image_from_internet.jpg", "wb") as handler:
handler.write(img_data)
test_img_path = "/kaggle/working/random_image_from_internet.jpg"
test_image = Image.open(test_img_path)
test_image = np.array(test_image).astype("float32") / 255
test_image = transform.resize(test_image, (img_width, img_height, 3))
test_image = np.expand_dims(test_image, axis=0)
# make predictions
prediction = model.predict(test_image)
pred_class = prediction.argmax()
print(f"The Predicted Class: {class_names[pred_class]}\n")
# view the test-image
plt.figure(figsize=(8, 8))
test_img = mpimg.imread(test_img_path)
plt.imshow(test_img)
plt.title("predicted class: " + class_names[pred_class])
plt.axis("off")
plt.show()
|
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/insurance/insurance.csv")
df.head()
df.describe()
df.shape
df.columns
print(f"age : {df.age.nunique()}")
print(f"sex : {df.sex.nunique()}")
print(f"bmi : {df.bmi.nunique()}")
print(f"children : {df.children.nunique()}")
print(f"smoker : {df.smoker.nunique()}")
print(f"region : {df.region.nunique()}")
print(f"charges : {df.charges.nunique()}")
df.info()
df.corr()
plt.scatter(df.age, df.charges, color="black")
plt.title("Insurance Age versus Charges")
plt.xlabel("Age")
plt.ylabel("Charges")
plt.show()
df_smoker = df[df["smoker"] == "yes"]
df_nonsmoker = df[df["smoker"] == "no"]
plt.scatter(df_smoker.age, df_smoker.charges, label="Smokers", color="Aquamarine")
plt.scatter(df_nonsmoker.age, df_nonsmoker.charges, label="Non-smokers", color="Black")
plt.title("Insurance Age versus Charges")
plt.xlabel("Age")
plt.ylabel("Charges")
plt.legend()
plt.show()
sns.set_style("white")
df_smoker_reduced = df_smoker.sample(50)
df_nonsmoker_reduced = df_nonsmoker.sample(50)
plt.scatter(
df_smoker_reduced.age,
df_smoker_reduced.charges,
label="Smokers",
color="Aquamarine",
)
plt.scatter(
df_nonsmoker_reduced.age,
df_nonsmoker_reduced.charges,
label="Non-smokers",
color="Black",
)
plt.title("Insurance Age versus Charges")
plt.xlabel("Age")
plt.ylabel("Charges")
plt.legend()
plt.show()
sns.set(color_codes=True)
sns.set_style("white")
sns.jointplot(x="age", y="charges", data=df, color="Black")
sns.pairplot(df)
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
font = {"size": 8}
plt.rc("font", **font)
fig = plt.figure(figsize=(6, 12))
ax = fig.add_subplot(111, projection="3d")
# three_d_plot = Axes3D(fig)
ax.scatter(df.age, df.bmi, df.charges, color="black")
ax.set_xlabel("age")
ax.set_ylabel("bmi")
ax.set_zlabel("charges")
plt.show()
import plotly.express as px
fig = px.scatter_3d(df, x="age", y="bmi", z="charges", color="smoker", size="children")
fig.show()
sns.histplot(data=df, x="charges", hue="sex", kde=True)
ax = (
df["region"]
.value_counts()
.head(15)
    .plot(kind="barh", title="Region Counts", color="black")
)
ax.set_xlabel("count")
ax.set_ylabel("region")
plt.figure(figsize=(20, 15))
sns.countplot(x="age", label="smoker", data=df)
plt.style.use("seaborn")
plt.title("Age")
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv")
df.info()
df.head()
def age_group(value):
if 18 <= value <= 24:
return "18-24"
elif 25 <= value <= 34:
return "25-34"
elif 35 <= value <= 44:
return "35-44"
elif 45 <= value <= 54:
return "45-54"
elif 55 <= value <= 64:
return "55-64"
elif 65 <= value:
return ">65"
def payment(value):
if value == "Cash":
return "Cash"
else:
return "Card"
df["age_group"] = df["age"].map(age_group)
df["payment"] = df["payment_method"].map(payment)
display(df.head())
# Gender, age, category, payment method.
sns.set()
df.groupby("gender").size().plot.bar(rot=0)
plt.show()
df.groupby("age_group").size().plot.bar(rot=0)
plt.show()
df.groupby("category").size().sort_values().plot.barh()
plt.show()
df.groupby(["payment", "payment_method"]).size().unstack().plot.bar(rot=0, stacked=True)
plt.ylim(top=7e4)
plt.legend(loc="upper center", ncol=3)
plt.show()
df.groupby("shopping_mall").size().sort_values().plot.barh()
plt.show()
sns.displot(df, x="price", bins=18)
plt.axvline(x=df.price.mean(), color="red")
plt.axvline(x=df.price.quantile(q=0.25), color="blue", ls="dashed", lw=0.6)
plt.axvline(x=df.price.quantile(q=0.5), color="blue")
plt.axvline(x=df.price.quantile(q=0.75), color="blue", ls="dashed", lw=0.6)
plt.show()
|
# # 1 | Multi Armed Bandits
# A classic example of a Reinforcement Learning problem
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import pandas as pd
import numpy as np
import random
import seaborn as sns
from IPython.display import IFrame
# # 2 | What other things you will learn
# * Probability Density Function
# * Exploration and Exploitation
# Let's assume you are a college student and, unfortunately, you have to maintain $75$% attendance, so you have to go to college almost every day. You have several different ways of reaching your college:
# * **Use your Night Fury Dragon (Toothless)**
# * **Use your McQueen Car**
# * **Use a parachute (Teared)**
# * **Do not go**
# Every path has its own benefits, but there is a twist: the benefits are not fixed/deterministic. They are stochastic, governed by probabilities. A deterministic version would look like this:
# |Path|Benefit|
# |---|---|
# |Use your Night Fury Dragon (Toothless)|10|
# |Use your McQueen Car|5|
# |Use a parachute (Teared)|8|
# |Do not go|100|
# But your benefits do not behave like this.
# Your benefits are random. For example, if `you choose not to go`, you are not sure you will get a benefit of $100$. Maybe you get $69$, maybe $88$, maybe $0$, or maybe $100$; who knows.
# You will think this is a really hard situation. To make things a little easier, these probabilities follow a normal distribution curve.
# **IF YOU DON'T KNOW ABOUT THE NORMAL DISTRIBUTION CURVE, HERE IS A GOOD VIDEO TO GIVE YOU A GLIMPSE: [But What Is The Central Limit Theorem?](https://youtu.be/zeJD6dqJ5lo)**
# # 2.1 | Probability Density Function
# A Normal Distribution looks something like this:
# a bell-shaped curve, in its standard form having a mean of $0$ and a standard deviation of $1$.
# **As this topic will be used a lot in Reinforcement Learning, let's dig a little deeper into this curve: how it is formed and how we can play with it.**
# We know that the curve for $y=x$ looks like this
IFrame("https://www.desmos.com/calculator/512wkwdbob", 400, 400)
# If we tweak the function a little, to $y=e^x$, we get
IFrame("https://www.desmos.com/calculator/7x5pbdhrrn", 400, 400)
# After tweaking the function further, to $y=e^{-x}$, it becomes
IFrame("https://www.desmos.com/calculator/jtzzdklnrd", 400, 400)
# If we make a modulus function here $y=e^{−|x|}$ , it becomes
# **The modulus function converts a negative number into a positive one**
IFrame("https://www.desmos.com/calculator/zodxwedpjj", 400, 400)
# An interesting fact is that the function doesn't really rely on $e$: the base can be any constant (except $1$, $-1$ and $0$) and the curve will have the same general shape.
# So if the shape doesn't change, why do we even put $e$ there? Why don't we just use something else?
# Frankly speaking, $e$ just looks cool there.
# Also, if we square the exponent, as in $y=e^{-|x|^2}$, we get this
IFrame("https://www.desmos.com/calculator/dqd1vrha9i", 400, 400)
# Also, we can shift this function along the $x$-axis by adding/subtracting a constant from $x$, as in $y=e^{-|x-a|^2}$
IFrame("https://www.desmos.com/calculator/0omydtyxdh", 400, 400)
# What if I want to calculate the area under this curve?
IFrame("https://www.desmos.com/calculator/1xlnxhm7zj", 400, 400)
# Surprisingly, this area is always $\sqrt\pi$.
# We know that the total probability of all possible events must be $1$.
# So, to turn the curve into a probability density (total area $1$), we divide the function $e^{-|x - a|^2}$ by $\sqrt{\pi}$.
# So now our formula becomes
# $$y = \frac {1}{\sqrt{\pi}}e^{-|x - a|^2}$$
# Going back a few steps, I would like to add one thing to the formula $y = e^{-|x|^2}$: introducing a variable term to control the width of the curve, we can rewrite it as $y = e^{-c|x|^2}$
# Now if we put $c = 10$, we get this
IFrame("https://www.desmos.com/calculator/c5q3nl2t1g", 400, 400)
# After some experimenting, it turns out that $c = \frac{1}{2}$ is a convenient choice that makes the function smoother to work with.
# So, changing our formula a little, we can rewrite it as $y = e^{-\frac{1}{2}|x|^2}$
# Since we changed the exponent, the area under the curve also changes: it is now $\sqrt{2\pi}$ instead of $\sqrt{\pi}$, so we divide by $\sqrt{2\pi}$ instead. Working this change into the formula we get $$y = \frac{1}{\sqrt{2\pi}}e^{-\frac{1}{2}|x - a|^2}$$
# Finally, to fit this bell curve to data we need two parameters: the shift $a$ and a scale $b$ that divides the squared term. It turns out the right choices are
# $a = \text{mean} = \mu$ and $b = \text{standard deviation}^2 = \sigma^2$. Dividing the exponent by $\sigma^2$ stretches the curve horizontally, so the area becomes $\sigma\sqrt{2\pi}$ and the normalising constant changes accordingly. Now our formula becomes $$y = \frac{1}{\sigma\sqrt{2\pi}}e^{-\frac{1}{2}\frac{|x - \mu|^2}{\sigma^2}}$$
# If you notice, we are squaring a modulus, which is the same as squaring the value itself, so we can simply write $$y = \frac{1}{\sigma \sqrt{2\pi}}e^{\frac{-(x - \mu)^2}{2\sigma^2}}$$
# And this, my friends, is the `Normal Distribution Function`:
# $$F(x) = \frac{1}{\sigma \sqrt{2\pi}}e^{\frac{-(x - \mu)^2}{2\sigma^2}}$$
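# A quick numerical sanity check (not part of the original walkthrough): evaluating this
# formula and integrating it numerically should give an area of approximately 1 for any
# choice of mu and sigma.
def normal_pdf(x, mu=0.0, sigma=1.0):
    return 1.0 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-((x - mu) ** 2) / (2 * sigma**2))

grid = np.linspace(-10, 10, 10001)
print(np.trapz(normal_pdf(grid, mu=1.0, sigma=2.0), grid))  # ~1.0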
# Coming back to the point, the benefit of each path follows one of these `Normal Distribution Curves`, and different paths have different $\mu$ and $\sigma$. So every time you choose a path, there is a high chance of getting a benefit close to the mean of that path's `Normal Distribution Curve`.
# Also, it's your first day at college, so you know neither the benefits from experience nor that these distributions even exist. You are just a kid who was thrown into a college.
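# A minimal illustrative sketch of the setting described above (the means and standard
# deviations below are hypothetical, they are not taken from the text): each path's daily
# benefit is a single draw from that path's own normal distribution.
path_params = {
    "Night Fury Dragon (Toothless)": (10, 2.0),
    "McQueen Car": (5, 1.0),
    "Parachute (Teared)": (8, 3.0),
    "Do not go": (50, 30.0),
}
for path, (mu, sigma) in path_params.items():
    print(f"{path}: benefit today = {np.random.normal(mu, sigma):.1f}")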
# # 2.2 | Exploitation And Exploration
# Suppose there is a zombie apocalypse. The zombies are a bit different this time: they are intelligent. They want to experiment on you, so they lock you in a room with $10$ keys. But there is a twist: you can try only $1$ key every day, and whether a key unlocks the door is again probabilistic; in other words, it is stochastic, not deterministic.
# Suppose you cycle through all $10$ keys in turn over $100$ days. Then you are **Exploring** as much as possible: you are trying every action.
# Suppose you try only $1$ of the $10$ keys for all $100$ days. Then you are **Exploiting** as much as possible: you are performing only $1$ action.
# When you perform the same task/action repeatedly, you are exploiting the environment.
# When you try different tasks/actions, you are exploring the environment.
# Coming back to the problem we want to solve:
# obviously, you want to find the best path, because you (probably) have a long life ahead of you. So what can you actually do to solve this?
# One way: you are too lazy to think much, so you just choose randomly. This technique of choosing is called the **Random Method**.
# So we have found one way of solving this problem.
# * Random Method
# # 3 | Random Method
# Rather than the example given above, we will use another dataset, about `Advertisements`. Just have a look at it
data = pd.read_csv("/kaggle/input/ten-ads-optimization/Ads_CTR_Optimisation.csv")
data
# Consider this the data of a bot that showed one random ad from a list of $10$ advertisements: if the user clicks the ad, the bot gets $1$ point; if the user doesn't, the bot gets $0$.
# Since we are choosing a random ad:
random.randrange(10)
# Now from the dataset, we will see if the ad selected worked for the user or not
x = random.randrange(10)
data.values[0, x]
# Let's try to do this for around $10,000$ rounds
epochs = 10000
for i in range(epochs):
x = random.randrange(10)
    print(data.values[i, x])
# Now let's create a reward counter initialized to zero and accumulate every reward into it
epochs = 10000
total_reward = 0
for i in range(epochs):
ad = random.randrange(10)
reward = data.values[i, ad]
total_reward += reward
# Let's see our `total_reward`
total_reward
#
# Let's try to plot some graphs using what we got. For this we also need to track the running total of rewards, so let's make a list for that purpose
epochs = 10000
total_reward = 0
tr = []
for n in range(0, epochs):
ad = random.randrange(10)
reward = data.values[n, ad]
total_reward = total_reward + reward
tr.append(total_reward)
# Now let's plot the cumulative reward we tracked
sns.lineplot(np.array(tr))
# So we got around $1,200$, and that's really bad :(
# As this was a random approach, we couldn't have expected anything better.
# # 3.1 | Random Method Final Source Code
def Random(data, epochs, trs=False):
total_reward = 0
tr = []
for n in range(0, epochs):
ad = random.randrange(len(data.columns))
reward = data.values[n, ad]
total_reward = total_reward + reward
tr.append(total_reward)
if trs:
        return total_reward, tr
else:
return total_reward
# # 4 | Full Exploration
# Another way is to try each task in turn, not randomly, giving every task an equal number of chances. This method is called **Full Exploration**.
# So now we have two different methods
# * **Random Method**
# * **Full Exploration Method**
# We can actually make some small tweaks to the function above to make our work easier.
# We just need to introduce a counter that starts at $0$ and increments on every loop iteration, resetting itself to $0$ when it reaches $10$.
total_reward = 0
tr = []
i = 0
for n in range(10000):
reward = data.values[n, i]
total_reward = total_reward + reward
tr.append(total_reward)
i += 1
if i == 10:
i = 0
# Let's plot the cumulative reward for this too
sns.lineplot(np.array(tr))
# # 4.1 | Full Exploration Final Source Code
def full_exploration(data, epochs, trs=False):
total_reward = 0
tr = []
i = 0
for n in range(0, epochs):
reward = data.values[n, i]
total_reward = total_reward + reward
tr.append(total_reward)
i += 1
if i == 10:
i = 0
if trs:
        return total_reward, tr
else:
return total_reward
# # 5 | Full Exploitation
# Let's assume you are lazy (probably no need to assume) and do not want to use your brain at all. You are in the same zombie-apocalypse situation and the zombies have locked you up. You pick one key at random and commit to it, using only that $1$ key for all $100$ days. So you are in **Full Exploitation** mode.
# So now we have three methods
# * **Random Method**
# * **Full Exploration**
# * **Full Exploitation**
# It has its own benefits and its own cons, who knows...? Our job is to implement this approach and see the results. We can make small tweaks to the function we made for the **Random Method**: this time we just need to choose randomly once rather than every time.
total_reward = 0
tr = []
ad = random.randrange(len(data.columns))
for n in range(0, epochs):
reward = data.values[n, ad]
total_reward = total_reward + reward
tr.append(total_reward)
sns.lineplot(np.array(tr))
# Let's put all of this into a function, for fun :)
# # 5.1 | Full Exploitation Final Source Code
def full_exploitation(data, epochs, trs=False):
total_reward = 0
tr = []
ad = random.randrange(len(data.columns))
for n in range(0, epochs):
reward = data.values[n, ad]
total_reward = total_reward + reward
tr.append(total_reward)
if trs:
        return total_reward, tr
else:
return total_reward
# # 6 | E-Greedy Method
# Let's assume you have, out of nowhere, gotten a little smarter, so you decide to randomly explore your environment from time to time while otherwise exploiting. How often you act randomly is determined by a hyperparameter $E$ (usually written $\epsilon$, epsilon, hence the name).
# So now we have $4$ methods.
# * **Random Method**
# * **Full Exploration**
# * **Full Exploitation**
# * **E-Greedy Method**
# It has its own benefits and its own cons, who knows...? Our job is to implement this approach and see the results. We can make small tweaks to the function we made for **Full Exploitation**: this time we occasionally pick a random ad and switch to it if it earns a reward.
total_reward = 0
tr = []
ad = random.randrange(len(data.columns))
for n in range(10000):
if n % 10 == 0:
adv = random.randrange(len(data.columns))
if data.values[n, adv] == 1:
ad = adv
reward = data.values[n, ad]
total_reward = total_reward + reward
tr.append(total_reward)
sns.lineplot(np.array(tr))
# And we can see some improvement in our rewards
# Let's now put all of this into a function
def e_greedy(data, epochs, trs=False, epsilon=0.1):
    total_reward = 0
    tr = []
    ad = random.randrange(len(data.columns))
    for n in range(0, epochs):
        # with probability epsilon, try a random ad and switch to it if it earns a reward
        if random.random() < epsilon:
            adv = random.randrange(len(data.columns))
            if data.values[n, adv] == 1:
                ad = adv
        reward = data.values[n, ad]
        total_reward = total_reward + reward
        tr.append(total_reward)
    if trs:
        return total_reward, tr
    else:
        return total_reward
|
# In this notebook, I will be fitting a LightGBM classification model on this simulated dataset, tuning its hyperparameters through Bayesian Optimization (with a Gaussian process as the surrogate model). I will also attempt to interpret the effects of the features.
# note: optuna and the gp_bo package are required for the hyperparameter tuning section below
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
plt.style.use("ggplot")
curr_data_path = Path("/kaggle/input/playground-series-s3e12/")
train = pd.read_csv(curr_data_path / "train.csv")
test = pd.read_csv(curr_data_path / "test.csv")
del train["id"]
train
# ## Basic EDA
# check for missing values if any
train.isnull().sum()
test.isnull().sum()
# summary statistics
train.describe()
corr_matrix = train.drop("target", axis=1).corr(method="pearson")
mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
fig, ax = plt.subplots(1, 1, figsize=(6, 4), dpi=150)
_ = sns.heatmap(corr_matrix, annot=True, fmt=".2f", mask=mask, ax=ax)
_ = ax.set_facecolor("w")
# pair plots
_ = sns.pairplot(train, hue="target")
n_rows = 2
n_cols = 3
fig, axs = plt.subplots(n_rows, n_cols, figsize=(1.5 * n_cols, 2.5 * n_rows), dpi=150)
for i in range(n_rows):
for j in range(n_cols):
col_idx = n_cols * i + j
_ = sns.boxplot(train, y=train.columns[col_idx], x="target", ax=axs[i, j])
fig.tight_layout()
X = train.drop(["target"], axis=1)
y = train["target"].values
# ## Training a lightGBM model
# The function `fit_model` in the cell below, trains a lightgbm classification model. The function also allows the specification of hyperparameters as a dictionary through the `config` argument. If `config` is not specified, default values for the hyperparameters are used.
from lightgbm import LGBMClassifier
from typing import Optional, Dict
def fit_model(
X: pd.DataFrame,
y: np.ndarray,
config: Optional[Dict] = None,
n_jobs: int = 1,
) -> LGBMClassifier:
"""
Train a lightgbm classifier
"""
model = LGBMClassifier(
subsample_freq=1, # this can be specified in config as well
n_jobs=n_jobs,
importance_type="gain",
)
if config:
# if config is supplied, set the model hyperparameters
model.set_params(**config)
return model.fit(X, y)
# Let's fit a model using the default hyperparameters, and check the feature importances.
np.random.seed(12)
model_default = fit_model(X, y, n_jobs=4)
# **Note**: In lightgbm, the default feature imporances are based on the number of splits in which a feature appears. This is sort of different from the defaults in xgboost and catboost, which calculate importance based on the total gain from splits involving the feature. To enable this measure, pass `importance_type='gain'` in the model constructor. The importance measure based on number of splits can be misleading, since a feature can potentially be involved in many splits with miniscule effects on the response.
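# As a quick illustration (not in the original write-up), the fitted Booster object can
# report both conventions, so the split-count and gain-based importances can be compared
# side by side.
imp_compare = pd.DataFrame(
    {
        "n_splits": model_default.booster_.feature_importance(importance_type="split"),
        "total_gain": model_default.booster_.feature_importance(importance_type="gain"),
    },
    index=model_default.booster_.feature_name(),
)
imp_compare.sort_values("total_gain", ascending=False)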
# gain based feature importances from the model with the default hyperparameter - not necessarily the most reliable
feat_imp = pd.Series(model_default.feature_importances_, X.columns).sort_values(
ascending=True
)
feat_imp.plot(kind="barh")
# ## Hyperparameter tuning via Bayesian Optimization
# I will be using 4 replicates of stratified 5 fold cross-validation to evaluate generalization performance.
# **Bayesian Optimization**:
# Optuna requires a sampling strategy to propose new trials. The `UnconstrainedGPEISampler` from the gp_bo library uses a Gaussian process (GP) based Bayesian Optimization (BO) strategy to propose a new trial given the history of trials. There are two parts:
# 1. First, a GP model is trained to predict the cross-validated metric (here, ROC AUC) as a function of the LightGBM hyperparameters. This GP model can then generate predictions for any new hyperparameter configuration, as well as an estimate of uncertainty for this prediction.
# 2. Then, we optimize the Expected Improvement (EI) acquisition function, which balances (a) selecting the configuration with the best predicted metric vs (b) selecting the configuration with the largest prediction uncertainty.
# Under many conditions, the `UnconstrainedGPEISampler` will be faster (i.e., will require fewer trials on average) than optuna's default `TPESampler`. Currently, it supports unconstrained single-objective problems.
#
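# A minimal illustrative sketch (not the gp_bo library's internal code): for a maximization
# problem, given the GP posterior mean `mu` and standard deviation `sigma` at a candidate
# configuration, EI can be computed in closed form as below.
from scipy.stats import norm


def expected_improvement(mu, sigma, best_so_far, xi=0.0):
    # xi is an optional exploration bonus; guard against zero predictive uncertainty
    sigma = np.maximum(sigma, 1e-12)
    z = (mu - best_so_far - xi) / sigma
    return (mu - best_so_far - xi) * norm.cdf(z) + sigma * norm.pdf(z)


# e.g. a candidate predicted at 0.845 AUC +/- 0.01 when the best trial so far is 0.84
print(expected_improvement(0.845, 0.01, 0.84))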
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import roc_auc_score
import optuna
from gp_bo.optim.optuna_ei import UnconstrainedGPEISampler
import joblib
import warnings
# folds
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=4, random_state=1)
folds = list(cv.split(X, y))
def fit_and_test_fold(config: Dict, train_index, test_index) -> float:
X_train = X.iloc[train_index, :]
X_test = X.iloc[test_index, :]
y_train = y[train_index]
y_test = y[test_index]
# fit model on training data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model = fit_model(X_train, y_train, config)
# generate predictions on test data
test_prob = model.predict_proba(X_test)[:, 1]
return roc_auc_score(y_test, test_prob)
# loss function
def objective(trial):
config = {
"n_estimators": trial.suggest_int("n_estimators", 100, 1500, log=True),
"learning_rate": trial.suggest_float("learning_rate", 5e-4, 0.75, log=True),
"num_leaves": trial.suggest_int("num_leaves", 2, 64, log=True),
"reg_alpha": trial.suggest_float(
"reg_alpha", 1e-8, 10, log=True
), # l1 regularization
"reg_lambda": trial.suggest_float(
"reg_lambda", 1e-8, 10, log=True
), # l2 regularization
# 'colsample_bytree': trial.suggest_float("colsample_bytree", 0.17, 1),
# 'subsample': trial.suggest_float("subsample", 0.1, 1),
# 'subsample_freq': trial.suggest_int('subsample_freq',1,50),
"min_child_samples": trial.suggest_int("min_child_samples", 2, 40),
"min_split_gain": trial.suggest_float(
"min_split_gain", 1e-8, 10, log=True
), # gamma in xgboost
}
cv_auc = joblib.Parallel(n_jobs=4, verbose=0)(
joblib.delayed(fit_and_test_fold)(config, train_index, test_index)
for train_index, test_index in folds
)
# log the std error for the auc estimate
trial.set_user_attr("auc_std", np.std(cv_auc) / np.sqrt(len(cv_auc)))
return np.mean(cv_auc)
# the first `n_startup_trials` trials are drawn through QMC sampling
sampler = UnconstrainedGPEISampler(
n_startup_trials=8, seed=456 # recommended minimum: max(5,1+no_of_hyperparameters)
)
study = optuna.create_study(directions=["maximize"], sampler=sampler, study_name="lgbm")
joblib.dump(study, "lgbm_study.pkl")
try:
study.optimize(objective, n_trials=100, timeout=None)
joblib.dump(study, "lgbm_study.pkl") # save study for further runs
except Exception as e:
print(e)
results = study.trials_dataframe(
attrs=("number", "value", "user_attrs", "params", "duration")
).rename(columns={"value": "auc", "user_attrs_auc_std": "auc_std"})
results["duration"] = results["duration"] / np.timedelta64(1, "s")
results = results.sort_values(by="auc", ascending=False)
results.to_csv("cv_loss_history.csv", index=False)
# print the top 10 results
results.head(10)
# ## Final model
model = fit_model(X, y, study.best_params, n_jobs=4)
# save model
model.booster_.save_model(filename="model_lgbm.txt", importance_type="gain")
# gain based feature importances - not necessarily the most reliable
feat_imp = pd.Series(model.feature_importances_, X.columns).sort_values(ascending=True)
feat_imp.plot(kind="barh")
# ## SHAP measures
# The default feature importances computed by lightgbm (or any tree based model) can be misleading. Here, I'll use SHAP measures to check the importance of each feature.
import shap
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)[1]
# The SHAP summary plot is below. When I ran the notebook with limited runs of BO based hyperparameter tuning, `calc` was again the most important predictor by a distance, but there were some differences in the ordering of the remaining predictors.
# Average of SHAP value magnitudes across the dataset
shap.summary_plot(shap_values, X, plot_type="bar")
# ## ALE plots
# Alternatively, we can visualize the effects of the individual predictors using ALE plots (https://academic.oup.com/jrsssb/article/82/4/1059/7056085). The ALE plots can be used for
# 1. interpreting the individual effects of the predictors - e.g.: linear vs non-linear, monotonic vs non-monotonic, quadratic, etc.
# 2. determining the most influential predictors. The influence can be estimated through the range of y-axis values in each plot. The larger the range, the larger the effect.
# Unlike partial dependence plots, ALE plots are not sensitive to correlations between pairs of descriptors
# For binary classification problems, we will generate ALE plots for the predicted log-odds. If all the first-order ALE profiles for the log-odds are linear, then perhaps a logistic regression model can capture the dependencies better.
#
from functools import partial
def predict_log_odds(data: pd.DataFrame, model) -> np.ndarray:
"""
Convenience prediction to generate predicted log odds from
an estimator with scikit-learn API
"""
probs = np.clip(model.predict_proba(data)[:, -1], 1e-6, 1 - 1e-6)
return np.log(probs) - np.log(1 - probs)
model_log_odds = partial(predict_log_odds, model=model)
# We will be using the alepython library for generating ALE plots. The library is not posted to the offical PyPi repository, but is available from Github (https://github.com/blent-ai/ALEPython). Run the code cell below to install the library.
# install alepython library for visualizing feature effects
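# (the install cell itself was not preserved in this script; assuming the package can be
# installed directly from its GitHub repository, a typical notebook install line would be:)
# !pip install git+https://github.com/blent-ai/ALEPython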
# The default plot function from this library has limited customizability. I will manually extract and plot the computed ALE values. The hidden cell below contains utility scripts for generating the 1st order ALE effects.
from alepython.ale import _first_order_ale_quant
from typing import Callable, Iterable, Union
def first_order_aleplot_quant(
predictor: Callable,
train_data: pd.DataFrame,
feature: str,
ax: plt.Axes,
bins: int = 10,
**kwargs
):
"""
Plot and return the first-order ALE function for a continuous feature.
Arguments
---------
predictor: callable
The prediction function. For scikit-learn regressors, pass the `.predict`
method. For scikit-learn classifiers, either pass the `.predict_proba` method
or pass a custom log-odds function. For other pass a custom predict function
that takes in a pandas dataframe as input
train_data: pd.DataFrame
Training data on which the model was trained. Cannot pass numpy ndarrays.
feature: str
Feature name. A single column label
ax: matplotlib.axes.Axes
Pre-existing axes to plot onto
bins : int
This defines the number of bins to compute. The effective number of bins may
be less than this as only unique quantile values of train_data[feature] are
used.
**kwargs: plot properties, optional
Additional keyword parameters passed to `ax.plot`.
Returns
---------
ax: matplotlib.axes.Axes
The matplotlib axes containing the plot
ale: np.ndarray
first order ALE
quantiles: np.ndarray
The quantiles used
"""
ale, quantiles = _first_order_ale_quant(predictor, train_data, feature, bins)
bin_centers = 0.5 * (quantiles[:-1] + quantiles[1:])
_ = ax.plot(bin_centers, ale, **kwargs)
_ = sns.rugplot(train_data[feature], ax=ax, color="k", alpha=0.2)
_ = ax.grid(linestyle="-", alpha=0.5)
_ = ax.set_xlabel(feature)
_ = ax.set_ylabel(r"$f_1$(%s)" % feature)
return ax, ale, quantiles
# The first-order ALE plots for the 6 predictors are shown below. When I ran the notebook with limited runs of BO based hyperparameter tuning, `calc` was the most important predictor by a distance. The predictors in decreasing order of influence:
# `calc` >> `cond` > `gravity` > `osmo` > `ph` > `urea`
num_rows = 2
num_cols = 3
fig, axs = plt.subplots(
num_rows, num_cols, figsize=(4 * num_cols, 3 * num_rows), dpi=150
)
for i in range(num_rows):
for j in range(num_cols):
# select correct feature
feature = X.columns[num_cols * i + j]
_ = first_order_aleplot_quant(model_log_odds, X, feature, bins=50, ax=axs[i, j])
fig.tight_layout()
# save the plots
fig.savefig("ale_log_odds.png", bbox_inches="tight")
fig.savefig("ale_log_odds.pdf", bbox_inches="tight")
# ## Final submission
submission = pd.DataFrame(
{
"id": test["id"].values,
"target": model.predict_proba(test.drop(["id"], axis=1))[:, -1],
}
)
submission.to_csv("submission.csv", index=False)
|
# # Credit Card Customers - EDA and XGBoost
# **Introduction**
# This notebook uses data available at [https://www.kaggle.com/sakshigoyal7/credit-card-customers](https://www.kaggle.com/sakshigoyal7/credit-card-customers) and:
# * Reads in the relevant data.
# * Performs exploratory data analysis (EDA) to identify trends and estimate feature importance.
# * Uses an XGBoost classifier, taking advantage of its scikit-learn API to perform cross-validation and hyperparameter tuning.
# * Once the final model is built, feature importance for the model is evaluated.
# * Finally, insights and potential next steps are discussed.
# To begin, we read in the data and get some basic information, checking column data types assigned by Pandas and establishing whether there is missing data in any columns.
import pandas as pd
# whilst not useful for analysis, CLIENTNUM is taken as the index: were the model productionised, it could be used as a key to match predictions to customers
# similarly, the original Naive Bayes classifier columns were included in the data, so these are removed
df = pd.read_csv(
"/kaggle/input/credit-card-customers/BankChurners.csv", index_col="CLIENTNUM"
).iloc[:, :-2]
print(df.shape)
print(df.info())
# Our dataframe has 10,127 rows and 20 columns, with *Attrition_Flag* our target variable. The majority of columns are numeric type, but we also have object dtype columns, which relate to several categorical variables. We will need to handle these columns before building the model.
# Firstly, let's visualize the data in each column, grouping results by *Attrition_Flag*. We'll use boxplots for numeric columns and countplots for categorical columns
from pandas.api.types import is_numeric_dtype
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
numeric_cols = df.select_dtypes("number").columns.tolist()
fig, ax = plt.subplots(4, 4, figsize=(20, 15))
ax = ax.flatten()
for i in range(len(numeric_cols)):
boxplot = sns.boxplot(
x=df["Attrition_Flag"],
y=df[numeric_cols[i]],
ax=ax[i],
order=["Attrited Customer", "Existing Customer"],
)
boxplot.set_title(numeric_cols[i])
plt.tight_layout()
category_cols = df.select_dtypes("object").columns.tolist()
print(category_cols)
fig, ax = plt.subplots(2, 3, figsize=(20, 15))
ax = ax.flatten()
for i in range(len(category_cols)):
    countplot = sns.countplot(
        x=df[category_cols[i]],
        hue=df["Attrition_Flag"],
        ax=ax[i],
        hue_order=["Attrited Customer", "Existing Customer"],
    )
    countplot.set_title(category_cols[i])
plt.tight_layout()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## The dataset analyzed below contains video game sales data for the period 1980 to 2020. It has information on the publisher, genre, platform, etc., as well as regional sales in North America, Europe and Japan, plus global sales
# #### The dataset is a CSV file and it can be read into Pandas dataframe using the function pd.read_csv()
vgames = pd.read_csv("/kaggle/input/videogames/vgsales.csv")
# #### The data loaded into the dataframe will be verified using the function - 'head()'
vgames.head()
# #### Number of columns, their datatypes, counts of each column will be displayed using the function - 'info()'
vgames.info()
# #### It can be seen that there are a total of 16598 entries in the dataset, with 271 missing entries in the 'Year' column and 58 missing entries in the 'Publisher' column
# #### One way to deal with missing values is to delete the affected rows or columns; the other is to replace the nulls with appropriate values
# #### Since 'Year' is numerical data, we can compute the mean year from the dataset and replace the missing values with it
# #### Since 'Publisher' is non-numerical data, we cannot perform an aggregate calculation on it. Hence, we will delete the rows with missing 'Publisher' values; only 58 out of 16598 rows are affected, which wouldn't significantly affect our analysis
mean_year = vgames["Year"].mean()
meanyear = round(mean_year)
meanyear
vgames["Year"].fillna(meanyear, inplace=True)
vgames.head()
vgames.info()
vgames.dropna(subset=["Publisher"], inplace=True)
vgames.info()
# #### We have replaced the missing 'Year' values with the mean year and dropped the rows with missing 'Publisher' values using the 'dropna' function
# #### Now, it can be seen that the 'Year' column is not stored as a datetime. Hence it has to be converted to the 'datetime' datatype
vgames["Year"] = pd.to_datetime(vgames["Year"], format="%Y")
vgames.info()
vgames.head()
# #### After converting the 'Year' column to a date, the data is displayed in yyyy-mm-dd format. This is because of pd.to_datetime(). To extract only the year, we can use 'dt.year' on the desired column
vgames["Year"] = vgames["Year"].dt.year
vgames.head()
vgames.info()
vgames.head()
# #### But the 'Year' column as an integer type wouldn't be convenient for further analysis. We will convert the 'Year' column to datetime once again and let the data stay in yyyy-mm-dd format. While analysing, we will extract only the year if required
vgames["Year"] = pd.to_datetime(vgames["Year"], format="%Y")
vgames.info()
vgames.head()
# #### To get the aggregate data of the numerical columns, we can use the 'describe()' function to know if there are any outliers among other things
vgames.describe()
vgames
# ### Since the data has been cleaned and there are no missing values or outliers, we can get started with our analysis of the data
vgames["Platform"].value_counts().nlargest(10)
vgames["Name"].nunique()
vgames["Genre"].value_counts()
vgames["Publisher"].nunique()
vgames["Publisher"].value_counts().nlargest(10)
|
import numpy as np
import pandas as pd
import re
data = pd.read_csv("/kaggle/input/chatgpt-paraphrases/chatgpt_paraphrases.csv")
data
category = {}
# the 'paraphrases' column stores a stringified Python list; strip the surrounding
# brackets and split on ", " to recover the individual paraphrases
for i in range(len(data)):
    chatgpt = data.iloc[i]["paraphrases"][1:-1].split(", ")
    # keep only the first paraphrase (stripping its quotes) and label it as ChatGPT output
    for j in chatgpt[:1]:
        category[j[1:-1]] = "chatgpt"
    # the original prompt text is labelled as human-written
    category[data.iloc[i]["text"]] = "human"
# category
data = pd.DataFrame(category.items(), columns=["text", "category"])
data = data.sample(frac=1)
data = data[:10000]
data
data["category"].value_counts()
from sklearn.model_selection import train_test_split
X = data["text"]
y = data["category"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
X_train_tfidf = vectorizer.fit_transform(X_train)
X_test_tfidf = vectorizer.transform(X_test)
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import (
RandomForestClassifier,
ExtraTreesClassifier,
AdaBoostClassifier,
BaggingClassifier,
GradientBoostingClassifier,
)
import xgboost as xgb
lg = LogisticRegression(penalty="l1", solver="liblinear")
sv = SVC(kernel="sigmoid", gamma=1.0)
mnb = MultinomialNB()
dtc = DecisionTreeClassifier(max_depth=5)
knn = KNeighborsClassifier()
rfc = RandomForestClassifier(n_estimators=50, random_state=2)
etc = ExtraTreesClassifier(n_estimators=50, random_state=2)
abc = AdaBoostClassifier(n_estimators=50, random_state=2)
bg = BaggingClassifier(n_estimators=50, random_state=2)
gbc = GradientBoostingClassifier(n_estimators=50, random_state=2)
xgb = xgb.XGBClassifier(n_estimators=50, random_state=2)
from sklearn import metrics
def score_prediction(model, X_train, X_test, y_train, y_test):
model.fit(X_train, y_train)
pr = model.predict(X_test)
acc_score = metrics.accuracy_score(y_test, pr)
pre_score = metrics.precision_score(
y_test, pr, average="binary", pos_label="chatgpt"
)
recall = metrics.recall_score(y_test, pr, average="binary", pos_label="chatgpt")
f1 = metrics.f1_score(y_test, pr, average="binary", pos_label="chatgpt")
mcc = metrics.matthews_corrcoef(y_test, pr)
return acc_score, pre_score, recall, f1, mcc
acc_score = {}
pre_score = {}
recall_score = {}
f1_score = {}
mcc_score = {}
clfs = {
"LR": lg,
"SVM": sv,
# 'MNB':mnb,
"DTC": dtc,
"KNN": knn,
"RFC": rfc,
"ETC": etc,
"ABC": abc,
"BG": bg,
"GBC": gbc,
# 'XGB':xgb
}
for name, clf in clfs.items():
(
acc_score[name],
pre_score[name],
recall_score[name],
f1_score[name],
mcc_score[name],
) = score_prediction(clf, X_train_tfidf, X_test_tfidf, y_train, y_test)
acc_score
pre_score
recall_score
f1_score
mcc_score
|
# # Diabetes detection exercise
# ## Reading the data
# Display the plots in Jupyter
# Import the libraries
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
# Read the data
t = pd.read_csv("../input/pima-indians-diabetes-database/diabetes.csv")
t.head().T
def replace_0(df, col):
df1 = df.copy()
n = df.shape[0]
m = df[col].mean()
s = df[col].std()
for i in range(n):
if df.loc[i, col] == 0:
df1.loc[i, col] = np.random.normal(m, s)
return df1
# ## Interpreting the parameters
# ```
# - Pregnancies: number of pregnancies
# - Glucose: millimoles of glucose per litre of blood
# - BloodPressure: blood pressure in mmHg
# - SkinThickness: skin thickness
# - Insulin: pmol/L of insulin in the blood
# - BMI: Body Mass Index
# - DiabetesPedigreeFunction: function representing the genetic likelihood of developing diabetes
# - Age: age
# - Outcome: 1 = has diabetes, 0 = does not have diabetes
# ```
t.count()
np.sum(~np.isnan(t))
np.amin(t.DiabetesPedigreeFunction)
np.amax(t.DiabetesPedigreeFunction)
# ### Defining new boolean masks
# Based on standard classifications, for example of BMI, we can assign names to the different value ranges:
anorexie = t.BMI < 16.5
maigre = (t.BMI >= 16.5) & (t.BMI < 18.5)
ideal = (t.BMI >= 18.5) & (t.BMI < 25)
surpoids = (t.BMI >= 25) & (t.BMI < 30)
obesite_moderee = (t.BMI >= 30) & (t.BMI < 35)
obesite_severe = (t.BMI >= 35) & (t.BMI < 40)
obesite_morbide = t.BMI >= 40
jeune = t.Age < 30
adulte = (t.Age >= 30) & (t.Age < 50)
senior = t.Age >= 50
a_risque = t.DiabetesPedigreeFunction >= 0.5
malade = t.Outcome == 1
sain = ~malade
# ### Tests on the data
# A healthy young person with no hereditary risk
jeune1 = jeune & ideal & ~a_risque
# A healthy young person with hereditary risk
jeune2 = jeune & ideal & a_risque
# A senior with moderate obesity and no hereditary risk
senior1 = senior & obesite_moderee & ~a_risque
# A senior with moderate obesity and hereditary risk
senior2 = senior & obesite_moderee & a_risque
p_jeune1 = t[jeune1 & malade].Age.count() / t[jeune1].Age.count() * 100
print(p_jeune1, "%")
p_jeune2 = t[jeune2 & malade].Age.count() / t[jeune2].Age.count() * 100
print(p_jeune2, "%")
p_senior1 = t[senior1 & malade].Age.count() / t[senior1].Age.count() * 100
print(p_senior1, "%")
p_senior2 = t[senior2 & malade].Age.count() / t[senior2].Age.count() * 100
print(p_senior2, "%")
# ## Representing the dataset
# We start by replacing the 0 values in certain columns:
t = replace_0(t, "Glucose")
t = replace_0(t, "SkinThickness")
t = replace_0(t, "BMI")
t = replace_0(t, "BloodPressure")
|
# **IMPORTING FILE PATHS**
import os
import numpy as np
import pandas as pd
files = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
files += [os.path.join(dirname, filename)]
files
# **TAKING A SINGLE FILE**
# Model fits only for indexes: 0,6,7 (lat data)
file = files[5]
print(file)
# **READING FILE DATA FROM EXCEL AND DROPPING USELESS DATA**
data_set = pd.read_excel(file)
data_set = data_set[["Lat. N", "Long. E__3"]].dropna()
# data_set=data_set.drop(23)
# data_set=data_set.drop(25)
data_set
# **Saving lat and long data in lists**
lat = []
long = []
data = []
# for items in data_set['Tables_ __3']:
# item = items.split('/')
# # print(item)
# data.append(list(map(float, item)))
# lat.append(item[0])
# long.append(item[1])
datas = data_set.values
# print(datas)
data2 = []
for data in datas:
lat.append(data[0])
long.append(data[1])
data2.append(list(map(float, data)))
print(type(data2))
# print(len(lat))
# print(len(long))
# print(np.array(data))
# **Data Preprocessing**
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_data = np.arange(1, len(lat) + 1, 1).reshape(-1, 1)
y_data = np.array(lat).reshape(-1, 1)
# y_data=np.array(data2)
# Normalisation
y_data = scaler.fit_transform(y_data)
# print(y_data)
x_data = scaler.fit_transform(x_data)
# print(x_data)
training_size = round(0.8 * len(x_data))
x_train_data = x_data[0:training_size]
print(len(x_train_data))
y_train_data = y_data[0:training_size]
print(len(y_train_data))
x_test_data = x_data[training_size : len(x_data) + 1]
print(len(x_test_data))
y_test_data = y_data[training_size : len(y_data) + 1]
print(len(y_test_data))
# **Training the model (Polynomial Regression)**
# Training
import matplotlib.pyplot as mtp
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
score = []
max_degree = 10
for i in range(0, max_degree):
poly_regs = PolynomialFeatures(degree=i)
x_poly = poly_regs.fit_transform(x_train_data.reshape(-1, 1))
lin_reg_2 = LinearRegression()
lin_reg_2.fit(x_poly, y_train_data)
# mtp.scatter(x_data,y_data,color="blue")
# mtp.plot(x_data, lin_reg_2.predict(poly_regs.fit_transform(x_data)), color="red")
# mtp.title("Storm Path detection (Polynomial Regression) for degree = "+str(i))
# mtp.xlabel("Time Stamp")
# mtp.ylabel("Latitude")
# mtp.show()
x_test_poly = poly_regs.fit_transform(x_test_data.reshape(-1, 1))
score.append(lin_reg_2.score(x_test_poly, y_test_data))
score = np.array(score)
best_fit_poly = score.argmax()
print(score, "\n")
print("Max score is: ", score.max())
print("max score is for polynomial = ", best_fit_poly)
# **BEST FIT: Visualisation**
# Train again
poly_regs = PolynomialFeatures(degree=best_fit_poly)
x_poly = poly_regs.fit_transform(x_train_data)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(x_poly, y_train_data)
poly_pred = lin_reg_2.predict(poly_regs.fit_transform(x_test_data))
print(poly_pred)
# Visualisation
mtp.scatter(x_data, y_data, color="blue")
mtp.plot(x_data, lin_reg_2.predict(poly_regs.fit_transform(x_data)), color="red")
mtp.title(
"Storm Path detection (Polynomial Regression) for degree = " + str(best_fit_poly)
)
mtp.xlabel("Time Stamp")
mtp.ylabel("Latitude")
mtp.show()
# **Degree of Polynomial vs Score**
xx = np.arange(0, len(score), 1)
score
mtp.scatter(xx, score, color="red")
|
# # Bagging Classifier
# The Bagging Classifier is an ensemble method that reduces the variance of individual estimators by introducing randomisation into the training stage of each estimator and then combining all the estimators into an ensemble.
# ***
# It simply takes the predictions from the different estimators and combines them into a final prediction.
# For training multiple estimators, it uses different sampling techniques such as **Pasting, Bagging/Bootstrap aggregation, Random subspaces and Random patches** (a small sketch of bootstrap sampling follows below).
# Mostly, this classifier is used with high-variance base estimators like a **Decision Tree**.
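# A minimal illustrative sketch (not part of the original notebook): bootstrap sampling,
# the resampling scheme behind bagging. Each base estimator is trained on a sample drawn
# with replacement from the training set, which is what decorrelates the estimators.
import numpy as np

rng = np.random.default_rng(0)
toy_indices = np.arange(10)  # hypothetical row indices of a training set
bootstrap_sample = rng.choice(toy_indices, size=toy_indices.size, replace=True)
print(sorted(bootstrap_sample))  # some indices repeat, others are left out ("out-of-bag")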
import warnings
warnings.filterwarnings("always")
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
iris = load_iris()
data = pd.DataFrame(iris.data, columns=iris.feature_names)
data["species"] = iris.target
data.head()
# getting to know missing values
import missingno as mn
mn.matrix(data)
data.columns
plt.figure()
fig, ax = plt.subplots(2, 2, figsize=(15, 6))
sns.distplot(
data["sepal length (cm)"],
ax=ax[0][0],
hist=True,
kde=True,
bins="auto",
color="darkblue",
hist_kws={"edgecolor": "black"},
kde_kws={"linewidth": 4},
)
sns.distplot(
data["sepal width (cm)"],
ax=ax[0][1],
hist=True,
kde=True,
bins="auto",
color="darkblue",
hist_kws={"edgecolor": "black"},
kde_kws={"linewidth": 4},
)
sns.distplot(
data["petal length (cm)"],
ax=ax[1][0],
hist=True,
kde=True,
bins="auto",
color="darkblue",
hist_kws={"edgecolor": "black"},
kde_kws={"linewidth": 4},
)
sns.distplot(
data["petal width (cm)"],
ax=ax[1][1],
hist=True,
kde=True,
bins="auto",
color="darkblue",
hist_kws={"edgecolor": "black"},
kde_kws={"linewidth": 4},
)
formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])
plt.figure(figsize=(15, 8))
plt.scatter(np.array(data.iloc[:, 0]), np.array(data.iloc[:, 1]), c=data.species)
plt.colorbar(ticks=[0, 1, 2], format=formatter)
plt.xlabel("Sepal Length (cm)")
plt.ylabel("Sepal Width (cm)")
plt.show()
# ## Facing off, Decision Tree Algorithm
X = data.iloc[:, 0:4].values
y = data.iloc[:, 4].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=4)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
pipeline = make_pipeline(
StandardScaler(), DecisionTreeClassifier(criterion="entropy", max_depth=4)
)
pipeline.fit(X_train, y_train)
pipeline.score(X_train, y_train), pipeline.score(X_test, y_test)
# Here you can see that we can't really say the model is trained with high accuracy; it is overfitting and does not do as well on the test data.
# ### Now, we can intro our Bagging Classifier
pipeline = make_pipeline(
StandardScaler(), DecisionTreeClassifier(criterion="entropy", max_depth=4)
)
from sklearn.ensemble import BaggingClassifier
bgclf = BaggingClassifier(
base_estimator=pipeline, n_estimators=100, max_samples=10, random_state=1, n_jobs=5
)
bgclf.fit(X_train, y_train)
bgclf.score(X_train, y_train), bgclf.score(X_test, y_test)
y_train_pred = bgclf.predict(X_train)
y_test_pred = bgclf.predict(X_test)
y_test_pred
# ### Evaluation
cm_train = metrics.confusion_matrix(y_train_pred, y_train)
print(cm_train)
sns.heatmap(cm_train, annot=True)
cm_test = metrics.confusion_matrix(y_test_pred, y_test)
print(cm_test)
sns.heatmap(cm_test, annot=True, cmap="Blues")
metrics.accuracy_score(y_test_pred, y_test)
# ## Cheers!!!
# ## Then, Plotting our Pretty Decision Tree
import graphviz
import pydotplus
from IPython.display import Image
clf = DecisionTreeClassifier(min_samples_leaf=20, max_depth=5)
clf.fit(X_train, y_train)
from sklearn import tree
dot_data = tree.export_graphviz(
clf, out_file=None, feature_names=iris.feature_names, filled=True
)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
# ## Controlling Tree Growth
clftree2 = DecisionTreeClassifier(min_samples_leaf=20, max_depth=5)
clftree2.fit(X_train, y_train)
dot_data = tree.export_graphviz(
clftree2, out_file=None, feature_names=iris.feature_names, filled=True
)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(r"/kaggle/input/titanic/train.csv")
dff = pd.read_csv(r"/kaggle/input/titanic/test.csv")
df.tail()
dff.tail()
df.describe()
df.info()
df.drop("Cabin", axis=1, inplace=True)
df.head()
# The columns Name and Ticket are not useful for the model, so let's remove them
#
df.drop(["Name", "Ticket"], axis=1, inplace=True)
df.head()
df.info()
type(df[["Age"]])
df[880:891]
df.isna().sum()
df.Embarked.value_counts()
df.groupby(["Embarked", "Sex"])["Fare"].apply(lambda x: x.median())
# all(df.loc[:, 'Embarked'] == df.Embarked.isna())
df[df["Embarked"].isnull()] # .Embarked = 'C'
df.loc[[61, 829], "Embarked"] = "C"
df[df["Embarked"].isnull()]
# df.Age.hist();
import matplotlib.pyplot as plt
import seaborn as sns
# plt.hist(df.Age);
sns.histplot(df.Age)
df.groupby(["Sex", "Pclass"])["Age"].median()
# Filling the NA values of the Age column with medians grouped by Sex and Pclass
df[df.isna().any(axis=1)]
age_medians = df.groupby(["Sex", "Pclass"])["Age"].median()
def fill_age(row):
return age_medians[row["Sex"], row["Pclass"]]
df["Age"] = df.apply(
lambda row: fill_age(row) if pd.isnull(row["Age"]) else row["Age"], axis=1
)
# age_medians['']
df.isna().sum()
# (df.groupby(['Sex', 'Pclass'])['Age'].median())[df[0, :].Pclass]
from sklearn.preprocessing import OneHotEncoder
# Load the Titanic dataset into a DataFrame
# Define the categorical columns to encode
cat_cols = ["Sex", "Pclass", "Embarked"]
# Create an instance of the OneHotEncoder class
encoder = OneHotEncoder(handle_unknown="ignore")
# Fit the encoder to the categorical columns
encoder.fit(df[cat_cols])
# Transform the categorical columns using One-Hot Encoding
encoded_cols = encoder.transform(df[cat_cols]).toarray()
# Create new column names for the encoded columns
new_cols = []
for col in encoder.get_feature_names_out(cat_cols):
new_cols.append(col)
# Create a new DataFrame with the encoded columns
encoded_df = pd.DataFrame(encoded_cols, columns=new_cols)
encoded_df.tail(10)
df = pd.concat([df, encoded_df], axis=1)
df.shape
df.drop(["Pclass", "Sex", "Embarked"], axis=1, inplace=True)
df.shape
df.head(10)
df.set_index("PassengerId", inplace=True)
df.info()
# from sklearn.model_selection import GridSearchCV
# param_grid = {'svc__C': [1, 5, 10, 50],
# 'svc__gamma': [0.0001, 0.0005, 0.001, 0.005]}
# grid = GridSearchCV(svc, param_grid)
# %time grid.fit(Xtrain, ytrain)
# print(grid.best_params_)
Y_train = df.pop("Survived")
type(Y_train)
df
dff.head(5)
dff.drop(["Name", "Ticket", "Cabin"], axis=1, inplace=True)
dff.info()
# dff.loc[dff.Fare.isna() == True].Fare = np.round(dff.Fare.mean(), 2)
dff[dff.Fare.isnull()]
dff.groupby(["Sex", "Pclass"])["Fare"].median()
sns.scatterplot(data=(dff.query('Pclass == 3 and Sex == "male"')), x="Age", y="Fare")
dff.Fare.fillna(value=30, inplace=True)  # fill the single missing Fare; 30 chosen from the exploration above
dff.isna().sum()
age_medians = dff.groupby(["Sex", "Pclass"])["Age"].median()
def fill_age(row):
return age_medians[row["Sex"], row["Pclass"]]
dff["Age"] = dff.apply(
lambda row: fill_age(row) if pd.isnull(row["Age"]) else row["Age"], axis=1
)
# (dff.groupby(['Sex', 'Pclass'])['Age'].median())#.loc['female', 1]
dff.isna().sum()
sns.boxplot(x=dff.Age)
sns.displot(dff.Age)
dff.info()
# Define the categorical columns to encode
cat_cols = ["Sex", "Pclass", "Embarked"]
# Create an instance of the OneHotEncoder class
encoder1 = OneHotEncoder(handle_unknown="ignore")
# Fit the encoder to the categorical columns
encoder1.fit(dff[cat_cols])
# Transform the categorical columns using One-Hot Encoding
encoded_cols1 = encoder1.transform(dff[cat_cols]).toarray()
# Create new column names for the encoded columns
new_cols1 = []
for col in encoder1.get_feature_names_out(cat_cols):  # use encoder1 (fitted on dff) for consistency
    new_cols1.append(col)
# Create a new DataFrame with the encoded columns
# (the categories match the training set here, so the columns line up either way;
# reusing the encoder fitted on the training data would be the more standard approach)
encoded_df1 = pd.DataFrame(encoded_cols1, columns=new_cols1)
encoded_df1.tail(10)
encoded_df1.head()
dff.head()
dff = pd.concat([dff, encoded_df1], axis=1)
dff.drop(["Pclass", "Sex", "Embarked"], inplace=True, axis=1)
dff.set_index("PassengerId", inplace=True)
dff.tail(10)
df
from sklearn import svm
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
model = make_pipeline(
StandardScaler(), svm.LinearSVC(random_state=20, max_iter=10000)
) # .fit(df, Y_train)#.predict(Xtest)
Y_Pred = model.fit(df, Y_train).predict(dff)
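# Hedged sketch of the tuning idea from the commented-out grid search above: that snippet
# targeted an RBF SVC ('svc__C', 'svc__gamma'); for this StandardScaler + LinearSVC pipeline
# the relevant parameter is 'linearsvc__C' (make_pipeline lowercases the class name for the
# step name). The C values below are illustrative, not tuned.
from sklearn.model_selection import GridSearchCV

param_grid = {"linearsvc__C": [0.01, 0.1, 1, 10]}
grid = GridSearchCV(model, param_grid, cv=5)
grid.fit(df, Y_train)
print(grid.best_params_, grid.best_score_)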
op1 = pd.DataFrame({"PassengerId": range(892, 892 + len(Y_Pred)), "Survived": Y_Pred})
op1
op1.set_index("PassengerId", inplace=True)
op1
print("Pruthvinth")
op1.to_csv("output.csv")
|
import pandas as pd
# **Importing the pandas module and aliasing it as "pd"**
# **1. Load the melb_data dataset into the Pandas dataframe.**
melb_data = pd.read_csv("/kaggle/input/melb-data/melb_data.csv")
melb_data.head()
# The melb_data dataset from Platon was empty, so we use this Kaggle dataset instead.
# **2. Display the dataset shape**
melb_data.shape
# **3. Display the first row in the dataset.**
print(melb_data.iloc[0])
print("By using 'melb_data.head(1)'\n")
melb_data.head(1)
# **4. Display the last row in the dataset.**
print(melb_data.iloc[-1])
print("By using 'melb_data.tail(1)'\n")
melb_data.tail(1)
# 5. List the name of the numerical columns.
numerical_cols = melb_data.select_dtypes(include="number").columns.tolist()
print(numerical_cols)
# **6. To list the names of the categorical (text) columns, use select_dtypes() with exclude='number'.**
categorical_cols = melb_data.select_dtypes(exclude="number").columns.tolist()
print(categorical_cols)
# **7. To display information about the categorical (text) columns, use describe() with include='object'.**
print(melb_data.describe(include="object"))
# 8. Display the information about the numerical columns using the describe() function.
#
print(melb_data.describe())
# 9. List the columns with missing values.
missing_cols = melb_data.columns[melb_data.isnull().any()].tolist()
print(missing_cols)
# 10. How many missing values are there in the column 'BuildingArea'.
print(melb_data["BuildingArea"].isnull().sum())
# 11.To find the unique values in the column 'CouncilArea', you can use the unique() function:
print(melb_data["CouncilArea"].unique())
# 12. Fill the missing values in the column ‘Car’ with the zero.
melb_data["Car"].fillna(0, inplace=True)
# 13. Fill the missing values in the column 'BuildingArea' with the average of that column
average_of_building_area = melb_data["BuildingArea"].mean().round(2)
melb_data["BuildingArea"].fillna(average_of_building_area, inplace=True)
melb_data.head()
# 14. Fill the missing values in the column 'YearBuilt' with the median value of that column
median_value_of_year_built = melb_data["YearBuilt"].median()
melb_data["YearBuilt"].fillna(median_value_of_year_built, inplace=True)
melb_data.head()
# 15. Fill the missing values in the column ‘CouncilArea’ with the most frequent value of that column.
most_common_value_of_council_area = melb_data["CouncilArea"].mode()[0]
most_common_value_of_council_area
melb_data["CouncilArea"].fillna(most_common_value_of_council_area, inplace=True)
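# A quick verification sketch: re-check the columns that originally had missing values
# (missing_cols was computed earlier); the columns handled above should now report zero.
print(melb_data[missing_cols].isnull().sum())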
|
# EDA & Visualization & Recommendation System
# ### Brief description of the project
# Assuming that movie lists A, B, and C (each 100 movies randomly drawn under certain conditions) are the movies Personas A, B, and C have watched, this project defines what kind of person each persona is and recommends movies to them
# ### Limits and Direction of Development
# There is a lot of duplicated code, so I need to increase code reuse by wrapping repeated steps in functions
# ## Import Modules
import pandas as pd
import numpy as np
import random
import pickle
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.express as px
import plotly.offline as pyo
from sklearn.preprocessing import MinMaxScaler
from skimage import io
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
# plt.rc('font', family='Malgun Gothic')
plt.rcParams["font.family"] = "Arial"
pyo.init_notebook_mode()
# ## Data Loading
df = pd.read_csv("/kaggle/input/millions-of-movies/movies.csv")
# Poster import data
poster_df = df.copy()
poster_df = poster_df[["id", "title", "poster_path", "backdrop_path"]]
# Data frame to use
df.drop(columns=["backdrop_path", "poster_path", "recommendations"], inplace=True)
df.head()
# ## Defining Persona
# - There are Personas A, B, and C. Each variable is the list of movies that persona has watched. Define what each persona is like and recommend movies they would enjoy
random.seed(25)
# persona_A : Who watched 100 documentary movies (seed fixed random)
persona_A = df[df["genres"] == "Documentary"]
persona_A.index = np.arange(len(persona_A))
seen = list()
for i in range(100):
seen.append(random.randint(0, len(persona_A) - 1))
persona_A = persona_A.loc[seen]
# persona_B : Who watched 100 movies whose original language is Italian ('it') (seed-fixed random)
persona_B = df[df["original_language"] == "it"]
persona_B.index = np.arange(len(persona_B))
seen = list()
for i in range(100):
seen.append(random.randint(0, len(persona_B) - 1))
persona_B = persona_B.loc[seen]
# persona_C : Who watched 100 movies with a rating of 8 or higher (seed fixed random)
persona_C = df[df["vote_average"] > 8]
persona_C.index = np.arange(len(persona_C))
seen = list()
for i in range(100):
seen.append(random.randint(0, len(persona_C) - 1))
persona_C = persona_C.loc[seen]
# For smooth preprocessing,
# each persona is kept as a list of movie id values, and df is accessed later via those ids
persona_A = persona_A.id.values.tolist()
persona_B = persona_B.id.values.tolist()
persona_C = persona_C.id.values.tolist()
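# Alternative sampling sketch (illustrative only; the personas above are kept as-is):
# DataFrame.sample draws the 100 movies in one call and samples without replacement,
# unlike the repeated random.randint indexing used above.
persona_A_alt = df[df["genres"] == "Documentary"].sample(n=100, random_state=25)["id"].tolist()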
# ## Data Preprocessing
# ### ID
# - Why do duplicate ids exist? Why do rows with the same id differ in other columns? And how should this be handled?
# No duplicate ids within any persona
print(len(set(persona_A)))
print(len(set(persona_B)))
print(len(set(persona_C)))
# 116362 rows carry a duplicated id
print(len(df[df["id"].duplicated(keep=False)]["id"]))
# 55446 unique ids appear more than once
print(len(df[df["id"].duplicated(keep=False)]["id"].value_counts().index))
print("< For duplicated ids, which other columns actually differ across rows? >")
for col in df.columns:
print(col, end=" : ")
dup_df = df.loc[df[df["id"].duplicated(keep=False)].index]
t = dup_df.groupby("id")[col].nunique(dropna=False)
t = t[t > 1]
print(t.value_counts().sum())
# popularity, vote_average, vote_count -> numeric: replace with the mean across duplicates
# production_companies (e.g. ids 963060, 385282, 686971) -> effectively identical, keep the first occurrence
# keywords, credits -> the longer string contains the shorter one -> keep the longer
# Temporarily save the original id order
origin_id_order = df.drop_duplicates(subset="id", keep="first").id
# Only those with duplicate id are selected: dup_df
dup_df = df.loc[df[df["id"].duplicated(keep=False)].index]
# Keywords are replaced by long ones
dup_df["keywords"].fillna("", inplace=True)
new_keywords = dup_df.loc[
dup_df.groupby("id")["keywords"].apply(lambda x: x.str.len().idxmax())
][["id", "keywords"]]
new_keywords.set_index("id", inplace=True)
new_keywords = new_keywords["keywords"]
# credits are replaced by long ones
dup_df["credits"].fillna("", inplace=True)
new_credits = dup_df.loc[
dup_df.groupby("id")["credits"].apply(lambda x: x.str.len().idxmax())
][["id", "credits"]]
new_credits.set_index("id", inplace=True)
new_credits = new_credits["credits"]
# Three numeric variables are averaged to replace them
new_popularity = dup_df.groupby("id")["popularity"].mean()
new_vote_average = dup_df.groupby("id")["vote_average"].mean()
new_vote_count = dup_df.groupby("id")["vote_count"].mean()
# Remove every row whose id is duplicated from df
df.drop_duplicates(subset="id", keep=False, inplace=True)
# Keep a single row per id inside the duplicated-id frame (dup_df)
dup_df.drop_duplicates(subset="id", keep="first", inplace=True)
# Merge the aggregated columns back onto dup_df
join = [new_keywords, new_credits, new_popularity, new_vote_average, new_vote_count]
temp_df = dup_df.copy()
temp_df.drop(
columns=["keywords", "credits", "popularity", "vote_average", "vote_count"],
inplace=True,
)
for j in join:
temp_df = pd.merge(temp_df, j, on="id", how="left")
temp_df = temp_df[df.columns]
# Put dup_df into df and sort id in the existing order
df = pd.concat([df, temp_df], axis=0)
df = df.set_index("id").loc[origin_id_order].reset_index()
df
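# Sanity-check sketch: after merging the deduplicated rows back in, every id should be unique
# and the row count should match the number of ids saved in origin_id_order.
print("duplicated ids remaining:", df["id"].duplicated().sum())
print("rows:", len(df), "| ids originally kept:", len(origin_id_order))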
# ### Title
# - Delete the four missing values below
display(df[df["title"].isnull()])
title_drop = df[df["title"].isnull()].index.tolist()  # row labels of the missing-title rows
title_drop_ids = df.loc[title_drop, "id"].tolist()  # their ids, for the persona checks below
df.drop(index=title_drop, inplace=True)  # drop those rows
print(
len(df[df["title"].duplicated(keep=False)])
) # the number of movies with overlapping titles
df[
df["title"] == "Plane"
] # But since the title is the same and it's a different movie, let's distinguish it by ID
print("Persona A's missing-title ids", list(set(title_drop_ids).intersection(persona_A)))
print("Persona B's missing-title ids", list(set(title_drop_ids).intersection(persona_B)))
print("Persona C's missing-title ids", list(set(title_drop_ids).intersection(persona_C)))
# ### original_language
df["original_language"].value_counts() # 167 languages with all unique values
df["original_language"].isnull().sum() # No missing values, very nice
# ### vote_average & vote_counts
df[["vote_average", "vote_count"]].isnull().sum() # No missing values
len(df[df["vote_count"] == 0])  # 426574 movies have a vote_count of 0
df[(df["vote_count"] > 0) & (df["vote_average"] == 0)]["vote_count"].value_counts()
# Where there were voters but the average rating is zero, every such movie had fewer than three voters -> the movie was probably genuinely rated zero because it was that bad.
# If many people had voted and the average were still exactly zero, we could suspect a data error, but with so few voters this looks genuine.
# A vote_count of 0.5 (an artifact of averaging duplicate rows) is treated as 0 below.
print(f"Vote count of 1352 index : {df['vote_count'].loc[1352]}")
df["vote_count"].loc[1352] = 0
print("__________After processing__________")
display(df.loc[1352])
display(
df[(df["vote_count"] == 0) & (df["vote_average"] > 0)]
)  # vote_count is zero but vote_average is non-zero -- looks inconsistent
# Hypothesis: There is an error in the data. -> Replace rating with 0
df.loc[(df["vote_count"] == 0) & (df["vote_average"] > 0), "vote_average"] = 0
display(df[(df["vote_count"] == 0) & (df["vote_average"] == 0)])
# ### production_companies
df["production_companies"].isnull().sum()
df["production_companies"].fillna("", inplace=True)
df["production_companies"] = df["production_companies"].str.split("-")
df["production_companies"]
from collections import Counter
company_list = [word for words in df["production_companies"].values for word in words]
company_count = dict(Counter(company_list))
# company_count = dict(sorted(company_count.items())) #Dictionary Sequencing
company_count = dict(sorted(company_count.items(), key=lambda x: x[1], reverse=True))
company_count
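# Reuse sketch (this ties in with the 'increase code reuse' note at the top): the
# Counter-over-a-list-column pattern recurs below for genres, credits, keywords and companies,
# so a small helper could replace those blocks. Shown only as a reference; the explicit code is kept.
def count_list_column(series, drop_empty=True):
    """Count items in a column whose values are lists of strings, most frequent first."""
    counts = Counter(item for items in series for item in items)
    if drop_empty:
        counts.pop("", None)
    return dict(sorted(counts.items(), key=lambda x: x[1], reverse=True))
# Example: count_list_column(df["production_companies"]) matches company_count above
# (minus the empty-string entry).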
# ### popularity
df["popularity"].isnull().sum() # No missing values
print(df["popularity"].min())
print(df["popularity"].max())
# ### budget & revenue & runtime
print(df["budget"].isnull().sum())
print(len(df[df["budget"] == 0]))
# "budget" takes values like 0, 1, 2, 3...; it is unclear whether these are real production costs
df[
    df["budget"] == 1
]  # Did the movie "down" really cost one dollar? Hard to decide how to handle this...
print(df["revenue"].isnull().sum())
print(len(df[df["revenue"] == 0]))
df[df["revenue"] == 1]  # the same question: how should implausibly low revenues be handled?
df.sort_values("runtime", ascending=False)["runtime"]
# Look up the correct runtimes for "Mr Marcus FriendBoop Facebook Adventures" and fill them in
df.loc[df["id"] == 660968, "runtime"] = 1370  # 22h 50m = 1370 min
df.loc[df["id"] == 660969, "runtime"] = 861  # 14h 21m = 861 min
# ### credits & genres
df["credits"].value_counts()
df["genres"].value_counts()
df["genres"].fillna("", inplace=True)
df["credits"].fillna("", inplace=True)
df["genres"] = df["genres"].str.split("-")
df["credits"] = df["credits"].str.split("-")
# Example of filtering on the split genre lists
has = df["genres"].apply(lambda x: "Horror" in x)
df[has]  # Horror movies
from collections import Counter
genres_list = [word for words in df["genres"].values for word in words]
genres_count = dict(Counter(genres_list))
genres_count
credits_list = [word for words in df["credits"].values for word in words]
credits_count = dict(Counter(credits_list))
credits_count
# ### status & release_date
# The "status" field in the movie database represents the current state of the movie: whether it has not been released yet, is awaiting release, or is already out.
# - Released: Indicates that the movie has already been released and released to the general public.
# - Post Production: Indicates that the film production is being completed and will be released soon.
# - In Production: It indicates that the movie is in production and the release schedule has not been confirmed yet.
# - Rumored: Indicates that there are rumors about the movie, but the production has not been confirmed.
# - Planned: Indicates that the production of the film is confirmed, but the production has not yet begun.
# - Canceled: Indicates that the film production has been canceled, or that it was scheduled to be released, but has not been released.
# This "status" information helps users understand the current state of the movie.
df["status"].value_counts()
df["status"].isnull().sum() # good
df["release_date"].isnull().sum()
# Some movies marked "Released" have release dates in the future...
# -> As of 2023-04-06 (the day of the Insi-Con), these are reclassified below as scheduled releases
released = df[df["status"] == "Released"]
(released["release_date"] > "2023-04-06").value_counts()
(released["release_date"] > "2023-12-31").value_counts()
# All of them will be released within 2023, so they are classified as Post Production
df.loc[df["release_date"] > "2023-04-06", "status"] = "Post Production"
(df[df["status"] == "Released"]["release_date"] > "2023-04-06").value_counts()
# Marked as awaiting release (Post Production), yet the release date is already in the past?
# -> Interpret the date as the originally scheduled release date
temp = df[df["status"] == "Post Production"]
(temp["release_date"] < "2023-03-23").value_counts()
# temp[temp['release_date'] < '2023-03-23']
# It's in the middle of production, but the release date is fixed?
# -> Let's interpret it as the scheduled release date
temp = df[df["status"] == "In Production"]
temp["release_date"]
# Six out of 150 rumors have release dates
# --> Let's interpret them as scheduled release dates (Rumor)
temp = df[df["status"] == "Rumored"]
temp["release_date"].value_counts()
# Interpret these as planned release dates; the remaining 2480 dates are missing
temp = df[df["status"] == "Planned"]
temp["release_date"].value_counts()
# Canceled: 222 of the release dates are missing
# -> where a date exists, interpret it as the originally scheduled date of a film that was then canceled
temp = df[df["status"] == "Canceled"]
temp["release_date"].value_counts()
# ### 'overview', 'keywords', 'tagline'
# - The word "tagline" in a movie refers to a phrase or slogan representing the movie. In general, it is used in official posters and advertisements of movies, to briefly introduce the content or atmosphere of the movie, and to attract the attention of the audience.
# - overview, keywords, tagline
# -> Natural language processing was attempted but dropped because it did not fit the purpose of this first Insi-Con, so overview and tagline are removed
df["keywords"].fillna("", inplace=True)
df["keywords"] = df["keywords"].str.split("-")
df.drop(columns=["overview", "tagline"], inplace=True)
df
# ### Additional Preprocessing
# The variable has a wide range and many extreme values, so log-scale it and round to one decimal place as a rough categorization
df["log_popularity"] = np.log10(df["popularity"]).round(1)
# Bucket the ratings into integer categories with int()
df["vote_average"] = df["vote_average"].apply(lambda x: int(x))
df["release_year"] = df["release_date"].str[:4].values # Generating Year Columns
df["release_month"] = df["release_date"].str[5:7].values # Generating Month Columns
df["release_year"].fillna(0, inplace=True)
df["release_year"] = df["release_year"].astype(int)
# Approaching the preprocessed df with the persona id value
a_df = df[df["id"].isin(persona_A)]
b_df = df[df["id"].isin(persona_B)]
c_df = df[df["id"].isin(persona_C)]
# ## Selecting Variables
# - pointless variables : vote_count, budget, revenue, status, poster_path, backdrop_path
# - significant variables :
# - Variables to use only for persona definitions: overview , tagline , release_date, runtime,
# - Variables to use for Persona Definition + Recommendation Algorithm
# - Personalization variable : genres, original_language, production_companies, credits, keywords,
# - Popularity variable : popularity, vote_average
# #### pointless variables
# - Budget and revenue are hard to use because at least 92 of each persona's 100 movies have a value of 0 (see the gauges below)
# - poster_path and backdrop_path are just links to the posters, so they are not needed for the analysis and EDA
# - status is meaningless here because every persona has only watched Released movies
# - vote_count is often tiny (0, 1, 2, ...), so it is hard to extract meaning from it
#
a_value = len(a_df[a_df["budget"] == 0])
b_value = len(b_df[b_df["budget"] == 0])
c_value = len(c_df[c_df["budget"] == 0])
fig = make_subplots(rows=1, cols=3, specs=[[{"type": "indicator"}] * 3])
fig1 = go.Figure(
go.Indicator(
mode="gauge+number",
value=a_value,
gauge={"axis": {"range": [1, 100]}, "bar": {"color": "#F0EAD6"}},
title={"text": "Persona_A"},
)
)
fig2 = go.Figure(
go.Indicator(
mode="gauge+number",
value=b_value,
gauge={"axis": {"range": [1, 100]}, "bar": {"color": "#E0CDA9"}},
title={"text": "Persona_B"},
)
)
fig3 = go.Figure(
go.Indicator(
mode="gauge+number",
value=c_value,
gauge={"axis": {"range": [1, 100]}, "bar": {"color": "#CDB79E"}},
title={"text": "Persona_C"},
)
)
fig.add_trace(fig1["data"][0], row=1, col=1)
fig.add_trace(fig2["data"][0], row=1, col=2)
fig.add_trace(fig3["data"][0], row=1, col=3)
fig.update_layout(
title={
"text": "< Ratio of ZERO values(BUDGET) >",
"y": 0.8,
"x": 0.5,
"font": {"size": 20},
"xanchor": "center",
"yanchor": "top",
},
margin=dict(l=0, r=0, t=0, b=0),
)
fig.show() # Therefore, do not use budget
a_value = len(a_df[a_df["revenue"] == 0])
b_value = len(b_df[b_df["revenue"] == 0])
c_value = len(c_df[c_df["revenue"] == 0])
fig = make_subplots(rows=1, cols=3, specs=[[{"type": "indicator"}] * 3])
fig1 = go.Figure(
go.Indicator(
mode="gauge+number",
value=a_value,
gauge={"axis": {"range": [1, 100]}, "bar": {"color": "#F0EAD6"}},
title={"text": "Persona_A"},
)
)
fig2 = go.Figure(
go.Indicator(
mode="gauge+number",
value=b_value,
gauge={"axis": {"range": [1, 100]}, "bar": {"color": "#E0CDA9"}},
title={"text": "Persona_B"},
)
)
fig3 = go.Figure(
go.Indicator(
mode="gauge+number",
value=c_value,
gauge={"axis": {"range": [1, 100]}, "bar": {"color": "#CDB79E"}},
title={"text": "Persona_C"},
)
)
fig.add_trace(fig1["data"][0], row=1, col=1)
fig.add_trace(fig2["data"][0], row=1, col=2)
fig.add_trace(fig3["data"][0], row=1, col=3)
fig.update_layout(
title={
"text": "< Ratio of ZERO values(REVENUE) >",
"y": 0.8,
"x": 0.5,
"font": {"size": 20},
"xanchor": "center",
"yanchor": "top",
},
margin=dict(l=0, r=0, t=0, b=0),
)
fig.show() # Therefore, do not use revenue
# Status
print("< persona_A >")
display(a_df.status.value_counts())
print("< persona_B >")
display(b_df.status.value_counts())
print("< persona_C >")
display(c_df.status.value_counts())
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 3.5))
sns.scatterplot(x="vote_average", y="vote_count", data=df, ax=ax[0])
x = ["A", "B", "C"]
y = [
a_df.status.value_counts().values[0],
b_df.status.value_counts().values[0],
c_df.status.value_counts().values[0],
]
ax[1].bar(x, y, color="#87CEEB")
ax[1].set_title("Released Status")
ax[1].set_xlabel("Persona")
ax[1].set_ylabel("Released Status Ratio(%)")
for i, v in enumerate(y):
ax[1].text(i, v, str(v), ha="center")
plt.show()
# The higher the rating, the wider the spread of vote counts; at the extreme ratings the vote counts are very small
# But this does not yield a meaningful insight, so vote_count is not used
# #### Significant variables:
# Variables to use only for persona definitions:
# - overview, tagline: hard to handle (terminology, tense/plural endings). NLP was attempted but dropped because it fell outside the scope of this first Insi-Con. The columns are too useful to throw away entirely, so they were read manually; since that cannot serve as an objective indicator, they are used only for the persona definitions and not in the algorithm
# - release_date, runtime: many zeros or missing values, and no insight strong enough for the algorithm. Still, some per-persona characteristics are visible, so they are used only for the persona definitions
# - credits: few features stand out for any persona, so only a handful of favorite actors are used to help define the personas
# Persona definition + variables used in the algorithm: split into variables carrying individual preference information and variables carrying the preferences of the wider public
# - Personalization variables : genres, original_language, production_companies, keywords
# - Popularity variables : popularity, vote_average
# sns.set(font_scale=1.5)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(18, 6))
sns.kdeplot(x="log_popularity", data=a_df, fill=True, ax=ax[0], color="#ADD8E6")
ax[0].set_title("persona_A", fontsize=14)
ax[0].tick_params(axis="both", labelsize=14)
ax[0].set_ylabel("Density", fontsize=14)
ax[0].set_xlabel("log_popularity", fontsize=14)
sns.kdeplot(x="log_popularity", data=b_df, fill=True, ax=ax[1], color="#87CEFA")
ax[1].set_title("persona_B", fontsize=14)
ax[1].tick_params(axis="both", labelsize=14)
ax[1].set_ylabel("Density", fontsize=14)
ax[1].set_xlabel("log_popularity", fontsize=14)
sns.kdeplot(x="log_popularity", data=c_df, fill=True, ax=ax[2], color="#00BFFF")
ax[2].set_title("persona_C", fontsize=14)
ax[2].tick_params(axis="both", labelsize=14)
ax[2].set_ylabel("Density", fontsize=14)
ax[2].set_xlabel("log_popularity", fontsize=14)
fig.suptitle("Log Popularity Distribution", fontsize=20)
plt.show()
# Since the distribution is similar, popularity is used as a popularity variable rather than a personalization variable
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(24, 6))
a = np.array(df["vote_average"])
a_cut, bins = pd.cut(
a, bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], labels=False, retbins=True
)
# Find the number of data for each interval
counts = np.zeros(len(bins) - 1)
for i in range(len(bins) - 1):
counts[i] = ((a >= bins[i]) & (a < bins[i + 1])).sum()
ax[0].bar(range(len(bins) - 1), counts, color="#ADD8E6") # bar plot
ax[0].set_xlabel("vote_average", fontsize=15) # x, y labeling
ax[0].set_ylabel("Counts", fontsize=15)
ax[0].set_title("Whole Data", fontsize=20)
ax[0].set_xticks(range(len(bins) - 1))
ax[0].set_xticklabels(bins[:-1])
b = np.array(a_df["vote_average"])
b_cut, bins = pd.cut(
b, bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], labels=False, retbins=True
)
# Find the number of data for each interval
counts = np.zeros(len(bins) - 1)
for i in range(len(bins) - 1):
counts[i] = ((b >= bins[i]) & (b < bins[i + 1])).sum()
ax[1].bar(range(len(bins) - 1), counts, color="#87CEFA")
ax[1].set_xlabel("vote_average", fontsize=15)
ax[1].set_ylabel("Counts", fontsize=15)
ax[1].set_title("persona_A", fontsize=20)
ax[1].set_xticks(range(len(bins) - 1))
ax[1].set_xticklabels(bins[:-1])
c = np.array(b_df["vote_average"])
c_cut, bins = pd.cut(
c, bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], labels=False, retbins=True
)
# Find the number of data for each interval
counts = np.zeros(len(bins) - 1)
for i in range(len(bins) - 1):
counts[i] = ((c >= bins[i]) & (c < bins[i + 1])).sum()
# bar plot
ax[2].bar(range(len(bins) - 1), counts, color="#00BFFF")
ax[2].set_xlabel("vote_average", fontsize=15)
ax[2].set_ylabel("Counts", fontsize=15)
ax[2].set_title("persona_B", fontsize=20)
ax[2].set_xticks(range(len(bins) - 1))
ax[2].set_xticklabels(bins[:-1])
d = np.array(c_df["vote_average"])
d_cut, bins = pd.cut(
d, bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], labels=False, retbins=True
)
# Find the number of data for each interval
counts = np.zeros(len(bins) - 1)
for i in range(len(bins) - 1):
counts[i] = ((d >= bins[i]) & (d < bins[i + 1])).sum()
ax[3].bar(range(len(bins) - 1), counts, color="#6699CC")
ax[3].set_xlabel("vote_average", fontsize=15)
ax[3].set_ylabel("Counts", fontsize=15)
ax[3].set_title("persona_C", fontsize=20)
ax[3].set_xticks(range(len(bins) - 1))
ax[3].set_xticklabels(bins[:-1])
fig.suptitle("Vote_Average", fontsize=20)
plt.show()
# C prefers highly rated movies, i.e. someone who follows what is popular -> forward correction / individual:public ratio 1:2
# B has watched many movies rated 4-6, i.e. someone with distinct personal tastes -> reverse correction method / individual:public ratio 2:1
# A is close to the overall distribution, i.e. a general, average viewer -> forward correction / individual:public ratio 1:1
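# Sketch of the weighting idea above (illustrative; assumes the inputs are already min-max
# scaled to [0, 1] as done for the score frames below). The popularity block
# (log_popularity + vote_average) and the personalization block are combined with a
# persona-specific public:personal weight ratio. The actual score formulas used later keep
# each personalization column at full weight, so this is a simplified reference, not the exact code.
def combined_score(scaled, personal_cols, public_weight=1.0, personal_weight=1.0):
    public = (scaled["log_popularity"] + scaled["vote_average"]) / 2  # in [0, 1]
    personal = scaled[personal_cols].sum(axis=1) / len(personal_cols)  # in [0, 1]
    return public_weight * public + personal_weight * personal
# e.g. persona A -> weights 1:1, persona B -> personal weighted 2:1 over public, persona C -> public 2:1 over personal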
a_df["release_month"].value_counts()
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(18, 6))
df["release_month"] = df["release_date"].str[5:7].values
sns.barplot(
x=[int(i) for i in df["release_month"].value_counts().sort_index().index],
y=df["release_month"].value_counts().sort_index().values,
data=df,
fill=True,
ax=ax[0],
color="#87CEEB",
)
ax[0].set_title("Whole data", fontsize=14)
ax[0].tick_params(axis="both", labelsize=14)
ax[0].set_xlabel("month", fontsize=14)
ax[0].set_ylabel("Density", fontsize=14)
sns.barplot(
x=[int(i) for i in a_df["release_month"].value_counts().sort_index().index],
y=a_df["release_month"].value_counts().sort_index().values,
data=a_df,
fill=True,
ax=ax[1],
color="#6699CC",
)
ax[1].set_title("persona_A", fontsize=14)
ax[1].tick_params(axis="both", labelsize=14)
ax[1].set_xlabel("month", fontsize=14)
ax[1].set_ylabel("Density", fontsize=14)
sns.barplot(
x=[int(i) for i in b_df["release_month"].value_counts().sort_index().index],
y=b_df["release_month"].value_counts().sort_index().values,
data=b_df,
fill=True,
ax=ax[2],
color="#A2C4D9",
)
ax[2].set_title("persona_B", fontsize=14)
ax[2].tick_params(axis="both", labelsize=14)
ax[2].set_xlabel("month", fontsize=14)
ax[2].set_ylabel("Density", fontsize=14)
sns.barplot(
x=[int(i) for i in c_df["release_month"].value_counts().sort_index().index],
y=c_df["release_month"].value_counts().sort_index().values,
data=c_df,
fill=True,
ax=ax[3],
color="#7FB3D5",
)
ax[3].set_title("persona_C", fontsize=14)
ax[3].tick_params(axis="both", labelsize=14)
ax[3].set_xlabel("month", fontsize=14)
ax[3].set_ylabel("Density", fontsize=14)
fig.suptitle("Released_Month_Distribution", fontsize=20)
plt.show()
# There are month-to-month differences, so this can be mentioned when defining the personas
a_recommend = df.copy()
b_recommend = df.copy()
c_recommend = df.copy()
# Runtime
# Create a new df with runtime null or zero
drop_index = list(df[(df["runtime"] == 0) | (df["runtime"].isna())].index)
runtime_df_index = list(set(df.index) - set(drop_index))
runtime_df = df.loc[runtime_df_index]
# Divide categories of total runtime
runtime_df["runtime_catagory"] = 0
runtime_df.loc[
(runtime_df["runtime"] >= 1) & (runtime_df["runtime"] <= 30), "runtime_catagory"
] = "1~30 min"
runtime_df.loc[
(runtime_df["runtime"] > 30) & (runtime_df["runtime"] <= 60), "runtime_catagory"
] = "30~60 min"
runtime_df.loc[
(runtime_df["runtime"] > 60) & (runtime_df["runtime"] <= 90), "runtime_catagory"
] = "60~90 min"
runtime_df.loc[
(runtime_df["runtime"] > 90) & (runtime_df["runtime"] <= 120), "runtime_catagory"
] = "90~120 min"
runtime_df.loc[
(runtime_df["runtime"] > 120) & (runtime_df["runtime"] <= 150), "runtime_catagory"
] = "120~150 min"
runtime_df.loc[
(runtime_df["runtime"] > 150) & (runtime_df["runtime"] <= 180), "runtime_catagory"
] = "150~180 min"
runtime_df.loc[
(runtime_df["runtime"] > 180) & (runtime_df["runtime"] <= 210), "runtime_catagory"
] = "180~210 min"
runtime_df.loc[runtime_df["runtime"] > 210, "runtime_catagory"] = "210 min~"
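# Equivalent binning sketch (illustrative; the explicit .loc assignments above are what the
# rest of the notebook uses): pd.cut builds the same runtime categories in a single call.
runtime_bins = [0, 30, 60, 90, 120, 150, 180, 210, np.inf]
runtime_labels = ["1~30 min", "30~60 min", "60~90 min", "90~120 min",
                  "120~150 min", "150~180 min", "180~210 min", "210 min~"]
runtime_df["runtime_catagory_alt"] = pd.cut(
    runtime_df["runtime"], bins=runtime_bins, labels=runtime_labels
)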
# Slice the runtime frame with each persona's ids
a_runtime_df = runtime_df[runtime_df["id"].isin(persona_A)]
b_runtime_df = runtime_df[runtime_df["id"].isin(persona_B)]
c_runtime_df = runtime_df[runtime_df["id"].isin(persona_C)]
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(16, 4))
pastel_rainbow_colors = [
"#ffb3ba",
"#ffdfba",
"#ffffba",
"#baffc9",
"#bae1ff",
"#ffb6d9",
]
# plt.rcParams.update({'font.size': 10})
# Visualize Full Data Runtime
order = list(runtime_df.sort_values(by="runtime")["runtime_catagory"].unique())
movies_by_runtime = runtime_df["runtime_catagory"].value_counts().reindex(order)
sns.barplot(
x=movies_by_runtime.index,
y=movies_by_runtime.values,
alpha=0.5,
ax=ax[0],
order=order,
)
ax[0].set_title("Runtime of Whole Data", fontsize=10)
ax[0].set_ylabel("Number of movies")
ax[0].set_xticklabels(order, rotation=-70)
# Persona A Runtime Visualization
a_movies_by_runtime = a_runtime_df["runtime_catagory"].value_counts().reindex(order)
sns.barplot(
x=a_movies_by_runtime.index,
y=a_movies_by_runtime.values,
alpha=0.5,
ax=ax[1],
order=order,
)
ax[1].set_title("Runtime of Persona A", fontsize=10)
ax[1].set_xticklabels(order, rotation=-70)
for i, v in enumerate(list(a_movies_by_runtime.values)):
ax[1].text(i, v, str(v), ha="center", fontsize=12)
# Persona B runtime Visualization
b_movies_by_runtime = b_runtime_df["runtime_catagory"].value_counts().reindex(order)
sns.barplot(
x=b_movies_by_runtime.index,
y=b_movies_by_runtime.values,
alpha=0.5,
ax=ax[2],
order=order,
)
ax[2].set_title("Runtime of Persona B", fontsize=10)
ax[2].set_xticklabels(order, rotation=-70)
for i, v in enumerate(list(b_movies_by_runtime.values)):
ax[2].text(i, v, str(v), ha="center", fontsize=12)
# Persona C runtime Visualization
c_movies_by_runtime = c_runtime_df["runtime_catagory"].value_counts().reindex(order)
sns.barplot(
x=c_movies_by_runtime.index,
y=c_movies_by_runtime.values,
alpha=0.5,
ax=ax[3],
order=order,
)
ax[3].set_title("Runtime of Persona C", fontsize=10)
ax[3].set_xticklabels(order, rotation=-70)
for i, v in enumerate(list(c_movies_by_runtime.values)):
ax[3].text(i, v, str(v), ha="center", fontsize=12)
plt.show()
# A and C tend to like short movies even compared to the whole, so they are probably on the impatient side
# B, on the other hand, prefers longer movies than the overall distribution -> more likely a patient, middle-aged viewer
# ## Persona_A
# - Forward correction method (individual:public ratio 1:1): even though A's pattern is similar to the overall distribution, we still look at A's own preferred characteristics
# ### Release_date
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(18, 6))
list_df = []
a = (
df[df["release_year"] < 1920]["id"].count()
- df[df["release_year"] == 0]["id"].count()
)
list_df.append(a)
for i in np.arange(1920, 2020, 5):
list_df.append(
df[(df["release_year"] >= i) & (df["release_year"] < i + 5)]["id"].count()
)
b = df[df["release_year"] >= 2020]["id"].count()
list_df.append(b)
x = [
"~1920",
"1920~1924",
"1925~1929",
"1930~1934",
"1935~1939",
"1940~1944",
"1945~1949",
"1950~1954",
"1955~1959",
"1960~1964",
"1965~1969",
"1970~1974",
"1975~1979",
"1980~1984",
"1985~1989",
"1990~1994",
"1995~1999",
"2000~2004",
"2005~2009",
"2010~2014",
"2015~2019",
"2020~",
]
y = list_df
ax[0].set_title("Release_Year_Whole_Data", fontsize=20)
ax[0].step(x, y, lw=4)
ax[0].plot(x, y, "o--", color="#808080", alpha=0.7, linewidth=4)
ax[0].grid(axis="x", color="purple", alpha=0.2, ls="--", lw=1.2)
ax[0].set_xticklabels(x, fontsize=12, rotation=270)
ax[0].legend(loc="upper center", fontsize=15)
for idx, txt in enumerate(y):
ax[0].text(x[idx], y[idx] + 1.0, txt, ha="center", color="black")
list_A = []
a = (
a_df[a_df["release_year"] < 1920]["id"].count()
- a_df[a_df["release_year"] == 0]["id"].count()
)
list_A.append(a)
for i in np.arange(1920, 2020, 5):
list_A.append(
a_df[(a_df["release_year"] >= i) & (a_df["release_year"] < i + 5)]["id"].count()
)
b = a_df[a_df["release_year"] >= 2020]["id"].count()
list_A.append(b)
x = [
"~1920",
"1920~1924",
"1925~1929",
"1930~1934",
"1935~1939",
"1940~1944",
"1945~1949",
"1950~1954",
"1955~1959",
"1960~1964",
"1965~1969",
"1970~1974",
"1975~1979",
"1980~1984",
"1985~1989",
"1990~1994",
"1995~1999",
"2000~2004",
"2005~2009",
"2010~2014",
"2015~2019",
"2020~",
]
y = list_A
ax[1].set_title("Release_Year_seen_by_A", fontsize=20)
ax[1].step(x, y, lw=4)
ax[1].plot(x, y, "o--", color="#808080", alpha=0.7, linewidth=4)
ax[1].grid(axis="x", color="purple", alpha=0.2, ls="--", lw=1.2)
ax[1].set_xticklabels(x, fontsize=12, rotation=270)
ax[1].legend(loc="upper center", fontsize=15)
for idx, txt in enumerate(y):
ax[1].text(x[idx], y[idx] + 1.0, txt, ha="center", color="black")
# A has no different characteristics compared to the whole
# ### original_language
A_count = dict(Counter(a_df["original_language"].values.tolist()))
A_count = dict(sorted(A_count.items(), key=lambda x: x[1], reverse=True))
A_count
# Keep the top languages that appear more than twice (thres = 2)
top_genres_A = dict()
etc = 0
thres = 2
for key in A_count:
if A_count[key] > thres:
top_genres_A[key] = A_count[key]
else:
etc += A_count[key]
# top_genres_B[f'under {thres*100}%'] = etc
top_genres_A["etc"] = etc
# custom pie chart label formatter
def custom_autopct(pct):
    return ("%.1f%%" % pct) if True else ""  # always show the percentage; tweak the condition later if needed
fig, ax = plt.subplots(
nrows=1, ncols=2, figsize=(18, 6), gridspec_kw={"width_ratios": [3, 5]}
)
wedgeprops = {"width": 0.6, "edgecolor": "w", "linewidth": 2}
colors = ["#ffadad", "#ffd6a5", "#fdffb6", "#caffbf", "#98fb98", "#a0c4ff", "#dcdcdc"]
ax[0].pie(
top_genres_A.values(),
labels=top_genres_A.keys(),
autopct=custom_autopct,
startangle=90,
wedgeprops=wedgeprops,
colors=colors,
textprops={"fontsize": 14},
)
ax[0].legend(title="original_language", loc=(0.95, 0.65))
ax[0].set_title("original_language_A_Top", fontsize=20)
# bar chart
x = list(A_count.keys())
y = list(A_count.values())
palette = sns.color_palette("husl", len(x))
sns.barplot(x=x, y=y, alpha=0.8, ax=ax[1], palette=palette)
# ax[1].set_xlabel("Language")
ax[1].set_title("original_language_A_All", fontsize=20)
ax[1].set_xticklabels(x, rotation=-45)
ax[1].tick_params(axis="x", labelsize=14)
legend = ax[1].legend(
x, title="original_language", loc=(0.84, 0.05)
) # Import Legend Objects
lines = legend.get_lines()
for i in range(len(lines)):
lines[i].set_color(palette[i]) # Setting the color of a legend item
plt.show()
df_count = dict(Counter(df["original_language"].values.tolist()))
df_count = dict(sorted(df_count.items(), key=lambda x: x[1], reverse=True)[:20])
x = list(df_count.keys())
y = list(df_count.values())
plt.figure(figsize=(12, 5))
sns.barplot(x=x, y=y, alpha=0.8)
plt.title("Original_language_all_dataframe", fontsize=20)
plt.xticks(rotation=-45)
plt.show()
# ### Genres
temp_genre = [word for words in a_df["genres"].values for word in words]
a_df_genre = dict(Counter(temp_genre))
a_df_genre = dict(sorted(a_df_genre.items(), key=lambda x: x[1], reverse=True))
a_df_genre
from itertools import chain
doc_related = dict()
for words in ["Documentary"]:
has = df["genres"].apply(lambda x: words in x)
doc_related = list(chain(*df[has].genres.values))
doc_related = dict(Counter(doc_related))
doc_related = dict(sorted(doc_related.items(), key=lambda x: x[1], reverse=True))
doc_related
doc_related_copy = doc_related.copy()
del doc_related_copy["Documentary"]
temp_df = pd.DataFrame(doc_related_copy, index=["freq"]).T.reset_index()
temp_df["label"] = temp_df["index"] + "<br>" + temp_df["freq"].astype(str)
fig = px.treemap(
temp_df,
path=[px.Constant("Related Genres with Documentary"), "label"],
values="freq",
color="freq",
color_continuous_scale="RdBu",
color_continuous_midpoint=np.average(temp_df["freq"], weights=temp_df["freq"]),
)
# fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
fig.update_layout(
plot_bgcolor="white",
paper_bgcolor="white",
width=650,
height=300,
margin=dict(l=0, r=0, t=50, b=0),
)
fig.update_traces(textfont=dict(size=20), root_color="white")
fig.show()
# ### Company & Credits
# - Not significant because almost every company/actor appears only once: these variables are not used
A_list = [word for words in a_df["production_companies"].values for word in words]
A_company = dict(Counter(A_list))
A_company = dict(sorted(A_company.items(), key=lambda x: x[1], reverse=True))
del A_company[""]
A_company
top_company_A = dict()
thres = 0
for key in A_company:
if (A_company[key]) >= thres:
top_company_A[key] = A_company[key]
else:
pass
# top_company_A : dict form
company_map = pd.DataFrame(top_company_A, index=["A_COMPANY_COUNT"])
display(company_map)
colorscale = [[0, "#AEC6CF"], [1, "#000080"]]
x = company_map.columns.tolist()
y = company_map.index.tolist()
annotations = []
for c in range(company_map.shape[1]):
if c == 0 or c == int((company_map.shape[1]) / 2):
annotations.append(
go.layout.Annotation(
text=str(company_map.iloc[0][c]),
x=x[c],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
else:
annotations.append(
go.layout.Annotation(
text="",
x=x[c],
y=y[0],
font={"size": 10, "color": "white"},
showarrow=False,
)
)
layout = go.Layout(
annotations=annotations,
width=800,
height=200,
coloraxis_colorbar=dict(thickness=0),
coloraxis_showscale=False,
margin=dict(l=0, r=0, t=0, b=180),
)
fig = go.Figure(
data=go.Heatmap(
z=company_map,
x=company_map.columns.tolist(), # text_auto=True,
y=company_map.index.tolist(),
colorscale=colorscale,
text=list(company_map.values),
showscale=False,
),
layout=layout,
)
fig.show()
A_list = [word for words in a_df["credits"].values for word in words]
A_credits = dict(Counter(A_list))
A_credits = dict(sorted(A_credits.items(), key=lambda x: x[1], reverse=True))
del A_credits[""]
A_credits
# A_credits : dict form
credit_map = pd.DataFrame(A_credits, index=["A_CREDITS_COUNT"])
display(credit_map)
colorscale = [[0, "#AEC6CF"], [1, "#AEC6CF"]]
x = credit_map.columns.tolist()
y = credit_map.index.tolist()
annotations = []
for c in range(credit_map.shape[1]):
if c == int((credit_map.shape[1]) / 2):
annotations.append(
go.layout.Annotation(
text=str(credit_map.iloc[0][c]),
x=x[c],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
else:
annotations.append(
go.layout.Annotation(
text="",
x=x[c],
y=y[0],
font={"size": 10, "color": "white"},
showarrow=False,
)
)
layout = go.Layout(
annotations=annotations,
width=800,
height=200,
coloraxis_colorbar=dict(thickness=0),
coloraxis_showscale=False,
margin=dict(l=0, r=0, t=0, b=180),
)
fig = go.Figure(
data=go.Heatmap(
z=credit_map,
x=credit_map.columns.tolist(), # text_auto=True,
y=credit_map.index.tolist(),
colorscale=colorscale,
text=list(credit_map.values),
showscale=False,
),
layout=layout,
)
fig.show()
# ### keywords
a_df_list = [word for words in a_df["keywords"].values for word in words]
a_df_keywords = dict(Counter(a_df_list))
a_df_keywords = dict(sorted(a_df_keywords.items(), key=lambda x: x[1], reverse=True))
del a_df_keywords[""]
a_df_keywords
from itertools import chain
a_key = set(a_df_keywords.keys())
a_related = pd.DataFrame(index=list(a_key), columns=list(a_key))
a_related.loc[:, :] = 0
# Processing
for words in list(a_key):
has = df["keywords"].apply(lambda x: words in x)
result = set(list(chain(*df[has].keywords.values)))
result.discard(words)
result = list(result & a_key)
a_related.loc[words, result] = 1
display(a_related)
A_heatmap = a_related.copy()
a_related["related_rank"] = a_related.sum(axis=1)
a_related = a_related.sort_values(by="related_rank", ascending=False)
temp_join = pd.DataFrame(a_df_keywords, index=["freq"]).T
temp_join
a_related = a_related.join(temp_join, how="inner")
a_related["keyword_score"] = a_related["freq"] * a_related["related_rank"]
display(a_related)
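# Worked reading of keyword_score (added note): for each of A's keywords, related_rank counts
# how many of A's other keywords ever co-occur with it somewhere in the full catalogue, and
# freq is how often it appears in A's movies. So a keyword A saw 4 times that co-occurs with
# 10 of A's other keywords gets keyword_score = 4 * 10 = 40; frequent, well-connected keywords dominate.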
colorscale = [
[0, "#f7e6d2"],
[0.2, "#f1c6a6"],
[0.4, "#eaa783"],
[0.6, "#e2856e"],
[0.8, "#d15e5a"],
[1, "#b12c41"],
]
# Calculate the maximum and minimum values for each column
max_vals = A_heatmap.max().max()
min_vals = A_heatmap.min().min()
# Min-Max Scaling
A_heatmap = (A_heatmap - min_vals) / (max_vals - min_vals)
fig = go.Figure(
data=go.Heatmap(
z=A_heatmap,
x=A_heatmap.index.tolist(),
y=A_heatmap.columns.tolist(),
colorscale="YlGnBu",
)
)
fig.update_layout(
title={"text": "Keyword Network heatmap_A", "x": 0.55, "y": 0.96},
margin=dict(l=0, r=0, t=50, b=0)
# ,xaxis=dict(title='X Axis')
# ,yaxis=dict(title='Keyw')
)
fig.update_layout(width=600, height=600)
fig.show()
# ### Recommendation
keyword_dict = dict(a_related["keyword_score"])
a_recommend["keyword_score"] = 0
for keyword in keyword_dict:
a_recommend.loc[
a_recommend["keywords"].apply(lambda x: keyword in x), "keyword_score"
] += keyword_dict[keyword]
a_recommend["langu_score"] = a_recommend["original_language"].apply(
lambda x: A_count.get(x, 0)
)
a_recommend["genre_score"] = 0
for genre in doc_related:
a_recommend.loc[
a_recommend["genres"].apply(lambda x: genre in x), "genre_score"
] += doc_related[genre]
a_recommend
a_score = a_recommend[
[
"id",
"log_popularity",
"vote_average",
"langu_score",
"genre_score",
"keyword_score",
]
]
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(a_score.iloc[:, 1:])
scaled_a_score = pd.DataFrame(
X_scaled, columns=a_score.iloc[:, 1:].columns, index=a_score["id"]
)
scaled_a_score
scaled_a_score["total_score"] = (
((scaled_a_score["log_popularity"] + scaled_a_score["vote_average"]) / 2 * 3)
+ scaled_a_score["langu_score"]
+ scaled_a_score["genre_score"]
+ scaled_a_score["keyword_score"]
)
scaled_a_score = scaled_a_score.sort_values(by="total_score", ascending=False)
scaled_a_score = scaled_a_score[~scaled_a_score.index.isin(a_df.id.tolist())]
display(scaled_a_score)
print(
set(a_df.id.values.tolist()) & set(scaled_a_score.index.tolist())
) # No intersection
a_radar = scaled_a_score.iloc[:3, :5]
a_radar.reset_index(inplace=True)
a_radar_ranking = a_radar.id.values
display(a_radar)
labels = a_radar.iloc[:, 1:].columns
num_labels = len(labels)
angles = [x / float(num_labels) * (2 * np.pi) for x in range(num_labels)]  ## equally spaced angle for each axis
angles += angles[:1]  ## repeat the first angle so the radar polygon closes back at its start
my_palette = plt.get_cmap("Set2", len(a_radar.iloc[:, 1:].index))
a_radar = a_radar.set_index("id").loc[a_radar_ranking].reset_index()
# Subplot setup
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(24, 6), subplot_kw=dict(polar=True))
# Draw the radar charts
for i, row in a_radar.iterrows():
color = my_palette(i)
data = row[1:].tolist()
data += data[:1]
ax = axes[i]
ax.set_theta_offset(np.pi / 2) ## the starting point
ax.set_theta_direction(-1) ## Direction drawn Clockwise
ax.set_rlabel_position(0) ## Set radial axis tick label angle (in degrees)
ax.plot(
angles,
data,
color=color,
linewidth=2,
linestyle="solid",
label=df[df["id"] == row.id].title.values[0],
) ## Radar Chart Output
ax.fill(
angles, data, color=color, alpha=0.2
) ## Fill the inside of the figure with color.
ax.set_xticks(angles[:-1])
ax.set_xticklabels(labels, fontsize=13) ## Angle axis tick label
ax.tick_params(
axis="x", which="major", pad=15
) ## Give a space between each axis and the scale.
# ax.set_rlim(0, 10) ## Set radius axis range
ax.set_title(df[df["id"] == row.id].title.values[0], fontsize=15, fontweight="bold")
# Legend Settings
# fig.legend(title = "Recommend_Movie", loc=(0.75,0.7), fontsize=15, title_fontsize=18)
plt.subplots_adjust(wspace=0.35)
plt.show()
top_number = 3
a_recommend_id = scaled_a_score.index.tolist()[:top_number]
df[df["id"].isin(a_recommend_id)]
result = poster_df[poster_df["id"].isin(a_recommend_id)]
result = result.set_index("id").loc[a_radar_ranking].reset_index()
fig, ax = plt.subplots(1, 3, figsize=(15, 15))
for i, j in enumerate(result.poster_path.unique()):
try:
ax[i].axis("off")
ax[i].set_title(result.iloc[i].title)
a = io.imread(f"https://image.tmdb.org/t/p/w500/{j}")
ax[i].imshow(a)
except:
pass
fig.tight_layout()
# ## Persona_B
# - Reverse correction method (personalization and popularization ratio 2:1)
# ### Release_date
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(18, 6))
list_df = []
a = (
df[df["release_year"] < 1920]["id"].count()
- df[df["release_year"] == 0]["id"].count()
)
list_df.append(a)
for i in np.arange(1920, 2020, 5):
list_df.append(
df[(df["release_year"] >= i) & (df["release_year"] < i + 5)]["id"].count()
)
b = df[df["release_year"] >= 2020]["id"].count()
list_df.append(b)
x = [
"~1920",
"1920~1924",
"1925~1929",
"1930~1934",
"1935~1939",
"1940~1944",
"1945~1949",
"1950~1954",
"1955~1959",
"1960~1964",
"1965~1969",
"1970~1974",
"1975~1979",
"1980~1984",
"1985~1989",
"1990~1994",
"1995~1999",
"2000~2004",
"2005~2009",
"2010~2014",
"2015~2019",
"2020~",
]
y = list_df
ax[0].set_title("Release_Year_Whole_Data", fontsize=20)
ax[0].step(x, y, lw=4)
ax[0].plot(x, y, "o--", color="#808080", alpha=0.7, linewidth=4)
ax[0].grid(axis="x", color="purple", alpha=0.2, ls="--", lw=1.2)
ax[0].set_xticklabels(x, fontsize=12, rotation=270)
ax[0].legend(loc="upper center", fontsize=15)
for idx, txt in enumerate(y):
ax[0].text(x[idx], y[idx] + 1.0, txt, ha="center", color="black")
list_B = []
a = (
b_df[b_df["release_year"] < 1920]["id"].count()
- b_df[b_df["release_year"] == 0]["id"].count()
)
list_B.append(a)
for i in np.arange(1920, 2020, 5):
list_B.append(
b_df[(b_df["release_year"] >= i) & (b_df["release_year"] < i + 5)]["id"].count()
)
b = b_df[b_df["release_year"] >= 2020]["id"].count()
list_B.append(b)
x = [
"~1920",
"1920~1924",
"1925~1929",
"1930~1934",
"1935~1939",
"1940~1944",
"1945~1949",
"1950~1954",
"1955~1959",
"1960~1964",
"1965~1969",
"1970~1974",
"1975~1979",
"1980~1984",
"1985~1989",
"1990~1994",
"1995~1999",
"2000~2004",
"2005~2009",
"2010~2014",
"2015~2019",
"2020~",
]
y = list_B
ax[1].set_title("Release_Year_seen_by_B", fontsize=20)
ax[1].step(x, y, lw=4)
ax[1].plot(x, y, "o--", color="#808080", alpha=0.7, linewidth=4)
ax[1].grid(axis="x", color="purple", alpha=0.2, ls="--", lw=1.2)
ax[1].set_xticklabels(x, fontsize=12, rotation=270)
ax[1].legend(loc="upper center", fontsize=15)
for idx, txt in enumerate(y):
ax[1].text(x[idx], y[idx] + 1.0, txt, ha="center", color="black")
# Unlike the overall distribution, B watched many movies from the 1960s, so B is probably older
# ### genres
B_list = [word for words in b_df["genres"].values for word in words]
B_count = dict(Counter(B_list))
B_count = dict(sorted(B_count.items(), key=lambda x: x[1], reverse=True))
del B_count[""]
B_count
# Keep the top genres with more than a 3% share
top_genres_B = dict()
etc = 0
thres = 0.03
for key in B_count:
if (B_count[key] / sum(B_count.values())) > thres:
top_genres_B[key] = B_count[key]
else:
etc += B_count[key]
# top_genres_B[f'under {thres*100}%'] = etc
top_genres_B["etc"] = etc
# Top genre pie chart
def custom_autopct(pct):
return ("%.1f%%" % pct) if True else ""
fig, ax = plt.subplots(
nrows=1, ncols=2, figsize=(18, 6), gridspec_kw={"width_ratios": [3, 5]}
)
wedgeprops = {"width": 0.6, "edgecolor": "w", "linewidth": 2}
colors = [
"#ffadad",
"#ffd6a5",
"#fdffb6",
"#caffbf",
"#98fb98",
"#9bf6ff",
"#a0c4ff",
"#dcdcdc",
]
ax[0].pie(
top_genres_B.values(),
labels=top_genres_B.keys(),
autopct=custom_autopct,
startangle=90,
wedgeprops=wedgeprops,
colors=colors,
textprops={"fontsize": 14},
)
ax[0].legend(title="genres", loc=(0.9, 0.6))
ax[0].set_title("Genres_B_Top", fontsize=20)
# bar chart
x = list(B_count.keys())
y = list(B_count.values())
palette = sns.color_palette("husl", len(x))
sns.barplot(x=x, y=y, alpha=0.8, ax=ax[1], palette=palette)
ax[1].set_title("Genres_B_All", fontsize=20)
ax[1].set_xticklabels(x, rotation=-45)
ax[1].tick_params(axis="x", labelsize=14)
legend = ax[1].legend(x, title="genres", loc=(0.81, 0.17))
lines = legend.get_lines()
for i in range(len(lines)):
lines[i].set_color(palette[i])
plt.show()
# Observed characteristics
# B shows a clear preference for comedy, drama, and documentary,
# and seems to favor comfortable, realistic movies over crime, thriller, and fantasy.
df_list = [word for words in df["genres"].values for word in words]
df_count = dict(Counter(df_list))
df_count = dict(sorted(df_count.items(), key=lambda x: x[1], reverse=True))
del df_count[""]
x = list(df_count.keys())
y = list(df_count.values())
plt.figure(figsize=(12, 5))
sns.barplot(x=x, y=y, alpha=0.8)
plt.title("Genres_all_dataframe", fontsize=20)
plt.xticks(rotation=-45)
plt.show()
# ### Company & Credits
B_list = [word for words in b_df["production_companies"].values for word in words]
B_company = dict(Counter(B_list))
B_company = dict(sorted(B_company.items(), key=lambda x: x[1], reverse=True))
del B_company[""]
B_company
top_company_B = dict()
thres = 2
for key in B_company:
if (B_company[key]) >= thres:
top_company_B[key] = B_company[key]
else:
pass
top_company_B
B_list = [word for words in b_df["credits"].values for word in words]
B_credits = dict(Counter(B_list))
B_credits = dict(sorted(B_credits.items(), key=lambda x: x[1], reverse=True))
del B_credits[""]
B_credits
credit_map = pd.DataFrame(B_credits, index=["B_credits_COUNT"])
display(credit_map)
actor = credit_map.columns.tolist()
temp = credit_map.values[0]
while (len(temp) % 20) != 0:
temp = np.append(temp, 0)
actor.append("")
credit_heatmap = pd.DataFrame(temp.reshape(20, 44))
actor = np.array(actor).reshape(20, 44)
credit_heatmap.index = sorted(credit_heatmap.index.tolist(), reverse=True)
# credit_heatmap
colorscale = [[0, "white"], [0.3, "#EED8AE"], [0.6, "#CDB79E"], [1, "#95673F"]]
x = credit_heatmap.columns.tolist()
y = credit_heatmap.index.tolist()
annotations = []
for c in range(credit_heatmap.shape[1]):
if c == 0 or c == 1 or c == 19 or c == int((credit_heatmap.shape[1]) / 14):
annotations.append(
go.layout.Annotation(
text=str(credit_heatmap.iloc[0][c]),
x=x[c],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
else:
annotations.append(
go.layout.Annotation(
text="",
x=x[c],
y=y[0],
font={"size": 10, "color": "white"},
showarrow=False,
)
)
annotations = []
annotations.append(
go.layout.Annotation(
text=str(credit_heatmap.iloc[0][0]),
x=x[0],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
annotations.append(
go.layout.Annotation(
text=str(credit_heatmap.iloc[0][5]),
x=x[2],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
annotations.append(
go.layout.Annotation(
text=str(credit_heatmap.iloc[10][20]),
x=x[22],
y=y[9],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
layout = go.Layout(
annotations=annotations,
width=600,
height=500,
coloraxis_colorbar=dict(thickness=0),
coloraxis_showscale=False,
)
fig_ = go.Figure(
data=go.Heatmap(
z=credit_heatmap,
x=credit_heatmap.columns.tolist(), # text_auto=True,
y=credit_heatmap.index.tolist(),
colorscale=colorscale,
text=actor,
showscale=False,
),
layout=layout,
)
fig_.update_xaxes(showticklabels=False)
fig_.update_yaxes(showticklabels=False)
fig_.update_layout(title={"text": "Credit_Count_B", "x": 0.5, "y": 0.85})
fig_.show() # Not suitable for use with algorithms
credit_dict = dict()
temp = credit_map.T.copy()
for i in range(1, 4):
credit_dict[i] = temp[temp["B_credits_COUNT"] == i].index.tolist()
credit_dict2 = dict()
for k in credit_dict:
new_list = [credit_dict[k][i : i + 10] for i in range(0, len(credit_dict[k]), 10)]
list1 = list()
list2 = list()
for i in range(len(new_list)):
list1.append("|".join(new_list[i]))
credit_dict2[k] = "<br>".join(list1)
# credit_dict2
"""credit_dict = dict()
temp = credit_map.T.copy()
for i in range(1,4):
credit_dict[i] = '|'.join(temp[temp['B_credits_COUNT']==i].index.tolist())"""
temp_df = pd.DataFrame(credit_dict2, index=["freq"]).T.reset_index()
temp_df.columns = ["Count", "credit"]
temp_df["freq"] = temp_df["credit"].apply(lambda x: len(x.split("|")))
temp_df["credit"] = temp_df["credit"].str[:30] + "..."
temp_df["label"] = (
"Actor_Freq : "
+ temp_df["Count"].astype(str)
+ "<br>"
+ "Count :"
+ temp_df["freq"].astype(str)
+ "<br>"
+ temp_df["credit"].astype(str)[:10]
)
temp_df
fig = px.treemap(
temp_df,
path=[px.Constant("Credit_Count_B"), "label"],
values="freq",
# color='freq', color_continuous_scale='RdBu',
# color_continuous_midpoint=np.average(temp_df['freq'], weights=temp_df['freq']),
color_discrete_sequence=["#f1ccb8", "#f1d4af", "#f2dfce"],
)
# fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
fig.update_layout(
plot_bgcolor="white",
paper_bgcolor="white",
width=650,
height=300,
margin=dict(l=0, r=0, t=50, b=0),
)
fig.update_traces(textfont=dict(size=20), root_color="#e6e6e6")
fig.show()
fig, ax = plt.subplots(
nrows=1, ncols=2, figsize=(18, 6), gridspec_kw={"width_ratios": [3, 5]}
)
B_temp = credit_map.T.value_counts()
def custom_autopct(pct):
return ("%.1f%%" % pct) if pct > 1 else ""
wedgeprops = {"width": 0.6, "edgecolor": "w", "linewidth": 2}
colors = ["#ffd6a5", "#fdffb6", "black", "#dcdcdc"]
ax[0].pie(
B_temp.values.tolist(),
labels=[1, 2, 3],
autopct=custom_autopct,
startangle=90,
wedgeprops=wedgeprops,
colors=colors,
textprops={"fontsize": 15},
)
ax[0].legend(title="genres")
ax[0].set_title("Credit_Count_B", fontsize=15)
x = [
"Sacher Film",
"Dania Film",
"Rizzoli Film",
"PEA",
"01Distribution",
"Italian\nInternational\nFilm",
"RAI",
"RAI Cinema",
]
colors = [
"#F0EAD6",
"#FFEFD5",
"#FFE4C4",
"#FFEBCD",
"#F3E5AB",
"#EED8AE",
"#CDB79E",
"#E0CDA9",
]
ax[1].barh(x, list(top_company_B.values())[::-1], color=colors[::-1])
ax[1].set_title("Top_B_Company", fontsize=20)
# plt.set_yticklabels(list(top_company_B.keys())[::-1],rotation = -45)
ax[1].set_xlabel("Frequency", fontsize=14)
ax[1].tick_params(axis="both", labelsize=13)
for i, v in enumerate(list(top_company_B.values())[::-1]):
ax[1].text(v + 0.1, i, str(v), ha="center", fontsize=12)
# ### Keywords
b_df_list = [word for words in b_df["keywords"].values for word in words]
b_df_keywords = dict(Counter(b_df_list))
b_df_keywords = dict(sorted(b_df_keywords.items(), key=lambda x: x[1], reverse=True))
del b_df_keywords[""]
b_df_keywords
from itertools import chain
b_key = set(b_df_keywords.keys())
b_related = pd.DataFrame(index=list(b_key), columns=list(b_key))
b_related.loc[:, :] = 0
for words in list(b_key):
has = df["keywords"].apply(lambda x: words in x)
result = set(list(chain(*df[has].keywords.values)))
result.discard(words)
result = list(result & b_key)
b_related.loc[words, result] = 1
display(b_related)
B_heatmap = b_related.copy()
b_related["related_rank"] = b_related.sum(axis=1)
b_related = b_related.sort_values(by="related_rank", ascending=False)
temp_join = pd.DataFrame(b_df_keywords, index=["freq"]).T
temp_join
b_related = b_related.join(temp_join, how="inner")
b_related["keyword_score"] = b_related["freq"] * b_related["related_rank"]
display(b_related)
colorscale = [
[0, "#f7e6d2"],
[0.2, "#f1c6a6"],
[0.4, "#eaa783"],
[0.6, "#e2856e"],
[0.8, "#d15e5a"],
[1, "#b12c41"],
]
max_vals = B_heatmap.max().max()
min_vals = B_heatmap.min().min()
# Min-Max Scaling
B_heatmap = (B_heatmap - min_vals) / (max_vals - min_vals)
fig = go.Figure(
data=go.Heatmap(
z=B_heatmap,
x=B_heatmap.index.tolist(),
y=B_heatmap.columns.tolist(),
colorscale="YlGnBu",
)
)
fig.update_layout(
title={"text": "Keyword Network heatmap_B", "x": 0.55, "y": 0.96},
margin=dict(l=0, r=0, t=50, b=0)
# ,xaxis=dict(title='X Axis')
# ,yaxis=dict(title='Keyw')
)
fig.update_layout(width=600, height=600)
fig.show()
# ### Recommendation
keyword_dict = dict(b_related["keyword_score"])
b_recommend["keyword_score"] = 0
for keyword in keyword_dict:
b_recommend.loc[
b_recommend["keywords"].apply(lambda x: keyword in x), "keyword_score"
] += keyword_dict[keyword]
b_recommend["genre_score"] = 0
for genre in B_count:
b_recommend.loc[
b_recommend["genres"].apply(lambda x: genre in x), "genre_score"
] += B_count[genre]
b_recommend
b_recommend["company_score"] = 0
for company in B_company:
b_recommend.loc[
b_recommend["production_companies"].apply(lambda x: company in x),
"company_score",
] += B_company[company]
b_recommend
b_score = b_recommend[
[
"id",
"log_popularity",
"vote_average",
"company_score",
"genre_score",
"keyword_score",
]
]
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(b_score.iloc[:, 1:])
scaled_b_score = pd.DataFrame(
X_scaled, columns=b_score.iloc[:, 1:].columns, index=b_score["id"]
)
scaled_b_score
scaled_b_score["total_score"] = (
((scaled_b_score["log_popularity"] + scaled_b_score["vote_average"]) / 4 * 3)
+ scaled_b_score["company_score"]
+ scaled_b_score["genre_score"]
+ scaled_b_score["keyword_score"]
)
scaled_b_score = scaled_b_score.sort_values(by="total_score", ascending=False)
scaled_b_score = scaled_b_score[~scaled_b_score.index.isin(b_df.id.tolist())]
display(scaled_b_score)
print(
set(b_df.id.values.tolist()) & set(scaled_b_score.index.tolist())
) # No intersection
b_radar = scaled_b_score.iloc[:3, :5]
b_radar.reset_index(inplace=True)
b_radar_ranking = b_radar.id.values
display(b_radar)
labels = b_radar.iloc[:, 1:].columns
num_labels = len(labels)
angles = [
    x / float(num_labels) * (2 * np.pi) for x in range(num_labels)
]  ## equally spaced angles, one per axis
angles += angles[
    :1
]  ## repeat the first angle so the radar polygon closes back at its starting point
my_palette = plt.get_cmap("Set2", len(b_radar.iloc[:, 1:].index))
b_radar = b_radar.set_index("id").loc[b_radar_ranking].reset_index()
# Subplot settings
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(24, 6), subplot_kw=dict(polar=True))
# Draw the radar charts
for i, row in b_radar.iterrows():
color = my_palette(i)
data = row[1:].tolist()
data += data[:1]
ax = axes[i]
ax.set_theta_offset(np.pi / 2) ## the starting point
ax.set_theta_direction(-1) ## Direction drawn Clockwise
ax.set_rlabel_position(0) ## Set radial axis tick label angle (in degrees)
ax.plot(
angles,
data,
color=color,
linewidth=2,
linestyle="solid",
label=df[df["id"] == row.id].title.values[0],
) ## Radar Chart Output
ax.fill(
angles, data, color=color, alpha=0.2
) ## Fill the inside of the figure with color.
ax.set_xticks(angles[:-1])
ax.set_xticklabels(labels, fontsize=13) ## Angle axis tick label
ax.tick_params(
axis="x", which="major", pad=15
) ## Give a space between each axis and the scale.
# ax.set_rlim(0, 10) ## Set radius axis range
ax.set_title(df[df["id"] == row.id].title.values[0], fontsize=15, fontweight="bold")
# Legend Settings
# fig.legend(title = "Recommend_Movie", loc=(0.75,0.7), fontsize=15, title_fontsize=18)
plt.subplots_adjust(wspace=0.35)
plt.show()
top_number = 3
b_recommend_id = scaled_b_score.index.tolist()[:top_number]
df[df["id"].isin(b_recommend_id)]
result = poster_df[poster_df["id"].isin(b_recommend_id)]
result = result.set_index("id").loc[b_radar_ranking].reset_index()
fig, ax = plt.subplots(1, 3, figsize=(15, 15))
for i, j in enumerate(result.poster_path.unique()):
try:
ax[i].axis("off")
ax[i].set_title(result.iloc[i].title)
a = io.imread(f"https://image.tmdb.org/t/p/w500/{j}")
ax[i].imshow(a)
except:
pass
fig.tight_layout()
# ## Persona_C
# - Forward correction method (personalization/popularization ratio 1:2): even when a pattern looks similar to the overall distribution, it is still treated as a preference of persona C
# ### Release_date
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(18, 6))
list_df = []
a = (
df[df["release_year"] < 1920]["id"].count()
- df[df["release_year"] == 0]["id"].count()
)
list_df.append(a)
for i in np.arange(1920, 2020, 5):
list_df.append(
df[(df["release_year"] >= i) & (df["release_year"] < i + 5)]["id"].count()
)
b = df[df["release_year"] >= 2020]["id"].count()
list_df.append(b)
x = [
"~1920",
"1920~1924",
"1925~1929",
"1930~1934",
"1935~1939",
"1940~1944",
"1945~1949",
"1950~1954",
"1955~1959",
"1960~1964",
"1965~1969",
"1970~1974",
"1975~1979",
"1980~1984",
"1985~1989",
"1990~1994",
"1995~1999",
"2000~2004",
"2005~2009",
"2010~2014",
"2015~2019",
"2020~",
]
y = list_df
ax[0].set_title("Release_Year_Whole_Data", fontsize=20)
ax[0].step(x, y, lw=4)
ax[0].plot(x, y, "o--", color="#808080", alpha=0.7, linewidth=4)
ax[0].grid(axis="x", color="purple", alpha=0.2, ls="--", lw=1.2)
ax[0].set_xticklabels(x, fontsize=12, rotation=270)
ax[0].legend(loc="upper center", fontsize=15)
for idx, txt in enumerate(y):
ax[0].text(x[idx], y[idx] + 1.0, txt, ha="center", color="black")
list_C = []
a = (
c_df[c_df["release_year"] < 1920]["id"].count()
- c_df[c_df["release_year"] == 0]["id"].count()
)
list_C.append(a)
for i in np.arange(1920, 2020, 5):
list_C.append(
c_df[(c_df["release_year"] >= i) & (c_df["release_year"] < i + 5)]["id"].count()
)
b = c_df[c_df["release_year"] >= 2020]["id"].count()
list_C.append(b)
x = [
"~1920",
"1920~1924",
"1925~1929",
"1930~1934",
"1935~1939",
"1940~1944",
"1945~1949",
"1950~1954",
"1955~1959",
"1960~1964",
"1965~1969",
"1970~1974",
"1975~1979",
"1980~1984",
"1985~1989",
"1990~1994",
"1995~1999",
"2000~2004",
"2005~2009",
"2010~2014",
"2015~2019",
"2020~",
]
y = list_C
ax[1].set_title("Release_Year_seen_by_C", fontsize=20)
ax[1].step(x, y, lw=4)
ax[1].plot(x, y, "o--", color="#808080", alpha=0.7, linewidth=4)
ax[1].grid(axis="x", color="purple", alpha=0.2, ls="--", lw=1.2)
ax[1].set_xticklabels(x, fontsize=12, rotation=270)
ax[1].legend(loc="upper center", fontsize=15)
for idx, txt in enumerate(y):
ax[1].text(x[idx], y[idx] + 1.0, txt, ha="center", color="black")
# C's release-year distribution shows no notable difference from the whole dataset
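# A quick numeric check of the statement above (added): correlation between the two
# release-year distributions (whole dataset vs. films seen by C); a value near 1 supports it.
print("Release-year distribution correlation:", round(float(np.corrcoef(list_df, list_C)[0, 1]), 3))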
# ### Original language
C_count = dict(Counter(c_df["original_language"].values.tolist()))
C_count_langu = dict(sorted(C_count.items(), key=lambda x: x[1], reverse=True))
C_count_langu
# ### Genres
C_list = [word for words in c_df["genres"].values for word in words]
C_count = dict(Counter(C_list))
C_count = dict(sorted(C_count.items(), key=lambda x: x[1], reverse=True))
del C_count[""]
C_count
top_genres_C = dict()
etc = 0
thres = 0.04
for key in C_count:
if (C_count[key] / sum(C_count.values())) > thres:
top_genres_C[key] = C_count[key]
else:
etc += C_count[key]
top_genres_C["etc"] = etc
def custom_autopct(pct):
    return "%.1f%%" % pct  # show every percentage for persona C's genre chart
fig, ax = plt.subplots(
nrows=1, ncols=2, figsize=(18, 6), gridspec_kw={"width_ratios": [5, 3]}
)
wedgeprops = {"width": 0.6, "edgecolor": "black", "linewidth": 2}
colors = [
"#ffadad",
"#ffd6a5",
"#fdffb6",
"#caffbf",
"#98fb98",
"#9bf6ff",
"#a0c4ff",
"#dcdcdc",
]
ax[1].pie(
top_genres_C.values(),
labels=top_genres_C.keys(),
autopct=custom_autopct,
startangle=90,
wedgeprops=wedgeprops,
colors=colors,
textprops={"fontsize": 14},
)
ax[1].legend(title="genres", loc=(0.9, 0.6))
ax[1].set_title("Genres_C_Top", fontsize=20)
x = list(C_count_langu.keys())
y = list(C_count_langu.values())
palette = sns.color_palette("husl", len(x))
sns.barplot(
x=x, y=y, alpha=0.8, ax=ax[0], palette=palette, edgecolor="black", linewidth=2
)
# ax[1].set_xlabel("genres")
ax[0].set_title("Original_Language_C", fontsize=20)
ax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=-45, fontsize=14)
ax[0].tick_params(axis="x", labelsize=14)
legend = ax[0].legend(x, title="original_language", loc=(0.84, 0.14))
lines = legend.get_lines()
for i in range(len(lines)):
lines[i].set_color(palette[i])
plt.show()
# ### Company & Credits
# - Not significant because almost all companies and credits appear only once, so these variables are not used in the recommendation algorithm
C_list = [word for words in c_df["production_companies"].values for word in words]
C_company = dict(Counter(C_list))
C_company = dict(sorted(C_company.items(), key=lambda x: x[1], reverse=True))
del C_company[""]
C_company
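# Supporting the note above (added): the share of production companies that appear only once
once_share = sum(1 for v in C_company.values() if v == 1) / len(C_company)
print(f"Share of companies appearing exactly once: {once_share:.0%}")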
top_company_C = dict()
thres = 1
for key in C_company:
if (C_company[key]) >= thres:
top_company_C[key] = C_company[key]
else:
pass
# top_company_C : dict form
company_map = pd.DataFrame(top_company_C, index=["C_COMPANY_COUNT"])
display(company_map)
colorscale = [[0, "#AEC6CF"], [1, "#000080"]]
x = company_map.columns.tolist()
y = company_map.index.tolist()
annotations = []
for c in range(company_map.shape[1]):
if c == 0 or c == 1 or c == int((company_map.shape[1]) / 2):
annotations.append(
go.layout.Annotation(
text=str(company_map.iloc[0][c]),
x=x[c],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
else:
annotations.append(
go.layout.Annotation(
text="",
x=x[c],
y=y[0],
font={"size": 10, "color": "white"},
showarrow=False,
)
)
layout = go.Layout(
annotations=annotations,
width=800,
height=200,
coloraxis_colorbar=dict(thickness=0),
coloraxis_showscale=False,
margin=dict(l=0, r=0, t=0, b=180),
)
fig = go.Figure(
data=go.Heatmap(
z=company_map,
x=company_map.columns.tolist(), # text_auto=True,
y=company_map.index.tolist(),
colorscale=colorscale,
text=list(company_map.values),
showscale=False,
),
layout=layout,
)
fig.show()
c_df_list = [word for words in c_df["credits"].values for word in words]
c_df_credits = dict(Counter(c_df_list))
c_df_credits = dict(sorted(c_df_credits.items(), key=lambda x: x[1], reverse=True))
del c_df_credits[""]
C_list = [word for words in c_df["credits"].values for word in words]
C_credits = dict(Counter(C_list))
C_credits = dict(sorted(C_credits.items(), key=lambda x: x[1], reverse=True))
del C_credits[""]
# C_credits : dict form
credit_map = pd.DataFrame(C_credits, index=["C_CREDIT_COUNT"])
display(credit_map)
colorscale = [[0, "#AEC6CF"], [1, "#000080"]]
x = credit_map.columns.tolist()
y = credit_map.index.tolist()
annotations = []
for c in range(credit_map.shape[1]):
if c == 0 or c == 1 or c == int((credit_map.shape[1]) / 15):
annotations.append(
go.layout.Annotation(
text=str(credit_map.iloc[0][c]),
x=x[c],
y=y[0],
font={"size": 12, "color": "white"},
showarrow=False,
)
)
else:
annotations.append(
go.layout.Annotation(
text="",
x=x[c],
y=y[0],
font={"size": 10, "color": "white"},
showarrow=False,
)
)
layout = go.Layout(
annotations=annotations,
width=5000,
height=200,
coloraxis_colorbar=dict(thickness=0),
coloraxis_showscale=False,
margin=dict(l=0, r=0, t=0, b=180),
)
fig = go.Figure(
data=go.Heatmap(
z=credit_map,
x=credit_map.columns.tolist(), # text_auto=True,
y=credit_map.index.tolist(),
colorscale=colorscale,
text=list(credit_map.values),
showscale=False,
),
layout=layout,
)
# Show the figure
fig.show()
# ### keywords
c_df_list = [word for words in c_df["keywords"].values for word in words]
c_df_keywords = dict(Counter(c_df_list))
c_df_keywords = dict(sorted(c_df_keywords.items(), key=lambda x: x[1], reverse=True))
del c_df_keywords[""]
c_df_keywords
from itertools import chain
c_key = set(c_df_keywords.keys())
c_related = pd.DataFrame(index=list(c_key), columns=list(c_key))
c_related.loc[:, :] = 0
for words in list(c_key):
has = df["keywords"].apply(lambda x: words in x)
result = set(list(chain(*df[has].keywords.values)))
result.discard(words)
result = list(result & c_key)
c_related.loc[words, result] = 1
display(c_related)
C_heatmap = c_related.copy()
c_related["related_rank"] = c_related.sum(axis=1)
c_related = c_related.sort_values(by="related_rank", ascending=False)
temp_join = pd.DataFrame(c_df_keywords, index=["freq"]).T
temp_join
c_related = c_related.join(temp_join, how="inner")
c_related["keyword_score"] = c_related["freq"] * c_related["related_rank"]
display(c_related)
colorscale = [
[0, "#f7e6d2"],
[0.2, "#f1c6a6"],
[0.4, "#eaa783"],
[0.6, "#e2856e"],
[0.8, "#d15e5a"],
[1, "#b12c41"],
]
max_vals = C_heatmap.max().max()
min_vals = C_heatmap.min().min()
# Min-Max Scaling
C_heatmap = (C_heatmap - min_vals) / (max_vals - min_vals)
fig = go.Figure(
data=go.Heatmap(
z=C_heatmap,
x=C_heatmap.index.tolist(),
y=C_heatmap.columns.tolist(),
colorscale="YlGnBu",
)
)
fig.update_layout(
title={"text": "Keyword Network heatmap_C", "x": 0.55, "y": 0.96},
margin=dict(l=0, r=0, t=50, b=0)
# ,xaxis=dict(title='X Axis')
# ,yaxis=dict(title='Keyw')
)
fig.update_layout(width=600, height=600)
fig.show()
# ### Recommendation
keyword_dict = dict(c_related["keyword_score"])
c_recommend["keyword_score"] = 0
for keyword in keyword_dict:
c_recommend.loc[
c_recommend["keywords"].apply(lambda x: keyword in x), "keyword_score"
] += keyword_dict[keyword]
c_recommend["langu_score"] = c_recommend["original_language"].apply(
lambda x: C_count_langu.get(x, 0)
)
c_recommend["genre_score"] = 0
for genre in C_count:
c_recommend.loc[
c_recommend["genres"].apply(lambda x: genre in x), "genre_score"
] += C_count[genre]
c_recommend
c_score = c_recommend[
[
"id",
"log_popularity",
"vote_average",
"langu_score",
"genre_score",
"keyword_score",
]
]
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(c_score.iloc[:, 1:])
scaled_c_score = pd.DataFrame(
X_scaled, columns=c_score.iloc[:, 1:].columns, index=c_score["id"]
)
scaled_c_score
scaled_c_score["total_score"] = (
scaled_c_score["log_popularity"] + scaled_c_score["vote_average"]
) + (
(
scaled_c_score["langu_score"]
+ scaled_c_score["genre_score"]
+ scaled_c_score["keyword_score"]
)
/ 3
* 4
)
scaled_c_score = scaled_c_score.sort_values(by="total_score", ascending=False)
scaled_c_score = scaled_c_score[~scaled_c_score.index.isin(c_df.id.tolist())]
display(scaled_c_score)
print(
set(c_df.id.values.tolist()) & set(scaled_c_score.index.tolist())
) # No intersection
c_radar = scaled_c_score.iloc[:3, :5]
c_radar.reset_index(inplace=True)
c_radar_ranking = c_radar.id.values
display(c_radar)
labels = c_radar.iloc[:, 1:].columns
num_labels = len(labels)
angles = [
    x / float(num_labels) * (2 * np.pi) for x in range(num_labels)
]  ## equally spaced angles, one per axis
angles += angles[
    :1
]  ## repeat the first angle so the radar polygon closes back at its starting point
my_palette = plt.get_cmap("Set2", len(c_radar.iloc[:, 1:].index))
c_radar = c_radar.set_index("id").loc[c_radar_ranking].reset_index()
# Subplot settings
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(24, 6), subplot_kw=dict(polar=True))
# Draw a radar chart
for i, row in c_radar.iterrows():
color = my_palette(i)
data = row[1:].tolist()
data += data[:1]
ax = axes[i]
ax.set_theta_offset(np.pi / 2) ## the starting point
ax.set_theta_direction(-1) ## Direction drawn Clockwise
ax.set_rlabel_position(0) ## Set radial axis tick label angle (in degrees)
ax.plot(
angles,
data,
color=color,
linewidth=2,
linestyle="solid",
label=df[df["id"] == row.id].title.values[0],
) ## Radar Chart Output
ax.fill(
angles, data, color=color, alpha=0.2
) ## Fill the inside of the figure with color.
ax.set_xticks(angles[:-1])
ax.set_xticklabels(labels, fontsize=13) ## Angle axis tick label
ax.tick_params(
axis="x", which="major", pad=15
) ## Give a space between each axis and the scale.
# ax.set_rlim(0, 10) ## Set radius axis range
ax.set_title(df[df["id"] == row.id].title.values[0], fontsize=15, fontweight="bold")
# fig.legend(title = "Recommend_Movie", loc=(0.75,0.7), fontsize=15, title_fontsize=18)
plt.subplots_adjust(wspace=0.35)
plt.show()
top_number = 3
c_recommend_id = scaled_c_score.index.tolist()[:top_number]
df[df["id"].isin(c_recommend_id)]
result = poster_df[poster_df["id"].isin(c_recommend_id)]
result = result.set_index("id").loc[c_radar_ranking].reset_index()
fig, ax = plt.subplots(1, 3, figsize=(15, 15))
for i, j in enumerate(result.poster_path.unique()):
try:
ax[i].axis("off")
ax[i].set_title(result.iloc[i].title)
a = io.imread(f"https://image.tmdb.org/t/p/w500/{j}")
ax[i].imshow(a)
except:
pass
fig.tight_layout()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Hotel Reservations. Logistic Regression
# ## - Imports
# ## - Loading the dataset
# ## - Overview of the dataset
# imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from imblearn.over_sampling import SMOTE
# Loading the dataset
df_reservations = pd.read_csv(
"/kaggle/input/hotel-reservations-classification-dataset/Hotel Reservations.csv"
)
df_reservations.head()
df_reservations.shape
df_reservations.info()
# ## The 'booking_status' column
df_reservations["booking_status"].value_counts()
df_reservations["booking_status"].value_counts().plot(
kind="bar", color=["red", "green"]
)
# Convert the values: Not_Canceled -> 0, Canceled -> 1
df_reservations["booking_status"].replace(
["Not_Canceled", "Canceled"], [0, 1], inplace=True
)
df_reservations["booking_status"].value_counts()
# ### Columns with non numeric values
df_reservations.select_dtypes(exclude=["int64", "float"])
# #### - Checking for duplicates in 'Booking_ID'
df_reservations["Booking_ID"].nunique()
# #### - Taking a look at the other non-numeric columns
non_num_cols = ["type_of_meal_plan", "room_type_reserved", "market_segment_type"]
for col in non_num_cols:
print("Column: ", col)
print(df_reservations[col].value_counts())
print(df_reservations.value_counts(df_reservations[col]).sum())
print("\n")
for col in non_num_cols:
df_reservations[col].value_counts().plot(kind="bar")
plt.ylabel("Reservations #")
plt.xlabel(col)
plt.title(col)
plt.xticks(rotation=45)
plt.show()
# ### Columns with numeric values
# #### - Guests by age (columns: 'no_of_adults', 'no_of_children')
df_reservations["no_of_adults"].value_counts()
df_reservations["no_of_children"].value_counts()
# Checking the reservations that look a little strange (9 and 10 children)
df_reservations.loc[df_reservations["no_of_children"] == 10]
df_reservations.loc[df_reservations["no_of_children"] == 9]
# #### - Distribution by day of the week (columns: 'no_of_weekend_nights', 'no_of_week_nights')
df_reservations["no_of_weekend_nights"].value_counts()
df_reservations["no_of_week_nights"].value_counts()
# It seems that some reservations have no nights assigned at all
try:
both_zero = df_reservations.loc[
(df_reservations["no_of_weekend_nights"] == 0)
& (df_reservations["no_of_week_nights"] == 0)
].value_counts()
print("Number of rows where nights = 0: ", both_zero.sum())
except:
print("No zero nights reservations")
# Let's take a look at the parking needed by the guests
df_reservations["required_car_parking_space"].value_counts().plot(
kind="bar", color=["royalblue", "lime"]
)
# The 'lead time'. Intuitively, when a reservation has been made far in advance, it could be more likely to be cancelled.
df_reservations["lead_time"].hist(bins=100)
# Let's look at the reservations that were made more than one year in advance
lead_time = df_reservations.loc[df_reservations["lead_time"] > 365]
lead_time.head()
len(lead_time)
lead_time.loc[lead_time["booking_status"] == 1]
# It seems about 95% of the reservations made more than a year in advance have been cancelled
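# Quick check of the claim above (added): cancellation rate for reservations with lead_time > 365.
# 'booking_status' was already mapped to 0/1, so the mean is the cancellation rate.
print(
    "Cancellation rate for lead_time > 365:",
    round(lead_time["booking_status"].mean() * 100, 1),
    "%",
)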
# A brief look at the remaining columns
df_reservations["repeated_guest"].value_counts().plot(kind="bar")
df_reservations["no_of_previous_cancellations"].value_counts()
df_reservations["avg_price_per_room"].describe()
# There are some reservations without price
df_reservations.loc[df_reservations["avg_price_per_room"] == 0]
df_reservations["avg_price_per_room"].hist(bins=100)
# Finally, the correlation between the features
df_reservations.corr()
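# Optional (added): correlations of each numeric feature with the target, sorted.
# select_dtypes keeps this robust even while non-numeric columns are still present.
df_reservations.select_dtypes(include="number").corr()["booking_status"].sort_values(
    ascending=False
)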
# ## Preparing the Data
# The first model to try will be a Logistic Regression, with the data much as it was provided. The preparation at this stage will consist of:
# - Eliminating the 'Booking_ID' column
# - Transforming the categorical values to numbers using OHE
df_reservations.shape
# Eliminate 'Booking_ID'
df_reservations.drop("Booking_ID", axis=1, inplace=True)
df_reservations.head()
df_reservations.shape
# ### *'type_of_meal_plan'* OHE
# 'type_of_meal_plan' OHE
meals_ohe = pd.get_dummies(df_reservations["type_of_meal_plan"])
meals_ohe.head(10)
df_reservations_OHE = pd.concat([df_reservations, meals_ohe], axis=1)
df_reservations_OHE[
["type_of_meal_plan", "Meal Plan 1", "Meal Plan 2", "Meal Plan 3", "Not Selected"]
].head(10)
# ### *'room_type_reserved'* OHE
room_OHE = pd.get_dummies(df_reservations["room_type_reserved"])
room_OHE.head()
df_reservations_OHE = pd.concat([df_reservations_OHE, room_OHE], axis=1)
df_reservations_OHE[
[
"room_type_reserved",
"Room_Type 1",
"Room_Type 2",
"Room_Type 3",
"Room_Type 4",
"Room_Type 5",
"Room_Type 6",
"Room_Type 7",
]
].head(10)
# ### *'market_segment_type'* OHE
segment_OHE = pd.get_dummies(df_reservations["market_segment_type"])
segment_OHE.head(15)
df_reservations_OHE = pd.concat([df_reservations_OHE, segment_OHE], axis=1)
df_reservations_OHE[
[
"market_segment_type",
"Aviation",
"Complementary",
"Corporate",
"Offline",
"Online",
]
].head(15)
# ### Checking the new dataframe
df_reservations_OHE.info()
# ### Checking some correlations
meals = ["Meal Plan 1", "Meal Plan 2", "Meal Plan 3", "Not Selected"]
def cat_corr(cat_list):
cat_corr_dict = {}
for i in cat_list:
r_corr = df_reservations_OHE["booking_status"].corr(df_reservations_OHE[i])
cat_corr_dict[i] = r_corr
for k, v in cat_corr_dict.items():
print(f"\n {k}: {v}")
cat_corr(meals)
rooms = [
"Room_Type 1",
"Room_Type 2",
"Room_Type 3",
"Room_Type 4",
"Room_Type 5",
"Room_Type 6",
"Room_Type 7",
]
cat_corr(rooms)
segment = ["Aviation", "Complementary", "Corporate", "Offline", "Online"]
cat_corr(segment)
# ## Logistic Regression
selected_features = [
"no_of_adults",
"no_of_children",
"no_of_weekend_nights",
"no_of_week_nights",
"required_car_parking_space",
"lead_time",
"arrival_year",
"arrival_month",
"arrival_date",
"repeated_guest",
"no_of_previous_cancellations",
"no_of_previous_bookings_not_canceled",
"avg_price_per_room",
"no_of_special_requests",
"Meal Plan 1",
"Meal Plan 2",
"Meal Plan 3",
"Not Selected",
"Room_Type 1",
"Room_Type 2",
"Room_Type 3",
"Room_Type 4",
"Room_Type 5",
"Room_Type 6",
"Room_Type 7",
"Aviation",
"Complementary",
"Corporate",
"Offline",
"Online",
]
X = df_reservations_OHE[selected_features]
y = df_reservations["booking_status"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
print("Train set mean: ", np.mean(y_train))
print("Test set mean: ", np.mean(y_test))
model = LogisticRegression(solver="liblinear")
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(classification_report(y_test, predictions))
print("Predicted labels: ", predictions)
print("Accuracy: ", accuracy_score(y_test, predictions))
confusion_matrix(y_test, predictions)
y_predict_proba = model.predict_proba(X_test)
y_predict_proba
pos_proba = y_predict_proba[:, 1]
pos_proba
print("Median: ", np.median(pos_proba))
print("Mean: ", np.mean(pos_proba))
print("Standard deviation: ", np.std(pos_proba))
plt.hist(pos_proba)
pos_sample_pos_proba = pos_proba[y_test == 1]
neg_sample_pos_proba = pos_proba[y_test == 0]
plt.hist([pos_sample_pos_proba, neg_sample_pos_proba], histtype="barstacked")
plt.legend(["Positive Samples", "Negative Samples"])
plt.xlabel("Predicted probability of positive class")
plt.ylabel("Number of samples")
fpr, tpr, thresholds = metrics.roc_curve(y_test, pos_proba)
plt.plot(fpr, tpr, "*-")
plt.plot([0, 1], [0, 1], "r--")
plt.legend(["Logistic regression", "Random chance"])
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.title("ROC curve")
metrics.roc_auc_score(y_test, pos_proba)
# ### Balancing the dataset
print("Original label distribution:\n", df_reservations.booking_status.value_counts())
features_df = df_reservations_OHE.drop(
[
"booking_status",
"type_of_meal_plan",
"room_type_reserved",
"market_segment_type",
],
axis=1,
)
label_df = df_reservations_OHE["booking_status"]
features_df.head()
features_df.shape
label_df.shape
oversample = SMOTE()
transformed_features_df, transformed_label_df = oversample.fit_resample(
features_df, label_df
)
print(f"new_label_count:\n{transformed_label_df.value_counts()}")
print(f"old label count:\n{df_reservations_OHE.booking_status.value_counts()}")
transformed_df_reservations = pd.concat(
[transformed_label_df, transformed_features_df], axis=1, join="outer"
)
transformed_df_reservations.head()
transformed_df_reservations.info()
# ### Logistic Regression with the balanced dataset
selected_features = [
"no_of_adults",
"no_of_children",
"no_of_weekend_nights",
"no_of_week_nights",
"required_car_parking_space",
"lead_time",
"arrival_year",
"arrival_month",
"arrival_date",
"repeated_guest",
"no_of_previous_cancellations",
"no_of_previous_bookings_not_canceled",
"avg_price_per_room",
"no_of_special_requests",
"Meal Plan 1",
"Meal Plan 2",
"Meal Plan 3",
"Not Selected",
"Room_Type 1",
"Room_Type 2",
"Room_Type 3",
"Room_Type 4",
"Room_Type 5",
"Room_Type 6",
"Room_Type 7",
"Aviation",
"Complementary",
"Corporate",
"Offline",
"Online",
]
X_2 = transformed_df_reservations[selected_features]
y_2 = transformed_df_reservations["booking_status"]
X_train_2, X_test_2, y_train_2, y_test_2 = train_test_split(
X_2, y_2, test_size=0.3, random_state=0
)
print("Train set mean: ", np.mean(y_train_2))
print("Test set mean: ", np.mean(y_test_2))
model_2 = LogisticRegression(solver="liblinear")
model_2.fit(X_train_2, y_train_2)
predictions_2 = model_2.predict(X_test_2)
print(classification_report(y_test_2, predictions_2))
print("Predicted labels: ", predictions_2)
print("Accuracy: ", accuracy_score(y_test_2, predictions_2))
confusion_matrix(y_test_2, predictions_2)
y_2_predict_proba = model_2.predict_proba(X_test_2)
y_2_predict_proba
pos_proba_2 = y_2_predict_proba[:, 1]
pos_proba_2
print("Median: ", np.median(pos_proba_2))
print("Mean: ", np.mean(pos_proba_2))
print("Standard deviation: ", np.std(pos_proba_2))
plt.hist(pos_proba_2)
pos_sample_pos_proba_2 = pos_proba_2[y_test_2 == 1]
neg_sample_pos_proba_2 = pos_proba_2[y_test_2 == 0]
plt.hist([pos_sample_pos_proba_2, neg_sample_pos_proba_2], histtype="barstacked")
plt.legend(["Positive Samples", "Negative Samples"])
plt.xlabel("Predicted probability of positive class")
plt.ylabel("Number of samples")
fpr_2, tpr_2, thresholds_2 = metrics.roc_curve(y_test_2, pos_proba_2)
plt.plot(fpr_2, tpr_2, "*-")
plt.plot([0, 1], [0, 1], "r--")
plt.legend(["Logistic regression", "Random chance"])
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.title("ROC curve")
metrics.roc_auc_score(y_test_2, pos_proba_2)
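# Added for convenience: AUC of both models side by side. Note the two models are evaluated
# on different test sets (original vs. SMOTE-resampled), so this is only a rough comparison.
print("AUC baseline:   ", metrics.roc_auc_score(y_test, pos_proba))
print("AUC with SMOTE: ", metrics.roc_auc_score(y_test_2, pos_proba_2))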
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
test = pd.read_csv("../input/usa-housing-dataset/housing_test.csv")
train = pd.read_csv("../input/usa-housing-dataset/housing_train.csv")
train.describe()
train.head()
(train.shape), (test.shape)
idsUnique = train.Id.nunique()  # number of unique Ids, to compare with the total row count below
print(idsUnique)
idsTotal = train.shape[0]
print(idsTotal)
train.drop("Id", axis=1, inplace=True)
plt.figure(figsize=(20, 8), dpi=80)
corrmat = train.corr()
sns.heatmap(corrmat, vmax=0.8, annot=True)
corrmat = train.corr()
corrmat
corrmat = train.corr()
top_corr_features = corrmat.index[abs(corrmat["SalePrice"]) > 0.5]
plt.figure(figsize=(10, 10))
g = sns.heatmap(train[top_corr_features].corr(), annot=True, cmap="RdYlGn")
sns.barplot(x=train.OverallQual, y=train.SalePrice)
cols = [
"SalePrice",
"OverallQual",
"GrLivArea",
"GarageCars",
"TotalBsmtSF",
"FullBath",
"YearBuilt",
]
sns.pairplot(train[cols], height=2.5)
from scipy import stats
from scipy.stats import norm
sns.distplot(train["SalePrice"], fit=norm)
(mu, sigma) = norm.fit(train["SalePrice"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
plt.legend(
["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Frequency")
plt.title("SalePrice distribution")
fig = plt.figure()
res = stats.probplot(train["SalePrice"], plot=plt)
plt.show()
train["SalePrice"] = np.log1p(train["SalePrice"])
y = train["SalePrice"]
plt.scatter(x=train["GrLivArea"], y=train["SalePrice"])
train_nan = train.isnull().sum()
print(train_nan)
train_nas = train_nan[train_nan > 0]
print(train_nas)
train_nas.sort_values(ascending=False)
test_nan = test.isnull().sum()
test_nas = test_nan[test_nan > 0]
test_nas.sort_values(ascending=False)
categorical_features = train.select_dtypes(include=["object"]).columns
numerical_features = train.select_dtypes(exclude=["object"]).columns
numerical_features = numerical_features.drop("SalePrice")
train_num = train[numerical_features]
train_cat = train[categorical_features]
train_num = train_num.fillna(train_num.median())
from scipy.stats import skew
skewness = train_num.apply(lambda x: skew(x))
skewness.sort_values(ascending=False)
skewness = skewness[abs(skewness) > 0.5]
skewness.index
skew_features = train[skewness.index]
skew_features.columns
skew_features = np.log1p(skew_features)
train_cat.shape
train_cat = pd.get_dummies(train_cat)
train_cat.shape
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, RidgeCV, LassoCV, ElasticNetCV
from sklearn.metrics import mean_squared_error, make_scorer
train = pd.concat([train_cat, train_num], axis=1)
train.shape
X_train, X_test, y_train, y_test = train_test_split(
train, y, test_size=0.3, random_state=0
)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
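# A minimal sketch (added, not part of the original flow): fit the imported RidgeCV on the
# split above and report RMSE on the held-out 30%. The alpha grid below is an assumption.
ridge = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0, 30.0])
ridge.fit(X_train, y_train)
ridge_rmse = np.sqrt(mean_squared_error(y_test, ridge.predict(X_test)))
print(f"RidgeCV chosen alpha: {ridge.alpha_}, test RMSE (log scale): {ridge_rmse:.4f}")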
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Import the Data
# Read the CSV file and store the resulting DataFrame in a variable called 'df'
df = pd.read_csv("/kaggle/input/fifa-23-steam-reviews/fifa23_steam_reviews.csv")
print(f"Dimensionality of Dataset: {df.shape}")
# ## Basic Manual EDA
# Quick Look at the Data
# So what do we see exactly?
# What is interesting to take note of here is the type of data columns - Both Quantitative (Discrete & Continuous) & Categorical Data
# We also have date data ("created" & "author_last_played") & text data ("review")
df.head(3)
# Sometimes you might have more than 30 columns; call this to list all the column names:
df.columns
# info() gives the count of non-null values for each column and its data type.
# It is interesting to see there are some null values for some columns like "review"!
# date data ("created" & "author_last_played") seems to have the wrong data type as well!
# In your data-preprocessing, it would be important to treat the null values and correct the data type!
df.info()
# To further check for missing values
df.isnull().sum()
# Let's check for any duplicates
num_duplicates = df.duplicated().sum()
print(f"Number of duplicates: {num_duplicates}")
# For a specific column:
num_duplicates = df.duplicated(subset=["review"]).sum()
print(
f"Number of duplicates for the 'review' column: {num_duplicates}"
) # Interesting to take note of such duplicates!
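# Optional peek (added): inspect a few of the duplicated review texts flagged above
df[df.duplicated(subset=["review"], keep=False)].sort_values("review").head()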
# Let's find out the number of unique values for each variable
# We can easily identify 2 columns ("language" and "written_during_early_access") that have only 1 unique value
# Scroll up to df.head(3) and you will realise that those 2 columns' values are "english" and "false" respectively
# In your data-preprocessing, it would be apt to remove these 2 columns (see the sketch after the next cell)!
df.nunique(axis=0)
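# A minimal sketch of the removal suggested above (added): drop columns with a single unique value
constant_cols = [col for col in df.columns if df[col].nunique(dropna=False) <= 1]
print("Constant columns:", constant_cols)
# df = df.drop(columns=constant_cols)  # uncomment to apply it during preprocessing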
# This function summarizes the count, mean, standard deviation, min, and max for NUMERIC variables.
# This allows you to get an overall sense of the data in general
# It helps you to spot potential outliers too! Such as the max of "votes_up" and the min of "author_num_games_owned"!
df.describe()
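# Following up on the outlier hint above (added): the most up-voted reviews.
# 'votes_up' is the column mentioned in the comment above; the selection below assumes it exists.
df.nlargest(5, "votes_up")[["votes_up", "review"]]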
# This function lets you see all the different categories/types in a categorical variable of your choice
df.voted_up.unique()
# df.voted_up.unique().tolist() #If you want the output to be in a list instead
# ## Now, let's move on to see some visualization!
import matplotlib.pyplot as plt
import seaborn as sns
# ### **Visualise Each Column**
# #### Categorical & Boolean Variables → Bar Chart
# #### Numerical Variables → Histogram
# Q: Why do we perform this?
# A: Plotting each of our columns can help us visualise the data distribution better! We are able to observe the proportion of True VS False or how skewed our numerical data is at a glance!
# *Do note that this is not recommended if you have > 25 columns*
# Determine number of rows and columns
n_cols = df.shape[1]
n_rows = int(np.ceil(n_cols**0.5))  # round the square root up so the grid has room for every column
# Create square grid of subplots
fig, axs = plt.subplots(
n_rows, n_rows, figsize=(16, 16)
) # Change the figsize accordingly!
# Flatten axes array for easy iteration
axs = axs.ravel()
# Plot each column in a subplot
for i, column in enumerate(df.columns):
axs[i].set_title(column)
if pd.api.types.is_bool_dtype(df[column]) or pd.api.types.is_string_dtype(
df[column]
):
df[column].value_counts()[:10].plot(kind="bar", ax=axs[i])
elif pd.api.types.is_numeric_dtype(df[column]):
df[column].plot(kind="hist", ax=axs[i])
# Hide unused subplots
for i in range(n_cols, n_rows * n_rows):
axs[i].axis("off")
# Adjust spacing and layout
plt.tight_layout()
# Show plot
plt.show()
# ### Visualise Correlation Matrix/Heatmap
# Q: Why do we perform this?
# A: A correlation matrix is a table that summarizes and visualizes the correlation coefficients between variables in a dataset. The correlation coefficient is a statistical measure that quantifies the strength of the linear relationship between two variables, ranging from -1 to +1. Highly correlated variables are usually undesirable and should be a warning sign when you encounter them!
corr = df.corr() # plot the heatmap
sns.heatmap(
corr, annot=True, fmt=".2f", annot_kws={"size": 8}
) # Change the Font size accordingly!
# ## SweetViz EDA
# A tip: add -q to the pip install command to suppress its output
import sweetviz as sv
my_report = sv.analyze(df)
my_report.show_notebook()
# ## DataPrep EDA
from dataprep.eda import plot, plot_correlation, create_report, plot_missing
create_report(df)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Filename: fAux.py
# --------------------------------------------------------------------
"""
Funciones auxiliares a svm.py
"""
import numpy as np
def backshift(day, x):
assert day > 0, "Invalid day"
shift = np.zeros((np.shape(x)))
shift[day:] = x[:-day]
shift[shift == 0] = np.nan
return shift
def calculateReturns(prices, lag):
prevPrices = backshift(lag, prices)
rlag = (prices - prevPrices) / prevPrices
return rlag
def fwdshift(day, x):
assert day > 0, "Invalid day"
shift = np.zeros((np.shape(x)))
shift[:-day] = x[day:]
shift[shift == 0] = np.nan
return shift
def calculateMaxDD(cumret):
highwatermark = np.zeros(len(cumret))
drawdown = np.zeros(len(cumret))
drawdownduration = np.zeros(len(cumret))
for t in range(1, len(cumret)):
highwatermark[t] = np.max([highwatermark[t - 1], cumret[t]])
drawdown[t] = (1 + cumret[t]) / (1 + highwatermark[t]) - 1
if drawdown[t] == 0:
drawdownduration[t] = 0
else:
drawdownduration[t] = drawdownduration[t - 1] + 1
return np.min(drawdown), np.max(drawdownduration)
def main():
pass
if __name__ == "__main__":
main()
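# A tiny usage sketch of the helpers above (added): backshift lags a series by `day` rows and
# pads with NaN. Note that genuine zeros in the input are also converted to NaN.
_prices = np.array([10.0, 11.0, 12.0, 13.0])
print(backshift(1, _prices))  # [nan 10. 11. 12.]
print(calculateReturns(_prices, 1))  # one-day simple returns, first entry is NaN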
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 13:14:48 2021
@author: loren
"""
# the inputs can be considered already scaled, but we scale them anyway to show how
# the scaling is done inside a pipeline
import warnings
warnings.simplefilter("ignore")
import scipy.io
import numpy as np
# import fAux
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from sklearn.impute import SimpleImputer
import pandas as pd
sns.set()
xdf = pd.read_csv("/kaggle/input/xdataframe/xdf.csv")
ydf = pd.read_csv("/kaggle/input/ydataframe/ydf.csv")
xdf.drop(xdf.columns[0], axis=1, inplace=True)
ydf.drop(ydf.columns[0], axis=1, inplace=True)
x = xdf.values
y = ydf.values[:, 0].tolist()
tdaydf = pd.read_csv("/kaggle/input/tdaydataframe/tdaydf.csv", names=["0", "date"])[1:]
tdaydf.drop(tdaydf.columns[0], axis=1, inplace=True)
tdaydf["date"] = pd.to_datetime(tdaydf.date)
tday = tdaydf.date
# Build model on training data
train_set = round(len(tday) / 2)
np.random.seed(1)
##########################################################################################################################
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import pandas as pd
from sklearn.metrics import accuracy_score
pipeline = Pipeline([("scaler", StandardScaler()), ("svc", SVC())])
scale = 1 / (np.std(x[:train_set, :]) * x.shape[1])
# #testing all parameters (do not run this, it takes too long)
# param_grid = [{'svc__kernel': ['rbf'],
# 'svc__C': [0.001, 0.01, 0.1, 1, 10, 100],
# 'svc__gamma': [0.001, 0.01, 0.1, 1, 10, 100, scale],
# 'svc__probability': [1, 0]},
# {'svc__kernel': ['linear'],
# 'svc__C': [0.001, 0.01, 0.1, 1, 10, 100]},
# {'svc__kernel': ['poly'],
# 'svc__C': [0.001, 0.01, 0.1, 1, 10, 100],
# 'svc__gamma': [0.001, 0.01, 0.1, 1, 10, 100, scale],
# 'svc__probability': [1, 0],
# 'svc__shrinking': [1,0],
# 'svc__degree': [0, 1, 2, 3, 4, 5, 6]}]
# testing the default values of the parameters only
param_grid = [
{
"svc__kernel": ["rbf"],
"svc__C": [20],
"svc__gamma": [scale],
"svc__probability": [1],
}
]
# param_grid = [{'svc__kernel': ['poly'],
# 'svc__C': [1],
# 'svc__gamma': [scale],
# 'svc__degree': [3]}]
# rso = GridSearchCV(pipeline, param_grid, cv=2, return_train_score=True, scoring=None)  # superseded by the randomized search below
rso = RandomizedSearchCV(
    pipeline, param_grid, cv=2, return_train_score=True, scoring=None
)
rso.fit(x[:train_set, :], y[:train_set])
best_parameters = rso.best_params_
print("Best parameters with scaling grid: {}".format(best_parameters))
print(
"Best cross-validation score with scaling grid: {:.2f}".format(
rso.best_score_ * 100
)
)
results = pd.DataFrame(rso.cv_results_)
# print(results.T)
results.to_csv("results_svc.csv")
best_model = rso.best_estimator_
test_score = accuracy_score(y[train_set:], rso.predict(x[train_set:, :]))
#########################################################################################################################
# Select best svc based on default criteria with n-fold
# cross validation
# Make "predictions" on training set (in-sample)
isRetPositiveOrZero = np.where(best_model.predict(x[:train_set, :]) > 0, 1, 0)
positions = isRetPositiveOrZero + (isRetPositiveOrZero - 1)
dailyRet = backshift(1, positions) * x[:train_set, 0] # x[:train_set,0] = ret1
dailyRet = np.nan_to_num(dailyRet)
cumret = np.cumprod(dailyRet + 1) - 1
plt.figure(1)
plt.plot(tday[:train_set], cumret)
plt.title("Cross-validated svc on SPY: train set")
plt.ylabel("Cumulative Returns")
plt.xlabel("Date")
plt.show()
cagr = (1 + cumret[-1]) ** (252 / len(cumret)) - 1
maxDD, maxDDD = calculateMaxDD(cumret)
ratio = (252.0 ** (1.0 / 2.0)) * np.mean(dailyRet) / np.std(dailyRet)
print(
(
"In-sample: CAGR={:0.6} Sharpe ratio={:0.6} maxDD={:0.6} maxDDD={:d} Calmar ratio={:0.6}\n"
).format(cagr, ratio, maxDD, maxDDD.astype(int), -cagr / maxDD)
)
# Test set
# Make "predictions" on test set (out-of-sample)
isRetPositiveOrZero = np.where(best_model.predict(x[train_set:, :]) > 0, 1, 0)  # use the full pipeline so the scaler is applied, as in the in-sample block
positions = isRetPositiveOrZero + (isRetPositiveOrZero - 1)
dailyRet = backshift(1, positions) * x[train_set:, 0]  # x[train_set:, 0] = ret1
dailyRet = np.nan_to_num(dailyRet)
cumret = np.cumprod(dailyRet + 1) - 1
plt.figure(2)
plt.xticks(rotation=70)
plt.plot(tday[train_set:], cumret)
plt.title("Cross-validated svc on SPY: test set")
plt.ylabel("Cumulative Returns")
plt.xlabel("Date")
plt.show()
cagr = (1 + cumret[-1]) ** (252 / len(cumret)) - 1
maxDD, maxDDD = calculateMaxDD(cumret)
ratio = (252.0 ** (1.0 / 2.0)) * np.mean(dailyRet) / np.std(dailyRet)
print(
(
"Out-of-sample: CAGR={:0.6} Sharpe ratio={:0.6} maxDD={:0.6} maxDDD={:d} Calmar ratio={:0.6}\n"
).format(cagr, ratio, maxDD, maxDDD.astype(int), -cagr / maxDD)
)
print("Best rso score: ", rso.best_score_)
|
# ## TR PDNN 2023
# adapted from: https://github.com/img88/ALPR_IndonesiaPlateNumber_ComputerVision
# ! pip install imutils -q
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
# import imutils
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.metrics import f1_score
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Flatten, MaxPooling2D, Dropout, Conv2D
from skimage import io
import urllib.request
# Helper function to display an image with matplotlib
def display(img_, title=""):
img = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(111)
ax.imshow(img)
plt.axis("off")
plt.title(title)
plt.show()
# req = urllib.request.urlopen('https://awsimages.detik.net.id/customthumb/2011/05/09/648/plat-nomor-dalam.jpg')
req = urllib.request.urlopen(
"https://raw.githubusercontent.com/img88/ALPR_IndonesiaPlateNumber_ComputerVision/main/test%20images/AB2638XU.jpg"
)
# req = urllib.request.urlopen('https://raw.githubusercontent.com/img88/ALPR_IndonesiaPlateNumber_ComputerVision/main/test%20images/AB5592EG.jpg')
# req = urllib.request.urlopen('https://raw.githubusercontent.com/img88/ALPR_IndonesiaPlateNumber_ComputerVision/main/test%20images/AD2914JG.jpg')
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
img = cv2.imdecode(arr, -1) # 'Load it as it is'
# img = cv2.imread('../input/ai-indian-license-plate-recognition-data/car.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/H6240AKC.jpg')
display(img, "input image")
# # Program to detect the position of the license plate on a motorcycle image
#
img = cv2.imread("/kaggle/input/haarcascadeplatenumber/IMG20230410113718.jpg")
# grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# hisimg = cv2.adaptiveThreshold(grayimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1)
# blured = cv2.GaussianBlur(hisimg, (29,29), 0)
# edged = cv2.Canny(blured, 10, 180)
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
# dilate = cv2.dilate(edged, kernel, iterations=1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# thresh_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
thresh_inv = cv2.adaptiveThreshold(
gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1
)
blur = cv2.GaussianBlur(thresh_inv, (63, 63), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
edged = cv2.Canny(thresh, 10, 180)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
dilate = cv2.dilate(thresh, kernel, iterations=1)
contours, _ = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image_copy = img.copy()
i = 0
for c in contours:
x, y, w, h = cv2.boundingRect(c)
if h > 450 and w > 1500 and w / h < 2.3:
print(f"aspect: {w/h}, ")
cv2.drawContours(image_copy, contours, i, (0, 0, 255), 4)
i += 1
# plt.imshow(thresh)
display(image_copy)
indeximg = 0
filepath = []
for root, dirname, filename in os.walk("/kaggle/input/haarcascadeplatenumber"):
for file in filename:
filepath.append(os.path.join(root, file))
img = cv2.imread(filepath[indeximg])
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hisimg = cv2.adaptiveThreshold(
grayimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1
)
blured = cv2.GaussianBlur(hisimg, (21, 21), 0)
edged = cv2.Canny(blured, 10, 180)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
dilate = cv2.dilate(edged, kernel, iterations=1)
contours, _ = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
image_copy = img.copy()
cv2.drawContours(image_copy, contours, -1, (0, 0, 255), 2)
platno = []
index = 0
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
aspect_ratio = w / h
if w > 560 and h > 350 and h < 1100 and aspect_ratio > 1.6 and aspect_ratio <= 2.6:
print(f"aspect ratio from index: {index}, {aspect_ratio}, w: {w}, h: {h}")
platno.append(index)
index += 1
print(f"detected plat no: {len(platno)}, index: {indeximg}")
# plt.imshow(img)
display(img)
# display(dilate)
# display(image_copy)
if len(platno) > 0:
indexplatno = platno[0]
detectedimg = img.copy()
x, y, w, h = cv2.boundingRect(contours[indexplatno])
croppedimg = img[y : y + h, x : x + w]
display(croppedimg, "cropped")
# increment indeximg
indeximg += 1
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/H6240AKC2.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/IMG20230410113606.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/IMG20230410113827.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/IMG20230410113833.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/IMG20230410113613.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/IMG20230410084106.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/IMG20230410084035.jpg')
# req = urllib.request.urlopen('https://t-2.tstatic.net/gorontalo/foto/bank/images/12102022_plat-nomor-putih.jpg')
# req = urllib.request.urlopen('https://image.cermati.com/q_70,w_1200,h_800,c_fit/cgbt1mggrzxkk5p5xtot')
# arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
# img = cv2.imdecode(arr, -1) # 'Load it as it is'
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hisimg = cv2.adaptiveThreshold(
grayimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1
)
blured = cv2.GaussianBlur(hisimg, (23, 23), 0)
edged = cv2.Canny(blured, 10, 180)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
dilate = cv2.dilate(edged, kernel, iterations=1)
contours, _ = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
image_copy = img.copy()
cv2.drawContours(image_copy, contours, -1, (0, 0, 255), 2)
platno = []
index = 0
sx = sy = sw = sh = []
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
aspect_ratio = w / h
if (
w > 500
and h > 350
and h < 1100
and aspect_ratio <= 3
and aspect_ratio > 1.6
and aspect_ratio <= 2.6
):
print(f"aspect ratio from index: {index}, {aspect_ratio}, w: {w}, h: {h}")
platno.append(index)
index += 1
print(f"detected plat no: {len(platno)}")
if 0 < len(platno) <= 2:  # guard against an empty detection list
indexplatno = platno[0]
detectedimg = img.copy()
x, y, w, h = cv2.boundingRect(contours[indexplatno])
# cv2.drawContours(detectedimg, contours, indexplatno, (0, 0, 255), 5)
croppedimg = img[y : y + h, x : x + w]
display(croppedimg, "cropped")
for p in platno:
    x, y, w, h = cv2.boundingRect(contours[p])  # use each detected contour, not only the first one
    cv2.rectangle(detectedimg, (x, y), (x + w, y + h), (0, 0, 255), 3)
    print(p)
plt.imshow(img)
# display(hisimg, "contour")
# display(thresh, "edged")
# display(dilate, "position")
display(detectedimg, "position")
# display(image_copy, "dilated")
# display(croppedimg, "cropped")
# # Program to detect the digits on the motorcycle license plate
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/H6240AKC.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/H6240AKC2.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/H6240AKC1.jpg')
# img = cv2.imread('/kaggle/input/haarcascadeplatenumber/H6498SI.jpg')
# req = urllib.request.urlopen('https://upload.wikimedia.org/wikipedia/commons/a/a5/Plat_Nomor_Nganjuk_%283_Huruf%29.jpg')
# arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
# img = cv2.imdecode(arr, -1)
"""
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hisimg = cv2.adaptiveThreshold(grayimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1)
blured = cv2.GaussianBlur(hisimg, (29,29), 0)
edged = cv2.Canny(blured, 10, 180)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
dilate = cv2.dilate(edged, kernel, iterations=1)
"""
grayimg = cv2.cvtColor(croppedimg, cv2.COLOR_BGR2GRAY)
hisimg = cv2.adaptiveThreshold(
grayimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1
)
blured = cv2.GaussianBlur(hisimg, (29, 29), 0)
edged = cv2.Canny(blured, 10, 120)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
dilate = cv2.dilate(edged, kernel, iterations=1)
contours, _ = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image_copy = croppedimg.copy()
imgrec = croppedimg.copy()
platno = []
index = 0
realPlatno = 0
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
aspect_ratio = w / h
luas = w * h
if w > 120 and h >= 150 and h < 410 and aspect_ratio <= 0.7:
platno.append(index)
cv2.drawContours(image_copy, contours, index, (0, 255, 0), 2)
print(f"x: {x}, y: {y}, w: {w}, h: {h}")
realPlatno += 1
index += 1
print(f"detected candidate plat no: {len(platno)}")
urutx = []
for char in platno:
x, y, w, h = cv2.boundingRect(contours[char])
urutx.append(x)
urutx = sorted(urutx)
## sort the plate-number characters from left to right
cropCharUrut = []
for ux in urutx:
for char in platno:
x, y, w, h = cv2.boundingRect(contours[char])
if x == ux:
cropCharUrut.append(char)
break
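# Remove near-duplicate detections: characters whose bounding boxes start within 10 px of each
# other horizontally are treated as the same glyph (note: the list is modified while iterating).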
o = 0
for c in cropCharUrut:
x, y, w, h = cv2.boundingRect(contours[c])
o = 0
for i in cropCharUrut:
if c == i:
continue
x2, y2, w2, h2 = cv2.boundingRect(contours[i])
if abs(x - x2) < 10:
cropCharUrut.pop(o)
o += 1
print(cropCharUrut)
for c in cropCharUrut:
x, y, w, h = cv2.boundingRect(contours[c])
cv2.rectangle(imgrec, (x, y), (x + w, y + h), (0, 0, 255), 2)
display(imgrec, "box plat no")
# display(hisimg, "detected plat no")
# display(blured, "detected plat no")
# display(image_copy, "contour")
# display(edged, "edged")
# display(dilate, "dilated")
# # **Next, build a program to extract each digit from the plate and classify the characters obtained**
from datetime import datetime
import uuid
# show the characters that have been cropped out
imgs = []
savepath = "/kaggle/working/"
imgHeight = 40
imgWidth = 40
for char in cropCharUrut:
filename = (
str(datetime.now().strftime("%d_%m_%Y-%H_%M_%S")) + str(uuid.uuid4()) + ".jpg"
)
filename = os.path.join(savepath, filename)
print(filename)
x, y, w, h = cv2.boundingRect(contours[char])
crop = grayimg[y : y + h, x : x + w]
ret, thresh = cv2.threshold(crop, 127, 255, cv2.THRESH_BINARY)
char_crop = cv2.resize(thresh, (imgWidth, imgHeight))
imgs.append(char_crop)
# cv2.imwrite(filename, crop)
display(char_crop)
# build one combined plot and display it
# _, axs = plt.subplots(1, len(cropCharUrut), figsize=(12, 12))
# axs = axs.flatten()
# for img, ax in zip(imgs, axs):
# ax.imshow(img)
# plt.figtext(.5,.65,'Data Plat yang didapatkan',fontsize=30,ha='center')
# plt.show()
# # Build the Model for Classification
# ## **If you want to change or edit anything, make your own cell and name it so things don't get confusing**
"""
AH's cell... please do not modify.
Model built following the tutorial:
https://www.tensorflow.org/tutorials/images/classification
"""
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
path = "/kaggle/input/dataset-characters-indo-plate/dataset_characters"
batchSize = 32
imgHeight = 40
imgWidth = 40
trainDs = tf.keras.preprocessing.image_dataset_from_directory(
path,
validation_split=0.2,
subset="training",
seed=123,
image_size=(imgHeight, imgWidth),
batch_size=batchSize,
)
valDs = tf.keras.preprocessing.image_dataset_from_directory(
path,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(imgHeight, imgWidth),
batch_size=batchSize,
)
className = trainDs.class_names
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = trainDs.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = valDs.cache().prefetch(buffer_size=AUTOTUNE)
numClass = len(className)
# build the model with the TensorFlow Sequential API
model = Sequential(
[
layers.experimental.preprocessing.Rescaling(
1.0 / 255, input_shape=(imgHeight, imgWidth, 3)
),
layers.Conv2D(16, 3, padding="same", activation="relu"),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding="same", activation="relu"),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding="same", activation="relu"),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation="relu"),
layers.Dense(numClass),
]
)
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
model.summary()
epochs = 10
history = model.fit(trainDs, validation_data=valDs, epochs=epochs)
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label="Training Accuracy")
plt.plot(epochs_range, val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.show()
model.save("my_model") # Save trained model
model = keras.models.load_model("/kaggle/working/my_model")
# display(imgs[index])
class_names = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
]
predictedPlat = []
for index in range(0, len(imgs)):
image = cv2.adaptiveThreshold(
np.array(imgs[index]),
255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY,
125,
1,
)
char_crop = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    # resize the character image
char_crop = cv2.resize(char_crop, (imgWidth, imgHeight))
# display(char_crop)
    # convert the character image to a NumPy array
img_array = keras.preprocessing.image.img_to_array(char_crop)
    # expand dims so the shape becomes [1, h, w, channels]
img_array = tf.expand_dims(img_array, 0)
pred = model.predict(img_array)
score = tf.nn.softmax(pred[0])
# print(class_names[np.argmax(score)])
# ax.imshow(image)
predictedPlat.append(class_names[np.argmax(score)])
_, axs = plt.subplots(1, len(cropCharUrut), figsize=(12, 12))
axs = axs.flatten()
for img, ax in zip(imgs, axs):
ax.imshow(img)
plt.figtext(0.5, 0.65, predictedPlat, fontsize=30, ha="center")
plt.show()
for dirpath, dirname, filename in os.walk("/kaggle/input/haarcascadeplatenumber/"):
for file in filename:
# print(file)
print(os.sep.join([dirpath, file]))
|
# # CNN
# import models
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.callbacks import EarlyStopping
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import Dense, LSTM, RepeatVector, Flatten, TimeDistributed, Dropout
color_pal = sns.color_palette()
plt.style.use("fivethirtyeight")
import os
# # import and Clean Data
# Read & fill empty Date
df = pd.read_csv("/kaggle/input/2015-2018/energy_load.csv")
df.fillna(method="ffill", inplace=True)
print(df.isnull().sum())
df = df.set_index("time")
df.index = pd.to_datetime(df.index, utc=True).tz_localize(
None
) # convert string to datetime type
df.index = df.index + pd.Timedelta(hours=3)  # shift to Iraq local time (UTC+3)
df
# # Plotting
# df.plot(style='.',
# figsize=(15, 5),
# color=color_pal[0],
# title='Load Power')
# plt.show()
# train = df.loc[df.index < '3-1-2018']
# test = df.loc[df.index >= '3-1-2018']
# fig, ax = plt.subplots(figsize=(15, 5))
# train.plot(ax=ax, label='Training Set', title='Data Train/Test Split')
# test.plot(ax=ax, label='Test Set')
# ax.axvline('3-1-2018', color='black', ls='--')
# ax.legend(['Training Set', 'Test Set'])
# plt.show()
# df.loc[(df.index >= '01-01-2015') & (df.index < '01-08-2015')] \
# .plot(figsize=(15, 5), title='Week Of Data')
# plt.show()
# # Feature Creation
def create_features(df):
"""
Create time series features based on time series index.
"""
df = df.copy()
df["hour"] = df.index.hour
df["dayofweek"] = df.index.dayofweek
df["quarter"] = df.index.quarter
df["month"] = df.index.month
df["year"] = df.index.year
df["dayofyear"] = df.index.dayofyear
df["dayofmonth"] = df.index.day
df["weekofyear"] = df.index.isocalendar().week
return df
df = create_features(df)
df.head()
# # Visual Data
# fig, ax = plt.subplots(figsize=(12,3))
# sns.boxplot(data=df, x='hour', y='load')
# ax.set_title('MW by Hour')
# plt.show()
# fig, ax = plt.subplots(figsize=(12,3))
# sns.boxplot(data=df, x='month', y='load', palette='Blues')
# ax.set_title('MW by Month')
# plt.show()
# # split data to train and test
train_df = df.loc[df.index < "2-01-2018"]
test_df = df.loc[df.index >= "2-01-2018"]
fig, ax = plt.subplots(figsize=(15, 5))
train_df.plot(ax=ax, label="Training Set", title="Data Train/Test Split")
test_df.plot(ax=ax, label="Test Set")
ax.axvline("2-01-2018", color="black", ls="--")
ax.legend(["Training Set", "Test Set"])
plt.show()
# # split train test
FEATURES = ["dayofyear", "hour", "dayofweek", "quarter", "month", "year"]
LABELS = ["load"]
train = create_features(train_df)
test = create_features(test_df)
train_x = train[FEATURES].values.reshape(
train[FEATURES].shape[0], 1, train[FEATURES].shape[1]
)
test_x = test[FEATURES].values.reshape(
test[FEATURES].shape[0], 1, test[FEATURES].shape[1]
)
train_y = train[LABELS].values
test_y = test[LABELS].values
# # Perform Models Algorithms
model = Sequential()
model.add(
Conv1D(filters=64, kernel_size=1, activation="relu", input_shape=(1, len(FEATURES)))
)
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(128, name="LAYER2"))
model.add(Dense(64, name="LAYER3"))
model.add(Dense(100, name="LAYER4"))
model.add(Dense(1, name="OUTPUT_LAYER"))
model.add(RepeatVector(train_x.shape[1]))
model.add(LSTM(12, activation="relu", return_sequences=True))
model.add(Dense(128, name="LAYER5"))
model.add(Dense(128, name="LAYER6"))
model.add(Dropout(0.2))
model.add(TimeDistributed(Dense(32)))
model.add(TimeDistributed(Dense(1)))
model.compile(loss="MAPE", optimizer="adam")
early_stopping_monitor = EarlyStopping(patience=3)
print(train_x.shape)
history = model.fit(
train_x,
train_y,
epochs=12,
batch_size=32,
validation_data=(test_x, test_y),
callbacks=early_stopping_monitor,
)
# model.save('load_data.h5')
# model2=load_model('load_data.h5')
# # Analyse Outputs
plt.plot(history.history["loss"], "b", label="% Training Loss")
plt.plot(history.history["val_loss"], "r", label="% Validation Loss")
plt.title("Training and Validation Loss Precentage")
plt.xlabel("epoch")
plt.ylabel("Loss value")
plt.grid()
plt.legend()
plt.show()
|
import pandas as pd
r_file_path = "../input/drug-data/drug200.csv"
r_data = pd.read_csv(r_file_path)
r_data.columns
import pandas as pd
r_file_path = "../input/drug-data/drug200.csv"
r_data = pd.read_csv(r_file_path)
r_labels = ["Drug"]
r_features = ["Age", "Sex", "BP", "Cholesterol", "Na_to_K"]
y = r_data[r_labels]
X = r_data[r_features]
X = pd.get_dummies(X, ["Age", "BP", "Cholesterol"])
y = pd.get_dummies(y, ["Drug"])
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
train_X, validation_X, train_y, validation_y = train_test_split(
X, y, test_size=0.2, random_state=0
)
for max_leaf_nodes in [2, 3, 4, 5]:
r_model = DecisionTreeClassifier(max_leaf_nodes=max_leaf_nodes, random_state=0)
r_model.fit(train_X, train_y)
predictions = r_model.predict(validation_X)
print(mean_absolute_error(validation_y, predictions))
# Of the options listed, 4 is the optimal number of leaves
import pandas as pd
r_file_path = "../input/drug-data/drug200.csv"
r_data = pd.read_csv(r_file_path)
r_labels = ["Drug"]
r_features = ["Age", "Sex", "BP", "Cholesterol", "Na_to_K"]
y = r_data[r_labels]
X = r_data[r_features]
X = pd.get_dummies(X, ["Sex", "BP", "Cholesterol"])
y = pd.get_dummies(y, ["Drug"])
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
r_best_model = DecisionTreeClassifier(max_leaf_nodes=4, random_state=0)
import numpy as np
X_1, X_2, X_3, X_4, X_5 = np.split(X, 5, axis=0)
y_1, y_2, y_3, y_4, y_5 = np.split(y, 5, axis=0)
X_train_1 = pd.concat([X_2, X_3, X_4, X_5])
y_train_1 = pd.concat([y_2, y_3, y_4, y_5])
r_best_model.fit(X_train_1, y_train_1)
y_predictions_1 = r_best_model.predict(X_1)
error_1 = mean_absolute_error(y_1, y_predictions_1)
X_train_2 = pd.concat([X_1, X_3, X_4, X_5])
y_train_2 = pd.concat([y_1, y_3, y_4, y_5])
r_best_model.fit(X_train_2, y_train_2)
y_predictions_2 = r_best_model.predict(X_2)
error_2 = mean_absolute_error(y_2, y_predictions_2)
X_train_3 = pd.concat([X_1, X_2, X_4, X_5])
y_train_3 = pd.concat([y_1, y_2, y_4, y_5])
r_best_model.fit(X_train_3, y_train_3)
y_predictions_3 = r_best_model.predict(X_3)
error_3 = mean_absolute_error(y_3, y_predictions_3)
X_train_4 = pd.concat([X_1, X_2, X_3, X_5])
y_train_4 = pd.concat([y_1, y_2, y_3, y_5])
r_best_model.fit(X_train_4, y_train_4)
y_predictions_4 = r_best_model.predict(X_4)
error_4 = mean_absolute_error(y_4, y_predictions_4)
X_train_5 = pd.concat([X_1, X_2, X_3, X_4])
y_train_5 = pd.concat([y_1, y_2, y_3, y_4])
r_best_model.fit(X_train_5, y_train_5)
y_predictions_5 = r_best_model.predict(X_5)
error_5 = mean_absolute_error(y_5, y_predictions_5)
average = (error_1 + error_2 + error_3 + error_4 + error_5) / 5
print("average error is", average)
from sklearn.metrics import roc_curve, auc

# The target was one-hot encoded, so the fitted tree is a multi-output classifier:
# predict_proba returns one array per drug column. Score the class-1 probabilities
# of the first drug column against that column of fold 1.
y_prob_1 = r_best_model.predict_proba(X_1)[0][:, 1]
fpr, tpr, thre = roc_curve(y_1.iloc[:, 0], y_prob_1)
import matplotlib.pyplot as plt
plt.title("Receiver Operating Characteristic")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.plot(fpr, tpr)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_eth = pd.read_csv(
"/kaggle/input/bitcoin-and-ethereum-prices-from-start-to-2023/Ethereum prices.csv"
)
df_bit = pd.read_csv(
"/kaggle/input/bitcoin-and-ethereum-prices-from-start-to-2023/Bitcoin prices.csv"
)
df_bit.head()
import seaborn as sns
from matplotlib import pyplot as plt
sns.lineplot(df_bit["Close"])
sns.lineplot(df_eth["Close"])
plt.title("Closing prices of Bitcoin Vs Ethereum")
plt.show()
sns.lineplot(df_bit["Low"], color="red")
sns.lineplot(df_bit["High"], color="green")
plt.title("Bitcoin High Vs Low")
plt.show()
sns.lineplot(df_eth["Low"], color="red")
sns.lineplot(df_eth["High"], color="green")
plt.title("Ethereum High Vs Low")
plt.show()
sns.lineplot(df_eth["Open"])
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
fig = go.Figure(
data=[
go.Candlestick(
x=df_eth["Date"],
open=df_eth["Open"],
high=df_eth["High"],
low=df_eth["Low"],
close=df_eth["Close"],
)
]
)
fig.show()
fig = go.Figure([go.Scatter(x=df_bit["Date"], y=df_bit["High"])])
fig.show()
fig = px.bar(df_bit, x=df_bit.Date, y="High")
fig.show()
df_eth["Date"] = pd.to_datetime(df_eth["Date"])
# Select only the rows with year 2019
df_2019 = df_eth[df_eth["Date"].dt.year == 2019]
df_2019.head()
fig = make_subplots(rows=1, cols=1, subplot_titles=["2019 Ethereum price"])
fig.append_trace(go.Scatter(x=df_2019["Date"], y=df_2019["Close"]), row=1, col=1)
fig.update_layout(height=1200, width=1200, title_text="Group Years Close")
fig.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
import warnings
warnings.filterwarnings("ignore")
df1 = pd.read_csv("/kaggle/input/happiness-index-2018-2019/2018.csv")
df2 = pd.read_csv("/kaggle/input/happiness-index-2018-2019/2019.csv")
df3 = pd.read_csv("/kaggle/input/happiness-index-2018-2019/report_2018-2019.csv")
df1.head()
df2.head()
df3.head()
print(df1.columns)
print("\n")
print(df2.columns)
print("\n")
print(df3.columns)
print("\n")
print("Number of null or missing values in 2018 report: ", df1.isnull().sum().sum())
print("Number of null or missing values in 2018 report: ", df2.isnull().sum().sum())
print(
"Number of null or missing values in consolidated report 2018-2019: ",
df3.isnull().sum().sum(),
)
# visualization of the dataset containing missing values in heatmap
# yellow = true (null)
plt.figure(figsize=(24, 8))
plt.subplot(1, 3, 1)
sns.heatmap(df1.isnull(), yticklabels=False, cmap="viridis")
plt.subplot(1, 3, 2)
sns.heatmap(df2.isnull(), yticklabels=False, cmap="viridis")
plt.subplot(1, 3, 3)
sns.heatmap(df3.isnull(), yticklabels=False, cmap="viridis")
print("No. of countries ranked in 2018: ", len(df1))
print("No. of countries ranked in 2019: ", len(df2))
plt.figure(figsize=(24, 8))
plt.subplot(1, 3, 1)
sns.barplot(x=df1["Country or region"][0:5], y=df1["Overall rank"][0:5])
plt.savefig("Country_vs_rank.png")
plt.subplot(1, 3, 2)
sns.barplot(x=df2["Country or region"][0:5], y=df2["Overall rank"][0:5])
plt.savefig("Country_vs_rank.png")
print("Happiness score of Finland in 2018 is ", df1["Score"].iat[0])
print("Happiness score of Finland in 2019 is ", df2["Score"].iat[0])
# *Remarkably, Finland has retained the highest position in the world happiness reports, with an average happiness score of 7.6 in 2018 and 7.7 in 2019 out of 10*
print(
    "Unhappiest country in 2018: ",
    df1["Country or region"].iat[-1],
    "(",
    df1["Score"].iat[-1],
    ")",
)
print(
    "Unhappiest country in 2019: ",
    df2["Country or region"].iat[-1],
    "(",
    df2["Score"].iat[-1],
    ")",
)
# *Turning to the unhappiest nations, Burundi has the lowest happiness score in both years, with a score of 2.9 in 2018*
# Happiness rankings of nations in 2018
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
data = dict(
type="choropleth",
locations=df1["Country or region"],
locationmode="country names",
z=df1["Overall rank"],
text=df1["Country or region"],
colorbar={"title": "Happiness"},
)
layout = dict(title="Global Happiness 2018", geo=dict(showframe=False))
choromap3 = go.Figure(data=[data], layout=layout)
iplot(choromap3)
# Happiness rankings of nations in 2019
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
data = dict(
type="choropleth",
locations=df2["Country or region"],
locationmode="country names",
z=df2["Overall rank"],
text=df2["Country or region"],
colorbar={"title": "Happiness"},
)
layout = dict(title="Global Happiness 2019", geo=dict(showframe=False))
choromap3 = go.Figure(data=[data], layout=layout)
iplot(choromap3)
plt.savefig("global_happiness_2019.png")
# Analysis of relations among the happiness factors
plt.figure(figsize=(24, 8))
plt.subplot(1, 3, 1)
plt.scatter(df1["Score"], df1["GDP per capita"])
plt.scatter(df2["Score"], df2["GDP per capita"])
plt.xlabel("GDP per capita in 2018 and 2019")
plt.ylabel("Happiness score")
plt.legend(["2018", "2019"])
plt.subplot(1, 3, 2)
plt.scatter(df1["Score"], df1["Social support"])
plt.scatter(df2["Score"], df2["Social support"])
plt.xlabel("Social support in 2018 and 2019")
plt.ylabel("Happiness score")
plt.legend(["2018", "2019"])
plt.subplot(1, 3, 3)
plt.scatter(df1["Score"], df1["Healthy life expectancy"])
plt.scatter(df2["Score"], df2["Healthy life expectancy"])
plt.xlabel("Healthy life expectancy in 2018 and 2019")
plt.ylabel("Happiness score")
plt.legend(["2018", "2019"])
plt.figure(figsize=(24, 8))
plt.subplot(1, 3, 1)
plt.scatter(df1["Score"], df1["Freedom to make life choices"])
plt.scatter(df2["Score"], df2["Freedom to make life choices"])
plt.xlabel("Freedom to make life choices in 2018")
plt.ylabel("Happiness score")
plt.legend(["2018", "2019"])
plt.subplot(1, 3, 2)
plt.scatter(df1["Score"], df1["Generosity"])
plt.scatter(df2["Score"], df2["Generosity"])
plt.xlabel("Generosity in 2018")
plt.ylabel("Happiness score")
plt.legend(["2018", "2019"])
plt.subplot(1, 3, 3)
plt.scatter(df1["Score"], df1["Perceptions of corruption"])
plt.scatter(df2["Score"], df2["Perceptions of corruption"])
plt.xlabel("Perceptions of corruption in 2018")
plt.ylabel("Happiness score")
plt.legend(["2018", "2019"])
print(df1.columns)
# linear regression model for year 2018
import pandas as pd
import statsmodels.api as sm
# Define independent and dependent variables
X = df1[
[
"Overall rank",
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
]
y = df1["Score"]
# Fit linear regression model
X = sm.add_constant(X) # Add intercept term
lin_reg = sm.OLS(y, X).fit()
# Print results
print(lin_reg.summary())
# linear regression model for year 2019
import pandas as pd
import statsmodels.api as sm
# Define independent and dependent variables
X = df2[
[
"Overall rank",
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
]
y = df2["Score"]
# Fit linear regression model
X = sm.add_constant(X) # Add intercept term
lin_reg = sm.OLS(y, X).fit()
# Print results
print(lin_reg.summary())
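# A compact view of the most recent fit (the 2019 model): pull coefficients and
# p-values out of the fitted results object instead of reading the full summary
# (a small sketch using the lin_reg object fitted just above).
coef_table = pd.DataFrame({"coefficient": lin_reg.params, "p_value": lin_reg.pvalues})
print(coef_table.sort_values("p_value"))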
# Import necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# 1.1 Happiness vs GDP per capita for 2018
plt.scatter(df1["Score"], df1["GDP per capita"], s=10, c="blue", alpha=0.5)
z = np.polyfit(df1["Score"], df1["GDP per capita"], 1)
p = np.poly1d(z)
plt.plot(df1["Score"], p(df1["Score"]), "r--")
plt.annotate("slope={:.2f}".format(z[0]), xy=(3.5, 1.3), xytext=(3.5, 1.3))
plt.title("Impact on Score with the change of GDP per capita")
plt.xlabel("Score")
plt.ylabel("GDP per capita for 2018")
plt.show()
# 1.2 Happiness vs GDP per capita for 2019
plt.scatter(df2["Score"], df2["GDP per capita"], s=10, c="blue", alpha=0.5)
z = np.polyfit(df2["Score"], df2["GDP per capita"], 1)
p = np.poly1d(z)
plt.plot(df2["Score"], p(df2["Score"]), "r--")
plt.annotate("slope={:.2f}".format(z[0]), xy=(3.3, 1.7), xytext=(3.3, 1.7))
plt.title("Impact on Score with the change of GDP per capita")
plt.xlabel("Score")
plt.ylabel("GDP per capita for 2019")
plt.show()
# 2.1 Happiness vs Freedom to make life choices for 2018
plt.scatter(df1["Score"], df1["Freedom to make life choices"], s=10, c="red", alpha=0.5)
z = np.polyfit(df1["Score"], df1["Freedom to make life choices"], 1)
p = np.poly1d(z)
plt.plot(df1["Score"], p(df1["Score"]), "b--")
plt.annotate("slope={:.2f}".format(z[0]), xy=(3, 0.7), xytext=(3, 0.7))
plt.title("Impact on Score with the change of Freedom to make life choices")
plt.xlabel("Score")
plt.ylabel("Freedom to make life choices")
plt.show()
# 2.2 Happiness vs Freedom to make life choices for 2019
plt.scatter(df2["Score"], df2["Freedom to make life choices"], s=10, c="red", alpha=0.5)
z = np.polyfit(df2["Score"], df2["Freedom to make life choices"], 1)
p = np.poly1d(z)
plt.plot(df2["Score"], p(df2["Score"]), "b--")
plt.annotate("slope={:.2f}".format(z[0]), xy=(3, 0.62), xytext=(3, 0.62))
plt.title("Impact on Score with the change of Freedom to make life choices")
plt.xlabel("Score")
plt.ylabel("Freedom to make life choices")
plt.show()
import plotly.graph_objects as go
# list of variables to plot
dropdown_options1 = [
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
dropdown_options2 = [
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
# create scatter plot of Happiness Rank vs. other variables
fig1 = go.Figure()
for var in dropdown_options1:
fig1.add_trace(
go.Scatter(x=df3["Overall rank"], y=df3[var], mode="markers", name=var)
)
fig1.update_layout(
title="Happiness Rank vs. other variables",
xaxis_title="Happiness Rank",
yaxis_title="Variable",
)
# create scatter plot of Happiness Score vs. other variables
fig2 = go.Figure()
for var in dropdown_options2:
fig2.add_trace(go.Scatter(x=df3["Score"], y=df3[var], mode="markers", name=var))
fig2.update_layout(
title="Happiness Score vs. other variables",
xaxis_title="Happiness Score",
yaxis_title="Variable",
)
# add dropdown menu to select variables to plot
dropdown_options1.insert(0, "All Variables")
dropdown_options2.insert(0, "All Variables")
# create dropdown menu for Happiness Rank plot
dropdown1 = dict(
label="All variables",
method="update",
args=[
{
"visible": [True] * len(dropdown_options1[1:]),
"y": [df3[var] for var in dropdown_options1[1:]],
"name": [var for var in dropdown_options1[1:]],
},
{"title": "Happiness Rank vs. All Variables"},
],
)
# create dropdown menu for Happiness Score plot
dropdown2 = dict(
label="All Variables",
method="update",
args=[
{
"visible": [True] * len(dropdown_options2[1:]),
"y": [df3[var] for var in dropdown_options2[1:]],
"name": [var for var in dropdown_options2[1:]],
},
{"title": "Happiness Score vs. All Variables"},
],
)
# add dropdown menus to the plots
fig1.update_layout(
updatemenus=[
go.layout.Updatemenu(
buttons=[dropdown1]
+ [
dict(
label=var,
method="update",
args=[
{
"visible": [
var == dropdown_options1[i]
for i in range(len(dropdown_options1))
]
},
{"title": "Happiness Rank vs. " + var},
],
)
for var in dropdown_options1[1:]
]
)
]
)
fig2.update_layout(
updatemenus=[
go.layout.Updatemenu(
buttons=[dropdown2]
+ [
dict(
label=var,
method="update",
args=[
{
"visible": [
var == dropdown_options2[i]
for i in range(len(dropdown_options2))
]
},
{"title": "Happiness Score vs. " + var},
],
)
for var in dropdown_options2[1:]
]
)
]
)
# show the plots
fig1.show()
fig2.show()
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import seaborn as sns
def country_comp1(country1: str, country2: str, cols1: list) -> tuple:
"""
Creates a comparison plot between two countries using a selected set of columns from the dataframe df1.
Parameters:
country1 (str): name of the first country to be compared
country2 (str): name of the second country to be compared
cols1 (list): list of column names to be used in the comparison plot
Returns:
tuple: a tuple of two axes objects containing the comparison plot for each country
"""
df4 = df1[df1["Country or region"].isin([country1, country2])]
df4 = df4[cols1].T
df4.columns = [country1, country2]
print(df4)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 10), gridspec_kw={"wspace": 0})
fig.suptitle(
f"Comparison {country1} and {country2}", family="Serif", weight="bold", size=20
)
sns.barplot(data=df4, y=df4.index, x=country1, color="#ffa600", ax=ax1)
sns.barplot(data=df4, y=df4.index, x=country2, color="#003f5c", ax=ax2)
ax1.invert_xaxis()
for ax in [ax1, ax2]:
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax2.yaxis.tick_right()
return ax1, ax2
# example usage
cols_to_use = [
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
ax1, ax2 = country_comp1("Finland", "India", cols_to_use)
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import seaborn as sns
def country_comp2(country1: str, country2: str, cols1: list) -> tuple:
"""
    Creates a comparison plot between two countries using a selected set of columns from the dataframe df2.
Parameters:
country1 (str): name of the first country to be compared
country2 (str): name of the second country to be compared
cols1 (list): list of column names to be used in the comparison plot
Returns:
tuple: a tuple of two axes objects containing the comparison plot for each country
"""
df5 = df2[df2["Country or region"].isin([country1, country2])]
df5 = df5[cols1].T
df5.columns = [country1, country2]
print(df5)
fig, (ax3, ax4) = plt.subplots(ncols=2, figsize=(15, 10), gridspec_kw={"wspace": 0})
fig.suptitle(
f"Comparison {country1} and {country2}", family="Serif", weight="bold", size=20
)
sns.barplot(data=df5, y=df5.index, x=country1, color="#ffa600", ax=ax3)
sns.barplot(data=df5, y=df5.index, x=country2, color="#003f5c", ax=ax4)
ax3.invert_xaxis()
for ax in [ax3, ax4]:
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax4.yaxis.tick_right()
return ax3, ax4
# example usage
cols_to_use = [
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
ax3, ax4 = country_comp2("Finland", "India", cols_to_use)
plt.show()
df3.head()
print(df3.columns)
import matplotlib.pyplot as plt
import seaborn as sns
def comp_country(country1, country2, df3):
cols = df3[
[
"Year",
"Score",
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
]
df3 = df3[df3["Country or region"].isin([country1, country2])]
df3["Year"] = df3["Year"].astype(int) # Convert Year column to integer
fig = plt.figure(figsize=(15, 10))
plt.suptitle(
"Comparing the Features that contribute for Happiness index for {} vs {}".format(
country1, country2
),
family="Serif",
weight="bold",
size=20,
)
j = 0
color = sns.color_palette("husl", 2)
for i in cols.columns[1:]:
ax = plt.subplot(421 + j)
try:
sns.lineplot(
data=df3, x="Year", y=i, hue="Country or region", color=color[-2]
)
ax.set_xticks(
df3["Year"].unique()
) # Set x-axis ticks to unique integer values in Year column
j += 1
except Exception as e:
print(f"Error occurred while plotting {i}: {str(e)}")
plt.show()
return
comp_country("India", "Finland", df3)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import datetime
import pandas_profiling
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import f_classif, mutual_info_classif
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_auc_score, roc_curve
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import cross_validate
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from math import log as log
import os
# # **A first look at the data**
path = "/kaggle/input/sf-dst-scoring/"
train = pd.read_csv(path + "/train.csv")
test = pd.read_csv(path + "test.csv")
sample_sub = pd.read_csv(path + "/sample_submission.csv")
train.info()
print("Размер обучающей выборки", train.shape)
train.head(5)
test.info()
print("Размер тестовой выборки", train.shape)
test.head(5)
sample_sub.info()
print(sample_sub.shape)
sample_sub.head(5)
# Look at the distribution of the default target in the training set
sns.countplot(train["default"])
# The distribution is imbalanced: there are far more clients who are granted a loan (default = 0).
# ### Working with each dataset separately is inconvenient, so we combine them into one
# To tell the training and test sets apart, create a column with values 1 and 0
train["sample"] = 1 # обучающая выборка
test["sample"] = 0 # тестовая выборка
test[
"default"
] = (
-1
) # т.к. изначально в тестовых данных у нас нет переменной default, создадим столбец со значениями -1
data = test.append(train, sort=False).reset_index(drop=True)
data.info()
data.shape
# ## *Data description:*
# client_id - client identifier
# education - education level
# sex - borrower's gender
# age - borrower's age
# car - flag: owns a car
# car_type - flag: the car is a foreign brand
# decline_app_cnt - number of previously declined applications
# good_work - flag: has a "good" job
# bki_request_cnt - number of requests to the credit bureau (BKI)
# home_address - home address category
# work_address - work address category
# income - borrower's income
# foreign_passport - has an international passport
# sna - borrower's connection with the bank's clients
# first_time - how long information about the borrower has been available
# score_bki - credit bureau (BKI) score
# region_rating - region rating
# app_date - application date
# default - loan default flag
# # Data visualization
display(data)
num_cols = [
"age",
"decline_app_cnt",
"score_bki",
"income",
"bki_request_cnt",
"region_rating",
]  # numerical variables
cat_cols = [
"education",
"work_address",
"home_address",
"sna",
"first_time",
]  # categorical variables
bin_cols = [
"sex",
"car",
"car_type",
"good_work",
"foreign_passport",
]  # binary variables
# # *Numerical*
# Look at the distributions of the numerical features
fig, axes = plt.subplots(2, 3, figsize=(25, 15))
for i, col in enumerate(num_cols):
sns.distplot(data[col], kde=False, ax=axes.flat[i])
# ### The distributions look far from normal... Let's try working with log-transformed versions of the variables.
# Log-transform only the columns whose distributions are farthest from normal:
fig, axes = plt.subplots(1, 3, figsize=(10, 7))
for i, col in enumerate(["decline_app_cnt", "bki_request_cnt", "income"]):
data[col] = np.log(data[col] + 1)
sns.distplot(data[col], kde=False, ax=axes.flat[i], color="g")
# ## Some columns now look better, but outliers are still present.
# ### Now build boxplots for the numerical variables:
def boxplot(col):
fig, axes = plt.subplots(figsize=(14, 4))
sns.boxplot(x="default", y=col, data=data, ax=axes)
axes.set_title("Boxplot for " + col)
plt.show()
for col in num_cols:
boxplot(col)
data.default.value_counts()
# ### We will deal with the outliers later.
# # *Categorical*
data.education.value_counts().plot(kind="bar", figsize=(8, 6), color="r")
data.education.isna().value_counts()
# Replace missing values with the most frequent category, "SCH".
data.education = data.education.fillna("SCH")
data.education.isna().value_counts()
# Look at how income depends on education level
plt.figure(figsize=(15, 8))
sns.boxplot(x="education", y="income", data=data, showfliers=False)
# The higher the education level, the higher the income.
plt.figure(figsize=(15, 8))
sns.boxplot(x="region_rating", y="score_bki", data=data, showfliers=False)
# The region rating has a small but visible effect on the BKI score.
#
plt.figure(figsize=(15, 8))
sns.boxplot(x="education", y="age", data=data, showfliers=False)
# # *Binary*
plt.figure(figsize=(15, 8))
sns.boxplot(x="good_work", y="income", data=data, showfliers=False)
plt.figure(figsize=(15, 8))
sns.boxplot(x="foreign_passport", y="score_bki", data=data, showfliers=False)
# Holders of an international passport have a slightly higher score_bki.
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.corr(), vmin=0, vmax=1, annot=True)
# ### There are almost no strongly correlated variables
# # *EDA and feature engineering*
# Let's start with the date column.
data["app_date"] = pd.to_datetime(data.app_date)
data["app_date"].sample(3)
# Now we can compute the number of days elapsed since the earliest date in the dataset.
data["app_date"] = data["app_date"].apply(lambda x: (x - data["app_date"].min()).days)
# Keep a copy of the dataset, just in case
df = data.copy()
# mean income by age
mean_income = df.groupby("age")["income"].mean().to_dict()
df["mean_income_age"] = df["age"].map(mean_income)
# Now find the maximum income by age
max_income = df.groupby("age")["income"].max().to_dict()
df["max_income_age"] = df["age"].map(max_income)
# Normalize
df["normalized_income"] = abs(df.income - df.mean_income_age) / df.max_income_age
# Find the mean number of BKI requests by age and by income
mean_bki = df.groupby("age")["bki_request_cnt"].mean().to_dict()
df["mean_requests_age"] = df["age"].map(mean_bki)
mean_bki_inc = df.groupby("income")["bki_request_cnt"].mean().to_dict()
df["mean_requests_income"] = df["income"].map(mean_bki_inc)
# Find the mean income by region rating
mean_income_rat = df.groupby("region_rating")["income"].mean().to_dict()
df["mean_income_region"] = df["region_rating"].map(mean_income_rat)
# ### Now encode the binary and categorical features with LabelEncoder (one-hot encoding is applied later via get_dummies).
mapped_data = {}
label_encoder = LabelEncoder()
for col in bin_cols:
df[col] = label_encoder.fit_transform(df[col])
mapped_data[col] = dict(enumerate(label_encoder.classes_))
mapped_data
mapped_data_cat = {}
enc = OneHotEncoder()
for col in cat_cols:
df[col] = label_encoder.fit_transform(df[col])
mapped_data_cat[col] = dict(enumerate(label_encoder.classes_))
mapped_data_cat
# ### Redefine the column groups
df.columns
num_cols = [
"age",
"decline_app_cnt",
"score_bki",
"income",
"bki_request_cnt",
"app_date",
"mean_income_age",
"region_rating",
"max_income_age",
"normalized_income",
"mean_requests_age",
"mean_requests_income",
"mean_income_region",
]  # numerical variables
cat_cols = [
"education",
"work_address",
"home_address",
"sna",
"first_time",
]  # categorical variables
bin_cols = [
"sex",
"car",
"car_type",
"good_work",
"foreign_passport",
]  # binary variables
len(num_cols)
# ### Find the outlier bounds
for col in num_cols:
median = df[col].median()
IQR = df[col].quantile(0.75) - df[col].quantile(0.25)
perc25 = df[col].quantile(0.25)
perc75 = df[col].quantile(0.75)
print("Колонка ", col)
print(
"25-й перцентиль: {},".format(perc25),
"75-й перцентиль: {},".format(perc75),
"IQR: {}, ".format(IQR),
"Границы выбросов: [{f}, {l}].".format(
f=perc25 - 1.5 * IQR, l=perc75 + 1.5 * IQR
),
)
df.age.hist(color="r", figsize=(10, 6))
df.decline_app_cnt.hist(color="g", figsize=(10, 6))
# This feature has odd outlier bounds [0, 0], so we leave it as is.
df.bki_request_cnt.hist(color="b", figsize=(10, 6))
# No outliers.
df.mean_income_age.hist(color="purple", figsize=(10, 6))
df.region_rating.hist(color="pink", figsize=(10, 6))
# ## Standardizing the numerical features
dataset = df.copy()
dataset[num_cols] = pd.DataFrame(
StandardScaler().fit_transform(df[num_cols]), columns=df[num_cols].columns
)
# ## Assessing feature importance
# ### Numerical features
data_temp = dataset.loc[data["sample"] == 1]  # training set
imp_num = pd.Series(
f_classif(data_temp[num_cols], data_temp["default"])[0], index=num_cols
)
imp_num.sort_values(inplace=True)
imp_num.plot(kind="barh")
# As we can see, score_bki has by far the strongest influence on default. Among the engineered features, only mean_income_region adds any noticeable importance.
# ### Categorical and binary features
imp_cat = pd.Series(
mutual_info_classif(
data_temp[bin_cols + cat_cols], data_temp["default"], discrete_features=True
),
index=bin_cols + cat_cols,
)
imp_cat.sort_values(inplace=True)
imp_cat.plot(kind="barh")
# Here the most important feature is sna (the borrower's connection with the bank's clients), followed by first_time, home_address and education.
# ## *Get_dummies*
data = pd.get_dummies(
dataset, prefix=cat_cols, columns=cat_cols
)  # dummies for the categorical features
data.head(3)
client_id = data.client_id
data.drop("client_id", axis=1, inplace=True)
# # Building the model
train = data.query("sample == 1").drop(["sample"], axis=1)
test = data.query("sample == 0").drop(["sample"], axis=1)
X = train.drop(["default"], axis=1).values
y = train["default"].values # целевая переменная
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.20, random_state=42
)
# Logistic regression
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)
y_pred = model.predict(X_valid)
probs = model.predict_proba(X_valid)
probs = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_valid, probs)
roc_auc = roc_auc_score(y_valid, probs)
plt.figure()
plt.plot([0, 1], label="Baseline", linestyle="--")
plt.plot(fpr, tpr, label="Regression")
plt.title("Logistic Regression ROC AUC = %0.3f" % roc_auc)
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc="lower right")
plt.show()
|
# # `CROP RECOMMENDATION`
# Crop recommendation means suggesting the most suitable crop for given soil and weather conditions. The selection of crops and cropping systems plays a major role in improving farmers' productivity and profitability, and a crop recommendation system supports this decision by considering parameters such as temperature, rainfall, season and agro-ecological conditions
# 
# So, through this machine learning model, we are going to predict the most suitable crop to grow on a particular soil by considering its NPK values (Nitrogen, Phosphorus and Potassium) and pH, together with weather conditions such as humidity, temperature and rainfall.
# **So the following main steps we are going to perform in this notebook :**
# * Installing and Importing Libraries
# * Loading the Dataset
# * Data Preprocessing (like LabelEncoding)
# * Splitting the Dataset
# * Creating the Machine Learning Pipeline
# * Saving the best Model
# ## `1. Installing and Importing Libraries`
import pandas as pd
import numpy as np
# ## `2. Loading and Analysing the Dataset`
df = pd.read_csv("/kaggle/input/crop-dataset/crop_recommendation.csv")
df.head()
df.isnull().sum()
df.shape
# **So, we don't have any null values in our dataset**
df.info()
df.describe()
df.label.value_counts()
# So we have 100 samples for each of the 22 crops in our dataset, hence the dataset can be considered balanced.
# ## `3. Data Preprocessing`
# ### Label Encoding the `label` Column
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder().fit(df["label"])
le_name_mapping = dict(zip(le.classes_, le.transform(le.classes_)))
print(le_name_mapping)
df["label"] = le.transform(df["label"])
df.head()
# ## `4. Splitting the Data`
from sklearn.model_selection import train_test_split
X = df.drop(["label"], axis=1)
y = df["label"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train.head()
X_test.head()
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# ## `5. Creating the Pipeline`
# ### Importing the Important Libraries
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
ExtraTreesClassifier,
)
from sklearn.svm import SVC
from xgboost import XGBClassifier
# ### KNN Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = KNeighborsClassifier()
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# ### SVM Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = SVC()
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# ### Decision Tree Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = DecisionTreeClassifier(max_depth=10)
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# ### Random Forest Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = RandomForestClassifier(
n_jobs=-1,
random_state=42,
n_estimators=10,
max_features=5,
max_depth=40,
max_leaf_nodes=2**20,
)
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# ### Extra Tree Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = ExtraTreesClassifier(
n_estimators=100, random_state=3, max_features=0.75, max_depth=15
)
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# ### Gradient Boosting Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = GradientBoostingClassifier()
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# ### XgBoost Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
step2 = XGBClassifier(n_estimators=50, max_depth=10, learning_rate=0.5)
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
from sklearn.ensemble import VotingClassifier
# ### Voting Classifier
step1 = ColumnTransformer(
transformers=[
("scal_tnf", MinMaxScaler(), [0, 1, 2, 3, 4, 5, 6]),
],
remainder="passthrough",
)
rf = RandomForestClassifier(
n_jobs=-1,
random_state=42,
n_estimators=10,
max_features=5,
max_depth=40,
max_leaf_nodes=2**20,
)
gbdt = GradientBoostingClassifier()
xgb = XGBClassifier(n_estimators=50, max_depth=10, learning_rate=0.5)
et = ExtraTreesClassifier(
n_estimators=100, random_state=3, max_features=0.75, max_depth=15
)
step2 = VotingClassifier(
[("rf", rf), ("gbdt", gbdt), ("xgb", xgb), ("et", et)], weights=[5, 1, 2, 2]
)
pipe = Pipeline([("step1", step1), ("step2", step2)])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print("Accuracy: ", accuracy_score(y_test, y_pred))
# We are finally going to use this **Random Forest Classifier Model**, as it gives the highest accuracy of **99.1%** on the test data
# ## `6. Saving the Best Model (into pickle file)`
import pickle
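# Note: `pipe` currently holds the last pipeline fitted above (the VotingClassifier).
# Since the comment above names the Random Forest pipeline as the model to keep, we
# refit that pipeline here before pickling (a small corrective sketch that reuses the
# step1 transformer and rf estimator defined in the cells above).
pipe = Pipeline([("step1", step1), ("step2", rf)])
pipe.fit(X_train, y_train)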
pickle.dump(pipe, open("pipe.pkl", "wb"))
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
pipe = pickle.load(open("pipe.pkl", "rb"))
df = [[90, 42, 43, 20.87, 82.01, 6.5, 202.93]]  # one sample in feature order: N, P, K, temperature, humidity, ph, rainfall
pipe.predict(df)[0]
|
# One of the challenges that investors face is the likelihood of the price movement - i.e., what is the probability that the price will reach a certain level. One way to address this challenge is to simulate the price movement using [Monte Carlo method](https://en.wikipedia.org/wiki/Monte_Carlo_method).
# Briefly speaking, the Monte Carlo method treats the phenomenon (price movement) as a random process whose probability distribution we claim to know. These assumptions are naive, as they do not hold even under the weak form of the efficient-market hypothesis.
# Is the Monte Carlo method then viable for modelling financial markets? Even if the answer it gives may be heavily skewed, it addresses the question of probability, whereas valuation models such as [Residual Income Valuation](https://www.cfainstitute.org/en/membership/professional-development/refresher-readings/residual-income-valuation) only tell us what a price should be.
# Let us begin our analysis. First, we will import necessary libraries, set key model parameters, and download latest data.
import math
import random
import io
import requests
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
# We will model the stock price of Apple for the next year using 1000 simulations.
# In setting the number of years that we look back to provide our model with data on the expected return and expected annual volatility, one must balance having enough observations against including old data from market regimes that may no longer be representative.
ticker = "aapl.us" # Use ticker format from https://stooq.com
number_of_simulations = 1000 # Set desired number of simulations
interval_prediction = 1 # Forecast in years
look_back_x_years = 5
target_url = "https://stooq.com/q/d/l/?s={}&i=d&o=1111111".format(ticker)
ticker_data = requests.get(target_url)
ticker_data = ticker_data.text
buffer = io.StringIO(ticker_data)
ticker_dataframe = pd.read_csv(buffer, index_col="Date", parse_dates=True).drop(
["Open", "High", "Low", "Volume"], axis=1
)
ticker_dataframe = ticker_dataframe[
-253 * look_back_x_years :
] # On average, stocks are traded 253 days in a year
# $$S_{t + \Delta t}=S_{t} \exp[(\mu - \frac{\hat{\sigma}^{2}}{2})\Delta t + \hat{\sigma}\epsilon\sqrt{\Delta t}]$$
# $$\hat{\sigma} = \frac{s}{\sqrt{\tau}}$$
# $$\tau = \frac{1}{N}$$
def simple_monte_carlo(function, args, number_of_simulations):
results = list()
for x in range(number_of_simulations):
results.append(function(*args))
return results
def geometric_brownian_motion_stock_price(
lastest_stock_price,
expected_annual_return,
expected_annual_volatility,
interval_prediction,
):
nrandom = random.normalvariate(0, 1)
return lastest_stock_price * math.exp(
(expected_annual_return - 0.5 * expected_annual_volatility**2)
* interval_prediction
+ expected_annual_volatility * nrandom * math.sqrt(interval_prediction)
)
def get_expected_annual_volatility(stock_prices):
stock_daily_return = np.diff(stock_prices) / stock_prices[: len(stock_prices) - 1]
stock_daily_return_sd = np.std(stock_daily_return)
return stock_daily_return_sd / math.sqrt(1 / len(stock_prices))
lastest_stock_price = ticker_dataframe["Close"][-1]
expected_annual_return = (
ticker_dataframe["Close"][-1] / ticker_dataframe["Close"][0]
) ** (1 / look_back_x_years) - 1
expected_annual_volatility = get_expected_annual_volatility(ticker_dataframe["Close"])
args = (
lastest_stock_price,
expected_annual_return,
expected_annual_volatility,
interval_prediction,
)
monte_carlo_simulation = simple_monte_carlo(
geometric_brownian_motion_stock_price, args, number_of_simulations
)
sns.displot(monte_carlo_simulation, color=(0.17, 0.74, 0.45))
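# The simulated distribution can answer the original question directly: the share of
# simulated prices above a target level is a Monte Carlo estimate of the probability
# of reaching that level. The +10% target below is only an illustrative choice, not
# part of the original analysis.
target_price = lastest_stock_price * 1.10
probability_above_target = np.mean(np.array(monte_carlo_simulation) > target_price)
print(
    "Estimated probability that the price exceeds {:.2f} in one year: {:.1%}".format(
        target_price, probability_above_target
    )
)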
|
import numpy as np
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
conn = sqlite3.connect("/kaggle/input/ipldatabase/database.sqlite")
c = conn.cursor()
# ### No of players in the IPL country wise
sql = """
select Country.Country_Name, COUNT(Player.Country_Name) AS Number_of_Players
FROM Country
JOIN Player ON Player.Country_Name=Country.Country_Id
GROUP BY Player.Country_Name
ORDER BY Number_of_Players DESC
"""
country = pd.read_sql(sql, conn)
country
# ### Number of matches played in each stadium
#
sql = """
select Venue.Venue_Name, Count(Match.Venue_id) AS Matches_Played, City.City_Name AS Location
from Venue
JOIN Match ON Venue.Venue_Id = Match.Venue_Id
JOIN City ON City.City_Id = Venue.City_Id
GROUP BY Match.Venue_id
ORDER BY Matches_Played DESC
"""
venue = pd.read_sql(sql, conn)
venue
# ### Number of matches played in each city
sql = """
select City.City_Name AS City, Count(Match.Venue_id) AS Matches_Played
from City
JOIN Venue ON City.City_Id = Venue.City_Id
JOIN Match ON Venue.Venue_Id = Match.Venue_Id
GROUP BY City.City_Name
ORDER BY Matches_Played DESC
"""
city = pd.read_sql(sql, conn)
city
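# matplotlib is imported above but not yet used; a quick sketch visualising the
# matches-per-city counts returned by the query above.
plt.figure(figsize=(12, 5))
plt.bar(city["City"], city["Matches_Played"])
plt.xticks(rotation=90)
plt.ylabel("Matches played")
plt.title("IPL matches played per city")
plt.tight_layout()
plt.show()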
# ### Man of the Series, Purple Cap, Orange Cap winners in every season
#
sql = """
SELECT s.Season_Year, mos.Player_Name Man_of_the_series, oc.Player_Name Orange_cap, pc.Player_Name Purple_Cap
FROM Season s
Join Player mos ON mos.Player_Id = s.Man_of_the_Series
Join Player oc ON oc.Player_Id = s.Orange_Cap
Join Player pc ON pc.Player_Id = s.Purple_Cap
"""
query = pd.read_sql(sql, conn)
query
# ### Total matches won by each team
sql = """
SELECT Team.Team_Name as Team_Name, Count(Match.Match_Winner) AS Number_of_wins
FROM Match
JOIN Team
ON Team.Team_Id = Match.Match_Winner
GROUP BY Team_Name
ORDER BY Number_of_wins DESC
"""
matches = pd.read_sql(sql, conn)
matches
# ### Wicket types
sql = """
select o.Out_Name AS Wicket_type, COUNT(w.Kind_Out) AS Total_dismissals
from Out_Type o
JOIN Wicket_Taken w
ON o.Out_Id = w.Kind_Out
GROUP BY 1
ORDER BY Total_dismissals DESC
"""
wickets = pd.read_sql(sql, conn)
wickets
# ### Top 10 Run scorers
#
sql = """
select p.player_name, SUM(ba.Runs_Scored) Total_Runs
from batsman_scored ba
JOIN Ball_by_ball bl
ON ba.Match_id = bl.Match_Id
AND ba.Over_id = bl.Over_id
AND ba.ball_id = bl.ball_id
AND ba.Innings_No = bl.Innings_No
JOIN player p
ON p.player_id = bl.striker
Group by bl.Striker
ORDER BY SUM(ba.Runs_Scored) DESC
LIMIT 10
"""
runs_scorers = pd.read_sql(sql, conn)
runs_scorers
# ### Top 10 wicket takers
sql = """
select p.player_name, COUNT(w.Ball_id) Total_Wickets
from Wicket_taken w
JOIN Ball_by_ball bl
ON w.Match_id = bl.Match_Id
AND w.Over_id = bl.Over_id
AND w.ball_id = bl.ball_id
AND w.Innings_No = bl.Innings_No
JOIN player p
ON p.player_id = bl.bowler
Group by bl.bowler
ORDER BY COUNT(w.Ball_id) DESC
LIMIT 10
"""
wicket_takers = pd.read_sql(sql, conn)
wicket_takers
# ### Players with the most Man of the Match
sql = """
select p.player_name, COUNT(m.Man_of_the_Match) as Total_Awards
from Match m
JOIN Player p
ON p.Player_Id = m.Man_of_the_Match
GROUP BY p.player_name
ORDER BY Total_Awards DESC
LIMIT 10
"""
m_o_m = pd.read_sql(sql, conn)
m_o_m
# ### Total runs scored and total wickets taken in each over
sql = """
SELECT Over_Id AS Over_Number, SUM(Runs_Scored) AS Total_Runs
FROM Batsman_Scored
GROUP BY Over_id
"""
runs = pd.read_sql(sql, conn)
runs
sql = """
SELECT Over_Id AS Over_Number, COUNT(Over_Id) AS Total_wickets
FROM Wicket_Taken
GROUP BY Over_Id
"""
wickets = pd.read_sql(sql, conn)
wickets
|
# **Type conversion** - *Convert one data type to another*
# **Implicit**
Revenue_A = 22
Revenue_B = 20.4
type(Revenue_A)
type(Revenue_B)
Revenue = Revenue_A + Revenue_B
print("Total Revenue is", Revenue)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
import seaborn as sns
from IPython.display import Audio
import tensorflow as tf
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Flatten, Dropout, BatchNormalization
from tensorflow.keras.callbacks import ModelCheckpoint
from pydub import AudioSegment, effects
print(tf.__version__)
f3_mfcc = []
extracted_features = []
labels = []
for dirname, _, filenames in os.walk("../input/toronto-emotional-speech-set-tess"):
for filename in filenames:
# Fetch the sample rate.
_, sr = librosa.load(path=os.path.join(dirname, filename), sr=None)
# Load the audio file.
rawsound = AudioSegment.from_file(os.path.join(dirname, filename))
        # Normalize the audio so its peak sits at 0 dBFS (headroom=0).
normalizedsound = effects.normalize(rawsound, headroom=0)
# Transform the normalized audio to np.array of samples.
normal_x = np.array(normalizedsound.get_array_of_samples(), dtype="float32")
f3 = librosa.feature.mfcc(y=normal_x, sr=sr, n_mfcc=128) # MFCC
f3 = np.mean(f3.T, axis=0)
label = filename.split("_")[-1]
label = label.split(".")[0]
extracted_features.append([f3, label])
extracted_features = pd.DataFrame(extracted_features, columns=("features", "op_labels"))
extracted_features.tail()
x = np.array(extracted_features["features"].tolist())
y = np.array(extracted_features["op_labels"].tolist())
print(x.shape, y.shape)
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
y = to_categorical(labelencoder.fit_transform(y))
print(y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.15, random_state=0
)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = Sequential()
model.add(Dense(256, activation="relu", input_shape=(128,)))
model.add(Dense(256, activation="relu"))
model.add(Dense(256, activation="relu"))
model.add(Dense(7, activation="softmax"))
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy", "Precision", "Recall", "AUC"],
)
model.fit(
X_train,
y_train,
epochs=50,
steps_per_epoch=50,
batch_size=4,
validation_data=(X_test, y_test),
)
model.evaluate(X_train, y_train)
model.evaluate(X_test, y_test)
extracted_features["op_labels"].unique()
filename = "/kaggle/input/toronto-emotional-speech-set-tess/TESS Toronto emotional speech set data/YAF_neutral/YAF_back_neutral.wav"
data, i_sr = librosa.load(filename)
i_rawsound = AudioSegment.from_file(filename)
i_normalizedsound = effects.normalize(i_rawsound, headroom=0)
i_normal_x = np.array(i_normalizedsound.get_array_of_samples(), dtype="float32")
i_f3 = librosa.feature.mfcc(y=i_normal_x, sr=i_sr, n_mfcc=128) # MFCC
i_f3 = np.mean(i_f3.T, axis=0)
i_f3 = i_f3.reshape(1, -1)
predicted_label = model.predict(i_f3)
a = np.argmax(predicted_label, axis=1)
b = labelencoder.inverse_transform(a)
print(b[0])
model.save("emotion detection through speech-2.9.h5")
|
import sys
sys.path.append("../input/pytorch-image-models/pytorch-image-models-master")
# sys.setrecursionlimit(10**6)
# ====================================================
# Directory settings
# ====================================================
import os
OUTPUT_DIR = "./"
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
TRAIN_PATH = "../input/ranzcr-clip-catheter-line-classification/train"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from tqdm.notebook import tqdm
from pprint import pprint
import cv2, glob, time, random, os, ast
import warnings
warnings.filterwarnings("ignore")
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torch.utils.data import Dataset, DataLoader
# https://nvlabs.github.io/iccv2019-mixed-precision-tutorial/files/dusan_stosic_intro_to_mixed_precision_training.pdf
# https://analyticsindiamag.com/pytorch-mixed-precision-training/
# https://pytorch.org/docs/stable/notes/amp_examples.html
from torch.cuda.amp import autocast, GradScaler
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingWarmRestarts
from torch.optim import Adam, AdamW, SGD
import albumentations as A
from albumentations.pytorch import ToTensorV2
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import StratifiedKFold
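# The links above cover PyTorch mixed-precision training. For reference, a minimal
# training-step sketch with autocast/GradScaler (illustrative only; model, loader,
# criterion, optimizer and scaler are placeholders, and this helper is not used by
# the rest of the notebook):
def amp_train_one_epoch(model, loader, criterion, optimizer, scaler, device):
    model.train()
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        with autocast():  # run the forward pass in mixed precision
            loss = criterion(model(images), labels)
        scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
        scaler.step(optimizer)  # unscales gradients, then steps the optimizer
        scaler.update()  # adjust the loss scale for the next iteration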
# get the list of pretrained models
model_names = timm.list_models()
pprint(model_names)
#
# ## CFG
BATCH_SIZE = 8 # 8 for bigger architectures
VAL_BATCH_SIZE = 16
EPOCHS = 15  # number of training epochs
IMG_SIZE = 640 # 384 for bigger architectures
if BATCH_SIZE == 8:
ITER_FREQ = 400
else:
ITER_FREQ = 200
NUM_WORKERS = 8
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
SEED = 999
N_FOLDS = 5
TR_FOLDS = [0, 1, 2, 3, 4]
START_FOLD = 0
target_cols = [
"ETT - Abnormal",
"ETT - Borderline",
"ETT - Normal",
"NGT - Abnormal",
"NGT - Borderline",
"NGT - Incompletely Imaged",
"NGT - Normal",
"CVC - Abnormal",
"CVC - Borderline",
"CVC - Normal",
"Swan Ganz Catheter Present",
]
MODEL_PATH = None
MODEL_ARCH = (
"resnet200d_320" # tf_efficientnet_b4_ns, tf_efficientnet_b5_ns, resnext50_32x4d
)
ITERS_TO_ACCUMULATE = 1
LR = 5e-4
MIN_LR = 1e-6 # SAM, CosineAnnealingWarmRestarts
WEIGHT_DECAY = 1e-6
MOMENTUM = 0.9
T_0 = EPOCHS # SAM, CosineAnnealingWarmRestarts
MAX_NORM = 1000
T_MAX = 5 # CosineAnnealingLR
BASE_OPTIMIZER = SGD # for SAM, Ranger
OPTIMIZER = "Adam" # Ranger, Adam, AdamP, SGD, SAM
SCHEDULER = "CosineAnnealingWarmRestarts" # ReduceLROnPlateau, CosineAnnealingLR, CosineAnnealingWarmRestarts, OneCycleLR
SCHEDULER_UPDATE = "epoch" # batch
CRITERION = "BCE" # CrossEntropyLoss, TaylorSmoothedLoss, LabelSmoothedLoss
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.sum = 0
self.avg = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def seed_torch(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_torch(SEED)
def macro_multilabel_auc(label, pred):
aucs = []
for i in range(len(target_cols)):
aucs.append(roc_auc_score(label[:, i], pred[:, i]))
# print(np.round(aucs, 4))
return np.mean(aucs)
TRAIN_DIR = "../input/ranzcr-clip-catheter-line-classification/train/"
train_df = pd.read_csv("../input/ranzcr-clip-catheter-line-classification/train.csv")
folds = pd.read_csv("../input/ranzcr-folds/train_folds.csv")
train_annotations = pd.read_csv(
"../input/ranzcr-clip-catheter-line-classification/train_annotations.csv"
)
COLOR_MAP = {
"ETT - Abnormal": (255, 0, 0),
"ETT - Borderline": (0, 255, 0),
"ETT - Normal": (0, 0, 255),
"NGT - Abnormal": (255, 255, 0),
"NGT - Borderline": (255, 0, 255),
"NGT - Incompletely Imaged": (0, 255, 255),
"NGT - Normal": (128, 0, 0),
"CVC - Abnormal": (0, 128, 0),
"CVC - Borderline": (0, 0, 128),
"CVC - Normal": (128, 128, 0),
"Swan Ganz Catheter Present": (128, 0, 128),
}
class RanzcrDataset(Dataset):
def __init__(self, df, df_annotations, annot_size=50, transform=None):
self.df = df
self.df_annotations = df_annotations
self.annot_size = annot_size
self.image_id = df["StudyInstanceUID"].values
self.labels = df[target_cols].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.image_id[idx]
file_path = f"{TRAIN_DIR}{file_name}.jpg"
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
query_string = f"StudyInstanceUID == '{file_name}'"
df = self.df_annotations.query(query_string)
for i, row in df.iterrows():
label = row["label"]
data = np.array(ast.literal_eval(row["data"]))
for d in data:
image[
d[1] - self.annot_size // 2 : d[1] + self.annot_size // 2,
d[0] - self.annot_size // 2 : d[0] + self.annot_size // 2,
:,
] = COLOR_MAP[label]
if self.transform:
augmented = self.transform(image=image)
image = augmented["image"]
label = torch.tensor(self.labels[idx]).float()
return image, label
def get_transform(*, train=True):
if train:
return A.Compose(
[
A.RandomResizedCrop(IMG_SIZE, IMG_SIZE, scale=(0.85, 1.0)),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(
p=0.2, brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2)
),
A.HueSaturationValue(
p=0.2, hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2
),
A.ShiftScaleRotate(
p=0.2, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20
),
A.CoarseDropout(p=0.2),
A.Cutout(
p=0.2,
max_h_size=16,
max_w_size=16,
fill_value=(0.0, 0.0, 0.0),
num_holes=16,
),
A.Normalize(mean=MEAN, std=STD),
ToTensorV2(),
]
)
else:
return A.Compose(
[
# A.CenterCrop(IMG_SIZE, IMG_SIZE),
A.Resize(IMG_SIZE, IMG_SIZE),
A.Normalize(mean=MEAN, std=STD, max_pixel_value=255.0, p=1.0),
ToTensorV2(),
]
)
# ## Model
class ResNet200D(nn.Module):
def __init__(self, model_arch, out_dim, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=False)
if pretrained:
pretrained_path = "../input/startingpointschestx/resnet200d_320_chestx.pth"
self.model.load_state_dict(torch.load(pretrained_path)["model"])
n_features = self.model.fc.in_features
self.model.global_pool = nn.Identity()
self.model.fc = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(n_features, out_dim)
def forward(self, x):
bs = x.size(0)
features = self.model(x)
pooled_features = self.pooling(features).view(bs, -1)
output = self.fc(pooled_features)
return output
class CustomResNet200D(nn.Module):
def __init__(self, model_arch, n_classes, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=False)
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, n_classes)
if pretrained:
# pretrained_path = '../input/startingpointschestx/resnet200d_320_chestx.pth'
# state_dict = dict()
# for k, v in torch.load(pretrained_path, map_location='cpu')["model"].items():
# if k[:6] == "model.":
# k = k.replace("model.", "")
# state_dict[k] = v
# # base_model.load_state_dict(state_dict)
# self.model.load_state_dict(state_dict)
# self.model.reset_classifier(0, '')
# print(f'load {model_name} pretrained model')
pretrained_path = "../input/startingpointschestx/resnet200d_320_chestx.pth"
checkpoint = torch.load(pretrained_path)["model"]
for key in list(checkpoint.keys()):
if "model." in key:
checkpoint[key.replace("model.", "")] = checkpoint[key]
del checkpoint[key]
self.model.load_state_dict(checkpoint)
print(f"load {model_arch} pretrained model")
n_features = self.model.fc.in_features
self.model.global_pool = nn.Identity()
self.model.fc = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(n_features, n_classes)
def forward(self, x):
bs = x.size(0)
features = self.model(x)
pooled_features = self.pooling(features).view(bs, -1)
output = self.fc(pooled_features)
return features, pooled_features, output
class SeResnet152D(nn.Module):
def __init__(self, model_arch, n_classes, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
n_features = self.model.fc.in_features
self.model.global_pool = nn.Identity()
self.model.fc = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(n_features, n_classes)
def forward(self, x):
bs = x.size(0)
features = self.model(x)
pooled_features = self.pooling(features).view(bs, -1)
output = self.fc(pooled_features)
return output
class CustomEffNet(nn.Module):
def __init__(self, model_arch, n_classes, pretrained=True):
super().__init__()
        self.model = timm.create_model(model_arch, pretrained=pretrained)
n_features = self.model.classifier.in_features
self.model.global_pool = nn.Identity()
self.model.classifier = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(n_features, n_classes)
def forward(self, x):
bs = x.size(0)
features = self.model(x)
pooled_features = self.pooling(features).view(bs, -1)
output = self.fc(pooled_features)
return output
class CustomResNext(nn.Module):
def __init__(self, model_arch, n_classes, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
n_features = self.model.fc.in_features
self.model.global_pool = nn.Identity()
self.model.fc = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(n_features, n_classes)
def forward(self, x):
bs = x.size(0)
features = self.model(x)
pooled_features = self.pooling(features).view(bs, -1)
output = self.fc(pooled_features)
return output
# [Back to CFG(Click here)](#cont)
def GetCriterion(criterion_name, criterion=None):
# if criterion_name == 'BiTemperedLoss':
# criterion = BiTemperedLogistic()
# elif criterion_name == 'SymmetricCrossEntropyLoss':
# criterion = SymmetricCrossEntropy()
if criterion_name == "CrossEntropyLoss":
criterion = nn.CrossEntropyLoss()
elif criterion_name == "LabelSmoothingLoss":
criterion = LabelSmoothingLoss()
# elif criterion_name == 'FocalLoss':
# criterion = FocalLoss()
# elif criterion_name == 'FocalCosineLoss':
# criterion = FocalCosineLoss()
elif criterion_name == "TaylorCrossEntropyLoss":
criterion = TaylorCrossEntropyLoss()
elif criterion_name == "TaylorSmoothedLoss":
criterion = TaylorSmoothedLoss()
elif criterion_name == "CutMix":
criterion = CutMixCriterion(criterion)
elif criterion_name == "SnapMix":
criterion = SnapMixLoss()
elif criterion_name == "CustomLoss":
criterion = CustomLoss(WEIGHTS)
elif criterion_name == "BCE":
criterion = nn.BCEWithLogitsLoss()
return criterion
def GetScheduler(scheduler_name, optimizer, batches=None):
# ['ReduceLROnPlateau', 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts', 'OneCycleLR', 'GradualWarmupSchedulerV2']
if scheduler_name == "OneCycleLR":
return torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=1e-2,
            epochs=EPOCHS,
steps_per_epoch=batches + 1,
pct_start=0.1,
)
if scheduler_name == "CosineAnnealingWarmRestarts":
return torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer, T_0=T_0, T_mult=1, eta_min=MIN_LR, last_epoch=-1
)
elif scheduler_name == "CosineAnnealingLR":
return torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=T_MAX, eta_min=0, last_epoch=-1
)
elif scheduler_name == "ReduceLROnPlateau":
return torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
factor=0.1,
patience=1,
threshold=0.0001,
cooldown=0,
min_lr=MIN_LR,
)
# elif scheduler_name == 'GradualWarmupSchedulerV2':
# return GradualWarmupSchedulerV2(optimizer=optimizer)
def GetOptimizer(optimizer_name, parameters):
# ['Adam','Ranger']
if optimizer_name == "Adam":
# if CFG.scheduler_name == 'GradualWarmupSchedulerV2':
# return torch.optim.Adam(parameters, lr=CFG.LR_START, weight_decay=CFG.weight_decay, amsgrad=False)
# else:
return torch.optim.Adam(
parameters, lr=LR, weight_decay=WEIGHT_DECAY, amsgrad=False
)
elif optimizer_name == "AdamW":
# if CFG.scheduler_name == 'GradualWarmupSchedulerV2':
# return torch.optim.AdamW(parameters, lr=CFG.LR_START, weight_decay=CFG.weight_decay, amsgrad=False)
# else:
return torch.optim.Adam(
parameters, lr=LR, weight_decay=WEIGHT_DECAY, amsgrad=False
)
elif optimizer_name == "AdamP":
# if CFG.scheduler_name == 'GradualWarmupSchedulerV2':
# return AdamP(parameters, lr=CFG.LR_START, weight_decay=CFG.weight_decay)
# else:
return AdamP(parameters, lr=LR, weight_decay=WEIGHT_DECAY)
elif optimizer_name == "Ranger":
return Ranger(
parameters,
lr=LR,
alpha=0.5,
k=6,
N_sma_threshhold=5,
betas=(0.95, 0.999),
weight_decay=WEIGHT_DECAY,
)
elif optimizer_name == "SAM":
return SAM(
parameters, BASE_OPTIMIZER, lr=0.1, momentum=0.9, weight_decay=0.0005
)
elif optimizer_name == "AdamP":
return AdamP(parameters, lr=LR, weight_decay=WEIGHT_DECAY)
# # Train and validation functions
def train_fn(model, dataloader, device, epoch, optimizer, criterion, scheduler):
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
model.train()
scaler = GradScaler()
start_time = time.time()
loader = tqdm(dataloader, total=len(dataloader))
for step, (images, labels) in enumerate(loader):
images = images.to(device).float()
labels = labels.to(device)
data_time.update(time.time() - start_time)
with autocast():
_, _, output = model(images)
loss = criterion(output, labels)
losses.update(loss.item(), BATCH_SIZE)
        scaler.scale(loss).backward()
        if (step + 1) % ITERS_TO_ACCUMULATE == 0:
            # unscale gradients before clipping so the threshold applies to the true gradient values
            scaler.unscale_(optimizer)
            grad_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=MAX_NORM)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            if scheduler is not None and SCHEDULER_UPDATE == "batch":
                scheduler.step()
batch_time.update(time.time() - start_time)
start_time = time.time()
if step % ITER_FREQ == 0:
print(
"Epoch: [{0}][{1}/{2}]\t"
"Batch Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t"
"Data Time {data_time.val:.3f}s ({data_time.avg:.3f}s)\t"
"Loss: {loss.val:.4f} ({loss.avg:.4f})".format(
(epoch + 1),
step,
len(dataloader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
)
)
# accuracy=accuracies))
# To check the loss real-time while iterating over data. 'Accuracy {accuracy.val:.4f} ({accuracy.avg:.4f})'
loader.set_description(f"Training Epoch {epoch+1}/{EPOCHS}")
loader.set_postfix(loss=losses.avg) # accuracy=accuracies.avg)
# del images, labels
if scheduler is not None and SCHEDULER_UPDATE == "epoch":
scheduler.step()
return losses.avg # , accuracies.avg
def valid_fn(epoch, model, criterion, val_loader, device, scheduler):
model.eval()
losses = AverageMeter()
accuracies = AverageMeter()
PREDS = []
TARGETS = []
loader = tqdm(val_loader, total=len(val_loader))
with torch.no_grad(): # without torch.no_grad() will make the CUDA run OOM.
for step, (images, labels) in enumerate(loader):
images = images.to(device)
labels = labels.to(device)
_, _, output = model(images)
loss = criterion(output, labels)
            losses.update(loss.item(), images.size(0))
PREDS += [output.sigmoid()]
TARGETS += [labels.detach().cpu()]
loader.set_description(f"Validating Epoch {epoch+1}/{EPOCHS}")
loader.set_postfix(loss=losses.avg) # , accuracy=accuracies.avg)
PREDS = torch.cat(PREDS).cpu().numpy()
TARGETS = torch.cat(TARGETS).cpu().numpy()
roc_auc = macro_multilabel_auc(TARGETS, PREDS)
    # only plateau schedulers step on the validation metric here; the others are stepped in train_fn
    if scheduler is not None and isinstance(scheduler, ReduceLROnPlateau):
        scheduler.step(losses.avg)
return losses.avg, roc_auc # accuracies.avg
# [Back to CFG(Click here)](#cont)
# # Main
def engine(device, folds, fold, model_path=None):
trn_idx = folds[folds["kfold"] != fold].index
val_idx = folds[folds["kfold"] == fold].index
train_folds = folds.loc[trn_idx].reset_index(drop=True)
valid_folds = folds.loc[val_idx].reset_index(drop=True)
train_folds = train_folds[
train_folds["StudyInstanceUID"].isin(
train_annotations["StudyInstanceUID"].unique()
)
].reset_index(drop=True)
valid_folds = valid_folds[
valid_folds["StudyInstanceUID"].isin(
train_annotations["StudyInstanceUID"].unique()
)
].reset_index(drop=True)
train_data = RanzcrDataset(
train_folds, train_annotations, transform=get_transform()
)
val_data = RanzcrDataset(
valid_folds, train_annotations, transform=get_transform(train=False)
)
train_loader = DataLoader(
train_data,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=NUM_WORKERS,
pin_memory=True, # enables faster data transfer to CUDA-enabled GPUs.
drop_last=True,
)
val_loader = DataLoader(
val_data,
batch_size=VAL_BATCH_SIZE,
num_workers=NUM_WORKERS,
shuffle=False,
pin_memory=True,
drop_last=False,
)
if model_path is not None:
model = torch.load(model_path)
START_EPOCH = int(model_path.split("_")[-1])
else:
model = CustomResNet200D(MODEL_ARCH, 11, True)
START_EPOCH = 0
model.to(device)
params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = GetOptimizer(OPTIMIZER, params)
criterion = GetCriterion(CRITERION).to(device)
val_criterion = GetCriterion(CRITERION).to(device)
scheduler = GetScheduler(SCHEDULER, optimizer)
loss = []
accuracy = []
for epoch in range(START_EPOCH, EPOCHS):
epoch_start = time.time()
avg_loss = train_fn(
model, train_loader, device, epoch, optimizer, criterion, scheduler
)
torch.cuda.empty_cache()
avg_val_loss, roc_auc_score = valid_fn(
epoch, model, val_criterion, val_loader, device, scheduler
)
epoch_end = time.time() - epoch_start
print(f"Validation accuracy after epoch {epoch+1}: {roc_auc_score:.4f}")
loss.append(avg_loss)
# accuracy.append(avg_accuracy)
content = f"Fold {fold} Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} roc_auc_score: {roc_auc_score:.4f} time: {epoch_end:.0f}s"
with open(f"GPU_{MODEL_ARCH}_{OPTIMIZER}_{CRITERION}.txt", "a") as appender:
appender.write(content + "\n") # avg_train_accuracy: {avg_accuracy:.4f}
# Save the model to use it for inference.
torch.save(
model.state_dict(), f"stage1_{MODEL_ARCH}_fold_{fold}_epoch_{(epoch+1)}.pth"
)
# torch.save(model, f'stage1_{MODEL_ARCH}_fold_{fold}_epoch_{(epoch+1)}')
torch.cuda.empty_cache()
return loss
if __name__ == "__main__":
if MODEL_PATH is not None:
START_FOLD = int(MODEL_PATH.split("_")[-3])
for fold in range(START_FOLD, N_FOLDS):
print(f"===== Fold {fold} Starting =====")
fold_start = time.time()
logs = engine(DEVICE, folds, fold, MODEL_PATH)
print(f"Time taken in fold {fold}: {time.time()-fold_start}")
|
# # LesionFinder: Bounding Box Regression for Chest CT (Evaluation)
# Here, we evaluate the previously trained LesionFinder model for lesion bounding box regression. The training notebook can be found [here](https://www.kaggle.com/code/benjaminahlbrecht/lesionfinder-bounding-box-regression-for-chest-ct).
# ## Preamble
# ---------------------------------------------------------------------------- #
import os
import torch
from torch.utils.data import DataLoader
import lightning.pytorch as pl
from lesionfinder_utilities import (
DeepLesionDataset,
ResizeWithBoundingBox,
LesionFinder,
)
MODEL_FNAME = "/kaggle/input/lesionfinder-bounding-box-regression-for-chest-ct/models/model_epoch=1_val_loss=0.56.ckpt"
DATA_DIR = "/kaggle/input/nih-deeplesion-tensor-slices/tensors"
DATA_DIR_TEST = os.path.join(DATA_DIR, "test")
if torch.cuda.is_available():
DEVICE = "cuda"
else:
DEVICE = "cpu"
# Feed data in by mini-batches using gradient accumulation
MINIBATCH_SIZE = 12
N_MINIBATCHES = 6
# Height and width to resize images
HEIGHT = 500
WIDTH = 500
# PyTorch Lightning Params
ACCELERATOR = "gpu"
N_DEVICES = 1
augmentations = ResizeWithBoundingBox((HEIGHT, WIDTH))
dataset_test = DeepLesionDataset(DATA_DIR_TEST, augmentations=augmentations)
dataloader_test = DataLoader(dataset_test, batch_size=MINIBATCH_SIZE)
# Retrieve the best model
model = LesionFinder.load_from_checkpoint(MODEL_FNAME)
# ## Model Evaluation
# Redefine our trainer to contain our evaluation metrics
trainer = pl.Trainer(
devices=N_DEVICES,
accelerator=ACCELERATOR,
accumulate_grad_batches=N_MINIBATCHES,
)
results = trainer.test(model, dataloaders=dataloader_test)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # 01. Importing Modules (Libraries)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
# # 02. Loading Datasets
train = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
train
# Quick look at the data types and null contents
train.info()
test.info()
# # Note:
# - Train data has one label (target, 'Transported') and 13 features
# - Test data has only 13 features
# - The label of the train data has to be predicted
# Quick look at missing data in 'train' dataset
missingno.matrix(train)
# Quick look at missing data in 'test' dataset
missingno.matrix(test)
# # Note:
# - Train data: 'PassengerId' and 'Transported' have no missing values.
# - Test data: except 'PassengerId', all of the columns have missing values.
# - Overall, both datasets have a similar amount of missing values.
train[train["HomePlanet"].isnull()]
test[test["HomePlanet"].isnull()]
# # 03. Joining Train and Test Datasets
# - For analyzing both train and test data on the joined dataset.
data = pd.concat([train, test], axis=0)
data.info()
missingno.matrix(data, figsize=(40, 30))
# # 04. Feature Analysis Using Numerics and Visualizations
data.info()
# # Note:
# # Two groups of features:
#
# - Numeric features: Age, RoomService, FoodCourt, ShoppingMall, Spa, VRDeck
# - Categorical features: PassengerId, HomePlanet, CryoSleep, Cabin, Destination, VIP, Name, Transported
# # 4.1 Numerical Features
num_cols = ["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
# # 4.1.1 Correlation Analysis
train[num_cols].corr()
sns.heatmap(data=train[num_cols].corr(), annot=True, cmap="YlGnBu")
plt.show()
cat_cols = [
"PassengerId",
"HomePlanet",
"CryoSleep",
"Cabin",
"Destination",
"VIP",
"Name",
"Transported",
]
# # a. Transported vs Age
sns.catplot(data=data, x="Transported", y="Age", kind="bar", aspect=1.2)
train[["Transported", "Age"]].groupby(by="Transported").mean()
sns.catplot(data=data, x="Transported", y="Age", kind="bar", aspect=1.2)
# # b. Transported vs RoomService
sns.catplot(
data=data, x="HomePlanet", y="Age", hue="Transported", kind="violin", aspect=1.2
)
sns.catplot(
data=data, x="Transported", y="Age", hue="HomePlanet", kind="bar", aspect=1.2
)
sns.catplot(
data=data, x="Destination", y="Spa", hue="Cabin", col="HomePlanet", kind="bar"
)
sns.catplot(data=data, x="Transported", y="FoodCourt", kind="bar", aspect=1.2)
# # d. Transported vs ShoppingMall
sns.catplot(data=data, x="Transported", y="ShoppingMall", kind="bar", aspect=1.2)
# # e. Transported vs Spa
sns.catplot(data=data, x="Transported", y="Spa", kind="bar", aspect=1.2)
# # f. Transported vs VRDeck
sns.catplot(data=data, x="Transported", y="VRDeck", kind="bar", aspect=1.2)
|
# ### Recently I published a self-help book titled Inspiration: Thoughts on Spirituality, Technology, Wealth, Leadership and Motivation. The preview of the book can be read from the Amazon link https://lnkd.in/gj7bMQA
# Some years back I went on a trip to Kodaikanal in Tamil Nadu. There we had a fruit milkshake made from what the locals referred to as Butter Fruit. The fruit indeed tasted like butter, and the memory of it stayed fresh in my mind. Many years later I found out that this fruit is called an avocado. In this kernel we will explore and visualize the dataset and try to forecast the price of avocados. This notebook covers the following topics:
# 1. Data Import and Preprocessing
# 2. Exploratory Data Analysis
# 3. Facebook Prophet USA Price Forecast
# 4. Facebook Prophet California Price Forecast
# 5. Conclusion
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
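# Since the kernel stops at the boilerplate above, here is a minimal sketch of the Facebook Prophet forecast
# promised in the outline. It assumes the avocado data sits at "../input/avocado-prices/avocado.csv" with
# "Date", "AveragePrice" and "region" columns and a "TotalUS" region - these names are assumptions, adjust
# them to the actual file before running.
from prophet import Prophet  # older environments ship this as: from fbprophet import Prophet

avocado = pd.read_csv("../input/avocado-prices/avocado.csv", parse_dates=["Date"])
usa = avocado[avocado["region"] == "TotalUS"]
# Prophet expects exactly two columns: ds (date) and y (value to forecast)
usa_prices = (
    usa[["Date", "AveragePrice"]]
    .rename(columns={"Date": "ds", "AveragePrice": "y"})
    .sort_values("ds")
)
m = Prophet()
m.fit(usa_prices)
future = m.make_future_dataframe(periods=365)  # forecast one year ahead
forecast = m.predict(future)
m.plot(forecast)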
|
# # Spam tweets detection
# The provided dataset consists of tweets, and the task is to define which of them are spam tweets.
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from wordcloud import WordCloud
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, roc_auc_score
# First of all, let us load the train and test datasets and see what data is at our disposal.
train_data = pd.read_csv(
"/kaggle/input/utkmls-twitter-spam-detection-competition/train.csv"
)
test_data = pd.read_csv(
"/kaggle/input/utkmls-twitter-spam-detection-competition/test.csv"
)
train_data.head()
test_data.head()
# Train and test datasets contain tweets themselves, number of accounts being followed and followers of a twitter account that posted the tweet, number of actions (total number of favorites, replies, and retweets), indication whether each post is a retweet and location of the user. As it is mentioned in the dataset [description](https://www.kaggle.com/competitions/utkmls-twitter-spam-detection-competition/data), these locations are not standardized, so they should be handled carefully.
# The train dataset also has a 'Type' column indicating whether a tweet is a spam or a quality one, and an 'Unnamed' column.
# I will remove that 'Unnamed' column from the train dataset, as it seems to have been added by mistake and does not provide any information. Besides, we need to convert the string values in the 'Type' column into digits for further processing, so let us create a boolean 'IsSpam' column, with a "1" score for spam tweets and a "0" score for quality tweets.
train_data.drop("Unnamed: 7", axis=1, inplace=True)
train_data["IsSpam"] = np.where(train_data["Type"] == "Spam", 1, 0)
train_data.info()
train_data.describe()
print(
    "Percentage of quality tweets: {:.2%}".format(
        sum(train_data["IsSpam"] == 0) / len(train_data["IsSpam"])
    )
)
print(
    "Percentage of spam tweets: {:.2%}".format(
        sum(train_data["IsSpam"] == 1) / len(train_data["IsSpam"])
    )
)
# The train dataset is well-balanced, the distribution of spam and quality tweets are 50% and 50%, that will facilitate the training process.
# Let us also look at the test dataset to see if there are any oddities in the data:
test_data.info()
test_data.describe()
# Next, we can derive some additional data about tweets:
# - length of a tweet (number of characters);
# - used hashtags and their quantity,
# - used mentions of other accounts and their quantity,
# - indicated numbers and their length;
# - indicated links.
# There are some assumptions behind these indicators. Regarding the length of a tweet, my guess is that spam tweets tend to be shorter, as they are basically a random combination of words that make no sense. Instead, real users try to give as much information as they can fit into twitter's limit. Usage of an excessive number of hashtags and mentions could also be a sign that it is a spam tweet. Numbers are widely used in scam schemes, and as one can expect, spam messages often provide information that way: "1,000,000 USD", while in quality messages it is more common to indicate a short "1 mln USD". Besides, usage of specific links could potentially be an indicator of spam, but in this case an investigation regarding domain names should be conducted.
# Nevertheless, these assumptions should be proved or rejected, and it is the next task to do.
# Define length of a tweet (number of characters)
def tweet_len(df):
df["TweetLen"] = df["Tweet"].str.len()
return df
# Extract used hashtags from a tweet
def extract_hashtags(df):
df["Hashtags"] = df["Tweet"].str.findall(r"\#\w+\b")
df["HashtagsCount"] = df["Hashtags"].str.len()
df["Hashtags"] = df["Hashtags"].str.join(" ")
return df
# Extract used mentions from a tweet
def extract_mentions(df):
df["Mentions"] = df["Tweet"].str.findall(r"\@\w+\b")
df["MentionsCount"] = df["Mentions"].str.len()
df["Mentions"] = df["Mentions"].str.join(" ")
return df
# Extract used digits from a tweet
def extract_digits(df):
    df["Digits"] = df["Tweet"].str.findall(r"\d+")
    df["DigitsCount"] = df["Digits"].str.len()
    df["Digits"] = df["Digits"].str.join(" ")
    df["DigitsLen"] = df["Digits"].str.len()
    return df
# Extract used links from a tweet
def extract_links(df):
    df["Links"] = df["Tweet"].str.findall(
        r"((?:(?:\w+)\.twitter\.com\/(?:\w+|)|https?://)(?:\w|\.|\/|\-|\#|\=|\?|\_|\&|\%)+\b)"
    )
    df["LinksCount"] = df["Links"].str.len()
    df["Links"] = df["Links"].str.join(" ")
    df["DomainNames"] = df["Links"].str.findall(r"\w+\.\w+(?:\.\w+|)")
    df["DomainNames"] = df["DomainNames"].str.join(" ")
    return df
# Function to replace missing values
def replace_NaN(df, value):
df.fillna(value=value, inplace=True)
return df
datasets = [train_data, test_data]
for df in datasets:
tweet_len(df)
extract_hashtags(df)
extract_mentions(df)
extract_digits(df)
extract_links(df)
replace_NaN(df, value=-1)
# Let us look at what we have created so far.
train_data[
[
"Tweet",
"TweetLen",
"Hashtags",
"HashtagsCount",
"Mentions",
"MentionsCount",
"Digits",
"DigitsCount",
"DigitsLen",
"Links",
"LinksCount",
"DomainNames",
]
].head(5)
test_data[
[
"Tweet",
"TweetLen",
"Hashtags",
"HashtagsCount",
"Mentions",
"MentionsCount",
"Digits",
"DigitsCount",
"DigitsLen",
"Links",
"LinksCount",
"DomainNames",
]
].head(5)
# The correlation matrix may provide some insights of how different features depend (or not depend) on each other, and whether there is any dependency between those features and the type of a tweet.
plt.figure(figsize=(15, 8))
sns.heatmap(
train_data[
[
"following",
"followers",
"actions",
"is_retweet",
"TweetLen",
"HashtagsCount",
"MentionsCount",
"DigitsCount",
"DigitsLen",
"LinksCount",
"IsSpam",
]
].corr(),
annot=True,
fmt=".2f",
).set_title("Correlation matrix between features of tweets and their quality")
# Judging by the correlation matrix above, if a tweet is characterized by a lot of actions, or if it is a retweet, there are more chances that this (re)tweet is spam.
tweets = train_data[train_data["is_retweet"] == 0].copy()
retweets = train_data[train_data["is_retweet"] == 1].copy()
print("Number of tweets: {}".format(len(tweets)))
print("Number of retweets: {}".format(len(retweets)))
print("Number of spam retweets: {}".format(sum(retweets["IsSpam"] == 1)))
print("Number of quality retweets: {}".format(sum(retweets["IsSpam"] == 0)))
retweets_stat = pd.Series(
data={
"Tweets": len(tweets),
"ReTweets": len(retweets),
"Spam ReTweets": sum(retweets["IsSpam"] == 1),
"Quality ReTweets": sum(retweets["IsSpam"] == 0),
}
)
sns.barplot(x=retweets_stat.index, y=retweets_stat)
# As it can be seen from the figure above, most of the retweets were marked as spam in the provided train dataset. But it is also important to consider that the total number of retweets is approximately three times less than the number of tweets.
spam_train = train_data[train_data["IsSpam"] == 1].copy()
quality_train = train_data[train_data["IsSpam"] == 0].copy()
cols = ["HashtagsCount", "MentionsCount", "LinksCount"]
print("Quality tweets:")
for col in cols:
print(
"Average number of {} is {:.2f}".format(
col.replace("Count", ""), quality_train[col].mean()
)
)
print("\nSpam tweets:")
for col in cols:
print(
"Average number of {} is {:.2f}".format(
col.replace("Count", ""), spam_train[col].mean()
)
)
# It seems that there are no significant differences in hashtag, mention and link usage between quality and spam tweets. Nevertheless, spam tweets tend to have slightly more hashtags and links than the quality ones.
def most_frequent(df, vocab):
words = []
for data in df[vocab]:
words.extend(str(data).lower().split())
fdist = nltk.FreqDist(words)
most_frequent = []
    # Use different frequency thresholds for Tweets, Hashtags, Mentions and Domain Names,
    # because Hashtags, Mentions and Domain Names are used less frequently due to their function
if vocab == "Tweet":
most_frequent = [
w for w in fdist if w.isalpha() and len(w) > 5 and fdist[w] > 50
]
else:
most_frequent = [w for w in fdist if fdist[w] > 15]
frequency = []
for word in most_frequent:
frequency.append(fdist[word])
sns.barplot(x=frequency, y=most_frequent)
return
most_frequent(quality_train, vocab="Tweet")
most_frequent(spam_train, vocab="Tweet")
# Judging by the most frequent words used in spam tweets, the main topics are news on politics and crime. For the quality tweets, it is more difficult to determine the discussion topics; it can be seen that some of them relate to day-to-day life (e.g. birthday congratulations, gratitude messages, school discussions).
most_frequent(quality_train, vocab="Hashtags")
most_frequent(spam_train, vocab="Hashtags")
# Most frequent hashtags in quality tweets turned out to be connected with k-pop music, whilst most frequent hashtags in spam tweets again relate to news.
most_frequent(quality_train, vocab="Mentions")
most_frequent(spam_train, vocab="Mentions")
# In quality tweets the most frequent mentions are of youtube and a k-pop singer (it seems this dataset contains a lot of tweets from k-pop fans 😉). The political figures are in turn frequently mentioned in spam tweets.
most_frequent(quality_train, vocab="DomainNames")
most_frequent(spam_train, vocab="DomainNames")
# That is an interesting finding - quality tweets have a great variety of websites being shared, whilst the most frequent domain in spam tweets is "t.co". Let us check how many instances of this domain are in the quality dataset:
print(
'Out of {} quality tweets, {} tweets contain the "t.co" domain'.format(
len(quality_train["DomainNames"]),
        len(quality_train[quality_train["DomainNames"].str.contains("t.co", regex=False)]),
)
)
# My thought regarding this: "t.co" is a twitter domain and not a sign of a spam message in itself, but spam tweets are quite monotonous in their website use.
# Below are wordclouds - visualisations of most frequent words used in the tweets and most frequent locations indicated in twitter accounts in the provided train dataset.
def show_wordcloud(df, vocab):
words = []
for text in df[vocab]:
word = str(text).lower().split()
words.extend(word)
all_words = " ".join(words)
# Remove links
all_words = re.sub("https?://(\w|\d|\.|\/|\-|\#|\=|\?|\_|\&|\%)+\s", "", all_words)
all_words = re.sub("(?:(?:\w+)\.twitter\.com\/(?:\w+|))", "", all_words)
wordcloud = WordCloud(colormap="plasma").generate(all_words)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
return
show_wordcloud(df=quality_train, vocab="Tweet")
show_wordcloud(df=spam_train, vocab="Tweet")
show_wordcloud(df=quality_train, vocab="location")
show_wordcloud(df=spam_train, vocab="location")
# I am also interested at what discussion topics can be derived from the provided quality and spam tweets, let us find that out by using Latent Dirichlet Allocation:
def get_discussion_topics(df, topics_quant):
"""Function determines main discussion topics.
Variables:
df - dataset,
topics_quant - desired number of topics to be found"""
words = []
for data in df["Tweet"]:
words.extend(str(data).lower().split())
exclude_words = stopwords.words("english")
words = [w for w in words if w.isalpha() and len(w) > 5 and w not in exclude_words]
tf_idf_vectorizer = TfidfVectorizer()
tf_idf_arr = tf_idf_vectorizer.fit_transform(words)
vocab_tf_idf = tf_idf_vectorizer.get_feature_names_out()
lda_model = LatentDirichletAllocation(
n_components=topics_quant, max_iter=20, random_state=12
)
X_topics = lda_model.fit_transform(tf_idf_arr)
topic_words = lda_model.components_
for i, topic_dist in enumerate(topic_words):
sorted_topic_dist = np.argsort(topic_dist)
detected_topics = np.array(vocab_tf_idf)[sorted_topic_dist]
detected_topics = detected_topics[:-11:-1]
print("Detected tweets' topic {}:".format(i + 1))
print(detected_topics)
get_discussion_topics(df=quality_train, topics_quant=5)
get_discussion_topics(df=spam_train, topics_quant=5)
# As within the analysis of the most frequent words in tweets, provided above, it looks like spam tweets are mainly focused on politics topics, whilst quality tweets are more neutral and focused on variety of topics such as everyday life, holidays congratulations, sharing of impressions, etc.
# Now as the additional information has been extracted from the provided datasets and analysed, we may proceed with spam detection modelling.
def apply_models(model, X_train, X_test, y_train, y_test):
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_score = model.predict_proba(X_test)[:, 1]
target_names = ["Spam", "Quality"]
accuracy = accuracy_score(y_test, y_pred)
roc_auc = roc_auc_score(y_test, y_score)
print("Model: {}".format(model))
print("Accuracy score is %0.4f" % accuracy)
print("ROC-AUC score is %0.4f" % roc_auc)
# I am going to try the simplest approach and see if the indicators derived from the initial dataset can be useful for predicting whether a tweet is spam.
features = [
"following",
"followers",
"actions",
"is_retweet",
"TweetLen",
"HashtagsCount",
"MentionsCount",
"DigitsCount",
"DigitsLen",
"LinksCount",
]
X_train_t, X_test_t, y_train_t, y_test_t = train_test_split(
train_data[features], train_data["IsSpam"], random_state=42
)
models = [
GaussianNB(),
LogisticRegression(max_iter=1000),
SVC(probability=True),
RandomForestClassifier(),
]
for model in models:
apply_models(model, X_train_t, X_test_t, y_train_t, y_test_t)
# It seems that random forest classifier shows the best results, so I will apply it in my submission file...
rf = RandomForestClassifier()
rf.fit(train_data[features], train_data["IsSpam"])
y_pred = rf.predict(test_data[features])
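# As a final step, the predictions can be written to a submission file. This is only a sketch: I assume the
# sample submission expects an "Id" column taken from the test data index and a "Type" column with the
# Quality/Spam labels - check the competition's sample_submission.csv for the exact column names.
submission = pd.DataFrame(
    {
        "Id": test_data.index,
        "Type": np.where(y_pred == 1, "Spam", "Quality"),
    }
)
submission.to_csv("submission.csv", index=False)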
|
from transformers import AutoTokenizer
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
import warnings
import os
warnings.simplefilter("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class PretrainingDataset(Dataset):
def __init__(self, texts, tokenizer, texts_pair=None, max_length=512):
super().__init__()
self.texts = texts
self.texts_pair = texts_pair
self.tokenizer = tokenizer
self.max_length = max_length
if self.texts_pair is not None:
assert len(self.texts) == len(self.texts_pair)
def __len__(self):
return len(self.texts)
def tokenize(self, text, text_pair=None):
return self.tokenizer(
text=text,
text_pair=text_pair,
max_length=self.max_length,
truncation=True,
padding=False,
return_attention_mask=True,
add_special_tokens=True,
return_special_tokens_mask=True,
return_token_type_ids=False,
return_offsets_mapping=False,
return_tensors=None,
)
def __getitem__(self, index):
text = self.texts[index]
text_pair = None
if self.texts_pair is not None:
text_pair = self.texts_pair[index]
        tokenized = self.tokenize(text, text_pair)
return tokenized
data_path = "/kaggle/input/feedback-prize-english-language-learning/train.csv"
data = pd.read_csv(data_path)
texts = data["full_text"].values
model_name_or_path = "microsoft/deberta-v3-base"
max_length = 512
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
dataset = PretrainingDataset(
texts=texts,
tokenizer=tokenizer,
max_length=max_length,
)
import sys
sys.path.append("/kaggle/input/pretraining/pretraining-main/src")
from transformers import (
AutoModelForMaskedLM,
TrainingArguments,
Trainer,
DataCollatorForLanguageModeling,
)
model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=True,
mlm_probability=0.15,
)
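# The AutoModelForMaskedLM, TrainingArguments and Trainer imported above are not used further (the notebook
# switches to a PyTorch Lightning implementation below). For reference, a minimal sketch of how these pieces
# could be wired together with the Hugging Face Trainer - the output directory and hyperparameters here are
# assumptions, not values from the original notebook.
training_args = TrainingArguments(
    output_dir="mlm-pretraining",  # assumed output path
    per_device_train_batch_size=8,
    num_train_epochs=1,
    learning_rate=2e-5,
    save_strategy="epoch",
    report_to="none",
)
hf_trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    data_collator=data_collator,
    tokenizer=tokenizer,
)
# hf_trainer.train()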
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.checkpoint import checkpoint
from transformers import AutoModel, AutoConfig
from torchmetrics import functional as metrics
from pytorch_lightning import LightningModule, Trainer
from pretraining.data_collators import MaskedLanguageModelingDataCollator
import math
import os
class MaskedLanguageModelingModel(LightningModule):
def __init__(
self,
model_name_or_path,
tokenizer,
config=None,
ignore_index=-100,
gradient_checkpointing=False,
):
super().__init__()
self.ignore_index = ignore_index
self.config = config
self.token_embeddings_size = len(tokenizer)
if self.config is None:
self.config = AutoConfig.from_pretrained(model_name_or_path)
self.config.output_hidden_states = True
self.backbone = AutoModel.from_pretrained(
model_name_or_path, config=self.config
)
self.backbone.resize_token_embeddings(self.token_embeddings_size)
self.head = nn.Linear(
in_features=self.config.hidden_size, out_features=self.token_embeddings_size
)
if gradient_checkpointing:
self.backbone.gradient_checkpointing_enable()
print(f"Gradient Checkpointing: {self.backbone.is_gradient_checkpointing}")
self.save_hyperparameters()
def forward(self, input_ids, attention_mask=None, **kwargs):
backbone_outputs = self.backbone(
input_ids=input_ids,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = backbone_outputs.hidden_states
hidden_state = hidden_states[-1]
        # project every token position onto the vocabulary for masked language modeling
        outputs = self.head(hidden_state)
return outputs
def training_step(self, batch, batch_index):
input_ids = batch["input_ids"].to(torch.int32)
attention_mask = batch["attention_mask"].to(torch.int32)
labels = batch["labels"].to(torch.float16)
outputs = self(input_ids=input_ids, attention_mask=attention_mask)
loss = F.cross_entropy(
input=outputs, target=labels, ignore_index=self.ignore_index
)
perplexity = math.exp(loss)
# accuracy
predictions = torch.softmax(outputs, dim=-1)
accuracy = self.compute_accuracy(predictions, labels)
logs = {
"train/loss": loss,
"train/perplexity": perplexity,
"train/accuracy": accuracy,
}
self.log_dict(logs, prog_bar=False, on_step=True, on_epoch=True)
return loss
def validation_step(self, batch, batch_index):
input_ids = batch["input_ids"].to(torch.int32)
attention_mask = batch["attention_mask"].to(torch.int32)
labels = batch["labels"].to(torch.float16)
outputs = self(input_ids=input_ids, attention_mask=attention_mask)
        # flatten so batches with different sequence lengths can be concatenated at epoch end
        return {
            "outputs": outputs.reshape(-1, outputs.size(-1)),
            "labels": labels.reshape(-1),
        }
def validation_epoch_end(self, validation_outputs):
outputs = torch.cat([output["outputs"] for output in validation_outputs], dim=0)
labels = torch.cat([output["labels"] for output in validation_outputs], dim=0)
loss = F.cross_entropy(
input=outputs, target=labels, ignore_index=self.ignore_index
)
perplexity = math.exp(loss)
# accuracy
predictions = torch.softmax(outputs, dim=-1)
accuracy = self.compute_accuracy(predictions, labels)
logs = {
"validation/loss": loss,
"validation/perplexity": perplexity,
"validation/accuracy": accuracy,
}
self.log_dict(logs, prog_bar=False, on_step=False, on_epoch=True)
def predict_step(self, batch, batch_index):
input_ids = batch["input_ids"].to(torch.int32)
attention_mask = batch["attention_mask"].to(torch.int32)
outputs = self(input_ids=input_ids, attention_mask=attention_mask)
return outputs
    def compute_accuracy(self, predictions, labels):
        # predictions: (..., vocab_size) probabilities, labels: (...) token indices
        predictions = predictions.argmax(dim=-1).reshape(-1)
        labels = labels.reshape(-1)
        mask = labels != self.ignore_index
        predictions, labels = predictions[mask], labels[mask]
        accuracy = (predictions == labels).float().mean()
        return accuracy
data_collator = MaskedLanguageModelingDataCollator(
input_key="input_ids",
label_key="label",
tokenizer=tokenizer,
special_tokens_mask_key="special_tokens_mask",
masking_probability=0.15,
padding_keys=["input_ids", "attention_mask", "special_tokens_mask"],
padding_values=[tokenizer.pad_token_id, 1, 1],
)
dataloader = DataLoader(
dataset=dataset,
collate_fn=data_collator,
)
model = MaskedLanguageModelingModel(
model_name_or_path=model_name_or_path,
tokenizer=tokenizer,
gradient_checkpointing=False,
)
# trainer = Trainer(...)
# trainer.fit(model=model, train_dataloaders=[dataloader], ckpt_path=None)
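# A minimal sketch of how the commented-out trainer above could be configured - the epoch count and
# accelerator choice are assumptions, not values from the original notebook.
trainer = Trainer(
    max_epochs=1,
    accelerator="gpu" if torch.cuda.is_available() else "cpu",
    devices=1,
)
# trainer.fit(model=model, train_dataloaders=dataloader)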
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
leagues = {
"bundesliga": "Bundesliga",
"laliga": "Laliga",
"ligue1": "Ligue 1",
"premier_league": "Premier League",
"seriea": "Serie A",
}
footballer_info, player_stat, goalkeeper_stat = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for dirname, _, filenames in os.walk("/kaggle/input"):
fi_delim = len("footballer_info") + 1
ps_delim = len("player_stats") + 1
for filename in filenames:
file_dir = os.path.join(dirname, filename)
print(file_dir)
if dirname.find("eu-football-transfer-price") > 0:
table = pd.read_csv(file_dir, encoding="utf-16", sep="\t", header=0).assign(
League=leagues[filename[fi_delim:-4]]
)
            footballer_info = pd.concat([footballer_info, table], ignore_index=True)
elif filename.find("_gks") > 0:
table = pd.read_csv(file_dir, encoding="utf-16", sep="\t", header=0).assign(
League=leagues[filename[ps_delim:-8]]
)
            goalkeeper_stat = pd.concat([goalkeeper_stat, table], ignore_index=True)
else:
table = pd.read_csv(file_dir, encoding="utf-16", sep="\t", header=0).assign(
League=leagues[filename[ps_delim:-4]]
)
            player_stat = pd.concat([player_stat, table], ignore_index=True)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# a glance of footballer_info data
footballer_info.head(30)
# a glance of player_stat data
player_stat
# a glance of goalkeeper_stat data
goalkeeper_stat
footballer_info["Current Value"] = footballer_info.apply(
lambda row: float(row["Current Value"][1:-1]) / 1000
if row["Current Value"][-1] == "k"
else float(row["Current Value"][1:-1]),
axis=1,
)
footballer_info.head(30)
for league in leagues.values():
print(
f"{league}: {footballer_info[footballer_info['League'] == league].shape} - {player_stat[player_stat['League'] == league].shape} - {goalkeeper_stat[goalkeeper_stat['League'] == league].shape}"
)
for league in leagues.values():
fi_position = set(footballer_info[footballer_info["League"] == league]["Position"])
ps_position = set(player_stat[player_stat["League"] == league]["Position"])
print(
f"""{league}:
footballer_info only : {list(fi_position - ps_position)}
player_stat only : {list(ps_position - fi_position)}
intersection : {len(fi_position.intersection(ps_position))} elements
"""
)
for league in leagues.values():
fi_club = set(footballer_info[footballer_info["League"] == league]["Club"])
ps_club = set(player_stat[player_stat["League"] == league]["Club"])
gk_club = set(goalkeeper_stat[goalkeeper_stat["League"] == league]["Club"])
print(
f"""{league}:
footballer_info - player_stat : {list(fi_club - ps_club)}
player_stat - footballer_info : {list(ps_club - fi_club)}
goalkeeper_stat - footballer_info : {list(gk_club - fi_club)}
intersection : {len(fi_club.intersection(ps_club))} elements
"""
)
for_2_clubs_name = list(player_stat[player_stat["Club"] == "for 2 clubs"]["Name"])
latest_club = footballer_info[footballer_info["Name"].isin(for_2_clubs_name)][
["Name", "Club"]
]
print(f"""{len(latest_club)} / {len(for_2_clubs_name)} players""")
latest_club
m = latest_club.set_index("Name")["Club"]
player_stat["Club"] = player_stat["Name"].map(m).fillna(player_stat["Club"])
player_stat.loc[
player_stat["Name"].isin(latest_club["Name"]), ["Name", "Club"]
].sort_values(by=["Name"]).reset_index(drop=True).equals(
latest_club.sort_values(by=["Name"]).reset_index(drop=True)
)
column_join = footballer_info.columns.intersection(player_stat.columns)
football_stats = pd.merge(footballer_info, player_stat, on=list(column_join))
football_stats[football_stats["Nation"] != football_stats["Nationality"]]
football_stats.drop("Nationality", axis=1, inplace=True)
column_join = footballer_info.columns.intersection(goalkeeper_stat.columns)
goalkeeper_stats = pd.merge(footballer_info, goalkeeper_stat, on=list(column_join))
goalkeeper_stats.drop("Nationality", axis=1, inplace=True)
football_stats.to_csv("player_statistics.csv", encoding="utf-16", sep="\t", index=False)
goalkeeper_stats.to_csv(
"goalkeeper_statistics.csv", encoding="utf-16", sep="\t", index=False
)
# # Perform Feature Engineering
# You might be presented with hundreds or thousands of features without even a description to go by. Where do you even begin?
# A great first step is to construct a ranking with a feature utility metric, a function measuring associations between a feature and the target. Then you can choose a smaller set of the most useful features to develop initially and have more confidence that your time will be well spent.
# ## Mutual Information
# Encode string columns
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
for col in ["Position", "Club", "Nation", "League"]:
football_stats[col] = le.fit_transform(football_stats[col].astype(str))
goalkeeper_stats[col] = le.fit_transform(goalkeeper_stats[col].astype(str))
# Feature engineering for football stats
def create_features(df):
# Create a feature for player age groups
df["Age_Group"] = pd.cut(
df["Age"], bins=[17, 24, 29, 34, 40], labels=["18-24", "25-29", "30-34", "35+"]
)
df["Age_Group"] = le.fit_transform(df["Age_Group"].astype(str))
# Create a feature for player experience
df["Experience"] = pd.cut(
df["Minutes Played"],
bins=[0, 500, 1000, 1500, 2000, np.inf],
labels=["0-500", "500-1000", "1000-1500", "1500-2000", "2000+"],
)
df["Experience"] = le.fit_transform(df["Experience"].astype(str))
# Create a feature for player performance
df["Appearances_Group"] = pd.cut(
df["Appearances"],
bins=[0, 10, 20, 30, 40, 50, np.inf],
labels=["0-10", "10-20", "20-30", "30-40", "40-50", "50+"],
)
df["Appearances_Group"] = le.fit_transform(df["Appearances_Group"].astype(str))
df["Assists_Group"] = pd.cut(
df["Assists"],
bins=[0, 2, 4, 6, 8, np.inf],
labels=["0-2", "2-4", "4-6", "6-8", "8+"],
)
df["Assists_Group"] = le.fit_transform(df["Assists_Group"].astype(str))
df["Penalty_Kicks_Group"] = pd.cut(
df["Penalty Kicks"],
bins=[0, 2, 4, 6, 8, np.inf],
labels=["0-2", "2-4", "4-6", "6-8", "8+"],
)
df["Penalty_Kicks_Group"] = le.fit_transform(df["Penalty_Kicks_Group"].astype(str))
df["Goals_Per_Match_Group"] = pd.cut(
df["Goals Per Match"],
bins=[0, 0.2, 0.4, 0.6, 0.8, 1.0, np.inf],
labels=["0-0.2", "0.2-0.4", "0.4-0.6", "0.6-0.8", "0.8-1.0", "1.0+"],
)
df["Goals_Per_Match_Group"] = le.fit_transform(
df["Goals_Per_Match_Group"].astype(str)
)
df["Minutes_Per_Goal_Group"] = pd.cut(
df["Minutes Per Goal"],
bins=[0, 30, 60, 90, 120, np.inf],
labels=["0-30", "30-60", "60-90", "90-120", "120+"],
)
df["Minutes_Per_Goal_Group"] = le.fit_transform(
df["Minutes_Per_Goal_Group"].astype(str)
)
# Create dummy variables for player position
df = pd.concat([df, pd.get_dummies(df["Position"], prefix="Position")], axis=1)
df[
[
"Position_0",
"Position_1",
"Position_2",
"Position_3",
"Position_4",
"Position_5",
"Position_6",
"Position_7",
"Position_8",
"Position_9",
"Position_10",
"Position_11",
]
] = df[
[
"Position_0",
"Position_1",
"Position_2",
"Position_3",
"Position_4",
"Position_5",
"Position_6",
"Position_7",
"Position_8",
"Position_9",
"Position_10",
"Position_11",
]
].astype(
"int64"
)
return df
football_stats = create_features(football_stats)
# Feature engineering for goalkeeper stats
def create_features_goalkeeper(df):
df["Age_Group"] = pd.cut(
df["Age"], bins=[17, 24, 29, 34, 40], labels=["18-24", "25-29", "30-34", "35+"]
)
df["Age_Group"] = le.fit_transform(df["Age_Group"].astype(str))
# Create a feature for goalkeeper experience
df["Experience"] = pd.cut(
df["Minutes Played"],
bins=[0, 500, 1000, 1500, 2000, np.inf],
labels=["0-500", "500-1000", "1000-1500", "1500-2000", "2000+"],
)
df["Experience"] = le.fit_transform(df["Experience"].astype(str))
# Create a feature for goalkeeper performance
df["Clean_Sheets_Group"] = pd.cut(
df["Clean Sheets"],
bins=[0, 5, 10, 15, 20, np.inf],
labels=["0-5", "5-10", "10-15", "15-20", "20+"],
)
df["Clean_Sheets_Group"] = le.fit_transform(df["Clean_Sheets_Group"].astype(str))
df["Minutes_Per_Goal_Against_Group"] = pd.cut(
df["Minutes Per Goal Against"],
bins=[0, 30, 60, 90, 120, np.inf],
labels=["0-30", "30-60", "60-90", "90-120", "120+"],
)
df["Minutes_Per_Goal_Against_Group"] = le.fit_transform(
df["Minutes_Per_Goal_Against_Group"].astype(str)
)
df["Goals_Conceded_Group"] = pd.cut(
df["Goal Conceded"],
bins=[0, 10, 20, 30, np.inf],
labels=["0-10", "10-20", "20-30", "30+"],
)
df["Goals_Conceded_Group"] = le.fit_transform(
df["Goals_Conceded_Group"].astype(str)
)
df["Percentage"] = pd.cut(
df["Percentage"],
bins=[0.0, 25.0, 50.0, 100.0, np.inf],
labels=["0.0-25.0", "25.0-50.0", "50.0-75.0", "75.0-100.0"],
)
df["Percentage"] = le.fit_transform(df["Percentage"].astype(str))
return df
goalkeeper_stats = create_features_goalkeeper(goalkeeper_stats)
print(football_stats.columns)
from sklearn.preprocessing import StandardScaler
# Scale the features using the standard scaler
# Select the numerical columns to be scaled
numerical_cols = [
    "Position",
    "Club",
    "Nation",
    "Age",
    "Current Value",
    "League",
    "Appearances",
    "Assists",
    "Penalty Kicks",
    "Minutes Played",
    "Minutes Per Goal",
    "Goals Per Match",
    "Goals",
    "Age_Group",
    "Experience",
    "Appearances_Group",
    "Assists_Group",
    "Penalty_Kicks_Group",
    "Goals_Per_Match_Group",
    "Minutes_Per_Goal_Group",
    "Position_0",
    "Position_1",
    "Position_2",
    "Position_3",
    "Position_4",
    "Position_5",
    "Position_6",
    "Position_7",
    "Position_8",
    "Position_9",
    "Position_10",
    "Position_11",
]
# Scale the numerical columns
scaler = StandardScaler()
football_stats[numerical_cols] = scaler.fit_transform(football_stats[numerical_cols])
# Select the numerical columns to be scaled
numerical_cols_1 = [
"Age",
"Current Value",
"Appearances",
"Clean Sheets",
"Goal Conceded",
"Minutes Played",
"Minutes Per Goal Against",
]
# Scale the numerical columns
scaler = StandardScaler()
goalkeeper_stats[numerical_cols_1] = scaler.fit_transform(
goalkeeper_stats[numerical_cols_1]
)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
X = football_stats.copy()
y = X.pop("Current Value")
X_1 = goalkeeper_stats.copy()
y_1 = X_1.pop("Current Value")
# Drop unique column 'Name'
X.pop("Name")
X_1.pop("Name")
# Label encoding for categoricals
for colname in X.select_dtypes("object"):
X[colname], _ = X[colname].factorize()
for colname in X_1.select_dtypes("object"):
X_1[colname], _ = X_1[colname].factorize()
# All discrete features should now have integer dtypes (double-check this before using MI!)
discrete_features = X.dtypes == int
discrete_features_1 = X_1.dtypes == int
print(discrete_features)
print(discrete_features_1)
from sklearn.feature_selection import mutual_info_regression
# Calculate mutual information
mi = mutual_info_regression(X, y, discrete_features=discrete_features)
# Create a dataframe with feature names and mutual information values
feature_importance = pd.DataFrame({"Feature": X.columns, "Importance": mi})
# Sort the features based on their importance
feature_importance = feature_importance.sort_values(by="Importance", ascending=False)
print(feature_importance[:9])
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler
# Select the top 9 features
top_features = feature_importance["Feature"][:9].values
X_top = X[top_features]
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X_top, y, test_size=0.2, random_state=0
)
# Scale the selected features using the standard scaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Define the hyperparameters to search over
params = {
"n_estimators": [100, 500, 1000],
"max_depth": [None, 5, 10],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
}
# Build a Random Forest model
rf_model = RandomForestRegressor(random_state=0)
# Use GridSearchCV to search over the hyperparameters
grid = GridSearchCV(
rf_model, params, cv=20, scoring="neg_mean_squared_error", n_jobs=-1
)
grid.fit(X_train, y_train)
# Print the best hyperparameters and the corresponding mean squared error
print("Best hyperparameters:", grid.best_params_)
print("Best MSE:", -grid.best_score_)
# Evaluate the best model on the testing set
best_model = grid.best_estimator_
y_pred = best_model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print("Mean squared error:", mse)
print("R-squared score: {:.2f}%".format(r2 * 100))
import matplotlib.pyplot as plt
# Plot the predicted values against the true values
plt.scatter(y_test, y_pred, alpha=0.5)
plt.xlabel("True Values")
plt.ylabel("Predicted Values")
plt.show()
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
# Define the SVM model
svm_model = SVR()
# Define the hyperparameters to tune
param_grid = {
"C": [0.1, 1, 10],
"gamma": [0.1, 1, "scale"],
"kernel": ["linear", "rbf", "sigmoid"],
}
# Perform GridSearchCV to find the best hyperparameters
grid_search = GridSearchCV(
svm_model, param_grid, cv=20, scoring="neg_mean_squared_error"
)
grid_search.fit(X_train, y_train)
# Print the best hyperparameters and the corresponding MSE
print("Best hyperparameters:", grid_search.best_params_)
y_pred = grid_search.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print("MSE:", mse)
# Plot the predicted values against the true values
plt.scatter(y_test, y_pred)
plt.xlabel("True Values")
plt.ylabel("Predictions")
plt.show()
r2 = r2_score(y_test, y_pred)
print("R-squared score: {:.2f}%".format(r2 * 100))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
# # EDA
print(f"Train data : {train.shape}")
print(f"Test data : {test.shape}")
train.describe()
print(train.isna().sum())
print(test.isna().sum())
sns.countplot(x=train["target"])
counts = train["target"].value_counts()
for i, count in enumerate(counts):
plt.text(i, count - 10, str(count), ha="center")
sns.pairplot(data=train, diag_kind="kde", hue="target")
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
train_df = scaler.fit_transform(train.iloc[:, 0:-1])
test_df = scaler.transform(test)  # reuse the scaler fitted on the training features to avoid leakage
train_df = pd.DataFrame(train_df, columns=train.iloc[:, 0:-1].columns)
train_df = train_df.join(train.iloc[:, -1])
train_df
test_df = pd.DataFrame(test_df, columns=test.iloc[:, 0:].columns)
test_df
train_df = train_df.iloc[:, 1:]
test_df = test_df.iloc[:, 1:]
X = train_df.iloc[:, 0:-1]
Y = train_df.iloc[:, -1].to_frame()
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 15))
for i, j in enumerate(list(train.columns)):
plt.subplot(4, 2, i + 1)
ax = sns.lineplot(y=train[j], x=train["target"])
# From the line chart we can see that as the predictor variables increase, the likelihood of having a kidney stone increases, except for pH, where a decrease in pH increases the likelihood of having a kidney stone.
# **Uric acid stones:** Uric acid is produced when the body metabolizes protein. When the pH of urine drops **below 5.5**, urine becomes saturated with uric acid crystals, a condition known as hypercalciuria. When there is too much uric acid in the urine, stones can form.
# (https://www.hopkinsmedicine.org/health/conditions-and-diseases/kidney-stones#:~:text=Uric%20acid%20stones%3A%20Uric%20acid,the%20urine%2C%20stones%20can%20form.)
# For this classification problem, we will try the available classification models, select one based on the confusion matrix and the AUC-ROC curve, and then fine-tune that model.
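# As a reference for the evaluation step described above, here is a minimal sketch
# (added for illustration, not part of the original notebook) of how a confusion matrix
# and ROC curve could be drawn for any fitted classifier `clf` on held-out data
# `x_test`, `y_test`; the names are placeholders for the split created below.
from sklearn.metrics import ConfusionMatrixDisplay, RocCurveDisplay

def plot_evaluation(clf, x_test, y_test):
    # Confusion matrix computed from hard class predictions
    ConfusionMatrixDisplay.from_estimator(clf, x_test, y_test)
    # ROC curve computed from the classifier's scores / probabilities
    RocCurveDisplay.from_estimator(clf, x_test, y_test)
    plt.show()

# Example usage once a model has been fitted below, e.g. plot_evaluation(lr, x_test, y_test)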
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X, Y, test_size=0.33, random_state=5
)
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
models = []
model_list = [
"Logistic_regression",
"SVC",
"KNeighborsClassifier",
"DecisionTreeClassifier",
"GaussianNB",
"RandomForestClassifier",
]
models.append(LogisticRegression(solver="liblinear"))
models.append(SVC())
models.append(KNeighborsClassifier())
models.append(DecisionTreeClassifier())
models.append(GaussianNB())
models.append(RandomForestClassifier())
from sklearn.model_selection import StratifiedKFold, cross_val_score
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for name, model in zip(model_list, models):
    scores = cross_val_score(model, X, Y.values.ravel(), cv=cv)  # flatten Y to a 1-D array
    print(f"{name} average accuracy:", np.mean(scores))
m = pd.DataFrame()
score = []
for model in models:
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
score.append(accuracy_score(y_test, y_pred))
d = {model_list[i]: score[i] for i in range(len(model_list))}
m = pd.DataFrame(d.items(), columns=["model", "score"])
m.sort_values(by="score", ascending=False, inplace=True)
m.reset_index(drop=True, inplace=True)
m
# # ROC_AUC_SCORE
# Logistic Regression
from sklearn.metrics import roc_auc_score
lr = LogisticRegression(solver="liblinear")
lr.fit(x_train, y_train)
y_pred = lr.predict(x_test)
y_prob = lr.predict_proba(x_test)
print(roc_auc_score(y_test, y_prob[:, 1]))
final_pred = lr.predict(test_df)
final = pd.DataFrame({"id": test["id"], "target": final_pred})
final.to_csv("Logistic_reg.csv", index=False)
# Random forest classifier
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
final_pred = rfc.predict(test_df)
final = pd.DataFrame({"id": test["id"], "target": final_pred})
final.to_csv("RFC.csv", index=False)
y_prob = rfc.predict_proba(x_test)
print(roc_auc_score(y_test, y_prob[:, 1]))
import xgboost as xgb
xgbc = xgb.XGBClassifier()
xgbc.fit(x_train, y_train)
final_pred = xgbc.predict(test_df)
final = pd.DataFrame({"id": test["id"], "target": final_pred})
final.to_csv("xgbc.csv", index=False)
y_prob = xgbc.predict_proba(x_test)
print(roc_auc_score(y_test, y_prob[:, 1]))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
print(888 > 887)
print(888 == 455)
print(9 < 1)
a = 155
b = 65
if b > a:
    print("b is greater than a")
else:
    print("b is not greater than a")
print(bool("Buongirno"))
print(bool(8))
x = "Buonne Notte"
y = 8
print(bool(x))
print(bool(y))
bool("dodo")
bool(4376537464357347)
bool(["ananas", "avakado", "hindistan cevizi"])
bool(False)
bool(None)
bool(0)
bool("")
bool(())
bool([])
bool({})
def myFunction():
return True
print(myFunction())
def myFunction():
return True
if myFunction():
print("NO!")
else:
print("YES!")
x = 32423432423
print(isinstance(x, int))
y = "spor yapmak sağlıklıdır"
print(isinstance(y, str))
print(76 > 45)
print(156 == 945)
print(89 < 56)
print(bool("dodo"))
print(bool(0))
print(23423423 + 234234243)
x = 65756565
y = 23423423
print(x + y)
x = 9876567876
y = 1234543222
print(x - y)
x = 589
y = 342
print(x * y)
x = 7600
y = 300
print(x / y)
x = 56
y = 33
print(x % y)
x = 9
y = 6
print(x**y)
x = 89
y = 23
print(x // y)
x = 100
x
x = 80
x += 20
print(x)
x = 89
x -= 34
print(x)
x = 900
x *= 200
print(x)
x = 50
x /= 25
print(x)
x = 2342342
x %= 5554
print(x)
x = 999999999
x //= 2
print(x)
x = 55
x **= 21
print(x)
x = 456
y = 234
print(x == y)
x = 456
y = 234
print(x != y)
x = 333
y = 222
print(x > y)
x = 333
y = 222
print(x < y)
x = 678
y = 567
print(x >= y)
x = 678
y = 567
print(x <= y)
x = 188
print(x > 90 and x < 211)
x = 78
print(x > 56 or x < 67)
x = 93
print(not (x > 3 and x < 100))
x = ["armut", "portakal"]
y = ["armut", "portakal"]
z = x
print(x is z)
print(x is y)
print(x == y)
x = ["apple", "banana"]
y = ["apple", "banana"]
z = x
print(x is not z)
print(x is not y)
print(x != y)
|
# # Predicting Blood Glucose with Logistic Regression Machine Learning
# Based on the collected blood glucose dataset, we train a machine learning model and predict whether blood glucose exceeds the threshold (>6.1).
# Principle of the earlier blood glucose acquisition: a transmissive near-infrared spectroscopy method is used. An embedded microprocessor drives red and infrared emitters that illuminate the fingertip, a photoelectric receiver collects the transmitted light, and an information-processing module amplifies, filters, and photoelectrically converts the signal. The ratio of transmitted red to infrared light energy, together with an initial blood glucose value, is computed via the Beer-Lambert law. The glucose measurement is then corrected using an energy-metabolism conservation method; the correction parameters include blood oxygen saturation, heart rate, fingertip skin temperature, ambient temperature, and body radiation energy. Because reduced hemoglobin absorbs red light strongly but infrared light relatively weakly, while oxygenated hemoglobin absorbs red light weakly and infrared light strongly, the raw photoplethysmography signal, after processing on the host computer, reveals the difference in absorption between the two wavelengths and hence the red-to-infrared transmitted energy ratio. Fitting this ratio with a least-squares model gives the blood oxygen saturation, while digital temperature sensors provide the ambient temperature, the skin temperature at the measurement site, and the radiated energy. These computed values are saved as a table (.CSV) and then analyzed (machine learning) together with the reference blood glucose concentrations measured by a commercial invasive glucose meter.
#
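# For reference (an added note, not part of the original notebook): the Beer-Lambert law
# mentioned above relates the transmitted intensity $I$ to the incident intensity $I_0$ as
# $I = I_0 \, 10^{-\varepsilon l c}$, i.e. absorbance $A = \log_{10}(I_0 / I) = \varepsilon l c$,
# where $\varepsilon$ is the molar absorptivity of the absorbing species, $l$ the optical path
# length, and $c$ the concentration. Comparing the absorbances measured at the red and infrared
# wavelengths gives the red-to-infrared transmitted energy ratio used for the blood oxygen
# estimate described above.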
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# The plan is to use the Python libraries, the machine learning library (logistic regression), and the plotting libraries provided by the Kaggle platform to learn from and make predictions on the collected dataset, enabling continuous non-invasive blood glucose monitoring.
# (1) use pandas to read the collected data (CSV format);
# (2) use the logistic regression algorithm from sklearn for the data analysis;
# (3) use seaborn and matplotlib to visualize the analysis results;
# (4) use joblib to store the trained model for later prediction.
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
import joblib
diabetesDF = pd.read_csv("../input/hmdata2019-lyh/hm_data_2019_lyh.csv")
print(diabetesDF.head())
# Data check (e.g. confirm there are no null values)
diabetesDF.info() # output shown below
# First, compute the correlation between every pair of features (and the outcome variable) and visualize it with a heatmap.
corr = diabetesDF.corr()
print(corr)
sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns)
# sns.pairplot(corr, vars = ['Glucose', 'BloodPressure'])
# In the heatmap above, brighter colors indicate stronger correlation. From the table and the heatmap we can see that blood oxygen saturation, heart rate, fingertip skin temperature, ambient temperature, and body radiation energy are all significantly correlated with blood glucose level. Also note the correlations between pairs of features, such as blood oxygen saturation and heart rate, or blood pressure and skin temperature.
# # **Dataset preparation (splitting and normalization)**
# When using machine learning algorithms, we should split the data into training and test sets. (If we were running a large number of experiments, we would split the data into three parts: training, development, and test sets.) In this example, we also set aside some data for a manual cross-check.
# The dataset contains 297 records in total. To train our model we will use 200 records, 82 records will be used for testing, and the last 15 records will be used to cross-check our model.
dfTrain = diabetesDF[:200]
dfTest = diabetesDF[200:282]
dfCheck = diabetesDF[282:]  # the last 15 records, kept for the manual cross-check
# Next, we split and define the label and feature columns (for both the training and test sets). We also convert them into NumPy arrays, because our machine learning algorithm processes data in NumPy array format.
trainLabel = np.asarray(dfTrain["DiabetesPredicted"])
trainData = np.asarray(dfTrain.drop(["Date", "Time", "DiabetesPredicted"], axis=1))
testLabel = np.asarray(dfTest["DiabetesPredicted"])
testData = np.asarray(dfTest.drop(["Date", "Time", "DiabetesPredicted"], axis=1))
# As a final step before applying machine learning, we normalize our inputs. Machine learning models usually benefit from input normalization. It also makes it easier to understand the importance of each feature later, when we look at the model weights. We standardize the data so that each variable has mean 0 and standard deviation 1.
means = np.mean(trainData, axis=0)
stds = np.std(trainData, axis=0)
trainData = (trainData - means) / stds
testData = (testData - means) / stds
# np.mean(trainData, axis=0) => check that new means equal 0
# np.std(trainData, axis=0) => check that new stds equal 1
# # Training and evaluating the machine learning model
# We can now train our classification model. We will use a simple machine learning model called logistic regression. Since this model is readily available in sklearn, the training process is straightforward and can be done in a few lines of code. First, we create an instance called diabetesCheck and then use the fit function to train the model.
diabetesCheck = LogisticRegression()
diabetesCheck.fit(trainData, trainLabel)
# Next, we use our test data to find out the accuracy of the model.
accuracy = diabetesCheck.score(testData, testLabel)
print("accuracy = ", accuracy * 100, "%")
# # Interpreting the machine learning model
# To better understand what happens inside the logistic regression model, we can visualize how the model uses the different features and which features have the greater influence.
print(corr.columns[1:9])
coeff = list(diabetesCheck.coef_[0])
labels = list(corr.columns[1:10])
features = pd.DataFrame()
features["Features"] = labels
features["importance"] = coeff
features.sort_values(by=["importance"], ascending=True, inplace=True)
features["positive"] = features["importance"] > 0
features.set_index("Features", inplace=True)
features.importance.plot(
kind="barh",
figsize=(11, 6),
color=features.positive.map({True: "blue", False: "red"}),
)
plt.xlabel("Importance")
# # Saving the model
# Save the trained model for later blood glucose prediction.
joblib.dump([diabetesCheck, means, stds], "diabeteseModel.pkl")
# To check whether we saved the model correctly, we use our test data to check the accuracy of the saved model (if the model was saved correctly, we should observe no change in accuracy).
diabetesLoadedModel, means, stds = joblib.load("diabeteseModel.pkl")
accuracyModel = diabetesLoadedModel.score(testData, testLabel)
print("accuracy = ", accuracyModel * 100, "%")
# # Making predictions with the model
# Now we use the held-out data to see how predictions are made. First, inspect the held-out data.
print(dfCheck.head())
# Use the first data record to make a prediction
sampleData = dfCheck[:1]
# prepare sample
sampleDataFeatures = np.asarray(
    sampleData.drop(["Date", "Time", "DiabetesPredicted"], axis=1)
)
sampleDataFeatures = (sampleDataFeatures - means) / stds
# predict
predictionProbability = diabetesLoadedModel.predict_proba(sampleDataFeatures)
prediction = diabetesLoadedModel.predict(sampleDataFeatures)
print("Probability:", predictionProbability)
print("prediction:", prediction)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
X_train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
y_train = X_train[["SalePrice"]]
X_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
# It has been checked that there are no duplicated rows in the training and test sets. The next important step is to look at the null entries. The following code reveals that each row in the training set contains null values, but not all columns contain null values.
print(f"Number of rows in the trainins set is {X_train.shape[0]}.")
print(f"Number of rows with a null value {X_train.isnull().any(axis = 1).sum()}.")
print(f"Number of columns is {X_train.shape[1]}.")
print(f"Number of columns with a null value is {X_train.isnull().any(axis = 0).sum()}.")
X_train.loc[:, X_train.isnull().any(axis=0)].columns
# A few things to note.
# * The categorical features with null values are Alley, MasVnrType, BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1, BsmtFinType2, Electrical, FireplaceQu, GarageType, GarageFinish, GarageQual, GarageCond, PoolQC, Fence, MiscFeature.
# * The numerical features with null values are LotFrontage, MasVnrArea, GarageYrBlt.
# * Upon inspection, a null value in the Alley column simply means there is no alley access. The same holds for MasVnrType, BsmtQual, BsmtCond, BsmtFinType1, BsmtFinType2, FireplaceQu, GarageType, GarageFinish, GarageQual, GarageCond, PoolQC, Fence, and MiscFeature (features not previously covered).
# * BsmtExposure can have "No" for no exposure; a null value means there is no basement.
# * For the houses without a basement, other related numerical features such as BsmtFullBath will be set to zero. Nevertheless, a house can have a basement and zero BsmtFullBath. These cases are demonstrated in the following:
# Show the total number of full basement baths across all houses without basements.
X_train.loc[X_train["BsmtQual"].isnull(), :]["BsmtFullBath"].sum()
# Show that there are houses with basements but no basement baths
X_train.loc[X_train["BsmtFullBath"] == 0, :]["BsmtQual"].head()
# It can be intimidating to analyze all of the features supplied in the dataset. Nevertheless, some features may be redundant in that they are similar to other features, and some features are so skewed that they may be detrimental to modeling. In the next stage, we identify features to be dropped from the training set. The first obvious ones are Id and SalePrice. To avoid adding repeated columns in the process, we will use the set structure to record the columns to be dropped.
cols_to_drop = set(["Id", "SalePrice"])
# ## Dropping similar features
# We soon notice that there are similar features concerning the condition and quality of certain parts of the house. We first explore whether it is necessary to keep both the external and the overall conditions (qualities). The most straightforward way is to visualize the dataset with statistics. We supply strip plots, and since decision trees are optimized toward variance in the leaves, we plot the variances among the categories of each feature. We also plot histograms in order to detect highly skewed features.
# from matplotlib import pyplot as plt
# import seaborn as sns
# # Aggregate
# OverallQual_hist = X_train.groupby(by = 'OverallQual')['OverallQual'].count()
# OverallQual_std = X_train.groupby(by = 'OverallQual')['SalePrice'].std()
# OverallCond_hist = X_train.groupby(by = 'OverallCond')['OverallCond'].count()
# OverallCond_std = X_train.groupby(by = 'OverallCond')['SalePrice'].std()
# # Make consistent x axis when plotting
# ExterQual_hist = X_train.groupby(by = 'ExterQual')['ExterQual'].count()[['Fa','TA','Gd','Ex']]
# ExterQual_std = X_train.groupby(by = 'ExterQual')['SalePrice'].std()[['Fa','TA','Gd','Ex']]
# ExterCond_hist = X_train.groupby(by = 'ExterCond')['ExterCond'].count()[['Po','Fa','TA','Gd','Ex']]
# ExterCond_std = X_train.groupby(by = 'ExterCond')['SalePrice'].std()[['Po','Fa','TA','Gd','Ex']]
# fig = plt.figure(constrained_layout = True , figsize = (12,10))
# # Create rows of subfigs so that they share the same title
# subfigs = fig.subfigures(nrows = 4, ncols = 1)
# subfigs[0].suptitle("OverallCond Analytics")
# ax = subfigs[0].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['OverallCond', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'OverallCond', y = 'SalePrice',
# order = np.arange(1,11), jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(OverallCond_hist.index, OverallCond_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(OverallCond_std.index, OverallCond_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[1].suptitle("OverallQual Analytics")
# ax = subfigs[1].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['OverallQual', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'OverallQual', y = 'SalePrice',
# order = np.arange(1,11), jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(OverallQual_hist.index, OverallQual_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(
# OverallQual_std.index, OverallQual_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[2].suptitle("ExterCond Analytics")
# ax = subfigs[2].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['ExterCond', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'ExterCond', y = 'SalePrice',
# order = ['Po','Fa','TA','Gd','Ex'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(ExterCond_hist.index,ExterCond_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(ExterCond_std.index, ExterCond_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[3].suptitle("ExterQual Analytics")
# ax = subfigs[3].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['ExterQual', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'ExterQual', y = 'SalePrice',
# order = ['Fa','TA','Gd','Ex'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(ExterQual_hist.index, ExterQual_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(
# ExterQual_std.index, ExterQual_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# plt.show()
# For the overall condition and quality, although the variances between the two appear to be a tie, the strip plot reveals that overall quality is a better indicator of house prices. For the external condition and quality, the variance for quality is smaller and the strip plot also shows that quality is the better indicator.
cols_to_drop.add("OverallCond")
cols_to_drop.add("ExterCond")
# We now do the same for basement and garage.
# # Aggregate
# GarageCond_hist = X_train.groupby(by = 'GarageCond')['GarageCond'].count()[['Po','Fa','TA','Gd','Ex']]
# GarageCond_std = X_train.groupby(by = 'GarageCond')['SalePrice'].std()[['Po','Fa','TA','Gd','Ex']]
# GarageQual_hist = X_train.groupby(by = 'GarageQual')['GarageQual'].count()[['Po','Fa','TA','Gd','Ex']]
# GarageQual_std = X_train.groupby(by = 'GarageQual')['SalePrice'].std()[['Po','Fa','TA','Gd','Ex']]
# BsmtCond_hist = X_train.groupby(by = 'BsmtCond')['BsmtCond'].count()[['Po','Fa','TA','Gd']]
# BsmtCond_std = X_train.groupby(by = 'BsmtCond')['SalePrice'].std()[['Po','Fa','TA','Gd',]]
# BsmtQual_hist = X_train.groupby(by = 'BsmtQual')['BsmtQual'].count()[['Fa','TA','Gd','Ex']]
# BsmtQual_std = X_train.groupby(by = 'BsmtQual')['SalePrice'].std()[['Fa','TA','Gd','Ex']]
# fig = plt.figure(constrained_layout = True , figsize = (12,10))
# # Create rows of subfigs so that they share the same title
# subfigs = fig.subfigures(nrows = 4, ncols = 1)
# subfigs[0].suptitle("GarageCond Analytics")
# ax = subfigs[0].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['GarageCond', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'GarageCond', y = 'SalePrice',
# order = ['Po','Fa','TA','Gd','Ex'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(GarageCond_hist.index, GarageCond_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(GarageCond_std.index, GarageCond_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[1].suptitle("GarageQual Analytics")
# ax = subfigs[1].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['GarageQual', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'GarageQual', y = 'SalePrice',
# order = ['Po','Fa','TA','Gd','Ex'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(GarageQual_hist.index, GarageQual_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(
# GarageQual_std.index, GarageQual_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[2].suptitle("BsmtCond Analytics")
# ax = subfigs[2].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['BsmtCond', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'BsmtCond', y = 'SalePrice',
# order = ['Po','Fa','TA','Gd','Ex'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(BsmtCond_hist.index, BsmtCond_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(BsmtCond_std.index, BsmtCond_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[3].suptitle("BsmtQual Analytics")
# ax = subfigs[3].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['BsmtQual', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'BsmtQual', y = 'SalePrice',
# order = ['Po','Fa','TA','Gd','Ex'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(BsmtQual_hist.index, BsmtQual_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(
# BsmtQual_std.index, BsmtQual_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# plt.show()
# From these analytics, it makes sense to exclude BsmtCond. First, the data is crowded around the TA category. Second, the strip plot for BsmtQual shows a far more recognizable pattern. GarageCond and GarageQual are both unevenly weighted around the TA category. Before we consider dropping both, let us see whether there are other garage features that are better.
# For this, we analyze GarageYrBlt, GarageFinish, GarageCars, and GarageArea.
# # Aggregate
# GarageYrBlt_hist = X_train.groupby(by = 'GarageYrBlt')['GarageYrBlt'].count()
# GarageYrBlt_avg = X_train.groupby(by = 'GarageYrBlt')['SalePrice'].mean()
# GarageFinish_hist = X_train.groupby(by = 'GarageFinish')['GarageFinish'].count()[['Unf', 'RFn', 'Fin']]
# GarageFinish_avg = X_train.groupby(by = 'GarageFinish')['SalePrice'].mean()[['Unf', 'RFn', 'Fin']]
# GarageCars_hist = X_train.groupby(by = 'GarageCars')['GarageCars'].count()
# GarageCars_avg = X_train.groupby(by = 'GarageCars')['SalePrice'].mean()
# GarageArea_hist = X_train.groupby(by = 'GarageArea')['GarageArea'].count()
# GarageArea_avg = X_train.groupby(by = 'GarageArea')['SalePrice'].mean()
# fig = plt.figure(constrained_layout = True , figsize = (12,10))
# subfigs = fig.subfigures(nrows = 4, ncols = 1)
# subfigs[0].suptitle("GarageYrBlt Analytics")
# ax = subfigs[0].subplots(1,3)
# ax[0].scatter(X_train.loc[~X_train['GarageYrBlt'].isnull()]['GarageYrBlt'], y_train[~X_train['GarageYrBlt'].isnull()]/100000)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[1].bar(GarageYrBlt_hist.index, GarageYrBlt_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(GarageYrBlt_avg.index, GarageYrBlt_avg/100000)
# ax[2].set_ylabel("Average Price in 100 K's")
# subfigs[1].suptitle("GarageFinish Analytics")
# ax = subfigs[1].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['GarageFinish', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'GarageFinish', y = 'SalePrice', order = ['Unf', 'RFn', 'Fin'], jitter = False)
# ax[0].set_ylabel("HousePrices in 100 K's")
# ax[1].bar(GarageFinish_hist.index, GarageFinish_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(GarageFinish_avg.index, GarageFinish_avg/10000)
# ax[2].set_ylabel("Average in Price 100 K's")
# subfigs[2].suptitle("GarageCars Analytics")
# ax = subfigs[2].subplots(1,3)
# ax[0].scatter(X_train.loc[~X_train['GarageCars'].isnull()]['GarageCars'], y_train[~X_train['GarageCars'].isnull()]/100000)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[1].bar(GarageCars_hist.index, GarageCars_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(GarageCars_avg.index, GarageCars_avg/100000)
# ax[2].set_ylabel("Std in 100 K's")
# subfigs[3].suptitle("GarageArea Analytics")
# ax = subfigs[3].subplots(1,3)
# ax[0].scatter(X_train.loc[~X_train['GarageArea'].isnull()]['GarageArea'], y_train[~X_train['GarageArea'].isnull()]/100000)
# ax[0].set_ylabel("House Prices in 1000 K's")
# ax[1].bar(GarageArea_hist.index, GarageArea_hist, width = 7)
# ax[1].set_ylabel("Count")
# ax[2].bar(GarageArea_avg.index, GarageArea_avg/ 100000, width = 7)
# ax[2].set_ylabel("Average Price in 100 K's")
# plt.show()
# It appears that all four plotted features are well suited for modeling house prices. For this reason we will just drop GarageQual and GarageCond.
cols_to_drop.add("GarageQual")
cols_to_drop.add("GarageCond")
cols_to_drop.add("BsmtCond")
# We now decide which of Condition1 and Condition2, and which of Exterior1st and Exterior2nd, to include.
# # Aggregate
# condition1_hist = X_train.groupby(by = 'Condition1')['Condition1'].count()[['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA'
# , 'RRNe']]
# condition1_std = X_train.groupby(by = 'Condition1')['SalePrice'].std()[['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA'
# , 'RRNe']]
# condition2_hist = X_train.groupby(by = 'Condition2')['Condition2'].count()[['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA']]
# condition2_std = X_train.groupby(by = 'Condition2')['SalePrice'].std()[['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA']]
# Exterior1st_hist = X_train.groupby(by = 'Exterior1st')['Exterior1st'].count()
# Exterior1st_std = X_train.groupby(by = 'Exterior1st')['SalePrice'].std()[Exterior1st_hist.index]
# Exterior2nd_hist = X_train.groupby(by = 'Exterior2nd')['Exterior2nd'].count()[['AsbShng', 'AsphShn', 'BrkFace', 'CBlock', 'HdBoard', 'ImStucc', 'MetalSd',
# 'Plywood', 'Stone', 'Stucco', 'VinylSd', 'Wd Sdng']]
# Exterior2nd_std = X_train.groupby(by = 'Exterior2nd')['SalePrice'].std()[Exterior2nd_hist.index]
# fig = plt.figure(constrained_layout = True , figsize = (12,10))
# # Create rows of subfigs so that they share the same title
# subfigs = fig.subfigures(nrows = 4, ncols = 1)
# subfigs[0].suptitle("Condition1 Analytics")
# ax = subfigs[0].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['Condition1', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'Condition1', y = 'SalePrice',
# order = ['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA', 'RRNe'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(condition1_hist.index, condition1_hist)
# ax[1].set_xticks(ax[1].get_xticks())
# ax[1].set_xticklabels(['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA', 'RRNe'], rotation = 15, ha = 'right')
# ax[1].set_ylabel("Count")
# ax[2].bar(condition1_std.index, condition1_std/10000)
# ax[2].set_xticks(ax[2].get_xticks())
# ax[2].set_xticklabels(['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA', 'RRNe'], rotation = 15, ha = 'right')
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[1].suptitle("Condition2 Analytics")
# ax = subfigs[1].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['Condition2', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'Condition2', y = 'SalePrice',
# order = ['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA'], jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(condition2_hist.index, condition2_hist)
# ax[1].set_xticks(ax[1].get_xticks())
# ax[1].set_xticklabels(['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA'], rotation = 15, ha = 'right')
# ax[1].set_ylabel("Count")
# ax[2].bar(
# condition2_std.index, condition2_std/10000)
# ax[2].set_xticks(ax[2].get_xticks())
# ax[2].set_xticklabels(['Norm', 'Feedr', 'PosN', 'Artery', 'RRAe', 'RRNn', 'PosA'], rotation = 15, ha = 'right')
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[2].suptitle("Exterior1st Analytics")
# ax = subfigs[2].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['Exterior1st', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'Exterior1st', y = 'SalePrice',
# order = Exterior1st_hist.index, jitter = False)
# ax[0].set_xticks(ax[0].get_xticks())
# ax[0].set_xticklabels(Exterior1st_hist.index, rotation = 90, ha = 'right')
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(Exterior1st_hist.index, Exterior1st_hist)
# ax[1].set_xticks(ax[1].get_xticks())
# ax[1].set_xticklabels(Exterior1st_hist.index, rotation = 90, ha = 'right')
# ax[1].set_ylabel("Count")
# ax[2].bar(Exterior1st_std.index, Exterior1st_std/10000)
# ax[2].set_xticks(ax[2].get_xticks())
# ax[2].set_xticklabels(Exterior1st_std.index, rotation = 90, ha = 'right')
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[3].suptitle("Exterior2nd Analytics")
# ax = subfigs[3].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['Exterior2nd', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'Exterior2nd', y = 'SalePrice',
# order = Exterior2nd_hist.index, jitter = False)
# ax[0].set_xticks(ax[0].get_xticks())
# ax[0].set_xticklabels(Exterior2nd_hist.index, rotation = 90, ha = 'right')
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(Exterior2nd_hist.index, Exterior2nd_hist)
# ax[1].set_xticks(ax[1].get_xticks())
# ax[1].set_xticklabels(Exterior2nd_hist.index, rotation = 90, ha = 'right')
# ax[1].set_ylabel("Count")
# ax[2].bar(Exterior2nd_hist.index, Exterior2nd_std/10000)
# ax[2].set_xticks(ax[2].get_xticks())
# ax[2].set_xticklabels(Exterior2nd_hist.index, rotation = 90, ha = 'right')
# ax[2].set_ylabel("Std in 10 K's")
# plt.show()
# It seems Condition2 is quite skewed. For this reason, we drop it. Meanwhile, it appears that Exterior1st and Exterior2nd are highly similar. We run the following code to confirm.
# Number of rows where the two columns are the same
(X_train["Exterior1st"] == X_train["Exterior2nd"]).sum()
# We drop Exterior1st, as it has two more categories, making the model more prone to overfitting.
cols_to_drop.add("Condition2")
cols_to_drop.add("Exterior1st")
# Finally, we compare basement finish of type 1 and 2.
# # Agrregate
# bsmtFinType1_hist = X_train.groupby(by = 'BsmtFinType1')['BsmtFinType1'].count()
# bsmtFinType1_std = X_train.groupby(by = 'BsmtFinType1')['SalePrice'].std()[bsmtFinType1_hist.index]
# bsmtFinType2_hist = X_train.groupby(by = 'BsmtFinType2')['BsmtFinType2'].count()[bsmtFinType1_hist.index]
# bsmtFinType2_std = X_train.groupby(by = 'BsmtFinType2')['SalePrice'].std()[bsmtFinType1_hist.index]
# fig = plt.figure(constrained_layout = True, figsize = (10,6))
# # Create rows of subfigs so that they share the same title
# subfigs = fig.subfigures(nrows = 2, ncols = 1)
# subfigs[0].suptitle("BsmtFinType1 Analytics")
# ax = subfigs[0].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['BsmtFinType1', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'BsmtFinType1', y = 'SalePrice', order = bsmtFinType1_hist.index, jitter = False)
# ax[0].set_xlabel(None)
# ax[1].bar(bsmtFinType1_hist.index, bsmtFinType1_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(bsmtFinType1_hist.index, bsmtFinType1_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# subfigs[1].suptitle("BsmtFinType2 Analytics")
# ax = subfigs[1].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[['BsmtFinType2', 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = 'BsmtFinType2', y = 'SalePrice', order = bsmtFinType1_hist.index, jitter = False)
# ax[0].set_xlabel(None)
# ax[1].bar(bsmtFinType1_hist.index, bsmtFinType2_hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(bsmtFinType1_hist.index, bsmtFinType2_std/10000)
# ax[2].set_ylabel("Std in 10 K's")
# plt.show()
# Although the strip plots of BsmtFinType1 and BsmtFinType2 are not entirely similar, BsmtFinType2 is a bit skewed.
cols_to_drop.add("BsmtFinType2")
cols_to_drop.add("BsmtFinSF2")
# We let TotalBsmtSF completely supersede the finished-square-feet columns.
cols_to_drop.add("BsmtFinSF1")
# ## Analyzing numerical features
# We manually identified the following numerical columns from the rest of the features.
num_cols = [
"LotFrontage",
"LotArea",
"YearBuilt",
"YearRemodAdd",
"MasVnrArea",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"BsmtFullBath",
"BsmtHalfBath",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"KitchenAbvGr",
"GarageYrBlt",
"TotRmsAbvGrd",
"Fireplaces",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
"MoSold",
"YrSold",
]
# Again we visualize these features to decide if more columns can be dropped.
# l = len(num_cols)
# fig = plt.figure(constrained_layout = True, figsize = (10,60))
# subfigs = fig.subfigures(nrows = l, ncols = 1)
# for i in range(l):
# group = X_train.groupby(by = num_cols[i])
# col_avg = group['SalePrice'].mean()
# subfigs[i].suptitle(num_cols[i])
# ax = subfigs[i].subplots(1,3)
# ax[0].scatter(X_train.loc[~X_train[num_cols[i]].isnull()][num_cols[i]],
# y_train[~X_train[num_cols[i]].isnull()]/100000)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[1].hist(X_train[num_cols[i]])
# ax[1].set_ylabel("Count")
# if len(col_avg)<20:
# ax[2].bar(col_avg.index, col_avg/100000)
# else:
# ax[2].text(0.5,0.5,"NA Due to High Numer of Groups",horizontalalignment='center',
# verticalalignment='center')
# ax[2].xaxis.set_ticklabels([])
# ax[2].yaxis.set_ticklabels([])
# ax[2].tick_params(bottom = False)
# ax[2].tick_params(left = False)
# ax[2].set_ylabel("Average Price in 100 K's")
# Let us analyze the features from top to bottom. The first pair we notice is YearBuilt and YearRemodAdd (remodeling). The following code suggests that, in general, remodeled houses have lower prices than houses built in the same year as the remodeling, and higher prices than houses built in the same year as the remodeled house was originally built. This makes sense.
# The average house prices of remodeled houses by the year remodeled
s = pd.DataFrame(
X_train.loc[X_train["YearBuilt"] != X_train["YearRemodAdd"]]
.groupby(by="YearRemodAdd")["SalePrice"]
.mean()
)
# The average house prices of houses without remodeling by year
t = pd.DataFrame(
X_train.loc[X_train["YearBuilt"] == X_train["YearRemodAdd"]]
.groupby(by="YearBuilt")["SalePrice"]
.mean()
)
s.merge(t, left_index=True, right_index=True).assign(
avg_diff=lambda x: x["SalePrice_x"] - x["SalePrice_y"]
).mean()
# The average house prices of remodeled houses by the year built
s = pd.DataFrame(
X_train.loc[X_train["YearBuilt"] != X_train["YearRemodAdd"]]
.groupby(by="YearBuilt")["SalePrice"]
.mean()
)
# The average house prices of houses without remodeling by year
t = pd.DataFrame(
X_train.loc[X_train["YearBuilt"] == X_train["YearRemodAdd"]]
.groupby(by="YearBuilt")["SalePrice"]
.mean()
)
s.merge(t, left_index=True, right_index=True).assign(
avg_diff=lambda x: x["SalePrice_x"] - x["SalePrice_y"]
).mean()
# Next, we see whether the span between the year built and the remodeling year has an effect on prices.
# # Date difference vs price
# l = pd.concat([X_train[['YearBuilt', 'YearRemodAdd']].assign(
# YearDiff = lambda x: x['YearRemodAdd'] - x['YearBuilt']), X_train[['SalePrice']]],
# axis = 1).groupby(by = 'YearDiff')['SalePrice'].mean()
# fig, ax = plt.subplots()
# ax.scatter(l.index, l/100000)
# ax.set_ylabel("Average Prices in 100 K's")
# ax.set_xlabel("Years Before Remodeling")
# plt.show()
# del(l,s,t)
# As the scatter plot suggests, there is no pattern indicating that the time between construction and remodeling affects the price. It is therefore more useful to replace YearRemodAdd with a column that simply indicates whether remodeling was done or not.
cols_to_drop.add("YearRemodAdd")
X_train.insert(
X_train.columns.get_loc("YearRemodAdd"),
"Remodeled",
(X_train["YearBuilt"] != X_train["YearRemodAdd"]).astype(int),
)
X_test.insert(
X_test.columns.get_loc("YearRemodAdd"),
"Remodeled",
(X_test["YearBuilt"] != X_test["YearRemodAdd"]).astype(int),
)
# Next, we should also drop BsmtUnfSF.
cols_to_drop.add("BsmtUnfSF")
# We now decide what to do with 1stFlrSF and 2ndFlrSF. The following code shows that houses with second floors tend to have higher prices.
print(
f"Average price of houses with second floor is {X_train.loc[X_train['2ndFlrSF'] != 0, 'SalePrice'].mean()}"
)
print(
f"Average price of houses without second floor is {X_train.loc[X_train['2ndFlrSF'] == 0, 'SalePrice'].mean()}"
)
# Average total square feet above ground for houses with and without a second floor. For houses with a second floor, 2ndFl is set to True.
s = pd.concat(
[X_train["1stFlrSF"] + X_train["2ndFlrSF"], X_train["2ndFlrSF"] != 0], axis=1
)
s.columns = ["TotalSFAbvGr", "2ndFl"]
s.groupby(by="2ndFl")["TotalSFAbvGr"].mean()
# Since houses with second floors have more square feet, and houses with more total square feet have higher prices, we will drop the 1stFlrSF and 2ndFlrSF columns and replace them with a single TotalSFAbvGr column.
cols_to_drop.add("1stFlrSF")
cols_to_drop.add("2ndFlrSF")
X_train.insert(
X_train.columns.get_loc("1stFlrSF"),
"TotalSFAbvGr",
X_train["1stFlrSF"] + X_train["2ndFlrSF"],
)
X_test.insert(
X_test.columns.get_loc("1stFlrSF"),
"TotalSFAbvGr",
X_test["1stFlrSF"] + X_test["2ndFlrSF"],
)
# The plots for LowQualFinSF also suggest that we drop it.
cols_to_drop.add("LowQualFinSF")
# We drop BsmtFullBath, BsmtHalfBath, FullBath, and HalfBath, and replace them with (total) BsmtBath, and (total) BathAbvGr.
X_train.insert(
X_train.columns.get_loc("BsmtFullBath"),
"BsmtBath",
X_train["BsmtFullBath"] + X_train["BsmtHalfBath"] / 2,
)
X_train.insert(
X_train.columns.get_loc("FullBath"),
"BathAbvGr",
X_train["FullBath"] + X_train["HalfBath"] / 2,
)
X_test.insert(
X_test.columns.get_loc("BsmtFullBath"),
"BsmtBath",
X_test["BsmtFullBath"] + X_test["BsmtHalfBath"] / 2,
)
X_test.insert(
X_test.columns.get_loc("FullBath"),
"BathAbvGr",
X_test["FullBath"] + X_test["HalfBath"] / 2,
)
cols_to_drop.add("BsmtFullBath")
cols_to_drop.add("BsmtHalfBath")
cols_to_drop.add("FullBath")
cols_to_drop.add("HalfBath")
# The KitchenAbvGr and BedroomAbvGr columns can be dropped because of the TotRmsAbvGrd column.
cols_to_drop.add("KitchenAbvGr")
cols_to_drop.add("BedroomAbvGr")
# We now decide what to do with several deck- and porch-related columns. The porch columns look very skewed, so it makes more sense to replace them with their sum, which we name AmbientStructSF.
# fig, ax = plt.subplots()
# ax.scatter(X_train['OpenPorchSF'] + X_train['EnclosedPorch'] +
# X_train['WoodDeckSF'] + X_train['3SsnPorch'] + X_train['ScreenPorch'],
# y_train/100000)
# ax.set_ylabel("House Prices in 100 K's")
# ax.set_xlabel("Ambient Structure Area")
# plt.show()
X_train.insert(
X_train.columns.get_loc("WoodDeckSF"),
"AmbientStructSF",
X_train["OpenPorchSF"]
+ X_train["EnclosedPorch"]
+ X_train["WoodDeckSF"]
+ X_train["3SsnPorch"]
+ X_train["ScreenPorch"],
)
X_test.insert(
X_test.columns.get_loc("WoodDeckSF"),
"AmbientStructSF",
X_test["OpenPorchSF"]
+ X_test["EnclosedPorch"]
+ X_test["WoodDeckSF"]
+ X_test["3SsnPorch"]
+ X_test["ScreenPorch"],
)
cols_to_drop.add("WoodDeckSF")
cols_to_drop.add("OpenPorchSF")
cols_to_drop.add("EnclosedPorch")
cols_to_drop.add("3SsnPorch")
cols_to_drop.add("ScreenPorch")
# Next, many houses do not have pools or miscellaneous features. We therefore drop the pool area, pool quality, and miscellaneous-feature columns.
cols_to_drop.add("PoolArea")
cols_to_drop.add("PoolQC")
cols_to_drop.add("MiscFeature")
cols_to_drop.add("MiscVal")
# Finally, we treat the month sold as cyclic data by folding it, mapping each month to its distance (in months) from December.
X_train.loc[X_train["MoSold"] >= 6, "MoSold"] = (
12 - X_train.loc[X_train["MoSold"] >= 6, "MoSold"]
)
X_test.loc[X_test["MoSold"] >= 6, "MoSold"] = (
12 - X_test.loc[X_test["MoSold"] >= 6, "MoSold"]
)
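# An alternative that preserves the full cyclic structure (not used in this notebook,
# shown only as an illustrative sketch on a standalone 1-12 month range) is a
# sine/cosine encoding; applied to MoSold, it would have to replace the fold above.
months = np.arange(1, 13)
month_sin = np.sin(2 * np.pi * months / 12)  # December and January end up close together
month_cos = np.cos(2 * np.pi * months / 12)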
# We are done with analyzing numerical features. Let us move on to categorical features.
# ## Analyzing categorical features
# First, we need to turn MSSubClass into a categorical column.
X_train["MSSubClass"] = X_train["MSSubClass"].astype("str")
X_test["MSSubClass"] = X_test["MSSubClass"].astype("str")
# We identify categorical columns, and fill any null with the string "NA".
s = X_train.drop(cols_to_drop, axis=1).dtypes
obj_cols = list(s[s == object].index)
del s
X_train[obj_cols] = X_train[obj_cols].fillna("NA")
X_test[obj_cols] = X_test[obj_cols].fillna("NA")
# Similar to what we have done for numerical features, let us visualize our categorical features. Due to the high number of categorical features, we split the plots into two chunks.
# l = len(obj_cols)
# fig = plt.figure(constrained_layout = True, figsize = (10,50))
# subfigs = fig.subfigures(nrows = 17, ncols = 1)
# for i in range(17):
# group = X_train.groupby(by = obj_cols[i])
# hist = group[obj_cols[i]].count()
# avg = group['SalePrice'].mean()[hist.index]/100000
# subfigs[i].suptitle(obj_cols[i])
# ax = subfigs[i].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[[obj_cols[i], 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = obj_cols[i], y = 'SalePrice', order = hist.index, jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(hist.index, hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(avg.index, avg)
# ax[2].set_ylabel("Average Price in 100 K's")
# if len(hist.index)>=6:
# ax[0].set_xticks(ax[0].get_xticks())
# ax[0].set_xticklabels(hist.index, rotation = 90, ha = 'right')
# ax[1].set_xticks(ax[0].get_xticks())
# ax[1].set_xticklabels(hist.index, rotation = 90, ha = 'right')
# ax[2].set_xticks(ax[0].get_xticks())
# ax[2].set_xticklabels(hist.index, rotation = 90, ha = 'right')
# We use a simple criterion: we drop features that have more than 1300 counts in a single category. We also take into account how many categories a feature has; if a feature has many categories but a single category holds more than 1250 counts, we also drop it. On the other hand, although Alley and CentralAir are a bit skewed, they have only 2 or 3 categories, so we keep those two features.
cols_to_drop.add("Street")
cols_to_drop.add("Utilities")
cols_to_drop.add("LandSlope")
cols_to_drop.add("RoofMatl")
# fig = plt.figure(constrained_layout = True, figsize = (10,50))
# subfigs = fig.subfigures(nrows = 18, ncols = 1)
# for i in range(18):
# group = X_train.groupby(by = obj_cols[i+17])
# hist = group[obj_cols[i+17]].count()
# avg = group['SalePrice'].mean()[hist.index]/100000
# subfigs[i].suptitle(obj_cols[i+17])
# ax = subfigs[i].subplots(1,3)
# sns.stripplot(ax = ax[0], data = X_train[[obj_cols[i+17], 'SalePrice']].assign(SalePrice = lambda x: x['SalePrice']/100000),
# x = obj_cols[i+17], y = 'SalePrice', order = hist.index, jitter = False)
# ax[0].set_ylabel("House Prices in 100 K's")
# ax[0].set_xlabel(None)
# ax[1].bar(hist.index, hist)
# ax[1].set_ylabel("Count")
# ax[2].bar(avg.index, avg)
# ax[2].set_ylabel("Average Price in 100 K's")
# if len(hist.index)>=6:
# ax[0].set_xticks(ax[0].get_xticks())
# ax[0].set_xticklabels(hist.index, rotation = 90, ha = 'right')
# ax[1].set_xticks(ax[0].get_xticks())
# ax[1].set_xticklabels(hist.index, rotation = 90, ha = 'right')
# ax[2].set_xticks(ax[0].get_xticks())
# ax[2].set_xticklabels(hist.index, rotation = 90, ha = 'right')
cols_to_drop.add("Heating")
cols_to_drop.add("Functional")
cols_to_drop.add("Electrical")
cols_to_drop.add("PavedDrive")
# ## One-hot encoding
# We manually enter the following features for one-hot encoding.
oh_cols = set()
oh_cols.add("MSSubClass")
oh_cols.add("MSZoning")
oh_cols.add("LotShape")
oh_cols.add("LotConfig")
oh_cols.add("Neighborhood")
oh_cols.add("Condition1")
oh_cols.add("BldgType")
oh_cols.add("HouseStyle")
oh_cols.add("RoofStyle")
oh_cols.add("Exterior2nd")
oh_cols.add("MasVnrType")
oh_cols.add("Foundation")
oh_cols.add("BsmtFinType1")
oh_cols.add("GarageType")
oh_cols.add("Fence")
oh_cols.add("SaleType")
oh_cols.add("SaleCondition")
# CentralAir only has two values, so we can manually encode it.
X_train["CentralAir"] = (X_train["CentralAir"] == "Y").astype(int)
X_test["CentralAir"] = (X_test["CentralAir"] == "Y").astype(int)
from sklearn.preprocessing import OneHotEncoder
oh_enc = OneHotEncoder(handle_unknown="ignore")
# pandas column selection expects a list-like, so use a sorted list of the one-hot columns
oh_col_list = sorted(oh_cols)
oh_encoded_train = pd.DataFrame(oh_enc.fit_transform(X_train[oh_col_list]).toarray())
oh_encoded_test = pd.DataFrame(oh_enc.transform(X_test[oh_col_list]).toarray())
oh_encoded_train.index = X_train.index
oh_encoded_test.index = X_test.index
oh_encoded_train.columns = oh_enc.get_feature_names_out()
oh_encoded_test.columns = oh_encoded_train.columns
# Add central air back!
oh_cols.add("CentralAir")
oh_encoded_train = pd.concat([oh_encoded_train, X_train[["CentralAir"]]], axis=1)
oh_encoded_test = pd.concat([oh_encoded_test, X_test[["CentralAir"]]], axis=1)
# ## Ordinal encoding
# We manually identified the following features for ordinal encoding. We customize the orders according to the average graphs plotted earlier.
ord_cols = set(
[
"Alley",
"LandContour",
"ExterQual",
"KitchenQual",
"BsmtQual",
"BsmtExposure",
"HeatingQC",
"FireplaceQu",
"GarageFinish",
]
)
from sklearn.preprocessing import OrdinalEncoder
# Alley
Alley_enc = OrdinalEncoder(categories=[["Grvl", "Pave", "NA"]])
ord_encoded_train = pd.DataFrame(Alley_enc.fit_transform(X_train[["Alley"]]))
ord_encoded_test = pd.DataFrame(Alley_enc.transform(X_test[["Alley"]]))
# LandContour
LandContour_enc = OrdinalEncoder(categories=[["Bnk", "Lvl", "Low", "HLS"]])
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(LandContour_enc.fit_transform(X_train[["LandContour"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[
ord_encoded_test,
pd.DataFrame(LandContour_enc.transform(X_test[["LandContour"]])),
],
axis=1,
)
# ExterQual and KitchenQual: the test set contains "NA" entries not in the category list, so we map unknown values to an encoded sentinel
ExKiQual_enc = OrdinalEncoder(
categories=[["Fa", "TA", "Gd", "Ex"], ["Fa", "TA", "Gd", "Ex"]],
handle_unknown="use_encoded_value",
unknown_value=-1,
)
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(ExKiQual_enc.fit_transform(X_train[["ExterQual", "KitchenQual"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[
ord_encoded_test,
pd.DataFrame(ExKiQual_enc.transform(X_test[["ExterQual", "KitchenQual"]])),
],
axis=1,
)
# BsmtQual
BsmtQual_enc = OrdinalEncoder(categories=[["NA", "Fa", "TA", "Gd", "Ex"]])
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(BsmtQual_enc.fit_transform(X_train[["BsmtQual"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[ord_encoded_test, pd.DataFrame(BsmtQual_enc.transform(X_test[["BsmtQual"]]))],
axis=1,
)
# BsmtExposure
BsmtExposure_enc = OrdinalEncoder(categories=[["NA", "No", "Mn", "Av", "Gd"]])
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(BsmtExposure_enc.fit_transform(X_train[["BsmtExposure"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[
ord_encoded_test,
pd.DataFrame(BsmtExposure_enc.transform(X_test[["BsmtExposure"]])),
],
axis=1,
)
# HeatingQC
HeatingQC_enc = OrdinalEncoder(categories=[["Po", "Fa", "TA", "Gd", "Ex"]])
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(HeatingQC_enc.fit_transform(X_train[["HeatingQC"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[ord_encoded_test, pd.DataFrame(HeatingQC_enc.transform(X_test[["HeatingQC"]]))],
axis=1,
)
# FireplaceQu
FireplaceQu_enc = OrdinalEncoder(categories=[["Po", "NA", "Fa", "TA", "Gd", "Ex"]])
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(FireplaceQu_enc.fit_transform(X_train[["FireplaceQu"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[
ord_encoded_test,
pd.DataFrame(FireplaceQu_enc.transform(X_test[["FireplaceQu"]])),
],
axis=1,
)
# GarageFinish
GarageFinish_enc = OrdinalEncoder(categories=[["NA", "Unf", "RFn", "Fin"]])
ord_encoded_train = pd.concat(
[
ord_encoded_train,
pd.DataFrame(GarageFinish_enc.fit_transform(X_train[["GarageFinish"]])),
],
axis=1,
)
ord_encoded_test = pd.concat(
[
ord_encoded_test,
        pd.DataFrame(GarageFinish_enc.transform(X_test[["GarageFinish"]])),
],
axis=1,
)
ord_encoded_train.index = X_train.index
ord_encoded_test.index = X_test.index
ord_encoded_train.columns = [
"Alley",
"LandContour",
"ExterQual",
"KitchenQual",
"BsmtQual",
"BsmtExposure",
"HeatingQC",
"FireplaceQu",
"GarageFinish",
]
ord_encoded_test.columns = [
"Alley",
"LandContour",
"ExterQual",
"KitchenQual",
"BsmtQual",
"BsmtExposure",
"HeatingQC",
"FireplaceQu",
"GarageFinish",
]
# ## Imputing numerical features
# It remains to impute numerical features. Since there may be null values in different columns among training and test sets, we need to be more careful.
s = X_train.drop(cols_to_drop, axis=1).dtypes
num_cols = list(s[s != object].index)
# Remove CentralAir
num_cols.remove("CentralAir")
s = pd.concat([X_train[num_cols], X_test[num_cols]], axis=0).isnull().sum()
s[s != 0]
# Let us decide what to do with these columns with null values. It should be clear that the missing basement bath and basement square-footage values can be replaced by zero. This can be done with the pandas fillna function, after imputing GarageYrBlt, MasVnrArea, and LotFrontage.
# ## Imputing garage year built
# Let us compare the average prices for houses with and without garage year built.
print(
f"Average price for houses without garage: {X_train.loc[X_train['GarageYrBlt'].isnull()]['SalePrice'].mean()}."
)
print(
f"Average price for houses with garage: {X_train.loc[~X_train['GarageYrBlt'].isnull()]['SalePrice'].mean()}."
)
print(
f"The number of houses without the garage year built in the training set is: {X_train['GarageYrBlt'].isnull().sum()}."
)
# We conclude that houses without a garage tend to have lower prices. The scatter plot of house price against garage year built showed a positive correlation: the newer the garage, the pricier the house tends to be. For this reason, we will impute the missing GarageYrBlt with the average garage year of houses whose prices are around $103317$ (the average price of the houses with a missing GarageYrBlt).
def find_count(df, upper_bound):
    # Build a price window symmetric around 103317 (the average price of houses without a garage)
    dist = upper_bound - 103317
    lower_bound = 103317 - dist
    # Houses with a known GarageYrBlt whose sale price falls inside the window
    filtered = df.loc[
        (~df["GarageYrBlt"].isnull())
        & (df["SalePrice"] <= upper_bound)
        & (df["SalePrice"] >= lower_bound)
    ]
    # Return the number of such houses, their average price, and their average garage year
    return (
        filtered.shape[0],
        filtered["SalePrice"].mean(),
        filtered["GarageYrBlt"].mean(),
    )
find_count(X_train, 112000)
# For this reason, we impute the GarageYrBlt as 1959.
from sklearn.impute import SimpleImputer
GarageYrBlt_imp = SimpleImputer(strategy="constant", fill_value=1959)
# ## Imputing Masonry veneer area
# Based on the earlier scatter plot, houses with zero MasVnrArea have a wide range of sale prices, reaching nearly five hundred thousand. Since the price range for houses with a missing MasVnrArea also lies in this range, it is safe to impute the null values as zero.
MasVnrArea_imp = SimpleImputer(strategy="constant", fill_value=0)
# ## Imputing lot frontage
print(
f"Average price for houses without LotFrontage: {X_train.loc[X_train['LotFrontage'].isnull()]['SalePrice'].mean()}."
)
print(
f"Average price for houses with LotFrontage: {X_train.loc[~X_train['LotFrontage'].isnull()]['SalePrice'].mean()}."
)
print(
f"The number of houses without the LotFrontage in the training set is: {X_train['LotFrontage'].isnull().sum()}."
)
# Based on these numbers, we can simply impute the missing LotFrontage values with the average.
LotFrontage_imp = SimpleImputer()
# In sum, we would impute the missing
# 1. **GarageYrBlt as 1959**,
# 2. **MasVnrArea as 0**,
# 3. **LotFrontage as average**.
from sklearn.compose import ColumnTransformer
clt = ColumnTransformer(
[
("GarageYrBlt_clt", GarageYrBlt_imp, ["GarageYrBlt"]),
("MasVnrArea_clt", MasVnrArea_imp, ["MasVnrArea"]),
("LotFrontage_clt", LotFrontage_imp, ["LotFrontage"]),
]
)
X_train[["GarageYrBlt", "MasVnrArea", "LotFrontage"]] = pd.DataFrame(
clt.fit_transform(X_train)
)
X_test[["GarageYrBlt", "MasVnrArea", "LotFrontage"]] = pd.DataFrame(
clt.transform(X_test)
)
# Impute the remaining missing values as 0
X_train[num_cols] = X_train[num_cols].fillna(0)
X_test[num_cols] = X_test[num_cols].fillna(0)
oh_encoded_train.dtypes
ord_encoded_train.columns
# # XGBoost Modeling
from xgboost import XGBRegressor
my_model = XGBRegressor(
max_depth=2,
n_estimators=450,
subsample=0.83,
colsample_bynode=0.6,
colsample_bytree=0.6,
colsample_bylevel=0.6,
learning_rate=0.14,
)
from sklearn.model_selection import cross_val_score
-1 * cross_val_score(
my_model,
pd.concat([X_train[num_cols], ord_encoded_train, oh_encoded_train], axis=1),
y_train,
cv=4,
scoring="neg_mean_absolute_error",
).mean()
# ## With polynomial features
# We see if the model fits better with polynomial features.
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=2)
scal = StandardScaler()
non_poly_features = [
"MoSold",
"YrSold",
"OverallQual",
"Remodeled",
"Fireplaces",
"GarageCars",
"BsmtBath",
"BathAbvGr",
"TotRmsAbvGrd",
]
poly_features_train = pd.DataFrame(
poly.fit_transform(X_train[num_cols].drop(non_poly_features, axis=1))
)
poly_features_test = pd.DataFrame(
poly.transform(X_test[num_cols].drop(non_poly_features, axis=1))
)
poly_features_train.index = X_train.index
poly_features_test.index = X_test.index
poly_features_train.columns = poly.get_feature_names_out()
poly_features_test.columns = poly_features_train.columns
# Drop the constant bias column, which is not useful for tree-based models like XGBoost
poly_features_train = poly_features_train.drop("1", axis=1)
poly_features_test = poly_features_test.drop("1", axis=1)
# Feature scaling
X_train_scaled = scal.fit_transform(
pd.concat(
[
X_train[non_poly_features],
poly_features_train,
ord_encoded_train,
oh_encoded_train,
],
axis=1,
)
)
X_test_scaled = scal.transform(
pd.concat(
[
X_test[non_poly_features],
poly_features_test,
ord_encoded_test,
oh_encoded_test,
],
axis=1,
)
)
my_model = XGBRegressor(
max_depth=4,
n_estimators=450,
subsample=0.83,
colsample_bynode=0.6,
colsample_bytree=0.6,
colsample_bylevel=0.6,
learning_rate=0.056,
)
from sklearn.model_selection import cross_val_score
-1 * cross_val_score(
my_model, X_train_scaled, y_train, cv=4, scoring="neg_mean_absolute_error"
).mean()
my_model.fit(X_train_scaled, y_train)
preds_test = my_model.predict(X_test_scaled)
output = pd.DataFrame({"Id": X_test["Id"], "SalePrice": preds_test})
output.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # 1.Importing Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.cluster import AgglomerativeClustering
menu = pd.read_csv("/kaggle/input/starbucks-menu/starbucks_drinkMenu_expanded.csv")
menu.head()
menu.shape
# # 2. Preprocessing
menu = menu.replace("Varies", np.nan)
menu = menu.replace("varies", np.nan)
menu = menu.dropna(axis=0)
menu.info()
# Keep Calories and Caffeine (mg), renaming the latter to Caffeine
menu2 = menu.iloc[:, [3, 17]].rename(columns={"Caffeine (mg)": "Caffeine"})
menu2 = menu2.astype("int64")
menu2
# # 3. Visualization
plt.figure(figsize=(6, 6))
plt.scatter(menu2["Calories"], menu2["Caffeine"], color="green")
plt.xlabel("Calories")
plt.ylabel("Caffeine (mg)")
plt.tight_layout()
plt.show()
# # 4. KMeans
wcss = []
for k in range(1, 15):
kmeans = KMeans(n_clusters=k)
kmeans.fit(menu2)
wcss.append(kmeans.inertia_)
plt.figure(figsize=(6, 6))
plt.plot(range(1, 15), wcss)
plt.xlabel("Number of k (cluster) value")
plt.ylabel("wcss")
plt.show()
# We can take the elbow point as k = 4
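# As a quick sanity check on the choice of k (added here as an illustrative sketch,
# not part of the original notebook), the silhouette score can be compared across
# a few candidate cluster counts:
from sklearn.metrics import silhouette_score

for k in range(2, 7):
    labels_k = KMeans(n_clusters=k, random_state=42).fit_predict(menu2)
    print(k, silhouette_score(menu2, labels_k))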
kmean2 = KMeans(n_clusters=4)
clusters = kmean2.fit_predict(menu2)
menu2["labels"] = clusters
plt.figure(figsize=(7, 6))
colors = {0: "red", 1: "blue", 2: "green", 3: "purple"}
for label, color in colors.items():
plt.scatter(
menu2.Calories[menu2.labels == label],
menu2.Caffeine[menu2.labels == label],
color=color,
)
plt.scatter(
kmean2.cluster_centers_[:, 0],
kmean2.cluster_centers_[:, 1],
color="black",
marker="*",
linewidths=2,
)
plt.show()
# # 5. Hierarchical Clustering
menu3 = menu2.iloc[:, menu2.columns != "labels"]
merg = linkage(menu3, method="ward")
dendrogram(merg, leaf_rotation=90)
plt.xlabel("Data points")
plt.ylabel("Euclidean Distance")
plt.show()
hierarc_cluster = AgglomerativeClustering(n_clusters=4, linkage="ward")  # ward linkage implies Euclidean distance
clust = hierarc_cluster.fit_predict(menu3)
menu3["labels"] = clust
plt.figure(figsize=(7, 6))
colors = {0: "red", 1: "blue", 2: "green", 3: "purple"}
for label, color in colors.items():
plt.scatter(
menu3.Calories[menu3.labels == label],
menu3.Caffeine[menu3.labels == label],
color=color,
)
plt.show()
|
# # Machine Learning
# **Machine learning is like teaching a computer how to learn from examples, rather than giving it step-by-step instructions. This allows the computer to find patterns and make predictions based on data, which can be useful for things like recognizing faces or making personalized recommendations.**
# # Deep Learning
# **Deep learning is like teaching a computer to learn and think like a human brain, but without emotions or consciousness. It's like training a superhero with superpowers to recognize things, understand language, and make decisions. Deep learning uses artificial neural networks to process huge amounts of data and extract complex features from it, allowing the computer to recognize patterns and make predictions with a high level of accuracy. This technology is used in a wide range of applications, from self-driving cars and virtual assistants to medical diagnosis and drug discovery. With deep learning, computers can perform tasks that were once thought to be impossible for machines, and continue to push the boundaries of what's possible in artificial intelligence.**
# ## Model
# **In machine learning, a model is like a virtual magician that can make predictions about the future based on patterns and tricks it has learned from a huge amount of data. It's like having a crystal ball that can tell you what's going to happen next! Think of the model as a magician's hat that can pull out a rabbit, a scarf, or even a prediction about the weather. The magician (or the model) has learned how to perform these tricks by practicing on different data sets, just like a magician practices their craft over time. The better the magician (or the model), the more accurate their predictions will be. So, when you hear people talk about machine learning models, just remember that they're talking about virtual magicians that can help us see into the future!**
# ### Language Model
# **A language model is like a virtual writer that can generate text based on the patterns it has learned from a vast amount of data. It's like having an AI-powered assistant that can help you write emails, articles, or even entire books! ChatGPT, on the other hand, is a language model that specializes in having conversations with people, just like we're doing right now. Think of ChatGPT as a friendly and intelligent chatbot that can answer your questions, tell you jokes, or even have philosophical discussions with you. With its impressive ability to understand human language and generate coherent responses, ChatGPT is a powerful tool that can help us communicate and learn in ways we never thought possible before.**
# ________________________________________________
# ## Convolutional Neural Network in PyTorch
# **Dataset:**
# A Dataset is an abstraction provided by PyTorch that represents a collection of data samples. It is a Python class that should implement two main methods: `__getitem__()` and `__len__()`.
# `__getitem__`: Given an index, this method should return a single data sample (usually a tuple containing an input and its corresponding target).
# `__len__`: This method should return the total number of data samples in the dataset.
# Custom datasets can be created by subclassing the torch.utils.data.Dataset class and implementing these two methods. PyTorch also provides built-in Dataset classes for common datasets like CIFAR-10, MNIST, and ImageNet.
# **DataLoader:**
# A DataLoader is a higher-level utility that is built on top of the Dataset class. It is responsible for efficiently loading, batching, and shuffling the data in a parallel and memory-efficient way. It is designed to work with any Dataset and takes care of the following functionalities:
# Batching: Combining multiple data samples into a single batch for processing.
# Shuffling: Randomly shuffling the order of data samples to avoid overfitting and improve the generalization of the model.
# Parallelism: Loading and preprocessing data using multiple worker processes to speed up the data loading process.
# Memory management: Efficiently handling large datasets by loading data in chunks, so that not all data needs to be loaded into memory at once.
# You can create a DataLoader by instantiating the torch.utils.data.DataLoader class and passing in the Dataset along with other configuration options like batch_size, shuffle, and num_workers.
# In summary, a Dataset represents the data samples in a structured way, while a DataLoader is responsible for efficiently loading, batching, and shuffling the data from the Dataset for use in training and evaluation of deep learning models.
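# The sketch below is a minimal, self-contained illustration of these two abstractions. It is not part of the CIFAR-10 pipeline that follows; `ToyDataset`, `toy_ds`, and `toy_loader` are hypothetical names, and the random tensors stand in for real data.
import torch
from torch.utils.data import Dataset, DataLoader


class ToyDataset(Dataset):
    """Holds paired inputs and targets kept in memory as tensors."""

    def __init__(self, inputs, targets):
        assert len(inputs) == len(targets)
        self.inputs = inputs
        self.targets = targets

    def __len__(self):
        # Total number of samples in the dataset.
        return len(self.inputs)

    def __getitem__(self, idx):
        # Return a single (input, target) pair for the given index.
        return self.inputs[idx], self.targets[idx]


# 100 random 3x32x32 "images" with integer labels in [0, 10).
toy_ds = ToyDataset(torch.randn(100, 3, 32, 32), torch.randint(0, 10, (100,)))
# The DataLoader takes care of batching, shuffling, and (optionally) multi-process loading.
toy_loader = DataLoader(toy_ds, batch_size=4, shuffle=True, num_workers=0)
for batch_inputs, batch_targets in toy_loader:
    # Each iteration yields a stacked batch: [4, 3, 32, 32] inputs and [4] targets.
    print(batch_inputs.shape, batch_targets.shape)
    break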
# These lines import the necessary libraries, such as PyTorch, torchvision, and matplotlib.
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyper-parameters
num_epochs = 5
batch_size = 4
learning_rate = 0.001
# dataset has PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
# CIFAR10: 60000 32x32 color images in 10 classes, with 6000 images per class
train_dataset = torchvision.datasets.CIFAR10(
root="./data", train=True, download=True, transform=transform
)
test_dataset = torchvision.datasets.CIFAR10(
root="./data", train=False, download=True, transform=transform
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size, shuffle=False
)
classes = (
"plane",
"car",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
)
def imshow(img):
# This line unnormalizes the input image tensor.
# The images were initially normalized to have values in the range of [-1, 1].
# This line of code transforms the values back into the range [0, 1] so that they can be properly displayed.
img = img / 2 + 0.5 # unnormalize
# print(dir(img))
npimg = img.numpy()
# Transpose the array to match the expected format of matplotlib
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# get some random training images
# Here, we create a Python iterator from the train_loader.
# The train_loader is an instance of PyTorch's DataLoader that we created earlier
# to efficiently load, batch, and shuffle the training data.
dataiter = iter(train_loader)
# This line retrieves the next batch of training images and their corresponding labels from the iterator.
# The next() function returns the next item from the iterator. In this case, it returns a batch of images and their respective labels.
# Since our batch_size is set to 4, this line will return 4 random training images and their labels.
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# The lines of code define a Convolutional Neural Network (ConvNet) model in PyTorch for image classification. The ConvNet class inherits from the nn.Module class, which provides basic building blocks for neural networks. The __init__ method defines the layers of the model, which include convolutional layers (nn.Conv2d), max pooling layers (nn.MaxPool2d), and fully connected layers (nn.Linear). The forward method defines the forward pass of the model, which involves passing the input data through the layers in a sequential manner and returning the output.
# The model = ConvNet().to(device) line creates an instance of the ConvNet class and moves it to a specified device (e.g., CPU or GPU). The criterion and optimizer lines define the loss function and optimization algorithm to be used during training. Finally, n_total_steps is the total number of iterations required to process all the training data (i.e., batches) in one epoch.
# ### CNN
# This code defines a custom convolutional neural network called ConvNet by subclassing nn.Module. The ConvNet class has the following layers:
# self.conv1: The first convolutional layer has 3 input channels (for the RGB channels of the input image), 6 output channels (filters), and a kernel size of 5.
# self.pool: A max-pooling layer with a kernel size of 2 and a stride of 2. It is used after both convolutional layers.
# self.conv2: The second convolutional layer has 6 input channels (coming from the output of the first convolutional layer), 16 output channels (filters), and a kernel size of 5.
# self.fc1: The first fully connected (linear) layer maps the 16 * 5 * 5 input features to 120 output features.
# self.fc2: The second fully connected (linear) layer maps 120 input features to 84 output features.
# self.fc3: The third fully connected (linear) layer maps 84 input features to 10 output features, corresponding to the 10 classes in the CIFAR-10 dataset.
# ### Forward
# The forward method defines the forward pass of the network:
# The input x (shape: n, 3, 32, 32) is passed through the first convolutional layer followed by a ReLU activation function and then the max-pooling layer. The resulting tensor has a shape of (n, 6, 14, 14).
# The output from the previous step is passed through the second convolutional layer followed by a ReLU activation function and then the max-pooling layer. The resulting tensor has a shape of (n, 16, 5, 5).
# The output from the previous step is flattened to have a shape of (n, 400), where n is the number of samples in the input batch.
# The flattened tensor is passed through the first fully connected layer followed by a ReLU activation function. The resulting tensor has a shape of (n, 120).
# The output from the previous step is passed through the second fully connected layer followed by a ReLU activation function. The resulting tensor has a shape of (n, 84).
# The output from the previous step is passed through the third fully connected layer. The resulting tensor has a shape of (n, 10), which represents the logits for each of the 10 classes.
# The forward pass returns the logits for each class, which can then be used for calculating the loss and updating the model's weights during training.
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# -> n, 3, 32, 32
x = self.pool(F.relu(self.conv1(x))) # -> n, 6, 14, 14
x = self.pool(F.relu(self.conv2(x))) # -> n, 16, 5, 5
x = x.view(-1, 16 * 5 * 5) # -> n, 400
x = F.relu(self.fc1(x)) # -> n, 120
x = F.relu(self.fc2(x)) # -> n, 84
x = self.fc3(x) # -> n, 10
return x
model = ConvNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
n_total_steps = len(train_loader)
#!pip install torchsummary
#!pip install graphviz
#!pip install torchviz
from torchsummary import summary
# Load the saved model
model = ConvNet().to(device)
# Display the model architecture
summary(model, input_size=(3, 32, 32))
import torchviz
# Load the saved model
model = ConvNet()
# Create a sample input tensor
sample_input = torch.zeros([1, 3, 32, 32])
# Forward pass to get the output
sample_output = model(sample_input)
# Generate the model architecture plot
torchviz.make_dot(sample_output, params=dict(model.named_parameters()))
for epoch in range(num_epochs):
"""Loop over the dataset num_epochs times"""
for i, (images, labels) in enumerate(train_loader):
"""Loop through batches of images and labels"""
# Move images and labels to the appropriate device (GPU or CPU)
images = images.to(device)
labels = labels.to(device)
# Forward pass: Pass the images through the model and compute the loss
outputs = model(images)
loss = criterion(outputs, labels)
# Backward pass: Compute gradients of the loss with respect to model parameters
optimizer.zero_grad() # Reset gradients for each batch
loss.backward() # Compute gradients
optimizer.step() # Update model parameters
if (i + 1) % 2000 == 0:
"""Print progress every 2000 steps"""
print(
f"Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{n_total_steps}], Loss: {loss.item():.4f}"
)
print("Finished Training")
# Save the trained model
PATH = "./cnn.pth"
torch.save(model.state_dict(), PATH)
# Evaluate the model on the test dataset
with torch.no_grad():
n_correct = 0
n_samples = 0
n_class_correct = [0 for i in range(10)]
n_class_samples = [0 for i in range(10)]
for images, labels in test_loader:
"""Loop through batches of test images and labels"""
# Move images and labels to the appropriate device (GPU or CPU)
images = images.to(device)
labels = labels.to(device)
# Make predictions using the trained model
outputs = model(images)
_, predicted = torch.max(
outputs, 1
) # Get the class with the highest probability
# Update the overall and per-class accuracy statistics
n_samples += labels.size(0)
n_correct += (predicted == labels).sum().item()
for i in range(batch_size):
label = labels[i]
pred = predicted[i]
if label == pred:
n_class_correct[label] += 1
n_class_samples[label] += 1
# Compute and print overall and per-class accuracy
acc = 100.0 * n_correct / n_samples
print(f"Accuracy of the network: {acc} %")
for i in range(10):
acc = 100.0 * n_class_correct[i] / n_class_samples[i]
print(f"Accuracy of {classes[i]}: {acc} %")
import os
from PIL import Image
def predict_image(image_path, model):
"""Predict the class of an image using the trained model"""
# Load the image and convert it to RGB
img = Image.open(image_path).convert("RGB")
# Apply the same transformations used during training
transform = transforms.Compose(
[
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
img_tensor = transform(img)
# Add batch dimension
img_tensor = img_tensor.unsqueeze(0).to(device)
# Make a prediction using the trained model
with torch.no_grad():
output = model(img_tensor)
_, predicted_class = torch.max(output, 1)
return predicted_class.item()
# Load the saved model
model = ConvNet().to(device)
model.load_state_dict(torch.load(PATH))
# Set the model to evaluation mode
model.eval()
# Ask the user for the image path
image_path = input("Please enter the path to the image you'd like to classify: ")
# Check if the path is valid
if os.path.isfile(image_path):
# Predict the class of the image
predicted_class = predict_image(image_path, model)
print(f"The predicted class is: {classes[predicted_class]}")
else:
print("Invalid image path. Please check the path and try again.")
import os
import tkinter as tk
from tkinter import filedialog
from PIL import Image
def predict_image(image_path, model):
"""Predict the class of an image using the trained model"""
# Load the image and convert it to RGB
img = Image.open(image_path).convert("RGB")
# Apply the same transformations used during training
transform = transforms.Compose(
[
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
img_tensor = transform(img)
# Add batch dimension
img_tensor = img_tensor.unsqueeze(0).to(device)
# Make a prediction using the trained model
with torch.no_grad():
output = model(img_tensor)
_, predicted_class = torch.max(output, 1)
return predicted_class.item()
def select_image():
"""Open a file dialog to let the user select an image"""
# Open the file dialog and get the selected file path
file_path = filedialog.askopenfilename()
if file_path:
# Predict the class of the image
predicted_class = predict_image(file_path, model)
result_label.config(text=f"The predicted class is: {classes[predicted_class]}")
# Load the saved model
model = ConvNet().to(device)
model.load_state_dict(torch.load("cnn.pth"))
# Set the model to evaluation mode
model.eval()
# Create the tkinter GUI
root = tk.Tk()
root.title("Image Classifier")
# Create a button to select an image
select_button = tk.Button(root, text="Select Image", command=select_image)
select_button.pack(pady=10)
# Create a label to display the result
result_label = tk.Label(root, text="")
result_label.pack(pady=10)
# Run the tkinter GUI
root.mainloop()
# **note**:
# In PyTorch, the nn.functional module (often imported as F) and the nn module both provide functions for creating layers in a neural network. The main difference between the two is that the nn module provides classes for creating layers as objects that have internal state (i.e., trainable parameters), while nn.functional provides stateless functions that can be used to perform operations without parameters.
# For example, nn.Conv2d is a class that creates a convolutional layer with trainable parameters, while F.conv2d is a stateless function that performs the convolution with weights you pass in explicitly. Similarly, nn.ReLU is a module that wraps the ReLU activation as a layer object (it holds no trainable parameters), while F.relu is a stateless function that performs the same ReLU operation directly.
# So, if you want to use a layer with trainable parameters that can be optimized during training, you should use the nn module to create the layer. On the other hand, if you just want to apply a non-linear activation function to your data without any trainable parameters, you can use the corresponding function from nn.functional.
# **note:**
# A stateless function is a function that does not rely on or modify any external state or variables outside of its own inputs and outputs. In other words, given the same input, it will always produce the same output regardless of any other context or external variables.
# In the context of programming, a stateless function is often considered more predictable, testable, and easier to reason about than a function that relies on external state or variables. This is because it does not have any side effects or hidden dependencies that could affect its behavior.
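# The short sketch below illustrates both notes with hypothetical tensors (illustration only, not part of the CIFAR-10 model above): the module objects and the stateless functional calls produce the same values.
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(2, 4)  # a hypothetical input tensor
relu_module = nn.ReLU()  # a layer object you can register inside a model
out_module = relu_module(x)  # module call
out_functional = F.relu(x)  # stateless functional call
print(torch.equal(out_module, out_functional))  # True: both apply element-wise max(x, 0)

# By contrast, nn.Conv2d owns trainable weights, while F.conv2d expects the weights explicitly.
conv = nn.Conv2d(3, 6, kernel_size=3, padding=1)
img = torch.randn(1, 3, 8, 8)
out_layer = conv(img)  # uses conv.weight and conv.bias internally
out_func = F.conv2d(img, conv.weight, conv.bias, padding=1)  # same result with explicit parameters
print(torch.allclose(out_layer, out_func))  # True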
import torch
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
num_params = count_parameters(model)
print(f"Number of trainable parameters: {num_params}")
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
dataset = pd.read_csv("/kaggle/input/mental-health-in-tech-survey/survey.csv")
dataset_head = dataset.head()
dataset_head
dataset.info()
dataset.columns
dataset.shape
dataset.isnull().sum()
dataset.sort_values(by="Age", ascending=True).tail()
dataset = dataset.drop(["Timestamp"], axis=1)
dataset = dataset.drop(["state"], axis=1)
dataset = dataset.drop(["comments"], axis=1)
dataset = dataset.drop(["state"], axis=1)
dataset.head()
dataset.info()
dataset["Gender"].unique()
replace_G = {
"Female": "F",
"Male": "M",
"male": "M",
"female": "F",
"m": "M",
"Male-ish": "M",
"maile": "M",
"Trans-female": "T",
"Cis Female": "F",
"something kinda male?": "M",
"Cis Male": "M",
"Woman": "F",
"f": "F",
"Mal": "M",
"Male (CIS)": "M",
"queer/she/they": "F",
"non-binary": "T",
"Enby": "T",
"Femake": "F",
"woman": "F",
"Make": "M",
"fluid": "T",
"Malr": "M",
"cis male": "M",
"Female (cis)": "F",
"Guy (-ish) ^_^": "M",
"queer": "T",
"Female (trans)": "T",
"male leaning androgynous": "T",
"Neuter": "T",
"cis-female/femme": "F",
"msle": "M",
"Agender": "T",
"Genderqueer": "T",
"Female": "F",
"Androgyne": "T",
"Nah": "T",
"All": "T",
"Female ": "F",
"Male ": "M",
"Man": "M",
"Trans woman": "T",
"Mail": "M",
"A little about you": "T",
"Malr": "T",
"p": "T",
"femail": "F",
"Cis Man": "M",
"ostensibly male": "M",
"unsure what that really means": "T",
}
dataset["Gender"] = dataset["Gender"].map(replace_G).fillna(dataset["Gender"])
dataset["Gender"].unique()
dataset["Age"].unique()
dataset = dataset[dataset["Age"] != 99999999999]
dataset = dataset[dataset["Age"] != -29]
dataset = dataset[dataset["Age"] != 329]
dataset = dataset[dataset["Age"] != -1726]
dataset = dataset[dataset["Age"] != 5]
dataset = dataset[dataset["Age"] != 8]
dataset = dataset[dataset["Age"] != -1]
dataset = dataset[dataset["Age"] != 11]
dataset["Age"].unique()
dataset["treatment"] = dataset["treatment"].replace("No", 0)
dataset["treatment"] = dataset["treatment"].replace("Yes", 1)
dataset["treatment"].unique()
dataset["work_interfere"] = dataset["work_interfere"].fillna("Not available")
sns.heatmap(dataset.isnull(), cbar=False)
plt.subplots(figsize=(15, 6))
ax = dataset.plot.bar()
ax.set_xlabel("no_employees")
ax.set_ylabel("Age")
ax.set_title("no_employees")
plt.show()
plt.figure(figsize=(10, 7))
sns.barplot(data=dataset, x="Age", y="mental_health_consequence")
plt.xticks(rotation=90)
plt.title("mental_health_consequence in age ")
plt.show()
corr_data = dataset.corr()
plt.figure(figsize=(12, 9))
sns.heatmap(corr_data, annot=True, cmap="YlGnBu")
dataset[dataset["work_interfere"].isna()]["treatment"].value_counts()
dataset.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for i in range(24):
dataset.iloc[:, i] = le.fit_transform(dataset.iloc[:, i])
# # Libraries import
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
# # Train Test Split
x = dataset.iloc[:, :-1]
y = dataset.obs_consequence
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# # Model 1 (Logistic Regression)
def logisticRegression():
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print("########### Logistic Regression ###############")
print("Accuracy of Logistic Regression :", accuracy_score(y_test, y_pred))
print(
"Classification report for Logistic Regression :\n",
classification_report(y_test, y_pred),
)
logisticRegression()
# # Model 2 (Linear Regression)
# create a LinearRegression object
reg = LinearRegression()
# fit the data to the model
reg.fit(X_train, y_train)
# predict on new data and threshold the continuous output at 0.5 to obtain class labels
y_pred = (reg.predict(X_test) >= 0.5).astype(int)
print("########### Linear Regression ###############")
print("Accuracy of Linear Regression :", accuracy_score(y_test, y_pred))
# # Model 3 (KNN)
knn = KNeighborsClassifier(n_neighbors=3)
# Fit the model to the data
knn.fit(X_train, y_train)
# Predict the classes of new data
y_pred = knn.predict(X_test)
# Print the accuracy score
print("########### KNN for k=3 ###############")
print("Accuracy:", knn.score(X_test, y_test))
# # Model 4 (Random Forest)
rf = RandomForestClassifier(n_estimators=100)
# Fit the model to the data
rf.fit(X_train, y_train)
# Predict the classes of new data
y_pred = rf.predict(X_test)
# accuracy evaluation
print("Accuracy:", rf.score(X_test, y_test))
# # Model 5 (Decision Tree )
# object creation
dt = DecisionTreeClassifier(max_depth=3)
# Fit the model to the data
dt.fit(X_train, y_train)
# Predict the classes of new data
y_pred = dt.predict(X_test)
# Print the accuracy score
print("Accuracy:", dt.score(X_test, y_test))
|
# **IMPORTING FILE PATHS**
import os
# pandas and numpy are needed below for reading the Excel file and building arrays
import numpy as np
import pandas as pd
files = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
files += [os.path.join(dirname, filename)]
files
# **TAKING A SINGLE FILE**
file = files[0]
print(file)
# **READING FILE DATA FROM EXCEL**
data_set = pd.read_excel(file)
data_set
# **DROPPING USELESS DATA**
# /// absolutely painful line: drop the unusable row at index 25
data_set2 = data_set.drop(25)
data_set2
# **Saving lat and long data in lists**
lat = []
long = []
data = []
for items in data_set2:
item = items.split("/")
# print(item)
data.append(list(map(float, item)))
lat.append(item[0])
long.append(item[1])
print(len(lat))
print(len(long))
print(np.array(data))
# **Data Preprocessing**
x_data = np.arange(1, len(lat) + 1, 1).reshape(-1, 1)
y_data = np.array(data)
print(y_data)
training_size = round(0.8 * len(x_data))
x_train_data = x_data[0:training_size]
print(len(x_train_data))
y_train_data = y_data[0:training_size]
print(len(y_train_data))
x_test_data = x_data[training_size : len(x_data) + 1]
print(len(x_test_data))
y_test_data = y_data[training_size : len(y_data) + 1]
print(len(y_test_data))
# **Training the model (Polynomial Regression)**
# Training
import matplotlib.pyplot as mtp
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
scaler = MinMaxScaler()
score = []
for i in range(1, 100):
poly_regs = PolynomialFeatures(degree=i)
x_poly = poly_regs.fit_transform(x_train_data.reshape(-1, 1))
lin_reg_2 = LinearRegression()
lin_reg_2.fit(x_poly, y_train_data)
# Prediction
poly_pred = lin_reg_2.predict(poly_regs.fit_transform(x_test_data))
# print(poly_pred)
x_test_poly = poly_regs.fit_transform(x_test_data.reshape(-1, 1))
score.append(lin_reg_2.score(x_test_poly, y_test_data))
print(np.array(score).max())
# **Visualisation**
mtp.scatter(x_train_data, y_train_data, color="blue")
mtp.plot(
x_train_data, lin_reg_2.predict(poly_regs.fit_transform(x_train_data)), color="red"
)
mtp.title("Storm Path detection (Polynomial Regression)")
mtp.xlabel("Time Stamp")
mtp.ylabel("Latitude")
mtp.show()
|
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
df = pd.read_excel("/kaggle/input/tiapose-dataset/bebidas.xlsx")
df.index = pd.to_datetime(df["DATA"], format="%Y-%m-%d")
bud = df[["BUD"]]
scaler = StandardScaler()
bud_scaled = scaler.fit_transform(bud)
def df_to_X_y(df, window_size=7):
df_as_np = df.flatten()
X = []
y = []
for i in range(len(df_as_np) - window_size):
row = [[a] for a in df_as_np[i : i + window_size]]
X.append(row)
label = df_as_np[i + window_size]
y.append(label)
return np.array(X), np.array(y)
WINDOW_SIZE = 7
X, y = df_to_X_y(bud_scaled, WINDOW_SIZE)
X.shape, y.shape
X_train, y_train = X[:500], y[:500]
X_val, y_val = X[500:583], y[500:583]
X_test, y_test = X[583:], y[583:]
X_train.shape, y_train.shape, X_test.shape, y_test.shape
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.optimizers import Adam
model = Sequential()
model.add(InputLayer((7, 1)))
model.add(LSTM(64, dropout=0.2))
model.add(Dense(8, "relu"))
model.add(Dense(1, "linear"))
model.summary()
def root_mean_squared_error(y_true, y_pred):
mse = mean_squared_error(y_true, y_pred)
rmse = np.sqrt(mse)
return rmse
cp = ModelCheckpoint("model1/", save_best_only=True)
model.compile(
loss=MeanSquaredError(),
optimizer=Adam(learning_rate=0.03),
metrics=[RootMeanSquaredError()],
)
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=10, callbacks=[cp])
test_predictions = model.predict(X_test)
X_test_back = scaler.inverse_transform(test_predictions)
y_test_back = scaler.inverse_transform([y_test])
root_mean_squared_error(X_test_back.flatten(), y_test_back.flatten())
plt.plot(X_test_back.flatten())
plt.plot(y_test_back.flatten())
# # Part 2
model2 = Sequential()
model2.add(InputLayer((7, 1)))
model2.add(Conv1D(64, kernel_size=2))
model2.add(Flatten())
model2.add(Dense(8, "relu"))
model2.add(Dense(1, "linear"))
model2.summary()
cp2 = ModelCheckpoint("model2/", save_best_only=True)
model2.compile(
loss=MeanSquaredError(),
optimizer=Adam(learning_rate=0.01),
metrics=[RootMeanSquaredError()],
)
model2.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=10, callbacks=[cp2])
test_predictions = model2.predict(X_test)
X_test_back = scaler.inverse_transform(test_predictions)
y_test_back = scaler.inverse_transform([y_test])
root_mean_squared_error(X_test_back.flatten(), y_test_back.flatten())
model3 = Sequential()
model3.add(InputLayer((7, 1)))  # window size is 7, matching X_train
model3.add(GRU(64))
model3.add(Dense(8, "relu"))
model3.add(Dense(1, "linear"))
model3.summary()
cp3 = ModelCheckpoint("model3/", save_best_only=True)
model3.compile(
loss=MeanSquaredError(),
optimizer=Adam(learning_rate=0.001),
metrics=[RootMeanSquaredError()],
)
model3.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=10, callbacks=[cp3])
test_predictions = model3.predict(X_test)
X_test_back = scaler.inverse_transform(test_predictions)
y_test_back = scaler.inverse_transform([y_test])
root_mean_squared_error(X_test_back.flatten(), y_test_back.flatten())
|
# # TELECOMMUNICATION
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# # Data Preprocess
df = pd.read_csv("/kaggle/input/telecom-users-dataset/telecom_users.csv")
print(df.shape)
df.head()
df.info()
# * The data has no null values, so that is great.
# * We do not need some of these columns for either the data analysis or the prediction steps.
# * Also, 'Yes' and 'No' strings are not ideal inputs for a classification model, so I will change them to 1 = 'Yes' and 0 = 'No'.
df = df.drop(["Unnamed: 0", "customerID"], axis=1)
df = df.replace("No", 0)
df = df.replace("Yes", 1)
df = df.replace("No internet service", 0)
df.head()
df["InternetService"].value_counts()
df["InternetService"] = df["InternetService"].replace("Fiber optic", 2)
df["InternetService"] = df["InternetService"].replace("DSL", 1)
# **Internet service:**
# * Fiber optic = 2
# * DSL = 1
df["Contract"].value_counts()
df["Contract"] = df["Contract"].replace("Two year", 24)
df["Contract"] = df["Contract"].replace("One year", 12)
df["Contract"] = df["Contract"].replace("Month-to-month", 1)
# **Contract:**
# * 'Month-to-month' = 1
# * 'Two Year' = 24
# * 'One Year' = 12
df["PaymentMethod"].value_counts()
df["PaymentMethod"] = df["PaymentMethod"].replace("Electronic check", 1)
df["PaymentMethod"] = df["PaymentMethod"].replace("Mailed check", 2)
df["PaymentMethod"] = df["PaymentMethod"].replace("Bank transfer (automatic)", 3)
df["PaymentMethod"] = df["PaymentMethod"].replace("Credit card (automatic)", 4)
df["MultipleLines"] = df["MultipleLines"].replace("No phone service", 0)
# **Payment Method:**
# * 'Electronic Check' = 1
# * 'Mailed Check' = 2
# * 'Bank Transfer' = 3
# * 'Credit Card' = 4
df["gender"] = df["gender"].replace("Male", 1)
df["gender"] = df["gender"].replace("Female", 0)
# As you can see above, we can also relabel the *contract*, *internet service* and *payment method* types without changing the purpose of the columns. I labeled them accordingly and will show these labels when visualizing the data.
# Finally, I think the monthly and total charge values can be integers. They are determinant factors, but their fractional parts are not that important.
df["MultipleLines"] = df["MultipleLines"].astype(int)
df["MonthlyCharges"] = df["MonthlyCharges"].astype(int)
df["TotalCharges"] = df["TotalCharges"].astype(str)
df["TotalCharges"] = df["TotalCharges"].replace(" ", 0)
df["TotalCharges"] = df["TotalCharges"].astype(float)
total_charge = df["TotalCharges"]
for i in range(0, len(df) - 1):
total_charge[i] = int(total_charge[i])
df["TotalCharges"] = df["TotalCharges"].astype(int)
df.head()
df.info()
# Now the data is ready. Every column is in integer form.
# I will look at each column's correlation with *Churn*; if I find any uncorrelated columns, I will drop them.
# # Simple Data Analysis
sns.barplot(x="Partner", y="MonthlyCharges", data=df)
sns.barplot(x="Contract", y="MonthlyCharges", data=df)
plt.ylabel("Charge")
sns.barplot(x="SeniorCitizen", y="MonthlyCharges", data=df)
plt.xlabel("Non senior - Senior")
sns.barplot(x="InternetService", y="MonthlyCharges", data=df)
plt.xlabel("No Internet - DSL - Fiber optic")
# * Having a partner or not does not affect the monthly charges much; the effect is very minimal.
# * Contract length affects monthly costs. The longest contract has the lowest prices because of the customer's commitment.
# * Senior citizens pay more than non-senior citizens. That might be because younger people use more internet, so their contracts have more throughput.
# * Fiber optic is the fastest and most reliable internet right now. It is no surprise that it is the most expensive one.
# # Prediction
corr = df.corr()
plt.figure(figsize=(30, 10))
sns.heatmap(corr, cmap="coolwarm", annot=True)
plt.show()
# I will not be looking at each column one by one. I will choose the most correlated ones and use them in my models.
corr[abs(corr["Churn"]) > 0.1].index
df = df[
[
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"InternetService",
"OnlineSecurity",
"TechSupport",
"Contract",
"PaperlessBilling",
"PaymentMethod",
"MonthlyCharges",
"TotalCharges",
"Churn",
]
]
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import metrics
x = df.drop(["Churn"], axis=1)
y = df["Churn"]
x.shape, y.shape
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
# **LOGISTIC REGRESSION**
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression()
logistic.fit(x_train, y_train)
prediction_lr = logistic.predict(x_test)
print(classification_report(y_test, prediction_lr))
metrics.plot_roc_curve(logistic, x_test, y_test)
# **DECISION TREE**
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
tree.fit(x_train, y_train)
prediction_dt = tree.predict(x_test)
print(classification_report(y_test, prediction_dt))
metrics.plot_roc_curve(tree, x_test, y_test)
# **RANDOM FOREST**
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier()
forest.fit(x_train, y_train)
prediction_rf = forest.predict(x_test)
print(classification_report(y_test, prediction_rf))
metrics.plot_roc_curve(forest, x_test, y_test)
# **ARTIFICIAL NEURAL NETWORK**
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Dropout
x.shape
model = Sequential(
[
Dense(32, activation="relu", input_dim=12),
Dropout(0.5),
Dense(16, activation="relu"),
Dense(1, activation="sigmoid"),
]
)
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["accuracy"])
model.summary()
history = model.fit(x_train, y_train, batch_size=10, epochs=200, verbose=2)
print(history.history.keys())
plt.plot(history.history["accuracy"], label="Accuracy", color="blue")
plt.plot(history.history["loss"], label="Loss", color="red")
plt.legend()
prediction_nn = model.predict(x_test)
prediction_nn = [1 if y >= 0.5 else 0 for y in prediction_nn]
print(classification_report(y_test, prediction_nn))
|
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras as keras
import rawpy
import random
import matplotlib.pyplot as plt
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import (
UpSampling2D,
Conv2D,
MaxPooling2D,
Dropout,
concatenate,
Layer,
)
from tensorflow.keras.utils import plot_model
# This might be useful later, but make sure you understand what it does before using it.
# physical_devices = tf.config.list_physical_devices("GPU")
# tf.config.experimental.set_memory_growth(physical_devices[0], False)
# ## Data Loading
# ### Loading image paths
directory = "/kaggle/input/sid-sony"
def correct_path(row):
row = dict(row)
row["short"] = os.path.abspath(os.path.join(directory, row["short"]))
row["long"] = os.path.abspath(os.path.join(directory, row["long"]))
return pd.Series(row)
df_train = pd.read_csv(
"/kaggle/input/sid-sony/Sony_train_list.txt",
sep=" ",
names=["short", "long", "iso", "aperture"],
)[["short", "long"]].apply(correct_path, axis=1)
df_val = pd.read_csv(
"/kaggle/input/sid-sony/Sony_val_list.txt",
sep=" ",
names=["short", "long", "iso", "aperture"],
)[["short", "long"]].apply(correct_path, axis=1)
df_test = pd.read_csv(
"/kaggle/input/sid-sony/Sony_test_list.txt",
sep=" ",
names=["short", "long", "iso", "aperture"],
)[["short", "long"]].apply(correct_path, axis=1)
short_train = list(df_train["short"].values)
long_train = list(df_train["long"].values)
short_val = list(df_val["short"].values)
long_val = list(df_val["long"].values)
short_test = list(df_test["short"].values)
long_test = list(df_test["long"].values)
# ### Loading data
# patch size
ps = 256
def get_image_raw(path):
return rawpy.imread(path)
def pack_raw(raw):
"""
    Packs a Bayer image into 4 channels. Also performs other pre-processing as needed for the short-exposure images.
Steps:
1. Subtract the black level
2. Pack image into 4 channels
"""
im = raw.raw_image_visible.astype(np.float32)
im = np.maximum(im - 512, 0) / (
16383 - 512
) # subtract the black level and normalize
im = np.expand_dims(im, axis=2)
img_shape = im.shape
H = img_shape[0]
W = img_shape[1]
out = np.concatenate(
(
im[0:H:2, 0:W:2, :],
im[0:H:2, 1:W:2, :],
im[1:H:2, 1:W:2, :],
im[1:H:2, 0:W:2, :],
),
axis=2,
)
return out
def postprocess_raw(raw):
"""
    Performs post-processing on a raw image, as needed for the long-exposure images.
"""
im = raw.postprocess(
use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16
)
im = np.float32(im) / 65535.0
return im
def augment(images: tuple):
assert len(images) == 2, "Need 2 images for augmentation"
im1, im2 = images
if random.randint(0, 1) == 1: # random flip (axis 1)
im1 = np.flip(im1, axis=1)
im2 = np.flip(im2, axis=1)
if random.randint(0, 1) == 1: # random flip (axis 2)
im1 = np.flip(im1, axis=2)
im2 = np.flip(im2, axis=2)
if random.randint(0, 1) == 1: # random transpose
im1 = np.transpose(im1, (1, 0, 2))
im2 = np.transpose(im2, (1, 0, 2))
return im1, im2
def crop(im_s, im_l, ps):
H, W, _ = im_s.shape
xx = random.randint(0, W - ps)
yy = random.randint(0, H - ps)
patch_s = im_s[yy : yy + ps, xx : xx + ps, :]
patch_l = im_l[2 * yy : 2 * (yy + ps), 2 * xx : 2 * (xx + ps), :]
return patch_s, patch_l
def generator_func_train():
short_paths, long_paths = short_train, long_train
for short, long in zip(short_paths, long_paths):
yield augment(
crop(
pack_raw(get_image_raw(short)), postprocess_raw(get_image_raw(long)), ps
)
)
def generator_func_val():
short_paths, long_paths = short_val, long_val
for short, long in zip(short_paths, long_paths):
yield crop(
pack_raw(get_image_raw(short)), postprocess_raw(get_image_raw(long)), ps
)
def generator_func_test():
short_paths, long_paths = short_test, long_test
for short, long in zip(short_paths, long_paths):
yield crop(
pack_raw(get_image_raw(short)), postprocess_raw(get_image_raw(long)), ps
)
ds_train = tf.data.Dataset.from_generator(
generator_func_train,
output_signature=(
tf.TensorSpec(shape=(None, None, 4), dtype=np.float32),
tf.TensorSpec(shape=(None, None, 3), dtype=np.float32),
),
)
ds_val = tf.data.Dataset.from_generator(
generator_func_val,
output_signature=(
tf.TensorSpec(shape=(None, None, 4), dtype=np.float32),
tf.TensorSpec(shape=(None, None, 3), dtype=np.float32),
),
)
ds_test = tf.data.Dataset.from_generator(
generator_func_test,
output_signature=(
tf.TensorSpec(shape=(None, None, 4), dtype=np.float32),
tf.TensorSpec(shape=(None, None, 3), dtype=np.float32),
),
)
# ### Applying pre processing and augmentation as needed
# Already done in the generator function
# def augment(short, long):
# return short, long
# ds_train = ds_train.map(augment)
# ### Dataset optimization
AUTOTUNE = tf.data.experimental.AUTOTUNE
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(8)
ds_train = ds_train.batch(2)
ds_train = ds_train.prefetch(AUTOTUNE)
ds_val = ds_val.batch(2)
ds_val = ds_val.prefetch(AUTOTUNE)
ds_test = ds_test.batch(2)
ds_test = ds_test.prefetch(AUTOTUNE)
# ## Model Creation
# ### Utility Functions
def psnr(y_true, y_pred):
# assuming y_true and y_pred are in the range [0, 1]
max_pixel = 1.0
mse = tf.keras.losses.mean_squared_error(y_true, y_pred)
psnr = 10.0 * tf.math.log((max_pixel**2) / mse) / tf.math.log(10.0)
return psnr
def save_model(model, model_name):
"""
Save a Keras model with a custom name.
Args:
model: A Keras model object to save.
model_name: A string representing the desired name of the model.
Returns:
None
"""
model.save(model_name)
print(f"Model saved in folder '{model_name}'")
def load_model(model_path):
"""
Load a saved Keras model with the given path.
Args:
model_path: A string representing the path to the saved model's folder.
Returns:
A Keras model object.
"""
if not os.path.exists(model_path):
raise ValueError(f"No saved model found at {model_path}.")
return keras.models.load_model(model_path)
# ### Model Code
RECREATE_MODEL = True
# #### Amplification Ratio Layer
class AmplificationRatio(Layer):
def __init__(self, initial_ratio=300, **kwargs):
super(AmplificationRatio, self).__init__(**kwargs)
self.initial_ratio = initial_ratio
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(
name="kernel",
shape=(1,),
initializer="random_uniform", # tf.keras.initializers.Constant(self.initial_ratio),
trainable=True,
)
super(AmplificationRatio, self).build(input_shape)
def call(self, inputs):
inputs = tf.cast(inputs, dtype=self.dtype)
# Convert numpy array to tensor
x = tf.convert_to_tensor(inputs, dtype=self.dtype)
# Multiply the input tensor with the trainable weight
output = tf.multiply(x, self.kernel)
return output
def compute_output_shape(self, input_shape):
# The output shape will be the same as the input shape
return input_shape
if RECREATE_MODEL:
# U - Net Architecture [(h, w, 4) -> (2h, 2w, 3)]
# IMPORTANT: h and w MUST be powers of 2
input_shape = (None, None, 4) # (h, w, 4)
# Input Shape Correction
inputs = Input(shape=input_shape)
upsampled_input = UpSampling2D(size=(2, 2))(inputs) # (2h, 2w, 4)
upsampled_input = AmplificationRatio()(upsampled_input) # Activation ratio
# Contracting Path
conv1 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
upsampled_input
) # (2h, 2w, 64)
conv1 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv1
) # (2h, 2w, 64)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) # (h, w, 64)
conv2 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
pool1
) # (h, w, 128)
conv2 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv2
) # (h, w, 128)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) # (h/2, w/2, 128)
conv3 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
pool2
) # (h/2, w/2, 256)
conv3 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv3
) # (h/2, w/2, 256)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) # (h/4, w/4, 256)
conv4 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
pool3
) # (h/4, w/4, 512)
conv4 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv4
) # (h/4, w/4, 512)
# conv4 = Dropout(0.5)(conv4) # (h/4, w/4, 512)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) # (h/8, w/8, 512)
# Bottleneck
conv5 = Conv2D(
1024, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
pool4
) # (h/8, w/8, 1024)
conv5 = Conv2D(
1024, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv5
) # (h/8, w/8, 1024)
# conv5 = Dropout(0.5)(conv5) # (h/8, w/8, 1024)
# Expansive Path
up6 = Conv2D(
512, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(
UpSampling2D(size=(2, 2))(conv5)
) # (h/4, w/4, 512)
merge6 = concatenate([conv4, up6], axis=3) # (h/4, w/4, 1024)
conv6 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
merge6
) # (h/4, w/4, 512)
conv6 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv6
) # (h/4, w/4, 512)
up7 = Conv2D(
256, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(
UpSampling2D(size=(2, 2))(conv6)
) # (h/2, w/2, 256)
merge7 = concatenate([conv3, up7], axis=3) # (h/2, w/2, 512)
conv7 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
merge7
) # (h/2, w/2, 256)
conv7 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv7
) # (h/2, w/2, 256)
up8 = Conv2D(
128, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(
UpSampling2D(size=(2, 2))(conv7)
) # (h, w, 128)
merge8 = concatenate([conv2, up8], axis=3) # (h, w, 256)
conv8 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
merge8
) # (h, w, 128)
conv8 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv8
) # (h, w, 128)
up9 = Conv2D(
64, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(
UpSampling2D(size=(2, 2))(conv8)
) # (2h, 2w, 64)
merge9 = concatenate([conv1, up9], axis=3) # (2h, 2w, 128)
conv9 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
merge9
) # (2h, 2w, 64)
conv9 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(
conv9
) # (2h, 2w, 64)
# Output
conv10 = Conv2D(3, 1, activation="relu")(conv9) # (2h, 2w, 3)
model = Model(inputs=inputs, outputs=conv10)
else:
model = load_model("/kaggle/input/see-in-the-dark-model-store/kaggle/working/model")
model.summary()
# ### Model Visualization
plot_model(model, to_file="model.png", show_shapes=True)
# ### Model Debugging
DEBUG = False
# #### Training
if DEBUG:
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
loss=tf.keras.losses.MeanAbsoluteError(),
metrics=[psnr],
)
if DEBUG:
model.fit(
x=np.zeros((1, 256, 256, 4)).astype(np.float32) + 0.0003,
y=np.ones((1, 512, 512, 3)).astype(np.float32),
epochs=100,
)
# #### Prediction
if DEBUG:
res = model.predict(np.zeros((1, 256, 256, 4)) + 0.0003)
print(res)
# ## Model Training
# Datasets used: `train`, `val`
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),
loss=tf.keras.losses.MeanAbsoluteError(),
metrics=[psnr],
)
hist = model.fit(x=ds_train, epochs=2000, validation_data=ds_val)
plt.plot(hist.history["loss"], label="Training Loss", c="red")
plt.plot(hist.history["val_loss"], label="Validation Loss", c="green")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# ## Model Testing
# Datasets used: `test`
results = model.evaluate(x=ds_test)
print("Testing Results: ")
print(f"MAE: {results[0]}")
print(f"PSNR: {results[1]}")
# ## Model Persistence
save_model(model, "model")
|
# First, let's import the libraries we need and take a look at the data.
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
df = pd.read_csv("./drive/MyDrive/Colab_Notebooks/DATA/train.csv")
df.sample(10)
df.info()
df.isna().sum()
# The data has no missing values in any column.
df["TotalSpent"] = pd.to_numeric(df["TotalSpent"], errors="coerce").fillna(0)
# Convert the column to a numeric format
type(df["TotalSpent"][2])
# checked
df.shape
df.nunique()
# Numerical features
num_cols = ["ClientPeriod", "MonthlySpending", "TotalSpent"]
# Categorical features
cat_cols = [
"Sex",
"IsSeniorCitizen",
"HasPartner",
"HasChild",
"HasPhoneService",
"HasMultiplePhoneNumbers",
"HasInternetService",
"HasOnlineSecurityService",
"HasOnlineBackup",
"HasDeviceProtection",
"HasTechSupportAccess",
"HasOnlineTV",
"HasMovieSubscription",
"HasContractPhone",
"IsBillingPaperless",
"PaymentMethod",
]
feature_cols = num_cols + cat_cols
target_col = "Churn"
print(f"Duplicated rows: {df.duplicated(keep=False).sum()}")
print(
f"Duplicated rows without target: {df.drop(target_col, axis=1).duplicated(keep=False).sum()}"
)
df.info()
df.drop_duplicates(inplace=True)
# Remove the duplicate rows; they would only put a spoke in the wheels of our "machine"
df.shape
# Let's plot some charts for a more detailed analysis of our features.
# Histograms of the numerical features
fig, axes = plt.subplots(1, 3, figsize=(20, 6))
fig.suptitle("Гистограммы численных признаков", fontsize=15)
for i, col in enumerate(num_cols):
axes[i].hist(df[col])
axes[i].set_title(col).set_fontsize(15)
fig, axes = plt.subplots(4, 4, figsize=(20, 20))
fig.suptitle("Диаграммы для категориальных признаков", fontsize=15)
axes = axes.flatten()
for i, col in enumerate(cat_cols):
col_values = df.groupby(by=col).size()
axes[i].pie(x=col_values, autopct="%.0f%%", labels=col_values.index)
axes[i].set_title(col).set_fontsize(15)
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import (
StandardScaler,
RobustScaler,
LabelEncoder,
OneHotEncoder,
)
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
X_train = df.drop(target_col, axis=1)
y_train = df[target_col]
data_norm = ColumnTransformer(
[("num_cols", StandardScaler(), num_cols), ("cat_cols", OneHotEncoder(), cat_cols)]
)
pline = make_pipeline(data_norm, LogisticRegression())
param_grid = {
"logisticregression__C": [100, 10, 1, 0.1, 0.01, 0.001],
"logisticregression__penalty": ["l1", "l2", "elasticnet"],
"logisticregression__max_iter": np.linspace(100, 300, 5),
}
gs = GridSearchCV(
estimator=pline, param_grid=param_grid, scoring="roc_auc", refit=True, n_jobs=-1
)
gs.fit(X_train, y_train)
print(gs.best_score_, gs.best_params_, sep="\n")
# Our baseline is logistic regression with ROC-AUC 0.845
import catboost
from sklearn.metrics import roc_auc_score
X = df.drop(target_col, axis=1)
y = df[target_col]
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.8, random_state=666
)
model_catboost = catboost.CatBoostClassifier(cat_features=cat_cols, verbose=False)
model_catboost.fit(X_train, y_train)
predictions = model_catboost.predict_proba(X_valid)[:, -1]
roc_auc_score(y_valid, predictions)
# CatBoost with default parameters gave ROC-AUC 0.853
param_grid = {
"num_trees": np.linspace(400, 1000, 50),
"learning_rate": np.linspace(0.1, 1, 5),
"max_depth": np.linspace(1, 6, 6),
}
cb_cv = catboost.CatBoostClassifier(cat_features=cat_cols, verbose=False)
GSCV_catboost = GridSearchCV(
estimator=cb_cv, param_grid=param_grid, scoring="roc_auc", cv=5
)
GSCV_catboost.fit(X, y)
print(GSCV_catboost.best_score_, GSCV_catboost.best_params_, sep="\n")
# We choose the best CatBoost model as the baseline, since it gave the best score (ROC-AUC).
# Load the test data and make a prediction.
X_test = pd.read_csv("./drive/MyDrive/Colab_Notebooks/DATA/test.csv")
X_test.head()
prediction = GSCV_catboost.best_estimator_.predict_proba(X_test)[:, 1]
submission = pd.read_csv("./drive/MyDrive/Colab_Notebooks/DATA/submission.csv")
submission["Churn"] = prediction
submission.shape
submission.head()
submission.to_csv(
"./drive/MyDrive/Colab_Notebooks/DATA/submission.baseline_1.csv", index=False
)
# Write the answer to a CSV file
# We have not yet tried the strongest algorithm, namely XGBoost.
# Let's implement it.
import xgboost as xgb
X = df.drop(target_col, axis=1)
y = df["Churn"]
X = pd.get_dummies(X, columns=cat_cols)
# encode our categorical features as numbers (one-hot).
X.shape
std = StandardScaler()
X[num_cols] = std.fit_transform(X[num_cols])
# Standardize our numerical values
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.8, random_state=69
)
xgb_cl = xgb.XGBClassifier(random_state=69)
xgb_cl.fit(X_train, y_train)
prediction_xgb = xgb_cl.predict_proba(X_valid)[:, 1]  # Make predictions
roc_auc_score(y_valid, prediction_xgb)
# Our baseline with the XGBoost classifier: ROC-AUC 0.805
param_grid = {
"learning_rate": [0.01, 0.05, 0.1],
"max_depth": [2, 3, 4],
"n_estimators": range(300, 700, 200),
"eta": [0.05, 0.1, 0.15],
}
xgb_cl_cv = xgb.XGBClassifier(random_state=2, verbose=False)
GSCV_xgb_cl = GridSearchCV(
estimator=xgb_cl_cv, param_grid=param_grid, scoring="roc_auc", cv=5
)
GSCV_xgb_cl.fit(X_train, y_train)
print(
GSCV_xgb_cl.best_score_, GSCV_xgb_cl.best_params_, sep="\n"
)  # Find the best hyperparameters
prediction_xgb = GSCV_xgb_cl.predict_proba(X_valid)[:, 1]  # Make predictions
roc_auc_score(y_valid, prediction_xgb)
submission = pd.read_csv("./drive/MyDrive/Colab_Notebooks/DATA/submission.csv")
submission["Churn"] = prediction
submission.to_csv(
"./drive/MyDrive/Colab_Notebooks/DATA/submission.xgb_cl.csv", index=False
)
# Write the answer to a CSV file
|
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.figure_factory as ff
business = pd.read_json(
"../input/yelp-dataset/yelp_academic_dataset_business.json", lines=True
)
len(business.business_id.unique())
print(f"shape = {business.shape}")
print()
print("Null value list")
print(business.isnull().sum().sort_values(ascending=False))
stars = business.groupby("stars")["stars"].count()
stars = pd.DataFrame({"rate": stars.index, "count": stars.values}).astype({"rate": str})
fig = px.histogram(
stars, x="rate", y="count", color="rate", title="Star Distribution", text_auto=True
)
fig.show()
print(
f"Mean = {np.mean(business.stars)}, Standard Deviation = {np.std(business.stars)}"
)
city_business_reviews = (
business[["city", "review_count", "stars"]]
.groupby(["city"])
.agg({"review_count": "sum", "stars": "mean"})
.sort_values(by="review_count", ascending=False)
)
city_business_reviewstop10 = city_business_reviews.head(10)
figc = px.histogram(
city_business_reviewstop10,
x="review_count",
y=city_business_reviewstop10.index,
color=city_business_reviewstop10.index,
title="Cities with most reviews and its correspond ratings for the local businesses",
text_auto=True,
)
figc.update_traces(orientation="h")
for i in city_business_reviewstop10.index:
figc.add_annotation(
text=round(city_business_reviewstop10.at[i, "stars"], 2),
x=1000000,
y=i,
showarrow=False,
)
figc.add_annotation(
text="stars =", x=960000, y=city_business_reviewstop10.index[1], showarrow=False
)
figc.show()
business_cats = ", ".join(business["categories"].dropna())
cats = pd.DataFrame(business_cats.split(", "), columns=["Count"])
cats_ser = cats.Count.value_counts()
cats_df = pd.DataFrame(cats_ser)
cats_df.reset_index(inplace=True)
print(len(cats_df))
dum = cats_df.copy()
cats_df = cats_df.rename(columns={"index": "categories"}).head(10)
figcat = px.histogram(
cats_df,
x="Count",
y="categories",
color="categories",
title="Business Distribution (Top 10)",
text_auto=True,
)
figcat.update_traces(orientation="h")
figcat.show()
dum = dum.rename(columns={"index": "name"})
dum.head(3)
for i in range(3):
print(f"{dum.name[i]} of % = {round(dum.Count[i]/dum.Count.sum() * 100, 2)}")
dum.loc[3:, "name"] = "others"
figp = px.pie(dum, values="Count", names="name")
figp.update_traces(textposition="inside", textinfo="percent+label")
figp.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.applications import *
from tensorflow.keras.preprocessing.image import *
from tensorflow.keras.utils import plot_model
from livelossplot import PlotLossesKeras
from tensorflow.keras.callbacks import *
from tensorflow.keras import backend as K
import os
from PIL import Image
import cv2
from collections import Counter
from imutils import *
from scipy.spatial.distance import cosine, euclidean
import numpy as np
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
import seaborn as sns
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly as ply
from sklearn.metrics import *
ply.offline.init_notebook_mode(connected=True)
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
import warnings
warnings.filterwarnings("ignore")
VAL_LOC = "../input/mias-classes-gdf/MIAS_Data/Val"
TRAIN_LOC = "../input/mias-classes-gdf/MIAS_Data/Train"
TEST_LOC = "../input/mias-classes-gdf/MIAS_Data/Test"
def load_data(img_location, name):
imgs = []
labels = []
for img in os.listdir(os.path.join(img_location, name)):
img_arr = cv2.imread(os.path.join(img_location, name, img))
img_arr = img_arr / 255.0
img_arr = cv2.resize(img_arr, (512, 512))
labels.append(name)
imgs.append(img_arr)
return imgs, labels
train_data = TRAIN_LOC
val_data = VAL_LOC
masked_data = TEST_LOC
G_train_imgs, G_train_labels = load_data(train_data, "G")
D_train_imgs, D_train_labels = load_data(train_data, "D")
G_val_imgs, G_val_labels = load_data(val_data, "G")
D_val_imgs, D_val_labels = load_data(val_data, "D")
F_train_imgs, F_train_labels = load_data(train_data, "F")
F_val_imgs, F_val_labels = load_data(val_data, "F")
plt.imshow(G_train_imgs[3])
# define the standalone discriminator model
def define_discriminator(in_shape=(512, 512, 3)):
model = Sequential()
# normal
model.add(Conv2D(64, (3, 3), padding="same", input_shape=in_shape))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(256, (3, 3), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation="sigmoid"))
# compile model
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
return model
# define the standalone generator model
def define_generator(latent_dim):
model = Sequential()
# foundation for 4x4 image
n_nodes = 256 * 4 * 4
model.add(Dense(n_nodes, input_dim=latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(Reshape((4, 4, 256)))
# upsample to 8x8
model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# upsample to 16x16
model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# upsample to 32x32
model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
model.add(LeakyReLU(alpha=0.2))
# output layer
model.add(Conv2D(3, (3, 3), activation="tanh", padding="same"))
return model
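# A minimal sketch of the usual composite model for training the generator through a frozen
# discriminator. Note that the generator above only upsamples to 32x32 while the discriminator
# expects 512x512x3 inputs, so additional Conv2DTranspose blocks would be needed before the
# two models could actually be chained and trained together.
def define_gan(generator, discriminator):
    # freeze the discriminator's weights while the generator is trained through it
    discriminator.trainable = False
    model = Sequential()
    model.add(generator)
    model.add(discriminator)
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss="binary_crossentropy", optimizer=opt)
    return model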
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.cluster import KMeans
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
import seaborn as sns
import matplotlib.pyplot as plt
# ### Read both test and train datasets
train_ori = pd.read_csv("../input/tabular-playground-series-mar-2021/train.csv")
test_ori = pd.read_csv("../input/tabular-playground-series-mar-2021/test.csv")
# ## Explore train dataset
# ### Shape:
print("Shape of data frame:", train_ori.shape)
train_ori.head()
# ### Check for NA values :
train_ori.info()
train_ori.isna().sum()
# Data set is clean as far as NA are concerned.
# ### Use the .describe() method to look at different features
train_ori.describe(include="object")
obj_col = train_ori.select_dtypes("object").columns.tolist()
for colname in obj_col:
plt.figure() # this creates a new figure on which your plot will appear
sns.histplot(data=train_ori, x=colname)
for colname in obj_col:
plt.figure() # this creates a new figure on which your plot will appear
sns.histplot(data=train_ori, x="target", y=colname, hue=colname, legend=False)
train_ori.describe()
# ### Preprocessing :
# 1. Drop the categorical features that have more than 5 categories.
# 2. No peculiar characteristics are observed in the continuous variables that would justify dropping entire columns at this point.
#
X = train_ori.drop(["id", "cat6", "cat7", "cat8", "cat9", "target"], axis=1)
y = train_ori["target"]
y.shape
# ### Split X into X_train and X_valid, same for y
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.33, random_state=0
)
# ### Encoding categorical variables
object_cols = [col for col in X_train.columns if X_train[col].dtype == "object"]
# Columns that can be safely label encoded
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
# Problematic columns that will be dropped from the dataset
bad_label_cols = list(set(object_cols) - set(good_label_cols))
print("Categorical columns that will be label encoded:", good_label_cols)
print("\nCategorical columns that will be dropped from the dataset:", bad_label_cols)
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
# Apply label encoder
my_encoder = LabelEncoder()
for value in good_label_cols:
label_X_train[value] = my_encoder.fit_transform(label_X_train[value])
label_X_valid[value] = my_encoder.transform(label_X_valid[value])
label_X_train.shape
# To Do:
# 1. Split train into X_train and X_valid
# **data visualization**
# 2. One-hot encoding for both test and valid
# 3. K-means clustering
# 4. XGBoost, check RMSE score
#
def score_dataset(
X,
y,
model=XGBRegressor(
n_estimators=500, max_depth=7, learning_rate=0.5, n_jobs=-1, verbose=1
),
):
# Label encoding for categoricals
for colname in X.select_dtypes(["category", "object"]):
X[colname], _ = X[colname].factorize()
# Metric for Housing competition is RMSE (Root Mean Squared Error)
score = cross_val_score(
model,
X,
y,
cv=5,
scoring="neg_mean_squared_error",
)
score = -1 * score.mean()
score = np.sqrt(score)
return score
# score_dataset(label_X_train,y_train)
# score_dataset(label_X_valid,y_valid)
def hyperParameterTuning(X_train, y_train):
param_tuning = {
"learning_rate": [0.01, 0.1],
"max_depth": [3, 5, 7, 10],
#'min_child_weight': [1, 3, 5],
#'subsample': [0.5, 0.7],
#'colsample_bytree': [0.5, 0.7],
"n_estimators": [100, 200, 500],
"objective": ["reg:squarederror"],
}
xgb_model = XGBRegressor()
gsearch = GridSearchCV(
estimator=xgb_model,
param_grid=param_tuning,
# scoring = 'neg_mean_absolute_error', #MAE
scoring="neg_mean_squared_error", # MSE
cv=5,
n_jobs=-1,
verbose=1,
)
gsearch.fit(X_train, y_train)
return gsearch.best_params_
best_parameters = hyperParameterTuning(label_X_train, y_train)
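# With the best hyperparameters found, a minimal follow-up sketch is to refit a single model
# on the already label-encoded training split and check the validation RMSE:
best_xgb = XGBRegressor(**best_parameters, n_jobs=-1)
best_xgb.fit(label_X_train, y_train)
valid_preds = best_xgb.predict(label_X_valid)
valid_rmse = np.sqrt(np.mean((valid_preds - y_valid) ** 2))
print("Validation RMSE:", valid_rmse)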
|
# # How does Quantum Computers represent data?
# The majority of computing in this era relies on classical computers for intense number crunching and other computationally expensive tasks. In classical computers, data is represented in classical bits. A single bit can represent two values, "0" and "1" in base 2, which correspond to 0 and 1 in base 10; similarly, two bits can represent as many as 4 values, "00", "01", "10" and "11" in base 2, which correspond to 0, 1, 2 and 3 in base 10. However, a register of bits only ever holds one of these values at a time: a single bit is either "0" or "1", and two bits are either "00", "01", "10" or "11" at any given moment, never all of them simultaneously.
# In quantum computers, things work a bit differently. Quantum computers represent data in "qubits" (quantum bits). Unlike classical bits, qubits can exist in superposed states. Superposition is a quantum-mechanical phenomenon where a quantum entity is in two or more states at once (for example, behaving as both a wave and a particle) prior to empirical measurement. With qubits, all possible values that a given qubit or collection of qubits can represent are represented simultaneously: in other words, a single qubit can be in a superposition of "0" and "1", hence representing two values at once, and similarly 2 qubits can be in a superposition of "00", "01", "10" and "11".
# From here it is easy to see how quantum computers could compute faster than classical computers: simply being in a superposition of all possible states would make simultaneous computation feasible. However, there is a catch, which we will get to shortly. Suppose we have a problem that requires us to brute-force our way through stupendously large numbers in the range $1$ to $10^{500}$. To represent a number as big as $10^{500}$ we would require $n$ bits, where:
# $n = \log_2{10^{500}}$
# To simplify further, $n = 500 \log_2{10} \approx 1661$
# Now, 1661 classical bits only ever represent one value at a time, regardless of how many bits there are. Suppose it takes 1 second to iterate through 1000 values in the range $1$ to $10^{500}$; it would then take roughly $10^{497}$ seconds (far greater than the age of the universe) to enumerate every value in the range, and up to $10^{497}$ seconds to solve this brute-forcing problem, depending on which value is the answer. With qubits, however, it would take no time at all to represent every value in the range, because 1661 qubits would be in a superposition of all values from $0$ to $2^{1661}$ at once, and $10^{500}$ lies within $[0, 2^{1661}]$. But simply being in a superposition of all values from $0$ to $2^{1661}$ is not enough to solve the brute-forcing problem. Why? Remember that superposition is a state of being prior to empirical measurement; in other words, the moment we attempt to measure the superposition, we get a single random state/value out of all possible states/values and lose all information about the others.
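# A quick sanity check of the figures above:
import math

# bits needed to represent numbers as large as 10**500
n_bits = math.ceil(500 * math.log2(10))
print("bits required:", n_bits)  # ~1661

# at 1000 values per second, enumerating all 10**500 values takes about 10**497 seconds,
# versus an age of the universe of roughly 4 * 10**17 seconds
seconds_needed = 10**500 // 1000
age_of_universe_seconds = 4 * 10**17
print("digits in the required time (seconds):", len(str(seconds_needed)))
print("digits in the age of the universe (seconds):", len(str(age_of_universe_seconds)))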
# From the example above, it is clear why quantum computers fail at most real-life tasks where classical computers still thrive. Quantum computers will not make your GTA 5 game run at ten million FPS. However, quantum computers do excel at some specific tasks, like decryption of encrypted messages and passcodes, large-number factorisation, stochastic optimization, etc.
# # RSA Encryption
# RSA encryption is an encryption technique devised by Ron Rivest, Adi Shamir, and Leonard Adleman. The acronym "RSA" comes from the last names of the three inventors.
# RSA uses a pair of keys (a public key and a private key) to encrypt and decrypt messages. The public key is usually made available to everyone for encrypting messages, while the private key is kept secret and used only by the owner and intended recipient of the message to decrypt messages that were encrypted with the public key.
# RSA keys are numbers stored in PEM (Privacy-Enhanced Mail) format. The PEM format usually includes a header containing the type of key and a body which is a base64-encoded version of an ASN.1 (Abstract Syntax Notation One) encoding of the numbers that make up the key. Base64 is a reversible encoding that turns text, numbers and even images into base64-encoded text. A typical RSA key can look like this:
# ```
# -----BEGIN RSA PRIVATE KEY-----
# MIIEpAIBAAKCAPEAxqE3PxG5fXm6+lZU6Bzj6dJU6PzUvyBmU8R7WQDLvMf0F9OZ
# J+8aOK3Gv3ZYj5fJz5MQ8itf0gd+StNduhJS9Ggy37B5yo1Vyx4FWt4lJ2Y4K0MK
# ... (key data continues) ...
# -----END RSA PRIVATE KEY-----
# ```
# If the base64 string above is decoded, we would typically have something like this:
# ```
# SEQUENCE {
# modulus INTEGER,
# publicExponent INTEGER
# }
# ```
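# As a rough illustration using only the standard library (the key above is truncated, so only
# the mechanics are shown, not a parseable key): stripping the PEM armour and base64-decoding
# the body yields the raw DER bytes from which an ASN.1 parser would read the modulus $n$ and
# the exponent $e$.
import base64

truncated_pem_body = (
    "MIIEpAIBAAKCAPEAxqE3PxG5fXm6+lZU6Bzj6dJU6PzUvyBmU8R7WQDLvMf0F9OZ"
    "J+8aOK3Gv3ZYj5fJz5MQ8itf0gd+StNduhJS9Ggy37B5yo1Vyx4FWt4lJ2Y4K0MK"
)
der_bytes = base64.b64decode(truncated_pem_body)
print("decoded", len(der_bytes), "DER bytes (an ASN.1 parser would extract n and e from here)")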
# ### How are RSA keys computed?
# An RSA public key is usually a stupendously large number $n$ called the modulus together with a public exponent $e$; $n$ is the product of two stupendously large, random prime numbers $p$ and $q$. There is also a private exponent $d$. To compute the RSA keys, we:
# 1. Randomly select two stupendously large prime numbers $p$ and $q$ and multiply them to yield the modulus $n$.
# 2. Next, we compute the [Carmichael totient function](https://en.wikipedia.org/wiki/Carmichael%27s_totient_function) of the product of $p$ and $q$ (i.e. of $n$) as:
# $\lambda (n) = LCM(p-1, q-1)$
#
# then select an $e$ coprime to $\lambda(n)$ such that $1 < e < \lambda(n)$
#
# **Note:** *two numbers are coprime if their greatest common divisor is 1*
# 3. Next, compute the private exponent $d$ as the [Modular Multiplicative Inverse](https://en.wikipedia.org/wiki/Modular_multiplicative_inverse) of $e$ modulo $\lambda(n)$, i.e. $d \equiv e^{-1} \pmod{\lambda(n)}$.
# 4. Therefore, the entire public key is $(n, e)$ and the private key is $(n, d)$. A padded plaintext message is encoded to integer form with some "agreed upon" algorithm. If the integer-encoded message is $M$, then the encrypted message is:
# $Enc(M) = M^e \: mod \: n$
# 5. To decrypt $Enc(M)$ we compute $Dec(Enc(M))$ such that:
# $Dec(Enc(M)) = Enc(M)^d \: mod \: n$
# Now, without $d$, it is practically impossible to decrypt an RSA encrypted message. However, $d$ can be computed from $p$ and $q$ (which should typically remain private), and can then be used to decrypt an RSA encrypted message
# # Cracking an RSA Encryption
# To decrypt an encrypted RSA message $Enc(M)$ without a private key, we do the following.
# 1. Decode the base64 encoded body of the PEM formatted RSA public key with a base64 decoding algorithm.
# 2. Extract the 'modulus' ($n$) and the public exponent $e$
# 3. Make a 'bad guess' at one of the prime numbers used to compute $n$; call this guess $g$, with $1<g<n$. Whatever 'bad guess' ($g$) is chosen (provided it shares no factor with $n$), it satisfies the equation:
# $g^r = mn + 1$
#
# where:
#
# $r$ is an integer that can range from $1$ to $\infty$ and $m$ is some integer multiplier of $n$. What this entails is that there is a number $r$ such that when we raise $g$ to that power and divide the result by $n$ (the modulus of the public key), we get a remainder equal to 1.
# 4. Next we compute the $r$ that satisfies the equation $g^r = mn + 1$ via brute force
# 5. We then rearrange the equation to $g^r - 1 = mn$ and factorize the left-hand side such that:
# $(g^{r/2} - 1)(g^{r/2} + 1) = mn$
# 6. Let $p = (g^{r/2} - 1)$ and $q = (g^{r/2} + 1)$. We compute these initial values of $p$ and $q$ and fine-tune them with the Euclidean algorithm (taking the GCD of each with $n$) to get the final values of $p$ and $q$ (the two prime numbers used to compute the modulus $n$ of the public key).
# **Note:** The Euclidean algorithm is one that computes the greatest common divisor between two numbers
# 7. After computing the values of $p$ and $q$, we compute the Carmichael totient function of the modulus $n$ as:
# $\lambda(n) = LCM (p-1, q-1)$
# 8. The Carmichael totient value $\lambda(n)$ is then used to compute $d$; in other words:
#
# $d = modularInverse(e (mod\: \lambda(n)))$
# 9. Next we decrypt $Enc(M)$ like so:
# $Dec(Enc(M)) = Enc(M)^d \: mod \: n$
# And that is it.
# To illustrate further, given an RSA public key $(n=3233, e=17)$, we can calculate for $p$ and $q$ as shown in the cell below
import gmpy2, random
gmpy2.get_context().precision = 20000
def euclidean_gcd_algorithm(n: int, m: int):
r"""
Euclidean algorithm to find the greatest common divisor of a and b.
"""
if m == 0:
return n
return euclidean_gcd_algorithm(m, n % m)
def modular_inverse(a: int, m: int):
r"""
Modular inverse of a modulo m using the extended Euclidean algorithm.
"""
gcd, x, y = extended_gcd(a, m)
# If the gcd is not 1, then a does not have a modular inverse modulo m
if gcd != 1:
raise ValueError("No modular inverse exists")
return x % m
def extended_gcd(a: int, b: int):
r"""
Extended Euclidean algorithm to find the greatest common divisor of a and b,
as well as the coefficients x and y such that ax + by = gcd(a, b).
"""
if b == 0:
return a, 1, 0
else:
gcd, x1, y1 = extended_gcd(b, a % b)
x = y1
y = x1 - (a // b) * y1
return gcd, x, y
def get_coprimes(n: int):
coprimes = []
for i in range(n):
gcd = euclidean_gcd_algorithm(n, i)
if gcd != 1:
continue
coprimes.append(i)
return coprimes
def compute_r(n: int, g: int):
r"""
computes r given g in g^r = mn + 1
"""
r = 1
v = g
while True:
remainder = v % n
if remainder != 1:
r += 1
v = g**r
else:
return r
def compute_pq(rsa_modulus: int, bad_guess: int):
r"""
compute p and q, factors of RSA modulus n
"""
r = compute_r(rsa_modulus, g=bad_guess)
    p = gmpy2.mpz(bad_guess) ** (r // 2) + 1  # r must be even for this factorisation to work
    q = gmpy2.mpz(bad_guess) ** (r // 2) - 1
p = euclidean_gcd_algorithm(p, rsa_modulus)
q = euclidean_gcd_algorithm(q, rsa_modulus)
if (p * q != rsa_modulus) or (p == 1 or q == 1):
raise ValueError(
f"{bad_guess} is an improper guess for the RSA modulus: {rsa_modulus}"
)
return p, q
# Assuming we have a message $M$ that has been encoded from string to integer via a common str2int format, let $M = 65$, if $p$ and $q$ are 53 and 61 respectively, then we can simply RSA encrypt the message $M$ as illustrated in the code below
# define encoded message M to encrypt
original_msg = 65
# define p and q
p, q = 61, 53
# calculate modulus n
n = p * q
# compute Carmichael's totient function of n (lambda(n))
totient_val = int(((p - 1) * (q - 1)) / euclidean_gcd_algorithm(p - 1, q - 1))
# get all coprimes of lambda(n)
totient_coprimes = get_coprimes(totient_val)
# randomly select a coprime of lambda(n) to use as public exponent
e = random.choice(totient_coprimes[: len(totient_coprimes) // 16])
# calculate the private exponent d by computing the modular inverse of e (mod lambda(n))
d = modular_inverse(e, totient_val)
# encrypt message with public exponent
encrypted_msg = gmpy2.mpz(original_msg**e % n)
print(f"Original message: {original_msg}")
print(f"Public key: (n={n}, e={e})")
print(f"Private key: (n={n}, d={d})")
print(f"message encryption: {encrypted_msg}")
# Now, assuming we do not know the values of $p$, $q$ and $d$, but we have the public key as shown above, then we can retrieve the value of $p$ and $q$ from the modulus $n$, as shown below
calculated_p, calculated_q = compute_pq(rsa_modulus=n, bad_guess=14)
calculated_p, calculated_q = int(calculated_p), int(calculated_q)
print(f" value of p: {calculated_p}", "\n", f"value of q: {calculated_q}")
# Now that we have recovered $p$ and $q$ as 53 and 61 respectively, all that is left is to compute the private exponent $d$
# Recall: $d = modularInverse(e \: (mod \: \lambda(n)))$
# Where: $\lambda(n) = LCM(q-1, p-1)$
# Given that $p = 53$ and $q = 61$, $\lambda(n)$ is computed as shown in the cell below:
calculated_totient_val = int(
((calculated_p - 1) * (calculated_q - 1))
/ euclidean_gcd_algorithm(calculated_p - 1, calculated_q - 1)
)
print(f"Carmichael's Totient Function (λ(n)): {calculated_totient_val}")
# Now we have both variables $e$ and $\lambda(n)$ needed to compute the private exponent $d$ to decrypt the encrypted message above
# So, the private exponent $d$ is computed as shown below
calculated_d = modular_inverse(e, calculated_totient_val)
print(f"calculated private exponent: {calculated_d}")
# Finally, we decrypt the encrypted message with the calculated private exponent $d$ like so:
decrypted_msg = gmpy2.mpz(encrypted_msg) ** calculated_d % n
decrypted_msg = int(decrypted_msg)
print(f"decrypted message: {decrypted_msg}")
# Notice how we have retrieved the original message (65) by decrypting the encrypted message without any access to the private exponent $d$ or the factors $p$ and $q$ of the modulus
# **Note:** The `gmpy2` module is utilised in this program to represent very large numbers with good enough precision.
# # How Classical and Quantum Computers Fare in RSA decryption without private keys
# This toy problem above was done on a classical computer, but ideally the values of $p$ and $q$ would be so random and so very large that it would take eternity (literally) to compute them from the stupendously large modulus $n$ which just happens to be a product of $p$ and $q$. So at best classical computers are just capable of illustrating how RSA encrypted messages can be illegally decrypted, but cannot actually decrypt a standard and ideal RSA encryption without access to a corresponding private key.
# Quantum computers, on the other hand, are a different story in this context. A quantum computer would, in principle, be capable of breaking an RSA encryption without needing the private key. Due to the superposed nature of qubits, a quantum computer could simultaneously evaluate all candidate values of $r$ (one of the most computationally expensive parts of this task) for a given "bad guess" $g$ in the equation $g^r = mn + 1$. However, because superposition only exists prior to empirical measurement, simply measuring the superposition would most likely be futile: we would get a random value of $r$ that probably does not satisfy the equation, and in turn lose the information about all other possible states. There is, however, a way around this: suppose we have two sets of qubits representing two superpositions A and B, where superposition A consists of $t$ qubits representing all candidate values of $r$ from $1$ to $t$, and superposition B likewise consists of $t$ qubits representing the remainders corresponding to the states/values of A. We can then [entangle](https://en.wikipedia.org/wiki/Quantum_entanglement#:~:text=Quantum%20entanglement%20is%20the%20phenomenon,separated%20by%20a%20large%20distance) the qubits of A and B, so that if we measure only the remainder part of the entanglement, we keep exactly the states whose remainder equals the measured value, together with their corresponding $r$ values.
# Why is this so? The reason stems from the periodic nature of the remainder superposition when expressed as a sequence: because it is periodic, a given remainder value is bound to repeat throughout the sequence, each occurrence paired with a different $r$ value. To illustrate the periodicity, suppose we have an RSA modulus $(n=121)$, a bad guess $(g=10)$, and a hypothetical quantum computer with 16 qubits, where each superposition (A and B) uses 8 qubits; we can then represent the variables and superpositions in the following code cell like so:
import numpy as np
# modulus
n = 121
# bad guess
g = 10
# number of qbits
n_qbits = 16
# number of qbits for each superpositions
n_qbits_per_superposition = n_qbits // 2
# number of superposition states
n_superposition_states = 2**n_qbits_per_superposition
# values of r
rs = [i for i in range(n_superposition_states)]  # one candidate r per superposition state
# superposition A
superposition_A = np.array(rs, dtype=np.int32)
# superposition B
superposition_B = np.array([(g**r) % n for r in rs], dtype=np.int32)
print(
f"number of superposition states per set of superpositions: {n_superposition_states}"
)
from matplotlib import pyplot as plt
plt.figure(figsize=(15, 5))
plt.plot(superposition_A, superposition_B)
plt.title(f"Periodic superposition of remainders, given (r=r, g={g})")
plt.xlabel("r")
plt.ylabel("Remainders")
plt.grid()
plt.show()
# Notice the periodicity in the plot above? If we randomly select a single remainder value from this sequence, we will see that the selected value also occurs in various other places.
# Although these illustrations are done on a classical computer, on a quantum computer simply measuring either superposition would prove futile with very high odds. One way around this is to entangle the periodic superposition (B) with the corresponding $r$ values (A); this way, if we measure only the remainder part of the entanglement, we can retrieve all occurrences of that remainder and their corresponding $r$ values. To illustrate, suppose we use a pandas DataFrame to represent our entanglement like so:
import pandas as pd
entanglement = pd.DataFrame()
entanglement["r"] = superposition_A
entanglement["remainders"] = superposition_B
entanglement.head()
# Measuring the entire entanglement would give us a random value and all other states that make up the superposition will be lost. However, measuring the remainder part of the entanglement will give all states where the remainder is the value measured, while all other states would be lost.
# Typically, it would be something similar to the code below:
# measure remainder part of the entanglement
random_remainder = np.random.choice(entanglement["remainders"])
print(f"measured remainder from entanglement: {random_remainder}")
# retrieve all entangled states where remainder is {random_remainder}
measured_entanglement = entanglement[entanglement["remainders"] == random_remainder]
# lose information of all other states
del entanglement, superposition_A, superposition_B
measured_entanglement.head()
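# To tie this back to the order-finding step: the surviving r values are spaced exactly one
# period apart, so the order r of g modulo n can be read off from consecutive differences.
# (For this particular toy modulus, 121 = 11**2, the final gcd trick of compute_pq does not
# yield a nontrivial factor, so the period is recovered here purely to illustrate the idea.)
surviving_rs = measured_entanglement["r"].to_numpy()
recovered_period = int(np.diff(surviving_rs)[0])
print("recovered period r:", recovered_period)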
|
import pandas as pd
import seaborn as sns
import numpy as np
cardio = pd.read_csv("/kaggle/input/cardiogoodfitness/CardioGoodFitness.csv")
cardio.columns
cardio.shape
cardio.info()
# No Null values in the data
cardio.head()
# ## Initial Data Exploration
cardio.describe().T
cardio.isna().sum()
# There are no missing values
cardio.hist(figsize=(10, 10))
corr_mat = cardio.corr()
print(corr_mat)
sns.heatmap(corr_mat, annot=True)
import seaborn as sns
sns.pairplot(cardio, hue="Product")
# In order to understand our data, we can look at each variable and try to understand its meaning and relevance to this problem.
# For each column we will note: **Variable**, **Type** (categorical or numerical), **Expectation**, **Conclusion**
# ## **Let's start with numerical variables**
# ## Miles
cardio["Miles"].describe()
import plotly.express as px
fig = px.histogram(cardio, x="Miles", color="Product")
fig.show()
# Variable : Miles
# Type : Numerical
# Expectation: 103.13 miles
# ## Income
cardio["Income"].describe()
fig = px.histogram(cardio, x="Income", color="Product")
fig.show()
# Variable : Income
# Type : Numerical
# Average income : 53720
# ## Fitness
cardio["Fitness"].describe()
# The average fitness level of people buying all three treadmills is 3
# ## Usage
cardio["Usage"].describe()
# On average, people use the treadmill at a level of 3.46
# ## Education
cardio["Education"].describe()
# ## Age
cardio["Age"].describe()
# The average age of user is 29
# ## **Categorical variables**
# ## Marital Status
set(cardio.MaritalStatus)
print(
len(cardio[cardio["MaritalStatus"] == "Single"])
/ (len(cardio["MaritalStatus"]))
* 100
)
print(
len(cardio[cardio["MaritalStatus"] != "Single"])
/ (len(cardio["MaritalStatus"]))
* 100
)
# ## Gender
cardio.Gender.hist()
# ## Product
set(cardio.Product)
print(len(cardio[cardio["Product"] == "TM195"]) / (len(cardio["Product"])) * 100)
print(len(cardio[cardio["Product"] == "TM498"]) / (len(cardio["Product"])) * 100)
print(len(cardio[cardio["Product"] == "TM798"]) / (len(cardio["Product"])) * 100)
# Most of the users buy TM195
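# The same three percentages can also be obtained in one line with value_counts, as a cross-check:
print(cardio["Product"].value_counts(normalize=True) * 100)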
# **Data by Product**
TM195 = cardio[cardio["Product"] == "TM195"]
TM195 = TM195.reset_index()
TM195 = TM195.drop(columns=["Product", "index"])
TM195.head()
TM498 = cardio[cardio["Product"] == "TM498"]
TM498 = TM498.reset_index()
TM498 = TM498.drop(columns=["Product", "index"])
TM498.head()
TM798 = cardio[cardio["Product"] == "TM798"]
TM798 = TM798.reset_index()
TM798 = TM798.drop(columns=["Product", "index"])
TM798.head()
# # **TM195**
TM195.describe().T
# To get the statistics of categorical variables lets encode the categorical data
# **In the Gender column, 1 represents Male and 0 represents Female**
# **In the MaritalStatus column, 1 represents Single and 0 represents Partnered**
#
tm195 = pd.get_dummies(data=TM195, drop_first=True)
tm195 = tm195.rename(
columns={"Gender_Male": "Gender", "MaritalStatus_Single": "MaritalStatus"}
)
tm195.head()
tm195.describe().T
# **The mean column gives the profile of an average user: Age 29,**
# **with 15 years of education, who uses the treadmill at level 3, has fitness level 3 and runs 82.79 miles**
# **Income: 46418**
# **Gender: both women and men are equally likely**
# **Partnered users are more likely to buy**
# # **TM498**
tm498 = pd.get_dummies(data=TM498, drop_first=True)
tm498 = tm498.rename(
columns={"Gender_Male": "Gender", "MaritalStatus_Single": "MaritalStatus"}
)
tm498.head()
tm498.mean()
# The average user profile of **TM498** is :
# Age : 29
# Education: 15+ years
# Usage level : 3
# Fitness level: 2.9
# Income : 48973
# Miles: 87.93
# Gender: Males are 1.6% more likely to buy
#
# Marital Status: Partnered users are more likely to buy
# # **TM798**
tm798 = pd.get_dummies(data=TM798, drop_first=True)
tm798 = tm798.rename(
columns={"Gender_Male": "Gender", "MaritalStatus_Single": "MaritalStatus"}
)
tm798.head()
average = tm798.mean()
average
# Unlike the other two, TM798 is much more likely to be bought by males
# The average user is 29 years old with 17 years of education, a usage level of 4.8, a fitness level of 4.6, and runs 166.9 miles
# ## All the three average user profiles at a glance
User_Profiles = {}
User_Profiles["TM195"] = tm195.mean()
User_Profiles["TM498"] = tm498.mean()
User_Profiles["TM798"] = tm798.mean()
User_Profiles_df = pd.DataFrame(User_Profiles)
User_Profiles_df
|
# # Titanic
# # 1.Load Data & Check Information
import pandas as pd
import numpy as np
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df_train = pd.read_csv("../input/titanic/train.csv")
df_test = pd.read_csv("../input/titanic/test.csv")
#
# As you can see, the data is divided into two groups: a train set and a test set. The train set contains 891 passenger records and the test set contains 418, identified by PassengerId. Each record holds several pieces of information about the passenger.
df_train.head()
df_train.tail()
df_test.head()
df_test.tail()
#
# Based on the describe output, only 38% of the passengers in the train set survived.
# Pclass (ticket class) is divided into three classes: 1st, 2nd and 3rd.
# The oldest person on the Titanic was 80 years old and the youngest was less than one year old.
# More than 50% of passengers did not travel with any siblings or spouses.
# Also, more than 75% of passengers travelled without parents or children aboard.
# Lastly, the highest ticket price was \$512.3 and the lowest was $0!
df_train.describe()
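# The survival figure quoted above can be checked directly, and broken down by ticket class:
print("overall survival rate:", df_train["Survived"].mean().round(3))
print(df_train.groupby("Pclass")["Survived"].mean().round(3))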
|
import pandas as pd
d = {
"Student": ["Aman", "Biswa", "Aman", "Disha", "Dhruvika", "Aman"],
"Marks": [23, 44, 33, 54, 78, 23],
"Age": [10, 19, 17, 18, 18, 18],
}
a = pd.DataFrame(d)
a
a["Student"].drop_duplicates(keep="last")
a.drop_duplicates(subset=["Student", "Marks", "Age"], ignore_index=True, keep=False)
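# note: drop_duplicates returns a new DataFrame; since the result above is not assigned back
# (and inplace is not set), `a` printed below still contains the duplicate rows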
a
x = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
x
x.drop_duplicates(subset=["ObservationDate", "Province/State", "Country/Region"])
# **Distinct Count**
x.head()
y = x["Country/Region"].unique()
y.ndim
x["Country/Region"].nunique() # gives the number of unique values
x["Confirmed"].sum() / x["Country/Region"].nunique()
|
# # Restaurant Recommendation System
# ## 2. Exploratory Data Analysis (EDA)
# ## Aim
# After getting our data ready, we still want to make sense of it. In EDA we look at various plots and let the data tell us its story. This step will give us a deeper understanding of the data. We'll also try to make the data more amenable to modelling. We'll look at the various tables in the database.
# We'll be using matplotlib and seaborn to make various plots.
# ### Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json
from sqlite3 import dbapi2 as sq3
from pathlib import Path
from collections import OrderedDict
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from time import time
from IPython.display import clear_output
from collections import OrderedDict
plt.style.use("ggplot")
# Functions to work with SQLite db
def make_query(sel):
"""Query database"""
c = db.cursor().execute(sel)
return c.fetchall()
def make_frame(list_of_tuples=None, legend=[], query=None):
"""
Returns DataFrame from a query or result of query
"""
framelist = []
if list_of_tuples is None:
if query is None:
print("Error: No query made")
return
list_of_tuples = make_query(query)
for i, cname in enumerate(legend):
framelist.append((cname, [e[i] for e in list_of_tuples]))
return pd.DataFrame.from_dict(OrderedDict(framelist))
# Connect to database
db = sq3.connect("../input/yelp-project/yelp_database.db")
# ### Reviews Table
# Looking at columns of 'reviews' table
make_query("PRAGMA table_info(reviews)")
# Getting pandas DataFrame from db
date_df = make_frame(query="SELECT stars, date FROM reviews", legend=["rating", "date"])
date_df.date = pd.to_datetime(date_df.date)
date_df.info()
# Creating new columns in reviews df
date_df["day"] = date_df.date.dt.day
date_df["month"] = date_df.date.dt.month
date_df["year"] = date_df.date.dt.year
date_df["hour"] = date_df.date.dt.hour
date_df["minute"] = date_df.date.dt.minute
date_df["second"] = date_df.date.dt.second
date_df.head()
sns.catplot(data=date_df, x="rating", kind="count", aspect=2)
ax = plt.gca()
ax.set(xticks=[0, 1, 2, 3, 4, 5], title="Rating Distribution")
ax.tick_params("x", labelsize=15)
ax.tick_params("y", labelsize=15)
# We see that most ratings are at the higher end of the scale; most are either 5 stars or 4 stars.
g = sns.catplot(data=date_df, x="year", kind="count", aspect=2)
ax = plt.gca()
ax.set_title("Number of reviews by Year")
# We see the number of reviews increased exponentially over the years. This could also be evidence of Yelp's growing popularity.
g = sns.relplot(data=date_df, x="month", y="rating", aspect=2, kind="line")
ax = plt.gca()
ax.set_title("Ratings by Month")
# We see that the average rating, faceted by month, is lowest in the 12th month (December). But if we look at the y-scale, the change is not significant enough to call it a trend.
g = sns.relplot(data=date_df, x="day", y="rating", aspect=2, kind="line")
ax = plt.gca()
ax.set_title("Ratings by Day")
# We see that the average rating, faceted by day, is highest from the 5th to the 10th day. But if we look at the y-scale, the change is not significant enough to call it a trend.
sns.catplot(data=date_df, x="hour", aspect=2, kind="count")
ax = plt.gca()
ax.set_title("Review Time")
# This is interesting: the fewest reviews were written in the morning and the count keeps rising throughout the day, which seems intuitive. But the most reviews were written at 2am, which is counter-intuitive. Why are so many people reviewing this late? Could these be bars where people stay late (though why would late-night drinkers bother to review?), or are people reviewing after dinner on their commute home? We would have to look at further evidence to draw conclusions.
# Release memory
del date_df
# ### Users Table
# Looking at columns and making a DataFrame from users table
cols = list(zip(*make_query("PRAGMA table_info(users)")))[1]
users_df = make_frame(query="SELECT * FROM users;", legend=cols)
users_df.info()
plt.figure(figsize=(10, 5))
ax = plt.gca()
sns.boxplot(data=users_df, x="review_count", ax=ax)
plt.xscale("log")
# The boxplot for review count reveals a lot of outliers. There are some users who have written over 10k reviews.
# Type Cast to datetime format
users_df.yelping_since = pd.to_datetime(users_df.yelping_since)
sns.displot(data=users_df, x="yelping_since", aspect=2)
# Most users joined Yelp (yelping_since) around the mid-2010s.
sns.displot(data=users_df, x="fans", aspect=2)
plt.xscale("log")
plt.yscale("log")
# Some users are wildly popular reviewers on yelp. Most people have few or no fans
sns.relplot(data=users_df, x="yelping_since", y="average_stars", alpha=0.1, aspect=2)
# We see that reviewers who have been yelping for a long time have a higher average rating. We might conclude that, over time, users become less harsh reviewers.
# Release Memory
del users_df
# ### Businesses table
# Looking at the businesses table's columns and their dtypes
make_query("PRAGMA table_info(businesses)")
# Getting dataframe from table
business_df_cols = list(zip(*make_query("PRAGMA table_info(businesses)")))[1]
business_df = make_frame(query="SELECT * FROM businesses", legend=business_df_cols)
business_df.head()
sns.jointplot(data=business_df, x="latitude", y="longitude")
# We see that the locations of businesses are concentrated in clusters. These clusters must be big cities. Let's plot them on a map.
BBox = (
business_df.longitude.min(),
business_df.longitude.max(),
business_df.latitude.min(),
business_df.latitude.max(),
)
img = plt.imread("../input/map-img-for-yelp-business-df/map(1).png")[:, :, :-1]
fig, ax = plt.subplots(figsize=(18, 14))
ax.scatter(business_df.longitude, business_df.latitude, zorder=1, alpha=0.1, s=5)
ax.set_title("Plotting Restaurant Locations on Map")
ax.set_xlim(-130, BBox[1])
ax.set_ylim(BBox[2], BBox[3])
ax.imshow(img, zorder=0, extent=BBox, aspect="equal")
# We see our data has businesses from certain cities of U.S. and not all over U.S.
def plot(feature):
plt.figure(figsize=(8, 8))
sns.relplot(data=business_df, y="stars", x="review_count", col=feature, alpha=0.4)
bool_features = []
for tup in make_query("PRAGMA table_info(businesses)"):
if tup[2] == "BOOLEAN":
bool_features.append(tup[1])
for feature in bool_features:
plot(feature)
# - Restaurants with Attire have higher ratings and higher review counts, hence are more popular than those without Attire.
# - Restaurants with TakeOut, AcceptCreditCard, GoodForKids, Reservation, GoodForGroups, BusinessParking, HasTV, Alcohol, BikeParking, Delivery, OutdoorSeating, WiFi, Ambience, DogsAllowed, GoodForDancing, CoatCheck, CounterService show a similar trend.
# - Restaurants with NoiseLevel also have higher average ratings; this could be because they are located in prime locations.
# - ByAppointmentOnly restaurants show roughly similar trend to Not ByAppointmentOnly restaurants.
# - Restaurants with Outdoor Seating have better ratings and higher review count than those without OutdoorSeating. Hence restaurants with OutdoorSeating are more popular.
# - Restaurants which are WheelChairAccessible do not perform better than those who aren't.
# - Restaurants with WiFi are also more popular than those without.
# - Some restaurants without TableService have higher reviewCount than those with.
# - Restaurants with Dogs allowed also appear more popular.
# - According to the plot, Ambience plays a big role in a restaurant's rating and review count. Those with Ambience are more popular.
# - Plots for restaurants with HappyHour, DriveThrough, Music, BestNights or AcceptsBitcoin do not differ significantly than those without.
# - Restaurants without DietaryRestrictions are more highly rated than those with.
# - Restaurants with Open24Hours=0 are underrepresented.
plt.figure(figsize=(8, 8))
top_10_zip = business_df.postal_code.value_counts()[:10]
sns.barplot(x=top_10_zip.index, y=top_10_zip.values)
ax = plt.gca()
ax.set_title("Most Popular Zip codes")
ax.set_xlabel("Zip Code")
ax.set_ylabel("Count")
# Our data has most number of businesses from zip code 89109 (Las Vegas). This can be verified on map.
toprating_df = business_df[business_df["stars"] == 5]
toprating_df = toprating_df.sort_values("review_count", ascending=False).head(20)
plt.figure(figsize=(15, 7))
p = sns.barplot(x="name", y="review_count", data=toprating_df, color="b")
p.set_xticklabels(p.get_xticklabels(), rotation=90, fontsize=8)
p.set_title("Top 5 star-rated Restaurants sorted by review count")
p.set(xlabel="Restaurant", ylabel="Review Count")
# Restaurant businesses with ratings=5 and highest review counts
df_restaurants = business_df.name.value_counts().index[:20].tolist()
df_top = business_df.loc[business_df["name"].isin(df_restaurants)]
mean_df = df_top.groupby("name")["stars"].mean()
meanrating_df = mean_df.reset_index()
topmean_rating_df = meanrating_df.sort_values("stars", ascending=False).head(20)
plt.figure(figsize=(15, 7))
p = sns.barplot(x="name", y="stars", data=topmean_rating_df, color="b")
p.set_xticklabels(p.get_xticklabels(), rotation=90, fontsize=8)
p.set_title("Top 5 star-rated Restaurants sorted by mean of ratings")
p.set(xlabel="Restaurant", ylabel="Rating")
# Restaurant businesses with the highest mean ratings, among the 20 most frequently occurring restaurant names
plt.figure(figsize=(15, 7))
sns.lineplot(
x=business_df["stars"],
y=business_df["review_count"],
hue=business_df["DriveThru"],
ci=80,
)
plt.legend(bbox_to_anchor=(1.00, 1), title="DriveThru")
plt.show()
# Restaurants with Drive Through have higher ratings and higher review count. Hence are more popular.
plt.figure(figsize=(15, 7))
sns.lineplot(
x=business_df["stars"],
y=business_df["review_count"],
hue=business_df["GoodForDancing"],
ci=80,
)
plt.legend(bbox_to_anchor=(1.00, 1), title="GoodForDancing")
plt.show()
# Restaurants GoodForDancing also appear more popular.
plt.figure(figsize=(20, 10))
sns.heatmap(business_df.corr(), annot=False)
plt.show()
|
# # What is single model for classification and regression?
# A single model for both classification and regression is a machine learning model that can handle both classification and regression tasks. This type of model is often called a "hybrid" or "multi-task" model because it can perform multiple tasks simultaneously.
# Classification is a type of machine learning task where the goal is to predict a categorical label or class for a given input, while regression is a type of task where the goal is to predict a continuous numerical value for a given input.
# By using a single model for both tasks, the model can share information and learn common features between the two tasks, which can improve its overall performance. This is especially useful when the two tasks are related, as the same input data can be used to make predictions for both classification and regression.
# ## Goal
# * Main focus here is to create a single model for classification and regression.
# * This is the dataset where we have a target column `diagnosis` for classification
# * We will consider ``area_mean`` column as a target of our regression task (for testing purpose)
# * Build a model that can give us the classification and regression output simultaneously
# * Let's go
# If you find this notebook helpful please upvote
# ## Import necessary libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, CSVLogger
# this is for high quality images
plt.rcParams["figure.dpi"] = 120
plt.rcParams["savefig.dpi"] = 120
# ### Load the dataset
url = "https://raw.githubusercontent.com/shuvo14051/datasets/master/breast_cancer.csv"
df = pd.read_csv(url)
df.head()
# ### Delete unnecessary columns
df = df.drop(["Unnamed: 32"], axis=1)
df = df.drop(["id"], axis=1)
# ### Map the diagnosis column
# - Where 0 is ``B`` and 1 is ``M``
# - Display the value counts of each class
# - Also the percentage of each class
# - So that we can check whether it is a balanced or imbalanced problem
#
df["diagnosis"] = df["diagnosis"].map({"B": 0, "M": 1})
df["diagnosis"].value_counts()
df["diagnosis"].value_counts(normalize=True)
# ### Creating our features and targets
# * ``X`` has all the features that we will feed into the ``ANN``
# * ``y_class`` is our target column for the ``classification`` task
# * ``y_reg`` is our target column for the ``regression`` task
X = df.drop(["diagnosis", "area_mean"], axis=1)
y_class = df["diagnosis"]
y_reg = df["area_mean"]
# ### Train test split
# - It will be a special type of split
# - ``X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)``
# - This is our typical train test split
# - But in this problem we have two target columns
# - One for classification one for regression
# - So our ``train_test_split()`` function will take four parameters
# - ``train_test_split(X, y_class, y_reg, test_size=0.2)``
# - on the left side of the equal sign we have ``X_train, X_test, y_class_train, y_class_test, y_reg_train, y_reg_test``
# - ``X_train, X_test, y_class_train, y_class_test, y_reg_train, y_reg_test = train_test_split(X, y_class, y_reg, test_size=0.2)``
(
X_train,
X_test,
y_class_train,
y_class_test,
y_reg_train,
y_reg_test,
) = train_test_split(X, y_class, y_reg, test_size=0.2)
# ## Scaling the features
# Scaling the input data for artificial neural networks (ANNs) is generally considered necessary, especially when the input features have different ranges of values. This is because ANNs are sensitive to the scale of the input data, and can produce unexpected results if the input features have different ranges.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# ## Build the model using the Keras functional API
# This is an implementation of a neural network architecture that can perform both classification and regression tasks. The architecture consists of several layers of dense (fully connected) layers, which are used to extract features from the input data and make predictions.
# Here's a breakdown of the code:
# 1. input_shape: This line defines the shape of the input data. The X.shape[1] expression retrieves the number of features in the input data, which is used to define the shape of the first layer of the model.
# 2. inputs: This line creates an input layer for the model, which will accept data in the form of a tensor with the specified shape. The Input function is from the Keras library and is used to create input layers for neural network models.
# 3. hidden: This section defines four dense (fully connected) layers, which are used to extract features from the input data. Each layer takes the output of the previous layer as input, and applies a non-linear activation function (ReLU in this case) to the output. The name argument is used to give each layer a unique name.
# 4. output_class: This layer is a dense layer with a sigmoid activation function, which is used to predict the binary class label (0 or 1) for the input data. The binary_crossentropy loss function is used to train this layer.
# 5. output_reg: This layer is a dense layer without an activation function, which is used to predict a continuous numerical value for the input data. The mean_squared_error loss function is used to train this layer.
# 6. model: This line creates a Model object that combines the input and output layers into a complete model. The inputs argument specifies the input layer and the outputs argument specifies the two output layers (for classification and regression).
# 7. model.compile: This line compiles the model by specifying the optimizer (Adam), loss functions (binary_crossentropy and mean_squared_error), and metrics (accuracy) used for training.
# The architecture of this model is designed to handle both binary classification and regression tasks. The model is trained using two separate loss functions, one for each task, so that it can learn to make predictions for both tasks simultaneously.
input_shape = X.shape[1]
inputs = Input(shape=(input_shape,), name="input")
hidden = Dense(100, activation="relu", name="hidden1")(inputs)
hidden = Dense(50, activation="relu", name="hidden2")(hidden)
hidden = Dense(25, activation="relu", name="hidden3")(hidden)
hidden = Dense(10, activation="relu", name="hidden4")(hidden)
output_class = Dense(1, activation="sigmoid", name="output_classification")(hidden)
output_reg = Dense(1, name="output_regression")(hidden)
model = Model(inputs=inputs, outputs=[output_class, output_reg])
model.compile(
optimizer="adam",
loss=["binary_crossentropy", "mean_squared_error"],
metrics=["accuracy"],
)
model.summary()
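# A hedged alternative compile: because the regression MSE on raw area_mean values is orders of
# magnitude larger than the classification cross-entropy, it can dominate the combined loss.
# Keying losses, loss weights and metrics by the output-layer names defined above lets the two
# tasks be balanced; the 0.001 weight here is illustrative, not tuned. A second Model handle on
# the same layers is compiled so the original `model` and its training below are unaffected.
balanced_model = Model(inputs=inputs, outputs=[output_class, output_reg])
balanced_model.compile(
    optimizer="adam",
    loss={
        "output_classification": "binary_crossentropy",
        "output_regression": "mean_squared_error",
    },
    loss_weights={"output_classification": 1.0, "output_regression": 0.001},
    metrics={
        "output_classification": ["accuracy"],
        "output_regression": ["mae"],
    },
)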
# ## Model plot
# This figure clearly depicts that we have two different outputs for this model: one for classification and one for regression.
# 1. 1 Input layer
# 2. 4 Hidden layers
# 3. 1 output layer separated into two tasks
# - 1 is for classification
# - 1 is for regression
# to save the figure, pass the parameter to_file="model_name.png" to plot_model
from tensorflow.keras.utils import plot_model
plot_model(model)
early_stop = EarlyStopping(monitor="val_loss", patience=6, mode="min")
log_csv = CSVLogger("train_val_logs.csv", separator=",", append=False)
callbacks = [log_csv]
# By setting verbose to 0 or 1 we choose how we want to 'see' the training progress for each epoch.
# - ``verbose=0`` will show you nothing (silent)
# - ``verbose=1`` will show you an animated progress bar
history = model.fit(
X_train,
[y_class_train, y_reg_train],
epochs=100,
verbose=0,
validation_data=(X_test, [y_class_test, y_reg_test]),
)
# ## Prediction for classification and regression
y_class_pred, y_reg_pred = model.predict(X_test)
y_class_pred = np.round(y_class_pred)
# ## Performance evaluation for classification
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
print(classification_report(y_class_test, y_class_pred))
print(accuracy_score(y_class_test, y_class_pred))
cn_mt = confusion_matrix(y_class_test, y_class_pred)
sns.heatmap(cn_mt, annot=True, fmt="g")
plt.show()
# ## Performance evaluation for regression
from sklearn.metrics import r2_score, mean_squared_error
print(mean_squared_error(y_reg_test, y_reg_pred, squared=False))
print(r2_score(y_reg_test, y_reg_pred))
history.history.keys()
# ## Learning Curve for classification
## Learning Curve for classification
plt.plot(history.history["output_classification_accuracy"])
plt.plot(history.history["val_output_classification_accuracy"])
plt.title("Model Accuracy for Classification")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
## Learning Curve for classification
plt.plot(history.history["output_classification_loss"])
plt.plot(history.history["val_output_classification_loss"])
plt.title("Model Loss for Classification")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
# ## Learning Curve for Regression
## Learning Curve for regression
plt.plot(history.history["output_regression_loss"])
plt.plot(history.history["val_output_regression_loss"])
plt.title("Model Loss for Regression")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
|
# > # **DATA MINING**
# * Dwi Krisnawan
# * Bandem Mahatma
# * Gus Rai Surya Laksana
# # **Data Visualization Section**
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from mpl_toolkits.mplot3d import Axes3D
diabetes = pd.read_csv("../input/diabetes/diabetes.csv")
diabetes_df = diabetes
print(diabetes.shape)
diabetes.head(20)
print(diabetes.columns)
diabetes.describe()
plt.figure(figsize=(20, 10))
cor = diabetes.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.YlGnBu, vmin=0, vmax=1)
plt.show()
g = sns.FacetGrid(diabetes, hue="Outcome", height=8)  # set up the axes
g = g.map(plt.scatter, "Pregnancies", "Glucose").add_legend() # Plotting
plt.show()
sns.pairplot(diabetes, hue="Outcome", corner=True)
sns.pairplot(
diabetes[["Pregnancies", "Glucose", "BloodPressure", "BMI"]], diag_kind="kde"
)
sns.catplot(x="Pregnancies", y="Age", hue="Outcome", kind="box", data=diabetes)
import plotly.express as px
fig = px.scatter(
diabetes.query("Outcome==1"),
x="Glucose",
y="BloodPressure",
size="Insulin",
color="Pregnancies",
hover_name="Pregnancies",
log_x=True,
size_max=60,
title="outcome = 1",
)
fig.show()
fig = px.scatter(
diabetes.query("Outcome==0"),
x="Glucose",
y="BloodPressure",
size="Insulin",
color="Pregnancies",
hover_name="Pregnancies",
log_x=True,
size_max=60,
title="outcome = 0",
)
fig.show()
fig = px.parallel_categories(
diabetes, color="Glucose", color_continuous_scale=px.colors.sequential.Sunset
)
fig.show()
fig = px.parallel_categories(
diabetes, color="BloodPressure", color_continuous_scale=px.colors.sequential.deep
)
fig.show()
# Still has errors, do not modify
import plotly.graph_objects as go
from ipywidgets import widgets
import pandas as pd
import numpy as np
diabetes_df = diabetes
# Build parcats dimensions
categorical_dimensions = ["Pregnancies", "BloodPressure", "Glucose"]
dimensions = [
dict(values=diabetes_df[label], label=label) for label in categorical_dimensions
]
# Build colorscale
color = np.zeros(len(diabetes_df), dtype="uint8")
colorscale = [[0, "gray"], [1, "firebrick"]]
# Build figure as FigureWidget
fig = go.FigureWidget(
data=[
go.Scatter(
x=diabetes_df.Glucose,
y=diabetes_df["Outcome"],
marker={"color": "gray"},
mode="markers",
selected={"marker": {"color": "firebrick"}},
unselected={"marker": {"opacity": 0.3}},
),
go.Parcats(
domain={"y": [0, 0.4]},
dimensions=dimensions,
line={
"colorscale": colorscale,
"cmin": 0,
"cmax": 1,
"color": color,
"shape": "hspline",
},
),
]
)
fig.update_layout(
height=800,
xaxis={"title": "Glucose"},
yaxis={"title": "Outcome", "domain": [0.6, 1]},
dragmode="lasso",
hovermode="closest",
)
# Update color callback
def update_color(trace, points, state):
# Update scatter selection
fig.data[0].selectedpoints = points.point_inds
# Update parcats colors
new_color = np.zeros(len(diabetes_df), dtype="uint8")
new_color[points.point_inds] = 1
fig.data[1].line.color = new_color
# Register callback on scatter selection...
fig.data[0].on_selection(update_color)
# and parcats click
fig.data[1].on_click(update_color)
fig
import plotly.express as px
fig = px.scatter(
diabetes,
x="Glucose",
y="BloodPressure",
animation_frame="Age",
animation_group="BMI",
size="Pregnancies",
color="SkinThickness",
hover_name="Insulin",
facet_col="Outcome",
log_x=True,
size_max=45,
range_x=[1, 400],
range_y=[25, 90],
)
fig.show()
# # **Deep Learning Section**
import sys
import pandas as pd
import numpy as np
import sklearn
import matplotlib
import keras
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
# Print the library versions
print("Python: {}".format(sys.version))
print("Pandas: {}".format(pd.__version__))
print("Numpy: {}".format(np.__version__))
print("Sklearn: {}".format(sklearn.__version__))
print("Matplotlib: {}".format(matplotlib.__version__))
print("Keras :{}".format(keras.__version__))
print("Pandas :{}".format(pd.__version__))
diabetes_df.head(10)
diabetes_df.describe()
diabetes_df.info()
dataset = diabetes_df.values
print(dataset.shape)
X = dataset[:, 0:8]
Y = dataset[:, 8].astype(int)
print(X.shape)
print(Y.shape)
print(Y[:5])
from sklearn.model_selection import GridSearchCV, KFold
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.optimizers import Adam
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X)
X_standardized = scaler.transform(X)
data = pd.DataFrame(X_standardized)
data.describe()
# Set the seed
seed = 6
np.random.seed(seed)
# Build the model
def create_model():
# create model
model = Sequential()
model.add(Dense(8, input_dim=8, kernel_initializer="normal", activation="relu"))
model.add(Dense(4, input_dim=8, kernel_initializer="normal", activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# Compile Model
    adam = Adam(learning_rate=0.01)
model.compile(loss="binary_crossentropy", optimizer=adam, metrics=["accuracy"])
return model
model = KerasClassifier(build_fn=create_model, verbose=1)
# define the grid search parameters
batch_size = [10, 20, 40]
epochs = [10, 50, 100]
# make a dictionary of the grid search parameters
param_grid = dict(batch_size=batch_size, epochs=epochs)
# build and fit the GridSearchCV
grid = GridSearchCV(
    estimator=model, param_grid=param_grid, cv=KFold(shuffle=True, random_state=seed), verbose=10
)
grid_results = grid.fit(X_standardized, Y)
# summarize the results
print(
"Best: {0}, using {1}".format(grid_results.best_score_, grid_results.best_params_)
)
means = grid_results.cv_results_["mean_test_score"]
stds = grid_results.cv_results_["std_test_score"]
params = grid_results.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
print("{0} ({1}) with: {2}".format(mean, stdev, param))
from datetime import datetime
from packaging import version
from tensorflow import keras
logdir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
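# The TensorBoard callback above is defined but never attached to a fit call; a minimal example
# of wiring it up (training a fresh copy of the same architecture for a few epochs, purely for
# illustration) would be:
tb_demo_model = create_model()
tb_demo_model.fit(
    X_standardized,
    Y,
    epochs=5,
    batch_size=20,
    validation_split=0.2,
    callbacks=[tensorboard_callback],
    verbose=0,
)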
# # **SECTION 2 MACHINE LEARNING (Data Split)**
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
diabetes_df.head()
diabetes_df.describe()
not_zero = ["Glucose", "BloodPressure", "SkinThickness", "Insulin", "BMI"]
for column in not_zero:
diabetes_df[column] = diabetes_df[column].replace(0, np.NaN)
mean = int(diabetes_df[column].mean(skipna=True))
diabetes_df[column] = diabetes_df[column].replace(np.NaN, mean)
X = diabetes_df.iloc[:, 0:8]  # all 8 feature columns (Outcome excluded)
y = diabetes_df["Outcome"]
# Change the test size as required
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, test_size=0.4)
import math
math.sqrt(len(y_test))
knn = KNeighborsClassifier(n_neighbors=27, p=2, metric="euclidean")
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
y_pred
accuracy_score(y_pred, y_test)
prediction = knn.predict([[6, 148.0, 62.0, 35.0, 455.0, 33.6, 0.627, 30]])
if prediction == 1:
    print("The person has Diabetes")
else:
    print("The person does not have Diabetes")
prediction
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
print("Hello world")
x = 5
y = "hello"
print(x)
print(y)
if 5 > 2:
print("5 is greater")
else:
print("5 is not greater")
# print("5 is greater")
print("5 is greater")
"""
print("5 is greater")
print("5 is greater")
"""
x = str(3)
y = int(3)
z = float(3)
x
_my_var = 2
_my_var
x, y, z = 3, "hello", 4.5
print(x)
print(y)
print(z)
x = y = z = "Orange"
print(x)
print(y)
print(z)
# unpacking
fruits = ["Orange", "Mango", "Cherry"]
x, y, z = fruits
print(x)
print(y)
print(z)
x = "awesome"
print("Python is " + x)
y = "Python is "
z = y + x
print(z)
# typecasting
x = 9
y = " Hello"
print(str(x) + y)
# Function
x = "awesome"
def myFunction():
print("Python is " + x)
myFunction()
x = "hello world"
print(x[1:5])
print(x[:4])
print(x[2:])
print(x[-5:-3])
for x in "hello world":
print(x)
x = "Life is beautiful"
print("beautiful" in x)
x = "Life is beautiful"
x.upper()
x.lower()
x = "Life is beautiful"
x.replace("f", "k")
x = "Life, is, beautiful"
x.split(",")
# list
mylist1 = ["apple", "orange", "banana"]
mylist1
mylist2 = [1, 2, 3]
mylist2
mylist3 = [True, False]
mylist3
mylist4 = [1, "apple", True]
mylist4
mylist2 = [1, 2, 3]
mylist3 = [True, False]
mylist2 + mylist3
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2[1:3]
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2[:3]
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2[1:]
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2[-5:-3]
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2[1] = "apple"
mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2[1:3] = ["apple", "orange"]
mylist2
mylist = list((1, 2, 3, 4, 5, 6, 7, 8, 9))
mylist
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
2 in mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2.append(10)
mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2.insert(5, 10)
mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2.remove(5)
mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2.pop()
mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
mylist2.clear()
mylist2
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for x in mylist2:
print(x)
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for i in range(len(mylist2)):
print(mylist2[i])
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
i = 0
while i < len(mylist2):
print(mylist2[i])
i = i + 1
mylist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
[print(x) for x in mylist2]
mylist1 = ["apple", "orange", "banana"]
mylist1.sort()
mylist1
mylist1 = ["apple", "orange", "banana"]
mylist1.sort(reverse=True)
mylist1
mylist1 = ["apple", "orange", "banana"]
mylist = mylist1.copy()
mylist
mylist1 = ["apple", "orange", "banana"]
mylist1.count("orange")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data1 = pd.read_csv("/kaggle/input/email-spam-dataset/lingSpam.csv")
data1.info()
data1.head()
data2 = pd.read_csv("/kaggle/input/email-spam-dataset/enronSpamSubset.csv")
data2.info()
data2.head()
data3 = pd.read_csv("/kaggle/input/email-spam-dataset/completeSpamAssassin.csv")
data3.info()
data3.head()
# delete unneeded columns
data1.drop("Unnamed: 0", inplace=True, axis=1)
data2.drop(["Unnamed: 0", "Unnamed: 0.1"], inplace=True, axis=1)
data3.drop("Unnamed: 0", inplace=True, axis=1)
# concatenate data
data = pd.concat([data1, data2, data3], axis=0)
# remove missing values (NaN)
data.dropna(inplace=True)
data.info()
data.head()
# # Text preprocessing
emails = data["Body"]
# lowering case
emails = [text.lower() for text in emails]
emails[0]
import re

# removal of hyperlinks (done while the URLs are still intact, before punctuation is stripped)
emails = [re.sub(r"http\S+", "", text) for text in emails]
# removal of HTML tags
emails = [re.sub(r"<.*?>", "", text) for text in emails]
emails[0]
# removal of special characters and numbers
emails = [re.sub("[^a-zA-Z]", " ", text) for text in emails]
emails[0]
# removal of extra spaces
emails = [re.sub(" +", " ", text) for text in emails]
emails[0]
# tokenization
import nltk
emails = [nltk.word_tokenize(text) for text in emails]
emails[0]
# removal of stopwords
stopwords = nltk.corpus.stopwords.words("english")
emails = [[word for word in text if word not in stopwords] for text in emails]
emails[0]
# Stemming or lemmatization - lemmatizers are slower, but handle tenses and nouns better. Use the WordNet lemmatizer, ideally with a POS tag (a POS-aware sketch follows the basic loop below).
# lemmatization
nltk.data.path.append("/kaggle/input/corpora/")
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
emails = [[lemmatizer.lemmatize(word) for word in text] for text in emails]
emails[0]
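# The note above mentions lemmatizing with a POS tag; the loop above used the
# lemmatizer's default (noun) POS. A minimal POS-aware sketch with NLTK, shown
# on the first email only and assuming the averaged_perceptron_tagger resource
# is available in the nltk data path:
from nltk import pos_tag
from nltk.corpus import wordnet


def wordnet_pos(treebank_tag):
    # map Penn Treebank tags to the WordNet POS categories the lemmatizer expects
    if treebank_tag.startswith("J"):
        return wordnet.ADJ
    if treebank_tag.startswith("V"):
        return wordnet.VERB
    if treebank_tag.startswith("R"):
        return wordnet.ADV
    return wordnet.NOUN


pos_lemmatized_example = [
    lemmatizer.lemmatize(word, wordnet_pos(tag)) for word, tag in pos_tag(emails[0])
]
pos_lemmatized_example[:20]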
# # Feature extraction
# Creating a vector of features (words) for each email
# OpenAI:
# > Both CountVectorizer and TF-IDF (Term Frequency-Inverse Document Frequency) from scikit-learn are popular techniques for feature extraction in text data like emails, and each has its own merits.
# >
# > CountVectorizer creates a Bag of Words (BoW) model, where the features are the counts of each word in the document. This method is simple and easy to implement but can give more importance to words that appear frequently, regardless of their significance in distinguishing spam from non-spam emails.
# >
# > TF-IDF, on the other hand, takes into account not only the frequency of a word in a document but also its inverse frequency across all documents. This means that words that are common across all emails will receive lower weights, while words that are unique to specific emails will receive higher weights. This can be advantageous for spam detection, as spam emails often contain specific words or phrases that are less common in legitimate emails.
# >
# > In general, TF-IDF tends to work better than CountVectorizer for spam detection because it can better capture the importance of different words. However, the choice between the two methods will depend on the specific characteristics of the dataset and the problem you're trying to solve. It's a good idea to experiment with both techniques and evaluate their performance on your dataset using cross-validation or a separate validation set. This will help you determine which method works best for your particular spam detection task.
# Bag of Words, almost all preprocessing steps could be done there
# https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(max_features=25000)
x = vectorizer.fit_transform([" ".join(text) for text in emails]).toarray()
print(x.shape)
vectorizer.get_feature_names_out()[:10] # first 10 in alphabetical order
# TF-IDF
from sklearn.feature_extraction.text import TfidfVectorizer
tf_vectorizer = TfidfVectorizer(max_features=25000)
x2 = tf_vectorizer.fit_transform([" ".join(text) for text in emails]).toarray()
print(x2.shape)
tf_vectorizer.get_feature_names_out()[:10]
# # Word cloud
# from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# from PIL import Image
# spam_emails = np.vstack((np.asarray(emails), np.asarray(data["Label"])))
# spam_emails[0]
# spams = ' '.join([' '.join(email) for email in emails])
# #create the wordcloud object
# wordcloud = WordCloud(stopwords = STOPWORDS, collocations=True).generate(spams)
# #plot the wordcloud object
# plt.imshow(wordcloud, interpolation='bilInear')
# plt.axis('off')
# plt.show()
# # create a dictionary of word frequencies
# text_dictionary = wordcloud.process_text(whole_text)
# # sort the dictionary
# word_freq={k: v for k, v in sorted(text_dictionary.items(),reverse=True, key=lambda item: item[1])}
# #use words_ to print relative word frequencies
# rel_freq=wordcloud.words_
# #print results
# print(list(word_freq.items())[:5])
# print(list(rel_freq.items())[:5])
# word cloud with the most frequent words
# TF-IDF, TF-IDF weighted W2V, and average W2
# try creating a length-of-text feature and an average word length feature and find whether it's practical (a sketch follows below)
# LDA for topic modeling as a feature
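# A minimal sketch of the length-based features mentioned in the notes above,
# computed from the tokenized emails (illustrative only; these arrays are not
# fed into the models below):
text_len = np.array([len(text) for text in emails])
avg_word_len = np.array(
    [np.mean([len(word) for word in text]) if len(text) > 0 else 0.0 for text in emails]
)
print(text_len[:5], avg_word_len[:5])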
# # Split to train and test data
# split to train and test data for CountVectorizer
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, np.asarray(data["Label"]), random_state=42, test_size=0.2
)
x_train.shape
# split to train and test data for TF-IDF
x2_train, x2_test, y2_train, y2_test = train_test_split(
x2, np.asarray(data["Label"]), random_state=42, test_size=0.2
)
# # Classification algorithms (supervised)
# ( https://towardsdatascience.com/top-10-binary-classification-algorithms-a-beginners-guide-feeacbd7a3e2 )
# ## Evaluation metrics:
# **Accuracy** = (True Positives + True Negatives) / (True Positives + False Positives + True Negatives + False Negatives)
# Accuracy measures the proportion of correct predictions made by the model out of the total number of predictions.
# **Precision** = True Positives / (True Positives + False Positives)
# Precision measures the proportion of true positive predictions out of all the positive predictions made by the model.
# **Recall** = True Positives / (True Positives + False Negatives)
# In the context of spam detection, recall indicates how well the classifier identifies spam emails out of all the actual spam emails.
# **F1 Score** = 2 * (Precision * Recall) / (Precision + Recall)
# An F1 score reaches its best value at 1 (perfect precision and recall) and its worst value at 0.
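# A tiny worked example of these four metrics on hand-made labels/predictions,
# just to make the formulas above concrete (the values are arbitrary):
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true_demo = [1, 0, 1, 1, 0, 0, 1, 0]
y_pred_demo = [1, 0, 0, 1, 0, 1, 1, 0]
print("Accuracy:", accuracy_score(y_true_demo, y_pred_demo))  # 6 of 8 correct
print("Precision:", precision_score(y_true_demo, y_pred_demo))  # 3 TP / (3 TP + 1 FP)
print("Recall:", recall_score(y_true_demo, y_pred_demo))  # 3 TP / (3 TP + 1 FN)
print("F1:", f1_score(y_true_demo, y_pred_demo))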
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import seaborn
import matplotlib.pyplot as plt
def print_stats(algorithm, x_train_data, x_test_data, y_train_data, y_test_data):
# actually perform classification
y_pred = algorithm.predict(x_test_data)
# Thus in binary classification, the count of
# true negatives is 0,0
# false negatives is 1,0
# true positives is 1,1
# false positives is 0,1
conf = confusion_matrix(y_pred=y_pred, y_true=y_test_data)
tn, fp, fn, tp = conf.ravel()
print(
"Accuracy on training data: {:.2f}%".format(
100 * algorithm.score(x_train_data, y_train_data)
)
)
print(
"Accuracy on testing data: {:.2f}%".format(
100 * algorithm.score(x_test_data, y_test_data)
)
)
print("Precision: {:.2f}%".format(100 * precision_score(y_pred, y_test_data)))
print("Recall: {:.2f}%".format(100 * recall_score(y_pred, y_test_data)))
print("F1 Score: {:.2f}%".format(100 * f1_score(y_pred, y_test_data)))
ax = plt.subplot()
seaborn.heatmap(conf, annot=True, fmt="", linewidths=2, cmap="Greens")
ax.set_xlabel("Predicted")
ax.set_ylabel("Real")
ax.xaxis.set_ticklabels(["Ham", "Spam"])
ax.yaxis.set_ticklabels(["Ham", "Spam"])
plt.show()
# Naïve Bayes
from sklearn.naive_bayes import GaussianNB
NB = GaussianNB()
NB.fit(x_train, y_train)
print("Using Count Vectorizer: ")
print_stats(NB, x_train, x_test, y_train, y_test)
NB2 = GaussianNB()
NB2.fit(x2_train, y2_train)
print("Using TF-IDF: ")
print_stats(NB2, x2_train, x2_test, y2_train, y2_test)
from sklearn.naive_bayes import MultinomialNB
MNB = MultinomialNB()
MNB.fit(x_train, y_train)
print("Using Count Vectorizer: ")
print_stats(MNB, x_train, x_test, y_train, y_test)
MNB2 = MultinomialNB()
MNB2.fit(x2_train, y2_train)
print("Using TF-IDF: ")
print_stats(MNB2, x2_train, x2_test, y2_train, y2_test)
from sklearn.linear_model import LogisticRegression
LR = LogisticRegression(max_iter=1000)
LR.fit(x_train, y_train)
print("Using Count Vectorizer: ")
print_stats(LR, x_train, x_test, y_train, y_test)
LR2 = LogisticRegression(max_iter=1000)
LR2.fit(x2_train, y2_train)
print("Using TF-IDF: ")
print_stats(LR2, x2_train, x2_test, y2_train, y2_test)
# very long and not very accurate, 12 minutes
# from sklearn.neighbors import KNeighborsClassifier
# KNN = KNeighborsClassifier(algorithm = 'brute', n_jobs=-1)
# KNN.fit(x_train, y_train)
# print_stats(KNN)
from sklearn.svm import LinearSVC
SVM = LinearSVC(C=0.0001)
SVM.fit(x_train, y_train)
print("Using Count Vectorizer: ")
print_stats(SVM, x_train, x_test, y_train, y_test)
SVM2 = LinearSVC(C=10)
SVM2.fit(x2_train, y2_train)
print("Using TF-IDF: ")
print_stats(SVM2, x2_train, x2_test, y2_train, y2_test)
# 4 minutes
# from sklearn.tree import DecisionTreeClassifier
# CLF = DecisionTreeClassifier()
# CLF.fit(x_train, y_train)
# print_stats(CLF)
from sklearn.ensemble import RandomForestClassifier
# n_estimators = number of decision trees
RF = RandomForestClassifier(n_estimators=100, max_depth=50)
RF.fit(x_train, y_train)
print("Using Count Vectorizer: ")
print_stats(RF, x_train, x_test, y_train, y_test)
RF2 = RandomForestClassifier(n_estimators=100, max_depth=50)
RF2.fit(x2_train, y2_train)
print("Using TF-IDF: ")
print_stats(RF2, x2_train, x2_test, y2_train, y2_test)
# Voting Classifier
from sklearn.ensemble import VotingClassifier
EVC = VotingClassifier(
estimators=[("MNB", MNB), ("LR", LR), ("RF", RF), ("SVM", SVM)], voting="hard"
)
EVC.fit(x_train, y_train)
print("Using Count Vectorizer: ")
print_stats(EVC, x_train, x_test, y_train, y_test)
EVC2 = VotingClassifier(
estimators=[("MNB", MNB2), ("LR", LR2), ("RF", RF2), ("SVM", SVM2)], voting="hard"
)
EVC2.fit(x2_train, y2_train)
print("Using TF-IDF: ")
print_stats(EVC2, x2_train, x2_test, y2_train, y2_test)
# # Unsupervised algorithms
# ## Topic modelling
# https://towardsdatascience.com/latent-dirichlet-allocation-lda-9d1cd064ffa2
# https://towardsdatascience.com/the-ultimate-guide-to-clustering-algorithms-and-topic-modeling-3a65129df324
from sklearn.decomposition import LatentDirichletAllocation
LDA = LatentDirichletAllocation(n_components=2, random_state=42)
LDA.fit(x2)
# Explore the topics
for topic_idx, topic in enumerate(LDA.components_):
print(f"Topic {topic_idx + 1}:")
top_n_words = 10
feature_names = tf_vectorizer.get_feature_names_out()
top_words = [feature_names[i] for i in topic.argsort()[-top_n_words:]]
print(" ".join(top_words))
# ## Clustering
# https://stackoverflow.com/a/69024239
# https://stackoverflow.com/a/27586132
from sklearn.metrics import davies_bouldin_score
from sklearn.metrics import silhouette_score
def print_stats_for_unsupervised(algorithm, x_data):
y_pred = algorithm.fit_predict(x_data)
conf = confusion_matrix(y_pred=y_pred, y_true=np.asarray(data["Label"]))
# tn, fp, fn, tp = conf.ravel()
# print("Accuracy on data: {:.2f}%".format(100 * algorithm.score(x2,np.asarray(data["Label"]))))
    print(
        "Precision: {:.2f}%".format(
            100 * precision_score(np.asarray(data["Label"]), y_pred)
        )
    )
    print(
        "Recall: {:.2f}%".format(100 * recall_score(np.asarray(data["Label"]), y_pred))
    )
    print("F1 Score: {:.2f}%".format(100 * f1_score(np.asarray(data["Label"]), y_pred)))
print(
"Davies-Bouldin score: {:.2f}%".format(
100 * davies_bouldin_score(x_data, y_pred)
)
)
print("Silhouette score: {:.2f}%".format(100 * silhouette_score(x_data, y_pred)))
ax = plt.subplot()
seaborn.heatmap(conf, annot=True, fmt="", linewidths=2, cmap="Greens")
ax.set_xlabel("Predicted")
ax.set_ylabel("Real")
ax.xaxis.set_ticklabels(["Ham", "Spam"])
ax.yaxis.set_ticklabels(["Ham", "Spam"])
plt.show()
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# initialize PCA with 2 components
pca = PCA(n_components=2, random_state=42)
# pass our X to the pca and store the reduced vectors into pca_vecs
pca_vecs = pca.fit_transform(x2)
# save our two dimensions into x0 and x1
x0 = pca_vecs[:, 0]
x1 = pca_vecs[:, 1]
KM = KMeans(n_clusters=2, n_init=10, max_iter=300, random_state=42)
print_stats_for_unsupervised(KM, pca_vecs)
# assign clusters and pca vectors to our dataframe
clusters = KM.labels_
data["cluster"] = clusters
data["x0"] = x0
data["x1"] = x1
def get_top_keywords(n_terms):
"""This function returns the keywords for each centroid of the KMeans"""
data = (
pd.DataFrame(x2).groupby(clusters).mean()
) # groups the TF-IDF vector by cluster
terms = tf_vectorizer.get_feature_names_out() # access tf-idf terms
for i, r in data.iterrows():
print("\nCluster {}".format(i))
print(
",".join([terms[t] for t in np.argsort(r)[-n_terms:]])
) # for each row of the dataframe, find the n terms that have the highest tf idf score
get_top_keywords(100)
# map clusters to appropriate labels
cluster_map = {0: "ham", 1: "spam"}
# apply mapping
data["cluster"] = data["cluster"].map(cluster_map)
# set image size
plt.figure(figsize=(12, 7))
# set a title
plt.title("TF-IDF + KMeans clustering", fontdict={"fontsize": 18})
# set axes names
plt.xlabel("X0", fontdict={"fontsize": 16})
plt.ylabel("X1", fontdict={"fontsize": 16})
# create scatter plot with seaborn, where hue is the class used to group the data
seaborn.scatterplot(data=data, x="x0", y="x1", hue="cluster", palette="viridis")
plt.show()
# very long
# from sklearn.cluster import DBSCAN
# DB = DBSCAN(eps=3, min_samples=2)
# print_stats_for_unsupervised(DB, x2)
# very very very long
# from sklearn.cluster import AgglomerativeClustering
# AC = AgglomerativeClustering()
# print_stats_for_unsupervised(AC, x2)
# from sklearn.cluster import SpectralClustering
# SC = SpectralClustering(n_clusters=2,assign_labels='discretize',random_state=0)
# print_stats_for_unsupervised(SC, x2)
# ## Anomaly detection
# very long
# from sklearn.ensemble import IsolationForest
# IF = IsolationForest(random_state=0)
# print_stats_for_unsupervised(IF, x2)
# from sklearn.neighbors import LocalOutlierFactor
# LOF = LocalOutlierFactor(n_neighbors=2)
# print_stats_for_unsupervised(LOF, x2)
# very very very very long
# from sklearn.svm import OneClassSVM
# OCSVM = OneClassSVM(gamma='auto')
# print_stats_for_unsupervised(OCSVM, x2)
# # Deep learning algorithms
# (subset of neural network algorithms)
# ## Supervised Learning
# CNN
# RNN
# LSTM
# Transformer models: Transformers are a more recent innovation in NLP and have achieved state-of-the-art performance on various text classification tasks. The transformer architecture is based on self-attention mechanisms, allowing the model to efficiently capture context and dependencies in text. BERT, GPT, and RoBERTa are examples of transformer models that can be fine-tuned for spam detection (a rough fine-tuning sketch follows this list).
# GPT
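# A rough sketch (kept commented out, like the other long-running cells in this
# notebook) of fine-tuning a small transformer for spam detection with the
# Hugging Face `transformers` and `datasets` libraries. The model name,
# hyperparameters, and column handling below are assumptions, not part of the
# original pipeline; it works on the raw "Body"/"Label" columns rather than the
# BoW / TF-IDF features above.
# from datasets import Dataset
# from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
#                           TrainingArguments, Trainer)
# hf_df = data[["Body", "Label"]].rename(columns={"Body": "text", "Label": "labels"})
# hf_df["labels"] = hf_df["labels"].astype(int)
# hf_ds = Dataset.from_pandas(hf_df, preserve_index=False).train_test_split(test_size=0.2)
# tok = AutoTokenizer.from_pretrained("distilbert-base-uncased")
# tokenized = hf_ds.map(lambda batch: tok(batch["text"], truncation=True), batched=True)
# spam_model = AutoModelForSequenceClassification.from_pretrained(
#     "distilbert-base-uncased", num_labels=2)
# trainer = Trainer(
#     model=spam_model,
#     args=TrainingArguments("spam_outputs", num_train_epochs=1,
#                            per_device_train_batch_size=16, report_to="none"),
#     train_dataset=tokenized["train"],
#     eval_dataset=tokenized["test"],
#     tokenizer=tok,
# )
# trainer.train()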
# ## Unsupervised Learning
# While unsupervised deep learning algorithms can help learn useful representations of the data, they typically need to be combined with a supervised classifier or clustering algorithm to perform the actual spam detection. For instance, you could use an autoencoder to learn a low-dimensional representation of the email data and then train a supervised classifier (e.g., logistic regression, SVM) on the extracted features to classify emails as spam or ham.
# ### Autoencoder
# from keras.layers import Input, Dense
# from keras.models import Model
# input_dim = x2.shape[1]
# encoding_dim = 64 # The dimensionality of the latent space
# # Define the encoder
# input_data = Input(shape=(input_dim,))
# encoded = Dense(encoding_dim, activation='relu')(input_data)
# # Define the decoder
# decoded = Dense(input_dim, activation='sigmoid')(encoded)
# # Create the autoencoder model
# autoencoder = Model(input_data, decoded)
# # Create the encoder model
# encoder = Model(input_data, encoded)
# # Compile and train the autoencoder
# autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# autoencoder.fit(x2, x2, epochs=50, batch_size=256, shuffle=True,
#                 validation_split=0.1)  # validate the reconstruction on held-out emails, not on the labels
|
import pandas as pd
import numpy as np
from datasets import Dataset, DatasetDict
from transformers import TrainingArguments, Trainer
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from sklearn.model_selection import train_test_split
df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
df
# The mean of target is 0.42, so the positive class is not especially rare,
# meaning a simple KFold should be fine for cross-validation
df.describe()
df.describe(include=object)
# For now, leave keyword and location out
sep = "[s]"
df["inputs"] = df["text"]
df["inputs"] = df["inputs"].str.lower()
df
model_nm = "microsoft/deberta-v3-small"
tokz = AutoTokenizer.from_pretrained(model_nm)
def tokenize(ds):
return tokz(ds["inputs"])
ds = Dataset.from_pandas(df)
ds
tok_ds = ds.map(tokenize, batched=True)
tok_ds[0]
tok_ds = tok_ds.rename_columns({"target": "labels"})
tok_ds
# Cross-validation with KFold (here approximated by a single hold-out split)
df_train, df_valid = train_test_split(
df, train_size=0.75, test_size=0.25, random_state=0
)
train_idx = df_train.index
valid_idx = df_valid.index
train_idx, valid_idx
dds = DatasetDict({"train": tok_ds.select(train_idx), "test": tok_ds.select(valid_idx)})
dds
lr, bs = 8e-5, 128
wd, epochs = 0.01, 4
args = TrainingArguments(
"outputs",
learning_rate=lr,
warmup_ratio=0.1,
lr_scheduler_type="cosine",
fp16=True,
evaluation_strategy="epoch",
per_device_train_batch_size=bs,
per_device_eval_batch_size=bs * 2,
num_train_epochs=epochs,
weight_decay=wd,
report_to="none",
)
from transformers import EvalPrediction
from typing import Dict
from sklearn.metrics import f1_score
# TODO: look into the f1 score implementation in more detail
def f1_metrics(res: EvalPrediction) -> Dict:
pred = res.predictions.argmax(axis=1)
labels = res.label_ids
return {"f1": f1_score(labels, pred, average="macro")}
model = AutoModelForSequenceClassification.from_pretrained(model_nm)
trainer = Trainer(
model,
args,
train_dataset=dds["train"],
eval_dataset=dds["test"],
tokenizer=tokz,
compute_metrics=f1_metrics
# compute_metrics=corr
)
trainer.train()
# Save the evaluation metrics (trainer state)
trainer.save_state()
# Save the model
trainer.save_model()
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
test_df
test_df["inputs"] = test_df["text"]
test_df["inputs"] = test_df["inputs"].str.lower()
test_df
test_ds = Dataset.from_pandas(test_df)
tok_test_ds = test_ds.map(tokenize, batched=True)
pred_result = trainer.predict(tok_test_ds)
pred_result
pd.DataFrame(pred_result[0])
test_df["predict"] = pred_result.predictions.argmax(axis=1).tolist()
test_df
pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
submit = test_df[["id", "predict"]]
submit = submit.rename(columns={"predict": "target"})
submit
submit.to_csv("submission.csv", index=False)
|
# BigQuery
PROJECT_ID = "hackernews-301014"
from google.cloud import bigquery
client = bigquery.Client(project=PROJECT_ID)
# Construct a reference to the "hacker_news" dataset
dataset_ref = client.dataset("hacker_news", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# List all the tables in the "hacker_news" dataset
tables = list(client.list_tables(dataset))
# Print names of all tables in the dataset (there are four!)
for table in tables:
print(table.table_id)
# Construct a reference to the "full" table
table_ref = dataset_ref.table("full")
# API request - fetch the table
table = client.get_table(table_ref)
# Print information on all the columns in the "full" table in the "hacker_news" dataset
table.schema
# Preview the first five lines of the "full" table
client.list_rows(table, max_results=5).to_dataframe()
query = """
SELECT *
FROM `bigquery-public-data.hacker_news.full`
WHERE (REGEXP_CONTAINS(text, r"(p|P)rivacy") OR REGEXP_CONTAINS(title, r"(P|p)rivacy")) AND timestamp > '2020-06-01'
"""
# Set up the query
query_job = client.query(query)
# API request - run the query, and return a pandas DataFrame
df = query_job.to_dataframe()
df.head()
DATASET_ID = "priv"
dataset_ref = client.dataset(DATASET_ID)
dataset = bigquery.Dataset(dataset_ref)
dataset = client.create_dataset(dataset)
# Construct a reference to the "hacker_news" dataset
dataset_ref = client.dataset("priv", project=PROJECT_ID)
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# TODO(developer): Set table_id to the ID of the destination table.
# table_id = "your-project.your_dataset.your_table_name"
table_id = "hackernews-301014.priv.priv_stories"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT *
FROM `bigquery-public-data.hacker_news.full`
WHERE (REGEXP_CONTAINS(text, r"(p|P)rivacy") OR REGEXP_CONTAINS(title, r"(P|p)rivacy")) AND timestamp > '2018-01-01' AND type='story'
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
# List all the tables
tables = list(client.list_tables(dataset))
# Print names of all tables in the dataset
for table in tables:
print(table.table_id)
table_id = "hackernews-301014.priv.priv_c0"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.id, o.parent, o.text AS text_0, o.by, o.id AS id_0, o.type AS type_0
FROM `hackernews-301014.priv.priv_stories` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
table_id = "hackernews-301014.priv.priv_c1"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.type_0, p.id_0, p.text_0, o.parent, o.text AS text_1, o.by, o.id AS id_1
FROM `hackernews-301014.priv.priv_c0` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id_0 = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
table_id = "hackernews-301014.priv.priv_c2"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.type_0, p.id_0, p.id_1, p.text_0, p.text_1, o.parent, o.text AS text_2, o.by, o.id AS id_2
FROM `hackernews-301014.priv.priv_c1` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id_1 = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
table_id = "hackernews-301014.priv.priv_c3"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.type_0, p.id_0, p.id_1, p.id_2, p.text_0, p.text_1, p.text_2, o.parent, o.text AS text_3, o.by, o.id AS id_3
FROM `hackernews-301014.priv.priv_c2` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id_2 = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
table_id = "hackernews-301014.priv.priv_c4"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.type_0, p.id_0, p.id_1, p.id_2, p.id_3, p.text_0, p.text_1, p.text_2, p.text_3, o.parent, o.text AS text_4, o.by, o.id AS id_4
FROM `hackernews-301014.priv.priv_c3` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id_3 = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
table_id = "hackernews-301014.priv.priv_c5"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.type_0, p.id_0, p.id_1, p.id_2, p.id_3, p.id_4, p.text_0, p.text_1, p.text_2, p.text_3, p.text_4, o.parent, o.text AS text_5, o.by, o.id AS id_5
FROM `hackernews-301014.priv.priv_c4` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id_4 = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
table_id = "hackernews-301014.priv.priv_c6"
job_config = bigquery.QueryJobConfig(destination=table_id)
query = """
SELECT p.title, p.timestamp, p.text, p.url, p.score, p.descendants, p.type, p.type_0, p.id_0, p.id_1, p.id_2, p.id_3, p.id_4, p.id_5, p.text_0, p.text_1, p.text_2, p.text_3, p.text_4, p.text_5, o.parent, o.text AS text_6, o.by, o.id AS id_6
FROM `hackernews-301014.priv.priv_c5` p
LEFT JOIN `bigquery-public-data.hacker_news.full` o ON p.id_5 = o.parent
"""
# Start the query, passing in the extra configuration.
query_job = client.query(query, job_config=job_config) # Make an API request.
query_job.result() # Wait for the job to complete.
print("Query results loaded to the table {}".format(table_id))
# List all the tables
tables = list(client.list_tables(dataset))
# Print names of all tables in the dataset
for table in tables:
print(table.table_id)
# Construct a reference to the "priv_c6" table
table_ref = dataset_ref.table("priv_c6")
# API request - fetch the table
table = client.get_table(table_ref)
# Load the full contents of the "priv_c6" table into a DataFrame
df = client.list_rows(table).to_dataframe()
df
|
# # Health Insurance Lead Prediction
# [Link to competition here!](https://datahack.analyticsvidhya.com/contest/job-a-thon/)
# Go there and register to be able to download the dataset and submit your predictions.
# Your client FinMan is a financial services company that provides various financial services like loans, investment funds, insurance etc. to its customers. FinMan wishes to cross-sell health insurance to the existing customers who may or may not hold insurance policies with the company. The company recommends health insurance to its customers based on their profile once these customers land on the website. Customers might browse the recommended health insurance policy and consequently fill up a form to apply. When these customers fill up the form, their Response towards the policy is considered positive and they are classified as a lead.
# Once these leads are acquired, the sales advisors approach them to convert and thus the company can sell proposed health insurance to these leads in a more efficient manner.
# Now the company needs your help in building a model to predict whether the person will be interested in their proposed Health plan/policy given the information about:
# - Demographics (city, age, region etc.)
# - Information regarding holding policies of the customer
# - Recommended Policy Information
# import useful libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
from sklearn.model_selection import train_test_split
from catboost import CatBoostClassifier
# load in data and set seed, do a bit of cleaning
BASE = "../input/jobathon-analytics-vidhya/"
SEED = 2021
train = pd.read_csv(f"{BASE}train.csv")
test = pd.read_csv(f"{BASE}test.csv")
ss = pd.read_csv(f"{BASE}sample_submission.csv")
# do a bit of cleaning
train["Holding_Policy_Duration"] = pd.to_numeric(
train["Holding_Policy_Duration"].str.replace("+", "")
)
test["Holding_Policy_Duration"] = pd.to_numeric(
test["Holding_Policy_Duration"].str.replace("+", "")
)
# Prepare a few key variables to classify columns into categorical and numeric
ID_COL, TARGET_COL = "ID", "Response"
features = [c for c in train.columns if c not in [ID_COL, TARGET_COL]]
cat_cols = [
"City_Code",
"Region_Code",
"Accomodation_Type",
"Reco_Insurance_Type",
"Is_Spouse",
"Health Indicator",
"Holding_Policy_Type",
"Reco_Policy_Cat",
]
num_cols = [c for c in features if c not in cat_cols]
# ## EDA starts
# First we look at the first few rows of train dataset.
train.head(3)
ss.head(3)
# look at distribution of target variable
train[TARGET_COL].value_counts(), train[TARGET_COL].value_counts(normalize=True)
# look at which variables are null and if they were parsed correctly
train.info()
test.info()
# look at unique values in all columns
train.nunique()
test.nunique()
# Looks like we have a lot of nulls in `Health Indicator`, `Holding_Policy_Duration`, and `Holding_Policy_Type`. :/ Otherwise pandas parsed out the columns quite well.
# ### Looking at categorical columns
# Because of all the categorical columns, I decided to set a baseline with CatBoost. Here are the top 5 value counts and countplots for each of them; they prove useful.
# print top 5 values and plot data wrt target variable
for col in cat_cols:
if col != "Region_Code": # too high granularity
print(f"Analysing: {col}\nTrain top 5 counts:")
print(train[col].value_counts().head(5))
print("Test top 5 counts:")
print(test[col].value_counts().head(5))
plt.figure(figsize=(20, 5))
sns.countplot(x=col, hue=TARGET_COL, data=train)
plt.show()
print("\n")
# #### Observations
# Here I am interested in the ratio of target variable in each category. If it is a lot different from the other ratios, the signal conveyed for that category is useful.
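# A quick way to look at those per-category response ratios directly (a small
# sketch using the cat_cols defined above and skipping the high-cardinality
# Region_Code):
for col in [c for c in cat_cols if c != "Region_Code"]:
    print(f"\nResponse rate by {col}:")
    print(train.groupby(col)[TARGET_COL].mean().sort_values(ascending=False).head(10))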
# ### Analysis of continuous variables
# Plotted boxplots by target variable and kernel density estimates for each continuous variable to draw interesting insight.
# plot kernel density plot and a boxplot of data wrt target variable
for col in num_cols:
print(f"Analysing: {col}")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 5))
sns.kdeplot(train[col], ax=ax1)
sns.boxplot(x=train[TARGET_COL], y=train[col], ax=ax2)
plt.show()
print("\n")
# #### Observations
# All num cols except `Reco_Policy_Premium` seem to have bimodal distribution. `Reco_Policy_Premium` is slightly skewed to the left, let's try log-transformation.
for col in ["Reco_Policy_Premium"]:
# plot kernel density plot and a boxplot of data wrt target variable
print(f"Analysing: {col}")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 5))
sns.kdeplot(np.log1p(train[col]), ax=ax1)
sns.boxplot(x=train[TARGET_COL], y=np.log1p(train[col]), ax=ax2)
plt.show()
print("\n")
# #### Observations
# Looks like there are not too many differences in target var distributions. :/
# Correlation heatmap
# not that useful for classification, especially with GBDTs
# since DT-models are not influenced by multi-collinearity
plt.figure(figsize=(22, 8))
sns.heatmap(train[num_cols].corr(), annot=True)
# Pairplots => these might take longer to render
sns.pairplot(train[num_cols])
# ## Baseline Model
# Alright, after EDA of all variables, it's time to introduce the CatBoostClassifier model with no tuning as a baseline.
# Data preparation
y = train[TARGET_COL]
X = train.drop([TARGET_COL, ID_COL], axis=1)
X.head()
# Categorical features reminder
cat_cols
# fillnas and convert to right data types
print(X[cat_cols].info())
X_filled = X.copy()
X_filled["Health Indicator"] = X["Health Indicator"].fillna("NA")
X_filled["Holding_Policy_Type"] = X["Holding_Policy_Type"].fillna(0).astype(np.int64)
X_filled[cat_cols].info()
# Split the data into train and validation sets
# Cross-validation is not included in the baseline => the model could overfit
# (a stratified K-fold sketch follows the baseline evaluation below)
X_train, X_validation, y_train, y_validation = train_test_split(
X_filled, y, train_size=0.8, random_state=SEED, shuffle=True, stratify=y
)
model = CatBoostClassifier(
random_seed=SEED,
eval_metric="AUC",
)
model.fit(
X_train,
y_train,
cat_features=cat_cols,
use_best_model=True,
eval_set=(X_validation, y_validation),
verbose=50,
)
print("Model is fitted: " + str(model.is_fitted()))
print("Model params:")
print(model.get_params())
print("Tree count: " + str(model.tree_count_))
model.get_feature_importance(prettified=True)
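# The baseline above is validated on a single holdout split. A minimal
# stratified K-fold sketch (same features and default CatBoost settings, no
# tuning) that would give a less optimistic AUC estimate than the split above:
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
cv_scores = []
for fold, (tr_idx, va_idx) in enumerate(skf.split(X_filled, y)):
    fold_model = CatBoostClassifier(random_seed=SEED, eval_metric="AUC", verbose=0)
    fold_model.fit(
        X_filled.iloc[tr_idx],
        y.iloc[tr_idx],
        cat_features=cat_cols,
        eval_set=(X_filled.iloc[va_idx], y.iloc[va_idx]),
        use_best_model=True,
    )
    fold_auc = roc_auc_score(
        y.iloc[va_idx], fold_model.predict_proba(X_filled.iloc[va_idx])[:, 1]
    )
    cv_scores.append(fold_auc)
    print(f"Fold {fold}: AUC = {fold_auc:.4f}")
print(f"Mean CV AUC: {np.mean(cv_scores):.4f}")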
X_test = test.drop([ID_COL], axis=1)
X_test.head()
# fillnas and convert to right data types TEST
print(X_test[cat_cols].info())
X_test_filled = X_test.copy()
X_test_filled["Health Indicator"] = X_test["Health Indicator"].fillna("NA")
X_test_filled["Holding_Policy_Type"] = (
X_test["Holding_Policy_Type"].fillna(0).astype(np.int64)
)
X_test_filled[cat_cols].info()
contest_predictions = model.predict_proba(X_test_filled)[:, 1]
print("Predictions:")
print(contest_predictions)
ss[TARGET_COL] = contest_predictions
ss.head()
ss.to_csv("Catboost_Baseline.csv", index=False)
# and we're done!
"Done!"
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from mlxtend.frequent_patterns import apriori, association_rules
df = pd.read_csv(r"/kaggle/input/home-basics-data/Home Basics Data.csv")
df.info()
df.head()
df.describe()
frequent_itemsets = apriori(df, min_support=150 / len(df), use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.5)
print(rules.sort_values("confidence", ascending=False).head(10))
# # The rules with a minimum lift ratio of 2 show that certain items are very often purchased together, indicating potential cross-selling opportunities.
frequent_itemsets = apriori(df, min_support=150 / len(df), use_colnames=True)
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=2)
print(rules.sort_values("lift", ascending=False).head(10))
|
from skimage.metrics import structural_similarity as ssim
import cv2
import numpy as np
import math
from matplotlib import pyplot as plt
def mse(imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
def compare(img1, img2):
img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
MSE_val = mse(img1, img2)
SSIM_val = ssim(img1, img2)
RMSI_val = math.sqrt(MSE_val)
if RMSI_val != 0:
PSNR_val = 20 * math.log10(255 / RMSI_val)
RMSI_val = 255 / pow(10.0, (PSNR_val / 20.0))
else:
PSNR_val = 100
return MSE_val, SSIM_val, RMSI_val, PSNR_val
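# For reference, the quantities computed by compare() on the grayscale images
# (8-bit, so MAX = 255) are:
#   MSE  = (1 / (H * W)) * sum((A - B) ** 2)
#   RMSE = sqrt(MSE)
#   PSNR = 20 * log10(MAX / RMSE)   (set to 100 here when RMSE == 0)
#   SSIM = structural similarity index, as implemented by skimage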
org_img = cv2.imread("../input/photos/mishka.jpg")
org_img = cv2.cvtColor(org_img, cv2.COLOR_BGR2RGB)
fig, ax = plt.subplots(nrows=10, ncols=2, figsize=(10, 40))
h, w, c = org_img.shape
# inter_type=cv2.INTER_NEAREST
inter_type = cv2.INTER_LINEAR
# inter_type=cv2.INTER_CUBIC
# inter_type=cv2.INTER_AREA
# inter_type=cv2.INTER_LANCZOS4
for i, j in zip(range(1, 20, 2), range(10)):
ax[j][0].imshow(org_img)
resized_img = cv2.resize(
org_img, (int(w / i), int(h / i)), interpolation=inter_type
)
res_h, res_w, _ = resized_img.shape
ax[j][0].title.set_text("({}:{})/{}=({}:{})".format(h, w, i, res_h, res_w))
resized_img = cv2.resize(resized_img, (w, h), interpolation=inter_type)
MSE, SSIM, RMSI, PSNR = compare(
org_img,
resized_img,
)
ax[j][1].imshow(resized_img)
ax[j][1].title.set_text(
"MSE:{:.2f} SSIM:{:.2f}RMSE:{:.2f} PSNR:{:.2f}".format(MSE, SSIM, RMSI, PSNR)
)
plt.show()
# For a high-frequency image
from skimage.metrics import structural_similarity as ssim
import cv2
import numpy as np
import math
from matplotlib import pyplot as plt
def mse(imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
def compare(img1, img2):
img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
MSE_val = mse(img1, img2)
SSIM_val = ssim(img1, img2)
RMSI_val = math.sqrt(MSE_val)
if RMSI_val != 0:
PSNR_val = 20 * math.log10(255 / RMSI_val)
RMSI_val = 255 / pow(10.0, (PSNR_val / 20.0))
else:
PSNR_val = 100
return MSE_val, SSIM_val, RMSI_val, PSNR_val
org_img = cv2.imread("../input/photos/uzum.jpg")
org_img = cv2.cvtColor(org_img, cv2.COLOR_BGR2RGB)
fig, ax = plt.subplots(nrows=10, ncols=2, figsize=(10, 40))
h, w, c = org_img.shape
# inter_type=cv2.INTER_NEAREST
inter_type = cv2.INTER_LINEAR
# inter_type=cv2.INTER_CUBIC
# inter_type=cv2.INTER_AREA
# inter_type=cv2.INTER_LANCZOS4
for i, j in zip(range(1, 20, 2), range(10)):
ax[j][0].imshow(org_img)
resized_img = cv2.resize(
org_img, (int(w / i), int(h / i)), interpolation=inter_type
)
res_h, res_w, _ = resized_img.shape
ax[j][0].title.set_text("({}:{})/{}=({}:{})".format(h, w, i, res_h, res_w))
resized_img = cv2.resize(resized_img, (w, h), interpolation=inter_type)
MSE, SSIM, RMSI, PSNR = compare(
org_img,
resized_img,
)
ax[j][1].imshow(resized_img)
ax[j][1].title.set_text(
"MSE:{:.2f} SSIM:{:.2f}RMSE:{:.2f} PSNR:{:.2f}".format(MSE, SSIM, RMSI, PSNR)
)
plt.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.common.utils.utils import setup_outputdir
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
import os.path
class MultilabelPredictor:
"""Tabular Predictor for predicting multiple columns in table.
Creates multiple TabularPredictor objects which you can also use individually.
You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)`
Parameters
----------
labels : List[str]
The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object.
path : str, default = None
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors.
problem_types : List[str], default = None
The ith element is the `problem_type` for the ith TabularPredictor stored in this object.
eval_metrics : List[str], default = None
The ith element is the `eval_metric` for the ith TabularPredictor stored in this object.
consider_labels_correlation : bool, default = True
Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others.
If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion).
Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels.
kwargs :
Arguments passed into the initialization of each TabularPredictor.
"""
multi_predictor_file = "multilabel_predictor.pkl"
def __init__(
self,
labels,
path=None,
problem_types=None,
eval_metrics=None,
consider_labels_correlation=True,
**kwargs,
):
if len(labels) < 2:
raise ValueError(
"MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column)."
)
if (problem_types is not None) and (len(problem_types) != len(labels)):
raise ValueError(
"If provided, `problem_types` must have same length as `labels`"
)
if (eval_metrics is not None) and (len(eval_metrics) != len(labels)):
raise ValueError(
"If provided, `eval_metrics` must have same length as `labels`"
)
self.path = setup_outputdir(path, warn_if_exist=False)
self.labels = labels
self.consider_labels_correlation = consider_labels_correlation
self.predictors = (
{}
) # key = label, value = TabularPredictor or str path to the TabularPredictor for this label
if eval_metrics is None:
self.eval_metrics = {}
else:
self.eval_metrics = {labels[i]: eval_metrics[i] for i in range(len(labels))}
problem_type = None
eval_metric = None
for i in range(len(labels)):
label = labels[i]
path_i = self.path + "Predictor_" + label
if problem_types is not None:
problem_type = problem_types[i]
if eval_metrics is not None:
eval_metric = eval_metrics[i]
self.predictors[label] = TabularPredictor(
label=label,
problem_type=problem_type,
eval_metric=eval_metric,
path=path_i,
**kwargs,
)
def fit(self, train_data, tuning_data=None, **kwargs):
"""Fits a separate TabularPredictor to predict each of the labels.
Parameters
----------
train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame
See documentation for `TabularPredictor.fit()`.
kwargs :
Arguments passed into the `fit()` call for each TabularPredictor.
"""
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
train_data_og = train_data.copy()
if tuning_data is not None:
tuning_data_og = tuning_data.copy()
else:
tuning_data_og = None
save_metrics = len(self.eval_metrics) == 0
for i in range(len(self.labels)):
label = self.labels[i]
predictor = self.get_predictor(label)
if not self.consider_labels_correlation:
labels_to_drop = [l for l in self.labels if l != label]
else:
labels_to_drop = [
self.labels[j] for j in range(i + 1, len(self.labels))
]
train_data = train_data_og.drop(labels_to_drop, axis=1)
if tuning_data is not None:
tuning_data = tuning_data_og.drop(labels_to_drop, axis=1)
print(f"Fitting TabularPredictor for label: {label} ...")
predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs)
self.predictors[label] = predictor.path
if save_metrics:
self.eval_metrics[label] = predictor.eval_metric
self.save()
def predict(self, data, **kwargs):
"""Returns DataFrame with label columns containing predictions for each label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`.
kwargs :
Arguments passed into the predict() call for each TabularPredictor.
"""
return self._predict(data, as_proba=False, **kwargs)
def predict_proba(self, data, **kwargs):
"""Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`.
kwargs :
Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call).
"""
return self._predict(data, as_proba=True, **kwargs)
def evaluate(self, data, **kwargs):
"""Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            Data to evaluate predictions of all labels for; must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`.
kwargs :
Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call).
"""
data = self._get_data(data)
eval_dict = {}
for label in self.labels:
print(f"Evaluating TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
eval_dict[label] = predictor.evaluate(data, **kwargs)
if self.consider_labels_correlation:
data[label] = predictor.predict(data, **kwargs)
return eval_dict
def save(self):
"""Save MultilabelPredictor to disk."""
for label in self.labels:
if not isinstance(self.predictors[label], str):
self.predictors[label] = self.predictors[label].path
save_pkl.save(path=self.path + self.multi_predictor_file, object=self)
print(
f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')"
)
@classmethod
def load(cls, path):
"""Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor."""
path = os.path.expanduser(path)
if path[-1] != os.path.sep:
path = path + os.path.sep
return load_pkl.load(path=path + cls.multi_predictor_file)
def get_predictor(self, label):
"""Returns TabularPredictor which is used to predict this label."""
predictor = self.predictors[label]
if isinstance(predictor, str):
return TabularPredictor.load(path=predictor)
return predictor
def _get_data(self, data):
if isinstance(data, str):
return TabularDataset(data)
return data.copy()
def _predict(self, data, as_proba=False, **kwargs):
data = self._get_data(data)
if as_proba:
predproba_dict = {}
for label in self.labels:
print(f"Predicting with TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
if as_proba:
predproba_dict[label] = predictor.predict_proba(
data, as_multiclass=True, **kwargs
)
data[label] = predictor.predict(data, **kwargs)
if not as_proba:
return data[self.labels]
else:
return predproba_dict
labels = ["min_feels_like", "max_feels_like"]
train = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/train_data.csv"
)
train = train.dropna().reset_index(drop=True)
train["date"] = pd.to_datetime(train["date"])
train["sunrise"] = pd.to_datetime(train["sunrise"])
train["sunset"] = pd.to_datetime(train["sunset"])
test = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/test_data.csv"
)
test["date"] = pd.to_datetime(test["date"])
test["sunrise"] = pd.to_datetime(test["sunrise"])
test["sunset"] = pd.to_datetime(test["sunset"])
submission = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/sample_sub.csv"
)
train["humidity*windspeed"] = (train["humidity"] * train["windspeed"]).astype("float64")
train["humidity**2/windspeed"] = (train["humidity"] ** 2 / train["windspeed"]).astype(
"float64"
)
train["windspeed*cloudcover"] = (train["windspeed"] * train["cloudcover"]).astype(
"float64"
)
test["humidity*windspeed"] = (test["humidity"] * test["windspeed"]).astype("float64")
test["humidity**2/windspeed"] = (test["humidity"] ** 2 / test["windspeed"]).astype(
"float64"
)
test["windspeed*cloudcover"] = (test["windspeed"] * test["cloudcover"]).astype(
"float64"
)
# SCORE MAY VARY DUE TO RANDOM HYPERPARAMETER OPT.
multi_predictor = MultilabelPredictor(
labels=labels,
problem_types=["regression", "regression"],
eval_metrics=["rmse", "rmse"],
)
multi_predictor.fit(
train,
excluded_model_types=["KNN"],
refit_full=True,
presets=["best_quality"],
hyperparameter_tune_kwargs="auto", # This parameter makes the output lag and not show everything.
)
predictions = multi_predictor.predict(test)
submission["min_feels_like"] = predictions["min_feels_like"]
submission["max_feels_like"] = predictions["max_feels_like"]
submission.to_csv("submission.csv", index=False)
submission
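# MultilabelPredictor.save() runs automatically at the end of fit(), so the
# trained per-label predictors can be reloaded and inspected individually
# (short usage sketch; leaderboard() just reports the internal validation scores):
reloaded_predictor = MultilabelPredictor.load(multi_predictor.path)
reloaded_predictor.get_predictor("min_feels_like").leaderboard()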
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import pandas as pd
from tqdm.notebook import tqdm
from copy import deepcopy
from itertools import chain
from math import isnan
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load the data
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=True,
download=True,
transform=transforms.Compose(
[
transforms.ToTensor(),
transforms.Lambda(lambda t: t * 2 - 1),
# transforms.Normalize((0.1307,), (0.3081,)),
transforms.Lambda(torch.flatten),
]
),
),
batch_size=100,
shuffle=True,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=False,
transform=transforms.Compose(
[
transforms.ToTensor(),
transforms.Lambda(lambda t: t * 2 - 1),
transforms.Lambda(torch.flatten),
]
),
),
batch_size=100,
shuffle=True,
)
def accuracy(logits, y):
return torch.eq(F.softmax(logits, dim=1).max(1)[1], y).float().mean()
model_baseline = nn.Sequential(
nn.Linear(784, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 10)
# nn.Softmax()
).to(device)
model = model_baseline # @param ["model_baseline"] {type:"raw"}
for p in model.parameters():
    if p.dim() > 1:
        torch.nn.init.xavier_normal_(p)
learning_rate = 1e-4 # @param {type:"number"}
epochs = 200 # @param {type:"integer"}
# optimizers and loss functions
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5, 0.999))
lrs = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.97)
loss_fn = nn.CrossEntropyLoss()
train_losses = []
test_losses = []
for epoch in tqdm(range(epochs)):
epoch_loss = 0
for Xs, Ys in train_loader:
optimizer.zero_grad()
outputs = model(Xs.to(device))
loss = loss_fn(outputs, Ys.to(device))
loss.backward()
optimizer.step()
epoch_loss += loss.item() / len(train_loader)
train_losses.append(epoch_loss)
test_losses.append(
sum(
loss_fn(model(Xs.to(device)), Ys.to(device)).item() / len(test_loader)
for Xs, Ys in test_loader
)
)
if epoch % 2:
lrs.step()
acc = sum(
accuracy(model(Xs.to(device)), Ys.to(device)) / len(test_loader)
for Xs, Ys in test_loader
).item()
f"accuracy: {acc:.2%} error: {1-acc:.2%}"
# Deep Variational Information Bottleneck network
# $J_{IB} = \frac{1}{N}\sum_{n=1}^{N}\left\{\mathbb{E}_{\epsilon \sim p(\epsilon)}\left[-\log q\!\left(y_{n} \mid f(x_{n}, \epsilon)\right)\right] + \beta\, \mathrm{KL}\!\left[p(Z \mid x_{n}) \,\|\, r(Z)\right]\right\}$
# where
# $p$ is the joint distribution p(X,Y,Z), with X, Y, Z the input, the output and the intermediate representation of the NN,
# $q$ is the variational approximation of $p$,
# $r$ is the variational approximation of $p(Z)$
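# Note: with a diagonal Gaussian encoder $p(Z \mid x) = \mathcal{N}(\mu, \operatorname{diag}(\sigma^{2}))$ and a standard
# normal prior $r(Z) = \mathcal{N}(0, I)$, the KL term has the closed form
# $\mathrm{KL}\left[\mathcal{N}(\mu, \operatorname{diag}(\sigma^{2})) \,\|\, \mathcal{N}(0, I)\right] = -\tfrac{1}{2}\sum_{k=1}^{K}\left(1 + 2\log\sigma_{k} - \mu_{k}^{2} - \sigma_{k}^{2}\right)$
# The `VIB_loss` defined below adds $\beta\sum_{k}\left(1 + 2\log\sigma_{k} - \mu_{k}^{2} - \sigma_{k}^{2}\right) = -2\beta\,\mathrm{KL}$ to the cross-entropy,
# so the negative $\beta$ used later ($\beta = -0.25\times 10^{-3}$) acts as a positive KL penalty with effective weight $-2\beta$.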
K = 256 # @param {type:"integer"}
encoder = nn.Sequential(
nn.Linear(784, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 2 * K),
).to(device)
decoder = nn.Linear(K, 10).to(device)
# the first K output units are the mean mu
# the last K units parameterise the diagonal standard deviation of p(Z|x) via a softplus
def encode(x, encoder=encoder, K=K):
m_out = encoder(x)
return m_out[:, :K], F.softplus(m_out[:, K:] - 5, beta=1)
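# A quick sanity check on the encoder output (a minimal sketch using a random batch rather
# than real data; it only verifies the shapes and that the softplus keeps std strictly positive):
with torch.no_grad():
    _mu, _std = encode(torch.randn(100, 784).to(device))
print(_mu.shape, _std.shape, bool((_std > 0).all()))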
def decode(mu, std, n_samples, decoder=decoder):
# reparametrize
epsilon = torch.autograd.Variable(mu.data.new(n_samples, *std.size()).normal_())
encoding = mu.expand(n_samples, *mu.size()) + epsilon * std.expand(
n_samples, *std.size()
)
encoding = encoding.mean(0)
return decoder(encoding)
VIB_loss = (
lambda beta: lambda mu, std, logits, y: F.cross_entropy(logits, y)
+ beta * (1 + 2 * std.clamp(min=1e-8).log() - mu.pow(2) - std.pow(2)).sum(1).mean()
)
@torch.no_grad()
def ema_update(model, model_ema, decay=0.999):
state_dict = model_ema.state_dict()
for key in state_dict.keys():
state_dict[key] = (
decay * state_dict[key] + (1 - decay) * model.state_dict()[key]
)
model_ema.load_state_dict(state_dict)
all_params = chain(encoder.parameters(), decoder.parameters())
beta = -0.25e-3 # @param
n_samples = 30 # @param
# initialise the weight matrices with Xavier/Glorot normal without consuming the
# all_params iterator (it is still needed by the optimizer below)
for p in chain(encoder.parameters(), decoder.parameters()):
    if p.dim() > 1:
        torch.nn.init.xavier_normal_(p)
encoder_ema, decoder_ema = deepcopy(encoder), deepcopy(decoder)
learning_rate = 1e-4 # @param {type:"number"}
epochs = 200 # @param {type:"integer"}
# optimizers and loss functions
optimizer = torch.optim.Adam(all_params, lr=learning_rate, betas=(0.5, 0.999))
lrs = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.97)
loss_fn = VIB_loss(beta)
def batch_loss(Xs, Ys, encoder=encoder, decoder=decoder):
mu, std = encode(Xs.to(device), encoder=encoder)
logits = decode(mu, std, n_samples, decoder=decoder)
# print(logits.size())
loss = loss_fn(mu, std, logits, Ys.to(device))
return loss
vib_train_losses = []
vib_test_losses = []
for epoch in tqdm(range(epochs)):
epoch_loss = 0
for Xs, Ys in train_loader:
optimizer.zero_grad()
loss = batch_loss(Xs, Ys)
if isnan(loss.item()):
break
loss.backward()
optimizer.step()
epoch_loss += loss.item() / len(train_loader)
ema_update(encoder, encoder_ema)
ema_update(decoder, decoder_ema)
else:
vib_train_losses.append(epoch_loss)
with torch.no_grad():
vib_test_losses.append(
sum(
batch_loss(Xs, Ys, encoder=encoder_ema, decoder=decoder_ema).item()
/ len(test_loader)
for Xs, Ys in test_loader
)
)
if epoch % 2:
lrs.step()
continue
break
vib_acc = sum(
accuracy(
decode(*encode(Xs.to(device), encoder=encoder_ema), 30, decoder=decoder_ema),
Ys.to(device),
)
/ len(test_loader)
for Xs, Ys in test_loader
).item()
f"accuracy: {vib_acc:.2%} error: {1-vib_acc:.2%}"
vib_test_losses
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Library Imports
import numpy as np
import pandas as pd
import seaborn as sns
# ### Load the datasets and verify the data load
authors = pd.read_csv("../input/nips-papers-1987-2019-updated/authors.csv")
authors.head()
# A quick look on the data structure and missing values
authors.info()
authors.isna().sum()
# Load the papers dataset
papers = pd.read_csv("../input/nips-papers-1987-2019-updated/papers.csv")
papers.head()
# A quick look on the papers data structure and missing values by columns
papers.info()
papers.isna().sum()
# we will impute the missing values by filling in the corresponding title text for the missing entries in both the abstract and full_text columns
# perform the missing imputations on both full_text and abstract
papers["full_text"] = np.where(
papers["full_text"].isna(), papers["title"], papers["full_text"]
)
papers["abstract"] = np.where(
papers["abstract"].isna(), papers["title"], papers["abstract"]
)
# Verify the imputations
papers.isna().sum()
# ### Preprocessing
# #### Task: Use TF-IDF Vectorization to create a vectorized document term matrix. We may want to explore the max_df and min_df parameters.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(
max_df=0.9, min_df=2, stop_words="english", token_pattern=r"(?u)\b[A-Za-z]+\b"
)
dtm = tfidf.fit_transform(papers["title"])
dtm
# ### Latent Drichilet Allocation
from sklearn.decomposition import LatentDirichletAllocation
LDA = LatentDirichletAllocation(n_components=7, random_state=101)
# This can take a while; we are dealing with a large number of documents here
LDA.fit(dtm)
# Check the shape of the numpy array - rows match the 7 components we requested and columns equal the vocabulary size of the sparse matrix
LDA.components_.shape
# Now we will attach the topic numbers to the original articles
# In order to do this, we would need to apply the transform method on our LDA model and DTM to associate the topic to each
# article
topic_results = LDA.transform(dtm)
# Now we will check the shape
topic_results.shape
# If we check the first element, we get the probability of the first article belonging to each of the 7 topics
topic_results[0]
# There is roughly a 69% probability that it belongs to the 5th topic
# Associating topic to each of the documents
papers["Topic"] = topic_results.argmax(axis=1)
papers.head(10)
# #### Now we will set up a loop that prints out the 10 words with the highest probabilities for each of the seven topics
for index, topic in enumerate(LDA.components_):
    print(f"THE TOP 10 WORDS FOR TOPIC #{index}")
    list_keywords = [
        tfidf.get_feature_names()[i] for i in topic.argsort()[-10:]
    ]
print(list_keywords)
print("\n")
# > #### A basic visualization of the number of documents per topic
sns.countplot(x="Topic", data=papers, order=papers["Topic"].value_counts().index)
papers["Topic"].value_counts().sort_values(ascending=False)
|
# # Kidney Stone Prediction
# ### Table of contents
# - [Importing Libraries & Exploring the data](#1)
# - [Exploratory Data Analysis](#2)
# - [Check for Information Bias in Train data and Original Data](#2.1)
# - [Linear Correlation between the features](#2.2)
# - [Feature Distributions for train & test data](#2.3)
# - [Feature Engineering](#2.4)
# - [Predictive Analysis](#3)
# - [Lazypredict : Finding the best performing models](#4)
# - [1. LGBMClassifier](#5)
# - [2. XGBClassifier](#6)
# - [3. AdaBoost Classifier](#7)
# - [4. Random Forest Classifier](#8)
# - [5. Logistic Regression](#9)
# - [6. Bonus one: Gaussian Naive Bayes](#10)
# - [Feature Importance](#11)
# ---
# # Importing Libraries & Exploring the data
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns
from itertools import product
# Setting Style
sns.set_style("darkgrid")
# Loading the Data
train_df = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
original_df = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
test_df = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
sub = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
train_df
# Exploring the Data
Df = [train_df, original_df, test_df]
names = ["Training Data", "Original Data", "Test Data"]
print("Data Information")
for df, name in zip(Df, names):
print(name)
print(df.info())
print()
train_df.drop("id", axis=1, inplace=True)
test_df.drop("id", axis=1, inplace=True)
desc = train_df.describe()
desc = desc.style.background_gradient()
desc
# ---
# # Exploratory Data Analysis
# ---
# Check for Information Bias in Train data and Original Data
f, ax = plt.subplots(1, 2, figsize=(12, 10))
train_df["target"].value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.1f%%", ax=ax[0], shadow=True
)
ax[0].set_title("Target class in training data")
ax[0].set_ylabel("")
original_df["target"].value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.1f%%", ax=ax[1], shadow=True
)
ax[1].set_title("Target class in original data")
ax[1].set_ylabel("")
plt.show()
# As there is an almost equal proportion of the target classes in both the train data and the original data, merging them will not introduce any bias
# Therefore, let's merge these two dataframes
train_df = pd.concat([train_df, original_df], ignore_index=True)
train_df
# Linear Correlation between the features
plt.figure(figsize=(12, 8))
sns.heatmap(train_df.corr(), annot=True, cmap="Greens")
plt.title("Correlation Matrix for Features of Train Data")
plt.show()
# Feature Distributions for train & test data
plt.figure(figsize=(10, 30))
i = 1
for col in train_df.columns[:6]:
plt.subplot(6, 2, i)
sns.histplot(x=train_df[col], color="#288BA8", kde=True, lw=1)
plt.title("training data: distribution of '{}' feature".format(col))
plt.subplot(6, 2, i + 1)
sns.histplot(x=test_df[col], color="#B22222", kde=True, lw=1)
plt.title("testing data: distribution of '{}' feature".format(col))
i += 2
plt.tight_layout()
# Insights
# Going through the plots above, we see some differences in the distribution of urea between the train and test data
plt.figure(figsize=(10, 5))
col = "urea"
plt.subplot(1, 2, 1)
sns.histplot(x=train_df["urea"], color="#288BA8", kde=True, lw=1)
plt.title("training data: distribution of '{}' feature".format(col))
plt.subplot(1, 2, 2)
sns.histplot(x=test_df["urea"], color="#B22222", kde=True, lw=1)
plt.title("testing data: distribution of '{}' feature".format(col))
plt.tight_layout()
# The urea feature starts from 64 in the test data, whereas in the train data it starts from 10
train_df[train_df["urea"] < 50]
# ### So let's drop these low-urea rows for better predictions
train_df = train_df[train_df["urea"] > 50]
# ---
# # Feature Engineering
# ---
# The following features are added to improve the model's performance on this data.
# This feature engineering is taken from [This Awesome Notebook](https://www.kaggle.com/code/phongnguyen1/a-framework-for-tabular-classification-e12-10) by Phong Nguyen. Kudos to the author for his work; check it out for more information.
train_df.head(2)
test_df.head(2)
def add_features(df):
# Ratio of calcium concentration to urea concentration:
df["calc_urea_ratio"] = df["calc"] / df["urea"]
# Product of calcium concentration and osmolarity:
df["calc_osm_product"] = df["calc"] * df["osmo"]
# Ratio of calcium concentration to specific gravity:
df["calc_gravity_ratio"] = df["calc"] / df["gravity"]
# Ratio of calcium concentration to osmolarity:
df["calc_osm_ratio"] = df["calc"] / df["osmo"]
train_df
add_features(train_df)
add_features(test_df)
train_df.head(3)
test_df.head(3)
# ---
# # Predictive Analysis
# ---
# Standardization for numerical labels
from sklearn.preprocessing import StandardScaler, LabelEncoder
standardScaler = StandardScaler()
# Train Data
train = standardScaler.fit_transform(train_df.drop(["target"], axis=1))
train = pd.DataFrame(train, columns=train_df.drop(["target"], axis=1).columns)
train
# Test Data (use the scaler fitted on the train data so both sets share the same scaling)
test = standardScaler.transform(test_df)
test = pd.DataFrame(test, columns=test_df.columns)
test
# ---
# # Lazypredict : Finding the best performing models
# ---
from lazypredict.Supervised import LazyClassifier, LazyRegressor
from sklearn.model_selection import train_test_split
X, y = train, train_df.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# fit all models
clf = LazyClassifier(predictions=True)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
models
# The table above ranks the different models by their performance on the data. The models are sorted by descending ROC AUC, which is the metric used for model evaluation on the test data.
# As we don't know the exact test data, we will build all of the best models and submit their results to the competition. We will use the top 4 models plus logistic regression, so we will work with the following baseline models and tune them for better results:
# 1. LGBMClassifier
# 2. XGBClassifier
# 3. AdaBoostClassifier
# 4. RandomForestClassifier
# 5. Logistic Regression
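# As an illustration of how candidate models can be compared on the competition metric (a minimal
# sketch, not part of the tuned pipelines below; it reuses the X_train/y_train split created for
# LazyClassifier above), cross-validated ROC AUC can be computed per model:
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
import lightgbm as lgb

for name, candidate in [
    ("LGBM", lgb.LGBMClassifier()),
    ("LogReg", LogisticRegression(max_iter=1000)),
]:
    # 5-fold cross-validated ROC AUC on the training split only
    cv_auc = cross_val_score(candidate, X_train, y_train, scoring="roc_auc", cv=5)
    print(f"{name}: mean ROC AUC = {cv_auc.mean():.4f} (+/- {cv_auc.std():.4f})")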
# ---
# # 1. LGBMClassifier
# ---
X = train
y = train_df.target
X_test = test
import lightgbm as lgb
lgbm_params = {
"n_estimators": 27,
"num_leaves": 5,
"min_child_samples": 11,
"learning_rate": 0.1,
"colsample_bytree": 0.08,
"reg_alpha": 1.5,
"reg_lambda": 0.01,
}
lgb_clf = lgb.LGBMClassifier(**lgbm_params)
# Fitting the model
lgb_clf.fit(X, y)
# Predicting the probabilities of the classes using the model
pred = lgb_clf.predict_proba(X_test)
# Creating DataFrame of the predicted values
df = pd.DataFrame(pred[:, 1])
df.columns = ["target"]
df
# Creating the Data for the submission to competition
sub.drop("target", axis=1, inplace=True)
sub["target"] = df["target"].copy()
sub.to_csv("sub_LGBMc.csv", index=False)
sub
# ---
# # 2. XGBClassifier
# ---
from xgboost import XGBClassifier
from itertools import product
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
# ## Hyperparameter Tuning
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=42)
search_space = {
"n_estimators": [10, 20, 30],
"max_depth": np.linspace(1, 9, num=5).astype("int"),
"learning_rate": np.logspace(-3, 1, num=5),
"reg_alpha": np.linspace(0, 1, num=3),
"reg_lambda": np.linspace(0, 1, num=3),
}
best_score = 0
best_params = {}
for val in product(*search_space.values()):
    params = {}
    for i, param in enumerate(search_space.keys()):
        params[param] = val[i]
    clf = XGBClassifier(**params).fit(X_train, y_train)
    val_pred = clf.predict_proba(X_val)[:, 1]
    score = roc_auc_score(y_val, val_pred)
    if score > best_score:
        best_score = score
        best_params = params
best_params
params = {**best_params, "seed": 42, "eval_metric": "auc"}
xgb = XGBClassifier(**params)
xgb.fit(X, y)
# Predicting the probabilities of the classes using the model
pred = xgb.predict_proba(X_test)
# Creating DataFrame of the predicted values
df = pd.DataFrame(pred[:, 1])
df.columns = ["target"]
df
# Creating the Data for the submission to competition
sub.drop("target", axis=1, inplace=True)
sub["target"] = df["target"].copy()
sub.to_csv("sub_XGBc.csv", index=False)
sub
# ---
# # 3. AdaBoostClassifier
# ---
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators=400, learning_rate=0.01)
model = abc.fit(X, y)
pred = model.predict_proba(X_test)
pred[:10]
# Creating DataFrame of the predicted values
df = pd.DataFrame(pred[:, 1])
df.columns = ["target"]
df
# Creating the Data for the submission to competition
sub.drop("target", axis=1, inplace=True)
sub["target"] = df["target"].copy()
sub.to_csv("sub_AdaBC.csv", index=False)
sub
# ---
# # 4. Random Forest Classifier
# ---
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
# ## Hyperparameter Tuning
from sklearn.model_selection import GridSearchCV
rfc = RandomForestClassifier()
random_grid = {
"bootstrap": [True],
"max_depth": [30, 35],
"max_features": ["log2"],
"min_samples_leaf": [2, 3, 4],
"min_samples_split": [1, 2, 3],
"n_estimators": [160, 170, 180],
}
rf_random = GridSearchCV(estimator=rfc, param_grid=random_grid, cv=3, n_jobs=-1)
rf_random.fit(X, y)
rf_random.best_params_
rfc = RandomForestClassifier(**rf_random.best_params_, n_jobs=-1)
rfc.fit(X, y)
pred_rfc = rfc.predict_proba(X_test)
pred_rfc[:5]
# Creating DataFrame of the predicted values
df_rfc = pd.DataFrame(pred_rfc[:, 1])
df_rfc.columns = ["target"]
df_rfc
# Creating the Data for the submission to competition
sub.drop("target", axis=1, inplace=True)
sub["target"] = df_rfc["target"].copy()
sub.to_csv("sub_RFc.csv", index=False)
sub
# ---
# # 5. Logistic Regression
# ---
from sklearn.linear_model import LogisticRegression
# ## Hyperparameter Tuning
from sklearn.model_selection import GridSearchCV
# Split data into features and target
X = train
y = train_df.target
# Define the logistic regression model
logistic_reg = LogisticRegression()
# Define hyperparameters to tune
hyperparameters = {
"penalty": ["l1", "l2"],
"C": [0.01, 0.02, 0.05, 0.1],
"solver": ["liblinear", "saga", "lbfgs"],
"fit_intercept": [True, False],
"max_iter": [1, 5, 10, 50, 100],
"tol": [1e-4, 1e-5],
}
# Perform grid search to find the best hyperparameters
clf = GridSearchCV(logistic_reg, hyperparameters, cv=5)
clf.fit(X, y)
# Print the best hyperparameters and score
print("Best hyperparameters:", clf.best_params_)
print("Best score:", clf.best_score_)
lr = LogisticRegression(**clf.best_params_)
lr.fit(X, y.values)
pred = lr.predict_proba(X_test)
pred[:5]
df = pd.DataFrame(pred[:, 1])
df.columns = ["target"]
df
sub.drop("target", axis=1, inplace=True)
sub["target"] = df["target"].copy()
sub.to_csv("sub_LogR.csv", index=False)
sub
# ---
# # 6. Bonus One : Gaussian Naive Bayes
# ---
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X, y.values)
pred = nb.predict_proba(X_test)
df = pd.DataFrame(pred[:, 1])
df.columns = ["target"]
sub.drop("target", axis=1, inplace=True)
sub["target"] = df["target"].copy()
sub.to_csv("sub_GaussianNB.csv", index=False)
sub
# ---
# # Model Feature Importance
# ---
# **As the Random Forest model has done well so far, let's use the rfc model to calculate the feature importances**
rfc
df_imp = pd.DataFrame(
    {"Feature_Names": rfc.feature_names_in_, "Importances": rfc.feature_importances_}
)
df_imp = df_imp.sort_values(by="Importances", ascending=True)
df_imp.index = np.arange(0, len(df_imp))
df_imp
plt.figure(figsize=(18, 10))
ax = sns.barplot(x="Feature_Names", y="Importances", data=df_imp)
plt.title("Feature Importances", fontsize=20)
for bars in ax.containers:
ax.bar_label(bars)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
test_data = pd.read_csv(
"/kaggle/input/santander-customer-transaction-prediction/test.csv"
)
id_data_ts = test_data.pop("ID_code")
test_data.head()
train_data = pd.read_csv(
"/kaggle/input/santander-customer-transaction-prediction/train.csv"
)
id_data_tr = train_data.pop("ID_code")
target_data = train_data.pop("target")
train_data.head()
import tensorflow as tf
# Define the model architecture
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(
units=256, activation="relu", input_shape=[train_data.shape[1]]
),
tf.keras.layers.Dense(units=128, activation="relu"),
tf.keras.layers.Dense(units=1, activation="sigmoid"),
]
)
# Compile the model
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# Train the model
history = model.fit(train_data, target_data, epochs=10, batch_size=32)
# Make predictions on the test set
predictions = model.predict(test_data)
predictions = np.round(predictions).astype(int).flatten()
# Create the submission file
data = np.column_stack((id_data_ts, predictions))
submission = pd.DataFrame(data, columns=["ID_code", "target"])
submission.to_csv("submission.csv", index=False)
print("Submission file created successfully!")
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Load the placeholder target values from sample_submission.csv
# (this file is a submission template, not ground truth, so the accuracy below is not a real evaluation)
actual_data = pd.read_csv(
"/kaggle/input/santander-customer-transaction-prediction/sample_submission.csv"
)
y_actual = actual_data["target"].values
# Load predicted survival values from submission.csv
predicted_data = pd.read_csv("/kaggle/working/submission.csv")
y_predicted = predicted_data["target"].values
# Calculate evaluation metrics
accuracy = accuracy_score(y_actual, y_predicted)
# Print evaluation metrics
print(f"Accuracy: {accuracy}")
|
import os
import matplotlib.pyplot as plt
import seaborn as sns
import IPython.display as ipd
import librosa.display
import numpy as np
import pandas as pd
from scipy.fftpack import fft
from scipy import signal
from scipy.io import wavfile
import librosa
import glob
import tensorflow as tf
import wave
import sys
from tqdm.notebook import tqdm
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import Conv1D
from keras.layers import LSTM
from keras.layers import MaxPooling1D, AveragePooling1D
from keras.regularizers import l2
from tensorflow.keras import layers
from tensorflow.keras import models
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hypermodel import HyperModel
from kerastuner.engine.hyperparameters import HyperParameters
from keras.utils import to_categorical
from IPython import display
from sklearn.utils import shuffle
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import TensorBoard
import gc
# # **Get Label Data(Types of Emotions)**
audio_files = glob.glob(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/*"
)
audio_files
# Filename identifiers
# Modality (01 = full-AV, 02 = video-only, 03 = audio-only).
# Vocal channel (01 = speech, 02 = song).
# Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised).
# Emotional intensity (01 = normal, 02 = strong). NOTE: There is no strong intensity for the 'neutral' emotion.
# Statement (01 = "Kids are talking by the door", 02 = "Dogs are sitting by the door").
# Repetition (01 = 1st repetition, 02 = 2nd repetition).
# Actor (01 to 24. Odd numbered actors are male, even numbered actors are female).
# Filename example: 03-02-06-01-02-01-12.wav
# Audio-only (03)
# Song (02)
# Fearful (06)
# Normal intensity (01)
# Statement "dogs" (02)
# 1st Repetition (01)
# 12th Actor (12)
# Female, as the actor ID number is even.
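# A small helper illustrating the identifier scheme above (a sketch for reference only; the
# extraction loop below parses the path segments directly):
def parse_ravdess_filename(filename):
    # e.g. "03-02-06-01-02-01-12.wav" -> modality, vocal channel, emotion, intensity,
    # statement, repetition, actor
    keys = [
        "modality",
        "vocal_channel",
        "emotion",
        "intensity",
        "statement",
        "repetition",
        "actor",
    ]
    parts = filename.replace(".wav", "").split("-")
    info = dict(zip(keys, map(int, parts)))
    info["gender"] = "female" if info["actor"] % 2 == 0 else "male"
    return info

parse_ravdess_filename("03-02-06-01-02-01-12.wav")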
male_song = []
female_song = []
male_emotion = []
female_emotion = []
for root, dirnames, filename in os.walk(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/"
):
wave_file = glob.glob(root + "/*.wav")
for wave in wave_file:
gender = int(wave.split("/")[5].split("-")[6].split(".")[0])
emotion = int(wave.split("/")[5].split("-")[2])
if gender % 2 == 0:
female_song.append(glob.glob(root + "/*.wav"))
if emotion == 1:
female_emotion.append("neutral")
elif emotion == 3:
female_emotion.append("happy")
elif emotion == 4:
female_emotion.append("sad")
elif emotion == 5:
female_emotion.append("angry")
elif emotion == 6:
female_emotion.append("fear")
elif emotion == 7:
female_emotion.append("disgust")
else:
female_emotion.append("surprise")
else:
male_song.append(glob.glob(root + "/*.wav"))
if emotion == 1:
male_emotion.append("neutral")
elif emotion == 3:
male_emotion.append("happy")
elif emotion == 4:
male_emotion.append("sad")
elif emotion == 5:
male_emotion.append("angry")
elif emotion == 6:
male_emotion.append("fear")
elif emotion == 7:
male_emotion.append("disgust")
else:
male_emotion.append("surprise")
male_song[:50]
female_song[:50]
male_emotion[:50]
male_emotion_dir = glob.glob(
"../input/surrey-audiovisual-expressed-emotion-savee/ALL/*"
)
male_emotion_dir[0:5]
path_male_emotiondir = []
for root, dirnames, filenames in os.walk(
"../input/surrey-audiovisual-expressed-emotion-savee/ALL/"
):
path_male_emotiondir.extend(glob.glob(root + "/*.wav"))
print(path_male_emotiondir[:5])
male_emotion_1 = []
for male in male_emotion_dir:
emotion = male.split("/")[4].split("_")[1]
if emotion.startswith("a"):
male_emotion_1.append("angry")
elif emotion.startswith("d"):
male_emotion_1.append("disgust")
elif emotion.startswith("f"):
male_emotion_1.append("fear")
elif emotion.startswith("h"):
male_emotion_1.append("happy")
elif emotion.startswith("n"):
male_emotion_1.append("neutral")
elif emotion.startswith("sa"):
male_emotion_1.append("sad")
elif emotion.startswith("su"):
male_emotion_1.append("surprise")
else:
male_emotion_1.append("Wrong emotion")
male_emotion_1[:50]
female_emotion_1 = []
path_female_emotiondir = []
for root, dirnames, filenames in os.walk(
"../input/toronto-emotional-speech-set-tess/TESS Toronto emotional speech set data/"
):
for f in filenames:
emotion = root.split("/")[4]
if emotion == "OAF_angry" or emotion == "YAF_angry":
female_emotion_1.append("angry")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
elif emotion == "OAF_disgust" or emotion == "YAF_disgust":
female_emotion_1.append("disgust")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
elif emotion == "OAF_Fear" or emotion == "YAF_fear":
female_emotion_1.append("fear")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
elif emotion == "OAF_happy" or emotion == "YAF_happy":
female_emotion_1.append("happy")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
elif emotion == "OAF_Sad" or emotion == "YAF_sad":
female_emotion_1.append("sad")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
elif emotion == "OAF_Pleasant_surprise" or emotion == "YAF_pleasant_surprised":
female_emotion_1.append("surprise")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
elif emotion == "OAF_neutral" or emotion == "YAF_neutral":
female_emotion_1.append("neutral")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
else:
female_emotion_1.append("Wrong emotion")
path = os.path.join(root + f)
path_female_emotiondir.append(path)
female_emotion[:50]
female_emotion_1[:50]
emotion = male_emotion + male_emotion_1 + female_emotion + female_emotion_1
# **Label DataFrame**
# Label dataframe
emotion_df = pd.DataFrame(emotion, columns=["Labels"])
print(emotion_df.shape)
emotion_df.head(10)
# # **Get Feature Data(From audio wave file-by adding data augmentation techniques)**
# # Process of getting male feature data
def log_spectrum(data):
    # compute the mel spectrogram of the signal passed in
    spectrogram = librosa.feature.melspectrogram(
        data, sr=sample_rate, n_mels=128, fmax=8000
    )
    db_spec = librosa.power_to_db(spectrogram)
    # average across the mel bands to get a 1-D profile over time frames
    log_spectrogram = np.mean(db_spec, axis=0)
    return log_spectrogram
def noise(data):
noise_amp = 0.04 * np.random.uniform() * np.amax(data)
data = data + noise_amp * np.random.normal(size=data.shape[0])
return data
def stretch(data, rate=0.70):
    # time-stretch, then pad/trim back to the original length so the augmented log-spectra stay summable per file
    return librosa.util.fix_length(librosa.effects.time_stretch(data, rate), size=len(data))
def shift(data):
shift_range = int(np.random.uniform(low=-5, high=5) * 1000)
return np.roll(data, shift_range)
def pitch(data, sampling_rate, pitch_factor=0.8):
return librosa.effects.pitch_shift(data, sampling_rate, pitch_factor)
def higher_speed(data, speed_factor=1.25):
    # speed up, then pad back to the original length so feature lengths stay aligned
    return librosa.util.fix_length(librosa.effects.time_stretch(data, speed_factor), size=len(data))
def lower_speed(data, speed_factor=0.75):
    # slow down, then trim back to the original length so feature lengths stay aligned
    return librosa.util.fix_length(librosa.effects.time_stretch(data, speed_factor), size=len(data))
data, sr = librosa.load(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/Actor_17/03-02-03-01-01-01-17.wav",
res_type="kaiser_fast",
)
plt.figure(figsize=(10, 3))
librosa.display.waveplot(y=data, sr=sr)
ipd.Audio(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/Actor_17/03-02-03-01-01-01-17.wav"
)
data, sr = librosa.load(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/Actor_17/03-02-02-02-01-02-17.wav",
res_type="kaiser_fast",
)
plt.figure(figsize=(10, 3))
librosa.display.waveplot(y=data, sr=sr)
ipd.Audio(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/Actor_17/03-02-02-02-01-02-17.wav"
)
sample_rate = 22050
df = pd.DataFrame(columns=["feature"])
count = 0
for root, dirnames, filename in os.walk(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/"
):
wave_file = glob.glob(root + "/*.wav")
if wave_file in male_song:
for a_file in tqdm(wave_file):
sample, sample_rate = librosa.load(
a_file, sr=sample_rate, res_type="kaiser_fast"
)
male_log_spec = log_spectrum(sample)
time_stretched = stretch(sample)
time_stretching = log_spectrum(time_stretched)
shifted = shift(sample)
shifting = log_spectrum(shifted)
pitch_scale = pitch(sample, sample_rate)
pitch_scaling = log_spectrum(pitch_scale)
high_speed = higher_speed(sample)
high_speeding = log_spectrum(high_speed)
low_speed = lower_speed(sample)
low_speeding = log_spectrum(low_speed)
df.loc[count] = [
male_log_spec
+ time_stretching
+ shifting
+ pitch_scaling
+ high_speeding
+ low_speeding
]
count = count + 1
gc.collect()
df
sample_rate = 22050
df1 = pd.DataFrame(columns=["feature"])
count1 = 0
for a_file in tqdm(path_male_emotiondir):
sample, sample_rate = librosa.load(a_file, sr=sample_rate, res_type="kaiser_fast")
male_log_spec_1 = log_spectrum(sample)
time_stretched = stretch(sample)
time_stretching_1 = log_spectrum(time_stretched)
shifted = shift(sample)
shifting_1 = log_spectrum(shifted)
pitch_scale = pitch(sample, sample_rate)
pitch_scaling_1 = log_spectrum(pitch_scale)
high_speed = higher_speed(sample)
high_speeding_1 = log_spectrum(high_speed)
low_speed = lower_speed(sample)
low_speeding_1 = log_spectrum(low_speed)
df1.loc[count1] = [
male_log_spec_1
+ time_stretching_1
+ shifting_1
+ pitch_scaling_1
+ high_speeding_1
+ low_speeding_1
]
count1 = count1 + 1
gc.collect()
df1
# # Process of getting female feature data
data, sr = librosa.load(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/Actor_02/03-02-05-02-01-01-02.wav",
res_type="kaiser_fast",
)
plt.figure(figsize=(10, 3))
librosa.display.waveplot(y=data, sr=sr)
ipd.Audio(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/Actor_02/03-02-05-02-01-01-02.wav"
)
sample_rate = 22050
df2 = pd.DataFrame(columns=["feature"])
count2 = 0
for root, dirnames, filename in os.walk(
"../input/ravdess-emotional-song-audio/audio_song_actors_01-24/"
):
wave_file = glob.glob(root + "/*.wav")
if wave_file in female_song:
for a_file in tqdm(wave_file):
sample, sample_rate = librosa.load(
a_file, sr=sample_rate, res_type="kaiser_fast"
)
female_log_spec = log_spectrum(sample)
time_stretched = stretch(sample)
time_stretching_2 = log_spectrum(time_stretched)
shifted = shift(sample)
shifting_2 = log_spectrum(shifted)
pitch_scale = pitch(sample, sample_rate)
pitch_scaling_2 = log_spectrum(pitch_scale)
high_speed = higher_speed(sample)
high_speeding_2 = log_spectrum(high_speed)
low_speed = lower_speed(sample)
low_speeding_2 = log_spectrum(low_speed)
df2.loc[count2] = [
female_log_spec
+ time_stretching_2
+ shifting_2
+ pitch_scaling_2
+ high_speeding_2
+ low_speeding_2
]
count2 = count2 + 1
gc.collect()
df2
path_emotiondir = []
for root, dirnames, filenames in os.walk(
"../input/toronto-emotional-speech-set-tess/TESS Toronto emotional speech set data/"
):
path_emotiondir.extend(glob.glob(root + "/*.wav"))
print(path_emotiondir[0:5])
sample_rate = 22050
df3 = pd.DataFrame(columns=["feature"])
count3 = 0
for a_file in tqdm(path_emotiondir):
sample, sample_rate = librosa.load(a_file, sr=sample_rate, res_type="kaiser_fast")
female_log_spec_1 = log_spectrum(sample)
time_stretched = stretch(sample)
time_stretching_3 = log_spectrum(time_stretched)
shifted = shift(sample)
shifting_3 = log_spectrum(shifted)
pitch_scale = pitch(sample, sample_rate)
pitch_scaling_3 = log_spectrum(pitch_scale)
high_speed = higher_speed(sample)
high_speeding_3 = log_spectrum(high_speed)
low_speed = lower_speed(sample)
low_speeding_3 = log_spectrum(low_speed)
df3.loc[count3] = [
female_log_spec_1
+ time_stretching_3
+ shifting_3
+ pitch_scaling_3
+ high_speeding_3
+ low_speeding_3
]
count3 = count3 + 1
gc.collect()
df3
frames = [df, df1, df2, df3]
new_df = pd.concat(frames)
new_df
# **feature Dataframe**
# feature Dataframe
new_data = pd.DataFrame(new_df["feature"].values.tolist())
new_data
# # **Merge feature and target variable & Preprocessing for model creation**
# merge feature and target variable
audio_data = pd.concat([emotion_df, new_data], axis=1)
audio_data
audio_data = shuffle(audio_data)
audio_data = audio_data.fillna(0)
audio_data
# # **Preprocessing for model creation**
le = LabelEncoder()
emotions_and_encoded_val = pd.DataFrame()
emotions_and_encoded_val["Emotions"] = audio_data["Labels"].unique()
emotions_and_encoded_val["Encoded value"] = le.fit_transform(
emotions_and_encoded_val["Emotions"]
)
emotions_and_encoded_val
n_class = len(emotions_and_encoded_val["Encoded value"])
n_class
# target variable- label and one hot encoding
Y = tf.keras.utils.to_categorical(le.fit_transform(audio_data["Labels"]))
Y.shape
np.unique(Y)
# feature Variable
X = audio_data.iloc[:, 1:].values
X
print(X.shape, Y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=5)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# ## Model Creation
def model(input_shape, num_classes):
inputs = tf.keras.layers.Input(shape=input_shape, name="input")
x = Conv1D(64, 9, activation="relu", padding="same")(inputs)
x = Conv1D(64, 9, activation="relu", padding="same")(x)
x = MaxPooling1D(pool_size=2)(x)
# x= Dropout(0.2)(x)
x = Conv1D(128, 9, activation="relu", padding="same")(x)
x = Conv1D(128, 9, activation="relu", padding="same")(x)
x = MaxPooling1D(pool_size=2)(x)
# x= Dropout(0.2)(x)
x = Conv1D(256, 9, activation="relu", padding="same")(x)
x = Conv1D(256, 9, activation="relu", padding="same")(x)
x = MaxPooling1D(pool_size=2)(x)
x = Dropout(0.2)(x)
x = Flatten()(x)
x = Dense(128, activation="relu")(x)
outputs = Dense(num_classes, activation="softmax", name="output")(x)
return models.Model(inputs=inputs, outputs=outputs)
model = model((X_train.shape[1], 1), n_class)
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=40, validation_data=(X_test, y_test))
model.save("emotion_recognition.h5")
import os
os.chdir(r"../working")
from IPython.display import FileLink
FileLink(r"emotion_recognition.h5")
def show_graphs(history):
epochs = [i for i in range(40)]
fig, ax = plt.subplots(1, 2)
train_acc = history.history["accuracy"]
train_loss = history.history["loss"]
test_acc = history.history["val_accuracy"]
test_loss = history.history["val_loss"]
fig.set_size_inches(30, 12)
ax[0].plot(epochs, train_loss, label="Training Loss")
ax[0].plot(epochs, test_loss, label="Testing Loss")
ax[0].set_title("Training & Testing Loss")
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[1].plot(epochs, train_acc, label="Training Accuracy")
ax[1].plot(epochs, test_acc, label="Testing Accuracy")
ax[1].set_title("Training & Testing Accuracy")
ax[1].legend()
ax[1].set_xlabel("Epochs")
plt.show()
show_graphs(history)
# **Prediction Of Song**
from tensorflow import keras
model = keras.models.load_model("emotion_recognition.h5")
# list the song files available for prediction
song = os.listdir("../input/song-data/")
# convert .mp3 song into .wav
from pydub import AudioSegment
input = "../input/song-data/John-Mayer-I-Guess-I-Just-Feel-Like.mp3"
output = "./John-Mayer-I-Guess-I-Just-Feel-Like.wav"
sound = AudioSegment.from_mp3(input)
sound.export(output, format="wav")
testsong = "./John-Mayer-I-Guess-I-Just-Feel-Like.wav"
leng = []
from pydub.utils import make_chunks
from pydub import AudioSegment
audio = AudioSegment.from_file("./John-Mayer-I-Guess-I-Just-Feel-Like.wav", "wav")
chunk_length_ms = 7000 # pydub calculates in millisec
chunks = make_chunks(audio, chunk_length_ms)  # split the audio into 7-second chunks
# Export all of the individual chunks as wav files
for i, chunk in enumerate(chunks):
chunk_name = "chunk{0}.wav".format(i)
print("exporting", chunk_name)
chunk.export(chunk_name, format="wav")
leng.append(chunk_name)
filename = "chunk" + str(i) + ".wav"
print("Processing chunk " + str(i))
file = filename
print("Chunk File is", file)
song_sample, sample_rate = librosa.load(file, sr=22050, res_type="kaiser_fast")
print(song_sample.shape)
audio_spectrum = log_spectrum(song_sample)
print(audio_spectrum.shape)
sample_rate = 22050
test_df = pd.DataFrame(columns=["feature"])
count = 0
for i in leng:
sample, sample_rate = librosa.load(i, sr=sample_rate, res_type="kaiser_fast")
log_spec = log_spectrum(sample)
time_stretched = stretch(sample)
time_stretching = log_spectrum(time_stretched)
shifted = shift(sample)
shifting = log_spectrum(shifted)
pitch_scale = pitch(sample, sample_rate)
pitch_scaling = log_spectrum(pitch_scale)
high_speed = higher_speed(sample)
high_speeding = log_spectrum(high_speed)
low_speed = lower_speed(sample)
low_speeding = log_spectrum(low_speed)
test_df.loc[count] = [
log_spec
+ time_stretching
+ shifting
+ pitch_scaling
+ high_speeding
+ low_speeding
]
count = count + 1
type(test_df)
test_data = np.array(test_df)
test_data.shape
max_size_log = 308
def padded_log(Log_spec):
pad_log = []
for i in Log_spec:
app = (max_size_log - len(i)) * [0]
# print(app)
new_list = i.tolist() + app
pad_log.append(new_list)
arr = np.array([np.array(xi) for xi in pad_log])
return arr
test_data = padded_log(test_df["feature"])
type(test_data)
test = test_data.reshape(-1, 308, 1)
ans = model.predict(test)
ans
arr = ans.T
arr.shape
arr[0]
lst = []
for op in arr:
output = np.argmax(op)
print(type(output))
final_prediction = []
for i in ans:
i = i.tolist()
maxpos = i.index(max(i))
if maxpos == 0:
final_prediction.append("angry")
elif maxpos == 1:
final_prediction.append("disgust")
elif maxpos == 2:
final_prediction.append("fear")
elif maxpos == 3:
final_prediction.append("happy")
elif maxpos == 4:
final_prediction.append("neutral")
elif maxpos == 5:
final_prediction.append("sad")
elif maxpos == 6:
final_prediction.append("surprise")
len(final_prediction)
|
# # TABLE OF CONTENTS
# * [IMPORTS](#1)
# * [INTRODUCTION](#2)
# * [CONFIGURATION](#2.1)
# * [EXECUTIVE SUMMARY](#2.2)
# * [PREPROCESSING](#3)
# * [INFERENCES](#3.1)
# * [ADVERSARIAL CV](#4)
# * [INFERENCES](#4.1)
# * [EDA- VISUALS](#5)
# * [TARGET BALANCE](#5.1)
# * [PAIRPLOTS](#5.2)
# * [DISTRIBUTION PLOTS](#5.3)
# * [INFERENCES](#5.4)
# * [UNIVARIATE FEATURE IMPORTANCE](#6)
# * [INFERENCES](#6.1)
# * [DATA TRANSFORMS](#7)
# * [MODEL TRAINING- BASELINE](#8)
# * [ENSEMBLE](#9)
# * [OUTRO](#10)
# # IMPORTS
#
# General library imports:-
import pandas as pd
import numpy as np
from scipy.stats import mode, kstest, normaltest, shapiro, anderson, jarque_bera
from collections import Counter
from itertools import product
from termcolor import colored
from colorama import Fore, Style, init
from warnings import filterwarnings
filterwarnings("ignore")
from tqdm.notebook import tqdm
from IPython.display import clear_output
import seaborn as sns
import matplotlib.pyplot as plt
from gc import collect
from pprint import pprint
pd.set_option("display.max_columns", 50)
pd.set_option("display.max_rows", 50)
# Setting rc parameters in seaborn for plots and graphs-
# Reference - https://matplotlib.org/stable/tutorials/introductory/customizing.html:-
# To alter this, refer to matplotlib.rcParams.keys()
sns.set(
{
"axes.facecolor": "#ffffff",
"figure.facecolor": "#ffffff",
"axes.edgecolor": "#000000",
"grid.color": "#ffffff",
"font.family": ["Cambria"],
"axes.labelcolor": "#000000",
"xtick.color": "#000000",
"ytick.color": "#000000",
"grid.linewidth": 0.90,
"grid.linestyle": "--",
"axes.titlecolor": "tab:blue",
"axes.titlesize": 10,
"axes.labelweight": "bold",
"legend.fontsize": 7.0,
"legend.title_fontsize": 7.0,
"font.size": 8.0,
"xtick.labelsize": 7.5,
"ytick.labelsize": 7.5,
}
)
print()
collect()
# Importing model and pipeline specifics:-
# Pipeline specifics:-
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler
from sklearn.model_selection import (
RepeatedStratifiedKFold as RSKF,
StratifiedKFold as SKF,
KFold,
RepeatedKFold as RKF,
cross_val_score,
)
from sklearn.inspection import permutation_importance, PartialDependenceDisplay as PDD
from sklearn.feature_selection import mutual_info_classif
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
# ML Model training:-
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from xgboost import XGBClassifier, XGBRegressor
from lightgbm import LGBMClassifier, LGBMRegressor
from catboost import CatBoostClassifier, CatBoostRegressor
from sklearn.ensemble import (
ExtraTreesClassifier as ETC,
RandomForestClassifier as RFC,
RandomForestRegressor as RFR,
ExtraTreesRegressor as ETR,
)
print()
collect()
#
# # INTRODUCTION
# | VersionNumber | Version Details | Best CV score| Public LB score|
# | :-: | --- | :-: | :-: |
# | **V1** | * Extensive EDA with appropriate configuration class<br>* No scaling<br>* No extra features<br>* Baseline XGB, XGBR, RFC, ETR, ETC, Logistic models<br>* Simple ensemble with average | 0.809739 | 0.85866 |
# | **V2** | * Better feature choices<br>* 10 ML models with better plots and hand tuning<br>* Weighted average ensemble | 0.822832 | 0.85006 |
# | **V3** | * Better feature choices- refer AmbrosM's post<br>* 11 ML models with better plots and hand tuning<br>* Weighted average ensemble with selected models | 0.821121 | 0.85200 |
# | **V4** | * Configuration class description<br>* Slight adjustment of features (secondary features)<br>* Partial dependency plots in model training | | |
# ## CONFIGURATION PARAMETERS
#
# |Section | Parameter | Description | Intended values |
# |--- | :-: | --- | :-:|
# |Data-preparation | gpu_switch | Turns the GPU ON/OFF- here it is OFF as the data is too small| OFF/ON|
# |Data-preparation | state | Random seed integer | integer value|
# |Data-preparation | adv_cv_req | Checks if adversarial CV is needed | (Y/N)|
# |Data-preparation | ftre_plots_req | Checks if plots and visuals are needed in EDA | (Y/N)|
# |Data-preparation | ftre_imp_req | Checks if plots and visuals are needed for feature importance after transforms | (Y/N)|
# |Data-transforms | conjoin_orig_data | Checks if original data needs to be appended to training data | (Y/N)|
# |Data-transforms | sec_ftre_req | Checks if we need any secondary feature |(Y/N)|
# |Data-transforms | scale_req | Checks if we need any scaling method |(Y/N)|
# |Data-transforms | scl_method | Puts up a scaling method - **keep a value here even if scale_req == N**|(Robust/ Z/ MinMax) |
# |Model training | ML | Checks if ML models (except for GAM) are needed, for EDA only, keep this as N| (Y/N)|
# |Model training | n_splits | Provides the number of data splits in CV strategy|integer value, usually between 3 and 15|
# |Model training | n_repeats | Provides the number of data repeats in CV strategy| integer value|
# |Model training | nbrnd_erly_stp | Provides the number of early-stopping rounds in ensemble tree models to reduce overfitting| integer value|
# |Model training | prtldepplot_req | Plots partial dependency plots from model estimator on training| (Y/N)|
#
# Configuration class:-
class CFG:
"Configuration class for parameters and CV strategy for tuning and training"
# Data preparation:-
version_nb = 4
gpu_switch = "OFF"
state = 42
target = "target"
episode = 12
path = f"/kaggle/input/playground-series-s3e{episode}/"
orig_path = f"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
adv_cv_req = "Y"
ftre_plots_req = "Y"
ftre_imp_req = "Y"
# Data transforms and scaling:-
conjoin_orig_data = "N"
sec_ftre_req = "Y"
scale_req = "Y"
scl_method = "Robust"
# Model Training:-
ML = "Y"
n_splits = 10
n_repeats = 5
nbrnd_erly_stp = 50
prtldepplot_req = "Y"
# Global variables for plotting:-
grid_specs = {
"visible": True,
"which": "both",
"linestyle": "--",
"color": "lightgrey",
"linewidth": 0.75,
}
title_specs = {"fontsize": 9, "fontweight": "bold", "color": "tab:blue"}
# Color printing
def PrintColor(text: str, color=Fore.BLUE, style=Style.BRIGHT):
"Prints color outputs using colorama using a text F-string"
print(colored(style + color + text + Style.RESET_ALL))
# Scaler to be used for continuous columns:-
all_scalers = {
"Robust": RobustScaler(),
"Z": StandardScaler(),
"MinMax": MinMaxScaler(),
}
scaler = all_scalers.get(CFG.scl_method)
# Commonly used CV strategies for later usage:-
all_cv = {
"KF": KFold(n_splits=CFG.n_splits, shuffle=True, random_state=CFG.state),
"RKF": RKF(n_splits=CFG.n_splits, n_repeats=CFG.n_repeats, random_state=CFG.state),
"RSKF": RSKF(
n_splits=CFG.n_splits, n_repeats=CFG.n_repeats, random_state=CFG.state
),
"SKF": SKF(n_splits=CFG.n_splits, shuffle=True, random_state=CFG.state),
}
print()
PrintColor(f"--> Configuration done!")
collect()
#
# ## EXECUTIVE SUMMARY
# This notebook is a starter for the **Playground Series 3- Episode 12**. This is a binary classification task on a synthetic dataset created from the link below
# https://www.kaggle.com/datasets/vuppalaadithyasairam/kidney-stone-prediction-based-on-urine-analysis
# The evaluation metric is **ROC-AUC**
# **Column description**
#
# | Column Name | Description |
# | :-: | --- |
# | specific gravity | Density of materials in the urine |
# | pH | Acidity of urine |
# | osmolarity | Molecule concentration |
# | conductivity | Concentration of charged ions in the sample |
# | urea concentration | Concentration of urea in milli-moles/ litre |
# | calcium concentration | Concentration of calcium in milli-moles/ litre |
# | **target** | Binary target variable |
# # PREPROCESSING
#
PrintColor(f"\n---------- Data Preprocessing ---------- \n", color=Fore.MAGENTA)
# Reading the train-test datasets:-
train = pd.read_csv(CFG.path + f"train.csv")
test = pd.read_csv(CFG.path + f"test.csv")
original = pd.read_csv(CFG.orig_path)
original.insert(0, "id", range(len(original)))
original["id"] = original["id"] + test["id"].max() + 1
train["Source"], test["Source"], original["Source"] = (
"Competition",
"Competition",
"Original",
)
PrintColor(
f"\nData shapes- [train, test, original]-- {train.shape} {test.shape} {original.shape}\n"
)
# Creating dataset information:
PrintColor(f"\nTrain information\n")
display(train.info())
PrintColor(f"\nTest information\n")
display(test.info())
PrintColor(f"\nOriginal data information\n")
display(original.info())
print()
# Displaying column description:-
PrintColor(f"\nTrain description\n")
display(
train.describe(percentiles=[0.05, 0.25, 0.50, 0.75, 0.9, 0.95, 0.99])
.transpose()
.drop(columns=["count"], errors="ignore")
.drop([CFG.target], axis=0, errors="ignore")
.style.format(precision=2)
)
PrintColor(f"\nTest description\n")
display(
test.describe(percentiles=[0.05, 0.25, 0.50, 0.75, 0.9, 0.95, 0.99])
.transpose()
.drop(columns=["count"], errors="ignore")
.drop([CFG.target], axis=0, errors="ignore")
.style.format(precision=2)
)
PrintColor(f"\nOriginal description\n")
display(
original.describe(percentiles=[0.05, 0.25, 0.50, 0.75, 0.9, 0.95, 0.99])
.transpose()
.drop(columns=["count"], errors="ignore")
.drop([CFG.target], axis=0, errors="ignore")
.style.format(precision=2)
)
# Collating the column information:-
strt_ftre = test.columns
PrintColor(f"\nStarting columns\n")
display(strt_ftre)
# Creating a copy of the datasets for further use:-
train_copy, test_copy, orig_copy = (
train.copy(deep=True),
test.copy(deep=True),
original.copy(deep=True),
)
# Displaying the unique values across train-test-original:-
PrintColor(f"\nUnique values\n")
_ = pd.concat([train.nunique(), test.nunique(), original.nunique()], axis=1)
_.columns = ["Train", "Test", "Original"]
display(_.style.background_gradient(cmap="Blues").format(formatter="{:,.0f}"))
# Normality check:-
cols = list(strt_ftre[1:-1])
PrintColor(f"\nShapiro Wilk normality test analysis\n")
pprint(
{
col: [
np.round(shapiro(train[col]).pvalue, decimals=4),
np.round(shapiro(test[col]).pvalue, 4) if col != CFG.target else np.NaN,
np.round(shapiro(original[col]).pvalue, 4),
]
for col in cols
},
indent=5,
width=100,
depth=2,
compact=True,
)
PrintColor(f"\nNormal-test normality test analysis\n")
pprint(
{
col: [
np.round(normaltest(train[col]).pvalue, decimals=4),
np.round(normaltest(test[col]).pvalue, 4) if col != CFG.target else np.NaN,
np.round(normaltest(original[col]).pvalue, 4),
]
for col in cols
},
indent=5,
width=100,
depth=2,
compact=True,
)
PrintColor(f"\nK-S normality test analysis\n")
pprint(
{
col: [
np.round(kstest(train[col], cdf="norm").pvalue, decimals=4),
np.round(kstest(test[col], cdf="norm").pvalue, 4)
if col != CFG.target
else np.NaN,
np.round(kstest(original[col], cdf="norm").pvalue, 4),
]
for col in cols
},
indent=5,
width=100,
depth=2,
compact=True,
)
print()
#
# ## INFERENCES
# 1. Train and original data appear to have few outliers
# 2. Some columns are close to being normally distributed
# 3. We don't have any nulls in the data at all
# 4. We have a completely numeric dataset
#
# ### **Side-note -- Interpreting normality tests:-**
# 1. We are using Shapiro-Wilk, NormalTest and 1-sample Kolmogorov Smirnov tests for normality with the p-value evaluator
# 2. The p-value measures the area under the tail region of the test statistic. Here, the test is set up as below-
# a. Null hypothesis- Data is normally distributed
# b. Alternative hypothesis- Data is not normally distributed
# c. The p-value illustrates the tail area. If the p-value is lower than the chosen threshold, we reject the null hypothesis and conclude the data is non-normal.
# d. Herewith, our p-values are usually lower than 5%, a common threshold for statistical tests, so most columns are not normally distributed. In some cases, the p-value crosses the 5% threshold too.
# e. Wherever the p-value is more than 5%, we cannot reject the null hypothesis (we have insufficient evidence to reject it) and can treat the data in such cases as approximately normally distributed.
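# A tiny illustration of reading these p-values (a sketch on synthetic samples, not the
# competition columns): a normal sample should usually give p > 0.05, a skewed one p < 0.05.
rng = np.random.default_rng(CFG.state)
for name, sample_ in [("normal", rng.normal(size=300)), ("skewed", rng.exponential(size=300))]:
    p_val = shapiro(sample_).pvalue
    verdict = "cannot reject normality" if p_val > 0.05 else "reject normality"
    PrintColor(f"{name:>7}: p-value = {p_val:.4f} -> {verdict}", style=Style.NORMAL)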
# # ADVERSARIAL CV
#
#
# Performing adversarial CV between the 2 specified datasets:-
def Do_AdvCV(df1: pd.DataFrame, df2: pd.DataFrame, source1: str, source2: str):
"This function performs an adversarial CV between the 2 provided datasets if needed by the user"
# Adversarial CV per column:-
    ftre = df1.drop(columns=["id", CFG.target, "Source"], errors="ignore").columns
adv_cv = {}
for col in ftre:
PrintColor(f"---> Current feature = {col}", style=Style.NORMAL)
shuffle_state = np.random.randint(low=10, high=100, size=1)
full_df = pd.concat(
[df1[[col]].assign(Source=source1), df2[[col]].assign(Source=source2)],
axis=0,
ignore_index=True,
).sample(frac=1.00, random_state=shuffle_state)
full_df = full_df.assign(
Source_Nb=full_df["Source"].eq(source2).astype(np.int8)
)
# Checking for adversarial CV:-
model = LGBMClassifier(random_state=CFG.state, max_depth=3, learning_rate=0.05)
cv = all_cv["RSKF"]
score = np.mean(
cross_val_score(
model, full_df[[col]], full_df.Source_Nb, scoring="roc_auc", cv=cv
)
)
adv_cv.update({col: round(score, 4)})
collect()
del ftre
PrintColor(f"\nResults\n")
pprint(adv_cv, indent=5, width=20, depth=1)
collect()
fig, ax = plt.subplots(1, 1, figsize=(12, 5))
pd.Series(adv_cv).plot.bar(color="tab:blue", ax=ax)
ax.axhline(y=0.60, color="red", linewidth=2.75)
ax.grid(**CFG.grid_specs)
plt.yticks(np.arange(0.0, 0.81, 0.05))
plt.show()
# Implementing the adversarial CV:-
if CFG.adv_cv_req == "Y":
PrintColor(
f"\n---------- Adversarial CV - Train vs Original ----------\n",
color=Fore.MAGENTA,
)
Do_AdvCV(df1=train, df2=original, source1="Train", source2="Original")
PrintColor(
f"\n---------- Adversarial CV - Train vs Test ----------\n", color=Fore.MAGENTA
)
Do_AdvCV(df1=train, df2=test, source1="Train", source2="Test")
PrintColor(
f"\n---------- Adversarial CV - Original vs Test ----------\n",
color=Fore.MAGENTA,
)
Do_AdvCV(df1=train, df2=test, source1="Original", source2="Test")
if CFG.adv_cv_req == "N":
PrintColor(f"\nAdversarial CV is not needed\n", color=Fore.RED)
collect()
print()
#
# ## INFERENCES
# 1. We need to investigate the train-original distribution as the adversarial AUC is quite different from 50%
# 2. Train-test belong to the same distribution, we can perhaps rely on the CV score
# # EDA AND VISUALS
# ## TARGET BALANCE
#
if CFG.ftre_plots_req == "Y":
fig, axes = plt.subplots(
1, 2, figsize=(12, 5), sharey=True, gridspec_kw={"wspace": 0.25}
)
for i, df in tqdm(enumerate([train, original]), "Target balance ---> "):
ax = axes[i]
a = df[CFG.target].value_counts(normalize=True)
_ = ax.pie(
x=a,
labels=a.index.values,
explode=[0.0, 0.25],
startangle=30,
shadow=True,
colors=["#004d99", "#ac7339"],
textprops={"fontsize": 8, "fontweight": "bold", "color": "white"},
pctdistance=0.50,
autopct="%1.2f%%",
)
df_name = "Train" if i == 0 else "Original"
_ = ax.set_title(f"\n{df_name} data- target\n", **CFG.title_specs)
plt.tight_layout()
plt.show()
collect()
print()
#
# ## PAIR-PLOTS
#
if CFG.ftre_plots_req == "Y":
PrintColor(f"\nTrain data- pair plots\n")
_ = sns.pairplot(
data=train.drop(columns=["id", "Source", CFG.target], errors="ignore"),
diag_kind="kde",
markers="o",
plot_kws={"color": "tab:blue"},
)
print()
collect()
if CFG.ftre_plots_req == "Y":
PrintColor(f"\nOriginal data- pair plots\n")
_ = sns.pairplot(
data=original.drop(columns=["id", "Source", CFG.target], errors="ignore"),
diag_kind="kde",
markers="o",
plot_kws={"color": "tab:blue"},
)
print()
collect()
#
# ## DISTRIBUTION PLOTS
#
# Violin plots for numeric columns:-
if CFG.ftre_plots_req == "Y":
PrintColor(f"\nDistribution plots- numerical columns\n")
num_cols = strt_ftre[1:-1]
fig, axes = plt.subplots(
2,
len(num_cols),
figsize=(36, 16),
gridspec_kw={"wspace": 0.2, "hspace": 0.25},
sharex=True,
)
for i, col in enumerate(num_cols):
ax = axes[0, i]
sns.violinplot(data=train[col], linewidth=2.5, color="#0073e6", ax=ax)
ax.set_title(f"\n{col}_Train\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
ax = axes[1, i]
sns.violinplot(data=original[col], linewidth=2.5, color="#004d4d", ax=ax)
ax.set_title(f"\n{col}_Original\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
plt.tight_layout()
plt.show()
del num_cols
collect()
print()
# Distribution analysis by target:-
def AnalyzeDist(df: pd.DataFrame):
"Plots the KDE plot by the binary target"
fig, axes = plt.subplots(
2, 3, figsize=(18, 7.5), gridspec_kw={"wspace": 0.2, "hspace": 0.3}
)
for i, col in enumerate(strt_ftre[1:-1]):
ax = axes[i // 3, i % 3]
sns.kdeplot(
data=df[[col, CFG.target]],
x=col,
hue=CFG.target,
palette=["#005c99", "#e63900"],
shade=False,
linewidth=2.50,
ax=ax,
)
ax.set_title(f"\n{col}\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
ax.set(xlabel="", ylabel="")
plt.tight_layout()
plt.show()
collect()
# Implementing the feature plots:-
if CFG.ftre_plots_req.upper() == "Y":
PrintColor(f"\nTrain features\n")
AnalyzeDist(df=train)
print()
PrintColor(f"\nOriginal features\n")
AnalyzeDist(df=original)
elif CFG.ftre_plots_req.upper() != "Y":
PrintColor(f"\nFeature plots are not needed\n", color=Fore.RED)
collect()
print()
# Distribution analysis by dataset:-
full_df = pd.concat(
[
train[strt_ftre[1:-1]].assign(Source="Train"),
test[strt_ftre[1:-1]].assign(Source="Test"),
original[strt_ftre[1:-1]].assign(Source="Original"),
],
ignore_index=True,
)
fig, axes = plt.subplots(
2, 3, figsize=(18, 8), gridspec_kw={"wspace": 0.25, "hspace": 0.30}
)
for i, col in enumerate(strt_ftre[1:-1]):
ax = axes[i // 3, i % 3]
sns.kdeplot(
data=full_df[["Source", col]],
x=col,
hue="Source",
palette=["#006bb3", "#e63900", "#00cc44"],
shade=None,
ax=ax,
linewidth=2.50,
)
ax.set_title(f"\n{col}\n", **CFG.title_specs)
ax.set(xlabel="", ylabel="")
ax.grid(**CFG.grid_specs)
plt.suptitle(
f"\nFeature distribution analysis across datasets\n",
fontsize=12,
color="#005266",
fontweight="bold",
)
plt.tight_layout()
plt.show()
print()
collect()
#
# ## INFERENCES
# 1. Target is balanced, no need to use imbalanced techniques
# 2. Weak feature interactions are seen across some columns; linear interactions are seen in the original data
# 3. pH is a notable column in that the target values do not cause a significant change in its distribution
# # UNIVARIATE FEATURE IMPORTANCE
#
# **We will do a leave-one-out analysis and singular feature analysis to check the strength of relationship with the target**
Unv_Prf_Sum = pd.DataFrame(
data=None, columns=["Unv_Clsf", "LOO"], index=strt_ftre[1:-1]
)
model = LGBMClassifier(
random_state=CFG.state,
max_depth=3,
learning_rate=0.085,
num_leaves=80,
)
for i, ftre in tqdm(enumerate(strt_ftre[1:-1]), "Univariate Analysis ---- "):
# Initiating single feature relationship analysis:-
score = cross_val_score(
model,
train[[ftre]],
train[CFG.target],
cv=all_cv["RSKF"],
scoring="roc_auc",
n_jobs=-1,
verbose=0,
)
Unv_Prf_Sum.loc[ftre, "Unv_Clsf"] = np.mean(score)
del score
# Initiating LOO:-
cols = [col for col in strt_ftre[1:-1] if col != ftre]
score = cross_val_score(
model,
train[cols],
train[CFG.target],
cv=all_cv["RSKF"],
scoring="roc_auc",
n_jobs=-1,
verbose=0,
)
Unv_Prf_Sum.loc[ftre, "LOO"] = np.mean(score)
del score, cols
collect()
# Plotting the feature analysis:-
fig, axes = plt.subplots(
1, 2, figsize=(13, 4.5), sharey=True, gridspec_kw={"hspace": 0.4}
)
for i, col in enumerate(Unv_Prf_Sum.columns):
ax = axes[i]
Unv_Prf_Sum.loc[:, col].plot.bar(color="#0059b3", ax=ax)
ax.set_title(f"{col}", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
plt.yticks(np.arange(0.0, 0.9, 0.05))
plt.suptitle(
f"Univariate performance\n", color="#005266", fontsize=12, fontweight="bold"
)
plt.tight_layout()
plt.show()
collect()
print()
#
# ## INFERENCES
# 1. **Calc** seems like the most important column
# 2. **pH** seems like the least important column
# # DATA TRANSFORMS
#
# We will incorporate some inputs from -
# 1. https://www.kaggle.com/code/oscarm524/ps-s3-ep12-eda-modeling
# 2. https://www.kaggle.com/competitions/playground-series-s3e12/discussion/400152
# Making secondary features:-
class SecFtreMaker(BaseEstimator, TransformerMixin):
"Makes encoded features and other secondary features for the data transformation step"
def __init__(self):
pass
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, y=None, **transform_params):
df = X.copy(deep=True)
if CFG.sec_ftre_req == "Y":
df["calc"] = df["calc"].clip(None, 8.00)
df["gravity"] = df.gravity.clip(None, 1.03)
df["Cond_Gvty_Rt"] = df["cond"] / df["gravity"]
df["Urea_Osmo"] = df["urea"] * df["osmo"]
df["Calc_Gvty"] = df["calc"] * df["gravity"]
df["Osmo_Gvty"] = df["osmo"] * df["gravity"]
df["Calc_Urea_Rt"] = df["calc"] / df["urea"]
df["Calc_Gvty_Rt"] = df["calc"] / df["gravity"]
df["RF_gravity"] = np.where(df["gravity"] >= 1.030, 1, 0).astype(np.int8)
df["RF_osmo"] = np.where(df["osmo"] >= 1200, 1, 0).astype(np.int8)
df["RF_calc"] = np.where(df["calc"] >= 7.50, 1, 0).astype(np.int8)
df["RF_total"] = df["RF_gravity"] + df["RF_osmo"] + df["RF_calc"]
df["Sq_cond"] = df["cond"] ** 2
df["Sq_calc"] = df["calc"] ** 2
df["Cond_Calc"] = df["cond"] * df["calc"]
df["Cond_Calc_Rt"] = df["cond"] / df["calc"]
df["Cond_Calc_Tot"] = df["cond"] + df["calc"]
df["Sq_Cond_Calc_Dif"] = (df["cond"] - df["calc"]) ** 2
self.op_cols = df.columns
return df
def get_feature_names_in(self, X, y):
return X.columns
def get_feature_names_out(self, X, y):
return self.op_cols
# Scaling the data if needed:-
class DataScaler(BaseEstimator, TransformerMixin):
"Scales the data columns based on the method specified"
def __init__(self):
pass
def fit(self, X, y=None, **fit_params):
"Calculates the metrics for scaling"
self.scl_cols = [
col
for col in X.drop(
columns=[CFG.target, "Source", "id"], errors="ignore"
).columns
if col.startswith("RF") == False
]
df = X[self.scl_cols]
self.mu = df.mean().values
self.std = df.std().values
self.M = df.max().values
self.m = df.min().values
self.q1 = np.percentile(df, axis=0, q=25)
self.q3 = np.percentile(df, axis=0, q=75)
self.IQR = self.q3 - self.q1
self.q2 = np.percentile(df, axis=0, q=50)
return self
def transform(self, X, y=None, **transform_params):
"Scales the data according to the method chosen"
df = X.copy()
if CFG.scale_req == "Y" and CFG.scl_method == "Robust":
df[self.scl_cols] = (df[self.scl_cols].values - self.q2) / self.IQR
elif CFG.scale_req == "Y" and CFG.scl_method == "Z":
df[self.scl_cols] = (df[self.scl_cols].values - self.mu) / self.std  # Z-scaling divides by the standard deviation
elif CFG.scale_req == "Y" and CFG.scl_method == "MinMax":
df[self.scl_cols] = (df[self.scl_cols].values - self.m) / (self.M - self.m)
else:
PrintColor(f"--> Scaling is not needed", color=Fore.RED)
self.op_cols = df.columns
return df
def get_feature_names_in(self, X, y):
return X.columns
def get_feature_names_out(self, X, y):
return self.op_cols
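# As a side reference, the three options in DataScaler roughly mirror scikit-learn's built-in
# scalers. Below is a minimal, self-contained sketch (illustrative only, not used by the
# pipeline above); the tiny toy frame is an assumption made purely for demonstration.
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler

_demo_num = pd.DataFrame({"calc": [1.2, 3.4, 7.8, 2.1], "cond": [10.0, 14.5, 21.0, 12.2]})
_robust = RobustScaler().fit_transform(_demo_num)  # (x - median) / IQR
_zscore = StandardScaler().fit_transform(_demo_num)  # (x - mean) / std
_minmax = MinMaxScaler().fit_transform(_demo_num)  # (x - min) / (max - min)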
print()
collect()
# Implementing the transform pipeline:-
PrintColor(
f"\n---------- Data transformation pipeline ----------\n", color=Fore.MAGENTA
)
PrintColor(
f"--> Shape before transform (train, test, original) = {train.shape} {test.shape} {original.shape}"
)
if CFG.conjoin_orig_data == "Y":
train = pd.concat([train, original], axis=0, ignore_index=True)
PrintColor(f"--> Shape after adding original data (train) = {train.shape}")
else:
PrintColor(f"--> Original data is not needed", color=Fore.RED)
train = train.drop_duplicates()
PrintColor(f"--> Shape after removing duplicates = {train.shape}")
ytrain = train[CFG.target]
xform = Pipeline(steps=[("Xform", SecFtreMaker()), ("S", DataScaler())])
xform.fit(train.drop(CFG.target, axis=1, errors="ignore"), ytrain)
Xtrain = xform.transform(train.drop(CFG.target, axis=1, errors="ignore"))
Xtest = xform.transform(test)
PrintColor(
f"--> Shape after transform (Xtrain, test, ytrain) = {Xtrain.shape} {test.shape} {ytrain.shape}"
)
PrintColor(f"\n--> Data after transform\n")
display(Xtrain.head(5).style.format(precision=2))
print("\n\n")
display(Xtest.head(5).style.format(precision=2))
collect()
print()
if CFG.ftre_imp_req.upper() == "Y":
fig, axes = plt.subplots(
3,
2,
figsize=(25, 28),
sharex=True,
gridspec_kw={"wspace": 0.2, "hspace": 0.25, "height_ratios": [0.6, 0.35, 0.4]},
)
# Train- feature correlations:-
corr_ = Xtrain.iloc[:, 1:].corr()
ax = axes[0, 0]
sns.heatmap(
data=corr_,
cmap="Blues",
linewidth=1.8,
linecolor="white",
annot=True,
fmt=".2f",
annot_kws={"fontsize": 7, "fontweight": "bold"},
mask=np.triu(np.ones_like(corr_)),
cbar=None,
ax=ax,
)
ax.set_title(f"\nTrain Correlations\n", **CFG.title_specs)
# Test-feature correlations:-
ax = axes[0, 1]
corr_ = Xtest.iloc[:, 1:].corr()
sns.heatmap(
data=corr_,
cmap="Blues",
linewidth=1.8,
linecolor="white",
annot=True,
fmt=".2f",
annot_kws={"fontsize": 7, "fontweight": "bold"},
mask=np.triu(np.ones_like(corr_)),
cbar=None,
ax=ax,
)
ax.set_title(f"\nTest Correlations\n", **CFG.title_specs)
# Target- feature correlations:-
ax = axes[1, 0]
corr_ = (
pd.concat([Xtrain, ytrain], axis=1)
.corr()[CFG.target]
.drop([CFG.target, "id"], axis=0)
)
corr_.plot.bar(ax=ax, color="tab:blue")
ax.set_title(f"\nTarget Correlations\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
ax.set_yticks(np.arange(-1.0, 1.01, 0.10))
# Mutual information:-
ax = axes[1, 1]
pd.Series(
data=mutual_info_classif(
Xtrain.drop(["id", "Source"], axis=1, errors="ignore"), ytrain
),
index=Xtrain.drop(["id", "Source"], axis=1, errors="ignore").columns,
).plot.bar(ax=ax, color="tab:blue")
ax.set_title(f"\nMutual information\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
# Permutation importance:-
ax = axes[2, 0]
model = LGBMClassifier(random_state=CFG.state)
model.fit(Xtrain.drop(["id", "Source"], axis=1, errors="ignore"), ytrain)
pd.Series(
data=np.mean(
permutation_importance(
model,
Xtrain.drop(["id", "Source"], axis=1, errors="ignore"),
ytrain,
scoring="neg_log_loss",
n_repeats=10,
n_jobs=-1,
random_state=CFG.state,
).get("importances"),
axis=1,
),
index=Xtrain.drop(["id", "Source"], axis=1, errors="ignore").columns,
).plot.bar(color="tab:blue", ax=ax)
ax.set_title(f"\nPermutation Importance\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
# Univariate classification:-
ax = axes[2, 1]
all_cols = Xtrain.drop(["id", "Source"], axis=1, errors="ignore").columns
scores = []
for col in all_cols:
model = LGBMClassifier(
random_state=CFG.state, max_depth=4, learning_rate=0.85, num_leaves=90
)
score = cross_val_score(
model,
Xtrain[[col]],
ytrain,
scoring="roc_auc",
cv=all_cv["SKF"],
n_jobs=-1,
verbose=0,
)
scores.append(np.mean(score))
pd.Series(scores, index=all_cols).plot.bar(color="tab:blue", ax=ax)
ax.set_title(f"\nUnivariate classification\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
ax.set_yticks(np.arange(0.0, 0.96, 0.05))
plt.tight_layout()
plt.show()
del corr_, score, scores, model, all_cols
else:
PrintColor(f"\nFeature importance plots are not required\n", color=Fore.RED)
PrintColor(f"\nAll transformed features after data pipeline---> \n")
display(Xtest.columns)
print()
collect()
#
# # BASELINE MODELS
#
# **Key notes**
# 1. Complex models are unlikely to work here, so we stick to the simplest ones
# 2. We will tune minimal parameters as we are unsure of the private data
# 3. We will not tune models to the public LB, as it has only 56-57 samples and is unreliable
# Initializing baseline parameters:-
Mdl_Master = {
"XGB": XGBClassifier(
**{
"objective": "binary:logistic",
"tree_method": "gpu_hist" if CFG.gpu_switch == "ON" else "hist",
"eval_metric": "auc",
"random_state": CFG.state,
"colsample_bytree": 0.95,
"learning_rate": 0.095,
"min_child_weight": 3,
"max_depth": 4,
"n_estimators": 1200,
"reg_lambda": 4.5,
"reg_alpha": 4.0,
}
),
"XGBR": XGBRegressor(
**{
"colsample_bytree": 0.95,
"learning_rate": 0.035,
"max_depth": 3,
"min_child_weight": 11,
"n_estimators": 1000,
"objective": "reg:squarederror",
"tree_method": "gpu_hist" if CFG.gpu_switch == "ON" else "hist",
"eval_metric": "rmse",
"random_state": CFG.state,
"reg_lambda": 0.25,
"reg_alpha": 5.5,
}
),
"RFC": RFC(
n_estimators=100,
max_depth=3,
min_samples_leaf=3,
min_samples_split=13,
random_state=CFG.state,
),
"RFR": RFR(
n_estimators=200,
max_depth=3,
min_samples_leaf=10,
min_samples_split=14,
random_state=CFG.state,
),
"ETR": ETR(
n_estimators=180,
max_depth=3,
min_samples_leaf=4,
min_samples_split=12,
random_state=CFG.state,
),
"ETC": ETR(
n_estimators=140,
max_depth=3,
min_samples_leaf=4,
min_samples_split=14,
random_state=CFG.state,
),
"LREG": LogisticRegression(
max_iter=5000,
penalty="l2",
solver="saga",
C=2.5,
random_state=CFG.state,
tol=0.001,
),
"LGBM": LGBMClassifier(
random_state=CFG.state,
max_depth=3,
learning_rate=0.075,
num_leaves=45,
min_child_samples=3,
reg_alpha=3.5,
reg_lambda=8.5,
metric="auc",
objective="binary",
n_estimators=1000,
),
"LGBMR": LGBMRegressor(
random_state=CFG.state,
max_depth=3,
num_leaves=80,
learning_rate=0.065,
reg_alpha=0.5,
reg_lambda=5.5,
metric="rmse",
objective="regression",
min_child_samples=10,
n_estimators=1000,
),
"CB": CatBoostClassifier(
iterations=1000,
max_depth=4,
eval_metric="AUC",
random_strength=0.6,
min_data_in_leaf=4,
learning_rate=0.08,
verbose=0,
l2_leaf_reg=5.5,
bagging_temperature=1.6,
),
"CBR": CatBoostRegressor(
iterations=1000,
max_depth=3,
eval_metric="RMSE",
loss_function="RMSE",
random_strength=0.5,
min_data_in_leaf=5,
learning_rate=0.065,
verbose=0,
l2_leaf_reg=1.25,
bagging_temperature=0.75,
od_wait=7,
random_seed=CFG.state,
),
"SVC": SVC(
random_state=CFG.state, C=5.5, kernel="rbf", probability=True, tol=0.0001
),
}
# Shortlisted model features:-
sel_ftre = ["calc", "Cond_Calc_Rt", "Cond_Calc", "Source"]
cols = Mdl_Master.keys()
Mdl_Preds = pd.DataFrame(
index=test.id, columns=cols, data=np.zeros((len(test.id), len(cols)))
)
OOF_Preds = pd.DataFrame(
index=train.id, columns=cols, data=np.zeros((len(train.id), len(cols)))
)
Scores = pd.DataFrame(columns=cols)
FtreImp = pd.DataFrame(
columns=cols, index=[col for col in sel_ftre if col not in ["Source"]]
)
cv = all_cv["RSKF"]
print()
# Making the partial dependence plot:-
def MakePrtlDepPlot(model, method, ftre, X):
"Makes the partial dependence plot if necessary"
fig, axes = plt.subplots(
1,
len(ftre),
figsize=(len(ftre) * 6, 3.0),
sharey=True,
gridspec_kw={"wspace": 0.15, "hspace": 0.25},
)
plt.suptitle(
f"\n{method}- partial dependence\n",
y=1.0,
color="tab:blue",
fontsize=8.5,
fontweight="bold",
)
PDD.from_estimator(
model,
X[ftre],
ftre,
pd_line_kw={"color": "#0047b3", "linewidth": 1.50},
ice_lines_kw={"color": "#ccffff"},
kind="both",
ax=axes.ravel()[: len(ftre)],
random_state=CFG.state,
)
for i, ax in enumerate(axes.ravel()[: len(ftre)]):
ax.set(ylabel="", xlabel=ftre[i], title="")
ax.grid(**CFG.grid_specs)
plt.tight_layout(h_pad=0.3, w_pad=0.5)
plt.show()
collect()
print()
collect()
# Training the ML models:-
def TrainMdl(method: str):
global Mdl_Master, Mdl_Preds, OOF_Preds, cv, Scores, FtreImp, Xtrain, ytrain, Xtest, sel_ftre
model = Mdl_Master.get(method)
X, y, Xt = Xtrain[sel_ftre], ytrain, Xtest[sel_ftre]
ftre = X.drop(
["id", "Source", CFG.target, "Label"], axis=1, errors="ignore"
).columns
# Initializing I-O for the given seed:-
scores = []
oof_preds = pd.DataFrame()
test_preds = 0
ftre_imp = 0
PrintColor(f"--------------------- {method.upper()} model ---------------------")
for fold_nb, (train_idx, dev_idx) in enumerate(cv.split(X, y)):
Xtr = X.iloc[train_idx].drop(columns=["id", "Source", "Label"], errors="ignore")
Xdev = (
X.iloc[dev_idx]
.loc[X.Source == "Competition"]
.drop(columns=["id", "Source", "Label"], errors="ignore")
)
ytr = y.loc[y.index.isin(Xtr.index)]
ydev = y.loc[y.index.isin(Xdev.index)]
if method.upper() in ["XGB", "LGBM", "CB", "CBC", "XGBR", "LGBMR", "CBR"]:
model.fit(
Xtr,
ytr,
eval_set=[(Xdev, ydev)],
verbose=0,
early_stopping_rounds=CFG.nbrnd_erly_stp,
)
else:
model.fit(Xtr, ytr)
# Collecting predictions and scores:-
if method in ["XGB", "LGBM", "CB", "CBC", "RFC", "GBC", "LREG", "SVC"]:
dev_preds = np.clip(model.predict_proba(Xdev)[:, 1], a_max=1.0, a_min=0.0)
t_preds = np.clip(
model.predict_proba(
Xt.drop(columns=["id", "Source", "Label"], errors="ignore")
)[:, 1],
a_max=1.0,
a_min=0.0,
)
else:
dev_preds = np.clip(model.predict(Xdev), a_max=1.0, a_min=0.0)
t_preds = np.clip(
model.predict(
Xt.drop(columns=["id", "Source", "Label"], errors="ignore")
),
a_max=1.0,
a_min=0.0,
)
score = roc_auc_score(ydev, dev_preds)
Scores.loc[fold_nb, method] = np.round(score, 6)
scores.append(score)
oof_preds = pd.concat(
[
oof_preds,
pd.DataFrame(index=Xdev.index, data=dev_preds, columns=[f"{method}"]),
],
axis=0,
ignore_index=False,
)
test_preds = test_preds + t_preds / (CFG.n_splits * CFG.n_repeats)
if method not in ["LASSO", "RIDGE", "LREG", "SVC", "SVR"]:
ftre_imp += model.feature_importances_ / (CFG.n_splits * CFG.n_repeats)
# Collating results:-
mean_score = np.mean(scores)
print(
Style.BRIGHT
+ Fore.BLUE
+ f"Mean CV score = "
+ f"{' '* 2}"
+ Fore.YELLOW
+ Style.BRIGHT
+ f"{mean_score:.5f}"
)
oof_preds = pd.DataFrame(oof_preds.groupby(level=0)[f"{method}"].mean())
oof_preds.columns = [f"{method}"]
OOF_Preds[f"{method}"] = (
OOF_Preds[f"{method}"].values.flatten() + oof_preds.values.flatten()
)
Mdl_Preds[f"{method}"] = Mdl_Preds[f"{method}"].values.flatten() + test_preds
FtreImp[method] = ftre_imp
# Plotting the partial dependence plot:-
if CFG.prtldepplot_req == "Y":
MakePrtlDepPlot(model, method, ftre, X)
collect()
print()
collect()
print()
PrintColor(f"Training model suite:-")
display(cols)
print("\n\n")
# Implementing the training functions:-
if CFG.ML.upper() == "Y":
for method in tqdm(cols, "ML training --- "):
TrainMdl(method=method)
if CFG.ML.upper() != "Y":
PrintColor(f"\nML models are not required\n", color=Fore.RED)
if CFG.ML == "Y":
fig, axes = plt.subplots(1, 2, figsize=(25, 6))
# Plotting the mean CV scores on a scattergram:-
ax = axes[0]
sns.scatterplot(
x=Scores.mean(),
y=Scores.columns,
color="blue",
markers=True,
s=360,
marker="o",
ax=ax,
)
ax.set_title(f"\nMean CV scores across all ML models trained\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
ax = axes[1]
sns.violinplot(
Scores,
palette="pastel",
linewidth=1.75,
inner="point",
saturation=0.999,
width=0.4,
ax=ax,
)
ax.set_title(f"\nMetric distribution across folds\n", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
ax.set_yticks(np.arange(0.50, 1.01, 0.02))
plt.tight_layout()
plt.show()
print()
# Plotting feature importance:-
n_cols = int(np.ceil(len(cols) / 3))
fig, axes = plt.subplots(
3,
n_cols,
figsize=(20, len(cols) * 0.75),
sharey=True,
gridspec_kw={"wspace": 0.2, "hspace": 0.35},
)
plt.suptitle(f"\nFeature importance across all models", **CFG.title_specs)
for i, method in enumerate(FtreImp.columns):
ax = axes[i // n_cols, i % n_cols]
FtreImp[method].plot.barh(ax=ax, color="#0086b3")
ax.set_title(f"{method}", **CFG.title_specs)
ax.grid(**CFG.grid_specs)
plt.tight_layout()
plt.show()
PrintColor(f"\n\nPredictions and OOF results after training\n")
display(OOF_Preds.head(5).style.format(precision=2))
print()
display(Mdl_Preds.head(5).style.format(precision=2))
else:
PrintColor(f"\nPlots are not required as models are not trained\n", color=Fore.RED)
print()
collect()
#
# # ENSEMBLE
#
if CFG.ML == "Y":
sub_fl = pd.read_csv(CFG.path + f"sample_submission.csv")
sub_fl[CFG.target] = np.clip(
(
Mdl_Preds["CB"] * 0.00
+ Mdl_Preds["ETC"] * 1.00
+ Mdl_Preds["ETR"] * 0.00
+ Mdl_Preds["RFC"] * 0.00
+ Mdl_Preds["LGBM"] * 0.00
+ Mdl_Preds["XGB"] * 0.00
+ Mdl_Preds["XGBR"] * 0.00
+ Mdl_Preds["RFR"] * 0.00
+ Mdl_Preds["LGBMR"] * 0.00
+ Mdl_Preds["LREG"] * 0.00
+ Mdl_Preds["SVC"] * 0.00
+ Mdl_Preds["CBR"] * 0.00
).values,
a_min=0.0001,
a_max=0.9999,
)
sub_fl.to_csv(f"EnsSub_{CFG.version_nb}.csv", index=None)
display(sub_fl.head(5).style.format(precision=3))
Mdl_Preds.to_csv(f"Mdl_Preds_{CFG.version_nb}.csv")
OOF_Preds.to_csv(f"OOF_Preds_{CFG.version_nb}.csv")
PrintColor(f"\nMean scores across methods\n")
display(
pd.concat([Scores.mean(), Scores.std()], axis=1)
.rename({0: "Mean_CV", 1: "Std_CV"}, axis=1)
.sort_values(["Mean_CV", "Std_CV"], ascending=[False, True])
.style.bar(color="#b3ecff")
.format(formatter="{:.4%}")
)
else:
PrintColor(
f"\nNo need to save anything as we are not training any models\n",
color=Fore.RED,
)
collect()
print()
|
import math
import re
import string
from random import randint
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from nltk.corpus import stopwords
from scipy.stats import zscore
from sklearn.preprocessing import MinMaxScaler
from wordcloud import STOPWORDS, WordCloud
# Loading dataset
df = pd.read_csv("/kaggle/input/medium-articles/articles.csv")
df.sample(5)
df.info()
# No missing data
# Helper functions, just for extra touch 🍷
# Formatter to format anything
class CustomFormatter:
def __init__(self):
pass
# convert number to K
# eg. 1,000 to 1K
# can't think of any better name for this func
@staticmethod
def format_likes_number_to_str(number):
rounded_num = round(number / 1000, 2)
frac, whole = math.modf(rounded_num)
frac = round(frac, 2) if frac != 0 else 0
return f"{int(whole) + frac}K"
print(CustomFormatter.format_likes_number_to_str(5000))
print(CustomFormatter.format_likes_number_to_str(5200))
# # Data Preparation
# Convert claps dtype from str to int
def convert_clap_dtype(clap_str):
if "K" not in clap_str:
# 32
return int(clap_str)
if "K" in clap_str:
# 32K & 3.2K
return int(float(clap_str.split("K")[0]) * 1000)
print(f"🌊 Anomaly: {clap_str}")
return clap_str
df.claps = df.claps.apply(convert_clap_dtype)
df.claps.values[:10].tolist()
# Creating a domain column
def extract_domain(link):
return link.split("https://")[1].split("/")[0]
df["domain"] = df.link.apply(extract_domain)
df.domain.values[:10].tolist()
# Remove punctuation from word
def rm_punc_from_word(word):
clean_alphabet_list = [
alphabet for alphabet in word if alphabet not in string.punctuation
]
return "".join(clean_alphabet_list)
print(rm_punc_from_word("#cool!"))
# Remove punctuation from text
def rm_punc_from_text(text):
clean_word_list = [rm_punc_from_word(word) for word in text]
return "".join(clean_word_list)
print(rm_punc_from_text("Frankly, my dear, I don't give a damn"))
# Remove numbers from text
def rm_number_from_text(text):
text = re.sub("[0-9]+", "", text)
return " ".join(text.split()) # to rm `extra` white space
print(rm_number_from_text("You are 100times more sexier than me"))
print(
rm_number_from_text(
"If you taught yes then you are 10 times more delusional than me"
)
)
# Remove stopwords from text
def rm_stopwords_from_text(text):
_stopwords = stopwords.words("english")
text = text.split()
word_list = [word for word in text if word not in _stopwords]
return " ".join(word_list)
rm_stopwords_from_text("Love means never having to say you're sorry")
# Cleaning text
def clean_text(text):
text = text.lower()
text = rm_punc_from_text(text)
text = rm_number_from_text(text)
text = rm_stopwords_from_text(text)
# many titles contain an en dash (–), which is different from the normal hyphen (-),
# so we replace it with an empty string
text = re.sub("–", "", text)
text = " ".join(text.split()) # removing `extra` white spaces
return text
clean_text("Mrs. Robinson, you're trying to seduce me, aren't you?")
df.text = df.text.apply(clean_text)
df.title = df.title.apply(clean_text)
df.title.values[:10].tolist()
# Getting articles length
def get_article_len(text):
return len(text)
df["article_length"] = df.text.apply(get_article_len)
df.article_length.values[:10].tolist()
# # Exploratory Data Analysis
df.columns.tolist()
# Distribution of claps in our data
def display_histplot_for_claps(df, claps_threshold=2_000):
claps_threshold_str = CustomFormatter.format_likes_number_to_str(claps_threshold)
f, axs = plt.subplots(1, 2, figsize=(16, 4))
sns.histplot(x=df.claps, kde=False, ax=axs[0])
sns.histplot(x=df[df.claps <= claps_threshold].claps, kde=False, ax=axs[1])
axs[0].set_xlabel("Distribution of all the claps")
axs[1].set_xlabel(f"Distribution of claps (<= {claps_threshold_str})")
# percentage of claps less than equal to claps_threshold
pct_of_clap = round(len(df[df.claps <= claps_threshold]) / len(df), 2) * 100
print(
f" {pct_of_clap}% of articles have less than eqaul to {claps_threshold_str} 👏 claps"
)
display_histplot_for_claps(df)
# The above distribution plots show that there are some outliers in the claps column
sns.boxplot(x=df.claps)
# Claps greater than ~15K are outliers, as they fall well outside the box of the
# other observations, i.e. nowhere near the quartiles
# Detecting, correcting & removing outliers more ℹ️ [info](https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba)
# ### Removing outliers using Z score ###
# getting zscores of all the claps
claps_zscores = np.abs(zscore(df.claps))
# keeping a threshold of 3 on the absolute z-score (z = (x - mean) / std), so any clap
# more than 3 standard deviations from the mean, in either direction, is treated as an outlier
clap_outliers_row_idx = np.where(claps_zscores > 3)[0].tolist()
# removing outliers
df.drop(clap_outliers_row_idx, axis="rows", inplace=True)
sns.boxplot(x=df.claps)
# ### Removing outliers using IQR ###
claps_q1 = df.claps.quantile(0.25)
claps_q3 = df.claps.quantile(0.75)
iqr = claps_q3 - claps_q1
print(f"IQR for claps: {iqr}")
clap_outliers_row_idx = df.claps[
(df.claps < (claps_q1 - 1.5 * iqr)) | (df.claps > (claps_q3 + 1.5 * iqr))
].index.tolist()
# removing outliers
df.drop(clap_outliers_row_idx, axis="rows", inplace=True)
sns.boxplot(x=df.claps)
# Helper functions to remove outliers
# Using IQR method
def rm_outliers_in_col_using_iqr(df, col, inplace=False):
Q1 = col.quantile(0.25)
Q3 = col.quantile(0.75)
IQR = Q3 - Q1
print(f"IQR: {IQR}")
outliers_row_idx = col[
(col < (Q1 - 1.5 * IQR)) | (col > (Q3 + 1.5 * IQR))
].index.tolist()
return df.drop(outliers_row_idx, axis="rows", inplace=inplace)
# Using the Zscore method
def rm_outliers_in_col_using_zscore(df, col, inplace=False, threshold=3):
zscores = np.abs(zscore(col))
outliers_row_idx = np.where(zscores > threshold)[0].tolist()
return df.drop(outliers_row_idx, axis="rows", inplace=inplace)
# removing remaining outliers
for _ in range(10):
rm_outliers_in_col_using_iqr(df, df.claps, inplace=True)
sns.boxplot(x=df.claps)
# we had to remove outliers from the claps column multiple times, probably because the
# majority of claps are below 3K while the outliers were spread very far out
# distribution of reading_time in our data
def display_histplot_for_reading_time(df):
sns.histplot(
x=df.reading_time,
kde=False,
bins=range(df.reading_time.max()),
color="#e61e64",
alpha=0.5,
)
avg_reading_time = round(df.reading_time.mean(), 2)
print(f"The average reading ⏰ time of an article is {avg_reading_time}mins")
display_histplot_for_reading_time(df)
sns.boxplot(x=df.reading_time)
# removing outliers in reading_time column
rm_outliers_in_col_using_iqr(df, df.reading_time, inplace=True)
sns.boxplot(x=df.reading_time)
def display_claps_and_reading_time(df):
f, axs = plt.subplots(1, 2, figsize=(16, 4))
sns.scatterplot(
x="claps",
y="reading_time",
hue="article_length",
data=df,
palette="mako",
s=80,
ax=axs[0],
)
sns.histplot(x="claps", y="reading_time", data=df, palette="mako", ax=axs[1])
display_claps_and_reading_time(df)
# Articles with a reading_time above ~12.5 minutes tend not to get many claps
df[["claps", "reading_time"]].corr() # pearson corr == 0.28...
# claps & reading_time have only a low positive correlation (~0.28), i.e. they are barely related
# Color funcs to use different colours for wordcloud text ('%%' escapes a literal percent sign in the %-formatted hsl string; only the lightness value is randomised)
def wc_blue_color_func(
word, font_size, position, orientation, random_state=None, **kwargs
):
return "hsl(214, 67%%, %d%%)" % randint(60, 100)
def wc_grey_color_func(
word, font_size, position, orientation, random_state=None, **kwargs
):
return "hsl(0, 0%%, %d%%)" % randint(60, 100)
def wc_green_color_func(
word, font_size, position, orientation, random_state=None, **kwargs
):
return "hsl(123, 34%%, %d%%)" % randint(50, 100)
def wc_red_color_func(
word, font_size, position, orientation, random_state=None, **kwargs
):
return "hsl(23, 54%%, %d%%)" % randint(50, 100)
# stopwords for wordcloud
def get_wc_stopwords():
wc_stopwords = set(STOPWORDS)
# Adding words to stopwords
# these words showed up while plotting wordcloud for text
wc_stopwords.add("s")
wc_stopwords.add("one")
wc_stopwords.add("using")
wc_stopwords.add("example")
wc_stopwords.add("work")
wc_stopwords.add("use")
wc_stopwords.add("make")
return wc_stopwords
# get title mega str (combined str of all titles)
def get_title_combined_str(df):
title_words = []
for title in df.title.values:
title_words.extend(title.split())
return " ".join(title_words)
# get text mega str (combined str of all text)
def get_text_combined_str(df):
text_words = []
for text in df.text.values:
text_words.extend(text.split())
return " ".join(text_words)
# plot wordcloud
def plot_wordcloud_for_title_and_text(
title_wc, text_wc, title_color_func, text_color_func
):
f, axs = plt.subplots(1, 2, figsize=(20, 10))
with sns.axes_style("ticks"):
sns.despine(offset=10, trim=True)
if not title_color_func:
# default color
axs[0].imshow(title_wc, interpolation="bilinear")
axs[0].set_xlabel("Title WordCloud")
else:
# customized color
axs[0].imshow(
title_wc.recolor(color_func=title_color_func, random_state=0),
interpolation="bilinear",
)
axs[0].set_xlabel("Title WordCloud")
if not text_color_func:
axs[1].imshow(text_wc, interpolation="bilinear")
axs[1].set_xlabel("Text WordCloud")
else:
axs[1].imshow(
text_wc.recolor(color_func=text_color_func, random_state=0),
interpolation="bilinear",
)
axs[1].set_xlabel("Text WordCloud")
# display wordcloud
def wordcloud_for_title_and_text(df, title_color_func=None, text_color_func=None):
# This str will be used to create wordclouds for title & text
title_str = get_title_combined_str(df)
text_str = get_text_combined_str(df)
wc_stopwords = get_wc_stopwords()
title_wc = WordCloud(
stopwords=wc_stopwords, width=800, height=400, random_state=0
).generate(title_str)
text_wc = WordCloud(
stopwords=wc_stopwords, width=800, height=400, random_state=0
).generate(text_str)
plot_wordcloud_for_title_and_text(
title_wc, text_wc, title_color_func, text_color_func
)
wordcloud_for_title_and_text(df, wc_blue_color_func, wc_grey_color_func)
# Helper class to work with the text & title columns
# This class will help us to encapsulate info about a word
class WordInfo:
def __init__(self, word, domain, reading_time):
self.word = word
self.count = 1
self.reading_time = reading_time
self.domains = set() # domains in which it appeared
self.domains.add(domain)
def increment(self, domain, reading_time):
self.count += 1
self.domains.add(domain)
self.reading_time += reading_time
def info(self):
print(f"Word: {self.word}")
print(f"Count: {self.count}")
print(f"Domains: {list(self.domains)}")
print(f"Reading time: {self.reading_time}mins")
@staticmethod
def exists(word, dictionary):
return dictionary[word] if word in dictionary.keys() else False
@staticmethod
def increment_or_create(dictionary, word, domain, reading_time):
if word not in stopwords.words("english"):
obj = WordInfo.exists(word, dictionary)
if not obj:
dictionary[word] = WordInfo(word, domain, reading_time)
else:
obj.increment(domain, reading_time)
@staticmethod
def export_count_dict(word_dict):
_dict = {}
for wordinfo in list(word_dict.values()):
_dict[wordinfo.word] = wordinfo.count
return _dict
@staticmethod
def sort_dict_using_values(_dict):
# in-place sorting
words = np.array(list(_dict.keys()))
counts = np.array(list(_dict.values()))
sorted_idxs = counts.argsort()
sorted_counts = counts[sorted_idxs]
new_words_order = words[sorted_idxs]
# reversing the list (making it go from ascending to descending)
_counts = list(reversed(sorted_counts))
_words = list(reversed(new_words_order))
return (_counts, _words)
@classmethod
def word_count_df(cls, _dict):
word_count_dict = cls.export_count_dict(_dict)
word_count_sorted = cls.sort_dict_using_values(word_count_dict)
word_count_df = pd.DataFrame(
{"words": word_count_sorted[1], "counts": word_count_sorted[0]}
)
return word_count_df
# example
# key - words: str
# value - object: WordInfo
WORD_DICT = {}
# To test/see how our WORD_DICT will look
for word in ["hello", "world", "python", "python", "tensorflow"]:
WordInfo.increment_or_create(WORD_DICT, word, "deeplearning.io", 24)
print(WORD_DICT)
for obj in WORD_DICT.values():
print()
obj.info()
def get_title_and_text_word_dict(df):
title_word_dict = {}
text_word_dict = {}
for domain, title, text, reading_time in df[
["domain", "title", "text", "reading_time"]
].values:
for word_in_title in title.split():
WordInfo.increment_or_create(
title_word_dict, word_in_title, domain, reading_time
)
for word_in_text in text.split():
WordInfo.increment_or_create(
text_word_dict, word_in_text, domain, reading_time
)
return (title_word_dict, text_word_dict)
title_word_dict, text_word_dict = get_title_and_text_word_dict(df)
title_word_dict["medium"].info()
print()
text_word_dict["medium"].info()
print()
title_word_dict["neural"].info()
print()
text_word_dict["neural"].info()
title_word_count_df = WordInfo.word_count_df(title_word_dict)
text_word_count_df = WordInfo.word_count_df(text_word_dict)
def display_word_count(df, top=5, bottom=5):
# df here is word_count_df
f, axs = plt.subplots(1, 2, figsize=(16, 4))
# most used words
sns.barplot(
x=df.head(top).words,
y=df.head(top).counts,
color="#473991",
alpha=0.9,
ax=axs[0],
)
# least used words
sns.barplot(
x=df.tail(bottom).words,
y=df.tail(bottom).counts,
color="#399188",
alpha=0.9,
ax=axs[1],
)
axs[0].set_xlabel("Words")
axs[0].set_ylabel("Counts")
axs[1].set_xlabel("Words")
axs[1].set_ylabel("Counts")
display_word_count(title_word_count_df)
display_word_count(text_word_count_df)
# top 100 articles with respect to claps
top_atricles_wrt_claps = df.sort_values(by="claps", ascending=False).iloc[:100]
top_atricles_wrt_claps.sample(5)
wordcloud_for_title_and_text(
top_atricles_wrt_claps, wc_green_color_func, wc_red_color_func
)
# Most clapped titles & articles includes AI topics
def get_words_count(text):
info = {} # {word: count}
for word in text.split():
if word in info.keys():
info[word] += 1
else:
info[word] = 1
return info
class AuthorInfo:
# this will contain author info (note: the DataFrame.append used below was removed in pandas 2.x; pd.concat is the modern replacement)
authors_df = pd.DataFrame(
{
"name": [],
"total_claps": [],
"avg_claps": [],
"total_reading_time": [],
"avg_reading_time": [],
}
)
# this will contain author name & domains
domains_df = pd.DataFrame({"authors": [], "domains": []})
# this will contain the words used by authors & their counts, i.e. how many times each word was used
words_df = pd.DataFrame(
{
"authors": [],
"words": [],
"counts": [],
"where": [], # title or text (where is the word used)
}
)
def __init__(self, author_name, author_df):
# add author info
AuthorInfo.authors_df = AuthorInfo.authors_df.append(
{
"name": author_name,
"total_claps": author_df.claps.sum(),
"avg_claps": author_df.claps.mean(),
"total_reading_time": author_df.reading_time.sum(),
"avg_reading_time": author_df.reading_time.mean(),
},
ignore_index=True,
)
# add author domains
for domain in author_df.domain.values:
AuthorInfo.domains_df = AuthorInfo.domains_df.append(
{"authors": author_name, "domains": domain}, ignore_index=True
)
# add word count
for title, text in author_df[["title", "text"]].values:
title_info = get_words_count(title)
text_info = get_words_count(text)
AuthorInfo.add_wordcount_using_dict(title_info, author_name, "title")
AuthorInfo.add_wordcount_using_dict(text_info, author_name, "text")
@classmethod
def add_wordcount_using_dict(cls, _dict, author_name, where):
for word, count in _dict.items():
cls.words_df = cls.words_df.append(
{
"authors": author_name,
"words": word,
"counts": count,
"where": where,
},
ignore_index=True,
)
@classmethod
def get_domains_using_author_name(cls, author_name):
return (
AuthorInfo.domains_df[AuthorInfo.domains_df.authors == author_name]
.domains.unique()
.tolist()
)
@classmethod
def get_wordcount_df(cls, author_name, where, ascending=False):
return cls.words_df[
# using ['where'] since where is a method of pd.Series
(cls.words_df.authors == author_name)
& (cls.words_df["where"] == where)
].sort_values(by="counts", ascending=ascending)
@classmethod
def reset_df(cls):
cls.authors_df = pd.DataFrame(
{
"name": [],
"total_claps": [],
"avg_claps": [],
"total_reading_time": [],
"avg_reading_time": [],
}
)
cls.domains_df = pd.DataFrame({"authors": [], "domains": []})
cls.words_df = pd.DataFrame(
{"authors": [], "words": [], "counts": [], "where": []}
)
for author, author_df in top_atricles_wrt_claps.groupby(by="author"):
AuthorInfo(author, author_df)
AuthorInfo.domains_df.head()
AuthorInfo.words_df.head()
AuthorInfo.get_wordcount_df("Adam Geitgey", "title").head(10)
# the words column in AuthorInfo.words_df holds the words that appeared in a title (or text), and the counts column holds the number of times each word appeared there; because of that, there may be duplicate words in the words column
# But since several of these duplicates have identical counts, it hints that there may be some duplicate rows in df
# no duplicates
print(f"Number of duplicate rows: {len(df[df.duplicated()])}")
# checking duplication in author name, title text
print(
f"Number of duplicate rows: {len(df[df[['author', 'title', 'text']].duplicated()])}"
)
# checking where these duplicates differentiate from each other
print(
f"Number of duplicate rows: {len(df[df[['author', 'title', 'text', 'claps']].duplicated()])}"
)
print(
f"Number of duplicate rows: {len(df[df[['author', 'title', 'text', 'reading_time']].duplicated()])}"
)
print(
f"Number of duplicate rows: {len(df[df[['author', 'title', 'text', 'link']].duplicated()])}"
)
# so `link` is the column that differentiates the duplicates
# duplicate rows
print(f"Number of duplicate titles: {len(df[df[['title']].duplicated()])}")
print(f"Number of duplicate texts: {len(df[df[['text']].duplicated()])}")
def get_duplicate_dfs(df, group_by, how_many=1):
dfs = []
# considering duplicates on the basis of title & text columns & then grouping them by author
author_grp = df[df.duplicated(["title", "text"])].groupby(by=group_by)
for idx, (author, author_df) in enumerate(author_grp):
if idx <= how_many:
dfs.append(author_df)
else:
return dfs
# the `duplicated` method on a df by default flags all the duplicates `except the first` occurrence
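# A tiny illustration of that default (keep="first"); the toy Series below is just for demonstration:
_dup_demo = pd.Series(["a", "a", "b"])
print(_dup_demo.duplicated().tolist())  # [False, True, False]
print(_dup_demo.duplicated(keep=False).tolist())  # [True, True, False]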
duplicate_sample_df = get_duplicate_dfs(df, group_by="author", how_many=5)
def print_links(df):
for link in df.link.values.tolist():
print(link)
print_links(duplicate_sample_df[0])
duplicate_sample_df[0]
print_links(duplicate_sample_df[1])
duplicate_sample_df[1]
# dropping all the duplicates except for the first occurrence
# since the `link` column has unique values even for the duplicates,
# we remove the duplicate rows on the basis of author, claps, title & text
df.drop_duplicates(
["author", "claps", "title", "text"], ignore_index=True, inplace=True
)
len(df) # remaining rows
# After this `catastrophic` event we can re-run all of the analysis to correct any misinterpretations caused by these duplicate rows
wordcloud_for_title_and_text(df, wc_blue_color_func, wc_grey_color_func)
title_word_dict, text_word_dict = get_title_and_text_word_dict(df)
title_word_count_df = WordInfo.word_count_df(title_word_dict)
text_word_count_df = WordInfo.word_count_df(text_word_dict)
display_word_count(title_word_count_df)
display_word_count(text_word_count_df)
df[["claps", "reading_time"]].corr()
# the correlation increased from 0.28 to 0.32 after deduplication, but claps and reading_time still have only a low positive correlation
display_histplot_for_reading_time(df)
# top 100 articles with respect to claps
top_atricles_wrt_claps = df.sort_values(by="claps", ascending=False).iloc[:100]
top_atricles_wrt_claps.sample(5)
wordcloud_for_title_and_text(
top_atricles_wrt_claps, wc_green_color_func, wc_red_color_func
)
# Resetting the author infos with data (with no duplicates)
AuthorInfo.reset_df()
for author, author_df in top_atricles_wrt_claps.groupby(by="author"):
AuthorInfo(author, author_df)
def display_avg_claps_and_avg_reading_time(df):
f, axs = plt.subplots(1, 2, figsize=(16, 4))
sns.scatterplot(
x="avg_claps", y="avg_reading_time", data=df, palette="mako", s=80, ax=axs[0]
)
sns.histplot(
x="avg_claps", y="avg_reading_time", data=df, palette="mako", ax=axs[1]
)
display_avg_claps_and_avg_reading_time(AuthorInfo.authors_df)
# Authors whose average reading_time exceeds ~12 minutes tend not to get many claps
def get_top_words(author_name, words_df, where, top_words):
df = (
words_df[
(AuthorInfo.words_df.authors == author_name)
& (AuthorInfo.words_df["where"] == where)
]
.sort_values(by="counts", ascending=False)
.iloc[:top_words]
.values.tolist()
)
data = {}
for _, word, count, _ in df:
if word in list(data.keys()):
data[word] += count
else:
data[word] = count
return data
def get_top_authors_info(authors_df, sort_by, top=5, top_words=5):
top_author_df = authors_df.sort_values(by=sort_by, ascending=False).iloc[:top]
df = top_author_df[["name", "total_claps", "total_reading_time"]]
for author_name, total_claps, total_reading_time in df.values:
print(f"Author name: {author_name}")
print(f"Total claps: {total_claps}")
print(f"Total reading time: {total_reading_time}")
top_words_in_title = get_top_words(
author_name, AuthorInfo.words_df, "title", top_words
)
top_words_in_text = get_top_words(
author_name, AuthorInfo.words_df, "text", top_words
)
print(f"Top words used in title:")
for word, count in top_words_in_title.items():
print(f"\t{word} => {int(count)}x")
print(f"Top words used in text:")
for word, count in top_words_in_text.items():
print(f"\t{word} => {int(count)}x")
print()
# Top 5 authors info with respect to total claps
get_top_authors_info(AuthorInfo.authors_df, "total_claps")
# Top 5 authors info with respect to total reading_time
get_top_authors_info(AuthorInfo.authors_df, "total_reading_time")
|
# Disaster Tweets: A Study of Various Approaches to Text Classification
# Table of Contents
# * [Introduction](#introduction)
# * [First Look and Some Initial Thoughts](#firstlook)
# * [Preprocessing](#preprocessing)
# * [Taking Care of the keyword and location Columns](#taking)
# * [Implementing a Bag of Words Model from Scratch](#implementing)
# * [Various Vectorization Approaches using Scikit-learn](#various)
# * [Pre-trained GloVe Embeddings along with LSTM](#glove)
# * [Pre-trained BERT](#bert)
# * [Average Word2Vec using Gensim](#word2vec_average)
# * [Gensim Word2Vec with LSTM](#word2vec_lstm)
# * [Comparing All Approaches So Far](#comparing)
# * [Conclusion](#conclusion)
# * [Thank you](#thanks)
# Introduction
# In this notebook, we will be working with the Disaster Tweets competition dataset. We will go over numerous approaches, explain them along the way, and compare their results.
# Before the modelling, we will do preprocessing such as making the text all lower-case, removing stopwords if needed, etc.
# Approaches used in this notebook include:
# -> A frequency-based bag-of-words model implemented from scratch.
# -> A binary bag of words model using scikit-learn with unigrams and bigrams.
# -> A frequency-based bag-of-words model using scikit-learn with unigrams.
# -> TFIDF-based vectorization using scikit-learn with unigrams.
# -> Pre-trained GloVe Embeddings along with LSTM
# -> Pre-trained BERT
# -> Training our own Word2Vec word embeddings using Gensim and then applying two methods: Averaging and Sequence-based (LSTM)
# We will compare the results obtained from every approach.
# First Look and Some Initial Thoughts
#
import numpy as np
import pandas as pd
import gc
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
train.head()
# -> There are 5 columns. The id column will not give us any information pertaining to our modelling requirements, so we will drop it.
# -> The keyword column will definitely be useful as some keywords will be more associated with a disaster tweet and some less so.
# -> The location column we'll have to analyse first to be sure of its use.
# -> The text column is our main feature through which we will make our target prediction.
# Preprocessing
# Let's make some imports that we will require for our preprocessing steps.
# -> We will make the entire text lower-case. This reduces the dimensionality of our vectors and lets us get by with less training data.
# -> We will remove urls.
# -> We will remove html syntax.
# -> We will remove twitter mentions. If we don't do this, our vector space can end up with a very large number of dimensions, as each mention would correspond to a new, unnecessary word in the vocabulary.
# -> We will remove punctuation.
# -> For our tfidf approach, we will also remove stopwords. These are words that appear in almost every tweet and contribute little towards predicting the target. One may argue that with tfidf the removal of stopwords is not strictly necessary, since the inverse document frequency already downweights them, but I also wish to reduce the dimensionality of the vectors (see the brief tf-idf note right after this list).
# -> We will use stemming (in particular, nltk's PorterStemmer) to reduce the words to their root form. This will reduce the dimensionality of our vectors and, again, let us get by with less training data.
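# A brief note on the tf-idf point above (a standard reference formula, not something computed in this
# notebook): with scikit-learn's default smooth_idf=True, a term t in document d is weighted as
# tf(t, d) * idf(t), where idf(t) = ln((1 + n) / (1 + df(t))) + 1, n is the number of documents and
# df(t) is the number of documents containing t; the resulting vectors are then L2-normalised.
# Frequent words such as stopwords therefore receive small idf weights, which is why removing them
# under tf-idf is optional.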
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import warnings
warnings.filterwarnings("ignore")
def make_lower(text):
return text.lower()
def remove_urls(text):
url = r"https?://\S+|www\.\S+" # regex for urls beginning with http or https, and those beginning with www.
return re.sub(
url, "", text
) # We will replace matched urls with the empty string to remove them
def remove_html(text):
html = r"<.*?>"
return re.sub(html, "", text)
def remove_mentions(text):
mention = "@[A-Za-z0-9_]+"
return re.sub(mention, "", text)
def remove_punct(text):
table = str.maketrans("", "", string.punctuation)
return text.translate(table)
# str.maketrans("", "", string.punctuation) builds a translation table that maps every punctuation character to None, and text.translate(table) then drops those characters in a single pass, which makes this one of the more efficient ways of removing punctuation
def remove_stopwords(text):
new_text = []
for word in text.split():
if word in stopwords.words("english"):
new_text.append("")
else:
new_text.append(word)
return " ".join(new_text)
# Here we are building a new text. Whenever we encounter a stopword, we append an empty string in its
# place (the later join/split steps ignore the resulting extra spaces); non-stopwords are kept as they are.
porter = PorterStemmer()
def do_stemming(text):
new_text = [porter.stem(word) for word in text.split()]
return " ".join(new_text)
# We go over each word in our text, apply stemming, and then add it to our new text
train["text"] = train["text"].apply(lambda text: make_lower(text))
train["text"] = train["text"].apply(lambda text: remove_urls(text))
train["text"] = train["text"].apply(lambda text: remove_html(text))
train["text"] = train["text"].apply(lambda text: remove_mentions(text))
train["text"] = train["text"].apply(lambda text: remove_punct(text))
train["text"] = train["text"].apply(lambda text: remove_stopwords(text))
train["text"] = train["text"].apply(lambda text: do_stemming(text))
# Let's look at our dataset now.
train.head()
# Taking Care of the keyword and location Columns
#
train["keyword"].value_counts().to_frame().reset_index().head().rename(
columns={"index": "Keyword", "keyword": "Frequency"}
)
# Keywords such as 'fatalities', 'damage', etc. would imply a higher chance of the tweet being a disaster tweet.
train.groupby("keyword")["target"].mean().to_frame().reset_index().sort_values(
by=["target"], ascending=[False]
).head(10).rename(columns={"keyword": "Keyword", "target": "Target Proportion"})
# The above keywords have high mean target values, suggesting that when they are present, there is a high chance of the tweet being a disaster tweet. I will take the top 25 such keywords and make a binary feature out of them. The feature will be 1 if the row's keyword is one of these high-chance keywords, else 0.
keywords_with_high_chance_of_disaster = (
train.groupby("keyword")["target"]
.mean()
.to_frame()
.reset_index()
.sort_values(by=["target"], ascending=[False])[:25]["keyword"]
.tolist()
)
train["keyword"].fillna("n", inplace=True)
train["keyword_high_chance"] = train["keyword"].apply(
lambda keyword: 1 if keyword in keywords_with_high_chance_of_disaster else 0
)
train.drop("keyword", axis=1, inplace=True)
train.head()
# Let's look at location.
train["location"].value_counts().to_frame().reset_index().rename(
columns={"index": "Location", "location": "Frequency"}
).head(10)
# The above shows the 10 most commonly occurring locations. Let me check their disaster-tweet proportions.
high_freq_locs = (
train["location"]
.value_counts()
.to_frame()
.reset_index()
.rename(columns={"index": "Location", "location": "Frequency"})
.head(10)["Location"]
.tolist()
)
high_freq_locs
train[train["location"].isin(high_freq_locs)].groupby("location")[
"target"
].mean().to_frame().reset_index().sort_values(by=["target"], ascending=[False])
# Nigeria, India and Mumbai have a fairly high proportion of disaster tweets. I will create a binary feature out of them.
train["location_high_chance"] = train["location"].apply(
lambda location: 1 if location in ["Mumbai", "India", "Nigeria"] else 0
)
train.head()
train.drop("id", axis=1, inplace=True)
train.drop("location", axis=1, inplace=True)
results = {
"BOW_fromScratch": -1,
"BOW_binary_sklearn_unigram": -1,
"BOW_binary_sklearn_bigram": -1,
"BOW_frequency_sklearn_unigram": -1,
"Tfidf_sklearn_unigram": -1,
"GloVe": -1,
"BERT": -1,
"Word2Vec_average": -1,
"Word2Vec_lstm": -1,
}
# Implementing a Bag of Words Model from Scratch
#
# We will first create a word to index mapping.
word_to_index_mapping = {}
for text in train["text"]:
for word in text.split():
if word not in word_to_index_mapping:
word_to_index_mapping[word] = len(word_to_index_mapping)
# Displaying some word-index pairs
count = 0
for key, value in word_to_index_mapping.items():
print(key, ": ", value)
count += 1
if count == 10:
break
print("Vocabulary Size: ", len(word_to_index_mapping))
train.shape[0]
vectorized_text = np.zeros((train.shape[0], len(word_to_index_mapping)))
vectorized_text.shape
rowNumber = 0
for text in train["text"]:
for word in text.split():
index = word_to_index_mapping[word]
vectorized_text[rowNumber][index] += 1
rowNumber += 1
vectorized_text
vectorized_text_df = pd.DataFrame(vectorized_text)
vectorized_text_df.head()
train_vectorized_fromScratch = pd.concat([train, vectorized_text_df], axis=1)
train_vectorized_fromScratch.head()
train_vectorized_fromScratch.drop("text", axis=1, inplace=True)
train_vectorized_fromScratch.head()
# Let us now apply various ML models to this vectorized text data.
train_vectorized_fromScratch_X = train_vectorized_fromScratch.drop("target", axis=1)
train_vectorized_fromScratch_y = train_vectorized_fromScratch["target"]
model_results_fromScratch = {"KNN": -1, "RandomForest": -1, "LogisticRegression": -1}
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
knn_param_grid = {"n_neighbors": [3, 5]}
cv_knn = GridSearchCV(
estimator=KNeighborsClassifier(), param_grid=knn_param_grid, verbose=10
)
cv_knn.fit(train_vectorized_fromScratch_X, train_vectorized_fromScratch_y)
cv_knn.best_score_
cv_knn.best_params_
model_results_fromScratch["KNN"] = cv_knn.best_score_
# rf_param_grid = {'n_estimators': [20, 40, 60], 'max_depth': [5, 7, 10], 'min_samples_leaf': [4, 6], 'min_samples_split': [5, 10]}
# cv_rf = RandomizedSearchCV(estimator=RandomForestClassifier(), param_distributions = rf_param_grid, random_state=42, verbose=10)
# cv_rf.fit(train_vectorized_fromScratch_X, train_vectorized_fromScratch_y)
# cv_rf.best_score_
# cv_rf.best_params_
# Upon running RandomizedSearchCV for Random Forest, I got optimal values as:
# {'n_estimators': 400,
# 'min_samples_split': 12,
# 'min_samples_leaf': 4,
# 'max_depth': 50}
# I will use these values now and will not be doing randomized search for subsequent versions of this notebook
from sklearn.model_selection import KFold
def perform_cross_validation(model, dataset_X, dataset_y):
kf = KFold(n_splits=5)
cv_scores_test = []
for train_indices, test_indices in kf.split(dataset_X):
train = dataset_X.iloc[train_indices, :]
train_targets = dataset_y.iloc[train_indices]
test = dataset_X.iloc[test_indices, :]
test_targets = dataset_y.iloc[test_indices]
model.fit(train, train_targets)
cv_scores_test.append(model.score(test, test_targets))
return sum(cv_scores_test) / 5
model_results_fromScratch["RandomForest"] = perform_cross_validation(
RandomForestClassifier(
n_estimators=400, min_samples_split=12, min_samples_leaf=4, max_depth=50
),
train_vectorized_fromScratch_X,
train_vectorized_fromScratch_y,
)
model_results_fromScratch["LogisticRegression"] = perform_cross_validation(
LogisticRegression(), train_vectorized_fromScratch_X, train_vectorized_fromScratch_y
)
model_results_fromScratch
currentMax = -1
for key, value in model_results_fromScratch.items():
if value > currentMax:
currentMax = value
results["BOW_fromScratch"] = currentMax
results
del word_to_index_mapping
del vectorized_text
del train_vectorized_fromScratch
del train_vectorized_fromScratch_X
del train_vectorized_fromScratch_y
del model_results_fromScratch
gc.collect()
del vectorized_text_df
gc.collect()
# Various Vectorization Approaches using Scikit-learn
from sklearn.feature_extraction.text import CountVectorizer
bow_binary_unigram = CountVectorizer(max_features=6500, binary=True)
train_vectorized_bow_binary_unigram = pd.DataFrame(
bow_binary_unigram.fit_transform(train["text"]).toarray()
)
train_vectorized_bow_binary_unigram = pd.concat(
[train, train_vectorized_bow_binary_unigram], axis=1
)
train_vectorized_bow_binary_unigram.drop("text", axis=1, inplace=True)
train_vectorized_bow_binary_unigram.head()
train_vectorized_bow_binary_unigram_X = train_vectorized_bow_binary_unigram.drop(
"target", axis=1
)
train_vectorized_bow_binary_unigram_y = train_vectorized_bow_binary_unigram["target"]
model_results_bow_binary_unigram = {
"KNN": -1,
"RandomForest": -1,
"LogisticRegression": -1,
}
# To save time, I am going to reuse the tuned hyperparameters found previously and won't be doing grid search CV again.
model_results_bow_binary_unigram["KNN"] = perform_cross_validation(
KNeighborsClassifier(n_neighbors=cv_knn.best_params_["n_neighbors"]),
train_vectorized_bow_binary_unigram_X,
train_vectorized_bow_binary_unigram_y,
)
model_results_bow_binary_unigram["RandomForest"] = perform_cross_validation(
RandomForestClassifier(
n_estimators=400, min_samples_split=12, min_samples_leaf=4, max_depth=50
),
train_vectorized_bow_binary_unigram_X,
train_vectorized_bow_binary_unigram_y,
)
model_results_bow_binary_unigram["LogisticRegression"] = perform_cross_validation(
LogisticRegression(),
train_vectorized_bow_binary_unigram_X,
train_vectorized_bow_binary_unigram_y,
)
model_results_bow_binary_unigram
currentMax = -1
for key, value in model_results_bow_binary_unigram.items():
if value > currentMax:
currentMax = value
results["BOW_binary_sklearn_unigram"] = currentMax
results
del train_vectorized_bow_binary_unigram
del train_vectorized_bow_binary_unigram_X
del train_vectorized_bow_binary_unigram_y
del model_results_bow_binary_unigram
gc.collect()
bow_binary_bigram = CountVectorizer(max_features=10000, binary=True, ngram_range=(2, 2))
train_vectorized_bow_binary_bigram = pd.DataFrame(
bow_binary_bigram.fit_transform(train["text"]).toarray()
)
train_vectorized_bow_binary_bigram = pd.concat(
[train, train_vectorized_bow_binary_bigram], axis=1
)
train_vectorized_bow_binary_bigram.drop("text", axis=1, inplace=True)
train_vectorized_bow_binary_bigram.head()
train_vectorized_bow_binary_bigram_X = train_vectorized_bow_binary_bigram.drop(
"target", axis=1
)
train_vectorized_bow_binary_bigram_y = train_vectorized_bow_binary_bigram["target"]
model_results_bow_binary_bigram = {
"KNN": -1,
"RandomForest": -1,
"LogisticRegression": -1,
}
model_results_bow_binary_bigram["KNN"] = perform_cross_validation(
KNeighborsClassifier(n_neighbors=cv_knn.best_params_["n_neighbors"]),
train_vectorized_bow_binary_bigram_X,
train_vectorized_bow_binary_bigram_y,
)
model_results_bow_binary_bigram["RandomForest"] = perform_cross_validation(
RandomForestClassifier(
n_estimators=400, min_samples_split=12, min_samples_leaf=4, max_depth=50
),
train_vectorized_bow_binary_bigram_X,
train_vectorized_bow_binary_bigram_y,
)
model_results_bow_binary_bigram["LogisticRegression"] = perform_cross_validation(
LogisticRegression(),
train_vectorized_bow_binary_bigram_X,
train_vectorized_bow_binary_bigram_y,
)
model_results_bow_binary_bigram
currentMax = -1
for key, value in model_results_bow_binary_bigram.items():
if value > currentMax:
currentMax = value
results["BOW_binary_sklearn_bigram"] = currentMax
results
del train_vectorized_bow_binary_bigram
del train_vectorized_bow_binary_bigram_X
del train_vectorized_bow_binary_bigram_y
del model_results_bow_binary_bigram
gc.collect()
bow_frequency_unigram = CountVectorizer(max_features=6500, binary=False)
train_vectorized_bow_frequency_unigram = pd.DataFrame(
bow_frequency_unigram.fit_transform(train["text"]).toarray()
)
train_vectorized_bow_frequency_unigram = pd.concat(
[train, train_vectorized_bow_frequency_unigram], axis=1
)
train_vectorized_bow_frequency_unigram.drop("text", axis=1, inplace=True)
train_vectorized_bow_frequency_unigram.head()
train_vectorized_bow_frequency_unigram_X = train_vectorized_bow_frequency_unigram.drop(
"target", axis=1
)
train_vectorized_bow_frequency_unigram_y = train_vectorized_bow_frequency_unigram[
"target"
]
model_results_bow_frequency_unigram = {
"KNN": -1,
"RandomForest": -1,
"LogisticRegression": -1,
}
model_results_bow_frequency_unigram["KNN"] = perform_cross_validation(
KNeighborsClassifier(n_neighbors=cv_knn.best_params_["n_neighbors"]),
train_vectorized_bow_frequency_unigram_X,
train_vectorized_bow_frequency_unigram_y,
)
model_results_bow_frequency_unigram["RandomForest"] = perform_cross_validation(
RandomForestClassifier(
n_estimators=400, min_samples_split=12, min_samples_leaf=4, max_depth=50
),
train_vectorized_bow_frequency_unigram_X,
train_vectorized_bow_frequency_unigram_y,
)
model_results_bow_frequency_unigram["LogisticRegression"] = perform_cross_validation(
LogisticRegression(),
train_vectorized_bow_frequency_unigram_X,
train_vectorized_bow_frequency_unigram_y,
)
model_results_bow_frequency_unigram
currentMax = -1
for key, value in model_results_bow_frequency_unigram.items():
if value > currentMax:
currentMax = value
results["BOW_frequency_sklearn_unigram"] = currentMax
results
del train_vectorized_bow_frequency_unigram
del train_vectorized_bow_frequency_unigram_X
del train_vectorized_bow_frequency_unigram_y
del model_results_bow_frequency_unigram
gc.collect()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_unigram = TfidfVectorizer(max_features=6500)
train_vectorized_tfidf_unigram = pd.DataFrame(
tfidf_unigram.fit_transform(train["text"]).toarray()
)
train_vectorized_tfidf_unigram = pd.concat(
[train, train_vectorized_tfidf_unigram], axis=1
)
train_vectorized_tfidf_unigram.drop("text", axis=1, inplace=True)
train_vectorized_tfidf_unigram.head()
train_vectorized_tfidf_unigram_X = train_vectorized_tfidf_unigram.drop("target", axis=1)
train_vectorized_tfidf_unigram_y = train_vectorized_tfidf_unigram["target"]
model_results_tfidf_unigram = {"KNN": -1, "RandomForest": -1, "LogisticRegression": -1}
model_results_tfidf_unigram["KNN"] = perform_cross_validation(
KNeighborsClassifier(n_neighbors=cv_knn.best_params_["n_neighbors"]),
train_vectorized_tfidf_unigram_X,
train_vectorized_tfidf_unigram_y,
)
model_results_tfidf_unigram["RandomForest"] = perform_cross_validation(
RandomForestClassifier(
n_estimators=400, min_samples_split=12, min_samples_leaf=4, max_depth=50
),
train_vectorized_tfidf_unigram_X,
train_vectorized_tfidf_unigram_y,
)
model_results_tfidf_unigram["LogisticRegression"] = perform_cross_validation(
LogisticRegression(),
train_vectorized_tfidf_unigram_X,
train_vectorized_tfidf_unigram_y,
)
model_results_tfidf_unigram
currentMax = -1
for key, value in model_results_tfidf_unigram.items():
if value > currentMax:
currentMax = value
results["Tfidf_sklearn_unigram"] = currentMax
results
del train_vectorized_tfidf_unigram
del train_vectorized_tfidf_unigram_X
del train_vectorized_tfidf_unigram_y
del model_results_tfidf_unigram
gc.collect()
del train
gc.collect()
# Pretrained GloVe Embeddings along with LSTM
# Quoting nlp.stanford.edu, "GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and the resulting representations showcase interesting linear substructures of the word vector space."
# We will download pre-trained vector representations, available at https://github.com/stanfordnlp/GloVe. We will go with the Twitter archive (2B tweets, 27B tokens, 1.2M vocab, uncased, 1.42 GB download) and use its 100-dimensional vectors.
# We will reload our train dataset, and this time during preprocessing, we will not remove stopwords. The stopwords can provide additional context for a sequence-based model such as LSTM.
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
train["text"] = train["text"].apply(lambda text: make_lower(text))
train["text"] = train["text"].apply(lambda text: remove_urls(text))
train["text"] = train["text"].apply(lambda text: remove_html(text))
train["text"] = train["text"].apply(lambda text: remove_mentions(text))
train["text"] = train["text"].apply(lambda text: remove_punct(text))
train["text"] = train["text"].apply(lambda text: do_stemming(text))
train.head()
all_texts = train["text"].tolist()
all_targets = train["target"].tolist()
import random
random.Random(1337).shuffle(all_texts)
random.Random(1337).shuffle(all_targets)
del train
gc.collect()
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
max_vocab_size = 6500
tokenizer = Tokenizer(num_words=max_vocab_size)
tokenizer.fit_on_texts(all_texts)
int_texts = tokenizer.texts_to_sequences(all_texts)
int_texts[:1]
# The tokenizer's fit_on_texts function created an internal word-to-index mapping for each word in the vocabulary. Then, texts_to_sequences converted the words of the tweets to their indices using that mapping.
#
int_texts = pad_sequences(int_texts, maxlen=30)
int_texts[:1]
# Next, we pad each tweet with 0s so that all sequences have the same length, which is required before feeding them into the LSTM. (The word-index mapping starts at 1, not 0, precisely because index 0 is reserved for padding.) I chose 30 as the sequence length since it seemed reasonable: tweets with more than 30 words are truncated and lose some information, but very few tweets are that long.
#
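# A tiny illustrative example (not part of the original pipeline) of what fit_on_texts,
# texts_to_sequences and pad_sequences do on a toy corpus; the sentences are made up.
toy_tokenizer = Tokenizer(num_words=50)
toy_tokenizer.fit_on_texts(["fire in the hills", "the hills are calm"])
print(toy_tokenizer.word_index)  # most frequent words get the lowest indices, starting at 1
toy_seqs = toy_tokenizer.texts_to_sequences(["fire in the hills"])
print(pad_sequences(toy_seqs, maxlen=6))  # zeros are padded on the left by default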
int_texts.shape
all_targets_np = np.array(all_targets)
# Let's now download and unzip the pretrained GloVe word vectors.
#
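# A minimal sketch of the download/unzip step, which is not shown in the original run.
# The URL below is the archive commonly linked from the GloVe project page and is an
# assumption here -- verify it before running (the ~1.4 GB download takes a while).
import urllib.request
import zipfile
glove_zip_path = "/kaggle/working/glove.twitter.27B.zip"
urllib.request.urlretrieve(
    "https://nlp.stanford.edu/data/glove.twitter.27B.zip", glove_zip_path
)
with zipfile.ZipFile(glove_zip_path) as zf:
    # extract only the 100-dimensional vectors used below
    zf.extract("glove.twitter.27B.100d.txt", "/kaggle/working/")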
path_to_glove_file = "/kaggle/working/glove.twitter.27B.100d.txt"
embeddings_index = {}
with open(path_to_glove_file) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
print(f"Found {len(embeddings_index)} word vectors")
#
# The dictionary embeddings_index now contains key-value pairs, where each key is a word and the corresponding value is the word's GloVe vector.
# Since each tweet is now represented as a sequence of integers, we need a mapping from a word's index to its GloVe vector. Index 0 is reserved for padding, and its vector is kept as a zero-vector. We will form a matrix (a 2-D numpy array) where row i holds the GloVe vector of the word with index i in the vocabulary. Since index 0 does not correspond to any word, our actual vocabulary starts from index 1, so the matrix will have vocabulary_size + 1 rows.
word2idx = tokenizer.word_index
vocab_size = len(word2idx)
embedding_matrix = np.zeros((vocab_size + 1, 100))
for word, index in word2idx.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
embedding_matrix.shape
#
# The number of columns is 100 since we have 100-dimensional vectors.
#
del embeddings_index
gc.collect()
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
embedding_layer = layers.Embedding(
vocab_size + 1,
100,
embeddings_initializer=keras.initializers.Constant(embedding_matrix),
trainable=False,
mask_zero=True,
)
inputs = keras.Input(shape=(30,), dtype="int64")
embedded = embedding_layer(inputs)
x = layers.Bidirectional(layers.LSTM(32))(embedded)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
from sklearn.model_selection import KFold
def perform_cross_validation_NN(model, dataset_X, dataset_y):
kf = KFold(n_splits=5)
cv_scores_test = []
for train_indices, test_indices in kf.split(dataset_X):
train = dataset_X[train_indices, :]
train_targets = dataset_y[train_indices]
test = dataset_X[test_indices, :]
test_targets = dataset_y[test_indices]
model.compile(
optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"]
)
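        # Note: compile() does not re-initialize the layer weights, so the same network
        # keeps training across folds; the reported CV score is therefore somewhat
        # optimistic compared to re-building the model for every split.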
model.fit(train, train_targets, epochs=10, batch_size=32)
cv_scores_test.append(model.evaluate(test, test_targets)[1])
print("-------------")
return sum(cv_scores_test) / 5
results["GloVe"] = perform_cross_validation_NN(model, int_texts, all_targets_np)
results
del embedding_matrix
gc.collect()
# Pre-trained BERT
# We are going to use tensorflow/bert_en_uncased_L-12_H-768_A-12 from Tensorflow Hub. Along with it, we are going to use the preprocessor bert_en_uncased_preprocess.
# Links for these:
# https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4
# https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
bert_preprocess = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
)
bert_encoder = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4"
)
from tensorflow import keras
from tensorflow.keras import layers
text_input = layers.Input(shape=(), dtype=tf.string)
preprocessed_text = bert_preprocess(text_input)
outputs = bert_encoder(preprocessed_text)
l = layers.Dropout(0.1)(outputs["pooled_output"])
l = layers.Dense(1, activation="sigmoid")(l)
model = keras.Model(inputs=[text_input], outputs=[l])
all_texts_np = np.array(all_texts)
del all_texts
gc.collect()
del all_targets
gc.collect()
from sklearn.model_selection import KFold
def perform_cross_validation_BERT(model, dataset_X, dataset_y):
kf = KFold(n_splits=3)
cv_scores_test = []
for train_indices, test_indices in kf.split(dataset_X):
train = dataset_X[train_indices]
train_targets = dataset_y[train_indices]
test = dataset_X[test_indices]
test_targets = dataset_y[test_indices]
model.compile(
optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"]
)
train = tf.convert_to_tensor(train)
train_targets = tf.convert_to_tensor(train_targets)
test = tf.convert_to_tensor(test)
test_targets = tf.convert_to_tensor(test_targets)
model.fit(train, train_targets, epochs=5, batch_size=32)
cv_scores_test.append(model.evaluate(test, test_targets)[1])
print("-------------")
    return sum(cv_scores_test) / len(cv_scores_test)
results["BERT"] = perform_cross_validation_BERT(model, all_texts_np, all_targets_np)
results
# Average Word2Vec using Gensim
# We will train our own word embeddings using gensim. For a given sentence, we will average the word vectors of each word to obtain the sentence vector.
from gensim.models import Word2Vec
sentences = [sentence.split() for sentence in all_texts_np.tolist()]
w2v_model = Word2Vec(sentences, vector_size=100, window=5, workers=4)
w2v_model.wv["keep"]
def vectorize(sentence):
words = sentence.split()
words_vecs = [w2v_model.wv[word] for word in words if word in w2v_model.wv]
if len(words_vecs) == 0:
return np.zeros(100)
words_vecs = np.array(words_vecs)
return words_vecs.mean(axis=0)
all_texts_av_vecs = np.array(
[vectorize(sentence) for sentence in all_texts_np.tolist()]
)
all_texts_av_vecs[0]
# The above is the vector for the first sentence in all_texts
all_texts_av_vecs.shape
all_targets_np.shape
inputs = keras.Input(shape=(100,), dtype="float32")  # averaged word vectors are floats, not integers
x = layers.Dense(50, activation="relu")(inputs)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
from sklearn.model_selection import KFold
def perform_cross_validation_w2v(model, dataset_X, dataset_y):
kf = KFold(n_splits=3)
cv_scores_test = []
for train_indices, test_indices in kf.split(dataset_X):
train = dataset_X[train_indices, :]
train_targets = dataset_y[train_indices]
test = dataset_X[test_indices, :]
test_targets = dataset_y[test_indices]
model.compile(
optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"]
)
model.fit(train, train_targets, epochs=10, batch_size=32)
cv_scores_test.append(model.evaluate(test, test_targets)[1])
print("-------------")
return sum(cv_scores_test) / 3
del all_texts_np
gc.collect()
del sentences
gc.collect()
results["Word2Vec_average"] = perform_cross_validation_w2v(
model, all_texts_av_vecs, all_targets_np
)
results
del all_texts_av_vecs
gc.collect()
# Gensim Word2Vec with LSTM
# We will use our gensim-trained embeddings to create an embedding matrix. In case of GloVe, we used pre-trained embeddings. But this time, the embeddings have been obtained through this particular dataset that we are using. Then, similar to our GloVe case, we will use LSTM followed by a dense layer to do classification.
int_texts.shape
int_texts[:1]
embedding_matrix = np.zeros((vocab_size + 1, 100))
for word, index in word2idx.items():
    if word in w2v_model.wv:
        embedding_matrix[index] = w2v_model.wv[word]
embedding_matrix.shape
embedding_layer = layers.Embedding(
vocab_size + 1,
100,
embeddings_initializer=keras.initializers.Constant(embedding_matrix),
trainable=False,
mask_zero=True,
)
inputs = keras.Input(shape=(30,), dtype="int64")
embedded = embedding_layer(inputs)
x = layers.Bidirectional(layers.LSTM(32))(embedded)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
del w2v_model
gc.collect()
results["Word2Vec_lstm"] = perform_cross_validation_NN(model, int_texts, all_targets_np)
results
del embedding_matrix
gc.collect()
# Comparing All Approaches So Far
#
results
keys = list(results.keys())
values = list(results.values())
sorted_value_index = np.argsort(values)
sorted_results = {keys[i]: values[i] for i in sorted_value_index}
results_y = list(sorted_results.keys())
results_x = list(sorted_results.values())
results_y.reverse()
results_x.reverse()
import matplotlib.pyplot as plt
import seaborn as sns
fig, ax = plt.subplots()
sns.barplot(x=results_x, y=results_y, ax=ax, palette="Set1")
ax.set_title("Comparison of Various Approaches")
ax.set_xlabel("Accuracy Score")
ax.set_ylabel("Approach")
plt.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
df = pd.read_csv("/kaggle/input/suv-nanze/suv.csv")
df.drop("User ID", axis=1, inplace=True)
df.head(5)
df.Gender = pd.get_dummies(df.Gender, drop_first=True)
X = df.to_numpy()
np.random.seed(0)
X = X[np.random.permutation(X.shape[0])]
y = X[:, -1]
X = X[:, :-1]
split = int(X.shape[0] * 0.8)
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
from sklearn.tree import DecisionTreeClassifier
"""
baggingClassifer > base estimator > number of differnet models to train from the base estimator
max_samples > to each of the models how many samples I show
max_features > how many features for each model
Bootstrap in BaggingClassifier in sklearn is a parameter that controls whether samples are drawn
with replacement or not1. If bootstrap is True, then each base classifier is trained on a random
subset of the original dataset with replacement, meaning that some samples may appear more than
once in the subset. This is also known as bagging2. If bootstrap is False, then each base classifier
is trained on a random subset of the original dataset without replacement, meaning that each sample
appears at most once in the subset. This is also known as pasting2.
"""
clf = BaggingClassifier(
DecisionTreeClassifier(max_depth=2, splitter="random"),
n_estimators=100,
max_samples=0.8,
)
# bootstrap > for bagging if true else pasting + max_samples > can be int or float(for percentage)
# bootstrap features > for features + max_features
# n jobs
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# random search
for i in range(10000):
n = np.random.randint(10, 1000)
s = np.random.randint(4, 1000)
clf = BaggingClassifier(DecisionTreeClassifier(), n_estimators=n, max_samples=s)
# clf.fit(X_train,y_train)
# clf.score(X_test,y_test)
# compare the results
# grid search
for n in [10, 30, 50, 90, 130]:
for s in [40, 80, 130, 170, 260, 320]:
clf = BaggingClassifier(DecisionTreeClassifier(), n_estimators=n, max_samples=s)
# clf.fit(X_train,y_train)
# clf.score(X_test,y_test)
# compare the results
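# A minimal sketch (not in the original notebook) of automating the comparison above with
# sklearn's GridSearchCV; the parameter grid below is illustrative only.
from sklearn.model_selection import GridSearchCV
param_grid = {"n_estimators": [10, 50, 130], "max_samples": [0.5, 0.8, 1.0]}
grid = GridSearchCV(
    BaggingClassifier(DecisionTreeClassifier(max_depth=2)),
    param_grid,
    cv=3,
    n_jobs=-1,
)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)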
# n jobs=-1 > use all of the processors
clf = BaggingClassifier(
DecisionTreeClassifier(max_depth=2), n_estimators=10, max_samples=150, n_jobs=-1
)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
|
# ***
# # *Predictive Maintenance Study*
# This study is based on a Kaggle database on predictive maintenance made available to the general public (https://www.kaggle.com/datasets/shivamb/machine-predictive-maintenance-classification). Its primary objectives are to build machine learning models able to make predictions about:
# 1. If a failure on the machine will occur during its operation, and;
# 2. If so, which kind of failure type is associated.
# Kaggle's repository has the following remarks on this database:
# Machine Predictive Maintenance Classification Dataset
# Since real predictive maintenance datasets are generally difficult to obtain and in particular difficult to publish, we present and provide a synthetic dataset that reflects real predictive maintenance encountered in the industry to the best of our knowledge.
# The dataset consists of 10 000 data points stored as rows with 14 features in columns
# * UID: unique identifier ranging from 1 to 10000
# * productID: consisting of a letter L, M, or H for low (50% of all products), medium (30%), and high (20%) as product quality variants and a variant-specific serial number
# * air temperature [K]: generated using a random walk process later normalized to a standard deviation of 2 K around 300 K
# * process temperature [K]: generated using a random walk process normalized to a standard deviation of 1 K, added to the air temperature plus 10 K.
# * rotational speed [rpm]: calculated from a power of 2860 W, overlaid with normally distributed noise
# * torque [Nm]: torque values are normally distributed around 40 Nm with an σ = 10 Nm and no negative values.
# * tool wear [min]: the quality variants H/M/L add 5/3/2 minutes of tool wear to the used tool in the process, and a
# * 'machine failure' label that indicates whether the machine has failed in this particular data point because any of the following failure modes is true.
# ***
# ## Data And Module Importing - First Look - Renaming columns - Setting Theme
# Importing main modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Creating dataframe from source csv
data = pd.read_csv(
"/kaggle/input/machine-predictive-maintenance-classification/predictive_maintenance.csv"
)
# Data info: columns, features, datatypes, counts, nulls
data.info()
# First data samples
data.head()
# Summary statistics in a glance
data.describe(include="all")
# Renaming columns to facilitate understanding and coding
# Rename dictionary
old_labels = data.columns
new_labels = [
"uid",
"prod_id",
"prod_type",
"air_temp",
"process_temp",
"rot_speed",
"torque",
"tool_wear",
"target",
"failure_type",
]
data.columns = new_labels
# Setting color palette for plotting in advance
sns.set_theme(style="darkgrid", palette="icefire")
# ***
# ## Exploratory Data Analysis
# Before choosing and implementing any machine learning algorithm for failure prediction, it's important to explore the data, distinguish categorical from numerical data, and get to know some statistics (centrality, frequency, spread), as well as look for associations between variables, since these build a better prior understanding and help select the features that may enrich the model the most.
# Categorical variables classes
print(data.prod_type.unique())
print(data.failure_type.unique())
# Product Type Categories and Counts
data.prod_type.value_counts()
plt.pie(data.prod_type.value_counts(), labels=data.prod_type.unique())
plt.show()
# Binary Target Variable - Failure / No Failure
data.target.value_counts()
data.target.value_counts() / len(data)
plt.pie(data.target.value_counts(), labels=data.target.unique())
plt.show()
# Failure Types Categories and Counts
data.failure_type.value_counts()
# **Note:**
# The target variable (Failure / No Failure) is highly imbalanced!
# **Note:** The number of failed samples in the target column is 9 units higher than the failure type column suggests!
# Investigating the difference between No Failure labeled sample number in Target and in Type
failure_data = data[["target", "failure_type"]][data["target"] == 0]
failure_data.value_counts()
failure_data_type = data[["target", "failure_type"]][
data["failure_type"] == "No Failure"
]
failure_data_type.value_counts()
# **Note:**
# We can see that 9 events were classified as No Failure even though their targets were labeled 1.
# Also, 18 events had their failure type labeled as Random Failures while their target was 0 (No Failure). These samples will be removed from the dataframe.
# Correcting Data Inconsistencies
data = data.drop(
data[
((data.failure_type == "Random Failures") & (data.target == 0))
| ((data.target == 1) & (data.failure_type == "No Failure"))
].index
)
# Contingency table of proportions for categorical variables
# Product Type and Target (Failure)
contingency_target_type = pd.crosstab(data.prod_type, data.target)
contingency_target_type / len(data) * 100
label_dict = dict(zip(new_labels, old_labels))
print(label_dict)
# Applying extra time to tool wear according to the rule H/M/L = 5/3/2 minutes added in the process
data.tool_wear = data.apply(
lambda row: row["tool_wear"] + 5
if row["prod_type"] == "H"
else row["tool_wear"] + 3
if row["prod_type"] == "M"
else row["tool_wear"] + 2,
axis=1,
)
data.describe()
# Plotting scatter / histograms plots for numerical values
numerical = ["air_temp", "process_temp", "rot_speed", "torque", "tool_wear", "target"]
num_dict = {key: label_dict[key] for key in numerical}
for key, value in num_dict.items():
if key != "target":
plt.figure(figsize=(12, 5))
ax = plt.subplot(1, 2, 1)
sns.histplot(data[key], bins=20)
plt.xlabel(value)
plt.ylabel("Counts")
ax = plt.subplot(1, 2, 2)
sns.boxplot(x=key, data=data)
plt.xlabel(value)
plt.show()
# Inspecting Scatter Plots looking for Patterns in Numerical Data
plt.figure(figsize=(10, 10))
sns.pairplot(data[numerical], hue="target", plot_kws=dict(alpha=0.5))
# Investigating cross correlation between numerical variables
data_corr = data[numerical].corr()
plt.figure(figsize=(10, 8))
corrplot = sns.heatmap(data_corr, vmin=-1, vmax=1, cmap="icefire", annot=True)
# **Note:**
# Pair plots as well as the correlation plot show two pairs of variables which are highly correlated to each other, meaning that only one of them (within a pair) may be used as a feature, the other not contributing as much to model enhancement. The variable pairs are:
# * Air Temperature and Process Temperature (process temperatures are proportional to air temperatures)
# * Rotational Speed and Torque (which indicates the machine majoritarily operates in a flat power curve, since power is torque times rotational speed)
#
# For target (failure) classification purposes, the author chooses to carry on the analysis keeping Process Temperature and Torque as features. The reason for choosing this path is twofold:
# * Physical: In engineering, torque is directly correlated to stress. Process temperature, intuitively, has higher correlation to physical stresses induced on the sample than air temperature (not taking into account the way the data table was constructed)
# * Mathematical: the process temperature distribution resembles a normal distribution more closely than the ambient temperature does, which is one of the assumptions for logistic regression. The same applies to Torque in comparison with rotational speed, whose histogram presents a large skew.
# For the job of classifying failures among different categories, the other two discarded features (Air Temperature and Rotational Speed) may be used, given data transformations will be applied to make them more suitable for the algorithms.
# **Note:**
# The purple spots on the pair plots show in which conditions the failure event happened. For the chosen variables, especially Torque and Tool Wear, failures tend to happen at the extremes of values, as can be seen from the concentration of purple dots far from the middle of the respective graphs.
# ***
# ## Failure Classification using Logistic Regression
# **Note:**
# Logistic Regression will be applied to derive a machine learning model for predicting failure using only three features:
# 1. Tool Wear
# 2. Torque
# 3. Process Temperature
#
# for the reasons discussed above.
# **Note:**
# Target classes are highly imbalanced (3.5% are labeled as failure, denoted by 1), so some sort of balancing measure is needed. First, a traditional approach will be applied, using stratification on the data split and class weighting during Logistic Regression fitting, together with a variable threshold. Then, as a second path, SMOTE will be used.
# **Note:**
# In the context of this work (predicting machine failure), I assume the most important metric is Recall, since a false negative (predicting no failure when one will happen) carries the largest cost impact. False positives are the second most important issue, since deciding to stop production in favor of maintenance also impacts costs and productivity, so the F1 score will be taken into account as well. Since Recall is assumed to be the most important metric here, SMOTE will be used as a data augmentation technique to counter the class imbalance; SMOTE is recognized to favor recall over precision, weighting false negatives more heavily than false positives.
# Importing relevant modules
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
precision_score,
recall_score,
f1_score,
classification_report,
)
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, roc_auc_score
# X and y vectors (features and target)
X = data[["torque", "process_temp", "tool_wear"]]
y = data["target"]
# Standardization
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
# Train and Test data split with stratification (counter class imbalance)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=74, stratify=y
)
print(y_train.value_counts())
print(y_test.value_counts())
# Model fit
lr = LogisticRegression(class_weight="balanced")
lr.fit(X_train, y_train)
# Predictions
y_pred = lr.predict(X_test)
# Predicted Probabilities
y_pred_prob = lr.predict_proba(X_test)
print(y_pred_prob)
# Confusion Matrix
print("Confusion matrix: ")
print(confusion_matrix(y_test, y_pred))
# Accuracy Score ( TP + TN / (TP + FP + TN + FN) )
print("\nAccuracy Score: ")
print(accuracy_score(y_test, y_pred))
# Recall Score ( TP / (TP + FN) )
print("\nRecall Score: ")
print(recall_score(y_test, y_pred))
# Precision Score ( TP / (TP + FP) )
print("\nPrecision Score: ")
print(precision_score(y_test, y_pred))
# F1 Score ( 2 * Precision * Recall / (Precision + Recall) )
print("\nF1 Score: ")
print(f1_score(y_test, y_pred))
# Deriving the ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob[:, 1])
plt.figure(figsize=(10, 8))
plt.plot(fpr, tpr, color="darkorange", label="ROC curve")
idx = list(range(len(thresholds)))[1::10]
for i in idx:
plt.text(fpr[i], tpr[i], thresholds[i].round(2))
plt.show()
# ROC AUC Score
roc_auc = roc_auc_score(y_test, y_pred_prob[:, 1])
print(f"ROC AUC score: {roc_auc}")
# **Note:**
# Now SMOTE technique will be applied to balance the classes so as to improve the most important classification metrics.
# Importing SMOTE module from imbalance library
from imblearn.over_sampling import SMOTE
# Apply SMOTE to train data
smote = SMOTE()
X_resampled, y_resampled = smote.fit_resample(X_train, y_train)
y_resampled.value_counts()
# Fitting new Logistic Regression Model
lr_smote = LogisticRegression()
lr_smote.fit(X_resampled, y_resampled)
# Predicting outcomes
y_smote_pred = lr_smote.predict(X_test)
# Predicted Probabilities
y_smote_prob = lr_smote.predict_proba(X_test)
# Confusion Matrix
print("Confusion matrix: ")
print(confusion_matrix(y_test, y_smote_pred))
# Accuracy Score ( TP + TN / (TP + FP + TN + FN) )
print("\nAccuracy Score: ")
print(accuracy_score(y_test, y_smote_pred))
# Recall Score ( TP / (TP + FN) )
print("\nRecall Score: ")
print(recall_score(y_test, y_smote_pred))
# Precision Score ( TP / (TP + FP) )
print("\nPrecision Score: ")
print(precision_score(y_test, y_smote_pred))
# F1 Score ( 2 * Precision * Recall / (Precision + Recall) )
print("\nF1 Score: ")
print(f1_score(y_test, y_smote_pred))
# Deriving the ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_smote_prob[:, 1])
plt.figure(figsize=(10, 8))
plt.plot(fpr, tpr, color="darkorange", label="ROC curve")
idx = list(range(len(thresholds)))[1::10]
for i in idx:
plt.text(fpr[i], tpr[i], thresholds[i].round(2))
plt.show()
# ROC AUC Score
roc_auc = roc_auc_score(y_test, y_smote_prob[:, 1])
print(f"ROC AUC score: {roc_auc}")
# **Note:**
# Classification metrics are very similar whether logistic regression is fitted with class_weight="balanced" or on SMOTE-resampled data, suggesting that re-weighting the classes during fitting counteracts the imbalance to a similar degree as generating synthetic minority samples. Note that these results are for the standard classification threshold of 0.5.
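# A small illustrative check (not in the original analysis): class_weight="balanced"
# re-weights the loss with n_samples / (n_classes * bincount(y)) instead of creating
# synthetic samples the way SMOTE does, which explains the similar metrics.
from sklearn.utils.class_weight import compute_class_weight
balanced_weights = compute_class_weight(
    class_weight="balanced", classes=np.unique(y_train), y=y_train
)
print(dict(zip(np.unique(y_train), balanced_weights)))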
# Optimal threshold research
threshold_list = [i / 10 for i in range(1, 10)]
conf_list = []
recall_list = []
f1_score_list = []
for i in range(1, 10):
y_pred_class = (y_pred_prob[:, 1] > i / 10) * 1.0
conf_list.append(confusion_matrix(y_test, y_pred_class))
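# Note: sklearn's confusion_matrix is laid out [[TN, FP], [FN, TP]], so the ratios below
# are computed from the no-failure (negative) class counts: j[0][0] / (j[0][0] + j[1][0])
# is TN / (TN + FN) and j[0][0] / (j[0][0] + j[0][1]) is TN / (TN + FP).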
for j in conf_list:
recall = j[0][0] / (j[0][0] + j[1][0])
precision = j[0][0] / (j[0][0] + j[0][1])
f1_score = 2 * recall * precision / (recall + precision)
recall_list.append(recall)
f1_score_list.append(f1_score)
print(recall_list)
print(f1_score_list)
plt.figure(figsize=(10, 8))
plt.plot(threshold_list, recall_list)
plt.plot(threshold_list, f1_score_list)
plt.xlabel("Threshold")
plt.ylabel("Score")
plt.show()
from sklearn.metrics import ConfusionMatrixDisplay
conf = ConfusionMatrixDisplay(conf_list[6])
conf.plot(cmap="mako")
plt.grid(visible=None)
plt.show()
# **Note:**
# As can be deduced from the above analysis, choosing a threshold value of 0.70 for classification yields both good recall and F1 scores, 0.98 and 0.97, respectively. As discussed previously, these are considered the most important metrics for predictive maintenance as judged by the author.
# ***
# ## Failure Type Discrimination applying KNN, SVM
# Now we turn our attention to the problem of classifying the failure types of failed articles, given the set of features of the dataset. Two different algorithms will be applied, hyperparameter tuning will be carried out, and metric comparisons between them will be made.
data_fail_type = data[
[
"air_temp",
"process_temp",
"rot_speed",
"torque",
"tool_wear",
"target",
"failure_type",
]
]
# Inspecting Scatter Plots looking for Patterns in Numerical Data
plt.figure(figsize=(10, 10))
sns.pairplot(data_fail_type, hue="failure_type", plot_kws=dict(alpha=0.5))
# **Note:**
# Analysing pair scatter plots, we can derive some interesting conclusions:
# * Power Failures tend to occur at the extremes of Torque and Rotational Speed values. Since these values are strongly and negatively correlated, picking one of them may be sufficient for prediction. Another idea is to come up with a variable that highlights the limits of both variables combined: multiplying Torque and Rotational Speed (known as Power) yields a variable that is both physically and mathematically appealing.
# * Tool Wear failures, as expected, tend to occur at the limit values of Tool Wear
# * Overstrain Failures seem to occur with combinations of high Tool Wear and Torque (or Power)
# * Heat Dissipation Failures appear to be spread out, but mostly occur at high values of Air Temperature and high values of Torque. Physical reasoning suggests that we can investigate this kind of failure using two derived features: one already mentioned, Power, and the other the difference between Process Temperature and Air Temperature, which will be called Delta Temperature. This makes sense since Heat Dissipation issues tend to occur in scenarios of high heat generation (high Power) and low cooling (low Delta Temperature)
# Data transformations: Selecting only failed data points / Creating new features derived from primary ones
data_fail_type = data_fail_type[data_fail_type["target"] == 1]
data_fail_type.reset_index(inplace=True)
data_fail_type["power"] = data_fail_type["torque"] * data_fail_type["rot_speed"]
data_fail_type["delta_temp"] = (
data_fail_type["process_temp"] - data_fail_type["air_temp"]
)
data_fail_type = data_fail_type.drop(columns=["target"])
data_fail_type.head()
# Inspecting Scatter Plots looking for Patterns in Numerical Data
plt.figure(figsize=(10, 10))
sns.pairplot(
data_fail_type[["delta_temp", "power", "tool_wear", "failure_type"]],
hue="failure_type",
plot_kws=dict(alpha=0.5),
)
# **Note:**
# The above plot helps to confirm previous assumptions:
# * Heat Dissipation Failures are concentrated where values of Delta Temperature are low
# * Power Failures tend to occur where the Power variable is near its extremes (high and low)
# * Tool Wear failures tend to occur when Tool Wear is high
# * Overstrain Failures tend to occur with a combination of high Tool Wear and Power
# ### Data Preparation for Machine Learning Algorithms
# Data Preparation: Selection, Split, Normalization
data_set = data_fail_type[["delta_temp", "power", "tool_wear", "failure_type"]]
data_set.head(5)
# Removing Random Failure Samples
data_set = data_set[data_set["failure_type"] != "Random Failures"]
# Train-Test Split
X = data_set[["delta_temp", "power", "tool_wear"]]
y = data_set["failure_type"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=74
)
# ### Applying KNN (k-Nearest Neighbors algorithm) for Failure Type Classification
# Module importing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
# Scaling Features
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)  # reuse the scaler fitted on the training data
data_scaled = pd.DataFrame(X_train_scaled, columns=["delta_temp", "power", "tool_wear"])
data_scaled.head()
data_scaled.describe()
# Training KNN Model
knn_class = KNeighborsClassifier()
knn_class.fit(X_train_scaled, y_train)
# Predicting Outcomes
y_pred = knn_class.predict(X_test_scaled)
# **Note:**
# For the purpose of classifying failures according to their types, it seems the best metric is now accuracy, since this is a multiclass classification problem and accuracy is an overall classification score across all failure types.
# Accuracy Score
print(accuracy_score(y_test, y_pred))
# Failure Type wise classification scores
print(classification_report(y_test, y_pred))
# **Note:**
# From the classification report, we can see that the algorithm was very precise in classifying Power failures (100%), but precision results for Overstrain and Tool Wear failures were not so high (70%). In the following section, hyperparameter tuning (varying k) will be carried out in order to improve the overall accuracy.
# Hyperparameter Tuning (varying the number of neighbors, k)
k_list = list(range(2, 21))
accuracy_list = []
pred_list = []
for k in k_list:
knn_class_model = KNeighborsClassifier(n_neighbors=k)
knn_class_model.fit(X_train_scaled, y_train)
y_pred_k = knn_class_model.predict(X_test_scaled)
pred_list.append(y_pred_k)
accuracy_list.append(accuracy_score(y_test, y_pred_k))
plt.figure(figsize=(8, 6))
plt.plot(k_list, accuracy_list)
plt.xlabel("k - Number of Neighbors")
plt.ylabel("Accuracy Score")
plt.show()
# Maximum accuracy reached and corresponding k
accuracy_scores = list(zip(accuracy_list, k_list))
print(max(accuracy_scores))
# **Note:**
# Maximum accuracy score is around 85% when we choose k as 6 nearest neighbors, the optimum value in this context.
y_pred_6 = pred_list[4]
print(classification_report(y_test, y_pred_6))
# **Note:**
# Choosing k = 6, there was a shift in precision scores. Power Failure classifications remained 100% precise, and Tool Wear classification precision rose to 100%. Heat Dissipation had a slight drop of 2% (90 to 88%), as did Overstrain Failure (dropped from 70 to 68%).
# ### Applying SVM (Support Vector Machine) for Failure Type Classification
# Module Importing
from sklearn.svm import SVC
# Model fitting and predictions using a linear kernel
sv_class = SVC(kernel="linear")
sv_class.fit(X_train_scaled, y_train)
y_pred = sv_class.predict(X_test_scaled)
print(accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
# **Note:**
# Overall accuracy was 72.7%, a poorer performance compared to the KNN algorithm. Power Failure precision was 100%, the same as with KNN, but the other scores were poor. Let's see if we can improve by varying hyperparameters such as the kernel and the regularization parameter C.
# Investigating different kernels
kernel_list = ["poly", "rbf", "sigmoid"]
pred_list = []
accuracy_list = []
for k in kernel_list:
sv_class = SVC(kernel=k)
sv_class.fit(X_train_scaled, y_train)
y_pred_k = sv_class.predict(X_test_scaled)
pred_list.append(y_pred_k)
accuracy_list.append(accuracy_score(y_test, y_pred_k))
plt.figure(figsize=(8, 6))
plt.bar(kernel_list, accuracy_list, color=["darkblue", "royalblue", "darkred"])
plt.show()
print(accuracy_list)
# **Note:**
# The poly and RBF kernel types provided the best accuracy results, around 83%, roughly equivalent to the KNN score (85% accuracy at its best k). The sigmoid kernel clearly is not well suited for this problem.
# Classification Report for the Best SVM Kernel
print(classification_report(y_test, pred_list[1]))
# Investigating the hyperparameters C (regularization) and gamma for the RBF kernel (default) with a manual grid search; a GridSearchCV version is sketched below
from sklearn.model_selection import GridSearchCV
C_list = [0.0001, 0.01, 0.1, 1, 10, 100, 1000]
gamma_list = [0.1, 1, 10, 100]
pred_dict = {}
accuracy_dict = {}
for c in C_list:
for gamma in gamma_list:
sv_class = SVC(C=c, gamma=gamma)
sv_class.fit(X_train_scaled, y_train)
y_pred = sv_class.predict(X_test_scaled)
pred_dict[(c, gamma)] = y_pred
accuracy_dict[(c, gamma)] = accuracy_score(y_test, y_pred)
x, y = zip(*accuracy_dict.keys())
z = list(accuracy_dict.values())
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(projection="3d")
ax.scatter(x, y, z, c=z)
plt.show()
print(max(accuracy_dict.values()))
print(accuracy_dict)
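# A minimal sketch (not in the original flow) of the same search expressed with the
# GridSearchCV import above; the grids simply mirror C_list and gamma_list. Note that
# GridSearchCV scores on cross-validation folds of the training set rather than on the
# held-out test set used above.
svc_grid = GridSearchCV(
    SVC(kernel="rbf"),
    param_grid={"C": C_list, "gamma": gamma_list},
    scoring="accuracy",
    cv=5,
    n_jobs=-1,
)
svc_grid.fit(X_train_scaled, y_train)
print(svc_grid.best_params_, svc_grid.best_score_)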
# **Note:**
# By varying the hyperparameters C and gamma, it was possible to reach 89% accuracy for C = 1000 and gamma = 1.
# Individual category scores for the optimal hyperparameter combination
print(classification_report(y_test, pred_dict[(1000, 1)]))
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Loading Files
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv("../input/playground-series-s3e12/train.csv")
test = pd.read_csv("../input/playground-series-s3e12/test.csv")
sub = pd.read_csv("../input/playground-series-s3e12/sample_submission.csv")
print("train shape:", train.shape)
print("test shape:", test.shape)
# check head of data
train.head()
# check null count and data type of data
train.info()
# check data info
train.describe()
# ## EDA
sns.countplot(x="target", hue="target", data=train)
fig, ax = plt.subplots(2, 3, figsize=(22, 12))
sns.boxplot(ax=ax[0, 0], x="target", y="gravity", hue="target", data=train)
sns.boxplot(ax=ax[0, 1], x="target", y="ph", hue="target", data=train)
sns.boxplot(ax=ax[0, 2], x="target", y="osmo", hue="target", data=train)
sns.boxplot(ax=ax[1, 0], x="target", y="cond", hue="target", data=train)
sns.boxplot(ax=ax[1, 1], x="target", y="urea", hue="target", data=train)
sns.boxplot(ax=ax[1, 2], x="target", y="calc", hue="target", data=train)
# Get Correlation
train_corr = train.drop(["id", "target"], axis=1).corr()
test_corr = test.drop(["id"], axis=1).corr()
# Generate HeatMap
fig, ax = plt.subplots(1, 2, figsize=(22, 8))
sns.heatmap(train_corr, annot=True, ax=ax[0]).set_title("Correlation of train")
sns.heatmap(test_corr, annot=True, ax=ax[1]).set_title("Correlation of test")
# copy data
train_sub = train.drop(["id", "target"], axis=1).copy()
test_sub = test.drop(["id"], axis=1).copy()
# add new column to indicate which data they are from
train_sub["dataset"] = "train"
test_sub["dataset"] = "test"
# combine the two datasets
Con_df = pd.concat([train_sub, test_sub]).reset_index(drop=True)
# plot the two data to see any difference in distribution
fig, ax = plt.subplots(2, 3, figsize=(22, 12))
sns.kdeplot(data=Con_df, x="gravity", hue="dataset", ax=ax[0, 0], fill=True)
sns.kdeplot(data=Con_df, x="ph", hue="dataset", ax=ax[0, 1], fill=True)
sns.kdeplot(data=Con_df, x="osmo", hue="dataset", ax=ax[0, 2], fill=True)
sns.kdeplot(data=Con_df, x="cond", hue="dataset", ax=ax[1, 0], fill=True)
sns.kdeplot(data=Con_df, x="urea", hue="dataset", ax=ax[1, 1], fill=True)
sns.kdeplot(data=Con_df, x="calc", hue="dataset", ax=ax[1, 2], fill=True)
# Check for duplicates
dup = Con_df.duplicated().sum()
print("there are", dup, "duplicates in the df")
# From the heatmap it is clear that
# gravity~osmo, gravity~urea, osmo~urea, osmo~cond
# have high correlations and need further investigation
# subplot size
fig, ax = plt.subplots(2, 2, figsize=(22, 12))
# subplot
sns.scatterplot(data=train, x="gravity", y="osmo", hue="target", ax=ax[0, 0])
sns.scatterplot(data=train, x="gravity", y="urea", hue="target", ax=ax[0, 1])
sns.scatterplot(data=train, x="osmo", y="urea", hue="target", ax=ax[1, 0])
sns.scatterplot(data=train, x="osmo", y="cond", hue="target", ax=ax[1, 1])
|
# importing libraries
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
# Define the network architecture
class QNetwork(nn.Module):
def __init__(self, state_size, action_size):
super(QNetwork, self).__init__()
self.fc1 = nn.Linear(state_size, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, action_size)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
x = self.fc3(x)
return x
# Define the replay buffer
class ReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.buffer = []
self.index = 0
def push(self, state, action, reward, next_state, done):
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.index] = (state, action, reward, next_state, done)
self.index = (self.index + 1) % self.capacity
def sample(self, batch_size):
batch = np.random.choice(len(self.buffer), batch_size, replace=False)
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in batch:
state, action, reward, next_state, done = self.buffer[i]
states.append(state)
actions.append(action)
rewards.append(reward)
next_states.append(next_state)
dones.append(done)
return (
torch.tensor(np.array(states)).float(),
torch.tensor(np.array(actions)).long(),
torch.tensor(np.array(rewards)).unsqueeze(1).float(),
torch.tensor(np.array(next_states)).float(),
torch.tensor(np.array(dones)).unsqueeze(1).int(),
)
def __len__(self):
return len(self.buffer)
# Define the Double DQN agent
class DDQNAgent:
def __init__(
self,
state_size,
action_size,
seed,
learning_rate=1e-3,
capacity=1000000,
discount_factor=0.99,
tau=1e-3,
update_every=4,
batch_size=64,
):
self.state_size = state_size
self.action_size = action_size
self.seed = seed
self.learning_rate = learning_rate
self.discount_factor = discount_factor
self.tau = tau
self.update_every = update_every
self.batch_size = batch_size
self.steps = 0
self.qnetwork_local = QNetwork(state_size, action_size)
self.qnetwork_target = QNetwork(state_size, action_size)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=learning_rate)
self.replay_buffer = ReplayBuffer(capacity)
self.update_target_network()
def step(self, state, action, reward, next_state, done):
# Save experience in replay buffer
self.replay_buffer.push(state, action, reward, next_state, done)
# Learn every update_every steps
self.steps += 1
if self.steps % self.update_every == 0:
if len(self.replay_buffer) > self.batch_size:
experiences = self.replay_buffer.sample(self.batch_size)
self.learn(experiences)
def act(self, state, eps=0.0):
        device = next(self.qnetwork_local.parameters()).device  # keep the input on the same device as the network
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences):
states, actions, rewards, next_states, dones = experiences
        # Double DQN target: select the greedy actions with the local network,
        # then evaluate those actions with the target network
        next_actions = self.qnetwork_local(next_states).detach().argmax(1, keepdim=True)
        Q_targets_next = self.qnetwork_target(next_states).detach().gather(1, next_actions)
# Compute Q targets for current states
Q_targets = rewards + self.discount_factor * (Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions.view(-1, 1))
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update target network
self.soft_update(self.qnetwork_local, self.qnetwork_target)
    def update_target_network(self):
        # Hard-copy the local network parameters into the target network at initialization
        self.qnetwork_target.load_state_dict(self.qnetwork_local.state_dict())
def soft_update(self, local_model, target_model):
for target_param, local_param in zip(
target_model.parameters(), local_model.parameters()
):
target_param.data.copy_(
self.tau * local_param.data + (1.0 - self.tau) * target_param.data
)
import gym
import numpy as np
import matplotlib.pyplot as plt
# Create the environment
env = gym.make("CartPole-v1", render_mode="rgb_array")
env.reset()
# Plot the environment
def plot_environment(env, figsize=(5, 4)):
plt.figure(figsize=figsize)
img = env.render()
plt.imshow(img)
plt.axis("off")
return img
plot_environment(env)
plt.show()
# Get the state and action sizes
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
# Set the random seed
seed = 0
# Create the DDQN agent
agent = DDQNAgent(state_size, action_size, seed)
# Set the number of episodes and the maximum number of steps per episode
num_episodes = 1000
max_steps = 1000
# Set the exploration rate
eps = eps_start = 1.0
eps_end = 0.01
eps_decay = 0.995
# Set the rewards and scores lists
rewards = []
scores = []
# Run the training loop
for i_episode in range(num_episodes):
print(f"Episode: {i_episode}")
# Initialize the environment and the state
state = env.reset()[0]
score = 0
# eps = eps_end + (eps_start - eps_end) * np.exp(-i_episode / eps_decay)
# Update the exploration rate
eps = max(eps_end, eps_decay * eps)
# Run the episode
for t in range(max_steps):
# Select an action and take a step in the environment
action = agent.act(state, eps)
next_state, reward, done, trunc, _ = env.step(action)
# Store the experience in the replay buffer and learn from it
agent.step(state, action, reward, next_state, done)
# Update the state and the score
state = next_state
score += reward
# Break the loop if the episode is done or truncated
if done or trunc:
break
print(f"\tScore: {score}, Epsilon: {eps}")
# Save the rewards and scores
rewards.append(score)
scores.append(np.mean(rewards[-100:]))
# Close the environment
env.close()
plt.ylabel("Score")
plt.xlabel("Episode")
plt.plot(range(len(rewards)), rewards)
plt.plot(range(len(rewards)), scores)
plt.legend(["Reward", "Score"])
plt.show()
|
# Importing Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# reading data set (Salary_Data.csv) using pandas
df = pd.read_csv("../input/random-salary-data-of-employes-age-wise/Salary_Data.csv")
## Now some Eploratory Data Analysis EDA
# Checking first five rows
df.head()
# Data set information
df.info()
# Data Set statistical information
df.describe()
# isnull() check if there is any null (missing) value
df.isnull()
# summing up all null value if there is any
df.isnull().sum()
# Visualize data with matplotlib
df.plot("YearsExperience", "Salary", kind="scatter")
plt.show()
# Now Splitting Data --- Train and Test Data
X = df["YearsExperience"]
y = df["Salary"]
# importing sklearn and using train_test_split to split data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=100
)
# test_size=0.2 means we select 20% for test data and remaining 80% for train data
# Now check the dimension of train and test data and modify with correct dimension
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print(X_train.ndim)
print(X_test.ndim)
print(y_train.ndim)
print(y_test.ndim)
# Now we have to reshape the feature arrays into 2D, otherwise sklearn raises an "Expected 2D array" error
X_train = np.array(X_train).reshape(-1, 1)
X_test = np.array(X_test).reshape(-1, 1)
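# A minimal sketch of the next steps, which are not present in the original script (it ends
# here); it assumes the intended model is a simple LinearRegression on the reshaped arrays.
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
print("R^2 on test data:", lin_reg.score(X_test, y_test))
print("Predicted salary for 5 years of experience:", lin_reg.predict([[5.0]]))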
|
# Bristol-Myers Squibb – Molecular Translation
# Exploratory Data Analysis (EDA)
# CREATED BY: DARIEN SCHETTLER
# TABLE OF CONTENTS
# ---
# 0 IMPORTS
# ---
# 1 BACKGROUND INFORMATION
# ---
# 2 SETUP
# ---
# 3 HELPER FUNCTIONS
# ---
# 4 LABEL EXPLORATION
# ---
# 5 IMAGE EXPLORATION
# ---
# 6 SEGMENTATION EXPLORATION
# ---
# 7 SINGLE WEAK-LABEL INDIVIDUAL CELL EXPLORATION
# ---
# 0 IMPORTS
print("\n... IMPORTS STARTING ...\n")
print("\n\tVERSION INFORMATION")
# Machine Learning and Data Science Imports
import tensorflow as tf
print(f"\t\t– TENSORFLOW VERSION: {tf.__version__}")
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
print(f"\t\t– NUMPY VERSION: {np.__version__}")
# Built In Imports
from collections import Counter
from datetime import datetime
from glob import glob
import warnings
import requests
import imageio
import IPython
import urllib
import zipfile
import pickle
import random
import shutil
import string
import math
import tqdm
import time
import gzip
import ast
import io
import os
import gc
import re
# Visualization Imports
from matplotlib.colors import ListedColormap
import matplotlib.patches as patches
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import plotly.express as px
import seaborn as sns
from PIL import Image
import matplotlib
print(f"\t\t– MATPLOTLIB VERSION: {matplotlib.__version__}")
import plotly
import PIL
import cv2
print("\n\n... IMPORTS COMPLETE ...\n")
# Stop Tensorflow From Eating All The Memory
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices("GPU")
print(len(gpus), "... Physical GPUs,", len(logical_gpus), "Logical GPUs ...\n")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
# 1 BACKGROUND INFORMATION
# 1.1 THE DATA
# ---
# BACKGROUND INFORMATION
# In this competition, you are provided with images of chemicals, with the objective of predicting the corresponding [**International Chemical Identifier**](https://en.wikipedia.org/wiki/International_Chemical_Identifier) (**InChI**) text string of the image. The images provided (both in the training data as well as the test data) may be rotated to different angles, be at various resolutions, and have different noise levels.
# * Possibilities for augmentation via rotation and noise
# * There are about 4 Million total images in this dataset. Unzipping the downloaded data will take a non-trivial amount of time.
# ***Author's Take:***
# **1. Identify Feature Vector From Image of Chemical Structure**
# **2. Generate the Required String**
# Submissions are evaluated on the mean Levenshtein distance between the InChI strings you submit and the ground truth InChI values (a small sketch of this metric follows the submission format below).
# For each **image_id** in the test set, **you must predict the InChi string of the molecule in the corresponding image**. The file should contain a header and have the following format:
# ```txt
# image_id,InChI
# 00000d2a601c,InChI=1S/H2O/h1H2
# 00001f7fc849,InChI=1S/H2O/h1H2
# 000037687605,InChI=1S/H2O/h1H2
# etc.
# ```
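# A minimal illustrative sketch (not part of the original notebook): computing the
# Levenshtein edit distance between two InChI strings with a simple DP over prefixes.
def levenshtein(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(
                min(
                    prev[j] + 1,  # deletion
                    curr[j - 1] + 1,  # insertion
                    prev[j - 1] + (ca != cb),  # substitution (free if characters match)
                )
            )
        prev = curr
    return prev[-1]

# e.g. levenshtein("InChI=1S/H2O/h1H2", "InChI=1S/H2S/h1H2") == 1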
# DATA FILES & DIRECTORIES
# > **`train/`** - the training images, arranged in a 3-level folder structure by **image_id**
# > **`test/`** - the test images, arranged in the same folder structure as **`train/`**
# > **`train_labels.csv`** - ground truth **InChi** labels for the training images
# > **`sample_submission.csv`** - a sample submission file in the correct format
# 1.2 INTERNATIONAL CHEMICAL IDENTIFIER (InChi)
# [***Most information is from this wikipedia page***](https://en.wikipedia.org/wiki/International_Chemical_Identifier)
# ---
# The IUPAC International Chemical Identifier *(InChI /ˈɪntʃiː/ IN-chee or /ˈɪŋkiː/ ING-kee)* is a textual identifier for chemical substances, designed to provide a standard way to encode molecular information and to facilitate the search for such information in databases and on the web. Initially developed by **IUPAC** (**I**nternational **U**nion of **P**ure and **A**pplied **C**hemistry) and **NIST** (**N**ational **I**nstitute of **S**tandards and **T**echnology) from 2000 to 2005, the format and algorithms are non-proprietary.
# The continuing development of the standard has been supported since 2010 by the not-for-profit **InChI Trust**, of which **IUPAC** is a member. The current software version is 1.06 and was released in December 2020.
# – Prior to 1.04, the software was freely available under the open-source LGPL license.
# – It now uses a custom license called IUPAC-InChI Trust License.
# OVERVIEW
# The identifiers describe chemical substances in terms of **layers of information**
# – The Atoms and Their Bond Connectivity
# – [Tautomeric Information](https://en.wikipedia.org/wiki/Tautomer)
# – [Isotope Information](https://en.wikipedia.org/wiki/Isotope)
# – [Stereochemistry](https://en.wikipedia.org/wiki/Stereochemistry)
# – [Electronic Charge Information](https://en.wikipedia.org/wiki/Electric_charge)
# **NOTE:**
# Not all layers have to be provided; for instance, the tautomer layer can be omitted if that type of information is not relevant to the particular application.
# **InChIs** differ from the widely used CAS registry numbers in three respects:
# – **firstly**, they are freely usable and non-proprietary;
# – **secondly**, they can be computed from structural information and do not have to be assigned by some organization;
# – **thirdly**, most of the information in an **InChI** is human readable (with practice).
# InChIs can thus be seen as akin to a general and extremely formalized version of IUPAC names. They can express more information than the simpler SMILES notation and differ in that every structure has a unique InChI string, which is important in database applications. Information about the 3-dimensional coordinates of atoms is not represented in InChI; for this purpose a format such as PDB can be used.
# The InChI algorithm converts input structural information into a unique InChI identifier in a three-step process: normalization (to remove redundant information), canonicalization (to generate a unique number label for each atom), and serialization (to give a string of characters).
# The InChIKey, sometimes referred to as a hashed InChI, is a fixed length (27 character) condensed digital representation of the InChI that is not human-understandable. The InChIKey specification was released in September 2007 in order to facilitate web searches for chemical compounds, since these were problematic with the full-length InChI.[6] Unlike the InChI, the InChIKey is not unique: though collisions can be calculated to be very rare, they happen.[7]
# In January 2009 the 1.02 version of the InChI software was released. This provided a means to generate so called standard InChI, which does not allow for user selectable options in dealing with the stereochemistry and tautomeric layers of the InChI string. The standard InChIKey is then the hashed version of the standard InChI string. The standard InChI will simplify comparison of InChI strings and keys generated by different groups, and subsequently accessed via diverse sources such as databases and web resources.
# 2 SETUP
# Define the root and data directories
ROOT_DIR = "/kaggle/input"
DATA_DIR = os.path.join(ROOT_DIR, "bms-molecular-translation")
TRAIN_DIR = os.path.join(DATA_DIR, "train")
TEST_DIR = os.path.join(DATA_DIR, "test")
TRAIN_CSV_PATH = os.path.join(DATA_DIR, "train_labels.csv")
SS_CSV_PATH = os.path.join(DATA_DIR, "sample_submission.csv")
train_df = pd.read_csv(TRAIN_CSV_PATH)
train_df["img_path"] = train_df.image_id.apply(
lambda x: os.path.join(TRAIN_DIR, x[0], x[1], x[2], x + ".png")
)
print("\n... TRAIN DATAFRAME W/ PATHS ...\n")
display(train_df)
ss_df = pd.read_csv(SS_CSV_PATH)
print("\n... SUBMISSION DATAFRAME ...\n")
display(ss_df)
|
# # Quick Australian population EDA and Visualisation
# ## 1. Importing libraries and dataset
import os
import numpy as np
import pandas as pd
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns  # needed for the violin/strip/kde plots below
df_master = pd.read_csv("/kaggle/input/worldcities-australia/au.csv")
# ## 2. A brief EDA
df_master.info()
# * Only missing data in the "capital" column. This is not a concern for this analysis.
df_master.head(10)
# Number of cities in each state and territory (identified by the admin name)
df_master["admin_name"].value_counts()
df_master.sort_values(by="population", ascending=False, inplace=True)
state_territory_list = np.unique(df_master["admin_name"])
populations = []
for st in state_territory_list:
pop = sum(df_master[df_master["admin_name"] == st]["population"])
print(f"Population of all cities in {st} = {pop}\n")
populations.append(pop)
# Create bars
plt.barh(state_territory_list, populations)
# Create names on the y-axis
plt.yticks(state_territory_list, state_territory_list)
# Show graphic
plt.show()
print(df_master[["city", "population"]].head(10))
# Create bars for the ten most populous cities (positional selection, since the index labels are shuffled after the sort)
top10 = df_master.head(10)
plt.barh(top10["city"], top10["population"])
# Create names on the y-axis
plt.yticks(top10["city"], top10["city"])
# Show graphic
plt.show()
def summary_stats(df, columns):
for col in columns:
print(f"Column --> {col}")
print(f"Mean --> {df[col].mean()}")
print(f"Median -- {df[col].median()}")
print(f"Standard Deviation --> {df[col].std()}")
print(f"Quantile 0.05 --> {df[col].quantile(0.05)}")
print(f"Quantile 0.25 --> {df[col].quantile(0.25)}")
print(f"Quantile 0.50 --> {df[col].quantile(0.50)}")
print(f"Quantile 0.75 --> {df[col].quantile(0.75)}")
print(f"Quantile 0.95 --> {df[col].quantile(0.95)}")
print("\n\n")
summary_stats(df_master, ["population", "population_proper"])
quantiles = df_master["population"].quantile([0.05, 0.25, 0.5, 0.75, 0.95])
df_master["quantile_id"] = 0
for index, row in df_master.iterrows():
if df_master.at[index, "population"] < quantiles[0.05]:
df_master.at[index, "quantile_id"] = 1
elif (
df_master.at[index, "population"] >= quantiles[0.05]
and df_master.at[index, "population"] < quantiles[0.25]
):
df_master.at[index, "quantile_id"] = 2
elif (
df_master.at[index, "population"] >= quantiles[0.25]
and df_master.at[index, "population"] < quantiles[0.5]
):
df_master.at[index, "quantile_id"] = 3
elif (
df_master.at[index, "population"] >= quantiles[0.5]
and df_master.at[index, "population"] < quantiles[0.75]
):
df_master.at[index, "quantile_id"] = 4
elif (
df_master.at[index, "population"] >= quantiles[0.75]
and df_master.at[index, "population"] < quantiles[0.95]
):
df_master.at[index, "quantile_id"] = 5
else:
df_master.at[index, "quantile_id"] = 6
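# A vectorized alternative to the loop above (a sketch for comparison only; np.searchsorted
# maps each population to the same six buckets, and the result is kept in a separate array
# rather than overwriting the quantile_id column built above):
bucket_edges = quantiles.values  # [q0.05, q0.25, q0.50, q0.75, q0.95]
quantile_id_check = np.searchsorted(bucket_edges, df_master["population"].values, side="right") + 1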
ax = (df_master[df_master["quantile_id"] == 1]["population"]).plot.box()
ax.set_ylabel("population")
ax.xaxis.set_label_text("quantile 1")
plt.title("Data < 0.05 quantile\n")
plt.show()
import seaborn as sns  # needed here: seaborn is first used at this point but only imported further below
ax = sns.violinplot(
x="quantile_id", y="population", data=(df_master[df_master["quantile_id"] == 1])
)
ax = sns.stripplot(
x="quantile_id",
y="population",
data=(df_master[df_master["quantile_id"] == 1]),
color="red",
jitter=0.2,
size=6,
)
# add title
plt.title("Boxplot with jitter", loc="left")
plt.show()
sns.kdeplot(df_master[df_master["quantile_id"] == 1]["population"], shade=True)
plt.title("Data < 0.05 quantile\n")
plt.show()
# ax = (df_master[df_master['quantile_id'] == 2]['population']).plot.box()
import seaborn as sns
# density plot with shade
sns.kdeplot(df_master[df_master["quantile_id"] == 2]["population"], shade=True)
ax = (df_master[df_master["quantile_id"] == 3]["population"]).plot.box()
# sns.kdeplot(df_master[df_master['quantile_id'] == 3]['population'], shade=True)
# ax = (df_master[df_master['quantile_id'] == 4]['population']).plot.box()
sns.kdeplot(df_master[df_master["quantile_id"] == 4]["population"], shade=True)
# ax = (df_master[df_master['quantile_id'] == 5]['population']).plot.box()
sns.kdeplot(df_master[df_master["quantile_id"] == 5]["population"], shade=True)
# ax = (df_master[df_master['quantile_id'] == 6]['population']).plot.box()
sns.kdeplot(df_master[df_master["quantile_id"] == 6]["population"], shade=True)
# Geospatial visualisation of the cities in the dataset
fig = px.scatter_geo(
df_master,
lat=df_master["lat"],
lon=df_master["lng"],
color="quantile_id", # which column to use to set the color of markers
hover_name="city", # column added to hover information,
hover_data=["population"],
size="quantile_id", # size of markers
projection="natural earth",
)
fig.show()
|
# ### Before submitting the link, please remember to save your notebook by clicking the "Save Version" button (top right).
# Note on working through this: you are free to solve the tasks in any way you like, e.g. you may add further code blocks. If you get stuck, ask in the forum or consult the usual sources (Google, Stack Overflow, ChatGPT).
import pandas as pd
# Import the dataset to be cleaned. It contains information on books held by the national library of the United Kingdom ("British Library").
df = pd.read_csv(
"https://raw.githubusercontent.com/realpython/python-data-cleaning/master/Datasets/BL-Flickr-Images-Book.csv"
)
# Show how many observations and how many variables the dataset contains.
data = pd.read_csv(
"https://raw.githubusercontent.com/realpython/python-data-cleaning/master/Datasets/BL-Flickr-Images-Book.csv"
)
n_obs = len(data)
n_vars = len(data.columns)
print("Der Datensatz enthält", n_obs, "Beobachtungen und", n_vars, "Variablen.")
# Drop all variables for which more than 50% of the observations have no information.
# Compute the percentage of missing values per variable
fehlende_werte_prozent = df.isnull().mean() * 100
# Select the variables with more than 50% of observations missing
variablen_mit_zu_vielen_fehlenden_werten = fehlende_werte_prozent[
fehlende_werte_prozent > 50
].index
# Drop these variables from the DataFrame
df.drop(variablen_mit_zu_vielen_fehlenden_werten, axis=1, inplace=True)
# Print a confirmation
print("The following variables were dropped because more than 50% of their observations are missing:")
print(variablen_mit_zu_vielen_fehlenden_werten)
# Optional: save the cleaned DataFrame to a new file
df.to_csv("dein_bereinigter_datensatz.csv", index=False)
# Replace 'dein_bereinigter_datensatz.csv' with the desired file name
df.head(5)
# Set the variable "Identifier" as the label index for your observations.
df.set_index("Identifier", inplace=True)
df.head(5)
# Check whether the chosen index uniquely identifies the observations, or whether there are duplicates with respect to the "Identifier".
duplicates = df.index.duplicated()
if duplicates.any():
print("Der Index identifiziert nicht alle Beobachtungen eindeutig.")
else:
print("Der Index identifiziert alle Beobachtungen eindeutig.")
pd.set_option("display.max_rows", None)
df[df.index.duplicated(keep=False)]
# The year of publication plays an important role in your analysis, so drop all observations for which no publication year is available ("NaN").
df.dropna(subset=["Date of Publication"], inplace=True)
print("Alle Beobachtungen mit fehlendem Publikationsjahr wurden gelöscht.")
print("Aktueller DataFrame:")
print(df)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(os.path.join(dirname, filename))
df.head().T
df.info()
categorical_cols = [
"checking_status",
"credit_history",
"purpose",
"savings_status",
"employment",
"personal_status",
"other_parties",
"property_magnitude",
"other_payment_plans",
"housing",
"job",
"own_telephone",
"foreign_worker",
"class",
]
numerical_cols = [
"duration",
"credit_amount",
"installment_commitment",
"residence_since",
"age",
"existing_credits",
"num_dependents",
]
# View all Categorical Columns in the dataset
for i in categorical_cols:
print(f"{i} : {df[i].unique()}\n")
# Handling ordinal categories
from sklearn.preprocessing import LabelEncoder
cat_df = pd.DataFrame()
le = LabelEncoder()
checking_status_le = le.fit(["no checking", "<0", "0<=X<200", ">=200"])
cat_df["checking_status"] = checking_status_le.transform(df.checking_status)
credit_history_le = le.fit(
[
"critical/other existing credit",
"no credits/all paid",
"delayed previously",
"existing paid",
"all paid",
]
)
cat_df["credit_history"] = credit_history_le.transform(df["credit_history"])
saving_status_le = le.fit(
["no known savings", "<100", "100<=X<500", "500<=X<1000", ">=1000"]
)
cat_df["savings_status"] = saving_status_le.transform(df["savings_status"])
employment_le = le.fit(["unemployed", "<1", "1<=X<4", "4<=X<7", ">=7"])
cat_df["employment"] = employment_le.transform(df["employment"])
employment_le = le.fit(
[
"unemp/unskilled non res",
"unskilled resident",
"skilled",
"high qualif/self emp/mgmt",
]
)
cat_df["job"] = employment_le.transform(df["job"])
# Handling Nominal Data
dummy_col = [
"own_telephone",
"purpose",
"foreign_worker",
"class",
"other_parties",
"property_magnitude",
"other_payment_plans",
"housing",
]
dummy_df = pd.get_dummies(df[dummy_col])
dummy_col_to_include = [
"own_telephone_yes",
"purpose_business",
"purpose_domestic appliance",
"purpose_education",
"purpose_furniture/equipment",
"purpose_new car",
"purpose_other",
"purpose_radio/tv",
"purpose_repairs",
"purpose_retraining",
"purpose_used car",
"foreign_worker_yes",
"other_parties_none",
"property_magnitude_car",
"property_magnitude_life insurance",
"property_magnitude_no known property",
"property_magnitude_real estate",
"other_payment_plans_bank",
"other_payment_plans_none",
"other_payment_plans_stores",
"housing_for free",
"housing_own",
"housing_rent",
"class_good",
]
dummy_df = dummy_df[dummy_col_to_include]
# The personal_status column mixes gender and marital status; the statuses are ambiguous (e.g. div/dep/mar), so "single" is the only clearly identifiable status feature.
gender_df, status_df = [], []
for num, row in df.iterrows():
gender, status = row.personal_status.split(" ")
gender_df.append(gender)
status_df.append(status)
d = {"gender": gender_df, "status": status_df}
gender_status_df = pd.DataFrame(data=d)
gender_status_df = pd.get_dummies(gender_status_df)
gender_status_df = gender_status_df[
["gender_male", "status_single"]
] # if male = 1 then female = 0 and vice versa.
cat_df[["gender_male", "status_single"]] = gender_status_df
cat_df[dummy_col_to_include] = dummy_df
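# A vectorized alternative to the iterrows() split above (a sketch; it produces the same two
# indicator columns without an explicit Python loop; the _gs / _alt names are illustrative only):
_gs = df["personal_status"].str.split(" ", expand=True)
_gs.columns = ["gender", "status"]
_alt = pd.get_dummies(_gs)[["gender_male", "status_single"]]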
import seaborn as sns
import matplotlib.pyplot as plt
ax = plt.figure(figsize=(10, 10))
ax = sns.heatmap(cat_df.corr())
|
# Importing the required libraries
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
# Reading the dataset
data = pd.read_csv("train.csv")
data2 = pd.read_csv("test.csv")
data3 = pd.read_csv("additional_data.csv")
# Preparing the features and target from the train and additional data
X1 = data.drop(["Unnamed: 0", "id", "price"], axis=1)
X2 = data3.drop(["Unnamed: 0", "id", "price"], axis=1)
y1 = data["price"]
y2 = data3["price"]
X = pd.concat([X1, X2])
y = pd.concat([y1, y2])
for column in X:
avg_value = X[column].mean()
X[column] = X[column].fillna(avg_value)
print(X.shape, y.shape)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=42
)
X_train = X
y_train = y
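# Optional: dense networks usually converge faster on standardized inputs. A minimal sketch
# (shown for reference only and not applied to the model below; scaler / X_train_scaled are
# illustrative names):
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)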
# Define the model architecture
model = Sequential()
model.add(Dense(26, input_dim=26, activation="relu"))
model.add(Dense(104, activation="relu"))
model.add(Dense(52, activation="relu"))
model.add(Dense(26, activation="relu"))
model.add(Dense(1, activation="linear"))
# Compile the model
model.compile(loss="mean_squared_error", optimizer=Adam(lr=0.0001))
# Train the model
history = model.fit(
X_train, y_train, epochs=100, batch_size=128, validation_data=(X_test, y_test)
)
# Evaluate on the held-out split first, before X_test is overwritten with the competition test
# set (otherwise y_test and the competition predictions would have different lengths). Note that
# the split above was also included in training, so this is only a rough sanity check.
val_pred = model.predict(X_test)
mse = mean_squared_error(y_test, val_pred)
print("Validation RMSE:", mse**0.5)
# Predict on the competition test set
X_test = data2.drop(["Unnamed: 0", "id"], axis=1)
y_pred = model.predict(X_test)
y_pred
import pandas as pd
# read the CSV file into a DataFrame
df = pd.read_csv("sample_submission.csv")
df["price"] = y_pred
df
# save the changes to the CSV file
df.to_csv("example7.csv", index=False)
|
# # MNIST Improved Model
# In this notebook, we create an improved (from our baseline) model for handwritten digit recognition on the MNIST data set.
# ### Import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
# ### Load data
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
X_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
X_train = train_data.drop("label", axis=1)
y_train = train_data["label"].values
X_train = X_train / 255.0
X_test = X_test / 255.0
X_train = X_train.values.reshape(-1, 28, 28)
X_test = X_test.values.reshape(-1, 28, 28)
model = tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10),
tf.keras.layers.Softmax(),
]
)
model.compile(
    optimizer=tf.keras.optimizers.legacy.Adam(),
    # the model already ends in a Softmax layer, so the loss must not treat its outputs as logits
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=["accuracy"],
)
model.fit(X_train, y_train, epochs=100)
model_pred = model(X_test)
y_pred = [np.argmax(pred) for pred in model_pred]
submission = pd.DataFrame({"Label": y_pred})
submission.index += 1
submission
submission.to_csv("submission.csv", index=True, index_label="ImageId")
|
import scipy.io
import numpy as np
import math
from progressbar import ProgressBar
data = scipy.io.loadmat("/kaggle/input/admlhw4/data1.mat")
X_train = np.asarray(data["TrainingX"])
X_test = np.asarray(data["TestX"])
Y_train = np.asarray(data["TrainingY"])
Y_test = np.asarray(data["TestY"])
def find_sigma(X):
p_bar = ProgressBar()
sigma = 0
for i in p_bar(range(len(X))):
for j in range(len(X)):
sigma += np.linalg.norm(X[i] - X[j]) ** 2
sigma = sigma / (len(X) ** 2)
return sigma
# Define RBF kernel
def rbf_kernel(x1, x2, sigma):
return np.exp(-np.linalg.norm(x1 - x2) ** 2 / (2 * sigma**2))
# Define train empirical kernel map function
def train_empirical_kernel_map(X, sigma):
p_bar = ProgressBar()
N = len(X)
K = np.zeros((N, N))
for i in p_bar(range(N)):
for j in range(i, N):
K[i, j] = rbf_kernel(X[i], X[j], sigma)
K[j, i] = K[i, j]
return K
# Define test empirical kernel map function
def test_empirical_kernel_map(X_test, X_train, sigma):
p_bar = ProgressBar()
K = np.zeros((len(X_test), len(X_train)))
for i in p_bar(range(len(X_test))):
for j in range(len(X_train)):
K[i, j] = rbf_kernel(X_test[i], X_train[j], sigma)
return K
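# A vectorized alternative to the double loops above (a sketch; scipy's cdist computes all
# pairwise squared distances at once and gives the same kernel matrix up to floating-point error):
from scipy.spatial.distance import cdist
def rbf_kernel_matrix(A, B, sigma):
    sq_dists = cdist(A, B, metric="sqeuclidean")  # pairwise squared Euclidean distances
    return np.exp(-sq_dists / (2 * sigma**2))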
find_sigma(X_train)
find_sigma(X_test)
SIGMA_TRAIN = math.sqrt(103.46796422816307)
SIGMA_TEST = math.sqrt(102.8186545744530)
K_train = train_empirical_kernel_map(X_train, SIGMA_TRAIN)
np.save("k_train.npy", K_train)
K_test = test_empirical_kernel_map(X_test, X_train, SIGMA_TEST)
np.save("k_test.npy", K_test)
K_TRAIN = np.load("/kaggle/input/admlhw4/k_train.npy")
K_TEST = np.load("/kaggle/input/admlhw4/k_test.npy")
# Define sigmoid function
def sigmoid(z):
return 1 / (1 + np.exp(-np.clip(z, -100, 100)))
# Define loss function
def loss(w, y, K, lamb):
z = np.multiply(y, np.dot(K, w))
return -np.sum(np.log(sigmoid(z))) + lamb * np.dot(w.T, w)
# Define gradient of loss function
def gradient(w, y, K, lamb):
z = np.multiply(y, np.dot(K, w))
return -np.dot(K.T, np.multiply(y, (1 - sigmoid(z)))) + 2 * lamb * w
# Define training function
def train(K, y, sigma, lamb, lr):
N, D = K.shape
losses = []
train_accs = []
test_accs = []
best_test_acc = 0
best_w = np.zeros((N, 1))
w = np.zeros((N, 1))
y = y.reshape(N, 1)
tolerance = 1e-5
max_iter = 1000
for epoch in range(max_iter):
prev_w = np.copy(w)
grad = gradient(w, y, K, lamb)
w -= lr * grad
train_loss = loss(w, y, K, lamb)
train_preds = predict(K, w)
train_acc = np.mean(train_preds == y)
test_preds = predict(K_TEST, w)
test_acc = np.mean(test_preds == Y_test.reshape(1000, 1))
if test_acc > best_test_acc:
best_test_acc = test_acc
best_w = np.copy(w)
losses.append(train_loss)
train_accs.append(train_acc)
test_accs.append(test_acc)
print(
f"Epoch {epoch}: Train Loss = {train_loss[0][0]} \t Train Accuracy = {train_acc} \t Test Accuracy = {test_acc} \t Best Test Accuracy = {best_test_acc}"
)
if np.linalg.norm(grad) < tolerance:
print(f"Converged at Epoch {epoch+1}")
break
return best_w
# Define test function
def predict(K, w):
y_pred = sigmoid(np.dot(K, w))
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = -1
return y_pred
# Train the model
w = train(K_TRAIN, Y_train, SIGMA_TRAIN, lamb=1e-3, lr=1e-3)
# Test the model and report accuracy
y_pred = predict(K_TEST, w)
accuracy = np.mean(y_pred == Y_test)
print(f"Accuracy = {accuracy * 100}%")
|
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import glob
import os
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
import argparse
from PIL import Image
from keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
import argparse
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
from keras.layers import Conv3D, ConvLSTM2D, Conv3DTranspose
from keras.models import Sequential
from matplotlib import pyplot as plt
from PIL import Image
from sklearn import preprocessing as pre
imagestore = []
video_source_path = "/kaggle/input/avenue-dataset/Avenue Dataset/training_videos"
fps = 3
# fps is the frame-extraction rate passed to ffmpeg below: fps=3 keeps 3 frames for every second of video.
def create_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def remove_old_images(path):
filelist = glob.glob(os.path.join(path, "*.jpg"))
for f in filelist:
os.remove(f)
def store(image_path):
img_rgb = Image.open(image_path)
img_gray = img_rgb.convert("L")
img = np.array(img_gray)
img = resize(img, (256, 256))
imagestore.append(img)
# List of all Videos in the Source Directory.
videos = os.listdir(video_source_path)
print("Found ", len(videos), " training videos")
# Make a temp dir to store all the frames
create_dir("./frames")
# Remove old images
remove_old_images("./frames")
framepath = "./frames"
for video in videos:
os.system(
"ffmpeg -i '{}/{}' -vf fps={} ./frames/%04d.jpg -loglevel quiet".format(
video_source_path, video, fps
)
)
images = os.listdir(framepath)
for image in images:
image_path = framepath + "/" + image
store(image_path)
remove_old_images("./frames")
imagestore = np.array(imagestore)
# Rearrange to (256, 256, number_of_frames)
imagestore = np.transpose(imagestore, (1, 2, 0))
# Normalize
# imagestore=(imagestore-imagestore.mean())/(imagestore.std())
# imagestore=(imagestore-np.min(imagestore))/(np.max(imagestore)-np.min(imagestore))
# imagestore=imagestore/255.0
# Clip negative Values
# imagestore=np.clip(imagestore,0,1)
plt.imshow(imagestore[:, :, 0])
np.save("trainer.npy", imagestore)
# Remove Buffer Directory
os.system("rm -r {}".format(framepath))
print("Program ended. Please wait while trainer.npy is created. \nRefresh when needed")
print("Number of frames created :", int(len(imagestore)))
import cv2
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from PIL import Image
from skimage import data, color
# Load YOLOv3 model
yolo_model = cv2.dnn.readNetFromDarknet(
"/kaggle/input/yolov3/yolov3rgb.cfg", "/kaggle/input/yolov3/yolov3.weights"
)
# Set input and output layer names
layer_names = yolo_model.getLayerNames()
# Getting only output layers' names that we need from YOLO algorithm
output_layers = [layer_names[i - 1] for i in yolo_model.getUnconnectedOutLayers()]
# Load image
I = imagestore[:, :, 200:202]
I = np.transpose(I, (2, 0, 1))
# Resize image for better processing
# image = cv2.resize(image, None, fx=0.4, fy=0.4)
def yolo(I):
image = I.astype(np.float32)
image = np.stack((image,) * 3, axis=-1)
print(image.shape)
# Convert image to blob format
blob = cv2.dnn.blobFromImages(image, 1, (416, 416), swapRB=True, crop=False)
# Pass blob through the network to detect objects
yolo_model.setInput(blob)
layer_outputs = yolo_model.forward(output_layers)
# Get bounding box coordinates and confidence scores for each detection
images = []
boxes = []
confidences = []
class_ids = []
group = [
{"boxes": [], "confidences": [], "class_ids": []} for i in range(image.shape[0])
]
for output in layer_outputs:
for i, out in enumerate(output):
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
# print(detection)
if class_id == 0 and confidence > 0.5: # 0 is the class ID for person
# Get bounding box coordinates
center_x = int(detection[0] * image[i].shape[1])
center_y = int(detection[1] * image[i].shape[0])
width = int(detection[2] * image[i].shape[1])
height = int(detection[3] * image[i].shape[0])
left = int(center_x - width / 2)
top = int(center_y - height / 2)
# Save bounding box coordinates and confidence score
group[i]["boxes"].append([left, top, width, height])
group[i]["confidences"].append(float(confidence))
group[i]["class_ids"].append(class_id)
# Apply non-maximum suppression to eliminate overlapping bounding boxes
for j, d in enumerate(group):
b = d["boxes"]
c = d["confidences"]
indices = cv2.dnn.NMSBoxes(b, c, 0.5, 0.4)
# Create a figure and add the image and bounding box to it
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(111, aspect="equal")
# ax = fig.add_subplot(111)
ax.imshow(image[j])
ax.axis("off")
# Draw bounding box around detected person
for i in indices:
left, top, width, height = b[i]
right = left + width
bottom = top + height
ax.add_patch(
plt.Rectangle(
(left, top), width, height, fill=False, edgecolor="red", linewidth=2
)
)
# Remove padding and save the figure as a numpy array
plt.tight_layout(pad=0)
ax.margins(0)
canvas = plt.gca().figure.canvas
canvas.draw()
w, h = fig.canvas.get_width_height()
image_np = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(
(h, w, 3)
)
# plt.show()
plt.close()
images.append(image_np)
return np.array(images)
img = yolo(I) # INPUT DIM: (256x256)
img.shape
# DATA
bunch_size = 10
AUTO = tf.data.AUTOTUNE
INPUT_SHAPE = (256, 256, bunch_size, 1)
# OPTIMIZER
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 1e-5
# TRAINING
EPOCHS = 60
# TUBELET EMBEDDING
PATCH_SIZE = (4, 4, 4)
NUM_PATCHES = (INPUT_SHAPE[0] // PATCH_SIZE[0]) ** 2
# ViViT ARCHITECTURE
LAYER_NORM_EPS = 1e-6
PROJECTION_DIM = 2
NUM_HEADS = 2
NUM_LAYERS = 2
NUM_CLASSES = 4 * 4 * bunch_size * 32
class TubeletEmbedding(layers.Layer):
def __init__(self, embed_dim, patch_size, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.patch_size = patch_size
self.projection = layers.Conv3D(
filters=self.embed_dim,
kernel_size=self.patch_size,
strides=self.patch_size,
padding="VALID",
)
self.flatten = layers.Reshape(target_shape=(-1, self.embed_dim))
def get_config(self):
config = super().get_config().copy()
config.update(
{
"embed_dim": self.embed_dim,
"patch_size": self.patch_size,
}
)
return config
def call(self, videos):
projected_patches = self.projection(videos)
flattened_patches = self.flatten(projected_patches)
return flattened_patches
class PositionalEncoder(layers.Layer):
def __init__(self, embed_dim, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
def get_config(self):
config = super().get_config().copy()
config.update(
{
"embed_dim": self.embed_dim,
}
)
return config
def build(self, input_shape):
_, num_tokens, _ = input_shape
self.position_embedding = layers.Embedding(
input_dim=num_tokens, output_dim=self.embed_dim
)
self.positions = tf.range(start=0, limit=num_tokens, delta=1)
def call(self, encoded_tokens):
# Encode the positions and add it to the encoded tokens
encoded_positions = self.position_embedding(self.positions)
encoded_tokens = encoded_tokens + encoded_positions
return encoded_tokens
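# Quick standalone shape check for the two layers above (a sketch on random data only):
# with 16x16x10 feature maps and 4x4x4 tubelets this should yield (1, 32, PROJECTION_DIM)
# tokens before and after positional encoding.
_dummy = tf.random.normal((1, 16, 16, bunch_size, 32))
_tokens = TubeletEmbedding(embed_dim=PROJECTION_DIM, patch_size=PATCH_SIZE)(_dummy)
_encoded = PositionalEncoder(embed_dim=PROJECTION_DIM)(_tokens)
print(_tokens.shape, _encoded.shape)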
class ViViT(keras.layers.Layer):
def __init__(
self,
tubelet_embedder,
positional_encoder,
transformer_layers=NUM_LAYERS,
num_heads=NUM_HEADS,
embed_dim=PROJECTION_DIM,
layer_norm_eps=LAYER_NORM_EPS,
num_classes=NUM_CLASSES,
**kwargs
):
super().__init__(**kwargs)
self.tubelet_embedder = tubelet_embedder
self.positional_encoder = positional_encoder
self.transformer_layers = transformer_layers
self.num_heads = num_heads
self.embed_dim = embed_dim
self.layer_norm_eps = layer_norm_eps
self.num_classes = num_classes
def get_config(self):
config = super().get_config().copy()
config.update(
{
"tubelet_embedder": self.tubelet_embedder,
"positional_encoder": self.positional_encoder,
"transformer_layers": self.transformer_layers,
"num_heads": self.num_heads,
"embed_dim": self.embed_dim,
"layer_norm_eps": self.layer_norm_eps,
"num_classes": self.num_classes,
}
)
return config
def build(self, inputs):
self.dense = layers.Dense(units=self.num_classes, activation="sigmoid")
self.layerNorm2 = layers.LayerNormalization(epsilon=self.layer_norm_eps)
self.layerNorm1 = layers.LayerNormalization(epsilon=1e-6)
self.pool = layers.GlobalAvgPool1D()
self.sequential = keras.Sequential(
[
layers.Dense(units=self.embed_dim * 4, activation=tf.nn.gelu),
layers.Dense(units=self.embed_dim, activation=tf.nn.gelu),
]
)
self.mha = layers.MultiHeadAttention(
num_heads=self.num_heads,
key_dim=self.embed_dim // self.num_heads,
dropout=0.1,
)
self.reshape = layers.Reshape(target_shape=(4, 4, bunch_size, 32))
def call(self, inputs):
# Create patches.
patches = self.tubelet_embedder(inputs)
# Encode patches.
encoded_patches = self.positional_encoder(patches)
# Create multiple layers of the Transformer block.
for _ in range(self.transformer_layers):
# Layer normalization and MHSA
x1 = self.layerNorm1(encoded_patches)
attention_output = self.mha(x1, x1)
# Skip connection
x2 = layers.Add()([attention_output, encoded_patches])
# Layer Normalization and MLP
x3 = self.layerNorm1(x2)
x3 = self.sequential(x3)
# Skip connection
encoded_patches = layers.Add()([x3, x2])
# Layer normalization and Global average pooling.
representation = self.layerNorm2(encoded_patches)
representation = self.pool(representation)
# Classify outputs.
outputs = self.dense(representation)
outputs = self.reshape(outputs)
return outputs
""" The following load_model function code has been taken from
Abnormal Event Detection in Videos using Spatiotemporal Autoencoder
by Yong Shean Chong Yong Haur Tay
Lee Kong Chian Faculty of Engineering Science, Universiti Tunku Abdul Rahman, 43000 Kajang, Malaysia.
Its main purpose is to help us generate the anomaly detector model.
"""
# load_model starts here :----------------------------------------------------
def load_model():
"""
Return the model used for abnormal event
detection in videos using spatiotemporal autoencoder
"""
model = Sequential()
model.add(
Conv3D(
filters=8,
kernel_size=(3, 3, 1),
strides=(2, 2, 1),
padding="same",
input_shape=(256, 256, bunch_size, 1),
activation="relu",
)
)
model.add(
Conv3D(
filters=16,
kernel_size=(5, 5, 1),
strides=(2, 2, 1),
padding="same",
activation="relu",
)
)
model.add(
Conv3D(
filters=32,
kernel_size=(7, 7, 1),
strides=(4, 4, 1),
padding="same",
activation="relu",
)
)
model.add(
ViViT(
tubelet_embedder=TubeletEmbedding(
embed_dim=PROJECTION_DIM, patch_size=PATCH_SIZE
),
positional_encoder=PositionalEncoder(embed_dim=PROJECTION_DIM),
)
)
model.add(
Conv3DTranspose(
filters=32,
kernel_size=(7, 7, 1),
strides=(4, 4, 1),
padding="same",
activation="relu",
)
)
model.add(
Conv3DTranspose(
filters=16,
kernel_size=(5, 5, 1),
strides=(4, 4, 1),
padding="same",
activation="relu",
)
)
model.add(
Conv3DTranspose(
filters=8,
kernel_size=(3, 3, 1),
strides=(2, 2, 1),
padding="same",
activation="relu",
)
)
model.add(
Conv3DTranspose(
filters=1,
kernel_size=(3, 3, 1),
strides=(2, 2, 1),
padding="same",
activation="sigmoid",
)
)
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["accuracy"])
return model
# load_model ends here :----------------------------------------------------
X_train = np.load("trainer.npy")
# X_train=(X_train-X_train.mean())/(X_train.std())
frames = X_train.shape[2]
# Make the number of frames divisible by bunch_size (10) so they can be grouped into equal bunches
plt.imshow(X_train[:, :, 0])
frames = frames - frames % bunch_size
print(X_train.shape)
X_train = X_train[:, :, :frames]
X_train = X_train.reshape(256, 256, -1, bunch_size)
X_train = X_train.transpose((2, 0, 1, 3))
X_train = np.expand_dims(X_train, axis=4)
Y_train = X_train.copy()
print(X_train.shape)
plt.imshow(X_train[0, :, :, 0, 0])
epochs = 100
batch_size = 1
if __name__ == "__main__":
model = load_model()
model.summary()
config = model.get_config()
    # monitor the training loss: no validation data is passed to fit(), so "val_loss" would never be available
    callback_save = ModelCheckpoint("AnomalyDetector.h5", monitor="loss")
    callback_early_stopping = EarlyStopping(monitor="loss", patience=3)
print("Trainer has been loaded")
model.fit(
X_train,
Y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=[callback_save, callback_early_stopping],
)
print(
"Done\n Please wait while AnomalyDetector.h5 has been created \nRefresh when needed"
)
a = model.predict(X_train[:21])
plt.imshow(X_train[20, :, :, 10 % bunch_size, 0])
plt.imshow(a[20, :, :, 10 % bunch_size, 0])
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import glob
import os
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
import argparse
from PIL import Image
imagestore = []
video_source_path = "/kaggle/input/avenue-dataset/Avenue Dataset/testing_videos"
# fps (defined above for the training videos) is the frame-extraction rate passed to ffmpeg: fps=3 keeps 3 frames per second.
def create_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def remove_old_images(path):
filelist = glob.glob(os.path.join(path, "*.jpg"))
for f in filelist:
os.remove(f)
def store(image_path):
img_rgb = Image.open(image_path)
img_gray = img_rgb.convert("L")
img = np.array(img_gray)
img = resize(img, (256, 256))
imagestore.append(img)
# List of all Videos in the Source Directory.
videos = os.listdir(video_source_path)
print("Found ", len(videos), " testing videos")
# Make a temp dir to store all the frames
create_dir("./frames")
# Remove old images
remove_old_images("./frames")
framepath = "./frames"
total = 0
video_count = 0
for video in videos:
video_count += 1
print("Video number: ", video_count)
print("Video:", str(video))
image_count = 0
os.system(
"ffmpeg -i '{}/{}' -vf fps={} ./frames/%04d.jpg -loglevel quiet".format(
video_source_path, video, fps
)
)
images = os.listdir(framepath)
image_count = len(images)
for image in images:
image_path = framepath + "/" + image
store(image_path)
total = len(images) + total
print("Number of images:", image_count, "\n----------\n")
remove_old_images("./frames")
imagestore = np.array(imagestore)
imagestore = np.transpose(imagestore, (1, 2, 0))
# Normalize
# imagestore=(imagestore-imagestore.mean())/(imagestore.std())
# imagestore=(imagestore-np.min(imagestore))/(np.max(imagestore)-np.min(imagestore))
# Clip negative Values
# imagestore=np.clip(imagestore,0,1)
np.save("./tester.npy", imagestore)
# Remove Buffer Directory
os.system("rm -r {}".format(framepath))
print(
"Program ended. All testing videos shall be stored in tester.npy \n Please wait while tester.npy is created. \nRefresh when needed"
)
print("Number of frames created :", int(total))
print("Number of bunches=", int(total), "/10 = ", int(total / 10))
print("\nCorrupted and unreadable bunches were ignored")
from keras.models import load_model
import numpy as np
def mean_squared_loss(x1, x2):
"""Compute Euclidean Distance Loss between
input frame and the reconstructed frame"""
diff = x1 - x2
a, b, c, d, e = diff.shape
n_samples = a * b * c * d * e
sq_diff = diff**2
Sum = sq_diff.sum()
dist = np.sqrt(Sum)
mean_dist = dist / n_samples
return mean_dist
"""Define threshold for Sensitivity
Lower the Threshhold,higher the chances that a bunch of frames will be flagged as Anomalous.
"""
# threshold=0.0004 #(Accuracy level 1)
# threshold=0.00042 #(Accuracy level 2)
threshold = 0.0008 # (Accuracy level Vishakha)
threshold = 0.0002
model = load_model(
"./AnomalyDetector.h5",
custom_objects={
"ViViT": ViViT,
"TubeletEmbedding": TubeletEmbedding,
"PositionalEncoder": PositionalEncoder,
},
)
X_test = np.load("./tester.npy")
frames = X_test.shape[2]
# Need to make number of frames divisible by 10
flag = 0  # Overall video flag
frames = frames - frames % bunch_size
X_test = X_test[:, :, :frames]
X_test = X_test.reshape(256, 256, -1, bunch_size)
X_test = X_test.transpose((2, 0, 1, 3))
X_test = np.expand_dims(X_test, axis=4)
X_test.shape
import numpy as np
import io
import base64
import imageio
from IPython.display import display
def create_gif(bunch):
# create a list of numpy arrays, each representing a single frame
frames = [bunch[i, :, :, :] for i in range(bunch.shape[0])]
# create a buffer to store the gif file
gif_buffer = io.BytesIO()
# set the frame rate to 5 frames per second
frame_rate = 5
# calculate the duration of each frame based on the frame rate
frame_duration = 1.0 / frame_rate
# save the frames as a gif with the desired frame rate and duration
imageio.mimsave(gif_buffer, frames, format="gif", duration=frame_duration)
# encode the gif file in base64
gif_bytes = base64.b64encode(gif_buffer.getvalue()).decode("ascii")
# display the gif file in the notebook
display({"image/gif": gif_bytes}, raw=True)
create_gif(yolo(np.transpose(X_train[20, :, :, :, 0], (2, 0, 1))))
counter = 0
threshold = 0.00013
for number, bunch in enumerate(X_test):
n_bunch = np.expand_dims(bunch, axis=0)
reconstructed_bunch = model.predict(n_bunch)
loss = mean_squared_loss(n_bunch, reconstructed_bunch)
if loss > threshold:
print("Anomalous bunch of frames at bunch number {}".format(number))
counter = counter + 1
print("bunch number: ", counter)
flag = 1
create_gif(yolo(np.transpose(bunch[:, :, :, 0], (2, 0, 1))))
else:
print("No anomaly")
counter = counter + 1
print("bunch number: ", counter)
if flag == 1:
print("Anomalous Events detected")
else:
print("No anomaly detected")
print("\nCorrupted and unreadable bunches were ignored")
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
Dropout,
concatenate,
Conv2DTranspose,
UpSampling2D,
Dropout,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras.metrics import MeanIoU
from tensorflow.keras.utils import plot_model
from tensorflow.keras.metrics import Precision, Recall
import os
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
# # Brain Scans Segmentation
# ## Reading Data
train_dir = "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TRAIN"
train_mask_dir = (
"/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TRAIN_masks"
)
test_dir = "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TEST"
test_mask_dir = (
"/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TEST_masks"
)
def read_resize_img(img_path, img_size):
img = cv2.imread(img_path)
img = cv2.resize(img, img_size)
return img
def load_image(img_dir, mask_dir):
X = []
y = []
for img_name in tqdm(os.listdir(img_dir)):
img_path = os.path.join(img_dir, img_name)
mask_path = os.path.join(mask_dir, img_name.replace(".jpg", ".png"))
img = read_resize_img(img_path, (128, 128))[:, :, 0]
mask = read_resize_img(mask_path, (128, 128))[:, :, 0]
X.append(img)
y.append(mask)
return np.array(X).reshape(-1, 128, 128, 1), np.array(y) / 255
X_train, y_train = load_image(train_dir, train_mask_dir)
X_test, y_test = load_image(test_dir, test_mask_dir)
# ## Data Visualization
for i in range(5):
fig, ax = plt.subplots(1, 3, figsize=(10, 8))
cmap = ListedColormap(["black", "red"])
ax[0].imshow(X_train[i], cmap="gray")
ax[0].set_title("Original Scan")
ax[1].imshow(y_train[i], cmap="gray")
ax[1].set_title("Actual Mask")
ax[2].imshow(X_train[i], cmap="gray")
ax[2].imshow(y_train[i], alpha=0.5, cmap=cmap)
ax[2].set_title("Scan with Actual Mask")
plt.tight_layout()
plt.show()
# ## Modelling
def UNET(input_shape):
# Define the U-Net model
inputs = Input(input_shape)
# Downsample path
conv1 = Conv2D(64, 3, activation="relu", padding="same")(inputs)
conv1 = Conv2D(64, 3, activation="relu", padding="same")(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(pool1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(pool2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation="relu", padding="same")(pool3)
conv4 = Conv2D(512, 3, activation="relu", padding="same")(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# Bottleneck
conv5 = Conv2D(1024, 3, activation="relu", padding="same")(pool4)
conv5 = Conv2D(1024, 3, activation="relu", padding="same")(conv5)
drop5 = Dropout(0.5)(conv5)
# Upsample path
up6 = UpSampling2D(size=(2, 2))(drop5)
up6 = Conv2D(512, 2, activation="relu", padding="same")(up6)
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation="relu", padding="same")(merge6)
conv6 = Conv2D(512, 3, activation="relu", padding="same")(conv6)
up7 = UpSampling2D(size=(2, 2))(conv6)
up7 = Conv2D(256, 2, activation="relu", padding="same")(up7)
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation="relu", padding="same")(merge7)
conv7 = Conv2D(256, 3, activation="relu", padding="same")(conv7)
up8 = UpSampling2D(size=(2, 2))(conv7)
up8 = Conv2D(128, 2, activation="relu", padding="same")(up8)
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation="relu", padding="same")(merge8)
conv8 = Conv2D(128, 3, activation="relu", padding="same")(conv8)
up9 = UpSampling2D(size=(2, 2))(conv8)
up9 = Conv2D(64, 2, activation="relu", padding="same")(up9)
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation="relu", padding="same")(merge9)
conv9 = Conv2D(64, 3, activation="relu", padding="same")(conv9)
conv9 = Conv2D(1, 3, activation="relu", padding="same")(conv9)
# Output
output = Conv2D(1, 1, activation="sigmoid")(conv9)
# Define the model
model = Model(inputs=inputs, outputs=output)
return model
# Revised U-Net: this second definition overrides the one above and adds dropout in the decoder path
def UNET(input_shape):
    # Define the U-Net model
inputs = Input(input_shape)
# Downsample path
conv1 = Conv2D(64, 3, activation="relu", padding="same")(inputs)
conv1 = Conv2D(64, 3, activation="relu", padding="same")(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(pool1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(pool2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation="relu", padding="same")(pool3)
conv4 = Conv2D(512, 3, activation="relu", padding="same")(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# Bottleneck
conv5 = Conv2D(1024, 3, activation="relu", padding="same")(pool4)
conv5 = Conv2D(1024, 3, activation="relu", padding="same")(conv5)
drop5 = Dropout(0.5)(conv5)
# Upsample path
up6 = UpSampling2D(size=(2, 2))(drop5)
up6 = Conv2D(512, 2, activation="relu", padding="same")(up6)
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation="relu", padding="same")(merge6)
conv6 = Conv2D(512, 3, activation="relu", padding="same")(conv6)
drop6 = Dropout(0.5)(conv6)
up7 = UpSampling2D(size=(2, 2))(drop6)
up7 = Conv2D(256, 2, activation="relu", padding="same")(up7)
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation="relu", padding="same")(merge7)
conv7 = Conv2D(256, 3, activation="relu", padding="same")(conv7)
drop7 = Dropout(0.5)(conv7)
up8 = UpSampling2D(size=(2, 2))(drop7)
up8 = Conv2D(128, 2, activation="relu", padding="same")(up8)
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation="relu", padding="same")(merge8)
conv8 = Conv2D(128, 3, activation="relu", padding="same")(conv8)
drop8 = Dropout(0.5)(conv8)
up9 = UpSampling2D(size=(2, 2))(drop8)
up9 = Conv2D(64, 2, activation="relu", padding="same")(up9)
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation="relu", padding="same")(merge9)
conv9 = Conv2D(64, 3, activation="relu", padding="same")(conv9)
conv9 = Conv2D(1, 3, activation="relu", padding="same")(conv9)
# Output
output = Conv2D(1, 1, activation="sigmoid")(conv9)
# Define the model
model = Model(inputs=inputs, outputs=output)
return model
# define function to calculate the dice coefficient
def dice_coef(y_true, y_pred):
smooth = 1
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
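# A dice-based loss could be used instead of (or blended with) binary crossentropy; a minimal
# sketch built on dice_coef above (not wired into the compile call below):
def dice_loss(y_true, y_pred):
    return 1.0 - dice_coef(y_true, y_pred)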
try:
del brain_unet_model
except:
print("model is not defined")
brain_unet_model = UNET(X_train.shape[1:])
# compile model with Adam optimizer and binary crossentropy as loss function
brain_unet_model.compile(
optimizer=Adam(learning_rate=0.0001),
loss="binary_crossentropy",
metrics=[dice_coef],
)
unet_brian_history = brain_unet_model.fit(
X_train, y_train, epochs=320, validation_data=(X_test, y_test)
)
loss, dice = brain_unet_model.evaluate(X_train, y_train, verbose=0)
print("Train Loss =", loss)
print("Train Dice Coef. =", dice)
loss, dice = brain_unet_model.evaluate(X_test, y_test, verbose=0)
y_pred = brain_unet_model.predict(X_test, verbose=0)
y_pred = np.where(y_pred > 0.5, 1, 0)
precision = Precision()(y_test, y_pred)
recall = Recall()(y_test, y_pred)
# Calculate mean IoU
miou = MeanIoU(num_classes=2)
miou.update_state(y_test, y_pred)
mean_iou = miou.result().numpy()
print("Test Loss =", loss)
print("Test Dice Coef. =", dice)
print("Test Mean IoU: {:.4f}".format(mean_iou))
print("Test Precision: {:.4f}".format(precision))
print("Test Recall: {:.4f}".format(recall))
brain_unet_model.save("./BRAIN-UNET-320epochs.h5")
# Plot the training and validation metrics for the first 100 epochs
train_loss = unet_brian_history.history["loss"][:100]
val_loss = unet_brian_history.history["val_loss"][:100]
train_dice = unet_brian_history.history["dice_coef"][:100]
val_dice = unet_brian_history.history["val_dice_coef"][:100]
epochs = range(1, len(train_loss[:100]) + 1)
plt.figure(figsize=(20, 5))
plt.subplot(1, 3, 1)
plt.plot(epochs, train_loss, label="Training loss")
plt.plot(epochs, val_loss, label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.subplot(1, 3, 2)
plt.plot(epochs, train_dice, label="Training dice")
plt.plot(epochs, val_dice, label="Validation dice")
plt.title("Training and validation dice")
plt.xlabel("Epochs")
plt.ylabel("Dice coefficient")
plt.legend()
plt.show()
# ## Visualizing Results
idx_list = []
for i in range(5):
idx = np.random.randint(len(X_test))
idx_list.append(idx)
fig, ax = plt.subplots(1, 5, figsize=(10, 8))
mask_pred = np.squeeze(
brain_unet_model.predict(np.expand_dims(X_test[idx], axis=0), verbose=0), axis=0
)
mask_pred = np.where(mask_pred > 0.5, 1, 0)
x, y, w, h = cv2.boundingRect(mask_pred.astype(np.uint8))
cmap = ListedColormap(["black", "red"])
ax[0].imshow(X_test[idx], cmap="gray")
ax[0].set_title("Original Scan")
ax[1].imshow(y_test[idx], cmap="gray")
ax[1].set_title("Actual Mask")
ax[2].imshow(mask_pred, cmap="gray")
ax[2].set_title("Pred. Mask")
ax[3].imshow(X_test[idx], cmap="gray")
ax[3].imshow(y_test[idx], alpha=0.5, cmap=cmap)
ax[3].set_title("Scan with Actual Mask")
ax[4].imshow(X_test[idx], cmap="gray")
ax[4].imshow(mask_pred, alpha=0.5, cmap=cmap)
ax[4].set_title("Scan with Pred. Mask")
plt.tight_layout()
plt.show()
print("Predicted Tumor width in pixels : {}".format(w))
print("Predicted Tumor height in pixels : {}".format(h))
idx_list = []
for i in range(20):
idx = np.random.randint(len(X_test))
idx_list.append(idx)
fig, ax = plt.subplots(1, 5, figsize=(10, 8))
mask_pred = np.squeeze(
brain_unet_model.predict(np.expand_dims(X_test[idx], axis=0), verbose=0), axis=0
)
mask_pred = np.where(mask_pred > 0.5, 1, 0)
x, y, w, h = cv2.boundingRect(mask_pred.astype(np.uint8))
cmap = ListedColormap(["black", "red"])
ax[0].imshow(X_test[idx], cmap="gray")
ax[0].set_title("Original Scan")
ax[1].imshow(y_test[idx], cmap="gray")
ax[1].set_title("Actual Mask")
ax[2].imshow(mask_pred, cmap="gray")
ax[2].set_title("Pred. Mask")
ax[3].imshow(X_test[idx], cmap="gray")
ax[3].imshow(y_test[idx], alpha=0.5, cmap=cmap)
ax[3].set_title("Scan with Actual Mask")
ax[4].imshow(X_test[idx], cmap="gray")
ax[4].imshow(mask_pred, alpha=0.5, cmap=cmap)
ax[4].set_title("Scan with Pred. Mask")
plt.tight_layout()
plt.show()
print("Predicted Tumor width in pixels : {}".format(w))
print("Predicted Tumor height in pixels : {}".format(h))
|
import numpy as np
import pandas as pd
train_original = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/train_data.csv",
parse_dates=["date"],
)
test_original = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/test_data.csv",
parse_dates=["date"],
)
sample_original = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/sample_sub.csv"
)
# # S15
tr = train_original.copy()
ts = test_original.copy()
sample = sample_original.copy()
tr.fillna(method="bfill", inplace=True)
train = tr.drop(tr.tail(373).index)
train["precipprob"] = train["precipprob"].astype(bool)
train["month"] = train["date"].dt.month
train["dayofyear"] = train["date"].dt.day_of_year
ts["month"] = ts["date"].dt.month
ts["dayofyear"] = ts["date"].dt.day_of_year
train.drop(
["sunrise", "sunset", "date", "windspeed", "visibility", "precip"],
axis=1,
inplace=True,
)
# drop in place: assigning the result of an inplace drop would only store None (test is set from ts below)
ts.drop(
    ["sunrise", "sunset", "date", "windspeed", "visibility", "precip"],
    axis=1,
    inplace=True,
)
import xgboost as xgb
# separate the target variables from the train data
y_train = train[["max_feels_like", "min_feels_like"]]
X_train = train.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
test = ts
X_test = ts
# fit the model
model = xgb.XGBRegressor()
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["max_feels_like"] = y_pred[:, 0]
sample["min_feels_like"] = y_pred[:, 1]
# sample.to_csv('s15.csv', index = False)
s15 = sample.copy()
s15.head()
# # S29
tr = train_original.copy()
ts = test_original.copy()
sample = sample_original.copy()
train = tr
test = ts
train.fillna(method="bfill", inplace=True)
eval = tr.tail(373)
train = train.drop(tr.tail(373).index)
train["precipprob"] = train["precipprob"].astype(bool)
test["precipprob"] = test["precipprob"].astype(bool)
train["month"] = train["date"].dt.month
train["dayofyear"] = train["date"].dt.day_of_year
test["month"] = test["date"].dt.month
test["dayofyear"] = test["date"].dt.day_of_year
def wind_direction(winddir):
if winddir >= 337.5 or winddir < 22.5:
return "N"
elif winddir >= 22.5 and winddir < 67.5:
return "NE"
elif winddir >= 67.5 and winddir < 112.5:
return "E"
elif winddir >= 112.5 and winddir < 157.5:
return "SE"
elif winddir >= 157.5 and winddir < 202.5:
return "S"
elif winddir >= 202.5 and winddir < 247.5:
return "SW"
elif winddir >= 247.5 and winddir < 292.5:
return "W"
else:
return "NW"
train["winddir_"] = train["winddir"].apply(wind_direction)
test["winddir_"] = test["winddir"].apply(wind_direction)
train = pd.get_dummies(train, columns=["winddir_"], prefix="", prefix_sep="")
train.drop("SW", axis=1, inplace=True)
test = pd.get_dummies(test, columns=["winddir_"], prefix="", prefix_sep="")
train["precip"] = train["precip"].astype(bool)
test["precip"] = test["precip"].astype(bool)
train.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1, inplace=True
)
test = test.drop(["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1)
import xgboost as xgb
# separate the target variables from the train data
y_train = train["max_feels_like"]
X_train = train.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test
# fit the model
model = xgb.XGBRegressor()
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["max_feels_like"] = y_pred[:]
import xgboost as xgb
# separate the target variables from the train data
y_train = train["min_feels_like"]
X_train = train.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test
# fit the model
model = xgb.XGBRegressor()
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["min_feels_like"] = y_pred[:]
s29 = sample.copy()
s29.head()
# # S32
from sklearn.metrics import mean_absolute_error
print(mean_absolute_error(s15["min_feels_like"], s29["min_feels_like"]))
print(mean_absolute_error(s15["max_feels_like"], s29["max_feels_like"]))
s32 = 0.6 * s29 + 0.4 * s15
s32["ID"] = s15.ID
s32.head()
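# Equivalent blend that touches only the prediction columns (a sketch; it avoids multiplying the
# ID column and then restoring it):
s32_alt = s15.copy()
s32_alt[["max_feels_like", "min_feels_like"]] = (
    0.6 * s29[["max_feels_like", "min_feels_like"]] + 0.4 * s15[["max_feels_like", "min_feels_like"]]
)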
# # S33
tr = train_original.copy()
ts = test_original.copy()
sample = sample_original.copy()
# **MAX**
train_max = tr
test_max = ts
train_max.fillna(method="bfill", inplace=True)
eval = tr.tail(373)
train_max = train_max.drop(tr.tail(373).index)
train_max["precipprob"] = train_max["precipprob"].astype(bool)
test_max["precipprob"] = test_max["precipprob"].astype(bool)
train_max["month"] = train_max["date"].dt.month
train_max["dayofyear"] = train_max["date"].dt.day_of_year
test_max["month"] = test_max["date"].dt.month
test_max["dayofyear"] = test_max["date"].dt.day_of_year
def wind_direction(winddir):
if winddir >= 337.5 or winddir < 22.5:
return "N"
elif winddir >= 22.5 and winddir < 67.5:
return "NE"
elif winddir >= 67.5 and winddir < 112.5:
return "E"
elif winddir >= 112.5 and winddir < 157.5:
return "SE"
elif winddir >= 157.5 and winddir < 202.5:
return "S"
elif winddir >= 202.5 and winddir < 247.5:
return "SW"
elif winddir >= 247.5 and winddir < 292.5:
return "W"
else:
return "NW"
train_max["winddir_"] = train_max["winddir"].apply(wind_direction)
test_max["winddir_"] = test_max["winddir"].apply(wind_direction)
train_max = pd.get_dummies(train_max, columns=["winddir_"], prefix="", prefix_sep="")
train_max.drop("SW", axis=1, inplace=True)
test_max = pd.get_dummies(test_max, columns=["winddir_"], prefix="", prefix_sep="")
train_max["precip"] = train_max["precip"].astype(bool)
test_max["precip"] = test_max["precip"].astype(bool)
train_max.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1, inplace=True
)
test_max = test_max.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1
)
import xgboost as xgb
# separate the target variables from the train data
y_train = train_max["max_feels_like"]
X_train = train_max.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test_max
# fit the model
model = xgb.XGBRegressor()
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["max_feels_like"] = y_pred[:]
# **MIN**
train_min = tr
test_min = ts
train_min.fillna(method="bfill", inplace=True)
eval = tr.tail(373)
train_min = train_min.drop(tr.tail(373).index)
train_min["precipprob"] = train_min["precipprob"].astype(bool)
test_min["precipprob"] = test_min["precipprob"].astype(bool)
import datetime
train_min["date"] -= datetime.timedelta(days=1)
train_min["month"] = train_min["date"].dt.month
train_min["dayofyear"] = train_min["date"].dt.day_of_year
test_min["month"] = test_min["date"].dt.month
test_min["dayofyear"] = test_min["date"].dt.day_of_year
train_min["winddir_"] = train_min["winddir"].apply(wind_direction)
test_min["winddir_"] = test_min["winddir"].apply(wind_direction)
train_min = pd.get_dummies(train_min, columns=["winddir_"], prefix="", prefix_sep="")
train_min.drop("SW", axis=1, inplace=True)
test_min = pd.get_dummies(test_min, columns=["winddir_"], prefix="", prefix_sep="")
train_min["precip"] = train_min["precip"].astype(bool)
test_min["precip"] = test_min["precip"].astype(bool)
train_min.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1, inplace=True
)
test_min = test_min.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1
)
import xgboost as xgb
# separate the target variables from the train data
y_train = train_min["min_feels_like"]
X_train = train_min.drop(["min_feels_like", "max_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test_min
# fit the model
model = xgb.XGBRegressor()
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["min_feels_like"] = y_pred[:]
s33 = sample.copy()
s33.head()
# # S37
tr = train_original.copy()
ts = test_original.copy()
sample = sample_original.copy()
train = tr
test = ts
train.fillna(method="bfill", inplace=True)
eval = tr.tail(373)
train = train.drop(tr.tail(373).index)
train["precipprob"] = train["precipprob"].astype(bool)
test["precipprob"] = test["precipprob"].astype(bool)
train["month"] = train["date"].dt.month
train["dayofyear"] = train["date"].dt.day_of_year
test["month"] = test["date"].dt.month
test["dayofyear"] = test["date"].dt.day_of_year
def wind_direction(winddir):
if winddir >= 337.5 or winddir < 22.5:
return "N"
elif winddir >= 22.5 and winddir < 67.5:
return "NE"
elif winddir >= 67.5 and winddir < 112.5:
return "E"
elif winddir >= 112.5 and winddir < 157.5:
return "SE"
elif winddir >= 157.5 and winddir < 202.5:
return "S"
elif winddir >= 202.5 and winddir < 247.5:
return "SW"
elif winddir >= 247.5 and winddir < 292.5:
return "W"
else:
return "NW"
train["winddir_"] = train["winddir"].apply(wind_direction)
test["winddir_"] = test["winddir"].apply(wind_direction)
train = pd.get_dummies(train, columns=["winddir_"], prefix="", prefix_sep="")
train.drop("SW", axis=1, inplace=True)
test = pd.get_dummies(test, columns=["winddir_"], prefix="", prefix_sep="")
train["precip"] = train["precip"].astype(bool)
test["precip"] = test["precip"].astype(bool)
train.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1, inplace=True
)
test = test.drop(["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1)
from catboost import CatBoostRegressor
# separate the target variables from the train data
y_train = train["max_feels_like"]
X_train = train.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test
# fit the model
model = CatBoostRegressor(
iterations=1000,
learning_rate=0.1,
depth=6,
l2_leaf_reg=3,
random_seed=42,
loss_function="RMSE",
)
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["max_feels_like"] = y_pred[:]
sample.head()
from catboost import CatBoostRegressor
# separate the target variables from the train data
y_train = train["min_feels_like"]
X_train = train.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test
model = CatBoostRegressor(
iterations=1000,
learning_rate=0.1,
depth=6,
l2_leaf_reg=3,
random_seed=42,
loss_function="RMSE",
)
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["min_feels_like"] = y_pred[:]
s37 = sample.copy()
s37.head()
# # S59
tr = train_original.copy()
ts = test_original.copy()
sample = sample_original.copy()
# **MAX**
train_max = tr
test_max = ts
train_max.fillna(method="bfill", inplace=True)
eval = tr.tail(373)
train_max = train_max.drop(tr.tail(373).index)
train_max["precipprob"] = train_max["precipprob"].astype(bool)
test_max["precipprob"] = test_max["precipprob"].astype(bool)
train_max["month"] = train_max["date"].dt.month
train_max["dayofyear"] = train_max["date"].dt.day_of_year
test_max["month"] = test_max["date"].dt.month
test_max["dayofyear"] = test_max["date"].dt.day_of_year
def wind_direction(winddir):
if winddir >= 337.5 or winddir < 22.5:
return "N"
elif winddir >= 22.5 and winddir < 67.5:
return "NE"
elif winddir >= 67.5 and winddir < 112.5:
return "E"
elif winddir >= 112.5 and winddir < 157.5:
return "SE"
elif winddir >= 157.5 and winddir < 202.5:
return "S"
elif winddir >= 202.5 and winddir < 247.5:
return "SW"
elif winddir >= 247.5 and winddir < 292.5:
return "W"
else:
return "NW"
train_max["winddir_"] = train_max["winddir"].apply(wind_direction)
test_max["winddir_"] = test_max["winddir"].apply(wind_direction)
train_max = pd.get_dummies(train_max, columns=["winddir_"], prefix="", prefix_sep="")
train_max.drop("SW", axis=1, inplace=True)
test_max = pd.get_dummies(test_max, columns=["winddir_"], prefix="", prefix_sep="")
train_max["precip"] = train_max["precip"].astype(bool)
test_max["precip"] = test_max["precip"].astype(bool)
train_max.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1, inplace=True
)
test_max = test_max.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1
)
from catboost import CatBoostRegressor
# separate the target variables from the train data
y_train = train_max["max_feels_like"]
X_train = train_max.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test_max
# fit the model
model = CatBoostRegressor(
iterations=1000,
learning_rate=0.1,
depth=6,
l2_leaf_reg=3,
random_seed=42,
loss_function="RMSE",
)
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["max_feels_like"] = y_pred[:]
sample.head()
# **MIN**
train_min = tr
test_min = ts
train_min.fillna(method="bfill", inplace=True)
eval = tr.tail(373)
train_min = train_min.drop(tr.tail(373).index)
train_min["precipprob"] = train_min["precipprob"].astype(bool)
test_min["precipprob"] = test_min["precipprob"].astype(bool)
train_min["date"].head()
import datetime
train_min["month"] = train_min["date"].dt.month
train_min["dayofyear"] = train_min["date"].dt.day_of_year
test_min["month"] = test_min["date"].dt.month
test_min["dayofyear"] = test_min["date"].dt.day_of_year
train_min["winddir_"] = train_min["winddir"].apply(wind_direction)
test_min["winddir_"] = test_min["winddir"].apply(wind_direction)
train_min = pd.get_dummies(train_min, columns=["winddir_"], prefix="", prefix_sep="")
train_min.drop("SW", axis=1, inplace=True)
test_min = pd.get_dummies(test_min, columns=["winddir_"], prefix="", prefix_sep="")
train_min["precip"] = train_min["precip"].astype(bool)
test_min["precip"] = test_min["precip"].astype(bool)
train_min.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1, inplace=True
)
test_min = test_min.drop(
["sunrise", "sunset", "date", "windspeed", "visibility"], axis=1
)
from catboost import CatBoostRegressor
# separate the target variables from the train data
y_train = train_min["min_feels_like"]
X_train = train_min.drop(["max_feels_like", "min_feels_like"], axis=1)
# separate the test data from the sample data
X_test = test_min
model = CatBoostRegressor(
iterations=700,
learning_rate=0.1,
depth=5,
l2_leaf_reg=3,
random_seed=42,
loss_function="RMSE",
)
model.fit(X_train, y_train)
# predict on the test data
y_pred = model.predict(X_test)
# add the predictions to the sample data
sample["min_feels_like"] = y_pred[:]
s59 = sample.copy()
s59.head()
# # NEW
print(mean_absolute_error(s59["min_feels_like"], s37["min_feels_like"]))
print(mean_absolute_error(s59["max_feels_like"], s37["max_feels_like"]))
new = s59 * 0.5 + s37 * 0.5
new["ID"] = s59["ID"]
# # Ensemble Modeling
# > Using Doleh's method
print(mean_absolute_error(new["min_feels_like"], s32["min_feels_like"]))
print(mean_absolute_error(new["max_feels_like"], s32["max_feels_like"]))
print("--------------")
print(mean_absolute_error(new["min_feels_like"], s33["min_feels_like"]))
print(mean_absolute_error(new["max_feels_like"], s33["max_feels_like"]))
print("--------------")
print(mean_absolute_error(s32["min_feels_like"], s33["min_feels_like"]))
print(mean_absolute_error(s32["max_feels_like"], s33["max_feels_like"]))
# We use: new, s32, s33
# Applying Doleh's Ensemble Method
predictions_df = pd.DataFrame(
{
"new": new["min_feels_like"],
"s32": s32["min_feels_like"],
"s33": s33["min_feels_like"],
}
)
# create an empty list to store the chosen predictions
preds = []
for _, row in predictions_df.iterrows():
# get the median of the row
row_median = row.median()
# get the mean of the row excluding the median
row_mean = row[row != row_median].mean()
# calculate the mean between the median and the mean
row_mean_median = (row_median + row_mean) / 2
preds.append(row_mean_median)
preds1 = pd.DataFrame()
preds1["ID"] = s33["ID"]
preds1["min_feels_like"] = preds
predictions_df2 = pd.DataFrame(
{
"new": new["max_feels_like"],
"s32": s32["max_feels_like"],
"s33": s33["max_feels_like"],
}
)
# create an empty list to store the chosen predictions
preds = []
for _, row in predictions_df2.iterrows():
# get the median of the row
row_median = row.median()
# get the mean of the row excluding the median
row_mean = row[row != row_median].mean()
# calculate the mean between the median and the mean
row_mean_median = (row_median + row_mean) / 2
preds.append(row_mean_median)
preds1["max_feels_like"] = preds
preds1.head()
preds1.to_csv("submission.csv", index=False)
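# The row-wise loops above can also be written with vectorized pandas operations.
# A minimal sketch, assuming the three-column frame `predictions_df` built above and
# rows whose three values are distinct (the loop drops every value equal to the median):
row_median = predictions_df.median(axis=1)
# with three distinct values, the mean of the two non-median ones is (sum - median) / 2
row_mean_rest = (predictions_df.sum(axis=1) - row_median) / 2
blended_min = (row_median + row_mean_rest) / 2  # same blend as the loop above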
|
# ### Entrance Hall
# In this kernel, I will be working with a surgery time prediction dataset and my main objective is to transform the data into
# a more useful format for machine learning models. To achieve this goal, I will focus on data preprocessing steps such as
# handling missing values, dealing with categorical data, and feature scaling. One of the main challenges in preprocessing this dataset
# is converting string and paragraph data into numerical values that can be used in machine learning algorithms.
# I will be using various Python libraries including Pandas, NumPy, and Scikit-learn to perform these transformations and
# create a cleaned and transformed dataset that can be used for further analysis and modeling. Ultimately, the goal of this project
# is to create a robust surgery time prediction model that can assist healthcare providers in improving patient outcomes and reducing overall healthcare costs.
# #### Import Section
# Let's quickly check the libraries that we will use in our kernel.
# - NumPy :
# Numpy is a Python library for numerical computation. It provides fast and efficient operations for arrays and matrices, including mathematical, logical,
# shape manipulation, sorting, selecting, I/O, discrete Fourier transforms, basic linear algebra, basic statistical operations, random simulation, and much more.
# - Pandas :
# Pandas, on the other hand, is a data manipulation library that provides easy-to-use data structures for data analysis. It's built on top of NumPy and
# provides tools for working with data from a variety of sources, including CSV, Excel, SQL databases, and web APIs. Pandas provides two main data structures: Series and DataFrame.
# Series are one-dimensional labeled arrays that can hold any data type, while DataFrames are two-dimensional labeled data structures with columns of potentially different types.
# Pandas also provides tools for grouping, pivoting, merging, reshaping, and transforming data, as well as handling missing or duplicate data.
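# A tiny illustrative sketch of the two structures described above (made-up values, not part of the original pipeline):
import pandas as pd
example_series = pd.Series([36.6, 37.1, 38.2], name="temperature")  # 1-D labeled array
example_frame = pd.DataFrame({"temperature": [36.6, 37.1], "pulse": [72, 80]})  # 2-D labeled table
print(example_series.shape, example_frame.shape)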
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Data set import section
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# #### Data Import Section
# CSV (Comma Separated Values) is a simple text-based file format that stores tabular data in plain text format, with each row representing
# a record and each column separated by a comma delimiter. The first row of a CSV file often contains the column names or headers.
# It is a popular file format for storing and exchanging data between different applications, as it is simple, lightweight,
# and widely supported by many software tools and programming languages. CSV files can be easily imported into spreadsheet software,
# databases, and other data analysis tools for further processing and analysis.
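# A minimal sketch of the format described above: a hypothetical two-column CSV parsed with pandas (illustrative only):
import io
import pandas as pd
example_csv = "PatientID,DurationMin\n1,45\n2,120\n"  # hypothetical header row and two records
print(pd.read_csv(io.StringIO(example_csv)))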
# Let's import our data into our kernel
data = pd.read_csv(
"/kaggle/input/prediction-of-surgery-duration/train.csv", encoding="iso8859_9"
)
test_data = pd.read_csv(
"/kaggle/input/prediction-of-surgery-duration/test.csv", encoding="iso8859_9"
)
data.head(2)
# Let's give a shot for what we have on our data set
data.info()
# Also we can check the statistical values on our data
data.describe().transpose()
# ### Data Transform Stage
# In machine learning, datasets may contain non-numeric data such as text or categorical values. However, most machine learning algorithms are designed to work with numerical data.
# Therefore, it's important to convert these non-numeric data into numerical data to make it possible for algorithms to learn from them. One common technique for transforming string data into
# integer values is called label encoding.
# Label encoding assigns a unique integer value to each category in a categorical feature.
# For example, if a feature has three categories:
# - red
# - green
# - blue
# label encoding will convert these values to integers: "red" = 0, "green" = 1, and "blue" = 2.
# However, it's important to note that label encoding does not imply any order or ranking between categories. If the categories have an inherent order, such as "low", "medium", and "high",
# then it's better to use ordinal encoding, which assigns a unique integer value to each category based on their order.
# Another technique for transforming string data is one-hot encoding, which creates a binary variable for each category in a categorical feature.
# For example, if a feature has three categories:
# - red
# - green
# - blue
# one-hot encoding will create three binary variables:
# - "red" = (1, 0, 0)
# - "green" = (0, 1, 0)
# - "blue" = (0, 0, 1)
# One-hot encoding is useful for categorical features with no inherent order or ranking. However, it can lead to high-dimensional data and cause the curse of dimensionality. Therefore, it's important to choose the appropriate encoding technique based on the characteristics of the categorical feature and the requirements of the machine learning algorithm.
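# A small sketch of the encodings described above on a toy column (illustrative values only, assuming scikit-learn is available):
import pandas as pd
from sklearn.preprocessing import LabelEncoder

colors = pd.Series(["red", "green", "blue", "green"])
# label encoding: one arbitrary integer per category (LabelEncoder orders them alphabetically)
print(LabelEncoder().fit_transform(colors))
# ordinal encoding: integers that respect an explicit order
print(pd.Series(["low", "high", "medium"]).map({"low": 0, "medium": 1, "high": 2}).tolist())
# one-hot encoding: one binary column per category
print(pd.get_dummies(colors))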
# I just copy the data set to protect the original one
sub_data = data.copy()
sub_data.head(2)
# #### Diagnostic Code Transformation
# ICD-10 codes are used to classify diseases, injuries, and other medical conditions. They do not necessarily indicate the level of danger associated with a condition. However, some conditions may be considered more severe than others. Here are some examples of ICD-10 codes that may be considered high danger to low danger:
# High danger:
# - I63.9 - Cerebral infarction, unspecified
# - S06.9 - Unspecified intracranial injury
# - G93.9 - Disorder of brain, unspecified
# - T79.4 - Traumatic shock
# Moderate danger:
# - J18.9 - Pneumonia, unspecified organism
# - I50.9 - Heart failure, unspecified
# - N17.9 - Acute kidney failure, unspecified
# - R09.82 - Postnasal drip
# Low danger:
# - R05 - Cough
# - R07.9 - Chest pain, unspecified
# - J06.9 - Acute upper respiratory infection, unspecified
# - R51 - Headache
# It's important to note that the severity of a medical condition can vary depending on the individual and other factors. This list is just a general guideline and should not be used as a substitute for medical advice.
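# One hypothetical way to turn the guideline above into a feature is a severity score per code.
# The mapping below is illustrative only and not medical advice.
icd10_severity_example = {
    "I63.9": 3, "S06.9": 3, "G93.9": 3, "T79.4": 3,  # high danger
    "J18.9": 2, "I50.9": 2, "N17.9": 2, "R09.82": 2,  # moderate danger
    "R05": 1, "R07.9": 1, "J06.9": 1, "R51": 1,  # low danger
}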
set_ICD10_unique = set()
list_len_ICD10 = []
list_spell_data = []
def DiagnosticICD10Code(values):
    # We build this as a try/except block to catch the null values.
try:
splited_value = values.split("'")
except AttributeError:
        # We have to pass the null values back to keep our data in order.
list_len_ICD10.append(np.nan)
list_spell_data.append(np.nan)
return values
# List to store the return values
return_str = ""
temp_list = []
    # If the value is not null, we have a bunch of split operations to do.
    # First we create a loop that targets the wanted values in the split data.
for index in range(1, len(splited_value), 2):
        # Take the wanted token from the split data.
val = splited_value[index]
# And add the values into our return list.
return_str += val + ","
set_ICD10_unique.add(val[0])
temp_list.append(val[0])
list_len_ICD10.append(len(temp_list))
list_spell_data.append(temp_list)
return return_str
# We transform our Diagnostic ICD10 Code column into a more usable form. The logic is a bit involved, so we handle it in an external function.
sub_data["DiagnosticICD10Code"] = data["DiagnosticICD10Code"].apply(DiagnosticICD10Code)
# Add the other column we derive from original one
sub_data["Lenght_ICD10_Code"] = list_len_ICD10
# Let's give a quick look for our data set
sub_data.head(2)
# #### Surgery Group Transformation
# In medical research, surgeries are often classified into different groups based on various factors such as the type of surgery, the complexity of the surgery,
# the area of the body being operated on, and the intended outcome of the surgery. The classification of surgeries into different groups allows for easier analysis
# and comparison of surgical outcomes and can aid in developing best practices for specific types of surgeries. For example, a study may compare the surgical outcomes of
# two different groups, such as minimally invasive surgery versus traditional open surgery, to determine which approach yields better results for a particular medical condition.
# Overall, grouping surgeries is a common practice in medical research and helps to inform clinical decision-making and improve patient outcomes.
set_SurgeyGroup_unique = set(["A1", "A2", "A3", "B", "C", "D", "E"])
def SurgeryGroup(values):
# Again we build the Null value trap to throw these back
try:
values.split(",")
# Don't forget the null values
except AttributeError:
return np.nan
# Quick fix for our str input
values = values[1:-1]
    # Check whether it includes a 0 or not
start_index = values.find("0")
if start_index + 1:
values = values[start_index + 3 : -1]
return_str = ""
    # If not, we can convert the string data into a more useful form
for val in values.split(","):
val = val.strip(" ").strip("'")
return_str += val + ","
return return_str
# Change our str NaN values to actual "np.nan" values. Otherwise our transformation function does not work properly
sub_data.replace("{0}", np.nan, inplace=True)
sub_data.info()
# Add the our fresh column into data set
sub_data["SurgeryGroup"] = data["SurgeryGroup"].apply(SurgeryGroup)
sub_data.replace(",", np.nan, inplace=True)
# Let's give a quick look for our data set
sub_data.head(2)
# #### Surgery Group One-Hot Encoding
list_Index = ["A1", "A2", "A3", "B", "C", "D", "E"]
def SurgeryGroup_OneHotEncoder(data_fream):
# Let's create the our storage lists
list_New_columns = [[], [], [], [], [], [], []]
    # Go through all the values in the column
for val in data_fream.SurgeryGroup.values:
        # Split the value and create a list for easy checking
val = str(val).split(",")
# Fast tour on our list
for index in range(len(list_Index)):
            # Check whether the value is NaN or not
if "nan" in val:
list_New_columns[index].append(np.nan)
            # Quick check whether this data point contains the value
elif list_Index[index] in val:
list_New_columns[index].append(1)
else:
list_New_columns[index].append(0)
return list_New_columns
# Get the result for our new columns
new_col_list = SurgeryGroup_OneHotEncoder(sub_data)
# A little for loop to go through our list
for index in range(len(list_Index)):
sub_data[list_Index[index]] = new_col_list[index]
# Quick check if its work or not
sub_data.head()
# We also have to check null values are protected or not
sub_data.info()
# #### Surgery Code Fourier Transformation
# The Fourier Transform is a mathematical technique used to transform a signal represented in the time domain into its frequency domain representation.
# This transformation is achieved by decomposing the signal into its constituent frequencies. By doing this, we can analyze the frequency components of the signal and extract useful information about it.
# The Fourier Transform has many applications in signal processing, image processing, audio processing, and data analysis. In data analysis,
# the Fourier Transform can be used for feature extraction and data compression. It is an essential tool in many areas of science and engineering.
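# A minimal NumPy sketch of the idea above: a signal made of a 2 Hz and a 5 Hz component,
# and the FFT recovering those two frequencies (illustrative only, unrelated to the columns below):
import numpy as np
t = np.linspace(0.0, 1.0, 200, endpoint=False)  # one second sampled at 200 Hz
signal = np.sin(2 * np.pi * 2 * t) + 0.5 * np.sin(2 * np.pi * 5 * t)
spectrum = np.abs(np.fft.rfft(signal))
freqs = np.fft.rfftfreq(t.size, d=t[1] - t[0])
print(freqs[spectrum.argsort()[-2:]])  # the two strongest frequencies: approximately [5. 2.]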
from math import cos, pi, tan, sin
# Create our frequency dictionary
dict_freq = {"A1": 1, "A2": 2, "A3": 3, "B": 4, "C": 5, "D": 6, "E": 7}
# Define our time constant
time_val = pi / 21
def SurgeryCode_Fourier(dataFream):
# Return list storage for our function
return_col_list = []
    # Quickly go through the values
for val in dataFream.SurgeryGroup.values:
# Assign the our sum variable
res_val = 0
# Little " try except " block for our NaN values
try:
list_val = val.split(",")[:-1]
except AttributeError:
res_val = np.nan
list_val = []
        # Go through the values of this data point
for freq_key in list_val:
            # Determine the frequency value with the help of our dictionary
freq = dict_freq[freq_key]
# Little calculation
res_val += 10 * cos(freq * time_val)
return_col_list.append(res_val)
return return_col_list
new_col = SurgeryCode_Fourier(sub_data)
sub_data["SurgeryGroup_Fourier"] = new_col
sub_data.head(2)
# #### ICD10 Code Fourier Transformation
# Define our time constant
time_val = pi / 21
def ICD10_Code_Fourier(dataFream):
# Return list storage for our function
return_list_col = []
    # Quickly go through the values
for val in dataFream.DiagnosticICD10Code.values:
# Assign the our sum variable
res_val = 0
# Little " try except " block for our NaN values
try:
list_val = val.split(",")[:-1]
except AttributeError:
res_val = np.nan
list_val = []
        # Go through the values of this data point
for str_val in list_val:
            # Determine the frequency values with a little math
val = ord(str_val[0:1]) - 64
freq = val / 4
freq_2 = int(str_val[1:3]) / 20
# Little calculation
res_val += 10 * cos(freq * time_val)
return_list_col.append(res_val)
return return_list_col
new_col = ICD10_Code_Fourier(sub_data)
sub_data["ICD10_Fourier"] = new_col
sub_data.head()
# #### Test Data Transformation
# ##### Diagnostic Code Transformation
# Reset the global set and lists before processing the test data.
set_ICD10_unique = set()
list_len_ICD10 = []
list_spell_data = []
# We transform the Diagnostic ICD10 Code column into a more usable form, reusing the external function defined above.
test_data["DiagnosticICD10Code"] = test_data["DiagnosticICD10Code"].apply(
DiagnosticICD10Code
)
# Add the other column we derive from original one
test_data["Lenght_ICD10_Code"] = list_len_ICD10
# ##### Surgery Group Transformation
# We change the 'formal NaN' value into an actual NaN value so that our algorithm works properly
test_data.replace("{0}", np.nan, inplace=True)
# Pass our data into the function to get a workable data set
test_data["SurgeryGroup"] = test_data["SurgeryGroup"].apply(SurgeryGroup)
test_data.head(2)
# ##### Surgery Group One-Hot Encoding
# We pass our data set into the function to get the fresh OHE columns back
new_col_list = SurgeryGroup_OneHotEncoder(test_data)
# A little for loop to go through our list
for index in range(len(list_Index)):
test_data[list_Index[index]] = new_col_list[index]
# ##### Surgery Code Fourier Transformation
# Quick call to our transformer function
new_col = SurgeryCode_Fourier(test_data)
# Let's add the our new column into our data
test_data["SurgeryGroup_Fourier"] = new_col
# ##### ICD10 Code Fourier Transformation
# Quick call to our transformer function
new_col = ICD10_Code_Fourier(test_data)
# Let's add the our new column into our data
test_data["ICD10_Fourier"] = new_col
test_data.info()
# Quick check for our data
test_data.head()
# #### Str to Int conversion on our data
list_Str_columns = ["AnesthesiaType", "Sex", "Service", "SurgeryName"]
def Str_to_Int():
for column in list_Str_columns:
# We find the keys
keys = pd.array(sub_data[column].unique()).dropna()
# Also create the values
values = range(len(keys))
        # Create the dictionary to use for the conversion
dict_transform = dict(zip(keys, values))
# Add the null value to not miss them (Actually, we don't need that much really but let's be in the safe zone)
dict_transform[np.nan] = np.nan
# Train data
sub_data[column] = sub_data[column].map(dict_transform)
# Test data
test_data[column] = test_data[column].map(dict_transform)
Str_to_Int()
# Quick check for our data
sub_data.head()
# Quick check for our data
test_data.head()
|
# * Dataframe is a 2D data structure
# * Behaves similar to how we store data in Excel
# * Data is aligned in tabular fashion
# * Mutable and can store heterogeneous data
import pandas as pd
# Creating DataFrame
# **using list**
l = ["Monica", "Chandler", "Ross", "Rachel", "Joe", "Phoebe"]
df = pd.DataFrame(l)
df
# **Using Dictionary**
d = {
"Name": ["Monica", "Chandler", "Ross", "Rachel", "Joe", "Phoebe"],
"Age": [26, 30, 32, 25, 28, 27],
"Occupation": [
"Chef",
"Something with data",
"Dinosaur",
"Fashion Advisor",
"Actor",
"Masseuse",
],
}
x = pd.DataFrame(d)
x
y = pd.read_csv("/kaggle/input/videogames-predictive-model/dato.csv")
y
y.head()
y.tail()
y.index
y.values
type(y)
y.shape
y.dtypes
y.dtypes.value_counts()
y.columns
y.axes
y.info()
y.describe()
# Customize Index
y.head(3)
y = pd.read_csv(
"/kaggle/input/videogames-predictive-model/dato.csv", index_col=["Platform"]
)
y
y["Wii"]
y["Global_Sales"]
y[["Global_Sales", "Rating"]]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import library for numerical computation
import pandas as pd
import numpy as np
import pylab as pl
# import library for visualisation
import matplotlib as pl
# import library for model
from sklearn.model_selection import train_test_split
import scipy.optimize as opt
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
# read the data
df = pd.read_csv(
"/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv"
)
df.smoking_status.unique()
# print the summary
df.describe()
# As can be seen from the data above, there are null values in the BMI column (shown as NaN); we need to replace them with the average BMI
# data preprocessing
# replacing nan value with the average of the columns
df["bmi"] = df["bmi"].fillna(df["bmi"].mean())
# replacing gender Male, Female and Other with 1, 0 and 3 (using .loc to avoid chained-assignment issues)
df.loc[df.gender == "Male", "gender"] = 1
df.loc[df.gender == "Female", "gender"] = 0
df.loc[df.gender == "Other", "gender"] = 3
# replacing smoking status with numerical values
df.loc[df.smoking_status == "never smoked", "smoking_status"] = 0
df.loc[df.smoking_status == "formerly smoked", "smoking_status"] = 1
df.loc[df.smoking_status == "smokes", "smoking_status"] = 2
df.loc[df.smoking_status == "Unknown", "smoking_status"] = 3
# df['smoking_status'] = df['smoking_status'].apply({0:'never smoked', 1:'formerly smoked', 2:'smokes'}.get)
df.head(5)
# **Now that we have preprocessed the data, we will select the columns to be used as independent variables to explain whether a user has a higher probability of getting a stroke or not**
X = df[
[
"gender",
"age",
"hypertension",
"heart_disease",
"avg_glucose_level",
"bmi",
"smoking_status",
]
]
y = df["stroke"]
# now we need to normalize X
X = preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
# after the data preprocessing, it's time to split the data into train and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3, random_state=4)
print("X_train shape: {}", format(X_train.shape))
print("Y_train shape: {}", format(Y_train.shape))
# now lets build logistic regression model
stroke_lr = LogisticRegression(C=0.01, solver="liblinear").fit(X_train, Y_train)
stroke_lr
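# confusion_matrix was imported above but not used yet; a minimal evaluation sketch on the held-out split,
# assuming the variables created above (not part of the original notebook):
Y_pred = stroke_lr.predict(X_test)
print("test accuracy:", stroke_lr.score(X_test, Y_test))
print(confusion_matrix(Y_test, Y_pred))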
|
# # Ⅰ. Set up the YOLOv5_OBB environment (for rotated object detection)
# ## 1. Install YOLOv5_OBB
# Download YOLOv5_OBB
# Install dependencies
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
# ## 2. Install DOTA_devkit
# # Ⅱ. Set up the dataset
# %cd /kaggle/working/yolov5_obb/
# %cd /kaggle/input/dronevehicle/train/train/trainlabel/
# !ls
# ## 1. Move the visible-light images of the DroneVehicle dataset from input to output
import shutil
"""dataset"""
"""train"""
shutil.copytree(
r"/kaggle/input/dronevehicle/train/train/trainimg",
r"/kaggle/working/datasets/DroneVehicleDataset/train/images",
)
# !mkdir /kaggle/working/datasets/DroneVehicleDataset/train/images/
"""val"""
shutil.copytree(
r"/kaggle/input/dronevehicle/val/val/valimg",
r"/kaggle/working/datasets/DroneVehicleDataset/val/images",
)
# !mkdir /kaggle/working/datasets/DroneVehicleDataset/val/images/
"""test"""
shutil.copytree(
r"/kaggle/input/dronevehicle/test/test/testimg",
r"/kaggle/working/datasets/DroneVehicleDataset/test/images",
)
# !mkdir /kaggle/working/datasets/DroneVehicleDataset/test/images/
# ## 2. Convert the visible-light image labels (xml) of the DroneVehicle dataset to txt format and move them to output
import os
import xml.etree.ElementTree as ET
import math
import cv2 as cv
def voc_to_dota(xml_dir, xml_name, img_dir):
    txt_name = xml_name[:-4] + ".txt"  # txt file name: drop the .xml extension and add .txt
txt_path = "/kaggle/working/datasets/DroneVehicleDataset/test/labelTxt/"
if not os.path.exists(txt_path):
os.makedirs(txt_path)
    txt_file = os.path.join(txt_path, txt_name)  # full path of the txt file
    img_name = xml_name[:-4] + ".jpg"  # image file name
    img_path = os.path.join(img_dir, img_name)  # full image path
    img = cv.imread(img_path)  # read the image
    xml_file = os.path.join(xml_dir, xml_name)
    tree = ET.parse(os.path.join(xml_file))  # parse the xml file, then convert it to a DOTA-format file
root = tree.getroot()
with open(txt_file, "w+", encoding="UTF-8") as out_file:
for obj in root.findall("object"):
name = obj.find("name").text
if name == "feright car":
name = "feright_car"
else:
name = name
obj_difficult = obj.find("difficult")
if obj_difficult:
difficult = obj_difficult.text
else:
difficult = "0"
if obj.find("bndbox"):
obj_bnd = obj.find("bndbox")
obj_xmin = obj_bnd.find("xmin").text
obj_ymin = obj_bnd.find("ymin").text
obj_xmax = obj_bnd.find("xmax").text
obj_ymax = obj_bnd.find("ymax").text
x1 = obj_xmin
y1 = obj_ymin
x2 = obj_xmax
y2 = obj_ymin
x3 = obj_xmax
y3 = obj_ymax
x4 = obj_xmin
y4 = obj_ymax
elif obj.find("polygon"):
obj_bnd = obj.find("polygon")
x1 = obj_bnd.find("x1").text
x2 = obj_bnd.find("x2").text
x3 = obj_bnd.find("x3").text
x4 = obj_bnd.find("x4").text
y1 = obj_bnd.find("y1").text
y2 = obj_bnd.find("y2").text
y3 = obj_bnd.find("y3").text
y4 = obj_bnd.find("y4").text
data = (
str(x1)
+ " "
+ str(y1)
+ " "
+ str(x2)
+ " "
+ str(y2)
+ " "
+ str(x3)
+ " "
+ str(y3)
+ " "
+ str(x4)
+ " "
+ str(y4)
+ " "
)
data = data + name + " " + difficult + "\n"
out_file.write(data)
# xml path
xml_path = "/kaggle/input/dronevehicle/test/test/testlabel/"
# image path
img_dir = "/kaggle/working/datasets/DroneVehicleDataset/test/images/"
xmlFile_list = os.listdir(xml_path)
for i in range(0, len(xmlFile_list)):
if (".xml" in xmlFile_list[i]) or (".XML" in xmlFile_list[i]):
voc_to_dota(xml_path, xmlFile_list[i], img_dir)
print(
"----------------------------------------{}{}----------------------------------------".format(
xmlFile_list[i], " has Done!"
)
)
else:
        print(xmlFile_list[i] + " is not an xml file")
# # Tools
# ## ① Delete files in output
import shutil
import os
# path = '/kaggle/working/datasets/DroneVehicleDataset/test/images'
path = "/kaggle/working/yolov5_obb"
if os.path.exists(path):
shutil.rmtree(path)
print("删除完成")
else:
print("原本为空")
# ## ② Pack output into a zip
import os
import zipfile
import datetime
def file2zip(packagePath, zipPath):
"""
    :param packagePath: folder path
    :param zipPath: path of the output zip archive
:return:
"""
zip = zipfile.ZipFile(zipPath, "w", zipfile.ZIP_DEFLATED)
for path, dirNames, fileNames in os.walk(packagePath):
fpath = path.replace(packagePath, "")
for name in fileNames:
fullName = os.path.join(path, name)
            name = fpath + "/" + name  # use "/" so the zip entries are valid on Linux
zip.write(fullName, name)
zip.close()
if __name__ == "__main__":
    # folder path
packagePath = "/kaggle/working/"
zipPath = "/kaggle/working/output.zip"
if os.path.exists(zipPath):
os.remove(zipPath)
# file2zip(packagePath, zipPath)
# print("打包完成")
# print(datetime.datetime.utcnow())
# # Ⅲ. Start training
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
# # Test
import os, random
exp_dir = "/kaggle/working/yolov5_obb/runs/detect/exp"
random_file = random.choice(os.listdir(exp_dir))
from IPython.display import Image
Image(os.path.join(exp_dir, random_file))  # pass the full path so IPython can locate the file
|
# # Objective
# The objective is to analyze the daily count of vaccinations in the top 5 countries in terms of the total vaccinations.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
from matplotlib import style
style.use("dark_background")
font = {"family": "sans-serif", "weight": "bold", "size": 20}
plt.rc("font", **font)
plt.rc("xtick", labelsize=20)
plt.rc("ytick", labelsize=20)
df = pd.read_csv(
"/kaggle/input/covid-world-vaccination-progress/country_vaccinations.csv",
parse_dates=[2],
)
df.head()
df.shape
# The following cell demonstrates the top 5 countries (in decreasing order) in terms of the total number of vaccinations till date.
df.groupby("country")["total_vaccinations"].agg(max).nlargest(5)
some_values = ["United States", "China", "United Kingdom", "England", "India"]
vaccines = df.loc[df["country"].isin(some_values)]
vaccines.fillna(0.0, inplace=True)
vaccines.isna().sum()
# Declaring functions to group the data by country and day.
def GroupByCountryAndDay(df):
groups = df.groupby("country")
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def GroupByDay(df, func=np.mean):
grouped = df[["date", "daily_vaccinations"]].groupby("date")
daily = grouped.aggregate(func)
daily["date"] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, "Y")
daily["years"] = (daily.date - start) / one_year
return daily
dailies = GroupByCountryAndDay(vaccines)
dailies.keys()
# The keys of the dictionary (dailies) are not in the order that we want them to be. Therefore, we create another dictionary in that specific order (the order demonstrated in 'some_values').
reordered_dailies = {country: dailies[country] for country in some_values}
reordered_dailies
# # Visualization
index = []
names = []
vacc = []
for i, (name, daily) in enumerate(reordered_dailies.items()):
index.append(daily.index)
names.append(name)
vacc.append(daily.daily_vaccinations / 1000)
names
fig, axs = plt.subplots(5, figsize=(15, 20))
fig.suptitle("Daily Vaccinations", fontsize=(20))
fig.autofmt_xdate(rotation=30)
for i, (name, daily) in enumerate(reordered_dailies.items()):
axs[i].scatter(index[i], vacc[i], color="red")
axs[i].set_title(name)
plt.tight_layout()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# from numerize import numerize
import plotly.express as px
import plotly.graph_objects as go
import datetime as dt
# Loading Data
df = pd.read_csv("../input/indian-startup-funding/startup_funding.csv")
# # Renaming columns
df.rename(
mapper={
"Date dd/mm/yyyy": "Startup_date",
"City Location": "City",
"Amount in USD": "Amount",
"Startup Name": "Startupname",
"Industry Vertical": "Industrytype",
"Investors Name": "Investorsname",
},
axis=1,
inplace=True,
)
df.isnull().sum()
df.info()
# **Column Drop**
df.drop(["Remarks", "Sr No"], axis=1, inplace=True)
# # Data cleaning
def clean_amount(amt):
amt = str(amt)
if "," in amt:
amt = amt.replace(",", "")
if amt.isnumeric():
return float(amt)
if amt.isalpha() or amt.startswith("\\"):
return np.nan
if "." in amt:
return float(amt)
if "+" in amt:
return float(amt.replace("+", ""))
df["Amount"] = df["Amount"].apply(clean_amount)
x = df["Amount"].mean()
df["Amount"].fillna(x, inplace=True)
# # Dropping the small empty cells
df.dropna(subset=["InvestmentnType", "Investorsname"], inplace=True)
# # Replacing NaN Values
df["SubVertical"].replace(
np.nan, df["SubVertical"].value_counts().idxmax(), inplace=True
)
df["Industrytype"].replace(
np.NaN, df["Industrytype"].value_counts().idxmax(), inplace=True
)
df["City"].replace(np.NaN, df["City"].value_counts().idxmax(), inplace=True)
df.isnull().sum()
# All NaN values cleaned
# # Categorical Column
df_cat = df.select_dtypes(np.object_)
df_cat
# # Numerical Columns
df_num = df.select_dtypes(np.number)
df_num
# # Univariate Analysis
df["Startupname"].nunique()
df["Amount"].describe()
df["Amount"] = df["Amount"].astype("int64")
df["Amount"]
# # Bivariate Analysis
dg_amount = df.groupby("Startup_date")["Amount"].sum().reset_index()
dg_amount.sort_values(by="Amount", ascending=False, inplace=True)
fig = px.line(dg_amount.head(20), x="Amount", y="Startup_date", markers="*")
fig.show()
# # Major cities with the most startups
city_df = df["City"].value_counts().reset_index()
city_df.columns = ["City Name", "Occurance"]
fig = px.bar(city_df.head(10), "City Name", "Occurance", title="Startups by City")
fig.show()
# # Top Invested Amount for each city
money_city_df = df.groupby("City")["Amount"].sum().reset_index()
money_city_df.sort_values(by="Amount", ascending=False, inplace=True)
fig = px.bar(
money_city_df.head(25), "City", "Amount", title="Top Invested Amount for each city"
)
fig.show()
# # Top Investors investment in Startups
amount_df = df.groupby("Investorsname")["Amount"].sum().reset_index()
amount_df.sort_values(by="Amount", ascending=False, inplace=True)
fig = px.line(
amount_df.head(20),
x="Amount",
y="Investorsname",
title="Top Investors investment in Startups",
)
fig.show()
# # STARTUP WITH INDUSTRYTYPE
xf = df.groupby("Startupname")["Industrytype"].sum().reset_index()
xf.sort_values(by="Industrytype", ascending=False, inplace=True)
fig = px.line(
xf.head(25), x="Startupname", y="Industrytype", title="STARTUP WITH INDUSTRYTYPE"
)
fig.show()
# # Startup Amount
xr = df.groupby("Startupname")["Amount"].sum().reset_index()
xr.sort_values(by="Amount", ascending=False, inplace=True)
fig = px.bar(xr.head(50), x="Amount", y="Startupname", title="Startup Amount")
fig.show()
import re
def clean_date_str(date_str):
try:
out = re.match(r"\d\d/\d\d/\d\d\d\d", date_str)
if out:
return date_str
else:
            return np.nan
except:
return np.nan
df["Clean_date"] = df.Startup_date.apply(clean_date_str)
df["Clean_date"] = pd.to_datetime(df["Clean_date"])
df
xd = df["Clean_date"].value_counts(100)
df.Clean_date = pd.to_datetime(df.Clean_date)
year_df = df.Clean_date.dt.year
year_df.head()
# # Sector covers most number of startup with years
years = df.groupby(year_df)["Industrytype"].count().reset_index()
years.columns = ["years", "Industry"]
fig = px.line(
years, "years", "Industry", title="Sector covers most number of startup with years"
)
fig.show()
px.scatter(df.head(500), "Startupname", "InvestmentnType")
|
# * Map the categorical variables to numbers
# * Create a derived feature from the name (titles such as Mr, Ms, Dr, Miss)
# * Handle missing values and outliers (fill missing categorical values with the most frequent category; fill missing numeric values with the mean or median)
# * Build a decision tree and find suitable parameters with GridSearch
# * Visualize it with plot_tree or the export_graphviz module
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import koreanize_matplotlib
import sklearn
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import (
confusion_matrix,
plot_confusion_matrix,
roc_curve,
roc_auc_score,
)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import optuna
from optuna import Trial, visualization
from optuna.samplers import TPESampler
import os
import warnings
warnings.filterwarnings("ignore")
train_path = "C:/Users/black/AI SCHOOL 8기/Kaggle 데이터 ( 머신러닝연습 ! )/train.csv"
test_path = "C:/Users/black/AI SCHOOL 8기/Kaggle 데이터 ( 머신러닝연습 ! )/test.csv"
train = pd.read_csv(train_path)
test = pd.read_csv(test_path)
display(train.head(2))
print(train.shape)
display(test.head(2))
print(test.shape)
# Data information
display(train.info())
display(test.info())
# # Handling Missing Values
# Check and handle missing values
# 'Age', 'Cabin', 'Fare', 'Embarked' => contain missing values
# Numeric => fill with the mean or median
train["Age"].fillna(train["Age"].median(), inplace=True)
test["Age"].fillna(test["Age"].median(), inplace=True)
train["Age"] = train["Age"].astype(int)
test["Age"] = test["Age"].astype(int)
train["Fare"].fillna(train["Fare"].median(), inplace=True)
test["Fare"].fillna(test["Fare"].median(), inplace=True)
# Categorical => fill with the most frequent value
train["Cabin"].fillna(
train["Cabin"].value_counts().sort_values(ascending=False).index[0], inplace=True
)
test["Cabin"].fillna(
test["Cabin"].value_counts().sort_values(ascending=False).index[0], inplace=True
)
train["Embarked"].fillna(train["Embarked"].mode()[0], inplace=True)
test["Embarked"].fillna(test["Embarked"].mode()[0], inplace=True)
# Check the correlation between the independent variables and the target ('Survived')
# The 'Pclass' & 'Fare' columns show a relatively strong correlation
corr = train.corr()
plt.figure(figsize=(14, 10))
mask = np.triu(np.ones_like(corr))
sns.heatmap(
data=corr, cmap="seismic", vmax=1, vmin=-1, annot=True, fmt=",.2f", mask=mask
)
sex_impoact_on_survive = train[["Sex", "Survived"]].groupby(["Sex"])["Survived"].mean()
fig, ax = plt.subplots(figsize=(8, 6))
sns.barplot(x="Sex", y="Survived", data=sex_impoact_on_survive.reset_index())
pclass_impact_on_survive = (
train[["Pclass", "Survived"]].groupby(["Pclass"]).mean().reset_index()
)
sns.barplot(data=pclass_impact_on_survive, x="Pclass", y="Survived")
# # Handling Outliers
# 'Age': elderly passengers are plausible, so high ages cannot be called outliers
# 'SibSp' => number of siblings or spouses aboard
# 'Parch' => number of parents and children aboard
# 'SibSp' & 'Parch' are also counts of accompanying passengers and differ per passenger, so they are not flagged as outliers
# # In the 'Fare' column, fares of 300 or less can plausibly vary by Pclass,
# # but the three fares above 500 look anomalous, so let's remove them
train.loc[train["Fare"] > 500, "Fare"]
train = train.drop(index=[259, 680, 738])
fig, axes = plt.subplots(figsize=(12, 5), nrows=1, ncols=2)
axes[0].set_title("Age, SibSp, Parch Boxplot")
sns.boxplot(data=train[["Age", "SibSp", "Parch"]], ax=axes[0])
axes[1].set_title("Fare Boxplot")
sns.boxplot(data=train[["Fare"]], ax=axes[1])
test.loc[test["Fare"] > 500, "Fare"]
fig, axes = plt.subplots(figsize=(12, 5), nrows=1, ncols=2)
axes[0].set_title("Age, SibSp, Parch Boxplot")
sns.boxplot(data=test[["Age", "SibSp", "Parch"]], ax=axes[0])
axes[1].set_title("Fare Boxplot")
sns.boxplot(data=test[["Fare"]], ax=axes[1])
# # Mapping Categorical Variables to Numeric Variables
# Among the categorical (dtype == 'object') variables, map the ones that can be numerically encoded and used for prediction
train.select_dtypes(include="object")
dummy = pd.get_dummies(train[["Sex", "Embarked"]])
train = train.join(dummy)
test.select_dtypes(include="object")
dummy = pd.get_dummies(test[["Sex", "Embarked"]])
test = test.join(dummy)
# # Creating a Derived Feature (from Name)
# Create a title feature (Mr, Miss, Mrs, Dr) from the name and map it to numbers
# Titles other than these are treated as missing => filled with 0
train["Name_"] = train["Name"].str.extract("([A-Za-z]+)\.", expand=False)
test["Name_"] = test["Name"].str.extract("([A-Za-z]+)\.", expand=False)
name_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Dr": 4}
train["Name_"] = train["Name_"].map(name_mapping).fillna(0).astype(int)
test["Name_"] = test["Name_"].map(name_mapping).fillna(0).astype(int)
# # Decision Tree
train_x = train.drop(
columns=["Name", "Sex", "Ticket", "Cabin", "Embarked", "Survived"], axis=1
)
train_y = train["Survived"]
x_train, x_test, y_train, y_test = train_test_split(
train_x, train_y, random_state=42, stratify=train_y, test_size=0.2
)
model = DecisionTreeClassifier()
params = {
"criterion": ["gini", "entropy"],
"splitter": ["best", "random"],
"max_depth": [3, 5, 7],
"min_samples_split": [2, 3, 4],
"min_samples_leaf": [1, 2, 3],
"max_features": [None, "sqrt", "log2"],
}
# Create the GridSearchCV object
grid_dt = GridSearchCV(model, param_grid=params, cv=5, n_jobs=-1, verbose=2)
# Fit the GridSearchCV model
grid_dt.fit(x_train, y_train)
# Print the best hyperparameters and the cross-validation score
print("Best parameters : ", grid_dt.best_params_)
print("Best cross-validation score : ", grid_dt.best_score_)
# Build a model with the best hyperparameters
model = DecisionTreeClassifier(
criterion=grid_dt.best_params_["criterion"],
splitter=grid_dt.best_params_["splitter"],
max_depth=grid_dt.best_params_["max_depth"],
min_samples_split=grid_dt.best_params_["min_samples_split"],
min_samples_leaf=grid_dt.best_params_["min_samples_leaf"],
max_features=grid_dt.best_params_["max_features"],
)
# Fit the model
model.fit(x_train, y_train)
from sklearn.tree import plot_tree
plt.figure(figsize=(25, 10))
plot_tree(
model, filled=True, max_depth=5, fontsize=12, feature_names=x_train.columns.tolist()
)
test = test.drop(columns=["Name", "Sex", "Ticket", "Cabin", "Embarked"], axis=1)
# Predict survival (Survived)
y_pred = grid_dt.predict(test)
# Create the submission file for Kaggle
sub = pd.read_csv(
"C:/Users/black/AI SCHOOL 8기/Kaggle 데이터 ( 머신러닝연습 ! )/gender_submission.csv"
)
sub["Survived"] = y_pred
sub.to_csv("Titanic_DT.csv", index=False)
|
# # Objective
# * To predict whether a liability customer will buy a personal loan or not.
# # Problem Definition
# How do we build a model that will help the marketing department to identify the potential customers who have a higher probability of purchasing the loan. How do we evaluate the performance of such a model?
# # Key Questions
# * Which variables are most significant.
# * Which segment of customers should be targeted more.
# # Import all the necessary libraries & Functions
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
# To build model for prediction
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
# To get diferent metric scores
from sklearn.metrics import (
f1_score,
accuracy_score,
recall_score,
precision_score,
confusion_matrix,
make_scorer,
roc_auc_score,
roc_curve,
precision_recall_curve,
)
# Removes the limit for the number of displayed columns
pd.set_option("display.max_columns", None)
# Sets the limit for the number of displayed rows
pd.set_option("display.max_rows", 200)
# setting the precision of floating numbers to 5 decimal points
pd.set_option("display.float_format", lambda x: "%.5f" % x)
# Load the data set into a pandas dataframe and preview it
data = pd.read_csv("./data_sets/Loan_Modelling.csv")
# # Data background and contents
# **Checking the data shape**
# checking the shape of the data
print(f"There are {data.shape[0]} rows and {data.shape[1]} columns.")
# Data sample
data.sample(10, random_state=2)
# Looking at the data, we do not need ID so we will drop it right away
data.drop("ID", axis=1, inplace=True)
# reset the indexes
data.reset_index()
# Data sample
data.sample(5, random_state=2)
# **Check column structure**
data.info() # checking column datatypes and number of non-null values
# * We have 12 Integer, 1 Float columns in the dataset
# **Check for missing values**
# Checking for missing values
data.isnull().sum()
# * We have no missing values in the dataset
# **Checking duplicates**
# Check for duplicated values
data.duplicated().sum()
data.loc[data.duplicated()]
# * We seem to have no duplicates in the dataset
# ## Statistical Summary of the Data
data.describe(include="all").T
# * The average age of the customers is 45, with a minimum age of 23 and a maximum age of 67 years. Over 50% of the customers in the dataset are aged 50 years and above
# * There seems to be an error in the minimum professional experience (-3); the maximum professional experience is 43 years, with an average of 20 years
# * Income ranges from 8,000 USD to 224,000 USD with an average income of approximately 74,000 USD
# * Additionally, 50% of all customers in the dataset have incomes of 64,000 USD and above
# * Zip codes are not clear and need to be checked and formatted accordingly
# * The average family size for each customer is 2, with 50% of all customers having a family size of 2 and above
# * The average spending on credit cards is approximately 2,000 USD per month
# * The average value of mortgages held by each customer is around 56,000 USD with a standard deviation of 101,000 USD (needs further analysis)
# * Education Level, Personal Loan, Securities_Account and CD_Account statistics are not meaningful here as they are categorical variables. A separate analysis will be done to get a clear picture of each
# # Exploratory Data Analysis
# **Create important functions for our EDA**
# Let us create some important functions that we will use for our EDA
# function to create labeled barplots
def labeled_barplot(data, feature, perc=False, n=None):
"""
Barplot with percentage at the top
data: dataframe
feature: dataframe column
perc: whether to display percentages instead of count (default is False)
n: displays the top n category levels (default is None, i.e., display all levels)
"""
total = len(data[feature]) # length of the column
count = data[feature].nunique()
if n is None:
plt.figure(figsize=(count + 2, 6))
else:
plt.figure(figsize=(n + 2, 6))
plt.xticks(rotation=90, fontsize=15)
ax = sns.countplot(
data=data,
x=feature,
palette="Paired",
order=data[feature].value_counts().index[:n].sort_values(),
)
for p in ax.patches:
if perc == True:
label = "{:.1f}%".format(
100 * p.get_height() / total
) # percentage of each class of the category
else:
label = p.get_height() # count of each level of the category
x = p.get_x() + p.get_width() / 2 # width of the plot
y = p.get_height() # height of the plot
ax.annotate(
label,
(x, y),
ha="center",
va="center",
size=12,
xytext=(0, 5),
textcoords="offset points",
) # annotate the percentage
plt.show() # show the plot
# function to plot stacked bar chart
def stacked_barplot(data, predictor, target):
"""
Print the category counts and plot a stacked bar chart
data: dataframe
predictor: independent variable
target: target variable
"""
count = data[predictor].nunique()
sorter = data[target].value_counts().index[-1]
tab1 = pd.crosstab(data[predictor], data[target], margins=True).sort_values(
by=sorter, ascending=False
)
print(tab1)
print("-" * 120)
tab = pd.crosstab(data[predictor], data[target], normalize="index").sort_values(
by=sorter, ascending=False
)
tab.plot(kind="bar", stacked=True, figsize=(count + 5, 6))
plt.legend(
loc="lower left",
frameon=False,
)
plt.legend(loc="upper left", bbox_to_anchor=(1, 1))
plt.show()
def histogram_boxplot(data, feature, figsize=(15, 10), kde=False, bins=None):
"""
Boxplot and histogram combined
data: dataframe
feature: dataframe column
figsize: size of figure (default (15,10))
kde: whether to show the density curve (default False)
bins: number of bins for histogram (default None)
"""
f2, (ax_box2, ax_hist2) = plt.subplots(
nrows=2, # Number of rows of the subplot grid= 2
sharex=True, # x-axis will be shared among all subplots
gridspec_kw={"height_ratios": (0.25, 0.75)},
figsize=figsize,
) # creating the 2 subplots
sns.boxplot(
data=data, x=feature, ax=ax_box2, showmeans=True, color="violet"
) # boxplot will be created and a triangle will indicate the mean value of the column
sns.histplot(
data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins
) if bins else sns.histplot(
data=data, x=feature, kde=kde, ax=ax_hist2
) # For histogram
ax_hist2.axvline(
data[feature].mean(), color="green", linestyle="--"
) # Add mean to the histogram
ax_hist2.axvline(data[feature].median(), color="black", linestyle="-")
### function to plot distributions wrt target
def distribution_plot_wrt_target(data, predictor, target):
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
target_uniq = data[target].unique()
axs[0, 0].set_title("Distribution of target for target=" + str(target_uniq[0]))
sns.histplot(
data=data[data[target] == target_uniq[0]],
x=predictor,
kde=True,
ax=axs[0, 0],
color="teal",
stat="density",
)
axs[0, 1].set_title("Distribution of target for target=" + str(target_uniq[1]))
sns.histplot(
data=data[data[target] == target_uniq[1]],
x=predictor,
kde=True,
ax=axs[0, 1],
color="orange",
stat="density",
)
axs[1, 0].set_title("Boxplot w.r.t target")
sns.boxplot(data=data, x=target, y=predictor, ax=axs[1, 0], palette="gist_rainbow")
axs[1, 1].set_title("Boxplot (without outliers) w.r.t target")
sns.boxplot(
data=data,
x=target,
y=predictor,
ax=axs[1, 1],
showfliers=False,
palette="gist_rainbow",
)
plt.tight_layout()
plt.show()
# Function to display a correlation heat map
def correlation_heatmap(data):
plt.figure(figsize=(15, 7))
plt.title("Correlation Heat Map", y=1.05, size=19)
sns.heatmap(data.corr(), cmap="Spectral", annot=True, fmt=".2f")
# ## Univariate Analysis
# ### Age Column
histogram_boxplot(data, "Age")
# * The distribution of age column is normal with negligible right skewness.
# ### Experience Column
histogram_boxplot(data, "Experience")
# * The distribution of Experience column is normal with negligible right skewness.
# ### Income Column
histogram_boxplot(data, "Income")
# * The distribution for Income is right skewed
# * This means that we have few customers earning more than the median income of 98K USD
# * We observe outliers in this column that need to be checked or treated
# ### Family Size
labeled_barplot(data, "Family")
histogram_boxplot(data, "Family")
# * From the box plot, the Family distribution seems to be right skewed, though this is not clear from the histplot.
# * The majority of the customers have a family size of 1. This could suggest that unmarried customers constitute the majority.
# ### CCVG - Average Spending on Credit Cards
histogram_boxplot(data, "CCAvg")
# * Distribution for this Column is Skewed to the right
# * We observe outliers in the column
# ### Education
labeled_barplot(data, "Education")
histogram_boxplot(data, "Education")
# * The distribution for education is slightly skewed to the right
# * Customers with Undergrad education form the majority in the dataset
# ### Mortgage
sns.boxplot(data=data, x="Mortgage")
plt.show()
# * Mortgage distribution seems to be heavily skewed to the right
# * We observe many outliers in this column, which calls for further investigation
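# A small sketch of the further check suggested above: counting Mortgage values beyond the usual
# 1.5*IQR whisker (assumes the `data` frame loaded earlier; illustrative only):
q1, q3 = data["Mortgage"].quantile([0.25, 0.75])
upper_whisker = q3 + 1.5 * (q3 - q1)
print((data["Mortgage"] > upper_whisker).sum(), "customers lie above the upper whisker")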
# Function to plot pie chart
def pie_plot(df, feature, title):
pyfig, axis1 = plt.subplots()
explode = (0, 0.15)
axis1.pie(
df[feature],
explode=explode,
        labels=df["Labels"],  # use the Labels column of the frame passed in, not the global
autopct="%1.1f%%",
shadow=True,
startangle=70,
)
axis1.axis("equal")
plt.title(title)
plt.show()
# ### Personal Loan
loan_stats = pd.DataFrame(
data["Personal_Loan"].value_counts(normalize=True)
).reset_index()
loan_stats.columns = ["Labels", "Personal Loan"]
pie_plot(loan_stats, "Personal Loan", "Personal_Loan Issuance")
# * Only 9.6% of the customers accepted personal loan offered during the last campaign
# * We observe quite a number of customers not accepting personal loans.
# * This gives us a sneak peek of the behavior; we need to build a model that will help us identify customers who are likely to accept the facility.
# ### Securities Account
securities_stats = pd.DataFrame(
data["Securities_Account"].value_counts(normalize=True)
).reset_index()
securities_stats.columns = ["Labels", "Securities_Account"]
securities_stats
pie_plot(securities_stats, "Securities_Account", "Securities_Account")
# * Further only 10% of customers have securities with the bank
# ### CD_Account
cd_Account = pd.DataFrame(data["CD_Account"].value_counts(normalize=True)).reset_index()
cd_Account.columns = ["Labels", "CD_Account"]
cd_Account
pie_plot(cd_Account, "CD_Account", "CD_Account")
# * Only 6% of customers have a certificate of deposit with the bank
# ### Online
online_stats = pd.DataFrame(data["Online"].value_counts(normalize=True)).reset_index()
online_stats.columns = ["Labels", "Online"]
online_stats
pie_plot(online_stats, "Online", "Online")
# * Approximately 60% of the bank's customers use internet banking facilities offered by the bank, while 40% do not use the facility
# ### Credit Card
creditcard_stats = pd.DataFrame(
data["CreditCard"].value_counts(normalize=True)
).reset_index()
creditcard_stats.columns = ["Labels", "CreditCard"]
creditcard_stats
pie_plot(creditcard_stats, "CreditCard", "Credit Card From Other Banks")
# * Only 29% of customers use credit card facilities by other banks
# ## Bi-Variate Analysis
# ### Pair Plot
sns.pairplot(data)
# * Experience and Age are strongly correlated
# * Age distribution is normal
# * Experience distribution is also normal
# * The ZIP Codes column does not seem to have any relationship with Personal_Loan
# * Family and Education have a relatively low correlation with Personal_Loan
# ### Analysis of each Variable With Respect to Target
valid_options = ["ZIPCode", "Personal_Loan"] ## Removing Zip code & target from loop
for column in data.columns:
if column not in valid_options:
distribution_plot_wrt_target(data, column, "Personal_Loan")
# ### Correlation Heat Map
correlation_heatmap(data)
# ### Key Observations from EDA
# * Age distribution is normal as previously mentioned and with respect to target, we seem to have equal number of customers with and without personal loan across the ages of 35 to 55
# * Experience distribution is normal with majority of customers accepting loans having a professional experience of 10 to 30 years
# * Similarly, we have an equal number of customers with professional experience of 10 to 30 years accepting personal loans.
# * We observe a right skewness in the income column
# * We also observe more customers with incomes of 100k and above accepting personal loans than those with lower incomes, hence they are more likely to take the personal loan facility
# * Customers with family size of 2 and above are more likely to accept personal loans than those with family size of less than 2
# * We observe that customers with an Average Credit Card spending of 2k USD and above are more likely to accept personal loans than those with lower Average spending
# * Customers with an education level of Undergrad and above are the most likely to accept personal loans
# * Customers with a Mortgage facility are more likely to accept personal loans than those without.
# * We observe a weak correlation with Securities account.
# * Customers with CD Account tend to accept personal loan facility more than customers without
# * We observe an equal number of customers having or not having access to internet banking accepting and rejecting personal loans
# * We observe an equal number of customers with and without credit cards from other banks accepting and rejecting personal loans
# * From the heat map, We observe a high correlation between Age and experience
# * We observe a low but moderate correlation between mortgage and income
# * We observe no correlation between zip code and target variable personal_loan
# * The target variable Personal_Loan seems to be greatly influenced by Mortgage, Education, CD Account and Credit Card spending
# # Data Preprocessing
# Copying the data to avoid touching original DF
df = data.copy()
# preview it
df.sample(10)
# ## Zip Code
# From EDA we observed no correlation between this column and the target variable. We will drop it
df.drop("ZIPCode", axis=1, inplace=True)
# **Checking if we have negative values Experience Column and treating accordingly**
# ## Experience
# We observed from the previous analysis that the Experience column has negative values. We need to treat it accordingly
# Checking how many negative values on the experience column
print(
"We have %2d negative values on the experience column"
% df[df["Experience"] < 0].shape[0]
)
# Replace negative Experience values with the median:
# first convert negatives to zero
df["Experience"] = df["Experience"].apply(lambda x: 0 if x < 0 else x)
# then impute zeros with the median
df["Experience"] = df["Experience"].replace(0, df["Experience"].median())
# Checking how many negative values on the experience column after treatment
print(
"We now have %2d negative values on the experience column after treatment"
% df[df["Experience"] < 0].shape[0]
)
# **Conclusion**
# * We have dropped the ZIPCode and ID columns from the dataset as they add no predictive value
# * The Experience column was treated by converting negative values and imputing them with the median
# # Model Building
# **Defining functions to use later during the model building and testing process**
# defining a function to compute different metrics to check performance of a classification model built using sklearn
def model_performance_classification_sklearn(model, predictors, target):
"""
Function to compute different metrics to check classification model performance
model: classifier
predictors: independent variables
target: dependent variable
"""
# predicting using the independent variables
pred = model.predict(predictors)
acc = accuracy_score(target, pred) # to compute Accuracy
recall = recall_score(target, pred) # to compute Recall
precision = precision_score(target, pred) # to compute Precision
f1 = f1_score(target, pred) # to compute F1-score
# creating a dataframe of metrics
df_perf = pd.DataFrame(
{
"Accuracy": acc,
"Recall": recall,
"Precision": precision,
"F1": f1,
},
index=[0],
)
return df_perf
# Function to compute the confusion matrix
def confusion_matrix_sklearn(model, predictors, target):
"""
To plot the confusion_matrix with percentages
model: classifier
predictors: independent variables
target: dependent variable
"""
y_pred = model.predict(predictors)
cm = confusion_matrix(target, y_pred)
labels = np.asarray(
[
["{0:0.0f}".format(item) + "\n{0:.2%}".format(item / cm.flatten().sum())]
for item in cm.flatten()
]
).reshape(2, 2)
plt.figure(figsize=(6, 4))
sns.heatmap(cm, annot=labels, fmt="")
plt.ylabel("True label")
plt.xlabel("Predicted label")
# defining a function to plot the confusion_matrix of a classification model built using sklearn
def confusion_matrix_sklearn_with_threshold(model, predictors, target, threshold=0.5):
"""
To plot the confusion_matrix, based on the threshold specified, with percentages
model: classifier
predictors: independent variables
target: dependent variable
threshold: threshold for classifying the observation as class 1
"""
pred_prob = model.predict_proba(predictors)[:, 1]
pred_thres = pred_prob > threshold
y_pred = np.round(pred_thres)
cm = confusion_matrix(target, y_pred)
labels = np.asarray(
[
["{0:0.0f}".format(item) + "\n{0:.2%}".format(item / cm.flatten().sum())]
for item in cm.flatten()
]
).reshape(2, 2)
plt.figure(figsize=(6, 4))
sns.heatmap(cm, annot=labels, fmt="")
plt.ylabel("True label")
plt.xlabel("Predicted label")
def model_performance_classification_sklearn_with_threshold(
model, predictors, target, threshold=0.5
):
"""
Function to compute different metrics, based on the threshold specified, to check classification model performance
model: classifier
predictors: independent variables
target: dependent variable
threshold: threshold for classifying the observation as class 1
"""
# predicting using the independent variables
pred_prob = model.predict_proba(predictors)[:, 1]
pred_thres = pred_prob > threshold
pred = np.round(pred_thres)
acc = accuracy_score(target, pred) # to compute Accuracy
recall = recall_score(target, pred) # to compute Recall
precision = precision_score(target, pred) # to compute Precision
f1 = f1_score(target, pred) # to compute F1-score
# creating a dataframe of metrics
df_perf = pd.DataFrame(
{
"Accuracy": acc,
"Recall": recall,
"Precision": precision,
"F1": f1,
},
index=[0],
)
return df_perf
# function to plot the ROC curve and AUC
def plot_auc_roc(target, logistic_regression_model, predictors):
logit_roc_auc_train = roc_auc_score(
target, logistic_regression_model.predict_proba(predictors)[:, 1]
)
fpr, tpr, thresholds = roc_curve(
target, logistic_regression_model.predict_proba(predictors)[:, 1]
)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label="Logistic Regression (area = %0.2f)" % logit_roc_auc_train)
plt.plot([0, 1], [0, 1], "r--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic")
plt.legend(loc="lower right")
plt.show()
# ## Model evaluation criterion
# ### Model can make wrong predictions as:
# 1. Predicting a customer will not accept (0) a personal loan when in reality the customer would accept - Opportunity loss (FN)
# 2. Predicting a customer will accept (1) a personal loan when in reality the customer will not accept - Loss of marketing resources (FP)
# ### Which Loss is greater ?
# * Opportunity loss will be the greater loss as the bank will lose revenue and possibly the customer
# ### How to reduce this loss i.e need to reduce False Negatives ?
# * The bank needs to reduce false negatives; this can be done by maximizing Recall.
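# Since the goal is to maximize recall, a minimal sketch of how a probability threshold could be chosen to meet a target recall is shown below. This is illustrative only: `fitted_clf`, `X_val`, `y_val` and the 0.9 target are assumptions, not objects defined in this notebook.
from sklearn.metrics import precision_recall_curve


def threshold_for_target_recall(fitted_clf, X_val, y_val, target_recall=0.9):
    """Return the highest probability threshold whose recall still meets the target."""
    probs = fitted_clf.predict_proba(X_val)[:, 1]
    precision, recall, thresholds = precision_recall_curve(y_val, probs)
    # recall[i] corresponds to thresholds[i]; the final precision/recall entries have no threshold
    viable = [t for t, r in zip(thresholds, recall[:-1]) if r >= target_recall]
    return max(viable) if viable else 0.5  # fall back to the default threshold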
# # Model Building - Logistic Regression
# Defining target and independent variables
X = df.drop("Personal_Loan", axis=1)
y = df["Personal_Loan"]
# Splitting into train and test data sets respectively
X_Train, X_Test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# Defining the logistic regression model
logistic_regression_model = LogisticRegression(solver="newton-cg", random_state=1)
logistic_regression_model.fit(X_Train, y_train)
# ### Finding the coefficients
log_odds = logistic_regression_model.coef_[0]
pd.DataFrame(log_odds, X_Train.columns, columns=["coef"]).T
# ### Coefficient interpretations
# * Coefficients of Age, Family Size, CCAvg, Education, Mortgage are positive, an increase in these variables will lead to an increase in chances of a customer accepting a personal loan.
# * Coefficients of Experience, Securities_Account, Online, CreditCard(from other banks) are negative, an increase in these will lead to a decrease in chances of a customer accepting a personal loan
# ### Converting coefficients to odds
# converting coefficients to odds
odds = np.exp(logistic_regression_model.coef_[0])
# finding the percentage change
perc_change_odds = (np.exp(logistic_regression_model.coef_[0]) - 1) * 100
# removing limit from number of columns to display
pd.set_option("display.max_columns", None)
# adding the odds to a dataframe
pd.DataFrame({"Odds": odds, "Change_odd%": perc_change_odds}, index=X_Train.columns).T
# ### Coefficient interpretations
# * `Age`: Holding all other features constant, a unit change in Age will increase the odds of a customer accepting a personal loan by 1.03 times, a 3.1% increase in the odds.
# * `Experience`: Holding all other features constant, a unit change in professional Experience will decrease the odds of a customer accepting a personal loan by a factor of 0.97, a 2.78% decrease in the odds.
# * `Income`: Holding all other features constant, a unit change in Income will increase the odds of a customer accepting a personal loan by 1.05 times, a 5.4% increase in the odds.
# * `Family`: Holding all other features constant, a unit change in Family size will increase the odds of a customer accepting a personal loan by 2 times, a 105% increase in the odds.
# * `CCAvg`: Holding all other features constant, a unit change in CCAvg will increase the odds of a customer accepting a personal loan by 1.17 times, a 17% increase in the odds.
# * `Education`: Holding all other features constant, a unit change in Education will increase the odds of a customer accepting a personal loan by 5.2 times, a 422% increase in the odds.
# * `Mortgage`: Holding all other features constant, a unit change in Mortgage leaves the odds essentially unchanged (a factor of about 1.0, a 0.07% increase in the odds).
# * `Securities_Account`: Holding all other features constant, a unit change in Securities_Account will decrease the odds of a customer accepting a personal loan by a factor of 0.43, a 56% decrease in the odds.
# * `CD_Account`: Holding all other features constant, a unit change in CD_Account will increase the odds of a customer accepting a personal loan by 26 times, a 2500% increase in the odds.
# * `Online`: Holding all other features constant, a unit change in Online will decrease the odds of a customer accepting a personal loan by a factor of 0.39, a 60% decrease in the odds.
# ### Model Performance on Training Set
# #### Creating confusion matrix
confusion_matrix_sklearn_with_threshold(logistic_regression_model, X_Train, y_train)
# #### Checking the Model Performance on Training Set with default threshold of 0.5
default_model_performance = model_performance_classification_sklearn_with_threshold(
logistic_regression_model, X_Train, y_train
)
default_model_performance
plot_auc_roc(y_train, logistic_regression_model, X_Train)
# * The model is performing well on the training set with an AUC of 0.96.
# * Recall, however, is low at 63%; we will see whether it can be improved further
# ### Logistic Model Performance Improvement
# * Let us see if the recall can be improved by applying optimal thresholds
# #### Optimal threshold using AUC-ROC curve
# Optimal threshold as per AUC-ROC curve
# The optimal cut off would be where tpr is high and fpr is low
fpr, tpr, thresholds = roc_curve(
y_train, logistic_regression_model.predict_proba(X_Train)[:, 1]
)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold_auc_roc = thresholds[optimal_idx]
print(optimal_threshold_auc_roc)
# #### Checking Model Performance using Optimum AUC/ROC threshold on Training set
# Confusion Matrix
confusion_matrix_sklearn_with_threshold(
logistic_regression_model, X_Train, y_train, threshold=optimal_threshold_auc_roc
)
# #### F1, Recall, Precision, Accuracy with optimum threshold (AUC/ROC) - Training Set
model_performance_aoc_ruc = model_performance_classification_sklearn_with_threshold(
logistic_regression_model, X_Train, y_train, optimal_threshold_auc_roc
)
model_performance_aoc_ruc
# * Precision drops to 0.52 on the training set after applying optimum threshold from AUC/ROC
# * Recall improves to 87% after applying optimum threshold from AUC/ROC
# #### Optimal threshold using Precision/Recall curve
# **We will try to obtain the optimum threshold that will maximize F1 score**
##Obtain predictions
y_scores = logistic_regression_model.predict_proba(X_Train)[:, 1]
prec, rec, thresholds = precision_recall_curve(
y_train,
y_scores,
)
# find the threshold that maximizes F1 score
f1_scores = 2 * prec * rec / (prec + rec)
optimal_idx = np.argmax(f1_scores)
optimal_threshold_f1 = thresholds[optimal_idx]
print("Optimum Threshold is >> ", optimal_threshold_f1)
# Plot the above
def plot_prec_recall_vs_tresh(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="precision")
plt.plot(thresholds, recalls[:-1], "g--", label="recall")
plt.xlabel("Threshold")
plt.legend(loc="upper left")
plt.ylim([0, 1])
plt.figure(figsize=(10, 7))
plot_prec_recall_vs_tresh(prec, rec, thresholds)
plt.show()
# * At a threshold of around 0.37, precision and recall are approximately balanced
# #### Checking Model Performance using Optimum Precision/Recall threshold above on the Training Set
# Confusion Matrix
confusion_matrix_sklearn_with_threshold(
logistic_regression_model, X_Train, y_train, threshold=optimal_threshold_f1
)
# #### F1, Recall, Precision, Accuracy with optimum precision/recall - Training Set
model_performance_recal_pre_curve = (
model_performance_classification_sklearn_with_threshold(
logistic_regression_model, X_Train, y_train, optimal_threshold_f1
)
)
model_performance_recal_pre_curve
# * Precision improves to 77% while recall drops slightly to 71%
# ### Model Performance on Testing Set
# creating confusion matrix
confusion_matrix_sklearn_with_threshold(logistic_regression_model, X_Test, y_test)
# **Obtain performance using default threshold of 0.5**
test_performance_default = model_performance_classification_sklearn_with_threshold(
logistic_regression_model, X_Test, y_test
)
test_performance_default
# * Model with default threshold performs poorly with a recall of 57%
# * F1 Score at 69 % and high precision of 86%
# #### Testing Set ROC-AUC
plot_auc_roc(y_test, logistic_regression_model, X_Test)
# **Obtain the optimum threshold from ROC/AUC and evaluate it on the testing set**
# Optimal threshold as per AUC-ROC curve
# The optimal cut off would be where tpr is high and fpr is low
fpr_test, tpr_test, thresholds_test = roc_curve(
y_test, logistic_regression_model.predict_proba(X_Test)[:, 1]
)
optimal_idx_test = np.argmax(tpr_test - fpr_test)
optimal_threshold_auc_roc_test = thresholds_test[optimal_idx_test]
print("Optimum Threshold AUC/ROC Test >> ", optimal_threshold_auc_roc_test)
# #### Checking Model Performance using Optimum AUC/ROC threshold on Testing set
# Confusion Matrix
confusion_matrix_sklearn_with_threshold(
logistic_regression_model, X_Test, y_test, threshold=optimal_threshold_auc_roc_test
)
model_performance_aoc_ruc_test = (
model_performance_classification_sklearn_with_threshold(
logistic_regression_model, X_Test, y_test, optimal_threshold_auc_roc_test
)
)
model_performance_aoc_ruc_test
# * Using the optimum ROC/AUC threshold, recall reaches 98% on the testing set, but the other metrics suffer
# * Precision and F1 scores are low at 14% and 24% respectively
# #### Optimal threshold using Precision Recall curve on testing Set
y_scores_test = logistic_regression_model.predict_proba(X_Test)[:, 1]
prec_test, rec_test, thresholds_f1_test = precision_recall_curve(y_test, y_scores_test)
# find the threshold that maximizes F1 score
f1_scores_test = 2 * prec_test * rec_test / (prec_test + rec_test)
optimal_idx_f1_test = np.argmax(f1_scores_test)
optimal_threshold_f1_test = thresholds_f1_test[optimal_idx_f1_test]
print("Optimum Threshold Precision/Recall Curve >> ", optimal_threshold_f1_test)
plt.figure(figsize=(10, 7))
plot_prec_recall_vs_tresh(prec_test, rec_test, thresholds_f1_test)
plt.show()
# #### Checking Model Performance using Optimum Precision/Recall Optimum threshold on Testing set
# Confusion Matrix
# creating confusion matrix
confusion_matrix_sklearn_with_threshold(
logistic_regression_model, X_Test, y_test, optimal_threshold_f1_test
)
model_performance_f1_test = model_performance_classification_sklearn_with_threshold(
logistic_regression_model, X_Test, y_test, optimal_threshold_f1_test
)
model_performance_f1_test
# * Recall & F1 score are relatively good after applying optimum threshold of 0.24 on the testing set using precision/recall curve
# * Precision score at 66%
# ### Logistic Regression Model Performance Summary
# training performance summary
models_train_comparision_df = pd.concat(
[
default_model_performance.T,
model_performance_aoc_ruc.T,
model_performance_recal_pre_curve.T,
],
axis=1,
)
models_train_comparision_df.columns = [
"Logistic Regression(Default)",
"Logistic Regression-0.13 Threshold(AUC/ROC)",
"Logistic Regression-0.37 Threshold (Precision/Recall Curve)",
]
print("Training performance summary:")
models_train_comparision_df
# Testing performance summary
models_test_comparision_df = pd.concat(
[
test_performance_default.T,
model_performance_aoc_ruc_test.T,
model_performance_f1_test.T,
],
axis=1,
)
models_test_comparision_df.columns = [
"Logistic Regression(Default)",
"Logistic Regression-0.02 Threshold(AUC/ROC)",
"Logistic Regression-0.23 Threshold(Precision/Recall Curve)",
]
print("Test performance summary:")
models_test_comparision_df
# **Performance Comments on the Training Set**
# * The default model has a recall of 63%, precision of 81% and an F1 score of 71% on the training set
# * After applying the optimum AUC/ROC threshold of 0.13, recall is 87%, precision is 52% and the F1 score is 71% on the training set
# * After applying the optimum threshold of 0.37 from the precision/recall curve, recall is 71% and precision is 77%, with an F1 score of 74% on the training set
# * The optimized model with the 0.37 threshold gives the better overall performance, with balanced recall, precision and F1 scores all ranging between 70% and 74% on the training set
# **Performance Comments on the Testing Set**
# * The default model has a recall of 57%, precision of 86% and an F1 score of 68% on the testing set
# * After applying the optimum AUC/ROC threshold of 0.02, the model performs poorly, with a recall of 98%, precision of 14% and an F1 score of 25% on the testing set
# * After applying the optimum threshold of 0.23 from the precision/recall curve, recall changes to 76% and precision to 66%, with an F1 score of 71% on the testing set
# * The model optimized with the precision/recall-curve threshold gives the most balanced performance, with precision, recall and F1 all ranging between 66% and 76% on the testing set
# # Model Building - Decision Tree
decision_tree_model = DecisionTreeClassifier(criterion="gini", random_state=1)
decision_tree_model.fit(X_Train, y_train)
# ## Check Decision Tree model performance on training set
decision_tree_performance_default = model_performance_classification_sklearn(
decision_tree_model, X_Train, y_train
)
decision_tree_performance_default
# Confusion Matrix
confusion_matrix_sklearn(decision_tree_model, X_Train, y_train)
# ## Checking Decision Tree model performance on test set
decision_tree_performance_test = model_performance_classification_sklearn(
decision_tree_model, X_Test, y_test
)
decision_tree_performance_test
# Confusion Matrix
confusion_matrix_sklearn(decision_tree_model, X_Test, y_test)
# ## Decision Tree Visualization
column_names = list(X.columns)
feature_names = column_names
print(feature_names)
plt.figure(figsize=(20, 30))
out = tree.plot_tree(
decision_tree_model,
feature_names=feature_names,
filled=True,
fontsize=9,
node_ids=True,
class_names=True,
)
for o in out:
arrow = o.arrow_patch
if arrow is not None:
arrow.set_edgecolor("black")
arrow.set_linewidth(1)
plt.show()
print(
tree.export_text(
decision_tree_model, feature_names=feature_names, show_weights=True
)
)
importances = decision_tree_model.feature_importances_
indices = np.argsort(importances)
plt.figure(figsize=(12, 12))
plt.title("Feature Importances")
plt.barh(range(len(indices)), importances[indices], color="violet", align="center")
plt.yticks(range(len(indices)), [feature_names[i] for i in indices])
plt.xlabel("Relative Importance")
plt.show()
# ## Model Performance observations
# * Education, Family, CCAvg and Age are the top 4 important features.
# * The default decision tree model overfits the data and hence scores a perfect 1 on recall, accuracy, F1 and precision on the training set
# * The testing-set scores are also high, but the perfect training scores indicate the tree has memorised the training data
# ## Model performance evaluation and improvement
# ### Pre-pruning
# #### Using GridSearch for Hyperparameter tuning of our tree model
estimator = DecisionTreeClassifier(random_state=1)
# Setting parameters to use for pre-pruning
parameters = {
    "max_depth": list(np.arange(1, 11, 2)) + [None],
    "criterion": ["gini"],
    "splitter": ["best", "random"],
    "min_impurity_decrease": [0.000001, 0.00001, 0.0001],
}
# select the type of scoring; we chose recall
acc_scorer = make_scorer(recall_score)
# Run the grid search
grid_obj = GridSearchCV(estimator, parameters, scoring=acc_scorer, cv=5)
grid_obj = grid_obj.fit(X_Train, y_train)
# Set the clf to the best combination of parameters
estimator = grid_obj.best_estimator_
# Fit the best algorithm to the data.
estimator.fit(X_Train, y_train)
# #### Checking performance on training set
decision_tree_train_tuned = model_performance_classification_sklearn(
estimator, X_Train, y_train
)
decision_tree_train_tuned
confusion_matrix_sklearn(estimator, X_Train, y_train)
# #### Checking performance on testing set
decision_tree_test_preprunned = model_performance_classification_sklearn(
estimator, X_Test, y_test
)
decision_tree_test_preprunned
confusion_matrix_sklearn(estimator, X_Test, y_test)
# * The model performs well on the testing set with a precision of 0.93 , recall of 0.87 and F1 score of 0.9
# #### Visualizing the tree after pre-pruning
plt.figure(figsize=(15, 12))
tree.plot_tree(
estimator,
feature_names=feature_names,
filled=True,
fontsize=9,
node_ids=True,
class_names=True,
)
plt.show()
# * We observe a simpler tree after pre-pruning
# ### Post-pruning - Cost Complexity Pruning
clf = DecisionTreeClassifier(random_state=1)
path = clf.cost_complexity_pruning_path(X_Train, y_train)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
pd.DataFrame(path)
# #### Recall vs alpha analysis on training and testing sets
# Obtain clfs
clfs = []
for ccp_alpha in ccp_alphas:
clf = DecisionTreeClassifier(random_state=1, ccp_alpha=ccp_alpha)
clf.fit(X_Train, y_train)
clfs.append(clf)
# Training Set
recall_train = []
for clf in clfs:
pred_train = clf.predict(X_Train)
values_train = recall_score(y_train, pred_train)
recall_train.append(values_train)
# Testing Set
recall_test = []
for clf in clfs:
pred_test = clf.predict(X_Test)
values_test = recall_score(y_test, pred_test)
recall_test.append(values_test)
fig, ax = plt.subplots(figsize=(15, 5))
ax.set_xlabel("alpha")
ax.set_ylabel("Recall")
ax.set_title("Recall vs alpha for training and testing sets")
ax.plot(ccp_alphas, recall_train, marker="o", label="train", drawstyle="steps-post")
ax.plot(ccp_alphas, recall_test, marker="o", label="test", drawstyle="steps-post")
ax.legend()
plt.show()
# Obtaining the best model
index_best_model = np.argmax(recall_test)
best_model = clfs[index_best_model]
print(best_model)
# #### Checking model performance on training set
tree_postprun_performance_train = model_performance_classification_sklearn(
best_model, X_Train, y_train
)
tree_postprun_performance_train
confusion_matrix_sklearn(best_model, X_Train, y_train)
# #### Checking model performance on testing set
tree_postprun_performance_test = model_performance_classification_sklearn(
best_model, X_Test, y_test
)
tree_postprun_performance_test
confusion_matrix_sklearn(best_model, X_Test, y_test)
# ##### Visualizing the Decision Tree after Post Pruning
plt.figure(figsize=(10, 10))
out = tree.plot_tree(
best_model,
feature_names=feature_names,
filled=True,
fontsize=9,
node_ids=True,
class_names=True,
)
for o in out:
arrow = o.arrow_patch
if arrow is not None:
arrow.set_edgecolor("black")
arrow.set_linewidth(1)
plt.show()
print(tree.export_text(best_model, feature_names=feature_names, show_weights=True))
# #### Checking importances after post pruning
# Checking importances
print(
pd.DataFrame(
best_model.feature_importances_, columns=["Imp"], index=X_Train.columns
).sort_values(by="Imp", ascending=False)
)
# Graph Plotting
importances = best_model.feature_importances_
indices = np.argsort(importances)
plt.figure(figsize=(12, 12))
plt.title("Feature Importances")
plt.barh(range(len(indices)), importances[indices], color="violet", align="center")
plt.yticks(range(len(indices)), [feature_names[i] for i in indices])
plt.xlabel("Relative Importance")
plt.show()
# * Education, Income, Family and CCAvg are now the top 4 important features after post-pruning
# ### Decision Tree Summary
# #### Decision Tree Training Set Summary
# Data frame to encompass performance summary on training set
models_train_comp_df = pd.concat(
[
decision_tree_performance_default.T,
decision_tree_train_tuned.T,
tree_postprun_performance_train.T,
],
axis=1,
)
models_train_comp_df.columns = [
"Decision Tree sklearn",
"Decision Tree (Pre-Pruning)",
"Decision Tree (Post-Pruning)",
]
print("Training performance comparison:")
models_train_comp_df
# Data frame to encompass performance summary on the testing set
models_test_comp_df = pd.concat(
    [
        decision_tree_performance_test.T,
        decision_tree_test_preprunned.T,
        tree_postprun_performance_test.T,
    ],
    axis=1,
)
models_test_comp_df.columns = [
    "Decision Tree sklearn",
    "Decision Tree (Pre-Pruning)",
    "Decision Tree (Post-Pruning)",
]
print("Test set performance comparison:")
models_test_comp_df
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Load the convenient utility for splitting the dataset:
from sklearn.model_selection import train_test_split
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# always fix RANDOM_SEED so that your experiments are reproducible!
RANDOM_SEED = 42
# pin package versions so that the experiments are reproducible:
# Load the competition data
DATA_DIR = "/kaggle/input/sf-booking/"
df_train = pd.read_csv(DATA_DIR + "/hotels_train.csv")  # training dataset
df_test = pd.read_csv(DATA_DIR + "hotels_test.csv")  # dataset for prediction
sample_submission = pd.read_csv(DATA_DIR + "/submission.csv")  # submission template
df_train.info()
df_train.head(2)
# Check the number of duplicates in the data:
df_train.duplicated().sum()
# Drop duplicates from the train set:
df_train.drop_duplicates(inplace=True)
df_test.info()
sample_submission.head(2)
sample_submission.info()
# IMPORTANT! To process the features consistently we combine train and test into a single dataset
df_train["sample"] = 1  # mark the train rows
df_test["sample"] = 0  # mark the test rows
df_test[
    "reviewer_score"
] = 0  # the test set has no reviewer_score (it is what we must predict), so fill it with zeros for now
data = pd.concat([df_test, df_train], sort=False).reset_index(drop=True)  # combine
data.info()
# Hotel-related features
# We can assume that some hotels share the same name while being located in different cities.
grouped = data.groupby(["hotel_name", "hotel_address"])["hotel_name"].count()
grouped = grouped.groupby("hotel_name").count().sort_values(ascending=False).head(3)
duplicated_hotel_name = grouped.index[0]
quantity_of_duplicated_names = grouped[0]
display(quantity_of_duplicated_names)
display(
data[data["hotel_name"] == duplicated_hotel_name][
["hotel_name", "hotel_address"]
].value_counts()
)
# The data contains three hotels with the same name but different addresses. Give these hotels unique names by appending a number to the name
hotels_to_rename = data[data["hotel_name"] == duplicated_hotel_name][
"hotel_address"
].value_counts()
for i in range(len(hotels_to_rename)):
address_to_rename = hotels_to_rename.index[i]
data.loc[(data["hotel_address"] == address_to_rename), "hotel_name"] = (
duplicated_hotel_name + " " + str(i)
)
display(
data[data["hotel_name"].str.contains(duplicated_hotel_name)][
["hotel_name", "hotel_address"]
].value_counts()
)
# Select the features that describe the hotel itself and create a DataFrame containing only unique hotel names (taking the renaming above into account)
hotel_cols = [
"hotel_name",
"hotel_address",
"lat",
"lng",
"average_score",
"total_number_of_reviews",
"additional_number_of_scoring",
]
# Drop duplicates:
hotels_uniq = data[hotel_cols].drop_duplicates().reset_index(drop=True)
display(hotels_uniq.info())
# Select all hotels with missing coordinates
hotels_addr = hotels_uniq[
(hotels_uniq["lat"].notna() == False) | (hotels_uniq["lng"].notna() == False)
].sort_values("hotel_name")
display(hotels_addr)
# For these hotels, try to determine the coordinates using the geopy geocoders library
from geopy.geocoders import Nominatim
loc = Nominatim(user_agent="GetLoc")
# entering the location name
def hotel_getLoc(hotel_address):
getLoc = loc.geocode(hotel_address)
try:
return pd.Series([getLoc.latitude, getLoc.longitude])
except:
return pd.Series([np.nan, np.nan])
hotels_addr[["lat", "lng"]] = hotels_addr["hotel_address"].apply(hotel_getLoc)
display(hotels_addr)
# We could not determine coordinates from the address for any of these hotels.
# Note that these hotels are located in Austria, France and Spain; their addresses contain characters beyond basic Latin, and some of those special characters appear to be missing from the address strings.
# Let's manually apply some corrections to the hotel addresses.
replace_words = {
"Savoyenstra e": "Savoyenstrasse",
"Gr nentorgasse": "Grunentorgasse",
"Josefst dter Stra e 22 08": "Josefstadter Strasse 22 08",
"Josefst dter Stra e 10 12 08": "Josefstadter Strasse 10-12",
"Sieveringer Stra e 4 19 D bling 1190": "Sieveringer Strasse 1190",
"23 Rue Damr mont 18th arr 75018": "Rue Damremont 23 18 Arrondissement 75018",
"Sep lveda": "Sepulveda",
"W hringer Stra e 33 35 09": "Wahringer Strasse 33-35",
"Taborstra e": "Taborstrasse",
"Landstra er G rtel 5 03 Landstra e": "Landstrasser Guertel 5",
"Hasenauerstra e 12 19 D bling": "Hasenauerstrasse 12",
"W hringer Stra e": "Wahringer Strasse",
"4 rue de la P pini re 8th arr 75008": "4 Rue De La Pepiniere",
"20 Rue De La Ga t 14th arr 75014": "Rue De La Gaite, 20b, XIV",
"Bail n": "Bailen",
"Pau Clar s 122 Eixample 08009": "Pau Claris 122",
"Paragonstra e": "Paragonstrasse",
}
hotels_addr["hotel_address"] = hotels_addr["hotel_address"].replace(
replace_words, regex=True
)
display(hotels_addr["hotel_address"])
hotels_addr[["lat", "lng"]] = hotels_addr["hotel_address"].apply(hotel_getLoc)
display(hotels_addr)
# We have now determined the coordinates of all hotels, and there are no missing values left in this data.
# Since not all hotel-related features have been processed yet, return to the "hotels_uniq" dataset and add the coordinates that were originally missing
hotels_uniq = hotels_uniq.merge(
hotels_addr[["hotel_name", "lat", "lng", "hotel_address"]],
how="left",
on="hotel_name",
suffixes=("", "_y"),
)
hotels_uniq["lat"] = hotels_uniq["lat"].fillna(hotels_uniq["lat_y"])
hotels_uniq["lng"] = hotels_uniq["lng"].fillna(hotels_uniq["lng_y"])
hotels_uniq = hotels_uniq.drop(columns=["lat_y", "lng_y"])
display(hotels_uniq.info())
# Next, extract the city/country pair from the address. These are needed both to assess the influence of the hotel's city on its rating and to create a new feature - the distance from the hotel to the city "centre".
# create a new Country feature based on the address
hotels_uniq["country"] = hotels_uniq["hotel_address"].str.split().str[-1]
hotels_uniq["country"].replace(
"Kingdom", "United Kingdom", inplace=True
)  # fix the name for United Kingdom
# check how many countries we ended up with
display(hotels_uniq["country"].nunique())
# Try to extract the city from the address
hotels_uniq["city"] = hotels_uniq["hotel_address"].apply(
lambda x: x.split()[-5] if x.split()[-1] == "Kingdom" else x.split()[-2]
)
display(hotels_uniq["city"].nunique())
# The result shows that the number of cities equals the number of countries, i.e. in each of these countries the hotels are located in a single city.
# To simplify the next step - determining the coordinates of the city "centre" - create a separate dataframe containing only the unique city/country pairs
city_uniq = hotels_uniq[["city", "country"]].drop_duplicates().reset_index(drop=True)
city_uniq["city_country"] = city_uniq["city"] + " " + city_uniq["country"]
display(city_uniq)
city_uniq[["lat_c", "lng_c"]] = city_uniq["city_country"].apply(hotel_getLoc)
display(city_uniq)
# At this step, compute the distance from the city "centre" to the hotel using the available coordinates
hotels_uniq = hotels_uniq.merge(city_uniq, how="left", on="city", suffixes=("", "_y"))
hotels_uniq = hotels_uniq.drop(columns=["country_y", "hotel_address_y"])
display(hotels_uniq.info())
from geopy.distance import geodesic as GD
hotels_uniq["distance"] = hotels_uniq.apply(
lambda x: GD((x["lat"], x["lng"]), (x["lat_c"], x["lng_c"])).km, axis=1
)
display(hotels_uniq.head())
# Let's check that the computed distances are plausible. To do this, find the maximum distance from the city "centre" to a hotel
display(hotels_uniq["distance"].max())
# 17 km is not that large a distance for a big city, so we consider the hotel coordinates and distances to be correct.
# The distance by itself carries little information. For a large city, 17 km from the centre may still be "almost central", with developed infrastructure and sights, while for a small city 17 km may already be forest, a neighbouring village, and so on.
# It is more reasonable to use not the absolute distance but a relative one, for example a fraction of a nominal city radius.
# A city is often far from circular in shape. It would be interesting to define the new feature as the ratio of the distance from the centre to the distance to the city boundary along the line through the centre and the hotel, but finding that boundary distance is fairly laborious. So we settle for the ratio of the hotel-to-centre distance to a nominal city radius, computed from the city area and the formula for the area of a circle $$ r = \sqrt{\frac{S}{\pi}} $$.
#
city_radius = {
"Amsterdam": 219.4,
"Barcelona": 101.4,
"London": 1602,
"Milan": 181.67,
"Paris": 105.4,
"Vienna": 414.75,
}
import math
hotels_uniq["distance"] = hotels_uniq.apply(
lambda x: x["distance"] / math.sqrt(city_radius[x["city"]] / math.pi), axis=1
)
display(hotels_uniq)
# Plot the distributions of the numeric hotel features to see whether any of them look (log-)normally distributed.
hotels_uniq.hist(figsize=(15, 15))
# None of the features looks truly normally distributed, but a few look suspiciously log-normal.
hotels_uniq["additional_number_of_scoring"] = np.log(
hotels_uniq["additional_number_of_scoring"] + 1
)
hotels_uniq["total_number_of_reviews"] = np.log(hotels_uniq["total_number_of_reviews"])
# hotels_uniq['average_score'] = np.log(hotels_uniq['average_score']+1)
hotels_uniq.hist(figsize=(15, 15))
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(
hotels_uniq.drop(
columns=["hotel_name", "hotel_address", "country", "city", "city_country"],
axis=1,
).corr(),
annot=True,
)
bins_number = 10
font = 10
x_label = "Ratio of the hotel-to-centre distance to the nominal city radius"
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(15, 15))
plt.subplots_adjust(hspace=0.3, wspace=0.2)
histplot_ams = sns.histplot(
data=hotels_uniq[hotels_uniq["city"] == "Amsterdam"],
x="distance",
bins=bins_number,
ax=axes[0, 0],
)
histplot_ams.set_title("Distance from the centre to hotels in Amsterdam", fontsize=font)
histplot_ams.set_xlabel(x_label)
histplot_ams.set_ylabel("Number of hotels")
histplot_bar = sns.histplot(
data=hotels_uniq[hotels_uniq["city"] == "Barcelona"],
x="distance",
bins=bins_number,
ax=axes[0, 1],
)
histplot_bar.set_title("Distance from the centre to hotels in Barcelona", fontsize=font)
histplot_bar.set_xlabel(x_label)
histplot_bar.set_ylabel("Number of hotels")
histplot_vie = sns.histplot(
data=hotels_uniq[hotels_uniq["city"] == "Vienna"],
x="distance",
bins=bins_number,
ax=axes[0, 2],
)
histplot_vie.set_title("Distance from the centre to hotels in Vienna", fontsize=font)
histplot_vie.set_xlabel(x_label)
histplot_vie.set_ylabel("Number of hotels")
histplot_lon = sns.histplot(
data=hotels_uniq[hotels_uniq["city"] == "London"],
x="distance",
bins=bins_number,
ax=axes[1, 0],
)
histplot_lon.set_title("Distance from the centre to hotels in London", fontsize=font)
histplot_lon.set_xlabel(x_label)
histplot_lon.set_ylabel("Number of hotels")
histplot_mil = sns.histplot(
data=hotels_uniq[hotels_uniq["city"] == "Milan"],
x="distance",
bins=bins_number,
ax=axes[1, 1],
)
histplot_mil.set_title("Distance from the centre to hotels in Milan", fontsize=font)
histplot_mil.set_xlabel(x_label)
histplot_mil.set_ylabel("Number of hotels")
histplot_par = sns.histplot(
data=hotels_uniq[hotels_uniq["city"] == "Paris"],
x="distance",
bins=bins_number,
ax=axes[1, 2],
)
histplot_par.set_title("Distance from the centre to hotels in Paris", fontsize=font)
histplot_par.set_xlabel(x_label)
histplot_par.set_ylabel("Number of hotels")
# The charts show that the vast majority of hotels lie inside the nominal city circle. Values above one (i.e. beyond the nominal city boundary) are most likely explained by the city's shape (for example, strongly elongated in one direction) rather than by hotels on the outskirts.
# After computing the distance to the city "centre" we no longer need the centre coordinates, which the correlation map also suggests. In addition, the 'city' and 'city_country' columns can be dropped, since each country has hotels in only one city.
hotels_uniq = hotels_uniq.drop(
columns=["lat_c", "lng_c", "city", "city_country"], axis=1
)
display(hotels_uniq.info())
# We are done with the hotel-related features. All missing coordinates have been determined, and new features were created - the distance from the city "centre" and the country.
# The results can now be merged back into the main DataFrame
display(data.info())
data = data.merge(hotels_uniq, how="inner", on="hotel_name", suffixes=("", "_y"))
data["lat"] = data["lat"].fillna(data["lat_y"])
data["lng"] = data["lng"].fillna(data["lng_y"])
data["hotel_address"] = data["hotel_address_y"]
data["additional_number_of_scoring"] = data["additional_number_of_scoring_y"]
data["total_number_of_reviews"] = data["total_number_of_reviews_y"]
data = data.drop(
columns=[
"lat_y",
"lng_y",
"average_score_y",
"additional_number_of_scoring_y",
"total_number_of_reviews_y",
"hotel_address_y",
]
)
data["review_year"] = pd.DatetimeIndex(data["review_date"]).year
data["review_month"] = pd.DatetimeIndex(data["review_date"]).month
data["review_year_month"] = data["review_year"] * 100 + data["review_month"]
data["weekday"] = pd.DatetimeIndex(data["review_date"]).dayofweek
data = data.drop(columns=["review_year", "review_month"])
# display(data[['review_date','review_year','review_month','review_year_month']].head())
display(data.info())
data.nunique(dropna=False)
# Show the feature correlations as a heatmap
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.drop(["sample"], axis=1).corr(), annot=True)
# additional_number_of_scoring and total_number_of_reviews have a correlation coefficient of 0.82, so one of them can be dropped without noticeably affecting the result. A small helper for spotting such pairs programmatically is sketched below.
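# A minimal helper, sketched for illustration, that lists feature pairs whose absolute correlation exceeds a cutoff (the 0.8 cutoff is an assumption):
def highly_correlated_pairs(df, cutoff=0.8):
    """Return (feature, feature, correlation) tuples with |corr| above the cutoff."""
    corr = df.select_dtypes("number").corr().abs()
    cols = corr.columns
    pairs = []
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            if corr.iloc[i, j] > cutoff:
                pairs.append((cols[i], cols[j], round(corr.iloc[i, j], 2)))
    return pairs


# e.g. highly_correlated_pairs(data.drop(["sample"], axis=1)) should surface the
# additional_number_of_scoring / total_number_of_reviews pair noted above.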
# Reviewer-related features
# First of all, consider the reviewer's nationality (more precisely, citizenship)
data["reviewer_nationality"] = data["reviewer_nationality"].apply(lambda x: x.strip())
display(data["reviewer_nationality"].value_counts(normalize=True).head(15))
# The reviewers come from 227 countries, and tourists from the United Kingdom account for almost half of them. Reviewers from the United States, in second place, trail the United Kingdom by roughly a factor of seven.
# One more point worth considering: a reviewer may be a resident of the country whose hotel they reviewed, or a foreign citizen. Let's see how a feature flagging whether the reviewer's and hotel's countries match affects the final result.
data["country_reviewer"] = data.apply(
lambda x: 1 if x["reviewer_nationality"] == x["country"] else 0, axis=1
)
display(data["country_reviewer"].value_counts(normalize=True))
def reduce_categories_number(df, col, n):
"""
    Takes a dataframe df, a column col and n - the number of most frequent categories.
    Categories that are not among the n most frequent are replaced with 'Other'.
    Returns the resulting dataframe.
"""
popular_values = df[col].value_counts().nlargest(n).index
df[col] = df[col].apply(lambda x: x if x in popular_values else "Other")
return df
# Convert the reviewer nationality feature to a categorical one, keeping only the 63 countries with the most reviewers and placing all others into the "Other" category
data = reduce_categories_number(data, "reviewer_nationality", 63)
display(data["reviewer_nationality"].value_counts(normalize=True).head(16))
# stop work
# grouped = data.groupby(by='reviewer_nationality')['reviewer_score'].agg(['mean']).sort_values('mean')
# fig, ax = plt.subplots(figsize=(20,12))
# plt.suptitle('Mean reviewer score by nationality', size=10)
# bar_types = sns.barplot(x=grouped.index, y=grouped['mean'])
# for p in bar_types.patches:
# bar_types.annotate('{:.1f}'.format(p.get_height()), (p.get_x()+.4, p.get_height()),
# ha='center', va='bottom', fontsize=10)
# plt.xlabel('Reviewer nationality', size=10)
# plt.ylabel('Mean score', size=10)
# plt.xticks(rotation=30)
# The chart shows that the most lenient reviewers are citizens of English-speaking countries (Israel being the exception), while citizens of Arab states give the lowest scores. Still, the spread of mean scores is only about 10%.
# Since the number of categories has been reduced, apply binary encoding to the "nationality" feature
# binary-encode the nationality feature
import category_encoders as ce  # import for working with the encoders
bin_encoder = ce.BinaryEncoder(
cols=["reviewer_nationality"]
)  # specify the column to encode
type_bin = bin_encoder.fit_transform(data["reviewer_nationality"])
data = pd.concat([data, type_bin], axis=1)
data.head(2)
# one-hot encode the countries with OneHotEncoder
import category_encoders as ce  # import for working with the encoders
encoder = ce.OneHotEncoder(
cols=["country"], use_cat_names=True
)  # specify the column to encode
type_bin = encoder.fit_transform(data["country"])
data = pd.concat([data, type_bin], axis=1)
data.head(2)
# Create a new feature tagn - the number of tags in the review
data["tags"] = data["tags"].apply(lambda s: s[1:] if s[0] == "[" else s)
data["tags"] = data["tags"].apply(lambda s: s[:-1] if s[-1] == "]" else s)
data["tags"] = data["tags"].apply(lambda s: s.replace("' ", ""))
data["tags"] = data["tags"].apply(lambda s: s.replace(" '", ""))
data["tags_list"] = data["tags"].apply(lambda s: s.split(", "))
data["tagn"] = data["tags_list"].apply(len)
data["tagn"].value_counts(normalize=True)
def hotels_days(tag_day):
i = tag_day.find("Stayed")
if i == -1:
return 0
j = tag_day.find("night", i + 1)
if j == -1:
return 0
istr = tag_day[i + 6 : j - 1]
i = int(istr)
return i
data["night"] = data["tags"].apply(hotels_days)
display(data["night"].value_counts())
# The number of nights a reviewer spent at the hotel ranges from 0 to 31, with 1 to 7 nights being the most common. Narrow the range to 0-7 and treat everything above that as 8
data["night"] = data["night"].apply(lambda x: 8 if x > 7 else x)
display(data["tagn"].max())
tag_dic = {}
i = 0
for irow in data[["tags"]].iterrows():
mlist = list(irow[1][0].replace("'", "").split(","))
mlist = [x.strip(" ") for x in mlist]
for l in mlist:
i = tag_dic.get(l, 0)
i = i + 1
tag_dic.update({l: i})
tag_dic = dict(sorted(tag_dic.items(), key=lambda x: x[1], reverse=True))
display(tag_dic)
# Accounting for every tag would be quite costly and most likely unnecessary. Consider a few of the most frequent tags and turn them into features.
# Leisure trip is the most popular tag. Since it refers to holiday travel, it is natural to add the business-travel counterpart as well - Business trip.
# It also makes sense to add features describing the size and composition of the party: Solo traveler, Couple, Group, Family with older children and Family with young children.
# There are also tags describing the room itself, but there are many room types and not every hotel has royal suites, so we leave those aside for now.
data["Leisure_trip"] = data["tags"].apply(
lambda x: 0 if x.find("Leisure trip") == -1 else 1
)
data["Business_trip"] = data["tags"].apply(
lambda x: 0 if x.find("Business trip") == -1 else 1
)
data["Solo_traveler"] = data["tags"].apply(
lambda x: 0 if x.find("Solo traveler") == -1 else 1
)
data["Couple"] = data["tags"].apply(lambda x: 0 if x.find("Couple") == -1 else 1)
data["Group"] = data["tags"].apply(lambda x: 0 if x.find("Group") == -1 else 1)
data["Family_with_older_children"] = data["tags"].apply(
lambda x: 0 if x.find("Family with older children") == -1 else 1
)
data["Family_with_young_children"] = data["tags"].apply(
lambda x: 0 if x.find("Family with young children") == -1 else 1
)
data["Submitted_from_a_mobile_device"] = data["tags"].apply(
lambda x: 0 if x.find("Submitted from a mobile device") == -1 else 1
)
display(data.info())
plt.rcParams["figure.figsize"] = (20, 20)
sns.heatmap(
data[
[
"Leisure_trip",
"Business_trip",
"Solo_traveler",
"Couple",
"Group",
"Family_with_older_children",
"Family_with_young_children",
]
].corr(),
annot=True,
)
# The heatmap makes the obvious point that keeping both Leisure trip and Business trip is redundant. Drop the Business trip feature.
data.drop(columns="Business_trip", inplace=True)
# stop work
plt.rcParams["figure.figsize"] = (20, 20)
sns.heatmap(data.drop(["sample"], axis=1).corr(), annot=True)
# Normalize the numeric data - scale it to the [0, 1] range (a compact sketch of this min-max scaling follows the commented-out cells below)
display(data.info())
display(data.head(5))
# ans = data['total_number_of_reviews'].max()
# data['total_number_of_reviews'] = data['total_number_of_reviews']/ans
# display(ans)
# display(data['total_number_of_reviews'].min())
# display(data['total_number_of_reviews'].max())
# ans = data['additional_number_of_scoring'].max()
# data['additional_number_of_scoring'] = data['additional_number_of_scoring']/ans
# display(ans)
# display(data['additional_number_of_scoring'].min())
# display(data['additional_number_of_scoring'].max())
# ans = data['review_total_negative_word_counts'].max()
# data['review_total_negative_word_counts'] = data['review_total_negative_word_counts']/ans
# display(ans)
# display(data['review_total_negative_word_counts'].min())
# display(data['review_total_negative_word_counts'].max())
# ans = data['average_score'].max()
# data['average_score'] = data['average_score']/ans
# display(ans)
# display(data['average_score'].min())
# display(data['average_score'].max())
# ans = data['tagn'].max()
# data['tagn'] = data['tagn']/ans
# display(ans)
# display(data['tagn'].min())
# display(data['tagn'].max())
# ans = data['night'].max()
# data['night'] = data['tagn']/ans
# display(ans)
# display(data['night'].min())
# display(data['night'].max())
# ym_min = data['review_year_month'].min()
# ym_max = data['review_year_month'].max()
# ym_koef = ym_max-ym_min
# data['review_year_month'] = (data['review_year_month'] - ym_min)/ym_koef
# display(ym_koef)
# display(data['review_year_month'].min())
# display(data['review_year_month'].max())
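# A compact sketch of the min-max scaling described in the commented-out cells above; the column list passed to it is an assumption, shown only for illustration:
def min_max_scale(df, columns):
    """Scale the listed columns to the [0, 1] range in place and return the dataframe."""
    for col in columns:
        col_min, col_max = df[col].min(), df[col].max()
        if col_max > col_min:  # skip constant columns to avoid division by zero
            df[col] = (df[col] - col_min) / (col_max - col_min)
    return df


# example call (columns assumed): min_max_scale(data, ["average_score", "tagn", "night"])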
# drop the features we have not processed yet;
# the model will not train on features with dtype "object", so simply select and drop them
object_columns = [s for s in data.columns if data[s].dtypes == "object"]
data.drop(object_columns, axis=1, inplace=True)
# excess_columns = ['reviewer_nationality_0','reviewer_nationality_1','reviewer_nationality_2','reviewer_nationality_3','reviewer_nationality_4',
# 'total_number_of_reviews','country_Netherlands','country_Italy','country_Spain','country_United Kingdom','country_France','country_Austria']
excess_columns = [
"lat",
"lng",
"total_number_of_reviews",
"country_Netherlands",
"country_Italy",
"country_Spain",
"country_United Kingdom",
"country_France",
"country_Austria",
]
data.drop(excess_columns, axis=1, inplace=True)
# data.drop('lat', axis = 1, inplace=True)
# data.drop('lng', axis = 1, inplace=True)
# data.drop('country_reviewer', axis = 1, inplace=True)
data.info()
# ans = data['review_total_positive_word_counts'].max()
# data['review_total_positive_word_counts'] = data['review_total_positive_word_counts']/ans
# display(ans)
# display(data['review_total_positive_word_counts'].min())
# display(data['review_total_positive_word_counts'].max())
# ans = data['total_number_of_reviews_reviewer_has_given'].max()
# data['total_number_of_reviews_reviewer_has_given'] = data['total_number_of_reviews_reviewer_has_given']/ans
# display(ans)
# display(data['total_number_of_reviews_reviewer_has_given'].min())
# display(data['total_number_of_reviews_reviewer_has_given'].max())
# ans = data['distance'].max()
# data['distance'] = data['distance']/ans
# display(ans)
# display(data['distance'].min())
# display(data['distance'].max())
display(data.head())
# Now split off the test part
train_data = data.query("sample == 1").drop(["sample"], axis=1)
test_data = data.query("sample == 0").drop(["sample"], axis=1)
y = train_data.reviewer_score.values # наш таргет
X = train_data.drop(["reviewer_score"], axis=1)
# Use the train_test_split helper to split the training data
# hold out 20% of the data for validation (the test_size parameter)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=RANDOM_SEED
)
# check
test_data.shape, train_data.shape, X.shape, X_train.shape, X_test.shape
# Import the required libraries:
from sklearn.ensemble import (
RandomForestRegressor,
)  # tool for building and training the model
from sklearn import metrics  # tools for evaluating model accuracy
# Create the model (DO NOT CHANGE THE SETTINGS)
model = RandomForestRegressor(
n_estimators=100, verbose=1, n_jobs=-1, random_state=RANDOM_SEED
)
# Train the model on the training set
model.fit(X_train, y_train)
# Use the trained model to predict hotel ratings on the validation split.
# Store the predicted values in y_pred
y_pred = model.predict(X_test)
# Write our own Mean Absolute Percentage Error (MAPE) function
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def mape_func(Y_actual, Y_predict):
mape = np.mean(np.abs((Y_actual - Y_predict) / Y_actual)) * 100
return mape
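# For reference, both helpers above implement the same standard formula:
# $$ \text{MAPE} = \frac{100\%}{n} \sum_{i=1}^{n} \left| \frac{y_i - \hat{y}_i}{y_i} \right| $$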
# Compare the predicted values (y_pred) with the actual ones (y_test) and see how much they differ on average
# The Mean Absolute Error (MAE) metric shows the average deviation of the predictions from the actual values.
print("MAE:", metrics.mean_absolute_error(y_test, y_pred))
# The Mean Absolute Percentage Error (MAPE) metric shows the mean absolute percentage error of the predictions relative to the actual values.
print("MAPE 1:", mean_absolute_percentage_error(y_test, y_pred))
print("MAPE 2:", mape_func(y_test, y_pred))
# Submission:
test_data.sample(10)
# Drop the target feature from the test set:
test_data = test_data.drop(["reviewer_score"], axis=1)
display(sample_submission.info())
display(test_data.info())
# Save the model's predictions to the submission:
sample_submission["reviewer_score"] = model.predict(test_data)
# Save the submission to a csv file:
sample_submission.to_csv("submission.csv", index=False)
sample_submission.head()
# RandomForestRegressor can report the most important features for the model
plt.rcParams["figure.figsize"] = (10, 10)
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(30).plot(kind="barh")
|
# Here I present some simple data analysis and a sample submission.
# The problem uploaded to Kaggle is very similar to test case `d` from the qualification round.
# You can refer to discussion on how to solve `d` on Codeforces - https://codeforces.com/blog/entry/88188
import collections
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# # Parse Input
# Firstly we need to parse the input into a data structure
INPUT_DIR = "/kaggle/input/hashcode-2021-oqr-extension/hashcode.in"
def parse_input(input_dir):
with open(input_dir) as f:
arr = f.readlines()
arr = arr[::-1]
line = arr.pop().split()
duration, num_intersections, num_streets, num_cars, fixed_score = [
int(x) for x in line
]
streets = {}
for _ in range(num_streets):
line = arr.pop().split()
start = int(line[0])
end = int(line[1])
street_name = line[2]
length = int(line[-1])
streets[street_name] = start, end, length
cars = []
for idx in range(num_cars):
line = arr.pop().split()
sequence = line[1:] # [1:] because the first word is the length of the sequence
cars.append(sequence)
return (
duration,
num_intersections,
num_streets,
num_cars,
fixed_score,
streets,
cars,
)
(
duration,
num_intersections,
num_streets,
num_cars,
fixed_score,
streets,
cars,
) = parse_input(INPUT_DIR)
# `streets` is a dictionary that maps `street_name` to (start,end,length)
# Each element of `cars` is an array of `street_name` that the car traverses
duration, num_intersections, num_streets, num_cars, fixed_score
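# A quick, illustrative peek at the parsed structures to confirm their shape:
sample_street = next(iter(streets))
print(sample_street, streets[sample_street])  # street name -> (start, end, length)
print(cars[0][:5])  # first few streets on the first car's route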
# # Data analysis
distance_distbn = []
for car in cars:
distance = 0
for street_name in car:
distance += streets[street_name][-1] # {street_name:(start,end,length),...}
distance_distbn.append(distance)
plt.figure(figsize=(14, 2))
plt.hist(distance_distbn, bins=100)
plt.axvline(duration, color="r")
plt.title("Distribution of the distance to travel for each vehicle")
plt.show()
plt.close()
# # Upper bound estimate
# We want to estimate an upper bound so that we know how much room there is for improvement.
# For this upper bound estimate, we assume that cars never wait at any intersection.
impossible_cars = 0 # number of cars that could not finish
total_required_distance = 0  # total distance to be travelled by the cars that can finish
for car in cars:
distance = 0
for street_name in car[
1:
]: # [1:] because the car does not need to travel along the first street
distance += streets[street_name][-1]
if distance > duration:
impossible_cars += 1
else:
total_required_distance += distance
# for completing before the end of simulation
completion_score = fixed_score * (num_cars - impossible_cars)
# bonus for completing early
time_bonus = duration * (num_cars - impossible_cars) - total_required_distance
total_score = completion_score + time_bonus
total_required_distance, impossible_cars, time_bonus, completion_score, total_score
# # Sample solution
# In this sample solution, every incoming street that has at least one car passing through it gets a green light of duration one.
# to discover which intersection each road leads to
map_street_to_dest = {}
map_street_to_source = {} # unused
for street_name, (start, end, length) in streets.items():
map_street_to_source[street_name] = start
map_street_to_dest[street_name] = end
# for each intersection, count the amount of traffic from each incoming street
incoming_count = collections.defaultdict(collections.Counter)
for car in cars:
for street_name in car:
incoming_count[map_street_to_dest[street_name]][street_name] += 1
schedules = []
for i in range(num_intersections):
    total_count = sum(incoming_count[i].values())  # total amount of incoming traffic
    num_incoming = len(incoming_count[i])  # number of incoming streets
arr = list(incoming_count[i].items())
random.shuffle(arr) # shuffle the incoming streets for a randomised solution
cycle = []
for incoming, count in arr:
        time_fraction = 1  # every green light gets a duration of one
cycle.append([incoming, time_fraction])
schedules.append(cycle)
# # Parse solution into submission
res = []
res.append([len(schedules)])
for i, cycle in enumerate(schedules):
if not cycle:
res[0][0] -= 1
continue
res.append([i])
res.append([len(cycle)])
for incoming, time_fraction in cycle:
res.append([incoming, time_fraction])
result_string = "\n".join(" ".join([str(x) for x in row]) for row in res)
# print(result_string)
with open("submission.csv", "w") as text_file:
text_file.write(result_string)
# check submission
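# A minimal consistency check of the submission file written above (a sketch, not the official grader): it only verifies that the declared number of intersections matches the number of schedule blocks.
with open("submission.csv") as f:
    lines = f.read().strip().split("\n")
declared = int(lines[0])
idx, parsed = 1, 0
while idx < len(lines):
    num_streets = int(lines[idx + 1])  # each block: intersection id, street count, then one line per street
    idx += 2 + num_streets
    parsed += 1
assert parsed == declared, f"declared {declared} intersections but parsed {parsed}"
print("submission format looks consistent:", declared, "intersections")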
|
# # TV Shows and Movies listed on Netflix
# This dataset consists of tv shows and movies available on Netflix as of 2019. The dataset is collected from Flixable which is a third-party Netflix search engine.
# In 2018, they released an interesting report showing that the number of TV shows on Netflix has nearly tripled since 2010, while the number of movies on the service has decreased by more than 2,000 titles over the same period. It will be interesting to explore what other insights can be obtained from this dataset.
# Integrating this dataset with external datasets such as IMDB ratings or Rotten Tomatoes can also provide many interesting findings.
# Inspiration
# Some of the interesting questions (tasks) which can be performed on this dataset -
# * Understanding what content is available in different countries
# * Identifying similar content by matching text-based features
# * Network analysis of Actors / Directors and find interesting insights
# * Has Netflix been increasingly focusing on TV rather than movies in recent years?
# 
# # import libraries
#
# data manipulation
import pandas as pd
import numpy as np
# data visualisation
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
import plotly.graph_objects as go
import plotly.express as px
# default theme
sns.set(
context="notebook",
style="darkgrid",
palette="colorblind",
font="sans-serif",
font_scale=1,
rc=None,
)
matplotlib.rcParams["figure.figsize"] = [8, 8]
matplotlib.rcParams.update({"font.size": 15})
matplotlib.rcParams["font.family"] = "sans-serif"
from dataprep.eda import *
from dataprep.datasets import load_dataset
from dataprep.eda import create_report
# # load & analyse the data
df = pd.read_csv("../input/netflix-shows/netflix_titles.csv")
df
df.info()
df.dtypes.value_counts().plot.pie(explode=[0.1, 0.1], autopct="%1.1f%%", shadow=True)
plt.title("the type of our data")
df.columns
df.describe(include="all")
# #### Some notes about the data description
# As we can see in our data, the most frequent value in each column is:
# * type: Movie, with 5377/7787
# * director: Raúl Campos, Jan Suter, with 18/5398
# * cast: David Attenborough, with 18/7069
# * country: United States, with 2555/7280
# * date_added: January 1, 2020/7777
# * release_year:
# * duration:
# * listed_in:
# * description:
# These top-value/frequency figures can also be read off programmatically, as in the sketch after this list.
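# A minimal sketch of how to pull the same figures from the describe() output
# shown above (assuming the standard netflix_titles.csv schema loaded into `df`):
top_freq = df.describe(include="all").loc[["top", "freq"]].T
top_freq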
# # finding missing values
missing_values = df.isnull().sum()
percent_missing = df.isnull().sum() / df.shape[0] * 100
value = {"missing_values": missing_values, "percent_missing %": percent_missing}
frame = pd.DataFrame(value)
frame
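# A quick visual of the same table, using the `percent_missing` series computed
# above (a minimal sketch):
percent_missing.sort_values(ascending=False).plot.bar(figsize=(10, 4))
plt.ylabel("% missing")
plt.title("percentage of missing values per column")
plt.show()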
# So our missing data are:
# * rating: 7 -- 0.08%
# * date_added: 10 -- 0.12%
# * country: 507 -- 6.51%
# * cast: 718 -- 9.22%
# * director: 2389 -- 30.67%
df.shape
# ### a) rating
freq_value = df.rating.value_counts()
print(freq_value)
freq_value.plot.bar()
# 1. As we can see, the most frequent value in the **rating** column is **TV-MA**, with 2863 occurrences
# 2. and the missing values are only 0.08% of the data
# ==> so we will replace them with the most frequent value
freq_rating = df.rating.mode()[0]
df["rating"] = df["rating"].fillna(freq_rating)
df.rating.isnull().sum()
# ### b) date_added
freq_date = df.date_added.value_counts()
freq_date
# we have just 10 missing values in **date_added**, so it's better to drop those rows
df = df.dropna(axis=0, subset=["date_added"])
df.date_added.isnull().sum()
# ### c) country
df.country.value_counts()
plt.figure(figsize=(15, 8))
country_val = df.country.value_counts().head(15)
sns.barplot(x=country_val.index, y=country_val.values)
plt.xticks(rotation=45)
plt.title("content available in different countries ")
# * the most frequent country is **United States**
# * so we will replace all 507 missing values (6.51%) with **United States**
df.country.mode()
df["country"].fillna(df["country"].mode, inplace=True)
df.country.isnull().sum()
# ### d) cast
df.cast.value_counts().count()
# As we can see in this case:
# * the missing values are 718 -- about 9% of our data
# * the most frequent value is **David Attenborough**, with a count of 18
# * the **cast** column contains 6821 distinct values
# #### CONCLUSION:
# it is hard to find a sensible method to fill these missing values, so we will drop them
df = df.dropna(axis=0, subset=["cast"])
df.isnull().sum()
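# ### e) director
# the **director** column (about 30% missing in the raw data) is not handled above; rather than
# drop a third of the rows, one simple option is to fill the gaps with a placeholder label
# (a minimal sketch -- the "Unknown" label is an assumption, not part of the original analysis)
df["director"] = df["director"].fillna("Unknown")
df.isnull().sum()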
|