import numpy as np
import pandas as pd
import os
BATCH_SIZE = 32
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
import re
try:
from kaggle_datasets import KaggleDatasets
except:
pass
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print("Device:", tpu.master())
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)
AUTOTUNE = tf.data.experimental.AUTOTUNE
print(tf.__version__)
GCS_PATH = KaggleDatasets().get_gcs_path("gan-getting-started")
MONET_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + "/monet_tfrec/*.tfrec"))
PHOTO_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + "/photo_tfrec/*.tfrec"))
IMAGE_SIZE = [256, 256]
def decode_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = (tf.cast(image, tf.float32) / 127.5) - 1
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_tfrecord(example):
tfrecord_format = {"image": tf.io.FixedLenFeature([], tf.string)}
example = tf.io.parse_single_example(example, tfrecord_format)
image = decode_image(example["image"])
return image
def load_dataset(filenames):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(read_tfrecord, num_parallel_calls=AUTOTUNE)
return dataset
monet_ds = load_dataset(MONET_FILENAMES).batch(1)
photo_ds = load_dataset(PHOTO_FILENAMES).batch(1)
fast_photo_ds = (
load_dataset(PHOTO_FILENAMES).batch(32 * strategy.num_replicas_in_sync).prefetch(32)
)
fid_photo_ds = (
load_dataset(PHOTO_FILENAMES)
.take(1024)
.batch(32 * strategy.num_replicas_in_sync)
.prefetch(32)
)
fid_monet_ds = (
load_dataset(MONET_FILENAMES).batch(32 * strategy.num_replicas_in_sync).prefetch(32)
)
def get_gan_dataset(
monet_files, photo_files, augment=None, repeat=True, shuffle=True, batch_size=1
):
monet_ds = load_dataset(monet_files)
photo_ds = load_dataset(photo_files)
if augment:
monet_ds = monet_ds.map(augment, num_parallel_calls=AUTOTUNE)
photo_ds = photo_ds.map(augment, num_parallel_calls=AUTOTUNE)
if repeat:
monet_ds = monet_ds.repeat()
photo_ds = photo_ds.repeat()
# if shuffle:
# monet_ds = monet_ds.shuffle(2048)
# photo_ds = photo_ds.shuffle(2048)
monet_ds = monet_ds.batch(batch_size, drop_remainder=True)
photo_ds = photo_ds.batch(batch_size, drop_remainder=True)
# monet_ds = monet_ds.cache()
# photo_ds = photo_ds.cache()
monet_ds = monet_ds.prefetch(AUTOTUNE)
photo_ds = photo_ds.prefetch(AUTOTUNE)
gan_ds = tf.data.Dataset.zip((monet_ds, photo_ds))
return gan_ds
final_dataset = get_gan_dataset(
MONET_FILENAMES,
PHOTO_FILENAMES,
augment=None,
repeat=True,
shuffle=True,
batch_size=BATCH_SIZE,
)
with strategy.scope():
inception_model = tf.keras.applications.InceptionV3(
input_shape=(256, 256, 3), pooling="avg", include_top=False
)
mix3 = inception_model.get_layer("mixed9").output
f0 = tf.keras.layers.GlobalAveragePooling2D()(mix3)
inception_model = tf.keras.Model(inputs=inception_model.input, outputs=f0)
inception_model.trainable = False
def calculate_activation_statistics_mod(images, fid_model):
act = tf.cast(fid_model.predict(images), tf.float32)
mu = tf.reduce_mean(act, axis=0)
mean_x = tf.reduce_mean(act, axis=0, keepdims=True)
mx = tf.matmul(tf.transpose(mean_x), mean_x)
vx = tf.matmul(tf.transpose(act), act) / tf.cast(tf.shape(act)[0], tf.float32)
sigma = vx - mx
return mu, sigma
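# Note: sigma above is the biased covariance estimate E[act act^T] - E[act] E[act]^T,
# computed without explicitly centering the activation matrix.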
myFID_mu2, myFID_sigma2 = calculate_activation_statistics_mod(
fid_monet_ds, inception_model
)
fids = []
with strategy.scope():
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
fid_epsilon = 1e-14
covmean = tf.linalg.sqrtm(tf.cast(tf.matmul(sigma1, sigma2), tf.complex64))
# isgood=tf.cast(tf.math.is_finite(covmean), tf.int32)
# if tf.size(isgood)!=tf.math.reduce_sum(isgood):
# return 0
covmean = tf.cast(tf.math.real(covmean), tf.float32)
tr_covmean = tf.linalg.trace(covmean)
return (
tf.matmul(
tf.expand_dims(mu1 - mu2, axis=0), tf.expand_dims(mu1 - mu2, axis=1)
)
+ tf.linalg.trace(sigma1)
+ tf.linalg.trace(sigma2)
- 2 * tr_covmean
)
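# For reference, the value returned above is the Frechet distance between the Gaussians
# N(mu1, sigma1) and N(mu2, sigma2):
#     FID = ||mu1 - mu2||^2 + Tr(sigma1) + Tr(sigma2) - 2 * Tr(sqrtm(sigma1 @ sigma2))
# The matmul of the expanded mean-difference vectors computes the squared L2 norm term.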
def FID(
images,
gen_model,
inception_model=inception_model,
myFID_mu2=myFID_mu2,
myFID_sigma2=myFID_sigma2,
):
inp = layers.Input(shape=[256, 256, 3], name="input_image")
x = gen_model(inp)
x = inception_model(x)
fid_model = tf.keras.Model(inputs=inp, outputs=x)
mu1, sigma1 = calculate_activation_statistics_mod(images, fid_model)
fid_value = calculate_frechet_distance(mu1, sigma1, myFID_mu2, myFID_sigma2)
return fid_value
def up_sample(filters, size, apply_dropout=False):
initializer = tf.random_normal_initializer(0.0, 0.02)
gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
layer = keras.Sequential()
layer.add(
layers.Conv2DTranspose(
filters,
size,
strides=2,
padding="same",
kernel_initializer=initializer,
use_bias=False,
)
)
layer.add(tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))
if apply_dropout:
layer.add(layers.Dropout(0.5))
layer.add(layers.ReLU())
return layer
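# The original notebook only defines the decoder block here; a CycleGAN-style generator also
# needs a matching downsampling block. The sketch below is an assumption about how that
# encoder block could mirror up_sample, not code from the source notebook.
def down_sample(filters, size, apply_instancenorm=True):
    initializer = tf.random_normal_initializer(0.0, 0.02)
    gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
    layer = keras.Sequential()
    layer.add(
        layers.Conv2D(
            filters,
            size,
            strides=2,
            padding="same",
            kernel_initializer=initializer,
            use_bias=False,
        )
    )
    if apply_instancenorm:
        layer.add(tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))
    layer.add(layers.LeakyReLU())
    return layer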
|
# This R environment comes with many helpful analytics packages installed
# It is defined by the kaggle/rstats Docker image: https://github.com/kaggle/docker-rstats
# For example, here's a helpful package to load
library(tidyverse) # metapackage of all tidyverse packages
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
list.files(path="../input")
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# * This is an optimization problem involving a list of 100 projects--each taking a given number of person-hours to build, and each having a given potential savings of hours per year in manual labor.
# * The one constraint is total number of person-hours per year available.
# * The optimizer selects which of the 100 projects should be undertaken in order to maximize the total number of manual hours saved, given the total number of person hours available in a year.
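# * In lpSolveAPI terms this is a 0/1 knapsack formulation: maximize sum(savings_i * x_i)
#   subject to sum(build_hours_i * x_i) <= max_person_hours, with each x_i a binary
#   decision variable for project i.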
# First load the solver package
library(lpSolveAPI)
###########################################################################
# variables
data_path <- "../input/bigger-data/bigger_data.csv"
result_path <- "/kaggle/working/result.csv"
# Here is where to set the maximum number of person hours in a year
max_person_hours <- 10000
# only one constraint, i.e. the number of person-hours for each project
constraint_count <- 1
###########################################################################
# Import the data and look at the first six rows
df <- read.csv(file=data_path)
head(df)
|
# ## Exploratory Data Analysis | Reinforcing Skills
# Let's load the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
# Load our data from the current directory, turn it into a DataFrame, and assign it to the variable df. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
# Display the first 5 observations of the DataFrame.
print(df.head())
# Display how many features and observations the DataFrame consists of.
print("Number of Features:", df.shape[1])
print("Number of Observations:", df.shape[0])
# Display the data types of the variables in the DataFrame and its memory usage.
df.info()
# Display the basic statistics for the numerical variables in the DataFrame.
# Using the mean and standard deviation values, reason about how much variance each variable has.
# Accordingly, PetalLengthCm has the highest variance and SepalWidthCm the lowest; SepalLengthCm and PetalWidthCm have moderate variance.
df.describe()
# Observe how many missing values each feature in the DataFrame has.
df.isnull().sum()
# Draw a correlation matrix to show whether the numerical variables are correlated, and comment on the coefficients.
# Which two variables have the strongest positive relationship?
# The strongest positive relationship, with a coefficient of 0.96, is between PetalLengthCm and PetalWidthCm.
import seaborn as sns
import matplotlib.pyplot as plt
correlation_matrix = df.corr()
sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm")
plt.show()
# Draw a heatmap so the correlation coefficients are easier to read.
corr_matrix = df.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", linewidths=0.5)
plt.title("Korelasyon Matrisi Isı Haritası")
plt.show()
# Veri çerçevemizin hedef değişkeninin "variety" benzersiz değerlerini görüntüleyelim.
unique_species = df["Species"].unique()
print(unique_species)
# Veri çerçevemizin hedef değişkeninin "variety" benzersiz kaç adet değer içerdiğini görüntüleyelim.
unique_species_count = df["Species"].nunique()
print(unique_species_count)
# We can see that the sepal.width and sepal.length variables in the DataFrame are continuous. Let's first use a scatterplot to visualize these two continuous variables.
sns.scatterplot(data=df, x="SepalWidthCm", y="SepalLengthCm")
plt.xlabel("Sepal Width (cm)")
plt.ylabel("Sepal Length (cm)")
plt.title("Sepal Width vs Sepal Length")
plt.show()
# To examine the same two variables from a different angle, together with their frequencies, visualize them with a jointplot.
sns.jointplot(
data=df, x="SepalWidthCm", y="SepalLengthCm", kind="scatter", palette="Set2"
)
plt.xlabel("Sepal Width (cm)")
plt.ylabel("Sepal Length (cm)")
plt.suptitle("Sepal Width vs Sepal Length Joint Plot", y=1.02)
plt.show()
# Visualize the same two variables with a scatterplot again, but this time break them down by the target variable with the "variety" parameter.
# Can a clustering be made with the sepal variables across the 3 colors? Let's think about how separable they are.
# The plot shows that some species separate better than others: "setosa" separates clearly from the rest, while the boundary between "versicolor" and "virginica" is less distinct. A grouping based on sepal measurements is possible, but fully separating "versicolor" and "virginica" may be difficult.
sns.scatterplot(
data=df, x="SepalWidthCm", y="SepalLengthCm", hue="Species", style="Species"
)
plt.xlabel("Sepal Width (cm)")
plt.ylabel("Sepal Length (cm)")
plt.title("Sepal Width vs Sepal Length Scatter Plot Grouped by Species")
plt.legend(title="Species")
plt.show()
# Use the value_counts() function to check how balanced our DataFrame is.
df["Species"].value_counts()
# Draw a violin plot to examine the distribution of the sepal.width variable.
# What does this distribution tell us; can we say it is a normal distribution?
# The sepal width distribution does not look perfectly normal: one tail is longer, i.e. the distribution is slightly skewed.
sns.violinplot(x="Species", y="SepalWidthCm", data=df)
# To understand it better, draw a distplot (histogram) of sepal.width.
sns.histplot(df["SepalWidthCm"])
# Visualize the distribution of sepal.length with three violin plots, one per flower species, in a single line.
sns.violinplot(x="Species", y="SepalLengthCm", data=df)
# How many observations does our DataFrame contain for each flower species?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually, pass the species column to the sns.countplot() function.
sns.countplot(x="Species", data=df)
plt.show()
# Visualize the sepal.length and sepal.width variables with sns.jointplot and examine the distribution and the regions where its frequency is high.
sns.scatterplot(
x="SepalLengthCm", y="SepalWidthCm", hue="Species", data=df, palette="Set2"
)
# Add the kind = "kde" parameter to the visualization from the previous cell. This turns the dotted representation of the distribution into a density-oriented visualization.
sns.jointplot(x="SepalLengthCm", y="SepalWidthCm", data=df, kind="kde")
plt.xlabel("Sepal Length (cm)")
plt.ylabel("Sepal Width (cm)")
plt.suptitle("Sepal Width vs Sepal Length Joint Plot (KDE)", y=1.02)
plt.show()
# Draw the distributions of the petal.length and petal.width variables with a scatterplot.
sns.scatterplot(x="PetalLengthCm", y="PetalWidthCm", data=df)
plt.title("Petal Width vs Petal Length Scatterplot")
plt.show()
# Add a third dimension to the same visualization with the hue = "variety" parameter.
sns.scatterplot(
x="PetalLengthCm", y="PetalWidthCm", hue="Species", data=df, palette="Set2"
)
plt.xlabel("Petal Length (cm)")
plt.ylabel("Petal Width (cm)")
plt.suptitle("Petal Width vs Petal Length Scatter Plot", y=1.02)
plt.show()
# Implement the sns.lmplot() visualization with the petal.length and petal.width variables. Answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
# The visualization shows a strong positive relationship between petal length and petal width: as petal length increases, petal width increases as well.
sns.lmplot(x="PetalLengthCm", y="PetalWidthCm", data=df)
plt.xlabel("Petal Length (cm)")
plt.ylabel("Petal Width (cm)")
plt.suptitle("Petal Width vs Petal Length", y=1.02)
plt.show()
# To reinforce the answer to this question, print the correlation coefficient between the two variables.
corr = df[["PetalLengthCm", "PetalWidthCm"]].corr()
print(corr)
# Create a new total length feature by adding the Petal Length and Sepal Length values.
df["total_length"] = df["PetalLengthCm"] + df["SepalLengthCm"]
# Print the mean value of total.length.
print("Total Length mean value:", df["total_length"].mean())
# Print the standard deviation of total.length.
print("Total Length Standard Deviation:", df["total_length"].std())
# Print the maximum value of sepal.length.
print("Sepal length maximum value:", df["SepalLengthCm"].max())
# Print the observations where sepal.length is greater than 5.5 and the species is setosa.
df[(df["Species"] == "Iris-setosa") & (df["SepalLengthCm"] > 5.5)]
# For observations where petal.length is less than 5 and the species is virginica, print only the sepal.length and sepal.width variables and their values.
df_filtered = df[(df["PetalLengthCm"] < 5) & (df["Species"] == "Iris-virginica")]
df_filtered.loc[:, ["SepalLengthCm", "SepalWidthCm"]]
# Group by our target variable "variety" (Species) and display the mean of each variable.
df.groupby("Species").mean()
# Group by the target variable "variety" and print the standard deviation values of only the petal.length variable.
std_by_variety = df.groupby("Species")["PetalLengthCm"].std()
print(std_by_variety)
|
# # TPS - Mar 2021 - EDA + Models
# Packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Dataset for train
df_train = pd.read_csv("../input/tabular-playground-series-mar-2021/train.csv")
# Dataset for test
df_test = pd.read_csv("../input/tabular-playground-series-mar-2021/test.csv")
# Dataset for submission
sample_submission = pd.read_csv(
"../input/tabular-playground-series-mar-2021/sample_submission.csv"
)
# # 1. Exploratory Data Analysis (EDA)
# ## 1.1 General
# Columns of train dataset
df_train.columns
# Columns of test dataset
df_test.columns
# All of the feature columns, cat0 - cat18 are categorical, and the feature columns cont0 - cont10 are continuous.
# Dataset Shape - Train
df_train.shape
# Dataset Shape - Test
df_test.shape
# First 5 rows - Train
df_train.head()
# First 5 rows - Test
df_test.head()
# Info - Train
df_train.info()
# Info - Test
df_test.info()
# As can be seen, there are no NaN values in the columns
# Categorical columns - Names
cat_columns = ["cat" + str(i) for i in range(0, 19)]
cat_columns
# Continuous columns - Names
cont_columns = ["cont" + str(i) for i in range(0, 11)]
cont_columns
# Describe (continuous variables) - Train
df_train.describe()
# Describe (continuous variables) - Test
df_test.describe()
# ## 1.2 Categorical Variables
# Quantity per category and column - Train
df_train[cat_columns].apply(pd.Series.value_counts).fillna(0)
# Quantity per category and column - Test
df_test[cat_columns].apply(pd.Series.value_counts).fillna(0)
def grafico_contador(columna, dataset):
import random
paleta = sns.color_palette()
color = random.choice(paleta)
cantidad = dataset[columna].value_counts()
df_cantidad = pd.DataFrame(cantidad)
plt.figure(figsize=(8, 6))
ax = plt.bar(x=df_cantidad.index, height=df_cantidad[columna], color=color)
plt.title("Quantity per Type for " + columna, fontsize=20)
plt.ylabel("n")
# Quantity per category and column - Train
for columna in cat_columns:
grafico_contador(columna, df_train)
# Quantity per category and column - Test
for columna in cat_columns:
grafico_contador(columna, df_test)
# ## 1.3 Continuous variables
def grafico_distribucion(columna, dataset):
import random
datos = dataset[columna]
paleta = sns.color_palette()
color = random.choice(paleta)
sns.displot(datos, color=color)
plt.show()
# Distribution for continuous variables - Train
for columna in cont_columns:
grafico_distribucion(columna, df_train)
# Distribution for continuous variables - Test
for columna in cont_columns:
grafico_distribucion(columna, df_test)
# # 2. Encoding for categorical variables
# ## 2.1 Label encoding
from sklearn.preprocessing import LabelEncoder
# Label column by column - Train and test
for columna in cat_columns:
LB_encoder = LabelEncoder()
LB_encoder.fit(df_train[columna])
df_train[columna] = LB_encoder.transform(df_train[columna])
df_test[columna] = LB_encoder.transform(df_test[columna])
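# Caveat (an assumption about robustness, not in the original notebook): a LabelEncoder fitted
# on the train column alone raises a ValueError for categories that only occur in the test set.
# A defensive alternative is to fit each encoder on the union of both columns, e.g.:
# for columna in cat_columns:
#     LB_encoder = LabelEncoder()
#     LB_encoder.fit(pd.concat([df_train[columna], df_test[columna]]))
#     df_train[columna] = LB_encoder.transform(df_train[columna])
#     df_test[columna] = LB_encoder.transform(df_test[columna])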
df_train.head()
df_test.head()
# # 3. Baseline model selection with Pycaret
# Below are the Pycaret results obtained at the beginning of the competition. I take them as a baseline for model selection.
# 
# # 4. Datasets for Model training
# Target for training
target = df_train["target"]
target.head()
# Train dataset
train_LB = df_train.drop(columns=["target"])
train_LB.head()
# Columns for train
train_LB.columns
# # 5. LightGBM Model
#
from lightgbm import LGBMRegressor
hyperparameters = {
"random_state": 42,
"metric": "rmse",
"n_jobs": -1,
"cat_feature": [x for x in range(len(cat_columns))],
"reg_alpha": 6.147694913504962,
"reg_lambda": 0.002457826062076097,
"colsample_bytree": 0.20,
"learning_rate": 0.01,
"max_depth": 48,
"num_leaves": 100,
"min_child_samples": 275,
"n_estimators": 2200,
"cat_smooth": 40.0,
"max_bin": 512,
"min_data_per_group": 100,
"bagging_freq": 1,
"bagging_fraction": 0.70,
"cat_l2": 12.0,
}
model = LGBMRegressor(**hyperparameters)
model.fit(train_LB, target)
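# Optional sanity check (not part of the original notebook): a quick out-of-fold RMSE estimate
# with KFold, under the same hyperparameters, gives a local reference before submitting.
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
kf = KFold(n_splits=5, shuffle=True, random_state=42)
fold_rmses = []
for tr_idx, va_idx in kf.split(train_LB):
    fold_model = LGBMRegressor(**hyperparameters)
    fold_model.fit(train_LB.iloc[tr_idx], target.iloc[tr_idx])
    fold_pred = fold_model.predict(train_LB.iloc[va_idx])
    fold_rmses.append(mean_squared_error(target.iloc[va_idx], fold_pred) ** 0.5)
print("CV RMSE:", np.mean(fold_rmses))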
# # 6. Prediction and submit
sample_submission["target"] = model.predict(df_test)
sample_submission.to_csv("lightgbm.csv", index=False)
|
import random
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.datasets import load_diabetes
x, y = load_diabetes(return_X_y=True)
# # Batch Gradient Descent
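# Update rules implemented below for the linear model y_hat = b0 + X @ w with squared-error loss:
#     dL/db0 = -2 * mean(y - y_hat)
#     dL/dw  = -2 * X.T @ (y - y_hat) / n
# Each epoch applies b0 <- b0 - lr * dL/db0 and w <- w - lr * dL/dw on the full training set.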
class GD:
def __init__(self, lr=0.01, epochs=1000):
self.lr = lr
self.epochs = epochs
self.coeff_ = None
self.intercept_ = None
def fit(self, x_train, y_train):
self.intercept_ = 0
self.coeff_ = np.ones(x_train.shape[1])
for i in range(self.epochs):
y_pred = self.intercept_ + np.dot(x_train, self.coeff_)
der = -2 * np.mean(y_train - y_pred)
self.intercept_ = self.intercept_ - (self.lr * der)
# coeffs
coeff_der = -2 * (np.dot((y_train - y_pred), x_train) / (x_train.shape[0]))
self.coeff_ = self.coeff_ - (self.lr * coeff_der)
print(self.intercept_, self.coeff_)
def predict(self, x):
return np.dot(x, self.coeff_) + self.intercept_
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
gd = GD(lr=0.1, epochs=1000)
gd.fit(X_train, y_train)
y_pred = gd.predict(X_test)
r2_score(y_test, y_pred)
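# Sanity check (an addition, not part of the original notebook): scikit-learn's closed-form
# LinearRegression gives a reference R^2 to compare the gradient-descent variants against.
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
print("LinearRegression R2:", r2_score(y_test, lin_reg.predict(X_test)))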
# # Stochastic Gradient Descent
class SGD:
def __init__(self, lr=0.01, epochs=100):
self.lr = lr
self.epochs = epochs
self.coeff_ = None
self.intercept_ = None
def fit(self, x_train, y_train):
self.intercept_ = 0
self.coeff_ = np.ones(x_train.shape[1])
for i in range(self.epochs):
for i in range(x_train.shape[0]):
idx = np.random.randint(0, x_train.shape[0])
y_pred = self.intercept_ + np.dot(x_train[idx], self.coeff_)
der = -2 * (y_train[idx] - y_pred)
self.intercept_ = self.intercept_ - (self.lr * der)
# coeffs
coeff_der = -2 * np.dot((y_train[idx] - y_pred), x_train[idx])
self.coeff_ = self.coeff_ - (self.lr * coeff_der)
print(self.intercept_, self.coeff_)
def predict(self, x):
return np.dot(x, self.coeff_) + self.intercept_
sgd = SGD(lr=0.1, epochs=1000)
sgd.fit(X_train, y_train)
y_preds = sgd.predict(X_test)
r2_score(y_test, y_preds)
# # Mini Batch Gradient Descent
class MbGD:
def __init__(self, lr=0.01, epochs=100, batch_size=10):
self.lr = lr
self.epochs = epochs
self.coeff_ = None
self.intercept_ = None
self.batch_size = batch_size
def fit(self, x_train, y_train):
self.intercept_ = 0
self.coeff_ = np.ones(x_train.shape[1])
for i in range(self.epochs):
for i in range(int(x_train.shape[0] / self.batch_size)):
idx = random.sample(range(x_train.shape[0]), self.batch_size)
y_pred = self.intercept_ + np.dot(x_train[idx], self.coeff_)
intercept_der = -2 * np.mean(y_train[idx] - y_pred)
self.intercept_ = self.intercept_ - (self.lr * intercept_der)
# coeffs
coeff_der = -2 * np.dot((y_train[idx] - y_pred), x_train[idx])
self.coeff_ = self.coeff_ - (self.lr * coeff_der)
print(self.intercept_, self.coeff_)
def predict(self, x):
return np.dot(x, self.coeff_) + self.intercept_
Mbgd = MbGD(lr=0.2, epochs=1000, batch_size=100)
Mbgd.fit(X_train, y_train)
y_predm = Mbgd.predict(X_test)
r2_score(y_test, y_predm)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_df = pd.read_csv("/kaggle/input/us-accidents/US_Accidents_Dec21_updated.csv")
train_df.shape
train_df.head(1)
train_df["State"].unique()
train_df.columns
# Using boolean indexing with DataFrame.loc, extract the rows for selected states.
df2 = train_df.loc[train_df["State"] == "TX"]
df3 = train_df.loc[train_df["State"] == "FL"]
frames = [df2, df3]
result = pd.concat(frames)
display(result)
# # Select Features
# print(train_df.columns
df_TX = df2.loc[
:,
[
"ID",
"Start_Lat",
"Start_Lng",
"State",
"Start_Time",
"End_Time",
"Distance(mi)",
"Temperature(F)",
"Wind_Chill(F)",
"Humidity(%)",
"Pressure(in)",
"Visibility(mi)",
"Wind_Direction",
"Wind_Speed(mph)",
"Precipitation(in)",
"Weather_Condition",
"Amenity",
"Bump",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Station",
"Stop",
"Traffic_Calming",
"Traffic_Signal",
"Sunrise_Sunset",
"Civil_Twilight",
"Nautical_Twilight",
"Astronomical_Twilight",
"Severity",
],
]
df_TX.head(1)
# # Feature Transform
from datetime import datetime
# convert date columns to datetime objects
df_TX["Start_Time"] = pd.to_datetime(df_TX["Start_Time"], format="%Y-%m-%d %H:%M:%S")
df_TX["End_Time"] = pd.to_datetime(df_TX["End_Time"], format="%Y-%m-%d %H:%M:%S")
# subtract columns to get time difference
df_TX["time_diff"] = (df_TX["End_Time"] - df_TX["Start_Time"]).dt.total_seconds() / 60
df_TX["time_binary"] = df_TX["time_diff"].apply(lambda x: 1 if x > 156 else 0)
import pandas as pd
# read in your original dataframe
# calculate the lower and upper bounds for the middle 90% range
lower_bound = df_TX["time_diff"].quantile(0.05)
upper_bound = df_TX["time_diff"].quantile(0.95)
# select all rows in the dataframe where time_diff is within the middle 90% range
df_TX_filtered = df_TX[
(df_TX["time_diff"] >= lower_bound) & (df_TX["time_diff"] <= upper_bound)
]
print("Before removing outliers")
print(np.max(df_TX["time_diff"]))
print(np.min(df_TX["time_diff"]))
print(np.average(df_TX["time_diff"]))
print(np.std(df_TX["time_diff"]))
print("After removing outliers")
print(np.max(df_TX_filtered["time_diff"]))
print(np.min(df_TX_filtered["time_diff"]))
print(np.average(df_TX_filtered["time_diff"]))
print(np.std(df_TX_filtered["time_diff"]))
df_TX_filtered
# import pandas as pd
# import matplotlib.pyplot as plt
# # plot density plot of the column data
# df_TX_filtered.boxplot(column='time_diff')
# plt.ylabel('Accident duration (in minutes)')
# plt.savefig('/kaggle/working/plotd.jpg', format='jpg', dpi=300)
# # plot density plot of the column data
# df_TX_filtered['time_diff'].plot(kind='density')
# plt.xlabel('Accident Duration (in minutes)')
# plt.xlim(0,400)
# plt.show()
# plt.savefig('/kaggle/working/plotd.jpg', format='jpg', dpi=300)
df_TX["Temperature(F)"] = df_TX["Temperature(F)"].fillna(
np.mean(df_TX["Temperature(F)"])
)
df_TX["Wind_Chill(F)"] = df_TX["Wind_Chill(F)"].fillna(np.mean(df_TX["Wind_Chill(F)"]))
df_TX["Humidity(%)"] = df_TX["Humidity(%)"].fillna(np.mean(df_TX["Humidity(%)"]))
df_TX["Pressure(in)"] = df_TX["Pressure(in)"].fillna(np.mean(df_TX["Pressure(in)"]))
df_TX["Pressure(in)"] = df_TX["Visibility(mi)"].fillna(np.mean(df_TX["Visibility(mi)"]))
df_TX["Wind_Direction"] = df_TX["Wind_Direction"].fillna(
df_TX["Wind_Direction"].mode()[0]
)
df_TX["Wind_Speed(mph)"] = df_TX["Wind_Speed(mph)"].fillna(
np.mean(df_TX["Wind_Speed(mph)"])
)
df_TX["Precipitation(in)"] = df_TX["Precipitation(in)"].fillna(
np.mean(df_TX["Precipitation(in)"])
)
df_TX["Sunrise_Sunset"] = df_TX["Sunrise_Sunset"].fillna(
df_TX["Sunrise_Sunset"].mode()[0]
)
df_TX["Weather_Condition"] = df_TX["Weather_Condition"].fillna(
df_TX["Weather_Condition"].mode()[0]
)
df_TX["Civil_Twilight"] = df_TX["Civil_Twilight"].fillna(
df_TX["Civil_Twilight"].mode()[0]
)
df_TX["Nautical_Twilight"] = df_TX["Nautical_Twilight"].fillna(
df_TX["Nautical_Twilight"].mode()[0]
)
df_TX["Astronomical_Twilight"] = df_TX["Astronomical_Twilight"].fillna(
df_TX["Astronomical_Twilight"].mode()[0]
)
df_TX["Visibility(mi)"] = df_TX["Visibility(mi)"].fillna(
np.mean(df_TX["Visibility(mi)"])
)
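# Equivalent, more compact imputation (an alternative sketch, not how the original proceeds):
# numeric columns take their column mean, categorical columns take their mode.
# num_cols = ["Temperature(F)", "Wind_Chill(F)", "Humidity(%)", "Pressure(in)",
#             "Visibility(mi)", "Wind_Speed(mph)", "Precipitation(in)"]
# cat_cols = ["Wind_Direction", "Weather_Condition", "Sunrise_Sunset",
#             "Civil_Twilight", "Nautical_Twilight", "Astronomical_Twilight"]
# for c in num_cols:
#     df_TX[c] = df_TX[c].fillna(df_TX[c].mean())
# for c in cat_cols:
#     df_TX[c] = df_TX[c].fillna(df_TX[c].mode()[0])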
df_TX[
[
"Amenity",
"Bump",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Station",
"Stop",
"Traffic_Calming",
"Traffic_Signal",
]
] = (
df_TX[
[
"Amenity",
"Bump",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Station",
"Stop",
"Traffic_Calming",
"Traffic_Signal",
]
]
* 1
)
df_TX["Weather_Condition"] = pd.factorize(df_TX["Weather_Condition"])[0]
df_TX["Sunrise_Sunset"] = pd.factorize(df_TX["Sunrise_Sunset"])[0]
df_TX["Civil_Twilight"] = pd.factorize(df_TX["Civil_Twilight"])[0]
df_TX["Astronomical_Twilight"] = pd.factorize(df_TX["Astronomical_Twilight"])[0]
df_TX["Nautical_Twilight"] = pd.factorize(df_TX["Nautical_Twilight"])[0]
df_TX["Wind_Direction"] = pd.factorize(df_TX["Wind_Direction"])[0]
pd.set_option("display.max_columns", 29)
print(df_TX.head(n=1))
df_TX = df_TX.drop(["ID", "State", "End_Time"], axis=1)
df_TX_input = df_TX.drop(["Distance(mi)", "Severity"], axis=1)
df_TX["time_diff"]
import pandas as pd
# read in your original dataframe
# calculate the lower and upper bounds for the middle 90% range
lower_bound = df_TX["time_diff"].quantile(0.05)
upper_bound = df_TX["time_diff"].quantile(0.95)
# select all rows in the dataframe where time_diff is within the middle 90% range
df_TX_filtered = df_TX[
(df_TX["time_diff"] >= lower_bound) & (df_TX["time_diff"] <= upper_bound)
]
# df_TX_input.head(1)
df_TX_input1 = df_TX_filtered.drop(
[
"Start_Lat",
"Start_Lng",
"Start_Time",
"time_binary",
"time_diff",
"Severity",
"Distance(mi)",
],
axis=1,
)
df_TX_output1 = df_TX_filtered["time_diff"]
df_TX_output2 = df_TX["Severity"]
df_TX_output3 = df_TX_filtered["time_binary"]
df_TX_concat1 = pd.concat([df_TX_input1, df_TX_output1], axis=1)
df_TX_concat2 = pd.concat([df_TX_input1, df_TX_output2], axis=1)
df_TX_concat3 = pd.concat([df_TX_input1, df_TX_output3], axis=1)
df_TX_input1
# # **start testing**
# # continuous regression testing
df_TX_concat1 = pd.concat([df_TX_input1, df_TX_output1], axis=1)
df_TX_input1
# **Full version**
# Convert to numpy arrays
import numpy as np
regression_input_concat = pd.concat([df_TX_input1, df_TX_output1], axis=1)
features = np.array(df_TX_input1)
labels = np.array(df_TX_output1)
# Training and Testing Sets
from sklearn.model_selection import train_test_split
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, test_size=0.25, random_state=42
)
# **Two parts**
regression_input_concat = pd.concat([df_TX_input1, df_TX_output1], axis=1)
# assuming your DataFrame is named regression_input_concat
# filter the rows where time_diff is less than 156
accident_short_p = regression_input_concat[regression_input_concat["time_diff"] < 156]
# filter the rows where time_diff is greater than or equal to 156
accident_long_p = regression_input_concat[regression_input_concat["time_diff"] >= 156]
accident_long_p
# split the DataFrame into features and labels
features_short_p = accident_short_p.drop("time_diff", axis=1)
labels_short_p = accident_short_p["time_diff"]
# split the DataFrame into features and labels
features_long_p = accident_long_p.drop("time_diff", axis=1)
labels_long_p = accident_long_p["time_diff"]
# Training and Testing Sets
from sklearn.model_selection import train_test_split
features_short_p = np.array(features_short_p)
labels_short_p = np.array(labels_short_p)
train_features, test_features, train_labels, test_labels = train_test_split(
features_short_p, labels_short_p, test_size=0.25, random_state=42
)
len(features_short_p)
print("Training Features Shape:", train_features.shape)
print("Training Labels Shape:", train_labels.shape)
print("Testing Features Shape:", test_features.shape)
print("Testing Labels Shape:", test_labels.shape)
# # **Catboost**
import catboost as cb
import numpy as np
import pandas as pd
import seaborn as sns
import shap
# from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.inspection import permutation_importance
train_dataset = cb.Pool(train_features, train_labels)
test_dataset = cb.Pool(test_features, test_labels)
model = cb.CatBoostRegressor(loss_function="RMSE")
grid = {
"iterations": [100, 150, 200],
"learning_rate": [0.03, 0.1],
"depth": [2, 4, 6, 8],
"l2_leaf_reg": [0.2, 0.5, 1, 3],
}
model.grid_search(grid, train_dataset)
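# The grid search above also returns the selected configuration; capturing it keeps the run
# reproducible (a sketch assuming CatBoost's documented return value, a dict with a "params" key):
# search_result = model.grid_search(grid, train_dataset)
# print(search_result["params"])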
pred = model.predict(test_features)
rmse = np.sqrt(mean_squared_error(test_labels, pred))
r2 = r2_score(test_labels, pred)
print("Testing performance")
print("RMSE: {:.2f}".format(rmse))
print("R2: {:.2f}".format(r2))
from sklearn.metrics import mean_absolute_error
# assuming test_labels and pred are arrays or lists of the same length
mae = mean_absolute_error(test_labels, pred)
print(f"Mean Absolute Error: {mae:.4f}")
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
# plt.plot(range(100), y_pred[:100], label='Predicted Values')
plt.plot(range(100), pred[:100], label="Predicted Values")
plt.plot(range(100), test_labels[:100], label="Actual Values")
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
# **Long term prediction**
# Training and Testing Sets
from sklearn.model_selection import train_test_split
features_long_p = np.array(features_long_p)
labels_long_p = np.array(labels_long_p)
train_features, test_features, train_labels, test_labels = train_test_split(
features_long_p, labels_long_p, test_size=0.25, random_state=42
)
train_dataset = cb.Pool(train_features, train_labels)
test_dataset = cb.Pool(test_features, test_labels)
model = cb.CatBoostRegressor(loss_function="RMSE")
grid = {
"iterations": [100, 150, 200],
"learning_rate": [0.03, 0.1],
"depth": [2, 4, 6, 8],
"l2_leaf_reg": [0.2, 0.5, 1, 3],
}
model.grid_search(grid, train_dataset)
pred = model.predict(test_features)
rmse = np.sqrt(mean_squared_error(test_labels, pred))
r2 = r2_score(test_labels, pred)
print("Testing performance")
print("RMSE: {:.2f}".format(rmse))
print("R2: {:.2f}".format(r2))
from sklearn.metrics import mean_absolute_error
# assuming test_labels and pred are arrays or lists of the same length
mae = mean_absolute_error(test_labels, pred)
print(f"Mean Absolute Error: {mae:.4f}")
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
# plt.plot(range(100), y_pred[:100], label='Predicted Values')
plt.plot(range(100), pred[:100], label="Predicted Values")
plt.plot(range(100), test_labels[:100], label="Actual Values")
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
# # RFCNN
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from keras.layers import Input, Conv1D, MaxPooling1D, Flatten, Dense, Dropout
from keras.models import Model
# Assume you have preprocessed time series data in the form of numpy arrays train_features and train_labels for training,
# and test_features and test_labels for testing
train_features = train_features.reshape(
train_features.shape[0], train_features.shape[1], 1
)
test_features = test_features.reshape(test_features.shape[0], test_features.shape[1], 1)
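# The reshape above turns each tabular feature vector into a length-F "sequence" with a single
# channel, i.e. shape (samples, features, 1), which is what the Conv1D layers below expect.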
# Build RFCNN model
inputs = Input(shape=(train_features.shape[1], train_features.shape[2]))
x = Conv1D(filters=64, kernel_size=3, activation="relu")(inputs)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=32, kernel_size=3, activation="relu")(x)
x = MaxPooling1D(pool_size=2)(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
outputs = Dense(1, activation="linear")(x)
model = Model(inputs=inputs, outputs=outputs)
# Compile model
model.compile(loss="mean_squared_error", optimizer="adam")
# Train RFCNN model
model.fit(
train_features,
train_labels,
epochs=50,
batch_size=32,
validation_split=0.1,
verbose=0,
)
# Get RFCNN predictions on test set
y_pred = model.predict(test_features)
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
plt.plot(range(100), y_pred[:100], label="Predicted Values")
plt.plot(range(100), test_labels[:100], label="Actual Values")
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
from sklearn.ensemble import RandomForestRegressor
# Build Random Forest model
rf = RandomForestRegressor(n_estimators=100, max_depth=10)
# Get CNN output as input features for Random Forest
cnn_output = Model(inputs=model.inputs, outputs=model.layers[-2].output)
train_rf_features = cnn_output.predict(train_features)
test_rf_features = cnn_output.predict(test_features)
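# cnn_output re-uses the trained network up to its penultimate layer (the Dropout on top of
# Flatten), so the random forest below is fit on the CNN's learned feature representation
# rather than on the raw inputs; this stacking is what the "RFCNN" name refers to.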
# Train Random Forest model
rf.fit(train_rf_features, train_labels)
# Get Random Forest predictions on test set
y_pred_rf = rf.predict(test_rf_features)
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.inspection import permutation_importance
rmse = np.sqrt(mean_squared_error(test_labels, y_pred_rf))
r2 = r2_score(test_labels, y_pred_rf)
print("Testing performance")
print("RMSE: {:.2f}".format(rmse))
print("R2: {:.2f}".format(r2))
from sklearn.metrics import mean_absolute_error
# assuming test_labels and pred are arrays or lists of the same length
mae = mean_absolute_error(test_labels, y_pred_rf)
print(f"Mean Absolute Error: {mae:.4f}")
y_pred, test_labels
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
# plt.plot(range(100), y_pred[:100], label='Predicted Values')
plt.plot(range(100), y_pred_rf[:100], label="Predicted Values")
plt.plot(range(100), test_labels[:100], label="Actual Values")
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.savefig("rfcnn.jpg", dpi=300)
plt.show()
rmse = np.sqrt(mean_squared_error(test_labels[:100], y_pred_rf[:100]))
r2 = r2_score(test_labels, y_pred_rf)
print("Testing performance")
print("RMSE: {:.2f}".format(rmse))
print("R2: {:.2f}".format(r2))
# **Long term regression**
# Training and Testing Sets
from sklearn.model_selection import train_test_split
features_long_p = np.array(features_long_p)
labels_long_p = np.array(labels_long_p)
train_features, test_features, train_labels, test_labels = train_test_split(
features_long_p, labels_long_p, test_size=0.25, random_state=42
)
print("Training Features Shape:", train_features.shape)
print("Training Labels Shape:", train_labels.shape)
print("Testing Features Shape:", test_features.shape)
print("Testing Labels Shape:", test_labels.shape)
# Assume you have preprocessed time series data in the form of numpy arrays train_features and train_labels for training,
# and test_features and test_labels for testing
train_features = train_features.reshape(
train_features.shape[0], train_features.shape[1], 1
)
test_features = test_features.reshape(test_features.shape[0], test_features.shape[1], 1)
# Build RFCNN model
inputs = Input(shape=(train_features.shape[1], train_features.shape[2]))
x = Conv1D(filters=64, kernel_size=3, activation="relu")(inputs)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=32, kernel_size=3, activation="relu")(x)
x = MaxPooling1D(pool_size=2)(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
outputs = Dense(1, activation="linear")(x)
model = Model(inputs=inputs, outputs=outputs)
# Compile model
model.compile(loss="mean_squared_error", optimizer="adam")
# Train RFCNN model
model.fit(
train_features,
train_labels,
epochs=50,
batch_size=32,
validation_split=0.1,
verbose=0,
)
# Get RFCNN predictions on test set
y_pred = model.predict(test_features)
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
plt.plot(range(100), y_pred[:100], label="Predicted Values")
plt.plot(range(100), test_labels[:100], label="Actual Values")
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
from sklearn.ensemble import RandomForestRegressor
# Build Random Forest model
rf = RandomForestRegressor(n_estimators=100, max_depth=10)
# Get CNN output as input features for Random Forest
cnn_output = Model(inputs=model.inputs, outputs=model.layers[-2].output)
train_rf_features = cnn_output.predict(train_features)
test_rf_features = cnn_output.predict(test_features)
# Train Random Forest model
rf.fit(train_rf_features, train_labels)
# Get Random Forest predictions on test set
y_pred_rf = rf.predict(test_rf_features)
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.inspection import permutation_importance
rmse = np.sqrt(mean_squared_error(test_labels, y_pred_rf))
r2 = r2_score(test_labels, y_pred_rf)
print("Testing performance")
print("RMSE: {:.2f}".format(rmse))
print("R2: {:.2f}".format(r2))
from sklearn.metrics import mean_absolute_error
# assuming test_labels and pred are arrays or lists of the same length
mae = mean_absolute_error(test_labels, y_pred_rf)
print(f"Mean Absolute Error: {mae:.4f}")
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
# plt.plot(range(100), y_pred[:100], label='Predicted Values')
plt.plot(range(100), y_pred_rf[:100], label="Predicted Values bt rf")
plt.plot(range(100), test_labels[:100], label="Actual Values")
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
rmse = np.sqrt(mean_squared_error(test_labels[:100], y_pred_rf[:100]))
r2 = r2_score(test_labels, y_pred_rf)
print("Testing performance")
print("RMSE: {:.2f}".format(rmse))
print("R2: {:.2f}".format(r2))
# # **LightGBM**
# **Short term prediction**
# Training and Testing Sets
from sklearn.model_selection import train_test_split
features_short_p = np.array(features_short_p)
labels_short_p = np.array(labels_short_p)
train_features, test_features, train_labels, test_labels = train_test_split(
features_short_p, labels_short_p, test_size=0.25, random_state=42
)
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.inspection import permutation_importance
import lightgbm as lgb
# create the LightGBM dataset for training and validation
train_data = lgb.Dataset(train_features, label=train_labels)
val_data = lgb.Dataset(test_features, label=test_labels)
# set up the LightGBM hyperparameters
params = {
"objective": "regression",
"metric": "rmse",
"num_leaves": 31,
"learning_rate": 0.05,
"feature_fraction": 0.9,
"bagging_fraction": 0.8,
"bagging_freq": 5,
"verbose": -1,
}
# train the LightGBM model
num_round = 10000
model = lgb.train(
params, train_data, num_round, valid_sets=[val_data], early_stopping_rounds=100
)
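# Note: passing early_stopping_rounds directly to lgb.train() works on older LightGBM releases;
# on LightGBM >= 4.0 the equivalent is the early-stopping callback, e.g.
# model = lgb.train(params, train_data, num_round, valid_sets=[val_data],
#                   callbacks=[lgb.early_stopping(100)])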
# make predictions on the test set and calculate the RMSE
y_pred = model.predict(test_features)
rmse = mean_squared_error(test_labels, y_pred, squared=False)
r2 = r2_score(test_labels, y_pred)
print(f"Root Mean Squared Error: {rmse:.4f}")
print("R2: {:.2f}".format(r2))
from sklearn.metrics import mean_absolute_error
# assuming test_labels and pred are arrays or lists of the same length
mae = mean_absolute_error(test_labels, y_pred)
print(f"Mean Absolute Error: {mae:.4f}")
import shap
# compute SHAP values for the test set using the TreeExplainer method
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(test_features)
# # plot the SHAP values for a single instance
# shap.initjs()
# shap.force_plot(explainer.expected_value, shap_values[0,:], test_features[0,:])
# plot the SHAP values for all instances
shap.summary_plot(
shap_values, test_features, feature_names=df_TX_input1.columns, plot_type="bar"
)
# save the SHAP summary plot as an image file
plt.savefig("shap_summary_plot1.jpg")
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
plt.plot(range(100), test_labels[:100], label="Predicted Values")
plt.plot(range(100), y_pred[:100], label="Predicted Values bt rf")
# plt.plot(range(100), test_labels[:100], label='Actual Values')
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
# **Long Term Prediction**
# Training and Testing Sets
from sklearn.model_selection import train_test_split
features_long_p = np.array(features_long_p)
labels_long_p = np.array(labels_long_p)
train_features, test_features, train_labels, test_labels = train_test_split(
features_long_p, labels_long_p, test_size=0.25, random_state=42
)
import lightgbm as lgb
# create the LightGBM dataset for training and validation
train_data = lgb.Dataset(train_features, label=train_labels)
val_data = lgb.Dataset(test_features, label=test_labels)
# set up the LightGBM hyperparameters
params = {
"objective": "regression",
"metric": "rmse",
"num_leaves": 31,
"learning_rate": 0.05,
"feature_fraction": 0.9,
"bagging_fraction": 0.8,
"bagging_freq": 5,
"verbose": -1,
}
# train the LightGBM model
num_round = 10000
model = lgb.train(
params, train_data, num_round, valid_sets=[val_data], early_stopping_rounds=100
)
# make predictions on the test set and calculate the RMSE
y_pred = model.predict(test_features)
rmse = mean_squared_error(test_labels, y_pred, squared=False)
r2 = r2_score(test_labels, y_pred)
print(f"Root Mean Squared Error: {rmse:.4f}")
print("R2: {:.2f}".format(r2))
from sklearn.metrics import mean_absolute_error
# assuming test_labels and pred are arrays or lists of the same length
mae = mean_absolute_error(test_labels, y_pred)
print(f"Mean Absolute Error: {mae:.4f}")
import matplotlib.pyplot as plt
# Assuming y_pred and test_labels are numpy arrays or lists of the same length
plt.plot(range(100), test_labels[:100], label="Predicted Values")
plt.plot(range(100), y_pred[:100], label="Predicted Values bt rf")
# plt.plot(range(100), test_labels[:100], label='Actual Values')
plt.xlabel("Index")
plt.ylabel("Values")
plt.legend()
plt.show()
import shap
# compute SHAP values for the test set using the TreeExplainer method
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(test_features)
# # plot the SHAP values for a single instance
# shap.initjs()
# shap.force_plot(explainer.expected_value, shap_values[0,:], test_features[0,:])
# plot the SHAP values for all instances
shap.summary_plot(
shap_values, test_features, feature_names=df_TX_input1.columns, plot_type="bar"
)
# save the SHAP summary plot as an image file
plt.savefig("shap_summary_plot1.jpg")
# # **Binary Classification**
# # Short and Long term prediction
# #df_TX_input.head(1)
# df_TX_input1 = df_TX_input.drop(['Start_Lat','Start_Lng','Start_Time','time_binary','time_diff'],axis=1)
df_TX_input1
# Convert to numpy arrays
import numpy as np
features = np.array(df_TX_input1)
labels = np.array(df_TX_output3)
# Training and Testing Sets
from sklearn.model_selection import train_test_split
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, test_size=0.25, random_state=42
)
average = df_TX_output1.mean()
std_dev = df_TX_output1.std()
# print the results
print("Average: ", average)
print("Standard Deviation: ", std_dev)
value_counts = df_TX_output3.value_counts()
# print the results
print(value_counts)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_features = scaler.fit_transform(train_features)
test_features = scaler.transform(test_features)
# # Random Forest
# importing random forest classifier from assemble module
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
"bootstrap": [True],
"max_depth": [80, 200],
"max_features": [2, 5],
"min_samples_leaf": [2, 4],
"min_samples_split": [4, 8],
"n_estimators": [100],
}
# Create a base model
rf = RandomForestClassifier(random_state=42)
# Instantiate the grid search model
grid_search = GridSearchCV(
estimator=rf,
param_grid=param_grid,
cv=2,
n_jobs=-1,
verbose=1,
return_train_score=True,
)
# Fit the grid search to the data
grid_search.fit(train_features, train_labels)
grid_search.best_params_
best_grid = grid_search.best_estimator_
# performing predictions on the test dataset
y_pred_rf = best_grid.predict(test_features)
# import shap
# # compute SHAP values for the test set using the TreeExplainer method
# explainer = shap.TreeExplainer(best_grid)
# shap_values = explainer.shap_values(test_features)
# # # plot the SHAP values for a single instance
# # shap.initjs()
# # shap.force_plot(explainer.expected_value, shap_values[0,:], test_features[0,:])
# # plot the SHAP values for all instances
# shap.summary_plot(shap_values,test_features,feature_names=df_TX_input1.columns, plot_type='bar')
# # save the SHAP summary plot as a PNG image
# # save the SHAP summary plot as a PNG image
# plt.savefig('shap_summary_plot1.jpg')
# metrics are used to find accuracy or error
from sklearn import metrics
print()
# using metrics module for accuracy calculation
print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(test_labels, y_pred_rf))
from sklearn.metrics import confusion_matrix
# assuming test_labels and y_pred are already defined
cm = confusion_matrix(test_labels, y_pred_rf)
# print the confusion matrix
print(cm)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
# Get confusion matrix and classification report
cm = confusion_matrix(test_labels, y_pred_rf)
cr = classification_report(test_labels, y_pred_rf)
# Plot confusion matrix
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# Save confusion matrix as an image file
plt.savefig("confusion_matrix_rf.png", dpi=300)
# Print classification report
print(cr)
from sklearn.metrics import classification_report
# assuming test_labels and y_pred are already defined
report = classification_report(test_labels, y_pred_rf)
# print the classification report
print(report)
# # XGBoost
import xgboost as xgb
from sklearn.metrics import accuracy_score
# Train XGBoost classifier
params = {
"max_depth": 3,
"eta": 0.1,
"objective": "binary:logistic",
"eval_metric": "error",
}
dtrain = xgb.DMatrix(train_features, label=train_labels)
dtest = xgb.DMatrix(test_features, label=test_labels)
num_round = 50
bst = xgb.train(params, dtrain, num_round)
# Predict on test set
y_pred = bst.predict(dtest)
# Evaluate model
y_pred_xg = [round(value) for value in y_pred]
accuracy = accuracy_score(test_labels, y_pred_xg)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
# Get confusion matrix and classification report
cm = confusion_matrix(test_labels, y_pred_xg)
cr = classification_report(test_labels, y_pred_xg)
# Plot confusion matrix
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# Save confusion matrix as an image file
plt.savefig("confusion_matrix_xg.png", dpi=300)
# Print classification report
print(cr)
# **Tree image generation**
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt
# # visualize the first tree in the random forest
# fig, ax = plt.subplots(figsize=(12, 12))
# plot_tree(best_grid.estimators_[0], ax=ax)
fig, ax = plt.subplots(figsize=(300, 200))
plot_tree(
best_grid.estimators_[0],
filled=True,
feature_names=df_TX_input1.columns,
fontsize=5,
)
# plt.savefig('DT2.jpg', dpi = 640)
# plt.show()
# plt.savefig('random_forest_tree.png')
# # **Catboost**
from catboost import CatBoostClassifier
from sklearn.metrics import accuracy_score
# Train CatBoost classifier with early stopping
clf = CatBoostClassifier(
iterations=10000, use_best_model=True, early_stopping_rounds=50
)
clf.fit(train_features, train_labels, eval_set=(test_features, test_labels))
# Predict on test set
y_pred = clf.predict(test_features)
# Evaluate model
accuracy = accuracy_score(test_labels, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
# Get confusion matrix and classification report
cm = confusion_matrix(test_labels, y_pred)
cr = classification_report(test_labels, y_pred)
# Plot confusion matrix
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# Save confusion matrix as an image file
plt.savefig("confusion_matrix_Catboost.png", dpi=300)
# Print classification report
print(cr)
from sklearn.metrics import classification_report
# assuming test_labels and y_pred are already defined
report = classification_report(test_labels, y_pred)
# print the classification report
print(report)
# # Weather Info
filtered_df = (df_TX[df_TX["Severity"] > 1]).dropna()
filtered_df["Start_Time"] = filtered_df["Start_Time"].dt.floor("H")
filtered_df
import seaborn as sns
corr2 = df_TX_concat3.corr()
fig, ax = plt.subplots(figsize=(20, 16))
# Display the heatmap in the larger figure
sns.heatmap(corr2, annot=True, cmap="coolwarm", ax=ax)
plt.show()
fig.savefig("heatmap_duration.png")
# **Florida Test**
df_FL = df3.loc[
:,
[
"ID",
"State",
"Weather_Timestamp",
"Temperature(F)",
"Wind_Chill(F)",
"Humidity(%)",
"Pressure(in)",
"Visibility(mi)",
"Wind_Direction",
"Wind_Speed(mph)",
"Precipitation(in)",
"Weather_Condition",
"Sunrise_Sunset",
"Civil_Twilight",
"Nautical_Twilight",
"Astronomical_Twilight",
"Severity",
],
]
df_FL.head()
df_FL["Temperature(F)"].fillna(np.mean(df_FL["Temperature(F)"]))
df_FL["Wind_Chill(F)"].fillna(np.mean(df_FL["Wind_Chill(F)"]))
df_FL["Humidity(%)"].fillna(np.mean(df_FL["Humidity(%)"]))
df_FL["Pressure(in)"].fillna(np.mean(df_FL["Pressure(in)"]))
df_FL["Visibility(mi)"].fillna(np.mean(df_FL["Visibility(mi)"]))
df_FL["Wind_Direction"].fillna(df_FL["Wind_Direction"].mode()[0])
df_FL["Wind_Speed(mph)"].fillna(np.mean(df_FL["Wind_Speed(mph)"]))
df_FL["Precipitation(in)"].fillna(np.mean(df_FL["Precipitation(in)"]))
df_FL["Sunrise_Sunset"].fillna(df_FL["Sunrise_Sunset"].mode()[0])
df_FL["Weather_Condition"].fillna(df_FL["Weather_Condition"].mode()[0])
df_FL["Civil_Twilight"].fillna(df_FL["Civil_Twilight"].mode()[0])
df_FL["Nautical_Twilight"].fillna(df_FL["Nautical_Twilight"].mode()[0])
df_FL["Astronomical_Twilight"].fillna(df_FL["Astronomical_Twilight"].mode()[0])
df_FL["Wind_Direction"].mode()[0]
X = df_TX.iloc[:, 0:16].values
y = df_TX.iloc[:, 16].values
df_TX.dropna()
import matplotlib.pyplot as plt
# x = [value1, value2, value3,....]
plt.hist(df_FL["Temperature(F)"], bins=20)
plt.show()
plt.hist(df_FL["Wind_Chill(F)"], bins=20)
plt.show()
plt.hist(df_FL["Humidity(%)"], bins=20)
plt.show()
df_TX_concat3
df_TX.corr()
corr = df_TX_concat3.corr()
corr.style.background_gradient(cmap="coolwarm")
# 'RdBu_r', 'BrBG_r', & PuOr_r are other good diverging colormaps
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# create some random data
# data = pd.Series(np.random.normal(size=1000))
# plot the density
df_TX["Temperature(F)"].plot.density()
ax = df_TX["Temperature(F)"].plot.density(color="green")
ax.set_xlabel("Temperature")
ax.set_ylabel("Density_Counts")
# show the plot
plt.show()
# plot the density
df_TX["Pressure(in)"].plot.density()
ax = df_TX["Pressure(in)"].plot.density(color="blue")
ax.set_xlabel("Pressure(in)")
ax.set_ylabel("Density_Counts")
ax.set_xlim([8, 15])
# show the plot
plt.show()
# plot the density
df_TX["Wind_Chill(F)"].plot.density()
ax = df_TX["Wind_Chill(F)"].plot.density(color="green")
ax.set_xlabel("Wind_Chill(F)")
ax.set_ylabel("Density_Counts")
# show the plot
plt.show()
# plot a bar chart
ax = df_TX["Bump"].plot.bar(x="1", y="0", rot=0)
# add axis labels and a title
ax.set_xlabel("Category")
ax.set_ylabel("Value")
ax.set_title("Bar chart of discontinuous data")
# show the plot
plt.show()
plt.bar(df_TX["Bump"])
import pandas as pd
import matplotlib.pyplot as plt
# count the values
counts = df_TX["Stop"].value_counts()
# plot a pie chart
fig, ax = plt.subplots()
ax.pie(counts, labels=["No Stop Sign", "Stop Sign"], autopct="%1.1f%%")
ax.set_title("Pie chart of Stop")
# show the plot
plt.show()
counts = df_TX["Junction"].value_counts()
# plot a pie chart
fig, ax = plt.subplots()
ax.pie(counts, labels=["No Junction", "Junction"], autopct="%1.1f%%")
ax.set_title("Pie chart of Junctions")
# show the plot
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
os.path.join(dirname, filename)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import required libraries
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.optimizers import Adam
from keras.layers import GlobalAveragePooling2D
# ## Read Data
# Define the paths to the train and test directories
train_dir = "/kaggle/input/melanoma-skin-cancer-dataset-of-10000-images/melanoma_cancer_dataset/train"
test_dir = "/kaggle/input/melanoma-skin-cancer-dataset-of-10000-images/melanoma_cancer_dataset/test"
# Set the image dimensions and batch size
img_width, img_height = 224, 224
batch_size = 128
# Define the data generators for the train and test sets
train_datagen = ImageDataGenerator(
rescale=1.0 / 255, # Scale the pixel values from 0-255 to 0-1
rotation_range=40, # Randomly rotate the images up to 40 degrees
width_shift_range=0.2, # Randomly shift the images horizontally up to 20% of the width
height_shift_range=0.2, # Randomly shift the images vertically up to 20% of the height
# shear_range=0.2, # Randomly apply shear transformations
zoom_range=0.2, # Randomly zoom in on the images
horizontal_flip=True, # Randomly flip the images horizontally
fill_mode="nearest",
) # Fill any empty pixels with the nearest available pixel
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
print("Num GPUs Available: ", len(tf.config.list_physical_devices("GPU")))
train_generator = train_datagen.flow_from_directory(
train_dir, # Path to the train directory
target_size=(
img_width,
img_height,
), # Resize the images to the specified dimensions
batch_size=batch_size,
class_mode="binary",
)
test_generator = test_datagen.flow_from_directory(
test_dir, # Path to the test directory
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode="binary",
)
train_generator.__len__()
label = np.concatenate(
[test_generator.next()[1] for i in range(test_generator.__len__())]
)
img, label = test_generator.next()
label.shape
label
for i in range(5):
    print(img.shape)  # (batch_size, 224, 224, 3)
print(label[i])
plt.imshow(img[i])
plt.show()
# ## Create Model
model = Sequential()
model.add(Conv2D(32, 3, activation="relu", input_shape=(img_width, img_height, 3)))
model.add(Dropout(0.1))
model.add(MaxPooling2D())
model.add(Conv2D(64, 3, activation="relu"))
model.add(Dropout(0.15))
# model_1.add(keras.layers.MaxPooling2D())
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# Compile the model (Adam optimizer; RMSprop is another option)
opt = Adam(learning_rate=0.0001)
model.compile(optimizer=opt, loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
# ## Training and Validation
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(patience=3, verbose=1)
tf.config.list_physical_devices("GPU")
# Fine tuning the model
tf.debugging.set_log_device_placement(True)
with tf.device("/GPU:0"):
history = model.fit(
train_generator,
epochs=25,
validation_data=test_generator,
callbacks=[early_stop],
verbose=1,
)
# ## Model Evaluation
# Evaluate the model on the test set
loss, accuracy = model.evaluate(test_generator, batch_size=128)
print("Test accuracy:", accuracy)
# Plot accuracy values vs epoch
import matplotlib.pyplot as plt
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Train", "Test"], loc="upper left")
plt.show()
test_generator
# Predict on the batch fetched above so the shapes line up with `label`
predictions = model.predict(img)
y_pred = np.round(predictions).flatten()
label.shape
print(y_pred[0:10], label[0:10])
from sklearn.metrics import accuracy_score
accuracy_score(label, y_pred)
tf.math.confusion_matrix(label, y_pred)
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import confusion_matrix
# Note: for these labels to align with the predictions, the test generator should be created with shuffle=False
y_pred = (model.predict(test_generator) > 0.5).astype(int)  # predict_classes was removed in TF 2.x
y_true = test_generator.classes
class_names = list(train_generator.class_indices.keys())
cm = confusion_matrix(y_true, y_pred)
cm_display = ConfusionMatrixDisplay(cm, display_labels=class_names)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax, cmap=plt.cm.Blues, values_format=".0f")
plt.title("Confusion Matrix")
plt.show()
|
# # *Hey All!*
# # *This notebook is a reference for making your submissions*
# # *1. Go to the Code section of the competition page*
# # *2. Click on 'Your Work'*
# # *3. Hit 'New Notebook' and get started*
# # *You can copy-paste the code below to get started with the problem statement*
import pandas as pd
import numpy as np
train = pd.read_csv(
"/kaggle/input/ctrl-shift-intelligence-cops-week-2k23/train_data.csv"
)
train.head()
test = pd.read_csv(
"/kaggle/input/ctrl-shift-intelligence-cops-week-2k23/test_dataset.csv"
)
test.head()
# # **All your other preprocessing and model building goes here....**
# Your code
# # *Submitting Predictions*
arr = np.ones(100000, dtype=int)
arr
# Your prediction obtained from the model
# y_pred
# for example, let's take it to contain all ones
# note: this is not a real prediction, it is just a reference for making a submission
y_pred = arr
sub = pd.read_csv(
"/kaggle/input/ctrl-shift-intelligence-cops-week-2k23/sample_submission.csv"
)
sub.head()
sub["is_similar"] = y_pred
sub.head()
sub = sub.set_index("Id")
sub.head()
sub.to_csv("submission.csv")
|
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import (
ImageDataGenerator,
load_img,
img_to_array,
array_to_img,
)
from keras.layers import Conv2D, Flatten, MaxPooling2D, Dense
from keras.models import Sequential
import glob, os, random
base_path = "../input/garbage classification/Garbage classification"
img_list = glob.glob(os.path.join(base_path, "*/*.jpg"))
print(len(img_list))
for i, img_path in enumerate(random.sample(img_list, 6)):
img = load_img(img_path)
img = img_to_array(img, dtype=np.uint8)
plt.subplot(2, 3, i + 1)
plt.imshow(img.squeeze())
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.1,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
validation_split=0.1,
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.1)
train_generator = train_datagen.flow_from_directory(
base_path,
target_size=(300, 300),
batch_size=16,
class_mode="categorical",
subset="training",
seed=0,
)
validation_generator = test_datagen.flow_from_directory(
base_path,
target_size=(300, 300),
batch_size=16,
class_mode="categorical",
subset="validation",
seed=0,
)
labels = train_generator.class_indices
labels = dict((v, k) for k, v in labels.items())
print(labels)
model = Sequential(
[
Conv2D(
filters=32,
kernel_size=3,
padding="same",
activation="relu",
input_shape=(300, 300, 3),
),
MaxPooling2D(pool_size=2),
Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"),
MaxPooling2D(pool_size=2),
Conv2D(filters=32, kernel_size=3, padding="same", activation="relu"),
MaxPooling2D(pool_size=2),
Conv2D(filters=32, kernel_size=3, padding="same", activation="relu"),
MaxPooling2D(pool_size=2),
Flatten(),
Dense(64, activation="relu"),
Dense(6, activation="softmax"),
]
)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["acc"])
model.summary()
# # Take a Shot
model.fit(train_generator, epochs=5, validation_data=validation_generator)  # fit_generator is deprecated; fit accepts generators
# # Another Shot
model.fit(train_generator, epochs=1, validation_data=validation_generator)
model.fit(train_generator, epochs=1, validation_data=validation_generator)
model.fit(train_generator, epochs=1, validation_data=validation_generator)
model.fit(train_generator, epochs=1, validation_data=validation_generator)
test_x, test_y = validation_generator.__getitem__(1)
preds = model.predict(test_x)
plt.figure(figsize=(16, 16))
for i in range(16):
plt.subplot(4, 4, i + 1)
plt.title(
"pred:%s / truth:%s"
% (labels[np.argmax(preds[i])], labels[np.argmax(test_y[i])])
)
plt.imshow(test_x[i])
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
# # Prepare data
#
df = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
dfX = df.drop(["id", "target"], axis=1)
dfY = df.target
# # Baseline
# Let's just split our data and test it as usual with a default RandomForestClassifier
np.random.seed(42)
xtrain, xtest, ytrain, ytest = train_test_split(
dfX, dfY, test_size=0.25
) # Default test_size
m = RandomForestClassifier(random_state=42)
m.fit(xtrain, ytrain)
score = roc_auc_score(ytrain, m.predict(xtrain))
print(f"AUC on train dataset: {score}")
score = roc_auc_score(ytest, m.predict(xtest))
print(f"AUC on test dataset: {score}")
# See that? We already have **1.0 AUC** for train data. This made me suspicious.
# ## Let's check out the overfit curve
def train_and_draw_w_params(params, tuned_param, begin, end):
train_scores = []
test_scores = []
for i in range(begin, end):
params[tuned_param] = i
m = RandomForestClassifier(random_state=42, **params)
m.fit(xtrain, ytrain)
score = roc_auc_score(ytrain, m.predict(xtrain))
train_scores.append(score)
score = roc_auc_score(ytest, m.predict(xtest))
test_scores.append(score)
pd.DataFrame({"train": train_scores, "test": test_scores}).plot()
np.random.seed(42)
xtrain, xtest, ytrain, ytest = train_test_split(
dfX, dfY, test_size=0.25
) # Default test_size
train_and_draw_w_params({}, "n_estimators", 1, 30)
# See that? Something is wrong here: the test score doesn't show that we are improving. Why is that?
# # Less training, more test
np.random.seed(42)
xtrain, xtest, ytrain, ytest = train_test_split(dfX, dfY, test_size=0.7)
train_and_draw_w_params({}, "n_estimators", 1, 30)
# Great, now the test line looks better, but there is still a large gap between the train & test results.
# # Try another hyperparameters
np.random.seed(42)
xtrain, xtest, ytrain, ytest = train_test_split(dfX, dfY, test_size=0.7)
train_and_draw_w_params({"n_estimators": 10}, "max_leaf_nodes", 2, 30)
|
mylist = ["apple", "banana", "mango"]
mylist
mylist = [1, 2, 3]
mylist
mylist = [True, False, True]
mylist
mylist = ["apple", True, 1]
mylist
mylist = [True, False, True]
mylist2 = ["apple", "banana", "mango"]
mylist3 = [1, 2, 3]
mylist + mylist2 + mylist3
mylist = list(("apple", "banana", "mango"))
mylist[-1]
mylist = list(("apple", "banana", "mango"))
if "apple" in mylist:
print("yes")
else:
print("No")
mylist = ["apple", "banana", "mango", "apple"]
mylist[0:2] = ["watermalon", "cherry"]
mylist
mylist = ["apple", "banana", "mango", "apple"]
mylist.append("Lichchi")
mylist
mylist = ["apple", "banana", "mango", "apple"]
mylist.insert(2, "watermalon")
mylist
mylist = ["apple", "banana", "mango", "apple"]
mylist.remove("mango")
mylist
mylist = ["apple", "banana", "mango", "apple"]
mylist.pop()
mylist
mylist = ["apple", "banana", "mango", "apple"]
len(mylist)
tuple1 = ("apple", "banana", "mango", "apple")
for x in tuple1:
print(x)
mylist = ["apple", "banana", "mango", "apple"]
for i in range(len(mylist)):
print(mylist[i])
mylist = ["apple", "banana", "mango", "apple"]
i = 0
while i < len(mylist):
print(mylist[i])
i = i + 1
tupele = ("apple", "banana", "mango", "apple")
[print(x) for x in tupele]
mylist = ["apple", "banana", "mango", "apple"]
mylist.sort(reverse=True)
mylist
mylist = [500, 21, 30, 5, 1000]
mylist.sort(reverse=True)
mylist
mylist = ["apple", "banana", "mango", "apple"]
mylist1 = mylist
mylist1
mylist = ["apple", "banana", "mango", "apple"]
len(mylist)
mylist = ["apple", "banana", "mango", "apple"]
mylist.count("banana")
mytuple = ("apple", "banana", "mango", "apple")
len(mytuple)
mytuple = ("apple", "banana", "mango", "apple")
mytuple[2:4]
mytuple = ("apple", "banana", "mango", "apple")
(x, y, z, a) = mytuple
z
mytuple = ("apple", "banana", "mango", "apple")
mylist = list(mytuple)
mylist[1] = "cherry"
mytuple = tuple(mylist)
# mytuple.index("banana")
mytuple.count("apple")
mytuple = ("apple", "banana", "mango", "apple")
mytuple.index("mango")
# mytuple.count("banana")
# List
mylist = ["apple", 1, 2, True]
mylist1 = [True, False]
mylist + mylist1
mylist = ["apple", "Mango", "banana", "apple", "cherry"]
mylist[-4:-1]
mylist = ["apple", "Mango", "banana", "apple", "cherry"]
if "apple" in mylist:
print("Yes")
else:
print("no")
mylist = ["apple", "Mango", "banana", "apple", "cherry"]
mylist[1:3] = ["watermalon", "stovery"]
mylist
mylist = ["apple", "Mango", "banana", "apple", "cherry"]
[print(x) for x in mylist]
mylist = ["apple", "Mango", "banana", "apple", "cherry"]
for i in range(len(mylist)):
print(mylist[i])
mylist = ["apple", "mango", "banana", "apple", "cherry"]
mylist.sort(reverse=True)
mylist
# mylist= ["apple","mango","banana","apple","cherry"]
mylist = list(("apple", "mango", "banana", "apple", "cherry"))
mylist.index("mango")
# Tuple
mytuple = ("apple", "mango", "banana", "apple", "cherry")
mytuple = tuple(("apple", "mango", "banana", "apple", "cherry"))
if "apple" in mytuple:
print("Yes")
else:
print("no")
mytuple = ("apple", "mango", "banana", "apple", "cherry")
mylist = list(mytuple)
mylist[1] = "B"
mytuple = tuple(mylist)
mytuple
mytuple = ("apple", "mango", "banana")
mytuple.index("apple")
# set
myset1 = set(("apple", "cow", 1))
# myset.add("cherry")
# myset.discard("cherry")
# myset1=myset.copy()
myset1.pop()
myset1
# Dictionary
# pair-> (key,value)
thisdict = {"a": "A", "c": "B", "d": ["X", "Y", "Z"]}
thisdict.update({"c": "b"})
thisdict
thisdict = {"a": "A", "c": "B", "d": ["X", "Y", "Z"]}
for x, y in thisdict.items():
print(x, y)
child1 = {"name": "A", "year": 2004}
child2 = {"name": "B", "year": 2005}
child3 = {"name": "C", "year": 2005}
myfamily = {
"child1": child1,
"child2": child2,
"child3": child3,
}
myfamily.values()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
# Read the fake-news CSV into a DataFrame
df1 = pd.read_csv("/kaggle/input/data-news/Fake.csv")
df1["label"] = 0
df1.shape
df1.head()
df2 = pd.read_csv("/kaggle/input/data-news/True.csv")
df2["label"] = 1
df2.shape
df2.head()
# Stack the DataFrames on top of each other
df = pd.concat([df1, df2], axis=0)
df.head()
df = df.reset_index(drop=True)
df.head()
df["title and text"] = df["title"] + " " + df["text"]
df["title and text"].str.lower().head()
url = r"http\S+|www.\S+"
df["title and text"] = df["title and text"].apply(lambda x: re.sub(url, "", x))
df["title and text"]
html = re.compile("<.*?>")
df["title and text"] = df["title and text"].apply(lambda x: re.sub(html, "", x))
df["title and text"]
punct = r"[^\w\s]"
df["title and text"] = df["title and text"].apply(lambda x: re.sub(punct, "", x))
df["title and text"]
number = r"\d+"
df["title and text"] = df["title and text"].apply(lambda x: re.sub(number, "", x))
df["title and text"]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train_ = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test_ = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
original = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
train_.info()
train_.drop(columns=["id"], inplace=True)
train_df = pd.concat([train_, original])
train_df = train_df.drop_duplicates()
train_df.info()
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(20, 10))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f")
train_df.columns
def kde_df(data, grid, figsize=(20, 20)):
x, y = grid[0], grid[1]
fig, axes = plt.subplots(x, y, figsize=figsize)
for i, col in enumerate(data.columns):
ax = axes[i // y, i % y]
sns.kdeplot(data=data[col], ax=ax, fill=None)
ax.axvline(data[col].mean(), color="red")
fig.suptitle("Density function of each features", y=0.9, fontsize=20)
kde_df(train_df, (4, 2))
import xgboost as xgb
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
y = train_df["target"]
X = train_df.drop(columns=["target"])
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
params = {
"n_estimators": 100,
"alpha": 0.5,
"lambda": 0.5,
"learning_rate": 0.1,
"max_depth": 3,
"min_child_weight": 1,
"colsample_bytree": 1,
"scale_pos_weight": 1,
"objective": "binary:logistic",
"booster": "gbtree",
"verbosity": 1,
}
model = xgb.XGBClassifier(**params)
model.fit(X_train, y_train)
def plot_fi(data, ax=None, title=None):
fi = pd.Series(data, index=X.columns).sort_values(ascending=True)
fi.plot(kind="barh", ax=ax)
plot_fi(model.feature_importances_)
r = permutation_importance(model, X_test, y_test, n_repeats=1, random_state=46)
plot_fi(
r["importances"].reshape(
6,
)
)
cols_to_drop = ["gravity", "cond", "urea", "osmo"]
Y = train_df["target"]
X = train_df.drop(columns=cols_to_drop + ["target"])
test_df = test_.drop(columns=(cols_to_drop + ["id"]))
Y
import optuna
import xgboost as xgb
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
optuna.logging.set_verbosity(optuna.logging.WARNING)
def objective(trial):
params = {
"verbosity": 0,
"n_estimators": trial.suggest_int("n_estimators", 50, 1500),
"learning_rate": trial.suggest_float("learning_rate", 1e-7, 1e-1),
"max_depth": trial.suggest_int("max_depth", 3, 20),
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.1, 1.0),
"alpha": trial.suggest_float("alpha", 1e-5, 1e2),
"lambda": trial.suggest_float("lambda", 1e-5, 1e2),
"objective": "binary:logistic",
"eval_metric": "auc",
"booster": trial.suggest_categorical("booster", ["dart", "gbtree", "gblinear"]),
"min_child_weight": trial.suggest_int("min_child_weight", 0, 5),
"tree_method": "gpu_hist",
}
kf = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=42)
scores = []
for train_idx, val_idx in kf.split(X, Y):
X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
y_train, y_val = Y.iloc[train_idx], Y.iloc[val_idx]
d_train = xgb.DMatrix(X_train, label=y_train)
d_val = xgb.DMatrix(X_val, label=y_val)
evallist = [(d_val, "eval")]
xgb_model = xgb.train(
params,
d_train,
num_boost_round=100,
evals=evallist,
early_stopping_rounds=20,
verbose_eval=False,
)
y_pred = xgb_model.predict(d_val)
score = accuracy_score(y_val, y_pred.round())
scores.append(score)
return np.mean(scores)
study = optuna.create_study(direction="maximize")
study.best_value
from sklearn.model_selection import StratifiedKFold
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = []
target_pred = np.zeros(len(test_df))
for train_idx, valid_idx in kfold.split(X, y):
# Split data into training and validation sets
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_valid, y_valid = X.iloc[valid_idx], y.iloc[valid_idx]
# Initialize and train the model
model = xgb.XGBClassifier(**study.best_params)
model.fit(X_train, y_train)
y_pred = model.predict(X_valid)
score = accuracy_score(y_valid, y_pred)
scores.append(score)
pred = model.predict(test_df)
target_pred += pred / 10
X.drop(columns=cols_to_drop, errors="ignore", inplace=True)  # these columns were already dropped above
target_pred
test_df.info()
model = xgb.XGBClassifier(**study.best_params)
model.fit(X, y)
model.predict(test_df)
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
submission["target"] = target_pred
submission.to_csv("submission.csv", index=False)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import TomekLinks
from yellowbrick.model_selection import learning_curve
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle as shf
import pickle
import os
import glob as gb
import warnings as wr
wr.filterwarnings("ignore")
code = {"non_hemmorhage_data": 0, "hemmorhage_data": 1}
# function to return the class name of an image from its numeric label, so it returns 'non_hemmorhage_data' for 0 and 'hemmorhage_data' for 1
def getcode(n):
for x, y in code.items():
if n == y:
return x
# the directory that contain the train images set
trainpath = (
"/kaggle/input/brain-tumor-images-dataset/Brain Tumor Images Dataset/training_set/"
)
X_train = []
y_train = []
for folder in os.listdir(trainpath):
files = gb.glob(pathname=str(trainpath + folder + "/*.png"))
for file in files:
image = cv2.imread(file)
# resize images to 64 x 64 pixels
image_array = cv2.resize(image, (64, 64))
X_train.append(list(image_array))
y_train.append(code[folder])
np.save("X_train", X_train)
np.save("y_train", y_train)
X_train[0][0]
y_train[:10]
# the directory that contain the test images set
testpath = (
"/kaggle/input/brain-tumor-images-dataset/Brain Tumor Images Dataset/test_set/"
)
X_test = []
y_test = []
for folder in os.listdir(testpath):
files = gb.glob(pathname=str(testpath + folder + "/*.png"))
for file in files:
image = cv2.imread(file)
# resize images to 64 x 64 pixels
image_array = cv2.resize(image, (64, 64))
X_test.append(list(image_array))
y_test.append(code[folder])
np.save("X_test", X_test)
np.save("y_test", y_test)
# X_train, X_test contain the images as numpy arrays, while y_train, y_test contain the class of each image
# Loading the .npy files as numpy arrays
loaded_X_train = np.load("./X_train.npy")
loaded_X_test = np.load("./X_test.npy")
loaded_y_train = np.load("./y_train.npy")
loaded_y_test = np.load("./y_test.npy")
print(loaded_X_train.shape)
# shape returns the dimensions of X_train: a stack of 64 x 64 pixel training images,
# where the fourth dimension stores the RGB information of each pixel
print(loaded_X_test.shape)
# y_train and y_test contain the label of each image: 0 'non_hemmorhage_data' or 1 'hemmorhage_data'
print(loaded_y_train.shape)
print(loaded_y_test.shape)
plt.figure(figsize=(20, 10))
for n, i in enumerate(np.random.randint(0, len(loaded_X_train), 16)):
plt.subplot(2, 8, n + 1)
plt.imshow(loaded_X_train[i])
plt.axis("off")
plt.title(getcode(loaded_y_train[i]))
plt.figure(figsize=(20, 10))
for n, i in enumerate(np.random.randint(0, len(loaded_X_test), 16)):
plt.subplot(2, 8, n + 1)
plt.imshow(loaded_X_test[i])
plt.axis("off")
    plt.title(getcode(loaded_y_test[i]))
# flatten the images into a 2d array, for model training and testing
X_train = loaded_X_train.reshape([-1, np.product((64, 64, 3))])
X_test = loaded_X_test.reshape([-1, np.product((64, 64, 3))])
print(X_train.shape)
print(X_test.shape)
y_train = loaded_y_train
y_test = loaded_y_test
# Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)  # reuse the scaler fitted on the training data
# # Decision Tree
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
from sklearn import tree
plt.figure(figsize=(30, 20))
tree.plot_tree(clf)
print(
"Traning Accuracy: ", clf.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", clf.score(X_test, y_test) * 100) # Check testing accuracy
from sklearn.ensemble import (
BaggingClassifier,
RandomForestClassifier,
AdaBoostClassifier,
ExtraTreesClassifier,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import NuSVC, SVC
from sklearn.semi_supervised import LabelPropagation
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import (
classification_report,
accuracy_score,
confusion_matrix,
precision_score,
recall_score,
f1_score,
ConfusionMatrixDisplay,
)
# from sklearn.metrics.ConfusionMatrixDisplay
pred = clf.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(clf, X_test, y_test)
from sklearn.metrics import (
confusion_matrix,
roc_curve,
recall_score,
f1_score,
plot_roc_curve,
roc_auc_score,
)
plot_roc_curve(clf, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# **Finding an appropiate n_neighbors value for kNN classifier by uniform and distance weight function:**
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
f1s = []
f2s = []
for i in range(1, 51):
knct = KNeighborsClassifier(n_neighbors=i, weights="uniform")
knct.fit(X_train, y_train)
pred_i = knct.predict(X_test)
f1s.append(f1_score(y_test, pred_i, average="weighted"))
knct2 = KNeighborsClassifier(n_neighbors=i, weights="distance")
knct2.fit(X_train, y_train)
pred_i2 = knct2.predict(X_test)
f2s.append(f1_score(y_test, pred_i2, average="weighted"))
plt.figure(figsize=(10, 6))
plt.plot(
range(1, 51),
f1s,
color="red",
linestyle="dashed",
marker="o",
markerfacecolor="blue",
markersize=10,
)
plt.plot(
range(1, 51),
f2s,
color="black",
linestyle="dashed",
marker="o",
markerfacecolor="green",
markersize=10,
)
plt.title("F1 Score vs K Value")
plt.xlabel("K Value")
plt.ylabel("F1 Score")
plt.xticks(range(0, 51, 5))
plt.tick_params(labeltop=True, labelright=True, top=True, right=True)
f1_max = 0
f2_max = 0
for i in range(0, 50):
if f1s[i] > f1_max or f2s[i] > f2_max:
f1_max = f1s[i]
f2_max = f2s[i]
x = i
print(i, f1s[i], f2s[i])
print(f1_max, f2_max, x)
# **kNN Classification**
neigh_classifier = KNeighborsClassifier(n_neighbors=x + 1, weights="uniform")
neigh_classifier.fit(X_train, y_train)
print(
"Traning Accuracy: ", neigh_classifier.score(X_train, y_train) * 100
) # Check training accuracy
print(
"Testing Accuracy: ", neigh_classifier.score(X_test, y_test) * 100
) # Check testing accuracy
pred = neigh_classifier.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(neigh_classifier, X_test, y_test)
plot_roc_curve(neigh_classifier, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # Logistic Regression
logist = LogisticRegression(C=1.0, random_state=2, max_iter=100)
logist.fit(X_train, y_train)
print(
"Traning Accuracy: ", logist.score(X_train, y_train) * 100
) # Check training accuracy
print(
"Testing Accuracy: ", logist.score(X_test, y_test) * 100
) # Check testing accuracy
pred = logist.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(logist, X_test, y_test)
plot_roc_curve(logist, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # XGBoost Classifier (Extreme Gradient Boosting)
import xgboost
from xgboost import XGBClassifier
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
print(
"Traning Accuracy: ", xgb.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", xgb.score(X_test, y_test) * 100) # Check testing accuracy
pred = xgb.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(xgb, X_test, y_test)
plot_roc_curve(xgb, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # Support vector classifier
from sklearn.svm import SVC # support vector classifier
# from sklearn.svm import SVR # support vector REGRESSOR
svm1 = SVC(kernel="linear")
svm1.fit(X_train, y_train)
print(
"Traning Accuracy: ", svm1.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", svm1.score(X_test, y_test) * 100) # Check testing accuracy
pred = svm1.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(clf, X_test, y_test)
plot_roc_curve(xgb, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # Support vector REGRESSOR (note: the code below actually fits another linear SVC, since the target is binary; SVR is imported but not used)
from sklearn.svm import SVR
svm2 = SVC(kernel="linear")
svm2.fit(X_train, y_train)
print(
"Traning Accuracy: ", svm2.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", svm2.score(X_test, y_test) * 100) # Check testing accuracy
pred = svm2.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(svm2, X_test, y_test)
plot_roc_curve(svm2, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
svm3 = SVC(kernel="poly")
# 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'
svm3.fit(X_train, y_train)
print(
"Traning Accuracy: ", svm3.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", svm3.score(X_test, y_test) * 100) # Check testing accuracy
pred = svm3.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(svm3, X_test, y_test)
plot_roc_curve(svm3, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # Naive Bayes
# # GaussianNB
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)
print(
"Traning Accuracy: ", gnb.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", gnb.score(X_test, y_test) * 100) # Check testing accuracy
pred = gnb.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(gnb, X_test, y_test)
plot_roc_curve(gnb, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # BernoulliNB
bnb = BernoulliNB()
bnb.fit(X_train, y_train)
print(
"Traning Accuracy: ", bnb.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", bnb.score(X_test, y_test) * 100) # Check testing accuracy
pred = bnb.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(bnb, X_test, y_test)
plot_roc_curve(bnb, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # Random Forest
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(
n_estimators=100, max_depth=3, n_jobs=-1, random_state=42
)
rnd_clf.fit(X_train, y_train)
rnd_clf.estimators_
len(rnd_clf.estimators_)
from sklearn import tree
plt.figure(figsize=(30, 15))
for i in range(len(rnd_clf.estimators_)):
tree.plot_tree(rnd_clf.estimators_[i], filled=True)
for i in range(len(rnd_clf.estimators_)):
print(tree.export_text(rnd_clf.estimators_[i]))
print(
"Traning Accuracy: ", rnd_clf.score(X_train, y_train) * 100
) # Check training accuracy
print(
"Testing Accuracy: ", rnd_clf.score(X_test, y_test) * 100
) # Check testing accuracy
pred = rnd_clf.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
plot_roc_curve(rnd_clf, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # AdaBoostClassifier
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators=100, random_state=0)
# abc=AdaBoostClassifier(DecisionTreeClassifier(criterion = 'entropy', random_state = 200),
# n_estimators=2000,
# learning_rate=0.1,
# algorithm='SAMME.R',
# random_state=1,)
abc.fit(X_train, y_train)
print(
"Traning Accuracy: ", abc.score(X_train, y_train) * 100
) # Check training accuracy
print("Testing Accuracy: ", abc.score(X_test, y_test) * 100) # Check testing accuracy
pred = abc.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(abc, X_test, y_test)
plot_roc_curve(abc, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
# # XGBoost Classifier
from xgboost import XGBClassifier
xgb_classifier = XGBClassifier()
xgb_classifier.fit(X_train, y_train)
print(
"Traning Accuracy: ", xgb_classifier.score(X_train, y_train) * 100
) # Check training accuracy
print(
"Testing Accuracy: ", xgb_classifier.score(X_test, y_test) * 100
) # Check testing accuracy
pred = xgb_classifier.predict(X_test)
accuracy_score(pred, y_test) * 100
print(classification_report(y_test, pred)) # main
cm = confusion_matrix(y_test, pred)
cm
sns.heatmap(cm, square=True, annot=True, cmap="Blues", fmt="d", cbar=True)
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(xgb_classifier, X_test, y_test)
plot_roc_curve(xgb_classifier, X_test, y_test, color="red")
plt.plot([1, 0], [1, 0], "go--")
import tensorflow as tf
ann = tf.keras.models.Sequential()
# Prepare the ANN for fitting the model
ann.add(tf.keras.layers.Dense(units=6, activation="relu"))
ann.add(tf.keras.layers.Dense(units=6, activation="relu"))
ann.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
ann.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# Fit with ANN
ann.fit(X_train, y_train, batch_size=1, epochs=10)
# ANN Accuracy Score
ann_pred = ann.predict(X_test)
ann_score = accuracy_score(y_test, (ann_pred > 0.5).astype(int))  # threshold the sigmoid outputs before scoring
ann_score
# # Gradient Boosting
import xgboost as xgb
model = xgb.XGBClassifier(use_label_encoder=False)
model.fit(X_train, y_train)
predicted = model.predict(X_test)
cm = confusion_matrix(y_test, predicted)
print()
print(
"The accuracy of X Gradient Boosting is : ",
accuracy_score(y_test, predicted) * 100,
"%",
)
# # LazyClassifier
from lazypredict.Supervised import LazyClassifier
# Create a LazyClassifier instance and fit the training data
clf = LazyClassifier(
verbose=0,
ignore_warnings=True,
custom_metric=None,
predictions=True,
random_state=12,
classifiers="all",
)
LazyClassifier()
# model, predictions = clf.fit(X_train_res, x_test,y_train_res, y_test)
model, predictions = clf.fit(X_train, X_test, y_train, y_test)
model
predictions.head()
top_10 = model.sort_values(by="Accuracy", ascending=False).head(12)
print(top_10)
|
# ## Import Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
# ## Read Data
df = pd.read_csv("/kaggle/input/dataset-covid-19-2020-2021/data.csv")
df.tail(5)
target_column = "Province"
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(df, df[target_column]):
train_set = df.loc[train_index]
test_set = df.loc[test_index]
# ## Preprocessing Data
# check for missing values in train_set
train_set.isna().sum()
# check for missing values in test_set
test_set.isna().sum()
# Check for outliers in train_set
train_set.plot(subplots=True, layout=(4, 4), kind="box", figsize=(12, 14))
# Check for outliers in test_set
test_set.plot(subplots=True, layout=(4, 4), kind="box", figsize=(12, 14))
#
# ### The outlier check above shows that both sets have outliers in the Active_Case column
# Fix both sets by replacing values below zero with zero
train_set["Active_Case"] = np.where(
train_set["Active_Case"] < 0, 0, train_set["Active_Case"]
)
test_set["Active_Case"] = np.where(
test_set["Active_Case"] < 0, 0, test_set["Active_Case"]
)
# check data types
print(train_set.info())
print(test_set.info())
# convert the Date column from object to datetime
train_set["Date"] = pd.to_datetime(train_set["Date"])
test_set["Date"] = pd.to_datetime(test_set["Date"])
# ### Data Exploration
#
numeric_cols = train_set.select_dtypes(include=["float64", "int64"]).columns
summary_stats = train_set[numeric_cols].describe()
print(summary_stats)
# ### Data Visualization
# create a histogram and a boxplot for every numeric variable in the dataset
for col in numeric_cols:
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
ax[0].hist(train_set[col], bins=20)
ax[0].set_xlabel(col)
ax[0].set_ylabel("Frequency")
ax[1].boxplot(train_set[col])
ax[1].set_ylabel(col)
plt.show()
# Line chart
for col in numeric_cols:
plt.figure(figsize=(10, 6))
plt.plot(train_set[col], label=col)
plt.title(col)
plt.legend()
plt.show()
# compute the correlation matrix
corr_matrix = train_set.corr()
# plot heatmap
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
|
# Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Setting up the numbers of columns to show
pd.set_option("display.max_columns", 10)
# Importing the dataset
df = pd.read_csv("/kaggle/input/port-of-los-angeles/shipping_data.csv")
df
# Some names are inconsistent (plural forms or different casing), so we do a little cleanup
df["name"] = df["name"].replace(
{
"Air Conditioners": "Air Conditioner",
"Tablets": "Tablet",
"Washing machine": "Washing Machine",
}
)
# let's rename all the columns to make our work more efficient
df = df.rename(
columns={
"price ($)": "price_($)",
"weight (kg)": "weight_(kg)",
"destination port": "destination_port",
"weight (kg)": "weight_(kg)",
"length (m)": "length_(m)",
"width (m)": "width_(m)",
"height (m)": "height_(m)",
"shipment date": "shipment_date",
}
)
df
# Let's make a filter to exclude the ~150 rows of data with no destination port defined
non_null_ports = df["destination_port"].notna()
df_with_ports = df[non_null_ports].copy()  # copy to avoid SettingWithCopyWarning when adding columns below
# Now we map each port to its country
df_with_ports["country"] = df_with_ports["destination_port"].map(
{
"Port of Singapore (Singapore)": "Singapore",
"Port of Busan (South Korea)": "South Korea",
"Port of Tianjin (China)": "China",
"Port of Shanghai (China)": "China",
"Port of Tokyo (Japan)": "Japan",
}
)
df_with_ports = df_with_ports.reset_index()
# Now we can see the total value in US dollars of products by destination port
df_with_ports.groupby("destination_port")["price_($)"].sum().sort_values(
ascending=False
).plot.bar(title="Value in US Dollars by destination port")
# Since we have two ports in China, let's now see the number of products addressed to each country
df_with_ports.groupby("country")["country"].count().sort_values(
ascending=False
).plot.bar(title="Counting of product by country of destionation")
# Let's see the counts of the top 15 products
df_with_ports.groupby("name")["name"].count().sort_values(ascending=False).head(
    15
).plot.bar(title="Count of Top 15 products delivered by the ship", xlabel="Products")
# Now we see the value in US dollars of the top 20 products
df_with_ports.groupby("name")["price_($)"].sum().sort_values(ascending=False).head(
    20
).plot.bar(
    title="The sum of the Top 20 products delivered by the ship",
    xlabel="Products",
    ylabel="Price in US Dollars",
)
|
# # Intro to Python - DataCamp
# **Python** is one of the most widely used programming languages for Data Science (**R** is another notable one).
# ## List Indexing
var_list = ["py", 2, "3rd", "last_element"]
# Subsetting list
# Calling 1st element
print(var_list[0]) # Python is a zero-indexed programming language
# Calling last element
print(var_list[-1])
# ## List Slicing
print(
"1-3: ", var_list[1:3]
)  # returns the 2nd and 3rd elements; many expect the 4th element to be included as well
# Developers often find it confusing that the **2**nd element is included but the **4**th is not. It's because in List[start_element:end_element], start_element is taken **inclusively** whereas end_element is taken **exclusively**. Wait, what?
# Let me clarify. When I call var_list[1:3], index 1 is included but index 3 is not (indexes up to, but not including, 3 are taken), so it returns the 2nd and 3rd elements (NOT the 4th), as the short example below shows.
# 
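# A minimal illustration of the rule above (using var_list defined earlier):
print(var_list[1:3])                                # [2, '3rd'] -> the elements at index 1 and 2
print(var_list[1:3] == [var_list[1], var_list[2]])  # True: index 3 (the 4th element) is excluded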
print(var_list[1:]) # taking 2nd element and onwards; again inclusive
print(var_list[:2]) # taking until 2nd element (not 3rd element); again exclusive
# ## Python List vs Numpy Array
import numpy as np
list1 = ["Maple Snap Ltd", 2, False, "Tanmoy", "DataIE", 33]
print(
list1
)  # with mixed datatypes, NumPy converts everything to str, unlike a Python LIST, which preserves each element's datatype (see the small demo below)
print([2, False, "Das"])
py_list = [2, 4, 5]
numpy_array = np.array([2, 4, 6])
sum_np = numpy_array + numpy_array
print("Summing Python Lists: ", (py_list + py_list)) # Concatinating them
print("Summing numpy arrays: ", sum_np)
# 
# Source: DataCamp
# **List Slicing**
#
list1 = ["Maple Snap Ltd", 2, False, "Tanmoy", ["DataIE", 2021], 33]
display(list1)
display(
list1[:2]
) # Starting from zero'th element until 2nd; EXCLUSIVE (meaning 3rd element will not be added)
display(
list1[2:]
)  # from the 3rd element until the end; INCLUSIVE (3rd element and onwards)
list1[
2:4
] # what it will produce? 3rd & 4th element, you guessed it right! Remember 2 inclusive, 4 exclusive
# Subsetting List of List
# Calling elements inside another element of a list e.g.
list1[-2][1] # 2nd element of the child list ['DataIE', 2021] of list1
# ## Manipulating List
# Adding elements
list1_ext = list1 + [
"Tanm",
220,
3,
] # new contents are just PASTED beside previous elements (not mathematical addition)
print(list1_ext)
# Removing elements
del list1_ext[-2]
print(list1_ext)
# Editing your LIST
a = [2, 3, 7]
b = a
b[2] = "Tan"
print("a: ", a) # Kinda frustrating
# so, we have a few options here to keep original list (or whatever object) intact
# Option 1
c = list(a)
c[2] = "DataIE"
print("c: ", c)
print("a afterward: ", a)
# Option 2
d = a[:]
print("d before modification: ", d)
d[2] = "MapleSnap"
print("d: ", d)
print("a afterward d: ", a)
# More content will be added shortly.
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from sklearn.metrics import accuracy_score
train_dir = "../input/dog-vs-cat/dogvscat/train"
test_dir = "../input/dog-vs-cat/dogvscat/test"
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(
train_dir, target_size=(224, 224), batch_size=20, class_mode="binary"
)
test_generator = test_datagen.flow_from_directory(
test_dir, target_size=(224, 224), batch_size=20, class_mode="binary"
)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(224, 224, 3)))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.summary()
from tensorflow.keras import optimizers
model.compile(
loss="binary_crossentropy",
optimizer=optimizers.RMSprop(learning_rate=1e-3),
metrics=["accuracy"],
)
history = model.fit(  # fit_generator is deprecated; fit accepts generators directly
train_generator,
steps_per_epoch=train_generator.samples // 20,
epochs=10,
validation_data=test_generator,
validation_steps=test_generator.samples // 20,
)
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.legend()
plt.show()
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.legend()
model.save("catvsdog.h5")
from tensorflow.keras.preprocessing import image
path = "../input/dog-vs-cat/dogvscat/train/1/12.jpg"
img = image.load_img(path, target_size=(224, 224))
plt.imshow(img, interpolation="nearest")
plt.show()
img_array = np.array(img)
img_array.shape
img_array = img_array.reshape(1, 224, 224, 3) / 255.0  # rescale to match the training generator
a = model.predict(img_array)
if a[0][0] < 0.5:  # sigmoid output below 0.5 -> class 0 (assumed here to be "cat")
    print("cat")
else:
    print("dog")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import AdaBoostClassifier
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import the data:
train_data = pd.read_csv("../input/cap-4611-spring-21-assignment-1/train.csv").drop(
columns=["id"]
)
test_data = pd.read_csv("../input/cap-4611-spring-21-assignment-1/test.csv").drop(
columns=["id"]
)
train_data
train_data.describe()
# We don't need normalization/standardization when using trees because one of the advantages of trees is that they are not sensitive to the scale of the features; see the quick check below
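# Quick check of the claim above (a minimal sketch on synthetic data, not part of this analysis):
# a decision tree reaches the same training accuracy with and without standardization,
# because its splits depend only on the ordering of feature values.
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
Xd, yd = make_classification(n_samples=300, n_features=5, random_state=0)
Xd_scaled = StandardScaler().fit_transform(Xd)
tree_raw = DecisionTreeClassifier(random_state=0).fit(Xd, yd)
tree_scaled = DecisionTreeClassifier(random_state=0).fit(Xd_scaled, yd)
print(tree_raw.score(Xd, yd), tree_scaled.score(Xd_scaled, yd))  # the two scores should match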
# Checking outliers:
mean = train_data.describe().loc["mean"]
std = train_data.describe().loc["std"]
df = train_data.copy()
# names of the columns
col_names = df.columns.map(str)
col_names = col_names.drop(["Bankrupt"])
# column index to visualize in the plot, for testing
element = 45
# instantiate figure
fig, ax = plt.subplots(1, 2)
sns.scatterplot(data=train_data[col_names[element]], ax=ax[0]) # before filter
for name in col_names:
num_sd = 9 # num std from the mean
threshold_a = mean.loc[name] + num_sd * std.loc[name] # threshold above
threshold_b = mean.loc[name] - num_sd * std.loc[name] # threshold below
condition = (df[name] > threshold_a) | (df[name] < threshold_b)
df.drop(df[condition].index, inplace=True)
sns.scatterplot(data=df[col_names[element]], ax=ax[1]) # after filter
fig.show()
print(train_data.shape)
print(df.shape)
# train_data = df.copy()
# Because it was better when I didn't remove any outliers, I am purposefully choosing to keep them
# Checking if there's missing data
# In order to get the total count of missing values in the DataFrame, we chain two .sum() calls together
train_data.isnull().sum().sum()
# there seems to be no missing data
# Split the training_data
# split the data
y = train_data["Bankrupt"]
X = train_data.drop("Bankrupt", axis=1)
# random split
SEED = 24
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.50, random_state=SEED
)
X_val, X_test, y_val, y_test = train_test_split(
X_test, y_test, test_size=0.50, random_state=SEED
)
# I'm splitting the data into 3 sets: train, test, and validation, to avoid overfitting
# First I will try a single decision tree
# single decision tree
dt = DecisionTreeClassifier(max_depth=2, random_state=SEED, min_samples_leaf=0.2)
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_test)
acc_dt = accuracy_score(y_pred_dt, y_test)
print("accuracy score: " + str(acc_dt))
y_dt_pred_proba = dt.predict_proba(X_test)[:, 1]
dt_roc_auc = roc_auc_score(y_test, y_dt_pred_proba)
print("roc aoc score: " + str(dt_roc_auc))
dt_f1 = f1_score(y_test, y_pred_dt, average=None)
print("f1 score: " + str(dt_f1))
df = pd.DataFrame({"Actual": y_test, "Predicted": y_pred_dt, "proba": y_dt_pred_proba})
pd.set_option("display.max_rows", df.shape[0] + 1)
# so the accuracy looks good but the ROC AUC score is bad
# UPDATE: the baseline accuracy from guessing all 0s is about 96%, so this accuracy is actually terrible (see the quick check below)
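# Quick check of the baseline mentioned above (a sketch, not from the original notebook):
# accuracy obtained by always predicting the majority class (0 = not bankrupt) on this test split.
baseline_acc = max(y_test.mean(), 1 - y_test.mean())
print("majority-class baseline accuracy: " + str(baseline_acc))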
# Use RandomizedSearch to find better parameters for a single decision tree
# params
dt = DecisionTreeClassifier(random_state=SEED)
dt.fit(X_train, y_train)
max_depth = range(7, 12)
min_leaf = [0.0185, 0.019, 0.0193, 0.0195, 0.0198, 0.02, 0.025]
params_dt = {
"max_depth": max_depth,
"min_samples_leaf": min_leaf,
"random_state": [SEED],
}
grid_dt = RandomizedSearchCV(
estimator=dt, param_distributions=params_dt, scoring="roc_auc", cv=10, n_jobs=-1
)
grid_dt.fit(X_train, y_train)
print(grid_dt.best_score_)
print(grid_dt.best_params_)
# beat : 0.8166695874406719
# > {'random_state': 42, 'min_samples_leaf': 0.019, 'max_depth': 11} 0.8476964769647696
# > {'random_state': 42, 'min_samples_leaf': 0.0195, 'max_depth': 6} 0.8544742547425473
# > {'random_state': 42, 'min_samples_leaf': 0.019, 'max_depth': 9} 0.8476964769647696
# also use those parameters here
# Since one tree isn't good, we try a decision tree with bagging
# single decision tree 2, w bagging this time
dt2 = DecisionTreeClassifier(max_depth=8, random_state=SEED, min_samples_leaf=0.0185)
# dt2.fit(X_train, y_train)
# y_pred = dt2.predict(X_test)
bc = BaggingClassifier(
base_estimator=dt2, n_estimators=88, oob_score=True, random_state=SEED
)
bc.fit(X_train, y_train)
y_pred_dt2 = bc.predict(X_test)
acc_dt2 = accuracy_score(y_pred_dt2, y_test)
acc_oob = bc.oob_score_
print("accuracy score: " + str(acc_dt2))
print("accuracy out of bag score: " + str(acc_oob))
y_dt_pred_proba2 = bc.predict_proba(X_test)[:, 1]
dt_roc_auc2 = roc_auc_score(y_test, y_dt_pred_proba2)
print("roc aoc score: " + str(dt_roc_auc2))
dt_f1 = f1_score(y_test, y_pred_dt2, average=None)
print("f1 score: " + str(dt_f1))
df = pd.DataFrame(
{"Actual": y_test, "Predicted": y_pred_dt2, "proba": y_dt_pred_proba2}
)
pd.set_option("display.max_rows", df.shape[0] + 1)
# beat this: 0.9010935601458081
# beat this: 0.9631152460984393
# beat this: 0.9634453781512605
# bagging makes the decision tree noticeably better, but let's go on to try other options
# Trying a random forest
# random forest
rf = RandomForestClassifier(
n_estimators=69, max_depth=10, random_state=SEED, min_samples_leaf=0.0124
)
rf.fit(X_train, y_train)
y_rf_predict = rf.predict(X_test)
df_rf = pd.DataFrame({"Actual": y_test, "Predicted": y_rf_predict})
pd.set_option("display.max_rows", df_rf.shape[0] + 1)
acc_rf = accuracy_score(y_rf_predict, y_test)
print("accuracy score: " + str(acc_rf))
y_rf_pred_proba = rf.predict_proba(X_test)[:, 1]
rf_roc_auc = roc_auc_score(y_test, y_rf_pred_proba)
print("roc aoc score: " + str(rf_roc_auc))
dt_rf = f1_score(y_test, y_rf_predict, average=None)
print("f1 score: " + str(dt_f1))
# beat this: 0.9004252733900365
# both the accuracy and the ROC AUC are better than the single decision tree
# Use randomized search to find better parameters for the random forest
# params
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
max_depth = list(range(9, 13))
min_leaf = [
0.01215,
0.0122,
0.01225,
0.0123,
0.01233,
0.01235,
0.01237,
0.0124,
0.0125,
0.0126,
0.0127,
0.0128,
0.0129,
] # [0.014,0.015,0.016,0.017,0.018,0.019,0.02] #[0.01,0.02,0.03,0.04,0.05] #[0.09, 0.1,0.12,0.13,0.14,0.15,0.16,0.17,0.18]
n_estimators = list(range(69, 72))
params_rf = {
"max_depth": max_depth,
"min_samples_leaf": min_leaf,
"n_estimators": n_estimators,
"random_state": [SEED],
}
grid_rf2 = RandomizedSearchCV(
estimator=rf, param_distributions=params_rf, scoring="roc_auc", cv=10, n_jobs=-1
)
grid_rf2.fit(X_train, y_train)
print(grid_rf2.best_params_)
print(grid_rf2.best_score_)
# > {'max_depth': 5, 'min_samples_leaf': 0.15, 'n_estimators': 67, 'random_state': 5}, 0.9205330412559329
# > {'random_state': 5, 'n_estimators': 66, 'min_samples_leaf': 0.01, 'max_depth': 5}, 0.9305111354508944
# > {'random_state': 5, 'n_estimators': 67, 'min_samples_leaf': 0.01, 'max_depth': 8}, 0.9310003650967508
# > {'random_state': 5, 'n_estimators': 67, 'min_samples_leaf': 0.013, 'max_depth': 11}, 0.9331843738590726
# > {'random_state': 5, 'n_estimators': 68, 'min_samples_leaf': 0.0127, 'max_depth': 11},0.9331836436655714
# > {'random_state': 42, 'n_estimators': 69, 'min_samples_leaf': 0.0123, 'max_depth': 9}, 0.9392540034491255
# > {'random_state': 42, 'n_estimators': 70, 'min_samples_leaf': 0.0123, 'max_depth': 11}, 0.9399825080068982
# > {'random_state': 24, 'n_estimators': 70, 'min_samples_leaf': 0.0124, 'max_depth': 12}, 0.9476425702811244
# Use the calculated best hyperparameters in a new random forest:
# random forest using parameters
rf_p = RandomForestClassifier(
n_estimators=69, max_depth=10, random_state=SEED, min_samples_leaf=0.0124
)
rf_p.fit(X_train, y_train)
y_rf_p_predict = rf_p.predict(X_test)
df_rf_p = pd.DataFrame({"Actual": y_test, "Predicted": y_rf_p_predict})
pd.set_option("display.max_rows", df_rf_p.shape[0] + 1)
acc_rf_p = accuracy_score(y_rf_p_predict, y_test)
print("accuracy score: " + str(acc_rf_p))
y_rf_p_pred_proba = rf_p.predict_proba(X_test)[:, 1]
rf_p_roc_auc = roc_auc_score(y_test, y_rf_p_pred_proba)
print("roc aoc score: " + str(rf_p_roc_auc))
dt_rf_p = f1_score(y_test, y_rf_p_predict, average=None)
print("f1 score: " + str(dt_rf_p))
# beat this: 0.9527339003645201
# beat this: 0.9517213446739571
# beat this: 0.9656062424969988
# I computed the feature importances and got a ranked list, but I wasn't able to set up a clean comparison of accuracy with and without the least important features
importances_rf = pd.DataFrame(rf.feature_importances_, index=X.columns)
sorted_importances_rf = importances_rf.sort_values(by=0, ascending=False)
df = train_data.copy()
zero_names = sorted_importances_rf.loc[
"one if net income was negative for the last two year zero otherwise":
]
n = zero_names.iterrows()
print(n)
print("--------------------------------------------")
print(sorted_importances_rf)
print(train_data.shape)
print(df.shape)
# sorted_importances_rf.plot(kind='barh', color='lightgreen');plt.show()
# Try to use AdaBoost on the Random Forest
# AdaBoost
dt = rf_p
ada = AdaBoostClassifier(base_estimator=dt, n_estimators=193, random_state=SEED)
ada.fit(X_train, y_train)
y_pred_proba = ada.predict_proba(X_test)[:, 1]
ada_roc_auc = roc_auc_score(y_test, y_pred_proba)
ada_roc_auc
# beat this: 0.9386796273795059
# beat this: 0.9392871607938438
# beat this: 0.9652460984393757
# Use Randomized Search to find the best parameters and put them back into the above adaboost classifier
# params
ada = AdaBoostClassifier(base_estimator=dt, n_estimators=240, random_state=SEED)
rf.fit(X_train, y_train)
n_estimators = list(range(150, 200))
params_ada = {
"base_estimator": rf_p,
"n_estimators": n_estimators,
"random_state": [SEED],
}
grid_ada = RandomizedSearchCV(
estimator=ada, param_distributions=params_ada, scoring="roc_auc", cv=10, n_jobs=-1
)
grid_ada.fit(X_train, y_train)
print(grid_ada.best_params_)
print(grid_ada.best_score_)
# beat this: 0.9291449434100036
# beat this: 0.9294589266155532
# Test the resulting classifier on the validation set
# validation
evaluator = ada # grid_ada.best_estimator_
evaluator.fit(X_train, y_train)
y_pred = evaluator.predict(X_val)
acc = accuracy_score(y_pred, y_val)
print("accuracy score: " + str(acc))
y_proba = evaluator.predict_proba(X_val)[:, 1]
roc_auc = roc_auc_score(y_val, y_proba)
print("roc aoc score: " + str(roc_auc))
f1 = f1_score(y_val, y_pred, average=None)
print("f1 score: " + str(f1))
# beat this: 0.9378345498783455
# beat this: 0.9376723438767235
# 0.9323673234575004
# Use the ada classifier on the test set
imStealingTheIDColumn = pd.read_csv("../input/cap-4611-spring-21-assignment-1/test.csv")
evaluator = ada
predict = evaluator.predict(test_data)
prob = evaluator.predict_proba(test_data)[:, 1]
output = pd.DataFrame({"id": imStealingTheIDColumn.id, "Bankrupt": prob})
output.to_csv("my_submission13.csv", index=False)
print("Your submission was successfully saved!")
# commented out so it doesn't try to calculate all this when I hit Run All
# colormap = plt.cm.viridis
# plt.figure(figsize=(30,30))
# sns.heatmap(train_data.astype(float).corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=False)
|
# # Exponential-Form Loss Function
# ----------------------------------------
# Intended to fix the readability problem that arises when the loss function takes negative values.
# Compares the performance of a loss function obtained by plugging the original loss into an exponential with base e against the original loss function.
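# Note (added reasoning step): since $e^{L}$ is strictly increasing in $L$, minimizing $\exp(L)$ has the
# same minimizer as minimizing $L$ itself, while the reported loss value stays positive.
# By the chain rule the gradient is simply rescaled: $\nabla_\theta e^{L} = e^{L}\,\nabla_\theta L$.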
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from matplotlib import pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, log_interval=1):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
def train_exp(model, device, train_loader, optimizer, epoch, log_interval=1):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = torch.exp(F.nll_loss(output, target))
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
def test(model, device, test_loader, prefix=""):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(
output, target, reduction="sum"
).item() # sum up batch loss
pred = output.argmax(
dim=1, keepdim=True
) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print(
"[ "
+ prefix
+ " ]\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
return 100.0 * correct / len(test_loader.dataset)
def drawgraph(losslist1, losslist2):
plt.plot(range(len(losslist1)), losslist1, color="red", label="EXP Loss")
plt.plot(range(len(losslist2)), losslist2, color="blue", label="Default Loss")
plt.title("Accurate of Default Loss and Exp Loss")
plt.xlabel("Epcoh")
plt.ylabel("Accurate")
plt.legend()
plt.show()
default_loss = []
exp_loss = []
def main():
global exp_loss, default_loss
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": 50}
test_kwargs = {"batch_size": 50}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
dataset2 = datasets.MNIST("../data", train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
Expmodel = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=1.0)
Expoptimizer = optim.Adadelta(Expmodel.parameters(), lr=1.0)
scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
Expscheduler = StepLR(Expoptimizer, step_size=1, gamma=0.7)
for epoch in range(1, 30 + 1):
train(model, device, train_loader, optimizer, epoch)
        default_loss.append(test(model, device, test_loader, prefix="original method"))
scheduler.step()
for epoch in range(1, 30 + 1):
train_exp(Expmodel, device, train_loader, Expoptimizer, epoch)
        exp_loss.append(test(Expmodel, device, test_loader, prefix="exp method"))
Expscheduler.step()
if __name__ == "__main__":
main()
drawgraph(exp_loss, default_loss)
|
from tqdm.auto import tqdm
from collections import defaultdict
import pandas as pd
import numpy as np
import os
import random
import gc
import cv2
import glob
gc.enable()
pd.set_option("display.max_columns", None)
# Visualisation
import matplotlib.pyplot as plt
# Image Aug
import albumentations
from albumentations.pytorch.transforms import ToTensorV2
# Deep Learning
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import (
CosineAnnealingWarmRestarts,
OneCycleLR,
CosineAnnealingLR,
)
import torch
import torchvision
import timm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import PIL
import torchvision.transforms as T
from torch.utils.data import DataLoader, Dataset
# Metrics
from sklearn.metrics import mean_squared_error
# Random Seed Initialize
RANDOM_SEED = 42
def seed_everything(seed=RANDOM_SEED):
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
seed_everything()
# Device Optimization
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
print(f"Using device: {device}")
import math
import time
def asMinutes(s):
"""Convert Seconds to Minutes."""
m = math.floor(s / 60)
s -= m * 60
return "%dm %ds" % (m, s)
def timeSince(since, percent):
"""Accessing and Converting Time Data."""
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return "%s (remain %s)" % (asMinutes(s), asMinutes(rs))
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
cat_categories = []
path = "/kaggle/input/cats-breed-dataset/cat_v1"
for directory in os.listdir(path):
if "." not in directory:
cat_categories.append(directory)
print(cat_categories)
image_directory = {}
for i in cat_categories:
image_directory[i] = [
os.path.join(path, i, j) for j in os.listdir(os.path.join(path, i))
]
file_category = []
file_name = []
for i in image_directory.keys():
for j in image_directory[i]:
file_category.append(i)
file_name.append(j)
data = {"categories": file_category, "file_name": file_name}
train_df = pd.DataFrame(data)
train_df.head()
import matplotlib.pyplot as plt
x = train_df.sample()
random_category = x.categories.values[0]
random_file_name = x.file_name.values[0]
print(random_category)
print(random_file_name)
img = PIL.Image.open(random_file_name)
plt.imshow(img)
plt.title(random_category)
str_to_int = {}
for i in range(len(cat_categories)):
str_to_int[cat_categories[i]] = i
print(str_to_int)
class CatDataset(Dataset):
def __init__(self, data, path, transform=None):
self.data = data
self.path = path
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
image_path = self.data["file_name"][idx]
image = PIL.Image.open(image_path)
category_name = self.data["categories"][idx]
label = str_to_int[category_name]
image = image.convert("RGB")
if self.transform:
image = self.transform(image)
# image = torch.Tensor(np.transpose(np.array(image),(2,0,1)))
return image, label
transform = T.Compose(
[
T.Resize((224, 224)),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
from sklearn.model_selection import train_test_split
f0, f1 = train_test_split(train_df, test_size=0.2)  # split into training and validation sets
train_df["folds"] = -1
# f0 (80%) is used for training, f1 (20%) for validation
train_df.loc[f0.index, "folds"] = 0
train_df.loc[f1.index, "folds"] = 1
train_df.head()
path = "/kaggle/input/cats-breed-dataset/cat_v1"
train_index = train_df[train_df["folds"] != 1].index
valid_index = train_df[train_df["folds"] == 1].index
train_dataset = CatDataset(
data=train_df.loc[train_index].reset_index(drop=True),
path=path,
transform=transform,
)
val_dataset = CatDataset(
data=train_df.loc[valid_index].reset_index(drop=True),
path=path,
transform=transform,
)
train_loader = DataLoader(
train_dataset,
batch_size=32,
shuffle=True,
num_workers=0,
pin_memory=True,
drop_last=True,
)  # create batches for the training dataset
val_loader = DataLoader(
val_dataset,
batch_size=32,
shuffle=False,
num_workers=0,
pin_memory=True,
drop_last=False,
)
class BaseModel(nn.Module):
def __init__(self, model_name="tf_efficientnet_b0_ns", n_class=5, pretrained=True):
super().__init__()
self.backbone = timm.create_model(
model_name, pretrained=pretrained, num_classes=0
)
self.in_features = self.backbone.num_features
self.head = nn.Sequential(
nn.Linear(self.in_features, 1024),
nn.ReLU(),
nn.Dropout(),
nn.Linear(1024, 512),
nn.Dropout(),
nn.Linear(512, n_class),
)
def forward(self, x):
x = self.backbone(x)
x = self.head(x)
output = x
return output
from torch.optim import SGD, Adam, AdamW
from torch.optim.lr_scheduler import (
CosineAnnealingLR,
CosineAnnealingWarmRestarts,
ReduceLROnPlateau,
)
model = BaseModel("tf_efficientnet_b0_ns", 5).to(device)
optimizer = Adam(model.parameters(), lr=0.01, weight_decay=1e-3, amsgrad=False)
scheduler = CosineAnnealingLR(optimizer, T_max=10, eta_min=1e-5, last_epoch=-1)
criterion = nn.CrossEntropyLoss()
import tqdm.notebook as tqdm
def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# avg_score = AverageMeter()
    model.train()  # training mode
for step, (images, labels) in enumerate(train_loader):
        images = images.to(device)  # move images from CPU to GPU
        labels = labels.to(device)  # move labels from CPU to GPU
        batch_size = labels.size(0)
        y_preds = model(images)  # predicted labels
        loss = criterion(y_preds, labels)  # compute the loss
        losses.update(loss.item(), batch_size)
        loss.backward()  # compute parameter gradients
        optimizer.step()  # update the model
        optimizer.zero_grad()  # reset gradients
        # free unused GPU memory
        torch.cuda.empty_cache()
return losses.avg, model
def val_fn(val_loader, model):
model.eval()
final_targets = []
final_outputs = []
with torch.no_grad():
for i, (images, target) in enumerate(val_loader):
images = images.to(device)
target = target.to(device)
output = model(images)
targets = target.detach().cpu().numpy()
outputs = output.detach().cpu().numpy()
final_targets.extend(targets)
final_outputs.extend(outputs)
return final_outputs, final_targets
for epoch in range(0, 1):
avg_loss, model = train_fn(
train_loader, model, criterion, optimizer, epoch, scheduler
)
print(avg_loss)
scheduler.step()
predict, target = val_fn(val_loader, model)
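# A minimal sketch of turning the validation outputs into an accuracy score
# (assumption: `predict` holds raw per-class logits and `target` holds integer labels).
pred_labels = np.argmax(np.array(predict), axis=1)
val_acc = (pred_labels == np.array(target)).mean()
print(f"validation accuracy: {val_acc:.4f}")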
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# 1A
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
# Load Car Seat dataset
car_seat_df = pd.read_csv(
"https://raw.githubusercontent.com/selva86/datasets/master/Carseats.csv"
)
# Load Bank Personal Loan dataset
bank_loan_df = pd.read_csv(
"https://raw.githubusercontent.com/ChaithrikaRao/DataChime/master/Bank_Personal_Loan_Modelling.csv"
)
car_seat_df
bank_loan_df
# One-hot encode the 'Urban', 'US' and 'ShelveLoc' columns using pd.get_dummies
encoded_cols = pd.get_dummies(car_seat_df[["Urban", "US", "ShelveLoc"]])
car_seat_df_new = pd.concat([encoded_cols, car_seat_df], axis=1)
car_seat_features = car_seat_df_new.drop(["Sales", "Urban", "ShelveLoc", "US"], axis=1)
car_seat_features
# a)prepare the datasets for PCA
car_seat_df_scaled = StandardScaler().fit_transform(car_seat_features)
bank_loan_features = bank_loan_df.drop(["ID", "ZIP Code", "Personal Loan"], axis=1)
bank_loan_df_scaled = StandardScaler().fit_transform(bank_loan_features)
# perform PCA on the standardized data
car_seat_df_pca = PCA().fit(car_seat_df_scaled)
bank_loan_df_pca = PCA().fit(bank_loan_df_scaled)
# Create scree plots
plt.plot(
range(1, len(car_seat_df_pca.explained_variance_ratio_) + 1),
car_seat_df_pca.explained_variance_ratio_,
"bo-",
)
plt.title("Carseats Scree Plot")
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance Ratio")
plt.show()
# Create scree plots
plt.plot(
range(1, len(bank_loan_df_pca.explained_variance_ratio_) + 1),
bank_loan_df_pca.explained_variance_ratio_,
"bo-",
)
plt.title("Bank Personal Loan Scree Plot")
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance Ratio")
plt.show()
# 1B)
# To determine the optimal number of components for each dataset, we can look at the scree plots and choose the number of components where the explained variance ratio starts to level off or diminish.
# For the Car Seat dataset, the scree plot shows that the first 4 principal components explain the majority of the variance in the data. After the 4th component, the explained variance ratio levels off and does not decrease significantly, so we could choose to keep the first 4 components as our optimal number.
# For the Bank Personal Loan dataset, the scree plot shows that the first 5 principal components explain the majority of the variance in the data. After the 5th component, the explained variance ratio levels off and does not decrease significantly, so we could choose to keep the first 5 components as our optimal number.
# So, based on the scree plots, we can suggest the optimum number of components for the Car Seat dataset as 4 and for the Bank Personal Loan dataset as 5.
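# A minimal numeric check of the scree-plot reading above (assumption: keep enough
# components to explain roughly 80% of the total variance).
cum_car = np.cumsum(car_seat_df_pca.explained_variance_ratio_)
cum_loan = np.cumsum(bank_loan_df_pca.explained_variance_ratio_)
print("Carseats components for ~80% variance:", np.argmax(cum_car >= 0.80) + 1)
print("Bank loan components for ~80% variance:", np.argmax(cum_loan >= 0.80) + 1)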
# 1C
bank_loan_df_pca = PCA().fit_transform(bank_loan_df_scaled)
ploan_pca = bank_loan_df_pca
ploan_pca
car_seat_df_pca = PCA().fit_transform(car_seat_df_scaled)
carseat_pca = car_seat_df_pca
carseat_pca
|
a = 8
print(a)
type(a)
c = 2.3
b = "abc123"
print(b)
type(c)
# Variables With Numbers
# **Integer, Floating point, Complex number**
a = 234.5
print(a)
type(a)
a = 2 + 3j
print(a)
type(a)
# **"Working with multiple variable"**
Gross_profit = 15
Revenue = 100
Gross_Profit_Margin = (Gross_profit / Revenue) * 100
print(Gross_Profit_Margin)
type(Gross_Profit_Margin)
Revenue = 100
RevA = 8
Revenue = Revenue + RevA
print(Revenue)
# **Variable With String**
address = "84/9 Juhi Safed Colony Kanpur, Uttar Pradesh"
print(address)
type(address)
first_name = "Vaibhav"
last_name = "Srivastava"
name = first_name + " " + last_name
print(name)
# **Variable With Booleans** - True / False
a = 3
b = 5
c = a < b
print(c)
type(c)
c = True
print(c)
type(c)
# **Multiple Assignments**
a, b, c = "Ayush Jain", 26, 34.6
print(a)
print(b)
print(c)
# **Naming Convention Of Variables**
a = 3 # Variables are case sensitive
A = 4
print(a)
print(A)
# **Reserved Keywords** - for, in, else, break, continue, ...
# **Print Statement**
a = 3
print(a)
c = 2 * 3**3 / 2
print(c)
print("The vlue of a and b is", a, b)
# **Swapping 2 Variables**
a = 2
b = 3
c = a
a = b
b = c
print(a, b) # Swapping 2 Variables
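# A more idiomatic swap uses tuple unpacking, which avoids the temporary variable used above.
a, b = 2, 3
a, b = b, a
print(a, b)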
|
# # Import Packages
# Data Handling
import numpy as np
import pandas as pd
# Model Selection
from sklearn.model_selection import train_test_split
# Make and Compose Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer
# Preprocessing
## Missing Values
from sklearn.impute import SimpleImputer
## Feature Scaling
from sklearn.preprocessing import StandardScaler
## Categorical Encoding
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
# Model
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
# Performance - Classification
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
cohen_kappa_score,
)
# Export
# # Import Data
train = pd.read_csv("/kaggle/input/DontGetKicked/training.csv")
test = pd.read_csv("/kaggle/input/DontGetKicked/test.csv")
# # Check
train["IsBadBuy"].value_counts().plot.bar()
train.info()
test.info()
# # Numerical and Categorical Feature Names
numerical_features = (
train.drop(["RefId", "IsBadBuy"], axis=1)
.select_dtypes(include="number")
.columns.tolist()
)
categorical_features = (
train.drop(["RefId", "IsBadBuy"], axis=1)
.select_dtypes(exclude="number")
.columns.tolist()
)
numerical_features
categorical_features
# # Partition Data
y = train["IsBadBuy"]
X = train.drop(["RefId", "IsBadBuy"], axis=1)
X_test = test.drop(["RefId"], axis=1)
# train test split
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=42, stratify=y
)
# # Preprocessing
# - Numerical
# - Categorical
# - Import Package, Create an Instance, Fit, transform train, val, test
# Create Preprocessor Pipeline
preprocessor = make_column_transformer(
(
make_pipeline(SimpleImputer(strategy="median"), StandardScaler()),
numerical_features,
),
(
make_pipeline(
SimpleImputer(strategy="most_frequent"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
),
categorical_features,
),
)
# # Build Model
model = make_pipeline(
preprocessor,
RandomForestClassifier(n_estimators=10, random_state=42, class_weight="balanced"),
)
model.fit(X_train, y_train)
# # Make Predictions
# Make predictions on the training, validation, and testing sets
y_train_pred = model.predict(X_train)
y_val_pred = model.predict(X_val)
y_test_pred = model.predict(X_test)
# # Calculate Performance Scores
# Calculate performance metrics for each set
train_acc = accuracy_score(y_train, y_train_pred)
train_prec = precision_score(y_train, y_train_pred)
train_rec = recall_score(y_train, y_train_pred)
train_f1 = f1_score(y_train, y_train_pred)
train_kappa = cohen_kappa_score(y_train, y_train_pred)
val_acc = accuracy_score(y_val, y_val_pred)
val_prec = precision_score(y_val, y_val_pred)
val_rec = recall_score(y_val, y_val_pred)
val_f1 = f1_score(y_val, y_val_pred)
val_kappa = cohen_kappa_score(y_val, y_val_pred)
# Print the performance metrics
print("Training set metrics:")
print("Accuracy: {:.2f}".format(train_acc))
print("Precision: {:.2f}".format(train_prec))
print("Recall: {:.2f}".format(train_rec))
print("F1-score: {:.2f}".format(train_f1))
print("Cohen's Kappa: {:.2f}".format(train_kappa))
print()
print("Validation set metrics:")
print("Accuracy: {:.2f}".format(val_acc))
print("Precision: {:.2f}".format(val_prec))
print("Recall: {:.2f}".format(val_rec))
print("F1-score: {:.2f}".format(val_f1))
print("Cohen's Kappa: {:.2f}".format(val_kappa))
print()
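# # Export
# A minimal sketch of the export step (assumption: the competition expects the RefId
# column plus the predicted IsBadBuy label).
submission = pd.DataFrame({"RefId": test["RefId"], "IsBadBuy": y_test_pred})
submission.to_csv("submission.csv", index=False)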
|
# # Agenda of the meeting
# * How recent developments in AI could impact us as software engineers?
# * What exactly goes behind the scenes in a simple AI/ML Model? how does it work?
# * Intro to Natural Language Processing, what are LLMs, How ChatGPT works?
# # Recents Advancements in NLP
#
# * All of us have been seeing the staggering advancements in the Natural Language Processing field recently, with ChatGPT and the
# big LLMs (Large Language Models) that power ChatGPT and similar tools.
#
# * Why should we as software engineers care?
# ## My Own experience With GPT
# ## Writing Multithreaded JDBC code for some MySQL analysis.
# 
# ## Generating mock data using sample tables for the same.
# 
# # What would normally have taken 30-40 minutes took me less than 5 minutes.
# Of course, I had to tweak the code and make some changes here and there. There was one bug, which I fixed. Still a huge improvement in productivity.
# ## This is just GPT-3.5. GPT-4 is much much better at coding and logical reasoning.
# GPT-4 can solve full-fledged problem statements. What would normally take a few days or weeks might be possible in a few hours.
# GitHub is coming up with the latest version of its coding assistant - CopilotX.
# ## What this means for us specifically in programming
# 1. If we are already skilled at something, then tools like ChatGPT might be a big productivity boost.
# 2. These tools allow us to try more things, try more projects, since time taken to implement a new project comes down.
# 3. Many times it generates code with minor bugs/errors, so we have to be cautious while using it.
# ### What ChatGPT and code generation tools are good at
# * Generating code for simple use cases.
# * Generating boilerplate code.
# * Given a code snippet, explain the code in English.
# * We can use it as a chatbot for any API/framework documentation.
# * Generating Unit tests.
# * Writing Shell scripts for trivial tasks.
# ### What ChatGPT and code generation tools are not good at
# * Proper designing and structuring of code.
# * Deciding what to build (ultimately we have to decide, and start thinking more like PMs).
# ChatGPT, Github Copilot and similar tools will become even better than their current state.
# Based on what we have been seeing over the past few weeks/months, it is very clear we have to make use of these tools/models in our work.
# So for all these reasons, it really is worth looking into what exactly AI and ML are and what happens behind the scenes. It will help us use these AI/ML models properly.
# # Intro to Machine Learning Basics
# What do we mean by Machine Learning? AI is an umbrella term that covers a lot of topics, and ML is the core part of it. So we will cover ML basics here.
# > Input --> Algorithm --> output
# ### Traditional Programming
# > We provide - Input, algorithm
# > We get - Output
# ### Machine Learning
# > We provide - Input, Output (As training dataset)
# > We get - algorithm (i.e. the computer itself will learn what algorithm to use.)
# Based on the learned algorithm, the computer will act on inputs it has not yet seen.
# ## Types of Machine Learning Methods
#
# 1. Supervised Learning (Regression, Classification, etc.)
# 2. Unsupervised Learning (Anomaly Detection, PCA, etc.)
# # Supervised Learning
# * Specifically We will focus on a supervised learning example.
# ## Prediction of housing prices based on size of the house
import numpy as np
# Generate some random data
x = np.random.rand(50)
y = np.random.rand(50)
import matplotlib.pyplot as plt
# Sort the data along the x-axis
sort_idx = np.argsort(x)
x = x[sort_idx]
# Normalize the x-axis data to the range of 500 to 10000
x = np.interp(x, (x.min(), x.max()), (500, 10000))
sort_idx = np.argsort(y)
y = y[sort_idx]
y = np.interp(y, (y.min(), y.max()), (15000, 150000))
# Create a figure and axis object
fig, ax = plt.subplots()
# Plot the data
ax.scatter(x, y)
# Set the plot title and axis labels
ax.set_title("Housing Prices vs Size of the house")
ax.set_xlabel("Size of the house(in sq.feet)")
ax.set_ylabel("Housing Prices in INR")
# fit a line through the data.
# ax.plot([0, 1], [0, 1], color='red', transform=ax.transAxes)
# fit a horizontal line through the data
# ax.hlines(y=100000, xmin=0, xmax=10000, linewidth=2, color='r')
# Display the plot
plt.show()
# ## (Size of the house in sq.feet) X - Input.
# ## (House Price in INR) Y - output.
# Given X and Y, our goal is to find an f(x) that best fits the given data. By looking at the data, we can say that fitting a straight line would be a reasonable choice.
# Using this line we can extrapolate for all the data not present in given dataset.
# ## Y = F(X)
#
# ### So how do we do that?
# # Finding the best F(X) that would accurately calculate housing prices is called training a model.
# > F(X) = AX + B
# > Y = AX + B
#
# We know X and Y, we need to find out optimal values of A and B (A and B are model params/weights) that would predict housing prices accurately.
# (Note: This model we are training has only 2 params, since this is a very simple model)
# (For comparison, the GPT-3 model has 175 billion params and is very, very big and complicated).
#
# Steps to do that would be,
#
# 1. Initializing Random weights for params
#
# 2. Let A = 0 and B = 100000.
#
# 3. Based on these params, we can apply them on the input data X and find out F(X).
#
# 4. We will get some incorrect housing prices, say Y'.
#
# 5. We can find the difference between actual value Y and predicted value Y'.
#
# ## Cost Function
# > C = $(Y - Y')^{2}$
# 6. Our goal is to minimize the value of this particular cost function (bring it as close as possible to zero).
# The absolute difference between the actual value and the predicted value should then be very nearly zero.
#
# 7. In order to minimize the cost function, we will do something called gradient descent.
#
#
# ## Gradient Descent
# `for 10000 iterations:
#     A = A - learning_rate * d(Cost)/dA
#     B = B - learning_rate * d(Cost)/dB
#     Y' = AX + B
#     C = (Y' - Y)^2`
# (A runnable NumPy sketch of this loop appears after this list.)
#
# We do this again and again until A and B converge to a particular set of values.
#
# At that point, we can be sure that cost function is minimized.
#
# To get the intuition behind this gradient descent step, how differential calculus is used, take up the intro to ML course in Coursera.
# 8. Once we bring down the cost function to a minimum value, we will arrive at some values of A and B.
# > In this particular case something like A = 10000 and B = 0.01
# > Y = AX + B --> Y = 10000X + 0.01
# 9. These values of A and B are called as trained/learned weights.
# 10. Y = 10000X + 0.01 this equation is called as a trained model.
# 11. Using this trained model, we can predict house prices for any arbitrarily sized house.
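# A minimal NumPy sketch of the gradient-descent loop described above, fitting
# Y = A*X + B to the synthetic x, y arrays generated earlier. The learning rate,
# iteration count, and input scaling are illustrative assumptions, not tuned values.
A, B = 0.0, 100000.0  # steps 1-2: initial guesses for the params
x_scaled = x / x.max()  # scale X so a single learning rate works for both params
lr = 0.1
for _ in range(10000):
    y_hat = A * x_scaled + B  # step 3: predictions with the current params
    error = y_hat - y  # steps 4-5: difference between predicted and actual prices
    grad_A = 2 * np.mean(error * x_scaled)  # step 7: gradient of the squared-error cost
    grad_B = 2 * np.mean(error)
    A -= lr * grad_A
    B -= lr * grad_B
print("learned A:", A, "learned B:", B)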
# ## Some Points to Note.
# * This is a two-param model (A and B). Real-world models are much more complicated and have tons of params.
# * Even for the above data, a straight line does not fit it properly. We need something like a curved line.
# * To represent a curved line we need a quadratic equation: f(x) = A$X^2$ + BX + C
# * So here the number of params is three. This quadratic equation fits the data better than the straight line.
# * So as the number of params increases, we have a better chance of accurately modeling the given data, and prediction accuracy goes up.
# * That's why GPT keeps getting more powerful with each release.
# > GPT 2 - 1.5 billion params
# > GPT 3 - 175 billion params
#
# > GPT 4 - 10 Trillion params (exact value not disclosed. Based on rumours).
#
# * Size of the training dataset also contributes to model accuracy.
#
# * Disclaimer: In the above explanation, I have conveniently skipped over a lot of important micro details to keep the explanation simple.
# (You can take an intro ML course on Coursera to learn more)
# ## There are three steps to use a ML model.
# 1. Preparing a dataset.
#
# 2. Training a model on the dataset.
#
# 3. Using the trained model to do predictions/classifications (this step is also called inference).
#
# This is common to all supervised learning models (irrespective of their size).
# ## Supervised Learning
# * The above model was a regression model or a prediction model. Output could be anything on that line.
# * There are also classification models. The output will be one among a defined set of values
# (Email - Spam/Not spam, Dog vs Cat images, Handwritten digits - (0-9) classification).
# ## Neural Networks
# * Neural Networks is a very complicated model that tries to mimic the way a human brain works.
# * In simple words, it is a very, very complicated maths equation that can do a lot of amazing things.
# * Explaining them would be a topic for another day.
# # Sample Demo: Image Classification using a Convolutional Neural Network model
# ResNet-50 with 1000 output classes of images (different animal species, cars, vehicles, etc.)
from datasets import load_dataset
from matplotlib import pyplot as plt
from PIL import Image
import requests
from io import BytesIO
# car image
# response = requests.get('https://stimg.cardekho.com/images/carexteriorimages/930x620/Porsche/911/7652/1677566789939/front-left-side-47.jpg?tr=w-880,h-495')
# elephant
response = requests.get(
"https://t1.gstatic.com/licensed-image?q=tbn:ANd9GcTkSca_mIEnlFmPU_hGz03cfzQekbr7xlVuMWmt_XVadGbCDaTf5bn5fEPi_6NMVHeRzmMhmF9RI9-VTDU"
)
img = Image.open(BytesIO(response.content))
## plotting the image
plt.imshow(img, interpolation="nearest")
plt.figure()
from transformers import AutoImageProcessor, ResNetForImageClassification
import torch
## Importing a pretrained cnn model and image preprocessor here.
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
## converting the input image into vectors of numbers
inputs = processor(img, return_tensors="pt")
## doing inference with the model.
with torch.no_grad():
logits = model(**inputs).logits
# model predicts one of the 1000 ImageNet classes
predicted_label = logits.argmax(-1).item()
# Printing out the predicted label
print("The given image is a " + model.config.id2label[predicted_label])
|
# # InstructPix2Pix
# ## Change the runtime to GPU and install all the dependencies.
# ## Import all the installed required libraries and load the models.
import os
import glob
import tarfile
import shutil
import random
import requests
import torch
import PIL
from PIL import ImageOps
from IPython.display import display
from diffusers import StableDiffusionInstructPix2PixPipeline
model_id = "timbrooks/instruct-pix2pix"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
model_id, torch_dtype=torch.float16
).to("cuda")
# ## Demo: Make the mountain snowy.
def open_image(fp):
image = PIL.Image.open(fp)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
def download_image(url):
image = open_image(requests.get(url, stream=True).raw)
return image
url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
# Download the image
image = download_image(url)
# Define the prompt
prompt = "make the mountains snowy"
# Process the image with the prompt using the pipeline
images = pipe(
prompt,
image=image,
num_inference_steps=20,
image_guidance_scale=1.5,
guidance_scale=7,
).images
# Display the original and processed images side by side
new_image = PIL.Image.new("RGB", (image.width * 2, image.height))
new_image.paste(image, (0, 0))
new_image.paste(images[0], (image.width, 0))
display(new_image)
# Define the prompt
prompt = "change the weather to sunny"
# Process the image with the prompt using the pipeline
images = pipe(
prompt,
image=image,
num_inference_steps=50,
image_guidance_scale=1.5,
guidance_scale=7,
).images
# Display the original and processed images side by side
new_image = PIL.Image.new("RGB", (image.width * 2, image.height))
new_image.paste(image, (0, 0))
new_image.paste(images[0], (image.width, 0))
display(new_image)
# Define the prompt
prompt = "change the season to autumn"
# Process the image with the prompt using the pipeline
images = pipe(
prompt,
image=image,
num_inference_steps=50,
image_guidance_scale=1.5,
guidance_scale=7,
).images
# Display the original and processed images side by side
new_image = PIL.Image.new("RGB", (image.width * 2, image.height))
new_image.paste(image, (0, 0))
new_image.paste(images[0], (image.width, 0))
display(new_image)
# ## Virtual try-on with LFW dataset
import tarfile
with tarfile.open("/kaggle/input/lfwpeople/lfw-funneled.tgz", "r") as tar:
for member in tar.getmembers():
tar.extract(member, "data")
PATH = "/kaggle/working/data/lfw_funneled"
image_path = os.path.join(PATH, "AJ_Cook/AJ_Cook_0001.jpg")
# Load the image
image = open_image(image_path)
# Define the prompt
prompt = "make her wear a hat"
# Process the image with the prompt using the pipeline
images = pipe(
prompt,
image=image,
num_inference_steps=20,
image_guidance_scale=1.5,
guidance_scale=7,
).images
# Display the original and processed images side by side
new_image = PIL.Image.new("RGB", (image.width * 2, image.height))
new_image.paste(image, (0, 0))
new_image.paste(images[0], (image.width, 0))
display(new_image)
image_path = os.path.join(PATH, "AJ_Cook/AJ_Cook_0001.jpg")
# Load the image
image = open_image(image_path)
# Define the prompt
prompt = "change hair color to platinum"
# Process the image with the prompt using the pipeline
images = pipe(
prompt,
image=image,
num_inference_steps=20,
image_guidance_scale=1.5,
guidance_scale=7,
).images
# Display the original and processed images side by side
new_image = PIL.Image.new("RGB", (image.width * 2, image.height))
new_image.paste(image, (0, 0))
new_image.paste(images[0], (image.width, 0))
display(new_image)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random, os, glob, cv2, h5py
from keras.models import Model, model_from_json
from sklearn import svm
from sklearn.utils import shuffle
from sklearn.metrics import classification_report, accuracy_score, f1_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
fire_path_test = r"../input/fire-detection/Dataset/Testing/fire/"
nonfire_path_test = r"../input/fire-detection/Dataset/Testing/nofire/"
fire_path_train = r"../input/fire-detection/Dataset/Training and Validation/fire/"
nonfire_path_train = r"../input/fire-detection/Dataset/Training and Validation/nofire/"
fire_df_train = pd.DataFrame(
{"path": [fire_path_train + file for file in os.listdir(fire_path_train)]}
)
fire_df_train["label"] = pd.Series(1, index=range(fire_df_train.shape[0]))
fire_df_train.head()
fire_df_test = pd.DataFrame(
{"path": [fire_path_test + file for file in os.listdir(fire_path_test)]}
)
fire_df_test["label"] = pd.Series(1, index=range(fire_df_test.shape[0]))
fire_df_test.head()
nonfire_df_train = pd.DataFrame(
{"path": [nonfire_path_train + file for file in os.listdir(nonfire_path_train)]}
)
nonfire_df_train["label"] = pd.Series(0, index=range(nonfire_df_train.shape[0]))
nonfire_df_train.head()
nonfire_df_test = pd.DataFrame(
{"path": [nonfire_path_test + file for file in os.listdir(nonfire_path_test)]}
)
nonfire_df_test["label"] = pd.Series(0, index=range(nonfire_df_test.shape[0]))
nonfire_df_test.head()
# Fire
print(fire_df_train.shape, fire_df_test.shape)
fire_df = pd.concat([fire_df_train, fire_df_test])
print(fire_df.shape)
# nofire
print(nonfire_df_train.shape, nonfire_df_test.shape)
nonfire_df = pd.concat([nonfire_df_train, nonfire_df_test])
print(nonfire_df.shape)
# Fire
fire_df = shuffle(fire_df, random_state=0)
fire_train, fire_test = train_test_split(fire_df, test_size=0.4, random_state=1)
fire_valid, fire_test = train_test_split(fire_test, test_size=0.5, random_state=1)
print(fire_train.shape, fire_test.shape, fire_valid.shape)
# Non Fire
nonfire_df = shuffle(nonfire_df, random_state=0)
nonfire_train, nonfire_test = train_test_split(
nonfire_df, test_size=0.4, random_state=1
)
nonfire_valid, nonfire_test = train_test_split(
nonfire_test, test_size=0.5, random_state=1
)
print(nonfire_train.shape, nonfire_test.shape, nonfire_valid.shape)
train_df = pd.concat([fire_train, nonfire_train]).reset_index().drop(["index"], axis=1)
test_df = pd.concat([fire_test, nonfire_test]).reset_index().drop(["index"], axis=1)
valid_df = pd.concat([fire_valid, nonfire_valid]).reset_index().drop(["index"], axis=1)
print(train_df.shape, valid_df.shape, test_df.shape)
plt.figure(figsize=(20, 5))
plt.subplot(1, 3, 1)
train_df.label.hist()
plt.subplot(1, 3, 2)
test_df.label.hist()
plt.subplot(1, 3, 3)
valid_df.label.hist()
size_img = (224, 224)
def read_img(path, size):
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, size) / 255
return img
def read_all_img(paths, labels, size):
x = np.array(
[
cv2.resize(cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB), size)
for path in paths
]
)
y = labels.to_numpy()
return x, y
x_train, y_train = read_all_img(train_df.path, train_df.label, size_img)
x_valid, y_valid = read_all_img(valid_df.path, valid_df.label, size_img)
x_test, y_test = read_all_img(test_df.path, test_df.label, size_img)
for idx, i in enumerate(x_train[:3]):
plt.imshow(i)
plt.title(y_train[idx])
plt.show()
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Input, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import CSVLogger
from keras.callbacks import Callback, CSVLogger, ModelCheckpoint
batch_size = 32
size_image = 224
train_paths = train_df.path
y_train = train_df.label
test_paths = test_df.path
y_test = test_df.label
valid_paths = valid_df.path
y_valid = valid_df.label
# Callbacks
checkpoint_min_val_loss = ModelCheckpoint(
"../working/model_min_val_loss.h5",
verbose=1,
monitor="val_loss",
mode="min",
save_best_only=True,
save_weights_only=True,
)
checkpoint_max_val_acc = ModelCheckpoint(
"../working/model_max_val_acc.h5",
verbose=1,
monitor="val_binary_accuracy",
mode="max",
save_best_only=True,
save_weights_only=True,
)
csvlogger = CSVLogger(
filename="../working/training_log.csv", separator=",", append=True
)
callbacks_list = [checkpoint_min_val_loss, checkpoint_max_val_acc, csvlogger]
# for keras
from classification_models.keras import Classifiers
ResNet18, preprocess_input = Classifiers.get("resnet18")
resnet18 = ResNet18((224, 224, 3), weights="imagenet", include_top=False)
x = GlobalAveragePooling2D()(resnet18.output)
x = Dropout(0.7)(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.7)(x)
output = Dense(1, activation="sigmoid")(x)
# Model
model_resnet18 = Model(resnet18.input, output)
model_resnet18.summary()
# warm-up
for layer in model_resnet18.layers:
layer.trainable = False
for i in range(-5, 0):
model_resnet18.layers[i].trainable = True
model_resnet18.compile(
loss="binary_crossentropy",
optimizer="Adam",
metrics=[
"binary_accuracy",
],
)
model_resnet18.fit(
x_train,
y_train,
epochs=2,
shuffle=True,
validation_data=(x_valid, y_valid),
callbacks=callbacks_list,
)
epochs = 32
for layer in model_resnet18.layers:
layer.trainable = True
model_resnet18.compile(
loss="binary_crossentropy",
optimizer=Adam(learning_rate=1e-4),
metrics=["binary_accuracy"],
)
model_resnet18.fit(
x_train,
y_train,
epochs=epochs,
shuffle=True,
validation_data=(x_valid, y_valid),
callbacks=callbacks_list,
)
save = pd.read_csv("../working/training_log.csv")
print(save.shape)
# Plot
plt.figure(figsize=(20, 5))
plt.subplot(121)
plt.title("Binary accuracy")
plt.plot(save.binary_accuracy)
plt.plot(save.val_binary_accuracy)
plt.legend(("Train", "Valid"))
plt.subplot(122)
plt.title("Loss")
plt.plot(save.loss)
plt.plot(save.val_loss)
plt.legend(("Train", "Valid"))
# Save model
model_json = model_resnet18.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# load json and create model
json_file = open("model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
model_resnet18 = model_from_json(loaded_model_json)
# restore the best weights saved by the ModelCheckpoint callback above,
# otherwise the rebuilt model would start with random weights
model_resnet18.load_weights("../working/model_max_val_acc.h5")
print("Train f1:", f1_score(y_train, model_resnet18.predict(x_train).round()))
print("Valid f1:", f1_score(y_valid, model_resnet18.predict(x_valid).round()))
print("Test f1:", f1_score(y_test, model_resnet18.predict(x_test).round()))
pred = model_resnet18.predict(x_test[:5])
for idx, i in enumerate(x_test[:3]):
plt.imshow(i)
    plt.title(
        str(y_test[idx])
        + (" (correct prediction)" if pred[idx].round() == y_test[idx] else " (incorrect prediction)")
    )
plt.show()
|
# loading the library
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# loading the datasets
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv", index_col=0)
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv", index_col=0)
submission_file = pd.read_csv(
"/kaggle/input/playground-series-s3e12/sample_submission.csv", index_col=0
)
# # what to do here:
# * Building machine learning models to predict the target;
# * I probably have to use a classification method;
# * Learn how to do that and practice here;
# # Data
# * Need to know what they mean: 'id', 'gravity', 'ph', 'osmo', 'cond', 'urea', 'calc', 'target'
# checking with the data
train.shape
train.columns
train.size
train.head()
train.info()
train.isna().sum()
train.isnull().sum()
# test dataset
test.shape
test.columns
test.size
test.head()
test.info()
test.isna().sum()
test.isnull().sum()
# # Descriptive statistics (EDA)
# * check the missing values of the data
# * calculate the descriptive statistics
# * correlation analysis
# * basic regression analysis (multiple regression using couple of models)
descriptive = train.describe().transpose()
# calculating skewness and kurtosis and t-value of the data
from scipy.stats import skew, kurtosis, t
# skewness = skew(train, axis=0)
# kurtosis= kurtosis(train,axis=0)
# or using the following command to get results with column names
skew = pd.DataFrame([train[:].skew()], index=["skewness"]).transpose()
kurtosis = pd.DataFrame([train[:].kurtosis()], index=["kurtosis"]).transpose()
skewness_kurtosis = skew.join(kurtosis)
# creating a function for calculating a t-value (to see whether the mean is different from 0 or not)
def t_value(data):
import pandas as pd
t_value = (data.mean(axis=0) - 0) / (np.std(data) / np.sqrt(len(data)))
return pd.DataFrame(t_value)
t_value = t_value(data=train)
t_value.rename(columns={0: "tScore"}, inplace=True)
# combining all the results together (descriptive statistics)
ds_results = descriptive.join(skewness_kurtosis)
ds_statistics = ds_results.join(t_value)
ds_statistics = pd.DataFrame(ds_statistics)
ds_statistics = ds_statistics.rename(
    columns={
        "count": "Count",
        "mean": "Mean",
        "std": "SD",
        "min": "Min",
        "25%": "25%",
        "50%": "50%",
        "75%": "75%",
        "max": "Max",
        "skewness": "Skewness",
        "kurtosis": "Kurtosis",
        "tScore": "t-value",
    }
)
# box plot for the train data
train.boxplot()
plt.show()
# showing histogram of the variables
train.hist()
plt.tight_layout()
plt.show()
# plotting the correlation heatmap to show the results
corr = train.corr()
sns.heatmap(data=corr, cmap="coolwarm", annot=True, robust=True, cbar=True)
plt.show()
# develop general regression models to investigate the issue
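# A minimal baseline sketch along these lines (assumption: a scaled logistic regression
# on all columns except the target, evaluated with ROC AUC on a held-out split).
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
X = train.drop(columns=["target"])
y = train["target"]
X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
clf = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
clf.fit(X_tr, y_tr)
print("validation ROC AUC:", roc_auc_score(y_va, clf.predict_proba(X_va)[:, 1]))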
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from catboost import CatBoostClassifier
from functools import partial
from skopt import space
from skopt import gp_minimize
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ### Loading data files along with original data
df_train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
df_test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
original_data = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
df_train.head()
original_data.shape
# ### Making a Dataframe based on given files and original data file
# Is_generated
df_train["is_generated"] = 1
df_test["is_generated"] = 1
original_data["is_generated"] = 0
# Join data
train_full = pd.concat(
[df_train, original_data], axis=0, ignore_index=True
).reset_index(drop=True)
train_full.head()
df_test.head()
# ### Feature Engineering
#
# From https://www.kaggle.com/code/tetsutani/ps3e12-eda-ensemble-baseline
def create_new_features(data):
# Ion product of calcium and urea
data["ion_product"] = data["calc"] * data["urea"]
# Calcium-to-urea ratio
data["calcium_to_urea_ratio"] = data["calc"] / data["urea"]
# Electrolyte balance
data["electrolyte_balance"] = data["cond"] / (10 ** (-data["ph"]))
# Osmolality-to-specific gravity ratio
data["osmolality_to_sg_ratio"] = data["osmo"] / data["gravity"]
## Add Feature engineering part
# The product of osmolarity and density is created as a new property
data["osmo_density"] = data["osmo"] * data["gravity"]
    # Converting the pH column to a categorical variable
    data["pH_cat"] = pd.cut(
        data["ph"],
        bins=[0, 4.5, 6.5, 8.5, 14],
        labels=["very acidic", "acidic", "neutral", "basic"],
    )
    dummies = pd.get_dummies(data["pH_cat"])
    data = pd.concat([data, dummies], axis=1)
    # Dropping the columns that were replaced by dummy variables.
    data.drop(
        ["pH_cat", "very acidic", "basic", "neutral", "ph"], axis=1, inplace=True
)
return data
# Create Feature
train_full = create_new_features(train_full)
df_test = create_new_features(df_test)
# ### Observing Correlation
# https://www.kaggle.com/code/naesalang/little-beautiful-notebook/notebook
correlation = train_full.corr()
correlation["target"].drop("target").plot(kind="bar", color="xkcd:magenta")
plt.grid(True)
plt.xlabel("Features")
plt.ylabel("Correlation")
# ### Simple Preprocessing
useful_columns = [c for c in train_full.columns if c not in ["id", "target"]]
print(useful_columns)
df_test = pd.DataFrame(df_test, columns=useful_columns)
df_test.columns
# ### Building X and y datasets from training full dataset
y = train_full["target"]
X = train_full[useful_columns]
# ### Scaling the datasets by applying StandardScaler()
# Scale
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_test = pd.DataFrame(scaler.transform(df_test))
X = np.asarray(X)
X_test = np.asarray(X_test)
# ### Using Bayesian Optimization
# The following two cells may be uncommented if you want to tune the hyperparameters.
"""def optimize(params, param_names, x, y):
# convert params to dictionary
params = dict(zip(param_names, params))
model = CatBoostClassifier(**params, random_state = 42, verbose = False)
num_folds = 5
kf = StratifiedKFold(n_splits = num_folds, shuffle = True, random_state = 42)
scores = []
for fold, (trn_idx, val_idx) in enumerate(kf.split(X,y)):
print('-'*20, 'Fold:', fold, '-'*20)
X_train, X_valid = X[trn_idx], X[val_idx]
y_train, y_valid = y[trn_idx], y[val_idx]
model.fit(X_train, y_train)
y_pred = model.predict_proba(X_valid)[:,1]
score = roc_auc_score(y_valid,y_pred)
print('roc_auc_score: ', score)
scores.append(score)
return -1*np.mean(scores)"""
"""param_space = [
space.Integer(2,10, name = "max_depth"),
space.Integer(50, 1000, name = "n_estimators"),
space.Real(0.01, 0.5, name = 'learning_rate'),
space.Real(1.0, 10.0, name = 'l2_leaf_reg')
]
param_names = ["max_depth", "n_estimators","learning_rate", "l2_leaf_reg"]
optimization_function = partial(optimize, param_names = param_names, x = X, y = y)
result = gp_minimize(optimization_function, dimensions = param_space, n_calls = 15,
n_random_starts = 10, verbose = 10)
best_params = dict(zip(param_names, result.x))
print(best_params)"""
# #### Running the Final Model after Parameter Tuning
best_params = {
"max_depth": 10,
"n_estimators": 50,
"learning_rate": 0.44980085135871845,
"l2_leaf_reg": 10.0,
}
y_preds = np.zeros(len(X_test))
num_folds = 5
kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=42)
scores = []
for fold, (trn_idx, val_idx) in enumerate(kf.split(X, y)):
print("-" * 20, "Fold:", fold, "-" * 20)
X_train, X_valid = X[trn_idx], X[val_idx]
y_train, y_valid = y[trn_idx], y[val_idx]
model = CatBoostClassifier(**best_params, random_state=42, verbose=False)
model.fit(X_train, y_train)
y_pred = model.predict_proba(X_valid)[:, 1]
score = roc_auc_score(y_valid, y_pred)
y_preds += model.predict_proba(X_test)[:, 1] / num_folds
print("roc_auc_score: ", score)
scores.append(score)
print(-1 * np.mean(scores))
# ### Submission
submission["target"] = y_preds
submission.to_csv("submission.csv", index=False)
|
NUM_SENTENCE = 4
import pandas as pd
df_train = pd.read_csv("/kaggle/input/mtsamples-v2/summ_train.tsv", sep="\t")
df_test = pd.read_csv("/kaggle/input/mtsamples-v2/summ_test_new.tsv", sep="\t")
# df = pd.concat([df_train, df_test], ignore_index = True)
df_train.shape, df_test.shape
df_train.sample(2)
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.luhn import LuhnSummarizer
import nltk
import re
nltk.download("punkt")
summarizer = LuhnSummarizer()
def section_wise_luhn(df_old, num_sent):
transc = df_old["transcription"].values
desc = df_old["description"].values
assert len(transc) == len(desc)
new_trans = []
new_desc = []
lens_old = []
lens_new = []
for i in range(len(transc)):
x = transc[i]
lens_old.append(len(x))
flag = 0
new_x = ""
splits = {}
pattern = r"[A-Z\s]+:"
match = re.findall(pattern, x)
l = len(match)
for j in range(l):
if j < l - 1:
splits[match[j]] = x[
x.find(match[j]) + len(match[j]) : x.find(match[j + 1])
]
else:
splits[match[j]] = x[x.find(match[j]) + len(match[j]) :]
        assert len(splits.keys()) == len(re.findall(pattern, x)), x
for sec, text in splits.items():
parser = PlaintextParser.from_string(text, Tokenizer("english"))
summary = summarizer(parser.document, num_sent)
new_x += sec
new_x += " "
for sent in summary:
new_x += str(sent)
new_x += ". "
if len(new_x) > 0:
new_trans.append(new_x)
lens_new.append(len(new_x))
new_desc.append(desc[i])
else:
new_trans.append(x)
lens_new.append(len(new_x))
new_desc.append(desc[i])
print("OLD: ", sum(lens_old) / len(lens_old))
print("NEW: ", sum(lens_new) / len(lens_new))
return pd.DataFrame.from_dict({"description": new_desc, "transcription": new_trans})
print(df_train.shape)
df_train = section_wise_luhn(df_train, NUM_SENTENCE)
print(df_train.shape)
print(df_test.shape)
df_test = section_wise_luhn(df_test, NUM_SENTENCE)
print(df_test.shape)
from nltk.translate.bleu_score import corpus_bleu
def bleu_score(refs, preds):
actual, predicted = [], []
for r, p in zip(refs, preds):
actual.append([r.split()])
predicted.append(p.split())
return round(corpus_bleu(actual, predicted) * 100, 2)
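# Quick illustrative check with made-up strings (not from the dataset):
# an identical reference and prediction should give a BLEU of 100.
sample_refs = ["the patient was discharged in stable condition"]
sample_preds = ["the patient was discharged in stable condition"]
print(bleu_score(sample_refs, sample_preds))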
# Importing libraries
import random, os
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from torch.optim.lr_scheduler import ReduceLROnPlateau
import matplotlib.pyplot as plt
# Importing the T5 modules from huggingface/transformers
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from rich.table import Column, Table
from rich import box
from rich.console import Console
import evaluate
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
rouge_score = evaluate.load("rouge")
# define a rich console logger
console = Console(record=True)
def display_df(df):
"""display dataframe in ASCII format"""
console = Console()
table = Table(
Column("source_text", justify="center"),
Column("target_text", justify="center"),
title="Sample Data",
pad_edge=False,
box=box.ASCII,
)
for i, row in enumerate(df.values.tolist()):
table.add_row(row[0], row[1])
console.print(table)
training_logger = Table(
Column("Epoch", justify="center"),
Column("Steps", justify="center"),
Column("Loss", justify="center"),
Column("Rouge/BLEU score", justify="center"),
title="Training Status",
pad_edge=False,
box=box.ASCII,
)
# Setting up the device for GPU usage
from torch import cuda
device = "cuda" if cuda.is_available() else "cpu"
class YourDataSetClass(Dataset):
"""
Creating a custom dataset for reading the dataset and
loading it into the dataloader to pass it to the neural network for finetuning the model
"""
def __init__(
self, dataframe, tokenizer, source_len, target_len, source_text, target_text
):
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = target_len
self.target_text = self.data[target_text]
self.source_text = self.data[source_text]
def __len__(self):
return len(self.target_text)
def __getitem__(self, index):
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
# cleaning data so as to ensure data is in string type
source_text = " ".join(source_text.split())
target_text = " ".join(target_text.split())
source = self.tokenizer.batch_encode_plus(
[source_text],
max_length=self.source_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
target = self.tokenizer.batch_encode_plus(
[target_text],
max_length=self.summ_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
source_ids = source["input_ids"].squeeze()
source_mask = source["attention_mask"].squeeze()
target_ids = target["input_ids"].squeeze()
target_mask = target["attention_mask"].squeeze()
return {
"source_ids": source_ids.to(dtype=torch.long),
"source_mask": source_mask.to(dtype=torch.long),
"target_ids": target_ids.to(dtype=torch.long),
"target_ids_y": target_ids.to(dtype=torch.long),
}
def train(
epoch, tokenizer, model, device, train_loader, val_loader, optimizer, scheduler
):
"""
Function to be called for training with the parameters passed from main function
"""
model.train()
r1 = []
r2 = []
rl = []
rlsum = []
b = []
te_losses = []
tr_loss = 0
for _, data in enumerate(train_loader, 0):
y = data["target_ids"].to(device, dtype=torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data["source_ids"].to(device, dtype=torch.long)
mask = data["source_mask"].to(device, dtype=torch.long)
optimizer.zero_grad()
outputs = model(
input_ids=ids,
attention_mask=mask,
decoder_input_ids=y_ids,
labels=lm_labels,
)
loss = outputs[0]
loss.backward()
optimizer.step()
tr_loss += loss.item()
if _ % 50 == 0:
predictions, actuals, l = validate(
epoch, tokenizer, model, device, val_loader
)
scores = rouge_score.compute(predictions=predictions, references=actuals)
te_losses.append(l)
temp = bleu_score(actuals, predictions)
b.append(temp)
r1.append(round(scores["rouge1"] * 100, 2))
r2.append(round(scores["rouge2"] * 100, 2))
rl.append(round(scores["rougeL"] * 100, 2))
rlsum.append(round(scores["rougeLsum"] * 100, 2))
r_dict = {
"r1": round(scores["rouge1"] * 100, 2),
"r2": round(scores["rouge2"] * 100, 2),
"rl": round(scores["rougeL"] * 100, 2),
"rlsum": round(scores["rougeLsum"] * 100, 2),
"bleu": temp,
}
training_logger.add_row(str(epoch), str(_), str(l), str(r_dict))
console.print(training_logger)
scheduler.step(l)
return r1, r2, rl, rlsum, b, float(tr_loss / len(train_loader)), te_losses
def validate(epoch, tokenizer, model, device, loader):
"""
Function to evaluate model for predictions
"""
model.eval()
predictions = []
actuals = []
t_loss = 0
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data["target_ids"].to(device, dtype=torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data["source_ids"].to(device, dtype=torch.long)
mask = data["source_mask"].to(device, dtype=torch.long)
outputs = model(
input_ids=ids,
attention_mask=mask,
decoder_input_ids=y_ids,
labels=lm_labels,
)
loss = outputs[0]
t_loss += loss.item()
generated_ids = model.generate(
input_ids=ids,
attention_mask=mask,
max_length=150,
num_beams=2,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True,
)
preds = [
tokenizer.decode(
g, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
for g in generated_ids
]
target = [
tokenizer.decode(
t, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
for t in y
]
# if _%10==0:
# console.print(f'Completed {_}')
predictions.extend(preds)
actuals.extend(target)
return predictions, actuals, float(t_loss / len(loader))
def T5Trainer(
train_dataset,
val_dataset,
source_text,
target_text,
model_params,
output_dir="./outputs/",
):
"""
T5 trainer
"""
# Set random seeds and deterministic pytorch for reproducibility
# torch.manual_seed(model_params["SEED"]) # pytorch random seed
# np.random.seed(model_params["SEED"]) # numpy random seed
# torch.backends.cudnn.deterministic = True
seed_everything(model_params["SEED"])
# logging
console.log(f"""[Model]: Loading {model_params["MODEL"]}...\n""")
# tokenzier for encoding the text
tokenizer = AutoTokenizer.from_pretrained(model_params["MODEL"])
    # Defining the model: load the pretrained seq2seq checkpoint named in model_params
    # (here facebook/bart-base) together with its language-modeling head for summary generation.
    # The model is then moved to the device (GPU/CPU) so the hardware is used.
model = AutoModelForSeq2SeqLM.from_pretrained(model_params["MODEL"])
model = model.to(device)
# logging
console.log(f"[Data]: Reading data...\n")
# Importing the raw dataset
# dataframe = dataframe[[source_text,target_text]]
# display_df(df_train.head(2))
# console.print(f"FULL Dataset: {dataframe.shape}")
console.print(f"TRAIN Dataset: {train_dataset.shape}")
console.print(f"TEST Dataset: {val_dataset.shape}\n")
# Creating the Training and Validation dataset for further creation of Dataloader
training_set = YourDataSetClass(
train_dataset,
tokenizer,
model_params["MAX_SOURCE_TEXT_LENGTH"],
model_params["MAX_TARGET_TEXT_LENGTH"],
source_text,
target_text,
)
val_set = YourDataSetClass(
val_dataset,
tokenizer,
model_params["MAX_SOURCE_TEXT_LENGTH"],
model_params["MAX_TARGET_TEXT_LENGTH"],
source_text,
target_text,
)
# Defining the parameters for creation of dataloaders
train_params = {
"batch_size": model_params["TRAIN_BATCH_SIZE"],
"shuffle": True,
"num_workers": 0,
}
val_params = {
"batch_size": model_params["VALID_BATCH_SIZE"],
"shuffle": False,
"num_workers": 0,
}
    # Creation of DataLoaders for training and validation; these are used in the training and validation stages of the model.
training_loader = DataLoader(training_set, **train_params)
val_loader = DataLoader(val_set, **val_params)
# Defining the optimizer that will be used to tune the weights of the network in the training session.
optimizer = torch.optim.Adam(
params=model.parameters(), lr=model_params["LEARNING_RATE"]
)
scheduler = ReduceLROnPlateau(optimizer, "min", factor=0.5)
# Training loop
console.log(f"[Initiating Fine Tuning]...\n")
r1s = []
r2s = []
rls = []
rlsums = []
bs = []
trl = []
tel = []
path = os.path.join(output_dir, "best_model.pt")
for epoch in range(model_params["TRAIN_EPOCHS"]):
r1, r2, rl, rlsum, b, tr, te = train(
epoch,
tokenizer,
model,
device,
training_loader,
val_loader,
optimizer,
scheduler,
)
# if len(r1s) == 0:
# torch.save(model, path)
# elif max(r1) > max(r1s) and max(r2) > max(r2s) and max(rl) > max(rls) and max(rlsum) > max(rlsums):
# torch.save(model, path)
r1s += r1
r2s += r2
rls += rl
rlsums += rlsum
bs += b
trl.append(tr)
tel += te
for epoch in range(model_params["VAL_EPOCHS"]):
predictions, actuals, _ = validate(
epoch, tokenizer, model, device, val_loader
)
final_df = pd.DataFrame(
{"Generated Text": predictions, "Actual Text": actuals}
)
final_df.to_csv(os.path.join(output_dir, "predictions.csv"))
scores = rouge_score.compute(predictions=predictions, references=actuals)
rouge_names = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
rouge_dict = dict((rn, round(scores[rn] * 100, 2)) for rn in rouge_names)
print(rouge_dict)
l = len(r1s)
x = [50 * (i + 1) for i in range(l)]
fig = plt.figure(figsize=(15, 24))
plt.subplot(4, 2, 1)
plt.plot(
x,
r1s,
label="rouge1",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.subplot(4, 2, 2)
plt.plot(
x,
r2s,
label="rouge2",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.subplot(4, 2, 3)
plt.plot(
x,
rls,
label="rougeL",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.subplot(4, 2, 4)
plt.plot(
x,
rlsums,
label="rougeLsum",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.subplot(4, 2, 5)
plt.plot(
x,
bs,
label="BLEU",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.subplot(4, 2, 6)
plt.plot(
x,
tel,
label="Val Loss",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.subplot(4, 2, 7)
plt.plot(
[i for i in range(model_params["TRAIN_EPOCHS"])],
trl,
label="Train Loss",
color="green",
linestyle="dashed",
linewidth=3,
marker="o",
markerfacecolor="blue",
markersize=8,
)
plt.show()
console.log(f"[Saving Model]...\n")
# Saving the model after training
# path = os.path.join(output_dir, "model_files")
# model.save_pretrained(path)
# tokenizer.save_pretrained(path)
# evaluating test dataset
console.log(f"[Initiating Validation]...\n")
# del model
# model = torch.load(path)
console.save_text(os.path.join(output_dir, "logs.txt"))
console.log(f"[Validation Completed.]\n")
console.print(
f"""[Model] Model saved @ {os.path.join(output_dir, "model_files")}\n"""
)
console.print(
f"""[Validation] Generation on Validation data saved @ {os.path.join(output_dir,'predictions.csv')}\n"""
)
console.print(f"""[Logs] Logs saved @ {os.path.join(output_dir,'logs.txt')}\n""")
model_params = {
"MODEL": "facebook/bart-base", # model_type: t5-base/t5-large
"TRAIN_BATCH_SIZE": 8, # training batch size
"VALID_BATCH_SIZE": 8, # validation batch size
"TRAIN_EPOCHS": 4, # number of training epochs
"VAL_EPOCHS": 1, # number of validation epochs
"LEARNING_RATE": 2e-5, # learning rate
"MAX_SOURCE_TEXT_LENGTH": 512, # max length of source text
"MAX_TARGET_TEXT_LENGTH": 150, # max length of target text
"SEED": 42, # set seed for reproducibility
}
T5Trainer(
train_dataset=df_train,
val_dataset=df_test,
source_text="transcription",
target_text="description",
model_params=model_params,
output_dir="/kaggle/working/",
)
|
# # Welcome to import land
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib
from sklearn.neighbors import KNeighborsClassifier
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
import seaborn as sns
import matplotlib.pyplot as pyplt
from sklearn.ensemble import RandomForestClassifier
import os
import glob
from glob import glob
import xgboost as xgb
import keras
from keras.datasets import mnist
from keras.layers import Dense
from keras.models import Sequential
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from random import randint
from sklearn.linear_model import Perceptron
from keras.layers import Dropout
from sklearn.preprocessing import StandardScaler
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelBinarizer
from keras.layers import Dropout
from sklearn.model_selection import GridSearchCV
lb = LabelEncoder()
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
import cv2
import tqdm
from sklearn.utils import shuffle
from PIL import Image
# from torchvision import transforms
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import (
Activation,
Conv2D,
MaxPooling2D,
ZeroPadding2D,
GlobalAveragePooling2D,
Flatten,
)
from tensorflow.keras.layers import BatchNormalization
# # Importing files
# there are two csv's :)
# However, BBox_List_2017.csv only contains bounding-box coordinates [x y w h] for a subset of the images, so it is not used here.
path = "/kaggle/input/data/"
file = path + "Data_Entry_2017.csv"
# print(file)
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=["Unnamed: 11"])
Data_entry
# ## Expanding the diseases
diseases = [
"No Finding",
"Cardiomegaly",
"Emphysema",
"Effusion",
"Hernia",
"Nodule",
"Pneumothorax",
"Atelectasis",
"Pleural_Thickening",
"Mass",
"Edema",
"Consolidation",
"Infiltration",
"Fibrosis",
"Pneumonia",
]
# Create one indicator column per disease
for disease in diseases:
Data_entry[disease] = Data_entry["Finding Labels"].apply(
lambda x: 1 if disease in x else 0
)
# separate df for target, might use this as the y value
target = Data_entry[diseases].to_numpy()
target
Data_entry.rename(columns={"Finding Labels": "Specific Diagnosis"}, inplace=True)
Data_entry.head(5)
Data_entry["Simple Diagnosis"] = Data_entry["Specific Diagnosis"].str.split("|").str[0]
Data_entry
simpleDiseases = [
"(s)No Finding",
"(s)Cardiomegaly",
"(s)Emphysema",
"(s)Effusion",
"(s)Hernia",
"(s)Nodule",
"(s)Pneumothorax",
"(s)Atelectasis",
"(s)Pleural_Thickening",
"(s)Mass",
"(s)Edema",
"(s)Consolidation",
"(s)Infiltration",
"(s)Fibrosis",
"(s)Pneumonia",
]
for disease in diseases:
Data_entry["(s)" + disease] = Data_entry["Simple Diagnosis"].apply(
lambda x: 1 if disease in x else 0
)
Data_entry.head(10)
simpleTarget = Data_entry[simpleDiseases].to_numpy()
simpleTarget
simpleTarget.shape
# # EDA (slightly)
sns.pairplot(pd.read_csv("/kaggle/input/data/Data_Entry_2017.csv"))
sns.violinplot(x="Patient Gender", y="Patient Age", data=Data_entry)
# i smell outliers
Data_entry[Data_entry["Patient Age"] > 120]
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([148], 14)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([149], 14)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([150], 15)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([151], 15)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([152], 15)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([153], 15)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([154], 15)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([155], 15)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([411], 41)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([412], 41)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([413], 41)
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace([414], 41)
Data_entry[Data_entry["Patient Age"] > 120]
sns.violinplot(
x="Cardiomegaly", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Emphysema", y="Patient Age", hue="Patient Gender", data=Data_entry, split=True
)
sns.violinplot(
x="Effusion", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(x="Patient Gender", y="Patient Age", data=Data_entry)
sns.violinplot(
x="Hernia", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Nodule", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Pneumothorax", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Atelectasis", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Pleural_Thickening",
y="Patient Age",
data=Data_entry,
hue="Patient Gender",
split=True,
)
sns.violinplot(
x="Mass", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Edema", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Consolidation",
y="Patient Age",
data=Data_entry,
hue="Patient Gender",
split=True,
)
sns.violinplot(
x="Infiltration", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Fibrosis", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Pneumonia", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
plt.figure(figsize=(10, 10))
plt.xlabel("Disease")
plt.ylabel("No. of Patients")
ax = Data_entry[diseases].sum().sort_values(ascending=False).plot(kind="bar")
# # Image stuff
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
Data_entry["Path"] = Data_entry["Image Index"].map(all_image_paths.get)
files_list = Data_entry["Path"].tolist()
Data_entry
def show_image(img_path):
f = plt.figure(figsize=(20, 8))
s1 = f.add_subplot(1, 2, 1)
row = Data_entry[Data_entry["Path"] == img_path]
ID = int(row["Patient ID"])
age = int(row["Patient Age"])
gender = str(row["Patient Gender"].item())
diagnosis = str(row["Specific Diagnosis"].item())
s1.set_title(
f"Patient's Image\nPatient ID: {ID}\nPatient Age: {age}\nPateint Gender: {gender}\nSpecific Diagnosis: {diagnosis}"
)
img = cv2.imread(img_path)
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.show()
show_image("../input/data/images_001/images/00000010_000.png")
# yay
# ## Starting to mess with images
images = list(Data_entry["Path"])
train_img, val_img, train_label, val_label = train_test_split(
images,
simpleTarget,
test_size=10000,
random_state=42,
stratify=simpleTarget,
)
Data_entry["Path"]
train_img[0]
# ## Model !??
new = Data_entry[
    (Data_entry["Height]"] == 2500) & (Data_entry["OriginalImage[Width"] == 2048)
]
new
pathArr = new["Path"].to_numpy()
pathArr
targetArr = new[simpleDiseases].to_numpy()
targetArr
train_ds = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/data",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
train_ds.class_names
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
normalization_layer = tf.keras.layers.Rescaling(1.0 / 255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
print(np.min(first_image), np.max(first_image))
test_ds = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/data/",
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
targetArr = new[simpleDiseases].iloc[:4998].to_numpy()
targetArr.shape
val_ds.class_names
X_train, X_test, y_train, y_test = train_test_split(
pathArr, targetArr, test_size=0.25, random_state=42
)
train_ds
y_train
y_train.shape
# # Attempt #1 starts here
img_width = 2048
img_height = 2500
batch_size = 32
num_classes = 15
image_size = (img_height, img_width)
flat_image_size = 5120000
import pathlib
data_dir = pathlib.Path("/kaggle/input/data").with_suffix("")
list_ds = tf.data.Dataset.list_files(str(data_dir / "*/*/*"), shuffle=False)
list_ds = list_ds.shuffle(112120, reshuffle_each_iteration=False)
for f in list_ds.take(5):
print(f.numpy())
class_names = diseases
val_size = int(112120 * 0.25)
X_train = list_ds.skip(val_size)
X_test = list_ds.take(val_size)
for f in X_train.take(2):
print(f.numpy())
print(tf.data.experimental.cardinality(X_train).numpy())
print(tf.data.experimental.cardinality(X_test).numpy())
def get_label(file_path):
# image = file_path.str.split("images/").str[0]
file_name = tf.strings.split(file_path, os.path.sep)[-1]
row = Data_entry.loc[Data_entry["Image Index"] == file_name]
s_label = row["Simple Diagnosis"]
e_label = row[simpleDiseases].to_numpy()
o_label = row[diseases].to_numpy()
# print(s_label)
return s_label
def decode_img(img):
# Convert the compressed string to a 3D uint8 tensor
img = tf.io.decode_jpeg(img, channels=3)
# Resize the image to the desired size
return tf.image.resize(img, [img_height, img_width])
def process_path(file_path):
label = get_label(file_path)
# Load the raw data from the file as a string
img = tf.io.read_file(file_path)
img = decode_img(img)
return img, label
process_path("/kaggle/input/data/images_001/images/00000001_002.png")
AUTOTUNE = tf.data.AUTOTUNE
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
train_ds = X_train.map(process_path, num_parallel_calls=AUTOTUNE).cache().prefetch(buffer_size=AUTOTUNE)
val_ds = X_test.map(process_path, num_parallel_calls=AUTOTUNE).cache().prefetch(buffer_size=AUTOTUNE)
for image, label in train_ds.take(1):
print("Image shape: ", image.numpy().shape)
print("Label: ", label.numpy())
# to-do:
# -get_label:
#    should take in a path and return a one-hot vector {0,0,0,0,0,0,1,0,0,0,...} of length 15
# -decode_img
# -process_path
# -dataset.map
# -configure_for_performance
# **label is not being stored**
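# A minimal sketch (not part of the original attempt) of how the label lookup could be made
# graph-compatible, addressing "label is not being stored": build a filename -> row-index hash
# table once in Python, then use only TensorFlow ops inside the mapped function.
# Assumes Data_entry, simpleDiseases, np, os and tf as defined/imported above.
label_keys = tf.constant(Data_entry["Image Index"].astype(str).tolist())
label_values = tf.constant(np.arange(len(Data_entry)), dtype=tf.int64)
filename_to_row = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(label_keys, label_values), default_value=-1
)
label_matrix = tf.constant(Data_entry[simpleDiseases].to_numpy(), dtype=tf.float32)

def get_label_from_table(file_path):
    # Usable inside Dataset.map because it relies only on TF string ops and the table lookup.
    file_name = tf.strings.split(file_path, os.path.sep)[-1]
    row_idx = filename_to_row.lookup(file_name)
    # Note: row_idx == -1 (unknown filename) would gather the last row; guard as needed.
    return tf.gather(label_matrix, row_idx)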
# # Attempt #1 ends here
# # Attempt #2 starts here
data = pd.read_csv("/kaggle/input/data/Data_Entry_2017.csv")
data = data[
data["Patient Age"] < 100
] # removing datapoints which having age greater than 100
data_image_paths = {
    os.path.basename(x): x
    for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
print("Scans found:", len(data_image_paths), ", Total Headers", data.shape[0])
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
data["path"] = Data_entry["Image Index"].map(all_image_paths.get)
files_list = Data_entry["path"].tolist()
data["Patient Age"] = data["Patient Age"].map(lambda x: int(x))
data.sample(3)
data
data["Finding Labels"] = data["Finding Labels"].map(
lambda x: x.replace("No Finding", "")
)
from itertools import chain
all_labels = np.unique(
list(chain(*data["Finding Labels"].map(lambda x: x.split("|")).tolist()))
)
all_labels = [x for x in all_labels if len(x) > 0]
print("All Labels ({}): {}".format(len(all_labels), all_labels))
for c_label in all_labels:
if len(c_label) > 1: # leave out empty labels
data[c_label] = data["Finding Labels"].map(
lambda finding: 1.0 if c_label in finding else 0
)
data.sample(3)
# keep at least 1000 cases
MIN_CASES = 1000
all_labels = [c_label for c_label in all_labels if data[c_label].sum() > MIN_CASES]
print(
"Clean Labels ({})".format(len(all_labels)),
[(c_label, int(data[c_label].sum())) for c_label in all_labels],
)
# since the dataset is very unbiased, we can resample it to be a more reasonable collection
# weight is 0.04 + number of findings
sample_weights = (
data["Finding Labels"].map(lambda x: len(x.split("|")) if len(x) > 0 else 0).values
+ 4e-2
)
sample_weights /= sample_weights.sum()
data = data.sample(40000, weights=sample_weights)
label_counts = data["Finding Labels"].value_counts()[:15]
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
ax1.bar(np.arange(len(label_counts)) + 0.5, label_counts)
ax1.set_xticks(np.arange(len(label_counts)) + 0.5)
_ = ax1.set_xticklabels(label_counts.index, rotation=90)
# creating vector of diseases
data["disease_vec"] = data.apply(lambda x: [x[all_labels].values], 1).map(
lambda x: x[0]
)
data
data.iloc[0]["disease_vec"]
from sklearn.model_selection import train_test_split
train_df, valid_df = train_test_split(
data,
test_size=0.25,
random_state=2018,
stratify=data["Finding Labels"].map(lambda x: x[:4]),
)
print("train", train_df.shape[0], "validation", valid_df.shape[0])
from keras.preprocessing.image import ImageDataGenerator
IMG_SIZE = (128, 128)
core_idg = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
horizontal_flip=True,
vertical_flip=False,
height_shift_range=0.05,
width_shift_range=0.1,
rotation_range=5,
shear_range=0.1,
fill_mode="reflect",
zoom_range=0.15,
)
def flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):
base_dir = os.path.dirname(in_df[path_col].values[0])
print("## Ignore next message from keras, values are replaced anyways")
df_gen = img_data_gen.flow_from_directory(
base_dir, class_mode="sparse", **dflow_args
)
df_gen.filenames = in_df[path_col].values
df_gen.classes = np.stack(in_df[y_col].values)
df_gen.samples = in_df.shape[0]
df_gen.n = in_df.shape[0]
df_gen._set_index_array()
df_gen.directory = "" # since we have the full path
df_gen.filepaths.extend(df_gen.filenames)
print("Reinserting dataframe: {} images".format(in_df.shape[0]))
return df_gen
train_gen = flow_from_dataframe(
core_idg,
train_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=32,
)
valid_gen = flow_from_dataframe(
core_idg,
valid_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=256,
) # we can use much larger batches for evaluation
# used a fixed dataset for evaluating the algorithm
test_X, test_Y = next(
flow_from_dataframe(
core_idg,
valid_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=2048,
)
) # one big batch
# # Attempt #2 ends here
# # Attempt #3 starts here
all_xray_df = pd.read_csv("../input/data/Data_Entry_2017.csv")
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "images*", "*", "*.png"))
}
print("Scans found:", len(all_image_paths), ", Total Headers", all_xray_df.shape[0])
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
all_xray_df["path"] = all_xray_df["Image Index"].map(all_image_paths.get)
files_list = all_xray_df["path"].tolist()
# all_xray_df['Patient Age'] = all_xray_df['Patient Age'].map(lambda x: int(x[:-1]))
all_xray_df.sample(3)
label_counts = all_xray_df["Finding Labels"].value_counts()[:15]
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
ax1.bar(np.arange(len(label_counts)) + 0.5, label_counts)
ax1.set_xticks(np.arange(len(label_counts)) + 0.5)
_ = ax1.set_xticklabels(label_counts.index, rotation=90)
all_xray_df["Finding Labels"] = all_xray_df["Finding Labels"].map(
lambda x: x.replace("No Finding", "")
)
from itertools import chain
all_labels = np.unique(
list(chain(*all_xray_df["Finding Labels"].map(lambda x: x.split("|")).tolist()))
)
all_labels = [x for x in all_labels if len(x) > 0]
print("All Labels ({}): {}".format(len(all_labels), all_labels))
for c_label in all_labels:
if len(c_label) > 1: # leave out empty labels
all_xray_df[c_label] = all_xray_df["Finding Labels"].map(
lambda finding: 1.0 if c_label in finding else 0
)
all_xray_df.sample(3)
# keep at least 1000 cases
MIN_CASES = 1000
all_labels = [
c_label for c_label in all_labels if all_xray_df[c_label].sum() > MIN_CASES
]
print(
"Clean Labels ({})".format(len(all_labels)),
[(c_label, int(all_xray_df[c_label].sum())) for c_label in all_labels],
)
# since the dataset is very unbiased, we can resample it to be a more reasonable collection
# weight is 0.04 + number of findings
sample_weights = (
all_xray_df["Finding Labels"]
.map(lambda x: len(x.split("|")) if len(x) > 0 else 0)
.values
+ 4e-2
)
sample_weights /= sample_weights.sum()
all_xray_df = all_xray_df.sample(40000, weights=sample_weights)
label_counts = all_xray_df["Finding Labels"].value_counts()[:15]
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
ax1.bar(np.arange(len(label_counts)) + 0.5, label_counts)
ax1.set_xticks(np.arange(len(label_counts)) + 0.5)
_ = ax1.set_xticklabels(label_counts.index, rotation=90)
label_counts = 100 * np.mean(all_xray_df[all_labels].values, 0)
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
ax1.bar(np.arange(len(label_counts)) + 0.5, label_counts)
ax1.set_xticks(np.arange(len(label_counts)) + 0.5)
ax1.set_xticklabels(all_labels, rotation=90)
ax1.set_title("Adjusted Frequency of Diseases in Patient Group")
_ = ax1.set_ylabel("Frequency (%)")
all_xray_df["disease_vec"] = all_xray_df.apply(lambda x: [x[all_labels].values], 1).map(
lambda x: x[0]
)
from sklearn.model_selection import train_test_split
train_df, valid_df = train_test_split(
all_xray_df,
test_size=0.25,
random_state=2018,
stratify=all_xray_df["Finding Labels"].map(lambda x: x[:4]),
)
print("train", train_df.shape[0], "validation", valid_df.shape[0])
from keras.preprocessing.image import ImageDataGenerator
IMG_SIZE = (128, 128)
core_idg = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
horizontal_flip=True,
vertical_flip=False,
height_shift_range=0.05,
width_shift_range=0.1,
rotation_range=5,
shear_range=0.1,
fill_mode="reflect",
zoom_range=0.15,
)
def flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):
base_dir = os.path.dirname(in_df[path_col].values[0])
print("## Ignore next message from keras, values are replaced anyways")
df_gen = img_data_gen.flow_from_directory(
base_dir, class_mode="sparse", **dflow_args
)
df_gen.filenames = in_df[path_col].values
df_gen.classes = np.stack(in_df[y_col].values)
df_gen.samples = in_df.shape[0]
df_gen.n = in_df.shape[0]
df_gen._set_index_array()
df_gen.directory = "" # since we have the full path
df_gen.filepaths.extend(df_gen.filenames)
print("Reinserting dataframe: {} images".format(in_df.shape[0]))
return df_gen
train_gen = flow_from_dataframe(
core_idg,
train_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=32,
)
valid_gen = flow_from_dataframe(
core_idg,
valid_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=256,
) # we can use much larger batches for evaluation
# used a fixed dataset for evaluating the algorithm
test_X, test_Y = next(
flow_from_dataframe(
core_idg,
valid_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=1024,
)
) # one big batch
## Ignore next message from keras, values are replaced anyways
t_x, t_y = next(train_gen)
fig, m_axs = plt.subplots(4, 4, figsize=(16, 16))
for c_x, c_y, c_ax in zip(t_x, t_y, m_axs.flatten()):
c_ax.imshow(c_x[:, :, 0], cmap="bone", vmin=-1.5, vmax=1.5)
c_ax.set_title(
", ".join(
[n_class for n_class, n_score in zip(all_labels, c_y) if n_score > 0.5]
)
)
c_ax.axis("off")
# # Attempt #3 ends here
# # Attempt #4 starts here
IMAGE_SIZE = [128, 128]
EPOCHS = 20
# BATCH_SIZE = 8 * strategy.num_replicas_in_sync
BATCH_SIZE = 64
train_df_main = pd.read_csv("../input/chestxray8-dataframe/train_df.csv")
# valid_df = pd.read_csv("nih/valid-small.csv")
# test_df = pd.read_csv("nih/test.csv")
train_df_main.drop(["No Finding"], axis=1, inplace=True)
labels = train_df_main.columns[2:-1]
labels
train_df_main
from sklearn.model_selection import train_test_split
train_df, discard = train_test_split(train_df_main, test_size=0.7, random_state=1993)
train_and_valid_set, test_set = train_test_split(
train_df, test_size=0.2, random_state=1993
)
train_set, valid_set = train_test_split(
train_and_valid_set, test_size=0.2, random_state=1993
)
def check_for_leakage(df1, df2, patient_col):
"""
    Return True if any patients appear in both df1 and df2.
Args:
df1 (dataframe): dataframe describing first dataset
df2 (dataframe): dataframe describing second dataset
patient_col (str): string name of column with patient IDs
Returns:
leakage (bool): True if there is leakage, otherwise False
"""
df1_patients_unique = set(df1[patient_col].values)
df2_patients_unique = set(df2[patient_col].values)
patients_in_both_groups = df1_patients_unique.intersection(df2_patients_unique)
# leakage contains true if there is patient overlap, otherwise false.
leakage = len(patients_in_both_groups) > 0
return leakage
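# A hedged usage sketch of check_for_leakage on the splits created above; the patient-ID
# column name ("Patient ID") is an assumption about train_df.csv and may need adjusting.
print("train/valid leakage:", check_for_leakage(train_set, valid_set, "Patient ID"))
print("train/test leakage:", check_for_leakage(train_set, test_set, "Patient ID"))
print("valid/test leakage:", check_for_leakage(valid_set, test_set, "Patient ID"))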
def get_train_generator(
df,
image_dir,
x_col,
y_cols,
shuffle=True,
batch_size=8,
seed=1,
target_w=320,
target_h=320,
):
"""
Return generator for training set, normalizing using batch
statistics.
Args:
train_df (dataframe): dataframe specifying training data.
image_dir (str): directory where image files are held.
x_col (str): name of column in df that holds filenames.
y_cols (list): list of strings that hold y labels for images.
batch_size (int): images per batch to be fed into model during training.
seed (int): random seed.
target_w (int): final width of input images.
target_h (int): final height of input images.
Returns:
train_generator (DataFrameIterator): iterator over training set
"""
print("getting train generator...")
# normalize images
image_generator = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
shear_range=0.1,
zoom_range=0.15,
rotation_range=5,
width_shift_range=0.1,
height_shift_range=0.05,
horizontal_flip=True,
vertical_flip=False,
fill_mode="reflect",
)
# flow from directory with specified batch size
# and target image size
generator = image_generator.flow_from_dataframe(
dataframe=df,
directory=None,
x_col=x_col,
y_col=y_cols,
class_mode="raw",
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
target_size=(target_w, target_h),
)
return generator
def get_test_and_valid_generator(
valid_df,
test_df,
train_df,
image_dir,
x_col,
y_cols,
sample_size=100,
batch_size=8,
seed=1,
target_w=320,
target_h=320,
):
"""
    Return generators for the validation and test sets using
    normalization statistics from the training set.
Args:
valid_df (dataframe): dataframe specifying validation data.
test_df (dataframe): dataframe specifying test data.
train_df (dataframe): dataframe specifying training data.
image_dir (str): directory where image files are held.
x_col (str): name of column in df that holds filenames.
y_cols (list): list of strings that hold y labels for images.
sample_size (int): size of sample to use for normalization statistics.
batch_size (int): images per batch to be fed into model during training.
seed (int): random seed.
target_w (int): final width of input images.
target_h (int): final height of input images.
Returns:
test_generator (DataFrameIterator) and valid_generator: iterators over test set and validation set respectively
"""
print("getting train and valid generators...")
# get generator to sample dataset
raw_train_generator = ImageDataGenerator().flow_from_dataframe(
dataframe=train_df,
directory=image_dir,
x_col="FilePath",
y_col=labels,
class_mode="raw",
batch_size=sample_size,
shuffle=True,
target_size=(target_w, target_h),
)
# get data sample
batch = raw_train_generator.next()
data_sample = batch[0]
# use sample to fit mean and std for test set generator
image_generator = ImageDataGenerator(
featurewise_center=True, featurewise_std_normalization=True
)
# fit generator to sample from training data
image_generator.fit(data_sample)
    # get valid and test generators
valid_generator = image_generator.flow_from_dataframe(
dataframe=valid_df,
directory=image_dir,
x_col=x_col,
y_col=y_cols,
class_mode="raw",
batch_size=batch_size,
shuffle=False,
seed=seed,
target_size=(target_w, target_h),
)
test_generator = image_generator.flow_from_dataframe(
dataframe=test_df,
directory=image_dir,
x_col=x_col,
y_col=y_cols,
class_mode="raw",
batch_size=batch_size,
shuffle=False,
seed=seed,
target_size=(target_w, target_h),
)
return valid_generator, test_generator
train_generator = get_train_generator(
df=train_set,
image_dir=None,
x_col="FilePath",
y_cols=labels,
batch_size=BATCH_SIZE,
target_w=IMAGE_SIZE[0],
target_h=IMAGE_SIZE[1],
)
valid_generator, test_generator = get_test_and_valid_generator(
valid_df=valid_set,
test_df=test_set,
train_df=train_set,
image_dir=None,
x_col="FilePath",
y_cols=labels,
batch_size=BATCH_SIZE,
target_w=IMAGE_SIZE[0],
target_h=IMAGE_SIZE[1],
)
def get_label(y):
"""
Returns the appended label list of the given set.
y(list) the one hot vector list containing the label encoding.
"""
ret_labels = []
i = 0
for idx in y:
if idx:
ret_labels.append(labels[i])
i += 1
if not ret_labels:
return "No Label"
else:
return "|".join(ret_labels)
# get one batch of images from the imageset
x, y = train_generator.__getitem__(0)
from tqdm import tqdm
# show a set of images along with the labels appended at the top as title.
fig = plt.figure(figsize=(20, 10))
columns = 4
rows = 2
for i in tqdm(range(1, columns * rows + 1)):
fig.add_subplot(rows, columns, i)
plt.imshow(x[i - 1], cmap="gray")
plt.title(get_label(y[i - 1]))
plt.axis(False)
fig.add_subplot
from keras.applications.mobilenet import MobileNet
from keras.layers import (
GlobalAveragePooling2D,
Dense,
Dropout,
Flatten,
Conv2D,
MaxPool2D,
)
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.models import Sequential
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
model = Sequential()
model.add(
Conv2D(
16,
kernel_size=(3, 3),
input_shape=(128, 128, 1),
activation="relu",
padding="same",
)
)
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(pool_size=(2, 2), padding="same"))
model.add(Conv2D(64, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(pool_size=(2, 2), padding="same"))
model.add(Conv2D(128, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(pool_size=(2, 2), padding="same"))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(32, activation="relu"))
model.add(Dense(len(labels), activation="sigmoid"))  # one sigmoid per label for multi-label classification with binary_crossentropy
# optimizer = tf.keras.optimizers.Adam(lr = 0.001)
# model.compile(loss = 'sparse_categorical_crossentropy',optimizer = optimizer,metrics = ['accuracy'])
model.compile(
loss="binary_crossentropy", optimizer="Adam", metrics=["mae", "binary_accuracy"]
)
model.summary()
from keras.callbacks import (
ModelCheckpoint,
LearningRateScheduler,
EarlyStopping,
ReduceLROnPlateau,
)
weight_path = "{}_weights.best.hdf5".format("xray_class")
checkpoint = ModelCheckpoint(
weight_path,
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
save_weights_only=True,
)
early = EarlyStopping(monitor="val_loss", mode="min", patience=3)
callbacks_list = [checkpoint, early]
model.fit(
train_generator,
steps_per_epoch=20,
validation_data=valid_generator,
epochs=5,
callbacks=callbacks_list,
)
# # Attempt #4 ends here
# # Attempt #5 starts here
df = pd.read_csv("../input/data/Data_Entry_2017.csv") # Leer datos como CSV
diseases = [
"Cardiomegaly",
"Emphysema",
"Effusion",
"Hernia",
"Nodule",
"Pneumothorax",
"Atelectasis",
"Pleural_Thickening",
"Mass",
"Edema",
"Consolidation",
"Infiltration",
"Fibrosis",
"Pneumonia",
]
# Create one indicator column per disease
for disease in diseases:
df[disease] = df["Finding Labels"].apply(
lambda x: 1 if disease in x else 0
    )  # Flag whether this pathology is present or not
df
labels = df[diseases].to_numpy()
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}  # Map each image filename to its full path in a dictionary
print("Images found:", len(all_image_paths))
df["Path"] = df["Image Index"].map(
all_image_paths.get
)  # Map the 'Image Index' column to the paths from the dictionary above
files_list = df["Path"].tolist() # Convierte la columna anterior en una lista
labelB = (
df[diseases].sum(axis=1) > 0
).tolist()  # List with one boolean per image: pathology present or not
labelB = np.array(labelB, dtype=int)
del df  # Delete the DataFrame created earlier
df = pd.DataFrame(
{"path": files_list, "labels": labelB}
)  # Create a new DataFrame from the lists defined above
df["labels"] = df["labels"].astype(
str
)  # Cast the label values to strings (as required by flow_from_dataframe)
df.head(100)  # First 100 rows of the DataFrame
from sklearn.model_selection import (
train_test_split,
)  # Function to split the data into training and test sets
df, _ = train_test_split(
df, test_size=0.9, stratify=df["labels"]
)  # Only 10% of the data is used
df_train, df_test = train_test_split(
df, test_size=0.3, stratify=df["labels"], random_state=1993
)  # Split: 70% for train, 30% for test
df_train, df_val = train_test_split(
df_train, test_size=0.3, stratify=df_train["labels"], random_state=1993
)  # From the previous 70%, take 30% for validation
print("Conjunto de entrenamiento:", df_train.shape)
print("Conjunto de validación en entrenamiento:", df_val.shape)
print("Conjunto de de prueba:", df_test.shape)
from tensorflow.keras.preprocessing.image import (
ImageDataGenerator,
)  # Data generator for the model
from tensorflow.keras.preprocessing.image import (
array_to_img,
img_to_array,
load_img,
)  # Image-processing utilities
from ipywidgets import interact
datagen = ImageDataGenerator(
fill_mode="nearest", # Los pixeles fuera de los bordes toman el valor más cercano
rotation_range=20, # Rotación de 20 grados
rescale=1.0 / 255,
) # Reescala la intensidad de las imagenes a valores entre [0,1]
nimag = df.shape[0]  # Number of samples
@interact(nray_num=(0, nimag - 1, 1))
def plot_Data_Augmentation(nray_num):
img_path = df["path"].iloc[nray_num] # Define una muestra
img = load_img(img_path, target_size=(150, 150)) # Carga la imagen
x = img_to_array(img) # Convierte la imagen en numpy array con tamaño (150, 150, 3)
x = x.reshape((1,) + x.shape) # Numpy array con tamaño (1, 150, 150, 3)
# Se generarán imagenes aleatorias indefinidas por lo que se utiliza 'break' para detener la iteración
i = 0
plt.figure(figsize=(20, 5))
for batch in datagen.flow(x, batch_size=1):
plt.subplot(1, 5, i + 1)
imgplot = plt.imshow(array_to_img(batch[0]))
plt.axis("off")
i += 1
if i % 5 == 0:
break
plt.show()
train_datagen = ImageDataGenerator(rescale=1.0 / 255) # ,
# rotation_range=40,
# width_shift_range=0.2,
# height_shift_range=0.2, ## different data augmentations
# shear_range=0.2,
# zoom_range=0.2,
# horizontal_flip=True)
# The test generator does not need data augmentation
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
img_size = 128  # Desired image size
batch_size = 128  # Number of samples fed to the model at each iteration
train_generator = train_datagen.flow_from_dataframe(
df_train,
x_col="path",
y_col="labels",
target_size=(
img_size,
img_size,
    ),  # Define the generator with the required arguments
batch_size=batch_size,
color_mode="grayscale",
class_mode="binary",
)
validation_generator = test_datagen.flow_from_dataframe(
df_val,
x_col="path",
y_col="labels",
target_size=(img_size, img_size),
batch_size=batch_size,
color_mode="grayscale",
class_mode="binary",
)
from tensorflow.keras import layers  # Layers for the architecture
from tensorflow.keras import Model  # Defines the architecture as a model
from tensorflow.keras.optimizers import RMSprop, Adam, SGD  # Different optimizers
# The input is defined as a (128x128x1) grayscale image
img_input = layers.Input(shape=(img_size, img_size, 1))
# First convolution extracts 16 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(16, 3, activation="relu")(img_input)
x = layers.MaxPooling2D(2)(x)
# Second convolution extracts 32 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(2)(x)
# Third convolution extracts 64 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Convolution2D(64, 3, activation="relu")(x)
x = layers.MaxPooling2D(2)(x)
# Flatten feature map to a 1-dim tensor
x = layers.Flatten()(x)
# Create a fully connected layer with ReLU activation and 512 hidden units
x = layers.Dense(512, activation="relu")(x)
# Add a dropout rate of 0.5
x = layers.Dropout(0.5)(x)
# Create output layer with a single node and sigmoid activation
output = layers.Dense(1, activation="sigmoid")(x)
# Configure and compile the model
model = Model(img_input, output)
model.compile(
loss="binary_crossentropy", optimizer=Adam(learning_rate=0.001), metrics=["acc"]
)
model.summary()
from tensorflow.keras.utils import (
plot_model,
)  # Utility to visualize the model graphically
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
history = model.fit(
train_generator,
epochs=10,
batch_size=batch_size,
validation_data=validation_generator,
verbose=2,
)  # Additional training information (verbose=2)
history.history
# Training and validation accuracy
acc = history.history["acc"]
val_acc = history.history["val_acc"]
# Training and validation loss
loss = history.history["loss"]
val_loss = history.history["val_loss"]
loss
# # Number of epochs
# epochs = range(len(acc))
# # Accuracy vs. epochs plot
# plt.plot(epochs, acc)
# plt.plot(epochs, val_acc)
# plt.title('Training and validation accuracy')
# plt.figure()
# # Loss vs. epochs plot
# plt.plot(epochs, loss)
# plt.plot(epochs, val_loss)
# plt.title('Training and validation loss')
# # Attempt #5 ends here
model = Sequential()
model.add(Flatten(input_shape=(img_height, img_width, 3)))  # flatten each image before the dense layers
model.add(Dense(units=512, activation="sigmoid"))
model.add(Dense(units=512, activation="sigmoid"))
model.add(Dense(units=num_classes, activation="softmax"))
model.build()
model.summary()
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(train_ds, validation_data=val_ds, epochs=5, verbose=True)
loss, accuracy = model.evaluate(val_ds, verbose=False)
# Again, do some formatting
# Except we do not flatten each image into a 784-length vector because we want to perform convolutions first
X_train = X_train.reshape(
6512, 1, 1, 1
) # add an additional dimension to represent the single-channel
X_test = X_test.reshape(2171, 1, 1, 1)
X_train = X_train.astype("str") # change integers to 32-bit floating point numbers
X_test = X_test.astype("str")
model = Sequential() # Linear stacking of layers
# Convolution Layer 1
model.add(
    Conv2D(32, (3, 3), input_shape=(2500, 2048, 1))
)  # 32 different 3x3 kernels -- so 32 feature maps; input_shape is (height, width, channels)
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
convLayer01 = Activation("relu") # activation
model.add(convLayer01)
# Convolution Layer 2
model.add(Conv2D(32, (3, 3))) # 32 different 3x3 kernels -- so 32 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
model.add(Activation("relu")) # activation
convLayer02 = MaxPooling2D(pool_size=(2, 2)) # Pool the max values over a 2x2 kernel
model.add(convLayer02)
# Convolution Layer 3
model.add(Conv2D(64, (3, 3))) # 64 different 3x3 kernels -- so 64 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
convLayer03 = Activation("relu") # activation
model.add(convLayer03)
# Convolution Layer 4
model.add(Conv2D(64, (3, 3))) # 64 different 3x3 kernels -- so 64 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
model.add(Activation("relu")) # activation
convLayer04 = MaxPooling2D(pool_size=(2, 2)) # Pool the max values over a 2x2 kernel
model.add(convLayer04)
model.add(Flatten())  # Flatten the final feature maps into a 1-D vector
# Fully Connected Layer 5
model.add(Dense(512)) # 512 FCN nodes
model.add(BatchNormalization()) # normalization
model.add(Activation("relu")) # activation
# Fully Connected Layer 6
model.add(Dropout(0.2)) # 20% dropout of randomly selected nodes
model.add(Dense(10)) # final 10 FCN nodes
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(train_ds, validation_data=val_ds, epochs=3)
train_generator = gen.flow(X_train, y_train, batch_size=128)
test_generator = test_gen.flow(X_test, y_test, batch_size=128)
model.fit(
train_generator,
steps_per_epoch=60000 // 128,
epochs=5,
verbose=1,
validation_data=test_generator,
validation_steps=10000 // 128,
)
|
# # Import Library
# Data load
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
# Preprocessing
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.preprocessing import MinMaxScaler, OrdinalEncoder
# Modeling
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
# # Loading Data
# ### Problem Statement
# House price prediction has been an important area of research and practical application in real estate and finance for many years. Understanding the factors that contribute to a property's value and being able to accurately predict its price is crucial for real estate agents, buyers, and sellers to make informed decisions.
# In recent years, the development of machine learning algorithms and big data technology has revolutionized the way we analyze and predict home prices. These technologies allow us to process and analyze large amounts of data, including information on property characteristics, local market trends, economic indicators, and demographic factors, to produce more accurate and reliable price predictions.
# In this project, I aim to develop a machine learning model that can accurately predict the price of residential properties based on various features such as location, size, number of rooms, amenities, and other relevant factors. The goal is to create a model that can estimate the value of a house based on its characteristics.
pd.set_option("display.max_columns", None)
# Loading Data
df_train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
df_train.head(20)
df_train.tail(20)
df_train.info()
df_train.describe()
pd.set_option("display.max_rows", None)
df_train.isna().sum()
# Too many missing values in categorical columns such as `Alley, FireplaceQu, PoolQC, Fence, MiscFeature`. These columns are unlikely to be useful, so we can drop them and take a look at the other columns that have missing values.
# # Exploratory Data Analysis
df_train.head()
plt.figure(figsize=(30, 20))
sns.heatmap(df_train.corr(), annot=True, cmap="coolwarm")
plt.show()
sns.displot(
data=df_train, x=df_train["SalePrice"], bins=50, kde=True, height=5, aspect=2
)
plt.title("Sale Price")
plt.show()
print("-" * 250)
fig = plt.figure(figsize=(11, 5))
res = stats.probplot(df_train["SalePrice"], plot=plt)
plt.show()
# The target variable is not normally distributed; it is right-skewed. So we need to transform this variable to make it more normally distributed.
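# A minimal sketch of one such transform (not applied to the modeling pipeline below):
# log1p usually reduces the right skew of SalePrice.
log_price = np.log1p(df_train["SalePrice"])
print("Skew before:", round(df_train["SalePrice"].skew(), 3), "| after log1p:", round(log_price.skew(), 3))
sns.displot(log_price, bins=50, kde=True, height=5, aspect=2)
plt.title("log1p(Sale Price)")
plt.show()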
df_hist = df_train.select_dtypes(include=np.number)
df_hist.hist(figsize=(30, 20), bins=50, xlabelsize=8, ylabelsize=8)
# From the histogram plots above, almost all columns have a skewed distribution, although a few columns are roughly normally distributed.
# Scatter plots for the top 3 correlations
plt.subplots(figsize=(20, 8))
plt.subplot(1, 3, 1)
sns.scatterplot(
data=df_train, x=df_train["SalePrice"], y=df_train["OverallQual"], color="red"
)
plt.subplot(1, 3, 2)
sns.scatterplot(
data=df_train, x=df_train["SalePrice"], y=df_train["GrLivArea"], color="blue"
)
plt.subplot(1, 3, 3)
sns.scatterplot(
data=df_train, x=df_train["SalePrice"], y=df_train["GarageCars"], color="green"
)
plt.show()
# OverallQual: Rates the overall material and finish of the house 1-10
# GrLivArea: Above grade (ground) living area square feet
# GarageCars: Size of garage in car capacity
# From features like `OverallQual, GrLivArea, GarageCars` we can conclude that the higher the feature value, the higher the selling price tends to be.
# However, the largest GarageCars value does not add much, since a 4-car garage is bigger than what most buyers need.
# # Data Preprocessing
df_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
df_test.head()
# ## Handle Missing value
df_train_cleaned = df_train.drop(
columns=["Id", "Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"]
)
df_test_cleaned = df_test.drop(
columns=["Id", "Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"]
)
cols_with_missing = df_train_cleaned.columns[df_train_cleaned.isnull().any()]
col_test_with_missing = df_test_cleaned.columns[df_test_cleaned.isnull().any()]
print(f"Columns that contain missing values: {cols_with_missing}")
def imputation(dataframe, cols):
mode_val = dataframe[cols].mode()
if mode_val.empty:
# Handle empty mode value DataFrame
return dataframe
else:
# fill missing value with mode
mode_val = mode_val.iloc[0]
dataframe[cols] = dataframe[cols].fillna(mode_val)
return dataframe
df_train_cleaned = imputation(df_train_cleaned, cols_with_missing)
df_test_cleaned = imputation(df_test_cleaned, col_test_with_missing)
df_test_cleaned.isna().sum()
# ## Handling Outlier
# Outliers are extreme values that differ significantly from the other values in the dataset. In regression analysis, outliers can lead to inaccurate and unreliable predictions, so handling them is necessary.
# Outliers can affect regression results in different ways, depending on where they sit in the data. Outliers in the low or high x regions can strongly influence the estimated regression coefficients and lead to inaccurate results; they can also degrade the overall quality and reliability of the regression model.
# The IQR (Interquartile Range) method is used for handling outliers in data that has an abnormal distribution. This method uses the distance between the first quartile (Q1) and the third quartile (Q3) as a measure of the distance between the outlier data and the median value in the dataset.
# The IQR method is performed by calculating the distance between the first quartile and the third quartile, then multiplying it by a multiplier value k (usually k=1.5) to determine the lower bound and upper bound to identify outlier data.
# define a function that returns the IQR-based limits
def limit(data, variable):
    IQR = data[variable].quantile(0.75) - data[variable].quantile(0.25)
    lower_limit = data[variable].quantile(0.25) - (IQR * 1.5)
    upper_limit = data[variable].quantile(0.75) + (IQR * 1.5)
    return lower_limit, upper_limit
# determine the lower and upper limits for the SalePrice column
lower_sale, upper_sale = limit(df_train_cleaned, "SalePrice")
# remove outliers
df_no_outliers = df_train_cleaned[
(df_train_cleaned.SalePrice > lower_sale)
& (df_train_cleaned.SalePrice < upper_sale)
]
print(f"Jumlah row dan kolom : {df_no_outliers.shape}")
print(f"Jumlah outlier pada kolom age : {len(df_train_cleaned)-len(df_no_outliers)}")
# Probability plot after handling outlier
stats.probplot(df_no_outliers["SalePrice"], plot=plt)
# After handling outliers with the IQR method, the target variable is closer to a normal distribution.
# ## Feature Engineering
# Split column between numerical and categorical for feature engineering
num_columns = df_train_cleaned.select_dtypes(include=np.number).columns.tolist()
cat_columns = df_train_cleaned.select_dtypes(include=["object"]).columns.tolist()
num_columns
# Data by Dtypes
data_numeric = df_no_outliers[num_columns]
data_categoric = df_no_outliers[cat_columns]
data_numeric.head()
# ### Find out correlation by numeric values
# The Spearman and Pearson methods are two commonly used methods in correlation analysis. Both methods have their own advantages and disadvantages, and the choice of method depends on the type of data and the purpose of the analysis to be achieved.
# According to `Kendall and Stuart in their book "The Advanced Theory of Statistics" (1958)`, the Spearman method is more suitable for data that does not have a normal distribution, while the Pearson method is more suitable for data that has a normal distribution. However, both methods can produce the same results if the data has a normal distribution.
# So in this case I used the Spearman method because most of the features were not normally distributed.
plt.figure(figsize=(30, 16))
sns.heatmap(data_numeric.corr(method="spearman"), annot=True, cmap="coolwarm")
plt.show()
# In this case, the columns that have a high correlation include:
# `LotFrontage, LotArea, OverallQual, YearBuilt, YearRemodAdd, MasVnrArea, BsmtFinSF1, TotalBsmtSF, 1stFlrSF, 2ndFlrSF, GrLivArea, FullBath, HalfBath, BedroomAbvGr, TotRmsAbvGrd, Fireplaces, GarageYrBlt, GarageCars, WoodDeckSF, OpenPorchSF, EnclosedPorch.`
# ### Find out correlation by categorical values using ANOVA
# ANOVA (Analysis of Variance) is a statistical analysis method used to compare the means of several independent groups of data. This method was first introduced by a statistician named Ronald A. Fisher in 1925.
# According to Fisher, ANOVA aims to test the null hypothesis that there is no significant difference between the means of the groups being compared. Fisher also developed the F-ratio method used in ANOVA to calculate the variance between groups (between variance) and the variance within groups (within variance), and compare the two variances to determine whether the difference between groups is statistically significant or not.
# In this case I used ANOVA to determine which features can be used for training during modeling.
# Features whose ANOVA P_value is greater than 0.05 are dropped.
data_categoric.head(10)
# Encoding for ANOVA
oe = OrdinalEncoder()
cat = oe.fit_transform(data_categoric)
cat
target = df_no_outliers["SalePrice"]
anova = SelectKBest(score_func=f_regression, k=30)
anova.fit_transform(cat, target)
anova_score = pd.DataFrame(
{"Anova_score": anova.scores_, "P_value_anova": anova.pvalues_},
index=data_categoric.columns,
)
anova_score.sort_values(by=["P_value_anova"], ascending=False)
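# Sketch: the P_value rule stated above could also be applied programmatically instead of
# hand-picking columns (uses the anova_score dataframe computed above).
selected_cat = anova_score[anova_score["P_value_anova"] < 0.05].index.tolist()
print(len(selected_cat), "categorical features kept:", selected_cat)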
# From the ANOVA test, the columns with P_value below 0.05 (kept for modeling) are:
# `['MSZoning', 'LotShape', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual',
# 'BsmtExposure', 'BsmtFinType1', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'SaleCondition']`
# ### Choosing important Feature after Correlation and ANOVA Testing
df_no_outliers.head()
num_feature = [
"LotFrontage",
"LotArea",
"OverallQual",
"YearBuilt",
"YearRemodAdd",
"MasVnrArea",
"BsmtFinSF1",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"GrLivArea",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageYrBlt",
"GarageCars",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
]
cat_feature = [
"MSZoning",
"LotShape",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtExposure",
"BsmtFinType1",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PavedDrive",
"SaleCondition",
]
# ## Scaling & Encoding
# Scaling is the process of changing the range of values of features or variables in a dataset into a smaller or larger range of values. The goal is to ensure that each variable has an equal contribution to the formation of the machine learning model, without affecting the significance of other variables.
# Encoding is the process of converting the value or category of a variable into a form that can be processed by machine learning algorithms. In the context of classification, encoded variables are often categorical or nominal variables, which cannot be processed directly by machine learning algorithms.
# - Jason Brownlee. (2020). Machine Learning Mastery. Data Preparation for Machine Learning: Encoding.
# - Raschka, S., & Mirjalili, V. (2017). Python Machine Learning (2nd Ed.). Packt Publishing.
#
minmax = MinMaxScaler()
train_scaled = minmax.fit_transform(df_no_outliers[num_feature])
test_scaled = minmax.transform(df_test_cleaned[num_feature])
test_scaled
oe = OrdinalEncoder()
train_encoded = oe.fit_transform(df_no_outliers[cat_feature])
test_encoded = oe.transform(df_test_cleaned[cat_feature])
test_encoded
# #### Concatenate after scaling and encoding
X_train_final = np.concatenate((train_scaled, train_encoded), axis=1)
X_test_final = np.concatenate((test_scaled, test_encoded), axis=1)
y_train = df_no_outliers["SalePrice"]
y_train.reset_index(drop=True, inplace=True)
# # Model Definition
# Define the Model
rf = RandomForestRegressor()
gb = GradientBoostingRegressor()
rf.fit(X_train_final, y_train)
gb.fit(X_train_final, y_train)
rf_pred = rf.predict(X_train_final)
gb_pred = gb.predict(X_train_final)
# ## Comparison Actual and Prediction Data
a = pd.DataFrame(rf_pred, columns=["prediction"])
comparison_rf = pd.concat([y_train, a], axis=1)
comparison_rf.head(10)
b = pd.DataFrame(gb_pred, columns=["prediction"])
comparison_gb = pd.concat([y_train, b], axis=1)
comparison_gb.head(10)
# # Model Evaluation
# Evaluation Random Forest
mae = mean_absolute_error(y_train, rf_pred)
r2 = r2_score(y_train, rf_pred)
mse = mean_squared_error(y_train, rf_pred)
# Evaluation Gradient Boosting
mae_gb = mean_absolute_error(y_train, gb_pred)
r2_gb = r2_score(y_train, gb_pred)
mse_gb = mean_squared_error(y_train, gb_pred)
data = {
"Model": ["Random Forest", "Gradient Boosting"],
"Mean Absolute Error (MAE)": [mae, mae_gb],
"R-Squared (R2)": [r2, r2_gb],
"Mean Squared Error (MSE)": [mse, mse_gb],
}
# Create a dataframe from the dictionary
df = pd.DataFrame(data)
df
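# Note: the scores above are computed on the same data the models were fit on, so they
# are optimistic. A quick hedged sketch of a cross-validated check on the same features
# (5-fold, scoring with negative MAE, which scikit-learn maximises):
from sklearn.model_selection import cross_val_score

cv_mae_rf = -cross_val_score(
    RandomForestRegressor(), X_train_final, y_train, cv=5,
    scoring="neg_mean_absolute_error",
).mean()
print("Random Forest 5-fold CV MAE:", cv_mae_rf)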
# # Submission
df_test_cleaned.head()
ids = df_test.pop("Id")
result = rf.predict(X_test_final)
df = pd.DataFrame({"Id": ids, "SalePrice": result.squeeze()})
df.head(10)
# and write to output
df.to_csv("final_submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# import warnings
import warnings
# filter warnings
warnings.filterwarnings("ignore")
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
data1 = pd.read_csv(r"../input/digit-recognizer/train.csv", dtype=np.float32)
data1.shape
data1.info()
data1.head(10)
data1.columns.values
data1.label.values
targets_numpy = data1.label.values  # our labels, digits 0-9
features_numpy = data1.loc[:, data1.columns != "label"].values / 255  # our pixel values, scaled to [0, 1]
plt.imshow(features_numpy[10].reshape(28, 28))
plt.axis("off")
plt.title(str(targets_numpy[10]))
plt.savefig("graph.png")
plt.show()
features_numpy.shape[1]
t = targets_numpy.reshape(42000, 1)
plt.imshow(features_numpy[21].reshape(28, 28))
t[21]
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential # initialize neural network library
from keras.layers import Dense # build our layers library
def build_classifier():
classifier = Sequential() # initialize neural network
classifier.add(
Dense(
units=16,
kernel_initializer="uniform",
activation="relu",
input_dim=features_numpy.shape[1],
)
)
classifier.add(Dense(units=8, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=4, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=2, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid"))
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )  # an attempt to treat the task as binary
return classifier
classifier = KerasClassifier(build_fn=build_classifier, epochs=120)
accuracies = cross_val_score(estimator=classifier, X=features_numpy, y=t, cv=3)
mean = accuracies.mean()
variance = accuracies.std()
print("Accuracy mean: " + str(mean))
print("Accuracy variance: " + str(variance))
|
import numpy as np
import matplotlib.pyplot as plt
class Helper_functions:
def Acceleration(position, mass, G):
"""
Calculate the acceleration on each particle due to Newton's Law
        position is an N x 3 matrix of positions
        mass is an N x 1 vector of masses
        G is Newton's gravitational constant
        (no softening length is applied here; a sketch adding one follows the simulation run below)
        returns acceleration, an N x 3 matrix of accelerations
"""
# positions r = [x,y,z] for all particles
x = position[:, 0:1]
y = position[:, 1:2]
z = position[:, 2:3]
# dx, dy, dz store all pairwise particle separations
dx = x.T - x
dy = y.T - y
dz = z.T - z
        # inv_r3 stores 1/r^3 for all pairwise particle separations
inv_r3 = dx**2 + dy**2 + dz**2
inv_r3[inv_r3 > 0] = inv_r3[inv_r3 > 0] ** (-1.5)
ax = G * (dx * inv_r3) @ mass
ay = G * (dy * inv_r3) @ mass
az = G * (dz * inv_r3) @ mass
# All the acceleration components
acceleration = np.hstack((ax, ay, az))
return acceleration
class N_body_simulator:
def Simulate(N, t, t_end, dt, position, velocity, mass, G=1, plot=True):
# Converting to COM frame
velocity -= np.mean(mass * velocity, 0) / np.mean(mass)
# calculate initial gravitational accelerations
acceleration = Helper_functions.Acceleration(position, mass, G)
# number of timesteps
Nt = int(np.ceil(t_end / dt))
# save particle orbits for plotting trails
position_save = np.zeros((N, 3, Nt + 1))
        position_save[:, :, 0] = position  # store the initial positions
t_all = np.arange(Nt + 1) * dt
# Running the simulation loop
for i in range(Nt):
velocity += acceleration * dt / 2.0
# update position
position += velocity * dt
# update accelerations
acceleration = Helper_functions.Acceleration(position, mass, G)
# update velocities
velocity += acceleration * dt / 2.0
# update time
t += dt
# save positions
position_save[:, :, i + 1] = position
# Plotting the particle trajectories
if plot or (i == Nt - 1):
xx = position_save[:, 0, max(i - 50, 0) : i + 1]
yy = position_save[:, 1, max(i - 50, 0) : i + 1]
plt.scatter(xx, yy, s=1, color=[0.7, 0.7, 1])
plt.scatter(position[:, 0], position[:, 1], s=10, color="black")
plt.xticks([])
plt.yticks([])
plt.pause(0.001)
N = 10
t = 0
tEnd = 5
dt = 0.01
mass = 20 * np.ones((N, 1)) / N
pos = np.random.randn(N, 3)
vel = np.random.randn(N, 3)
N_body_simulator.Simulate(N, t, tEnd, dt, pos, vel, mass)
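# The Acceleration docstring mentions a softening length, which the implementation above
# does not use. A hedged sketch of how one could be folded in, replacing 1/r^3 by
# 1/(r^2 + eps^2)^(3/2) so the force stays finite when two particles get very close:
def acceleration_with_softening(position, mass, G=1.0, softening=0.1):
    x, y, z = position[:, 0:1], position[:, 1:2], position[:, 2:3]
    dx, dy, dz = x.T - x, y.T - y, z.T - z
    # the softening term keeps the denominator away from zero; diagonal terms still
    # contribute nothing because dx = dy = dz = 0 there
    inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2) ** (-1.5)
    return np.hstack(
        (G * (dx * inv_r3) @ mass, G * (dy * inv_r3) @ mass, G * (dz * inv_r3) @ mass)
    )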
|
import tensorflow as tf
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cv2
import random
import os
from tensorflow.keras import layers
from tensorflow.keras.layers import *
from tensorflow.keras.models import Sequential
print(tf.__version__)
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
path = "/kaggle/input/animals10/raw-img"
names = []
nums = []
data = {"Name of class": [], "Number of samples": []}
for i in os.listdir(path):
nums.append(len(os.listdir(path + "/" + i)))
names.append(i)
num_classes = len(names)
data["Name of class"] += names
data["Number of samples"] += nums
df = pd.DataFrame(data)
df
shape = (224, 224)
# image_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255 , rotation_range=20,
# width_shift_range=0.2,
# height_shift_range=0.2,
# horizontal_flip=True, validation_split=0.2)
train_ds = tf.keras.utils.image_dataset_from_directory(
path,
validation_split=0.2,
subset="training",
seed=123,
image_size=shape,
batch_size=32,
)
val_ds = tf.keras.utils.image_dataset_from_directory(
path,
validation_split=0.2,
subset="validation",
seed=123,
image_size=shape,
batch_size=3,
)
translate = {
"cane": "狗",
"cavallo": "马",
"elefante": "大象",
"farfalla": "蝴蝶",
"gallina": "鸡",
"gatto": "猫",
"mucca": "牛",
"pecora": "羊",
"scoiattolo": "松鼠",
"ragno": "蜘蛛",
}
# image_dataset_from_directory sorts labels alphabetically, so sort the translation keys alphabetically here as well
s = sorted(list(translate.keys()))
labels = [translate[x] for x in s]
print(labels)
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
for image_batch, labels_batch in val_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
normalization_layer = layers.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
pretrained = tf.keras.applications.MobileNet(
input_shape=(*shape, 3), weights="imagenet", alpha=0.25
)
pretrained.trainable = False
# pretrained = tf.keras.applications.MobileNetV3Small(
# input_shape=(*shape , 3),
# include_top=False,
# weights='imagenet',
# dropout_rate=0.2,
# alpha=0.75,
# )
# pretrained.trainable = False
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(*shape, 3)))
model.add(
tf.keras.Model(inputs=pretrained.inputs, outputs=pretrained.layers[-5].output)
)
model.add(tf.keras.layers.Reshape((-1,)))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(num_classes))
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
model.summary()
history = model.fit(train_ds, validation_data=val_ds, epochs=15)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
def representative_dataset():
for _ in range(1000):
yield ([np.random.rand(1, 224, 224, 3).astype(np.float32)])
# Set the optimization flag.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# Enforce integer only quantization
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Provide a representative dataset to ensure we quantize correctly.
converter.representative_dataset = representative_dataset
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
model_tflite = converter.convert()
# Save the TensorFlow Lite model to disk
with open("my_model.tflite", "wb") as f:
f.write(model_tflite)
interpreter = tf.lite.Interpreter(model_path="my_model.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
input_details
output_details
img = cv2.imread("/kaggle/input/animals10/raw-img/gallina/10.jpeg")
img = cv2.resize(img, (224, 224))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2.imread returns BGR; convert to RGB for display
plt.imshow(img)
img = (img.copy().astype("int32") - 128).astype("int8")
print(img.shape)
# img = img.astype("float32")/255.0
# img = img.reshape(( 3, 352, 352))
# img = np.transpose(img, (2, 0, 1))
# print(img.shape)
interpreter.set_tensor(input_details["index"], [img])
interpreter.invoke()
result = interpreter.get_tensor(output_details["index"])
print(result)
m = np.argmax(result)
labels[m]
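# The quantized model returns int8 logits. A hedged sketch of recovering approximate
# float scores from the scale / zero-point stored by the converter (the argmax above is
# unaffected either way):
scale, zero_point = output_details["quantization"]
dequantized_logits = scale * (result.astype(np.float32) - zero_point)
print(dequantized_logits)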
|
# **Task**: It is your job to predict the sales price for each house. For each Id in the test set, you must predict the value of the SalePrice variable.
# **Evaluation**: Submissions are evaluated on Root-Mean-Squared-Error (RMSE) between the logarithm of the predicted value and the logarithm of the observed sales price.
# This notebook has been created after studying [this notebook.](https://www.kaggle.com/ankitverma2010/house-prices-prediction-beginner-to-advanced#Exploratory-Data-Analysis)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Import libraries
# Python imports
import os
# Maths and data imports
import numpy as np
import pandas as pd
# Plot imports
import seaborn as sns
import matplotlib.pyplot as plt
# ML modeling imports
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error as mse
from xgboost import XGBRegressor
sns.set()
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv",
index_col="Id",
)
test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv", index_col="Id"
)
train_df = train.copy()
test_df = test.copy()
# ## Data Preprocessing
train_df.head()
train_df.shape
train_df.info()
# MSSubClass, OverallQual and OverallCond are categorical variables; let's convert them to the object dtype.
train_df["MSSubClass"] = train_df["MSSubClass"].astype("object")
train_df["OverallQual"] = train_df["OverallQual"].astype("object")
train_df["OverallCond"] = train_df["OverallCond"].astype("object")
test_df["MSSubClass"] = test_df["MSSubClass"].astype("object")
test_df["OverallQual"] = test_df["OverallQual"].astype("object")
test_df["OverallCond"] = test_df["OverallCond"].astype("object")
# Let's now separate the categorical and numerical columns
def get_var_dtype_list(df):
cat_cols = []
num_cols = []
for col in df.columns:
if df[col].dtypes == "object":
cat_cols.append(col)
else:
num_cols.append(col)
return (cat_cols, num_cols)
cat_cols_train, num_cols_train = get_var_dtype_list(train_df)
cat_cols_test, num_cols_test = get_var_dtype_list(test_df)
# Let's take the log of SalePrice, since our evaluation metric is RMSE computed on log prices.
train_df["SalePrice"] = np.log(train_df["SalePrice"])
# ## EDA
# In EDA we will analyse data to see if there is any -
# 1. Skewness in features
# 2. Check for missing values and outliers and fix them.
# 3. Check the variability of different features and scale them.
# 4. Check for multicolinearity among multiple exploratory variables (features).
# 5. Check if the response variable is correlated to any/many exploratory variable(s).
# 6. Analyse the target/response variable.
# ### Let's check the distribution of continuous columns
fig, axs = plt.subplots(len(num_cols_train) // 5, 5, figsize=(30, 30))
for col, ax in zip(num_cols_train[:-1], axs.flatten()):
sns.distplot(train_df[col], ax=ax)
ax.set_title(col)
ax.set_xlabel("")
ax.set_ylabel("")
plt.show()
# **Conclusion**
# 1. There is large variation in the scale of each continuous variables, hence, we need to scale them.
# 2. Features such as YearBuilt and GarageYrBlt are left skewed, indicating that more houses were built in later years and hence more garages were also built then. So, we might want to set such variables aside when checking for outliers.
# 3. Exploratory variables - 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'LowQualFinSF', 'BsmtHalfBath', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal' are heavily right skewed.
# Let's further explore these variables.
right_skewed = [
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"LowQualFinSF",
"BsmtHalfBath",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
]
r_skew_desc = train_df[right_skewed].describe().T
r_skew_desc["coef_of_var"] = r_skew_desc["std"] / r_skew_desc["mean"]
r_skew_desc
# We can see a huge variation in a lot of features. For now we can drop the columns with coefficient of variation > 3.
# As the number of missing values is zero or near zero in most of these features, we would like to further analyse them (maybe take the log to reduce the right skew) and embrace the variability.
# But for now let's go with dropping them, and compare the effects in a follow-up iteration.
r_skew_desc[r_skew_desc["coef_of_var"] > 3].T.columns
drop_skew_cols = [
"BsmtFinSF2",
"LowQualFinSF",
"BsmtHalfBath",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
]
train_df.drop(drop_skew_cols, axis=1, inplace=True)
test_df.drop(drop_skew_cols, axis=1, inplace=True)
train_df.columns
# Let's update our list of numerical columns.
cat_cols_train, num_cols_train = get_var_dtype_list(train_df)
cat_cols_test, num_cols_test = get_var_dtype_list(test_df)
# ### Let's check and impute missing values in continuous variables
def get_missing_stats(df, col_list, threshold=0):
total = len(df)
for col in col_list:
null = df[col].isnull().sum()
if null > 0 and null / total >= threshold:
print(col)
if df[col].dtypes == "object":
print(df[col].value_counts())
print(f"Missing values: {null} of {total}")
print(f"Percent missing values: {round((null*100)/total, 2)}%\n")
get_missing_stats(train_df, num_cols_train)
get_missing_stats(test_df, num_cols_test)
# This does not seem to be an extreme case of missing values, so we can simply replace them with the training-set median (the data are highly variable and the median is a robust statistic) in both the training and test sets.
imputer = SimpleImputer(strategy="median")
train_df[num_cols_train[:-1]] = imputer.fit_transform(train_df[num_cols_train[:-1]])
test_df[num_cols_test] = imputer.transform(test_df[num_cols_test])
get_missing_stats(train_df, num_cols_train)
get_missing_stats(test_df, num_cols_test)
# So, the missing values for numerical columns have disappeared. Now, lets hit the categorical columns.
# ### Now, lets check and impute missing values of categorical columns.
get_missing_stats(train_df, cat_cols_train)
get_missing_stats(test_df, cat_cols_test)
# Some columns seem to have quite a number of missing values. Let's analyse them further using bar plots.
fig, axs = plt.subplots(len(cat_cols_train) // 5, 5, figsize=(30, 40))
for col, ax in zip(cat_cols_train, axs.flatten()):
y = train_df[col].value_counts()
ax.bar(y.index, y.values)
ax.set_title(col)
ax.set_xlabel("")
ax.set_ylabel("")
for tick in ax.get_xticklabels():
tick.set_rotation(35)
plt.show()
# Let's just analyse the columns with missing frequency greater than 40%.
get_missing_stats(train_df, cat_cols_train, 0.4)
get_missing_stats(test_df, cat_cols_test, 0.4)
# We can infer two cases from above:
# 1. The houses did not have these features
# 2. The houses did have these features but were not reported.
# However, it seems feasible to assume that not every house surveyed had all 80 features. So, for now we can impute the missing values in columns with missing frequency > 40% with 'N.A', whereas missing values in columns with frequency < 40% are imputed with the most frequent value.
# I think it is safe to hypothesise that the localities in which the houses were surveyed did not have the above features, thus introducing selection bias in data collection, although we cannot be certain of it.
train_df.drop(["PoolQC"], axis=1, inplace=True)
test_df.drop(["PoolQC"], axis=1, inplace=True)
na_cols = ["Alley", "FireplaceQu", "Fence", "MiscFeature"]
na_imputer = SimpleImputer(strategy="constant", fill_value="N.A")
train_df[na_cols] = na_imputer.fit_transform(train_df[na_cols])
test_df[na_cols] = na_imputer.transform(test_df[na_cols])
cat_cols_train, num_cols_train = get_var_dtype_list(train_df)
cat_cols_test, num_cols_test = get_var_dtype_list(test_df)
mf_imputer = SimpleImputer(strategy="most_frequent")
train_df[cat_cols_train] = mf_imputer.fit_transform(train_df[cat_cols_train])
test_df[cat_cols_test] = mf_imputer.transform(test_df[cat_cols_test])
get_missing_stats(train_df, cat_cols_train)
get_missing_stats(test_df, cat_cols_test)
# Finally, we have dealt with missing values. Now, lets check for multicolinearity among exploratory variables.
plt.figure(figsize=(20, 20))
sns.heatmap(train_df.drop(["SalePrice"], axis=1).corr(), annot=True)
plt.show()
cor = train_df.drop(["SalePrice"], axis=1).corr()
for i, col in enumerate(cor.columns):
for row in cor.index[i + 1 :]:
if col != row and cor[col][row] > 0.7:
print(f"({row}, {col}): {cor[col][row]}")
# Hmm... From the plots above we can see that a few variables are highly correlated, and it makes sense for them to be correlated. Hence, for now we might want to keep them.
# ### Scale variables
scaler = StandardScaler()
train_df[num_cols_train[:-1]] = scaler.fit_transform(train_df[num_cols_train[:-1]])
test_df[num_cols_test] = scaler.transform(test_df[num_cols_test])
# ### One-hot encoding of categorical columns
# For now, let's simply one hot encode the categorical columns.
cat_cols_train
cat_cols_train, num_cols_train = get_var_dtype_list(train_df)
cat_cols_test, num_cols_test = get_var_dtype_list(test_df)
train_df = pd.get_dummies(train_df, drop_first=True, columns=cat_cols_train)
test_df = pd.get_dummies(test_df, drop_first=True, columns=cat_cols_test)
train_df.head()
# ### Check train and test for compatibility
# check if both train and test contain same columns
train_df.columns
test_df.columns
compat_list = list(set(train_df.columns).intersection(test_df.columns))
len(compat_list)
train_X, y = train_df[compat_list], train_df["SalePrice"]
test_X = test_df[compat_list]
(train_X.columns == test_X.columns).sum()
# ## Train Test split
train_x, valid_x, train_y, valid_y = train_test_split(
train_X, y, test_size=0.2, random_state=42
)
# ## Model
linear_regressor = LinearRegression()
linear_regressor.fit(train_x, train_y)
preds = linear_regressor.predict(valid_x)
mse(valid_y, preds, squared=False)
models = {"RFR": RandomForestRegressor, "ADR": AdaBoostRegressor, "XGB": XGBRegressor}
def fit_model(name, model, train_ds, valid_ds):
X, y = train_ds
X_val, y_val = valid_ds
model.fit(X, y)
y_hat = model.predict(X)
y_hat_val = model.predict(X_val)
mse_ = mse(y, y_hat, squared=False)
mse_val = mse(y_val, y_hat_val, squared=False)
print(f"Model: {name}, Train MSE: {mse_}, Val MSE: {mse_val}")
n_est = [10, 25, 50, 100, 125]
for i in range(len(n_est)):
print(f"n_estimators: {n_est[i]}")
for name, model in models.items():
model = model(n_estimators=n_est[i])
fit_model(name, model, (train_x, train_y), (valid_x, valid_y))
print("-" * 20)
# `n_estimators=25` seems like a sweet spot for `AdaBoostRegressor` and `XGBRegressor`. Let's ensemble the ensembles.
abr = AdaBoostRegressor(n_estimators=25)
xgr = XGBRegressor(n_estimators=25)
abr.fit(train_x, train_y)
xgr.fit(train_x, train_y)
p1 = abr.predict(valid_x)
p2 = xgr.predict(valid_x)
p3 = linear_regressor.predict(valid_x)
m1 = mse(valid_y, p1, squared=False)
m2 = mse(valid_y, p2, squared=False)
m3 = mse(valid_y, p3, squared=False)
m4 = mse(valid_y, (p1 + p2) / 2, squared=False)
m5 = mse(valid_y, (p1 + p2 + p3) / 3, squared=False)
print(
f"Ensemble MSE: \nABR: {m1}\nXGR: {m2}\nLinear: {m3}\nABR+XGR: {m4}\nABR+XGR+Linear: {m5}"
)
# Let's try them all,
# * linear regressor
# * Ensemble of AdaBoost and XGBoost
# * Ensemble of all of the above
pred_1 = linear_regressor.predict(test_X)
pred_2 = (abr.predict(test_X) + xgr.predict(test_X)) / 2
pred_3 = (2 * pred_2 + pred_1) / 3
def create_submission(preds, name="submission.csv"):
    preds = np.exp(preds)  # invert the np.log transform applied to SalePrice earlier
submission = pd.DataFrame({"Id": test_X.index, "SalePrice": preds})
submission.to_csv(name, index=False)
return submission
create_submission(pred_1, "submission1.csv")
create_submission(pred_2, "submission2.csv")
create_submission(pred_3, "submission3.csv")
|
import numpy as np
import pandas as pd
TRAIN_PATH = "/kaggle/input/titanic/train.csv"
CATEGORY_DELETE_BASESIZE = 20
train = pd.read_csv(TRAIN_PATH)
train.info()
# # 1.delete columns
len(train["Name"].value_counts())
len(train["Sex"].value_counts())
len(train["Cabin"].value_counts())
len(train["Embarked"].value_counts())
train_only_object = train.select_dtypes(include="object")
cate_col = train_only_object.columns.tolist()
cate_col
deleteCol = []
for i in range(len(cate_col)):
if len(train[cate_col[i]].value_counts()) > CATEGORY_DELETE_BASESIZE:
deleteCol.append(cate_col[i])
deleteCol
train_drop = train.drop(columns=deleteCol, axis=1)
train_drop.info()
# # 2.N/A => non N/A
def checkNull_fillData(df):
for col in df.columns:
if len(df.loc[df[col].isnull() == True]) != 0:
if df[col].dtype == "object":
df.loc[df[col].isnull() == True, col] = df[col].mode()[0]
else:
df.loc[df[col].isnull() == True, col] = df[col].mean()
checkNull_fillData(train_drop)
train_drop.info()
# # 3.object => number
train_object = train_drop.select_dtypes(include="object")
train_object.info()
train_object_list = train_object.columns.values.tolist()
train_object_list
train_convert = pd.get_dummies(train_drop, columns=train_object_list)
train_convert.info()
train_convert.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
# ## Show max Rows and Columns
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# ## Read files using pandas
train_df = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
train_df.head()
# ## Check information of dataframe
train_df.info()
# ## Check Shape of a dataframe
train_df.shape
# ## Check for null values
train_df.isnull().sum()
# ## Verifying values which are null
train_df["LotFrontage"].value_counts()
train_df["Alley"].value_counts()
train_df["BsmtQual"].value_counts()
train_df["PoolQC"].value_counts()
train_df["Fence"].value_counts()
train_df["MiscFeature"].value_counts()
train_df["GarageCond"].value_counts()
train_df["GarageCond"].value_counts()
train_df["GarageFinish"].value_counts()
train_df["GarageYrBlt"].value_counts()
train_df["GarageType"].value_counts()
train_df["FireplaceQu"].value_counts()
train_df["Electrical"].value_counts()
train_df["BsmtFinType2"].value_counts()
train_df["BsmtFinType1"].value_counts()
train_df["BsmtExposure"].value_counts()
train_df["BsmtCond"].value_counts()
# ## Imputing the null values
lot_front = train_df["LotFrontage"]
price = train_df["SalePrice"]
plt.figure(figsize=(30, 10))
ax = sns.barplot(x=lot_front, y=price)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
lot_front_null_df = train_df[train_df["LotFrontage"].isnull()]
sns.histplot(data=lot_front_null_df, x=train_df["SalePrice"])
lot_mean = train_df["LotFrontage"].mean()
train_df["LotFrontage"].fillna(lot_mean, inplace=True)
train_df["Alley"].value_counts()
alley_null_df = train_df[train_df["Alley"].notnull()]
alley_null_df
sns.barplot(data=alley_null_df, x=alley_null_df["Alley"], y=alley_null_df["SalePrice"])
for i in alley_null_df["SalePrice"]:
if i < 100000:
train_df["Alley"].fillna("Grvl", inplace=True)
else:
train_df["Alley"].fillna("Pave", inplace=True)
train_df["BsmtQual"].value_counts()
bsmtqual_null_df = train_df[train_df["BsmtQual"].notnull()]
bsmtqual_null_df
sns.barplot(
data=bsmtqual_null_df,
x=bsmtqual_null_df["BsmtQual"],
y=bsmtqual_null_df["SalePrice"],
)
for i in bsmtqual_null_df["SalePrice"]:
if i < 100000:
train_df["BsmtQual"].fillna("Fa", inplace=True)
    elif i > 100000 and i < 150000:
        train_df["BsmtQual"].fillna("TA", inplace=True)
    elif i > 150000 and i < 200000:
train_df["BsmtQual"].fillna("Gd", inplace=True)
elif i > 200000:
train_df["BsmtQual"].fillna("Ex", inplace=True)
train_df["BsmtQual"].isnull().sum()
train_df["BsmtCond"].value_counts()
bsmtcond_null_df = train_df[train_df["BsmtCond"].notnull()]
sns.barplot(
data=bsmtcond_null_df,
x=bsmtcond_null_df["BsmtCond"],
y=bsmtcond_null_df["SalePrice"],
)
for i in bsmtcond_null_df["SalePrice"]:
if i > 50000 and i < 100000:
train_df["BsmtCond"].fillna("Po", inplace=True)
    elif i > 100000 and i < 150000:
        train_df["BsmtCond"].fillna("Fa", inplace=True)
    elif i > 150000 and i < 200000:
train_df["BsmtCond"].fillna("TA", inplace=True)
elif i > 200000:
train_df["BsmtCond"].fillna("Gd", inplace=True)
train_df["BsmtCond"].isnull().sum()
train_df["BsmtExposure"].value_counts()
bsmtexposure_null_df = train_df[train_df["BsmtExposure"].notnull()]
sns.barplot(
data=bsmtexposure_null_df,
x=bsmtexposure_null_df["BsmtExposure"],
y=bsmtexposure_null_df["SalePrice"],
)
for i in bsmtexposure_null_df["SalePrice"]:
if i > 150000 and i < 170000:
train_df["BsmtExposure"].fillna("No", inplace=True)
elif i > 170000 and i < 200000:
train_df["BsmtExposure"].fillna("Mn", inplace=True)
elif i > 200000 and i < 250000:
train_df["BsmtExposure"].fillna("Av", inplace=True)
elif i > 250000:
train_df["BsmtExposure"].fillna("Gd", inplace=True)
train_df["BsmtExposure"].isnull().sum()
bsmtfintype1_null_df = train_df[train_df["BsmtFinType1"].notnull()]
sns.barplot(
data=bsmtfintype1_null_df,
x=bsmtfintype1_null_df["BsmtFinType1"],
y=bsmtfintype1_null_df["SalePrice"],
)
for i in bsmtfintype1_null_df["SalePrice"]:
if i < 150000:
train_df["BsmtFinType1"].fillna("Rec", inplace=True)
elif i > 150000 and i < 200000:
train_df["BsmtFinType1"].fillna("Unf", inplace=True)
elif i > 200000:
train_df["BsmtFinType1"].fillna("GLQ", inplace=True)
train_df["BsmtFinType1"].isnull().sum()
bsmtFinType2_null_df = train_df[train_df["BsmtFinType2"].notnull()]
sns.barplot(
data=bsmtFinType2_null_df,
x=bsmtFinType2_null_df["BsmtFinType2"],
y=bsmtFinType2_null_df["SalePrice"],
)
for i in bsmtFinType2_null_df["SalePrice"]:
if i < 150000:
train_df["BsmtFinType2"].fillna("BLQ", inplace=True)
elif i > 150000 and i < 200000:
train_df["BsmtFinType2"].fillna("Unf", inplace=True)
elif i > 200000:
train_df["BsmtFinType2"].fillna("ALQ", inplace=True)
train_df["BsmtFinType2"].isnull().sum()
electrical_null_df = train_df[train_df["Electrical"].notnull()]
sns.barplot(
data=electrical_null_df,
x=electrical_null_df["Electrical"],
y=electrical_null_df["SalePrice"],
)
train_df["Electrical"].fillna("SBrkr", inplace=True)
FireplaceQu_null_df = train_df[train_df["FireplaceQu"].notnull()]
sns.barplot(
data=FireplaceQu_null_df,
x=FireplaceQu_null_df["FireplaceQu"],
y=FireplaceQu_null_df["SalePrice"],
)
for i in FireplaceQu_null_df["SalePrice"]:
if i < 150000:
train_df["FireplaceQu"].fillna("Po", inplace=True)
elif i > 150000 and i < 200000:
train_df["FireplaceQu"].fillna("Fa", inplace=True)
elif i > 200000 and i < 225000:
train_df["FireplaceQu"].fillna("TA", inplace=True)
elif i > 225000 and i < 250000:
train_df["FireplaceQu"].fillna("Gd", inplace=True)
elif i > 250000:
train_df["FireplaceQu"].fillna("Ex", inplace=True)
train_df["FireplaceQu"].isnull().sum()
GarageType_null_df = train_df[train_df["GarageType"].notnull()]
sns.barplot(
data=GarageType_null_df,
x=GarageType_null_df["GarageType"],
y=GarageType_null_df["SalePrice"],
)
for i in GarageType_null_df["SalePrice"]:
if i > 50000 and i < 110000:
train_df["GarageType"].fillna("CarPort", inplace=True)
elif i > 110000 and i < 150000:
train_df["GarageType"].fillna("Detchd", inplace=True)
elif i > 150000 and i < 200000:
train_df["GarageType"].fillna("Basment", inplace=True)
elif i > 200000 and i < 250000:
train_df["GarageType"].fillna("Attchd", inplace=True)
elif i > 250000:
train_df["GarageType"].fillna("BuiltIn", inplace=True)
train_df["GarageType"].isnull().sum()
GarageYrBlt_null_df = train_df[train_df["GarageYrBlt"].notnull()]
plt.figure(figsize=(25, 10))
ax = sns.barplot(
data=GarageYrBlt_null_df,
x=GarageYrBlt_null_df["GarageYrBlt"],
y=GarageYrBlt_null_df["SalePrice"],
)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
for i in GarageYrBlt_null_df["SalePrice"]:
if i < 200000:
train_df["GarageYrBlt"].fillna(1960.0, inplace=True)
else:
train_df["GarageYrBlt"].fillna(2010.0, inplace=True)
train_df["GarageYrBlt"].isnull().sum()
GarageFinish_null_df = train_df[train_df["GarageFinish"].notnull()]
sns.barplot(
data=GarageFinish_null_df,
x=GarageFinish_null_df["GarageFinish"],
y=GarageFinish_null_df["SalePrice"],
)
for i in GarageFinish_null_df["SalePrice"]:
if i < 150000:
train_df["GarageFinish"].fillna("Unf", inplace=True)
elif i > 150000 and i < 200000:
train_df["GarageFinish"].fillna("RFn", inplace=True)
elif i > 200000:
train_df["GarageFinish"].fillna("Fin", inplace=True)
train_df["GarageFinish"].isnull().sum()
GarageQual_null_df = train_df[train_df["GarageQual"].notnull()]
sns.barplot(
data=GarageQual_null_df,
x=GarageQual_null_df["GarageQual"],
y=GarageQual_null_df["SalePrice"],
)
for i in GarageQual_null_df["SalePrice"]:
if i < 100000:
train_df["GarageQual"].fillna("Po", inplace=True)
elif i > 100000 and i < 150000:
train_df["GarageQual"].fillna("Fa", inplace=True)
elif i > 150000 and i < 200000:
train_df["GarageQual"].fillna("TA", inplace=True)
elif i > 200000 and i < 215000:
train_df["GarageQual"].fillna("Gd", inplace=True)
elif i > 215000:
train_df["GarageQual"].fillna("Ex", inplace=True)
train_df["GarageQual"].isnull().sum()
GarageCond_null_df = train_df[train_df["GarageCond"].notnull()]
sns.barplot(
data=GarageCond_null_df,
x=GarageCond_null_df["GarageCond"],
y=GarageCond_null_df["SalePrice"],
)
for i in GarageCond_null_df["SalePrice"]:
if i > 50000 and i < 150000:
train_df["GarageCond"].fillna("Ex", inplace=True)
else:
train_df["GarageCond"].fillna("TA", inplace=True)
train_df["GarageCond"].isnull().sum()
PoolQC_null_df = train_df[train_df["PoolQC"].notnull()]
sns.barplot(
data=PoolQC_null_df, x=PoolQC_null_df["PoolQC"], y=PoolQC_null_df["SalePrice"]
)
for i in PoolQC_null_df["SalePrice"]:
if i > 100000 and i < 200000:
train_df["PoolQC"].fillna("Gd", inplace=True)
elif i > 200000 and i < 300000:
train_df["PoolQC"].fillna("Fa", inplace=True)
elif i > 300000:
train_df["PoolQC"].fillna("Ex", inplace=True)
train_df["PoolQC"].isnull().sum()
Fence_null_df = train_df[train_df["Fence"].notnull()]
sns.barplot(data=Fence_null_df, x=Fence_null_df["Fence"], y=Fence_null_df["SalePrice"])
for i in Fence_null_df["SalePrice"]:
if i > 100000 and i < 170000:
train_df["Fence"].fillna("MnPrv", inplace=True)
else:
train_df["Fence"].fillna("GdPrv", inplace=True)
train_df["Fence"].isnull().sum()
MiscFeature_null_df = train_df[train_df["MiscFeature"].notnull()]
sns.barplot(
data=MiscFeature_null_df,
x=MiscFeature_null_df["MiscFeature"],
y=MiscFeature_null_df["SalePrice"],
)
for i in MiscFeature_null_df["SalePrice"]:
if i > 50000 and i < 100000:
train_df["MiscFeature"].fillna("Othr", inplace=True)
elif i > 100000 and i < 150000:
train_df["MiscFeature"].fillna("Shed", inplace=True)
elif i > 150000 and i < 170000:
train_df["MiscFeature"].fillna("Gar2", inplace=True)
else:
train_df["MiscFeature"].fillna("TenC", inplace=True)
train_df["MiscFeature"].isnull().sum()
MasVnrType_null_df = train_df[train_df["MasVnrType"].notnull()]
sns.barplot(
data=MasVnrType_null_df,
x=MasVnrType_null_df["MasVnrType"],
y=MasVnrType_null_df["SalePrice"],
)
for i in MasVnrType_null_df["SalePrice"]:
if i > 100000 and i < 150000:
train_df["MasVnrType"].fillna("BrkCmn", inplace=True)
elif i > 150000 and i < 200000:
train_df["MasVnrType"].fillna("None", inplace=True)
elif i > 200000 and i < 250000:
train_df["MasVnrType"].fillna("BrkFace", inplace=True)
else:
train_df["MasVnrType"].fillna("Stone", inplace=True)
MasVnrArea_null_df = train_df[train_df["MasVnrArea"].notnull()]
plt.figure(figsize=(40, 30))
ax = sns.barplot(
data=MasVnrArea_null_df,
x=MasVnrArea_null_df["MasVnrArea"],
y=MasVnrArea_null_df["SalePrice"],
)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
MasVnrArea_mean = train_df["MasVnrArea"].mean()
MasVnrArea_mean
train_df["MasVnrArea"].fillna(MasVnrArea_mean, inplace=True)
a = dict(train_df.dtypes == "object")
l = []
for i in a:
if a[i] == True:
l.append(i)
print(l)
from pycaret.regression import *
data = train_df.sample(frac=0.9, random_state=786)
data_unseen = train_df.drop(data.index)
data.reset_index(drop=True, inplace=True)
data_unseen.reset_index(drop=True, inplace=True)
print("Data for Modeling: " + str(data.shape))
print("Unseen Data For Predictions: " + str(data_unseen.shape))
regression_experiment = setup(
data=data,
ignore_features=["Id"],
numeric_features=[
"MSSubClass",
"LotFrontage",
"LotArea",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"BsmtFullBath",
"BsmtHalfBath",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageYrBlt",
"GarageCars",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
"MoSold",
"YrSold",
],
normalize=True,
transformation=True,
normalize_method="robust",
target="SalePrice",
use_gpu=True,
session_id=123,
)
best = compare_models(exclude=["ransac"])
catboost = create_model("catboost")
tuned_cat = tune_model(catboost)
plot_model(catboost)
plot_model(catboost, plot="error")
plot_model(catboost, plot="feature")
evaluate_model(catboost)
predict_model(catboost)
final_catboost = finalize_model(catboost)
print(final_catboost)
predict_model(final_catboost)
unseen_predictions = predict_model(final_catboost, data=data_unseen)
unseen_predictions.head()
from pycaret.utils import check_metric
check_metric(unseen_predictions.SalePrice, unseen_predictions.Label, "RMSLE")
save_model(final_catboost, "Final house price kaggle catboost Model 03Mar2021")
saved_final_catboost = load_model("Final house price kaggle catboost Model 03Mar2021")
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
test_df = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
test_df.head()
test_df.isnull().sum()
test_df["MSZoning"].value_counts()
MSZoning_null_df = train_df[train_df["MSZoning"].notnull()]
sns.barplot(
data=MSZoning_null_df,
x=MSZoning_null_df["MSZoning"],
y=MSZoning_null_df["SalePrice"],
)
MSZonin_mode = test_df["MSZoning"].mode()[0]
test_df["MSZoning"].fillna(MSZonin_mode, inplace=True)
test_df["MSZoning"].isnull().sum()
lotfront_mean = test_df["LotFrontage"].mean()
test_df["LotFrontage"].fillna(lotfront_mean, inplace=True)
test_df["LotFrontage"].isnull().sum()
test_df["Alley"].value_counts()
alley_mode = test_df["Alley"].mode()[0]
test_df["Alley"].fillna(alley_mode, inplace=True)
test_df["Alley"].isnull().sum()
test_df["Utilities"].value_counts()
uti_mode = test_df["Utilities"].mode()[0]
test_df["Utilities"].fillna(uti_mode, inplace=True)
test_df["Utilities"].isnull().sum()
test_df["Exterior1st"].value_counts()
ext_mode = test_df["Exterior1st"].mode()[0]
test_df["Exterior1st"].fillna(ext_mode, inplace=True)
test_df["Exterior1st"].isnull().sum()
test_df["Exterior2nd"].value_counts()
ext2_mode = test_df["Exterior2nd"].mode()[0]
test_df["Exterior2nd"].fillna(ext2_mode, inplace=True)
test_df["Exterior2nd"].isnull().sum()
test_df["MasVnrType"].value_counts()
masvnr_mode = test_df["MasVnrType"].mode()[0]
test_df["MasVnrType"].fillna(masvnr_mode, inplace=True)
test_df["MasVnrType"].isnull().sum()
test_df["MasVnrArea"].value_counts()
MasVnrArea_mean = test_df["MasVnrArea"].mean()
test_df["MasVnrArea"].fillna(MasVnrArea_mean, inplace=True)
test_df["MasVnrArea"].isnull().sum()
test_df["BsmtQual"].value_counts()
BsmtQual_mode = test_df["BsmtQual"].mode()[0]
test_df["BsmtQual"].fillna(BsmtQual_mode, inplace=True)
test_df["BsmtQual"].isnull().sum()
test_df["BsmtCond"].value_counts()
BsmtCond_mode = test_df["BsmtCond"].mode()[0]
test_df["BsmtCond"].fillna(BsmtCond_mode, inplace=True)
test_df["BsmtCond"].isnull().sum()
test_df["BsmtExposure"].value_counts()
BsmtExposure_mode = test_df["BsmtExposure"].mode()[0]
test_df["BsmtExposure"].fillna(BsmtCond_mode, inplace=True)
test_df["BsmtExposure"].isnull().sum()
test_df["BsmtFinType1"].value_counts()
BsmtFinType1_mode = test_df["BsmtFinType1"].mode()[0]
test_df["BsmtFinType1"].fillna(BsmtFinType1_mode, inplace=True)
test_df["BsmtFinType1"].isnull().sum()
test_df["BsmtFinSF1"].value_counts()
BsmtFinSF1_mean = test_df["BsmtFinSF1"].mean()
test_df["BsmtFinSF1"].fillna(BsmtFinSF1_mean, inplace=True)
test_df["BsmtFinSF1"].isnull().sum()
test_df["BsmtFinType2"].value_counts()
BsmtFinType2_mode = test_df["BsmtFinType2"].mode()[0]
test_df["BsmtFinType2"].fillna(BsmtFinType2_mode, inplace=True)
test_df["BsmtFinType2"].isnull().sum()
test_df["BsmtFinSF2"].value_counts()
BsmtFinSF2_mean = test_df["BsmtFinSF2"].mean()
test_df["BsmtFinSF2"].fillna(BsmtFinSF2_mean, inplace=True)
test_df["BsmtFinSF2"].isnull().sum()
test_df["BsmtUnfSF"].value_counts()
BsmtUnfSF_mean = test_df["BsmtUnfSF"].mean()
test_df["BsmtUnfSF"].fillna(BsmtUnfSF_mean, inplace=True)
test_df["BsmtUnfSF"].isnull().sum()
test_df["TotalBsmtSF"].value_counts()
TotalBsmtSF_mean = test_df["TotalBsmtSF"].mean()
test_df["TotalBsmtSF"].fillna(TotalBsmtSF_mean, inplace=True)
test_df["TotalBsmtSF"].isnull().sum()
test_df["BsmtFullBath"].value_counts()
BsmtFullBath_mode = test_df["BsmtFullBath"].mode()[0]
test_df["BsmtFullBath"].fillna(BsmtFullBath_mode, inplace=True)
test_df["BsmtFullBath"].isnull().sum()
test_df["BsmtHalfBath"].value_counts()
BsmtHalfBath_mode = test_df["BsmtHalfBath"].mode()[0]
test_df["BsmtHalfBath"].fillna(BsmtHalfBath_mode, inplace=True)
test_df["BsmtHalfBath"].isnull().sum()
test_df["KitchenQual"].value_counts()
KitchenQual_mode = test_df["KitchenQual"].mode()[0]
test_df["KitchenQual"].fillna(KitchenQual_mode, inplace=True)
test_df["BsmtHalfBath"].isnull().sum()
test_df["Functional"].value_counts()
Functional_mode = test_df["Functional"].mode()[0]
test_df["Functional"].fillna(Functional_mode, inplace=True)
test_df["Functional"].isnull().sum()
test_df["FireplaceQu"].value_counts()
FireplaceQu_mode = test_df["FireplaceQu"].mode()[0]
test_df["FireplaceQu"].fillna(FireplaceQu_mode, inplace=True)
test_df["FireplaceQu"].isnull().sum()
test_df["GarageType"].value_counts()
GarageType_mode = test_df["GarageType"].mode()[0]
test_df["GarageType"].fillna(GarageType_mode, inplace=True)
test_df["GarageType"].isnull().sum()
test_df["GarageYrBlt"].value_counts()
GarageYrBlt_mode = test_df["GarageYrBlt"].mode()[0]
test_df["GarageYrBlt"].fillna(GarageYrBlt_mode, inplace=True)
test_df["GarageYrBlt"].isnull().sum()
test_df["GarageFinish"].value_counts()
GarageFinish_mode = test_df["GarageFinish"].mode()[0]
test_df["GarageFinish"].fillna(GarageFinish_mode, inplace=True)
test_df["GarageFinish"].isnull().sum()
test_df["GarageCars"].value_counts()
GarageCars_mode = test_df["GarageCars"].mode()[0]
test_df["GarageCars"].fillna(GarageCars_mode, inplace=True)
test_df["GarageCars"].isnull().sum()
test_df["GarageArea"].value_counts()
GarageArea_mean = test_df["GarageArea"].mean()
test_df["GarageArea"].fillna(GarageArea_mean, inplace=True)
test_df["GarageArea"].isnull().sum()
test_df["GarageQual"].value_counts()
GarageQual_mode = test_df["GarageQual"].mode()[0]
test_df["GarageQual"].fillna(GarageQual_mode, inplace=True)
test_df["GarageQual"].isnull().sum()
test_df["GarageCond"].value_counts()
GarageCond_mode = test_df["GarageCond"].mode()[0]
test_df["GarageCond"].fillna(GarageCond_mode, inplace=True)
test_df["GarageCond"].isnull().sum()
test_df["PoolQC"].value_counts()
PoolQC_mode = test_df["PoolQC"].mode()[0]
test_df["PoolQC"].fillna(PoolQC_mode, inplace=True)
test_df["PoolQC"].isnull().sum()
test_df["Fence"].value_counts()
Fence_mode = test_df["Fence"].mode()[0]
test_df["Fence"].fillna(Fence_mode, inplace=True)
test_df["Fence"].isnull().sum()
MiscFeature_mode = test_df["MiscFeature"].mode()[0]
test_df["MiscFeature"].fillna(MiscFeature_mode, inplace=True)
test_df["MiscFeature"].isnull().sum()
SaleType_mode = test_df["SaleType"].mode()[0]
test_df["SaleType"].fillna(SaleType_mode, inplace=True)
test_df["SaleType"].isnull().sum()
saved_final_catboost = load_model("Final house price kaggle catboost Model 03Mar2021")
new_prediction = predict_model(saved_final_catboost, data=test_df)
new_prediction.head()
new_prediction.to_excel("House_prediction.xlsx")
|
# # Welcome to my Kernel !
# # Introduction
# This particular challenge is perfect for data scientists looking to get started with Natural Language Processing.
# Competition Description
# Twitter has become an important communication channel in times of emergency.
# The ubiquitousness of smartphones enables people to announce an emergency they're observing in real-time. Because of this, more agencies are interested in programmatically monitoring Twitter (i.e. disaster relief organizations and news agencies).
# But, it’s not always clear whether a person’s words are actually announcing a disaster.
# Take an example:
# The author explicitly uses the word “ABLAZE” but means it metaphorically. This is clear to a human right away, especially with the visual aid. But it’s less clear to a machine.
# In this competition, you're challenged to build a machine learning model that predicts which Tweets are about real disasters and which ones aren't. You'll have access to a dataset of 10,000 tweets that were hand classified.
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.pyplot import xticks
from nltk.corpus import stopwords
import nltk
import re
from nltk.stem import WordNetLemmatizer
import string
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
from collections import defaultdict
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from keras.utils.vis_utils import plot_model
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
import tensorflow as tf
from sklearn.metrics import f1_score
from wordcloud import WordCloud, STOPWORDS
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from keras.preprocessing.sequence import pad_sequences
from numpy import array
from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, Embedding, Activation, Dropout
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, LSTM
from keras.layers import Bidirectional
# load train and test datasets
train = pd.read_csv("../input/nlp-getting-started/train.csv")
test = pd.read_csv("../input/nlp-getting-started/test.csv")
# check the no. of rows and columns in the dataset
train.shape, test.shape
train.head()
train.isnull().sum().sort_values(ascending=False)
# We can see a lot of null values in the "keyword" and "location" columns
sns.countplot(x=train.target)
# **We have a balanced dataset, which is good**
# ### Data Cleaning
# In order to get accurate results from the predictive model, we need to remove stop words & punctuation.
# Apart from removing stopwords & punctuation, we also convert all the messages to lowercase so that words like "Go" & "go" are treated as the same word and not as different words.
# We will also convert the words to its lemma form (for example, lemma of word "running" would be run), converting words to their lemmas would also help improving the predictive power of our model.
# We would also remove embedded special characters from the tweets, for example, #earthquake should be replaced by earthquake
# We also need to remove the "URLs" from the tweets
# And then finally we remove the digits from the tweets
# Let's write a small function "preprocess" to achieve all these tasks.
# lets save stopwords in a variable
stop = list(stopwords.words("english"))
# save list of punctuation/special characters in a variable
punctuation = list(string.punctuation)
# create an object to convert the words to its lemma form
lemma = WordNetLemmatizer()
# lets make a combine list of stopwords and punctuations
sw_pun = stop + punctuation
# function to preprocess the messages
def preprocess(tweet):
tweet = re.sub(r"https?:\/\/t.co\/[A-Za-z0-9]+", "", tweet) # removing urls
tweet = re.sub(
"[^\w]", " ", tweet
) # remove embedded special characters in words (for example #earthquake)
tweet = re.sub("[\d]", "", tweet) # this will remove numeric characters
tweet = tweet.lower()
words = tweet.split()
sentence = ""
for word in words:
if word not in (sw_pun): # removing stopwords & punctuations
word = lemma.lemmatize(word, pos="v") # converting to lemma
if len(word) > 3: # we will consider words with length greater than 3 only
sentence = sentence + word + " "
return sentence
# apply preprocessing functions on the train and test datasets
train["text"] = train["text"].apply(lambda s: preprocess(s))
test["text"] = test["text"].apply(lambda s: preprocess(s))
# function to remove emojis
def remove_emoji(text):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", text)
# applying the function on the train and the test datasets
train["text"] = train["text"].apply(lambda s: remove_emoji(s))
test["text"] = test["text"].apply(lambda s: remove_emoji(s))
# # Vocabulary creation
# Lets create our own vocabulary
# function to create vocab
from collections import Counter
def create_vocab(df):
vocab = Counter()
for i in range(df.shape[0]):
vocab.update(df.text[i].split())
return vocab
# concatenate training and testing datasets
master = pd.concat((train, test)).reset_index(drop=True)
# call vocabulary creation function on master dataset
vocab = create_vocab(master)
# lets check the no. of words in the vocabulary
len(vocab)
# lets check the most common 50 words in the vocabulary
vocab.most_common(50)
# lets consider only those words which have appeared more than once in the corpus
#
# create the final vocab by considering words with more than one occurrence
final_vocab = []
min_occur = 2
for k, v in vocab.items():
if v >= min_occur:
final_vocab.append(k)
# lets check the no. of the words in the final vocabulary
vocab_size = len(final_vocab)
vocab_size
# vocab size reduced drastically from 16k to 6k
# Now let's apply this vocab to our train and test datasets; we will keep only those words that appear in the vocabulary
# function to filter the dataset, keep only words which are present in the vocab
def filter(tweet):
sentence = ""
for word in tweet.split():
if word in final_vocab:
sentence = sentence + word + " "
return sentence
# apply filter function on the train and test datasets
train["text"] = train["text"].apply(lambda s: filter(s))
test["text"] = test["text"].apply(lambda s: filter(s))
# let's take a look at the updated training dataset
train.text.head()
# # Data Preprocessing
# the different units into which you can break down text (words, characters, or n-grams) are called tokens,
# and breaking text into such tokens is called tokenization, this can be achieved using Tokenizer in Keras
from keras.preprocessing.text import Tokenizer
# fit a tokenizer
def create_tokenizer(lines):
    # num_words = vocab_size will create a tokenizer, configured to only take into account the top vocab_size (6025) words
tokenizer = Tokenizer(num_words=vocab_size)
    # Build the word index; turns strings into lists of integer indices
tokenizer.fit_on_texts(lines)
return tokenizer
# create and apply tokenizer on the training dataset
tokenizer = create_tokenizer(train.text)
word_index = tokenizer.word_index
print("Found %s unique tokens." % len(word_index))
# Now we will apply texts_to_matrix() function to convert text into vectors.
# The texts_to_matrix() function on the Tokenizer creates one vector per input document. The length of each vector is the vocabulary size, which is 6025 here (we passed 6025 as num_words to the tokenizer).
# This function provides a suite of standard bag-of-words text encoding schemes, selected via a mode argument (a toy comparison of the modes is sketched after the list below).
# The modes available include:
# * ‘binary‘: Whether or not each word is present in the document. This is the default.
# * ‘count‘: The count of each word in the document.
# * ‘tfidf‘: The Term Frequency-Inverse Document Frequency (TF-IDF) score for each word in the document.
# * ‘freq‘: The frequency of each word as a ratio of words within each document.
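# A toy sketch (two made-up documents, not the tweet data) comparing the four modes:
_toy_tok = Tokenizer()
_toy_docs = ["fire fire flood", "flood rescue"]
_toy_tok.fit_on_texts(_toy_docs)
for _mode in ["binary", "count", "tfidf", "freq"]:
    print(_mode, _toy_tok.texts_to_matrix(_toy_docs, mode=_mode))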
# converting texts into vectors
train_text = tokenizer.texts_to_matrix(train.text, mode="freq")
# # Model Building & Evaluation
# ### 1. Neural Network
# We will create an artificial neural network. This competition is evaluated on the F1 score, which is not shown by default after every epoch, so let's create a function to compute it.
# Test train split
X_train, X_test, y_train, y_test = train_test_split(
train_text, train.target, test_size=0.2, random_state=42
)
# function to calculate f1 score for each epoch
import keras.backend as K
def get_f1(y_true, y_pred): # taken from old keras source code
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
recall = true_positives / (possible_positives + K.epsilon())
f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
return f1_val
# define the model
def define_model(n_words):
# define network
model = Sequential()
model.add(Dense(1024, input_shape=(n_words,), activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(1, activation="sigmoid"))
# compile network
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=[get_f1])
# summarize defined model
model.summary()
return model
X_train.shape
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=10,
),
ModelCheckpoint(filepath="./NN.h5", monitor="val_loss", save_best_only=True),
]
# create the model
n_words = X_train.shape[1]
model = define_model(n_words)
# fit network
history = model.fit(
X_train,
y_train,
epochs=100,
verbose=2,
callbacks=callbacks_list,
validation_split=0.2,
)
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
# There is a huge difference between training and validation accuracies and losses
import keras
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_NN = keras.models.load_model("./NN.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_NN.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# # Predictions on the test dataset
test_id = test.id
test.drop(["id", "location", "keyword"], 1, inplace=True)
# apply tokenizer on the test dataset
test_set = tokenizer.texts_to_matrix(test.text, mode="freq")
# make predictions on the test dataset
y_test_pred = loaded_model_NN.predict_classes(test_set)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("submission_NN.csv", index=False)
# # Model using Word Embeddings
# Another popular and powerful way to associate a vector with a word is the use of dense word vectors, also called `word embeddings`.
# The Embedding layer is best understood as a dictionary that maps integer indices (which stand for specific words) to dense vectors. It takes integers as input, it looks up these integers in an internal dictionary, and it returns the associated vectors. It’s effectively a dictionary lookup.
# Whereas the vectors obtained through one-hot encoding are binary, sparse (mostly made of zeros), and very high-dimensional (same dimensionality as the number of words in the vocabulary), word embeddings are low dimensional floating-point vectors (that is, dense vectors, as opposed to sparse vectors);
# Unlike the word vectors obtained via one-hot encoding, word embeddings are learned from data. It’s common to see word embeddings that are 256-dimensional, 512-dimensional, or 1,024-dimensional when dealing with very large vocabularies.
# On the other hand, one-hot encoding words generally leads to vectors that are 20,000-dimensional or greater (capturing a vocabulary of 6,025 tokens, above). So, word embeddings pack more information into far fewer dimensions.
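# A minimal sketch (toy sizes, not the model trained below) of the "dictionary lookup"
# behaviour: integer word indices go in, one dense float vector per token comes out.
_toy_embedding = Sequential([Embedding(input_dim=1000, output_dim=8, input_length=4)])
print(_toy_embedding.predict(np.array([[3, 14, 159, 26]])).shape)  # (1, 4, 8)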
# ### There are two ways to obtain word embeddings:
# * Learn word embeddings jointly with the main task you care about (such as document classification or sentiment prediction). In this setup, you start with random word vectors and then learn word vectors in the same way you learn the
# weights of a neural network.
# * Load into your model word embeddings that were precomputed using a different machine-learning task than the one you’re trying to solve. These are called
# pretrained word embeddings.
from keras.layers import Embedding
# The Embedding layer takes at least two arguments: the number of possible tokens (here, 5,000: 1 + maximum word index)
# and the dimensionality of the embeddings (here, 64).
# embedding_layer = Embedding(5000, 64)
# Number of words to consider as features
max_features = vocab_size
# Cuts off the text after this number of words (among the max_features most common words)
maxlen = 100
# create and apply tokenizer on the training dataset
tokenizer = create_tokenizer(train.text)
from keras import preprocessing
# convert text to sequences
sequences = tokenizer.texts_to_sequences(train.text)
# print(sequences)
# Turns the lists of integers into a 2D integer tensor of shape (samples, maxlen), padding shorter sequences with 0s
train_text = preprocessing.sequence.pad_sequences(sequences, maxlen=maxlen)
# Test train split
X_train, X_test, y_train, y_test = train_test_split(
train_text, train.target, test_size=0.2, random_state=42
)
# ### 2. Neural Network with Embedding Layer
# build the model
model = Sequential()
# Specifies the maximum input length to the Embedding layer so you can later flatten the embedded inputs.
# After the Embedding layer, the activations have shape (samples, maxlen, 8)
model.add(Embedding(vocab_size, 8, input_length=maxlen))
# Flattens the 3D tensor of embeddings into a 2D tensor of shape (samples, maxlen * 8)
model.add(Flatten())
# Dense layer for classification
model.add(Dense(1, activation="sigmoid"))
# compile the model
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
model.summary()
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./embd.h5", monitor="val_loss", save_best_only=True),
]
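# Note: as written, EarlyStopping watches the training-set get_f1, and because the metric name
# is not one Keras recognises, mode="auto" usually falls back to minimising it, so training can
# stop while F1 is still improving. A more conventional alternative (not what was run above)
# would monitor the validation metric and maximise it, e.g.:
# callbacks_list = [
#     EarlyStopping(monitor="val_get_f1", mode="max", patience=2),
#     ModelCheckpoint(filepath="./embd.h5", monitor="val_loss", save_best_only=True),
# ]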
# train the model
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=32,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_embd = keras.models.load_model("./embd.h5", custom_objects=dependencies)
# prediction on the test dataset
y_pred = loaded_model_embd.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# We got an F1 score of about 81%, which is reasonable considering that we only look at the first 100 words of each tweet. But note that merely flattening the embedded sequences and training a single Dense layer on top leads to a model that treats each word in the input sequence separately, without considering inter-word relationships and sentence structure (for example, such a model could not tell "this movie is a bomb" apart from "this movie is the bomb").
# It’s much better to add recurrent layers or 1D convolutional layers on top of the embedded sequences to learn features that take into account each sequence as a whole. We will do this later.
# convert text to sequences
sequences = tokenizer.texts_to_sequences(test.text)
# Turns the lists of integers into a 2D integer tensor of shape (samples, maxlen)
test_text = preprocessing.sequence.pad_sequences(sequences, maxlen=maxlen)
# make predictions on the test dataset
y_test_pred = loaded_model_embd.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("submission_embedding.csv", index=False)
# ### 3. Neural Network with pre trained Embedding Layer(GLOVE)
# Considers only the top 5000 words in the dataset
max_words = 5000
# We’ll build an embedding matrix that you can load into an Embedding layer.
# It must be a matrix of shape (max_words, embedding_dim), where each entry i contains the embedding_dim-dimensional vector for the word of index i in the reference word index (built during tokenization).
# Note that index 0 isn't supposed to stand for any word or token; it's a placeholder.
import os
glove_dir = "../input/glove6b100dtxt/"
embeddings_index = {}
f = open(os.path.join(glove_dir, "glove.6B.100d.txt"))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
print("Found %s word vectors." % len(embeddings_index))
embedding_dim = 100
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
if i < max_words:
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[
i
] = embedding_vector # Words not found in the embedding index will be all zeros.
# lets use the same model architecture we used earlier
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.summary()
# ### LOADING THE GLOVE EMBEDDINGS IN THE MODEL
# The Embedding layer has a single weight matrix: a 2D float matrix where each entry i is the word vector meant to be associated with index i. Simple enough.
# Load the GloVe matrix we prepared into the Embedding layer, the first layer in the model
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
# Compile the model
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./pre_embd.h5", monitor="val_loss", save_best_only=True),
]
# train the model
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=32,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_pre_embd = keras.models.load_model(
"./pre_embd.h5", custom_objects=dependencies
)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_pre_embd.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_pre_embd.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("submission_pre_embedding.csv", index=False)
# The Embedding layer whose weights were learned jointly with the model proved better than the pre-trained (GloVe) embeddings.
# ### 4.SIMPLE RNN
from keras.layers import Embedding, SimpleRNN
model = Sequential()
model.add(Embedding(max_words, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation="sigmoid"))
model.summary()
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./SRNN.h5", monitor="val_loss", save_best_only=True),
]
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_SRNN = keras.models.load_model("./SRNN.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_SRNN.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_SRNN.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("submission_SRNN.csv", index=False)
# The model's performance hasn't improved yet; let's stack some layers.
# ### 5. Stack multiple SimpleRNN layers
from keras.layers import Embedding, SimpleRNN
model = Sequential()
model.add(Embedding(max_words, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32))
model.add(Dense(1, activation="sigmoid"))
model.summary()
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./STRNN.h5", monitor="val_loss", save_best_only=True),
]
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_STRNN = keras.models.load_model("./STRNN.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_STRNN.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_STRNN.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("submission_stackRNN.csv", index=False)
# That didn't help; let's try an LSTM.
# ### 6. LSTM
from keras.layers import LSTM
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./LSTM.h5", monitor="val_loss", save_best_only=True),
]
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_LSTM = keras.models.load_model("./LSTM.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_LSTM.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_LSTM.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("LSTM.csv", index=False)
# The LSTM does a decent job here; let's try a bidirectional LSTM.
# ### 7. Bidirectional LSTM
from keras.layers import Bidirectional  # make sure the Bidirectional wrapper is available
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(Bidirectional(LSTM(32, return_sequences=True)))
model.add(Bidirectional(LSTM(32, return_sequences=True)))
model.add(Bidirectional(LSTM(32, return_sequences=True)))
model.add(Bidirectional(LSTM(32)))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./BILSTM.h5", monitor="val_loss", save_best_only=True),
]
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_BILSTM = keras.models.load_model(
"./BILSTM.h5", custom_objects=dependencies
)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_BILSTM.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_BILSTM.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("BiLSTM.csv", index=False)
# The Neural Network with an Embedding layer seems to be the best model for this classification task.
# # Please upvote if you like this kernel.
# # GRU
from keras.layers import GRU
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(GRU(32))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./GRU.h5", monitor="val_loss", save_best_only=True),
]
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_GRU = keras.models.load_model("./GRU.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_GRU.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_GRU.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("GRU.csv", index=False)
# # Stacked GRU
from keras.layers import GRU
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(GRU(32, return_sequences=True))
model.add(GRU(32, return_sequences=True))
model.add(GRU(32, return_sequences=True))
model.add(GRU(32))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./SGRU.h5", monitor="val_loss", save_best_only=True),
]
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_SGRU = keras.models.load_model("./SGRU.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_SGRU.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_SGRU.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("SGRU.csv", index=False)
# # Stacked GRU with Dropouts
from keras.layers import GRU
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(GRU(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model.add(GRU(32, return_sequences=True))
model.add(GRU(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model.add(GRU(32, return_sequences=True))
model.add(GRU(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model.add(GRU(32))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=[get_f1])
callbacks_list = [
EarlyStopping(
monitor="get_f1",
patience=1,
),
ModelCheckpoint(filepath="./DSGRU.h5", monitor="val_loss", save_best_only=True),
]
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=128,
callbacks=callbacks_list,
validation_split=0.2,
)
# check model performance
acc = history.history["get_f1"]
val_acc = history.history["val_get_f1"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
dependencies = {"get_f1": get_f1}
# load the model from disk
loaded_model_DSGRU = keras.models.load_model("./DSGRU.h5", custom_objects=dependencies)
# prediction on the test dataset
# X_test_Set = tokenizer.texts_to_matrix(X_test, mode = 'freq')
y_pred = loaded_model_DSGRU.predict_classes(X_test)
# important metrices
print(classification_report(y_test, y_pred))
# make predictions on the test dataset
y_test_pred = loaded_model_DSGRU.predict_classes(test_text)
# lets prepare for the prediction submission
sub = pd.DataFrame()
sub["Id"] = test_id
sub["target"] = y_test_pred
sub.head()
sub.to_csv("GRUDropOut.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv("/kaggle/input/data-bulan-sabit/datasabit01.csv")
data
arcv = Angle(data["ARCV"], unit=u.deg)
daz = Angle(data["DAZ"], unit=u.deg)
arcl = Angle(data["ARCL"], unit=u.deg)
plt.plot(arcv, arcl, "k.")
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
# Reading the file
data = pd.read_csv("/kaggle/input/corona-virus-report/country_wise_latest.csv")
# **to display the data from csv file**
data
# **info() prints a concise summary of the DataFrame: the number of columns, column labels, column data types, memory usage, the index range, and the number of non-null values in each column. Note: info() prints this summary rather than returning it.**
data.info()
# describe() - The describe() method returns a statistical summary of the data in the DataFrame. For numerical columns it reports:
# * count - the number of non-empty values
# * mean - the average value
# * std, min, max, and the lower, middle, and upper quartiles
data.describe()
# # Checking for Missing Data
# A quick way to check for missing data is the isnull() method
data.isnull().sum()
# We can also use seaborn to create a simple heatmap of missing values (in this case there are no null values)
sns.heatmap(data.isnull(), yticklabels=False, cbar=False, cmap="coolwarm")
# **Top 10 Countries by Death cases**
top_countries = (
    data[["Country/Region", "Deaths"]]
    .sort_values(by=["Deaths"], ascending=False)
    .head(10)
)
top_countries
# **Plotting the graph with seaborn and labelling it**
plt.figure(figsize=(10, 8))
sns.barplot(x="Deaths", y="Country/Region", data=top_countries)
plt.xlabel("Total Deaths", fontsize=20)
plt.ylabel("Country", fontsize=20)
plt.title("Top 10 Countries with Highest Deaths Recorded", fontsize=30)
# plt.legend(bbox_to_anchor=(2.05, 1.5), loc='best')
plt.show()
data.info()
Recovered_Data = (
data[["Country/Region", "Recovered"]]
.sort_values(by=["Recovered"], ascending=False)
.head(10)
)
Recovered_Data.info()
# **Plotting the top 10 countries with the highest number of recovered COVID cases**
plt.figure(figsize=(18, 10))
sns.barplot(x="Recovered", y="Country/Region", data=Recovered_Data)
plt.xlabel("Recovered Count", fontsize=20)
plt.ylabel("Country", fontsize=20)
plt.title("Top 10 Countries with higest recovered cases", fontsize=25)
plt.show()
d1 = data.head(20)
# countries with the highest deaths per 100 COVID cases; sorting in descending order to put the highest value on top
death_per_100_cases = (
data[["Deaths / 100 Cases", "Country/Region"]]
.sort_values(by=["Deaths / 100 Cases"], ascending=False)
.head(20)
)
death_per_100_cases
# plotting the graph by countries with highest deaths per 100 covid cases
plt.figure(figsize=(18, 10))
sns.barplot(x="Deaths / 100 Cases", y="Country/Region", data=death_per_100_cases)
plt.xlabel("Deaths per 100 cases", fontsize=15)
plt.ylabel("Country", fontsize=15)
plt.title("Top countries with deaths per 100 cases", fontsize=25)
plt.show()
# **Average Deaths per 100 cases across the world**
d2 = data["Deaths / 100 Cases"].mean()
d2
# **Average recovered cases per 100 cases across the world**
d3 = data["Recovered / 100 Cases"].mean()
d3
data.info()
# **No of active cases**
active_cases = data["Active"].sum()
active_cases
total_confirmed_cases = data["Confirmed"].sum()
total_confirmed_cases
# **Total deaths across the world**
total_deaths = data["Deaths"].sum()
total_deaths
# **Total recoveries across the globe**
total_recoveries = data["Recovered"].sum()
total_recoveries
# select the case-count columns and sum them to get totals
data2 = data[["Confirmed", "Deaths", "Recovered", "Active"]]
data2.sum()
# **Plotting a graph to get an overview of confirmed, death, recovered, and active COVID cases**
data[["Confirmed", "Deaths", "Recovered", "Active"]].sum().plot.bar()
plt.title("Covid Cases overview")
plt.ylabel("Count")
plt.show()
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_palette("Set2")
from sklearn.model_selection import train_test_split
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
train = pd.read_csv(r"../input/titanic-prepared-dataset/train_prep.csv")
test = pd.read_csv(r"../input/titanic-prepared-dataset/test_prep.csv")
predictions = pd.read_csv(r"../input/titanic/gender_submission.csv")
# ### Model Validation
train.head()
X = train[list(set(train.columns) - {"PassengerId", "Survived", "Age_", "Fare"})]
Y = train["Survived"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.2, random_state=42, stratify=Y
)
# y_train=train_v5['Survived']
# X_train=train_v5[list(set(train_v5.columns)-{'PassengerId','Survived'})]
# X_test=test_v5[list(set(test_v5.columns)-{'PassengerId'})]
# X_train=
import statsmodels.api as sm
lg_reg_model = sm.GLM(
y_train, sm.add_constant(X_train), family=sm.families.Binomial()
).fit()  # add a constant (intercept) term and use the Binomial family for logistic regression
lg_reg_model.summary2()
def get_significant_vars(modelobject):
var_p_vals_df = pd.DataFrame(modelobject.pvalues)
var_p_vals_df["vars"] = var_p_vals_df.index
var_p_vals_df.columns = ["pvals", "vars"]
return var_p_vals_df[var_p_vals_df.pvals <= 0.05][["pvals"]]
significant_vars = get_significant_vars(lg_reg_model)
significant_vars
sig_var = ["Sex_male", "Cabin_flag", "Age_scaled", "Pclass_low"]
X_train = X_train[sig_var]
X_test = X_test[sig_var]
import statsmodels.api as sm
lg_reg_model2 = sm.GLM(
y_train, sm.add_constant(X_train), family=sm.families.Binomial()
).fit()  # add a constant (intercept) term and use the Binomial family for logistic regression
lg_reg_model2.summary2()
# ### Only significant variables included in the model
# pass_formula = 'Survived ~ C(Pclass)+C(Sex) +SibSp +Cabin_flag+Age_'
# lg_reg_model2 = smf.glm(formula=pass_formula, data=train_v4, family=sm.families.Binomial()).fit()
# lg_reg_model2.summary2()
"""PREDICTED CLASS WITH ACTUAL CLASS"""
def get_predictions(test_class, model, test_data):
y_pred_df = pd.DataFrame(
{
"actual": test_class,
"predicted_prob": model.get_prediction(
sm.add_constant(test_data)
).predicted_mean,
}
)
return y_pred_df
"PREDICTED CLASS WITH ACTUAL CLASS"
predict_test_df = pd.DataFrame(get_predictions(y_test, lg_reg_model2, X_test))
predict_test_df.head()
# cutoff taken = 0.45
predict_test_df["predicted"] = predict_test_df.predicted_prob.apply(
lambda x: "1" if x > 0.45 else "0"
)
predict_test_df[0:10]
"""CONFUSION MATRIX"""
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
print("The model with dummy variable coding output: ")
confusion_matrix(predict_test_df.actual, predict_test_df.predicted.astype("int"))
lg_reg_report_50 = classification_report(
y_test, predict_test_df.predicted.astype("int")
)
print(lg_reg_report_50)
"""PERFORMANCE MEASUREMENT"""
def measure_performance(clasf_matrix):
measure = pd.DataFrame(
{
"sensitivity": [
round(clasf_matrix[0, 0] / (clasf_matrix[0, 0] + clasf_matrix[0, 1]), 2)
],
"specificity": [
round(clasf_matrix[1, 1] / (clasf_matrix[1, 0] + clasf_matrix[1, 1]), 2)
],
"recall": [
round(clasf_matrix[0, 0] / (clasf_matrix[0, 0] + clasf_matrix[0, 1]), 2)
],
"precision": [
round(clasf_matrix[0, 0] / (clasf_matrix[0, 0] + clasf_matrix[1, 0]), 2)
],
"overall_acc": [
round(
(clasf_matrix[0, 0] + clasf_matrix[1, 1])
/ (
clasf_matrix[0, 0]
+ clasf_matrix[0, 1]
+ clasf_matrix[1, 0]
+ clasf_matrix[1, 1]
),
2,
)
],
}
)
return measure
cm = metrics.confusion_matrix(
predict_test_df.actual, predict_test_df.predicted.astype(int)
)
lg_reg_metrics_df = pd.DataFrame(measure_performance(cm))
lg_reg_metrics_df
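# For reference: sklearn's confusion_matrix puts actual classes in rows and predicted classes in
# columns, so for binary labels cm.ravel() unpacks as (tn, fp, fn, tp). The sensitivity/specificity
# labels in measure_performance above therefore implicitly treat class 0 as the "positive" class.
tn, fp, fn, tp = cm.ravel()
print("TN:", tn, "FP:", fp, "FN:", fn, "TP:", tp)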
"""optimal cut-off"""
lg_pred_prob_df = lg_reg_model2.predict(sm.add_constant(X_test))
n = len(X_test)
# --> since misclassifying an actual survivor as not-survived is assumed to be the costlier error,
# it is assigned a double penalty in the cost table below
"""COST TABLE"""
d = {"Not-survived": (0, 2), "Survived": (1, 0)}
costs = pd.DataFrame(d, index=("Not-survived", "Survived"))
print(costs)
def frange(start, stop, step):
s = start
while s < stop:
yield s
s += step
# creating empty vectors to store the results.
cutoff = []
P11 = [] # correct classification of positive as positive
P00 = [] # correct classification of negative as negative
P10 = [] # misclassification of positive class to negative class
P01 = [] # misclassification of negative class to positive class
for i in frange(0.00, 1, 0.05):
predicted_y = lg_pred_prob_df.map(lambda x: 1 if x > i else 0)
tbl = metrics.confusion_matrix(y_test, predicted_y)
if i <= 1:
j = int(20 * i)
P01.append(tbl[0, 1] / (tbl[0, 1] + tbl[0, 0]))
P00.append(tbl[0, 0] / (tbl[0, 1] + tbl[0, 0]))
P10.append(tbl[1, 0] / (tbl[1, 0] + tbl[1, 1]))
P11.append(tbl[1, 1] / (tbl[1, 0] + tbl[1, 1]))
cutoff.append(i)
d = {"cutoff": cutoff, "P10": P10, "P01": P01, "P00": P00, "P11": P11}
df_cost_table = pd.DataFrame(d, columns=["cutoff", "P00", "P01", "P10", "P11"])
df_cost_table
df_cost_table["msclaf_cost"] = (
df_cost_table.P01 * costs.iloc[0][1] + df_cost_table.P10 * costs.iloc[1][0]
)
df_cost_table["youden_index"] = df_cost_table.P00 + df_cost_table.P11 - 1
df_cost_table
"""MAX YOUDENS INDEX - CUTOFF"""
max_youden_cutoff = df_cost_table.loc[
df_cost_table.youden_index == df_cost_table.youden_index.max()
]["cutoff"].values
plt.plot(df_cost_table.cutoff, df_cost_table.youden_index, label="Youdens curve")
plt.title("Youdens Index vs Cutoff-P")
plt.axvline(max_youden_cutoff, label="Cutoff", color="b", ls=":")
plt.legend()
print("Cutoff value according to max Youdens index : ", max_youden_cutoff)
"""MIN PENALTY - CUTOFF"""
min_cost_cutoff = df_cost_table.loc[
df_cost_table.msclaf_cost == df_cost_table.msclaf_cost.min()
]["cutoff"].values
plt.plot(df_cost_table.cutoff, df_cost_table.msclaf_cost, label="Penalty curve")
plt.title("Miscalculation Penalty vs Cutoff-P")
plt.axvline(min_cost_cutoff, label="Cutoff", color="b", ls=":")
plt.legend()
print("Cutoff value according to min. Miscalc Penalty : ", min_cost_cutoff)
"" "ROC CURVE AND OPTIMAL CUTOFF" ""
def draw_roc(actual, probs):
fpr, tpr, thresholds = metrics.roc_curve(actual, probs, drop_intermediate=False)
auc_score = metrics.roc_auc_score(actual, probs)
plt.figure(figsize=(10, 10))
plt.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % auc_score)
plt.plot([0, 1], [0, 1], "k--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate or [1 - True Negative Rate]")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic example")
plt.legend(loc="lower right")
plt.show()
return fpr, tpr, thresholds
fpr, tpr, thresholds = draw_roc(predict_test_df.actual, predict_test_df.predicted_prob)
auc_score = metrics.roc_auc_score(
predict_test_df.actual, predict_test_df.predicted_prob
)
round(float(auc_score), 2)
# ## Actual Predictions
## Combining the data
# taking significant variables only
y_train = train["Survived"]
X_train = train[sig_var]
X_test = test[sig_var]
import statsmodels.api as sm
lg_reg_model2 = sm.GLM(
y_train, sm.add_constant(X_train), family=sm.families.Binomial()
).fit()  # add a constant (intercept) term and use the Binomial family for logistic regression
lg_reg_model2.summary2()
# ### Only significant variables included in the model
# pass_formula = 'Survived ~ C(Pclass)+C(Sex) +SibSp +Cabin_flag+Age_'
# lg_reg_model2 = smf.glm(formula=pass_formula, data=train_v4, family=sm.families.Binomial()).fit()
# lg_reg_model2.summary2()
"""PREDICTED CLASS WITH ACTUAL CLASS"""
def get_predictions(test_class, model, test_data):
y_pred_df = pd.DataFrame(
{
"actual": test_class,
"predicted_prob": model.get_prediction(
sm.add_constant(test_data)
).predicted_mean,
}
)
return y_pred_df
"PREDICTED CLASS WITH ACTUAL CLASS"
y_test.head()
"PREDICTED CLASS WITH ACTUAL CLASS"
predict_test_df = pd.DataFrame(get_predictions("", lg_reg_model2, X_test))
predict_test_df.head()
# cutoff taken = 0.45
predict_test_df["predicted"] = predict_test_df.predicted_prob.apply(
lambda x: "1" if x > 0.45 else "0"
)
predict_test_df[0:10]
predictions["Survived"] = predict_test_df["predicted"]
## kaggle output
predictions.to_csv("LR_Output.csv", index=False)
train.columns
X_train = train[
[
"SibSp",
"Parch",
"Cabin_flag",
"Pclass_low",
"Pclass_med",
"Sex_male",
"Embarked_Q",
"Embarked_S",
"Age_scaled",
"Fare_scaled",
]
]
y_train = train["Survived"]
X_test = test[
[
"SibSp",
"Parch",
"Cabin_flag",
"Pclass_low",
"Pclass_med",
"Sex_male",
"Embarked_Q",
"Embarked_S",
"Age_scaled",
"Fare_scaled",
]
]
# y_test
# ### Decision Trees
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import (
metrics,
) # Import scikit-learn metrics module for accuracy calculation
# Create Decision Tree classifer object
clf = DecisionTreeClassifier()
# Train Decision Tree Classifer
clf = clf.fit(X_train, y_train)
# Predict the response for test dataset
y_pred = clf.predict(X_test)
predictions["Survived_DT"] = y_pred
predictions.to_csv()
## kaggle output
predictions[["PassengerId", "Survived_DT"]].rename(
columns={"Survived_DT": "Survived"}
).to_csv("DT_Output.csv", index=False)
# Important features
for i, j in dict(zip(X_train.columns, clf.feature_importances_)).items():
print(i, ":", j)
# sex, age, Fare are the most important variables
# ### Random forest
from sklearn import (
ensemble,
) # linear_model, ensemble, neural_network, naive bayes, svm, tree
# dir(ensemble)
"""
The “balanced” mode uses the values of y to automatically adjust weights
inversely proportional to class frequencies in the input data as
n_samples / (n_classes * np.bincount(y))
"""
rf_model = ensemble.RandomForestClassifier(
bootstrap=True,
criterion="gini",
max_depth=4,
max_features="auto",
min_impurity_decrease=0.001,
min_samples_leaf=30,
min_samples_split=50,
class_weight="balanced_subsample", # to remove class imbalance (balanced)
n_estimators=500,
oob_score=True,
random_state=42,
verbose=0,
warm_start=False,
)
rf_model.fit(X_train, y_train.values.ravel())
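# The "balanced" weighting quoted in the docstring above can be reproduced by hand with
# n_samples / (n_classes * np.bincount(y)); a quick sketch on this training set:
classes, counts = np.unique(y_train, return_counts=True)
print(dict(zip(classes, np.round(len(y_train) / (len(classes) * counts), 3))))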
# rf_model.fit(os_data_X,os_data_y)
# Number of trees in random forest
n_estimators = [100, 120, 130, 150]
# Maximum number of levels in tree
max_depth = [2, 3, 4, 5, 6, 7, 8, 9]
max_depth.append(None)
# Number of features to consider at every split
max_features = ["auto", "log2"]
# Minimum number of samples required to split a node
min_samples_split = [50, 75, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [30, 35, 40]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# How to remove the class imbalance
class_weight = ["balanced_subsample", "balanced"]
# Create the random grid
random_grid = {
"n_estimators": n_estimators,
"max_depth": max_depth,
"max_features": max_features,
"min_samples_split": min_samples_split,
"min_samples_leaf": min_samples_leaf,
"bootstrap": bootstrap,
"class_weight": class_weight,
}
random_grid
from sklearn.metrics import SCORERS
SCORERS.keys()
from sklearn.model_selection import RandomizedSearchCV
# RandomizedSearchCV?
# Use the random grid to search for best hyperparameters
# from sklearn.model_selection import RandomizedSearchCV
rf_model = ensemble.RandomForestClassifier()
# Random search of parameters, using 3 fold cross validation,
rf_best_model = RandomizedSearchCV(
estimator=rf_model,
param_distributions=random_grid,
scoring="accuracy",
n_iter=100,
cv=3,
verbose=2,
random_state=42,
n_jobs=-2,
pre_dispatch=2,
)
rf_best_model.fit(X_train, y_train.values.ravel())
rf_best_model = rf_best_model.best_estimator_
# Fit the random search model
rf_best_model.fit(X_train, y_train.values.ravel())
predictions["Survived_RF"] = y_pred
## kaggle output
predictions[["PassengerId", "Survived_RF"]].rename(
columns={"Survived_RF": "Survived"}
).to_csv("RF_Output.csv", index=False)
# Important features
for i, j in dict(zip(X_train.columns, rf_best_model.feature_importances_)).items():
print(i, ":", j)
# sex, age, Fare are the most important variables
# ### Adaboost Classifier
from sklearn.ensemble import AdaBoostClassifier
# adaboost with the tree as base estimator
estimators = list(range(100, 150, 10))
# abc_scores = []
# for n_est in estimators:
# ABC = AdaBoostClassifier( n_estimators = n_est, random_state=101)
# ABC.fit(X_train, y_train)
# y_pred = ABC.predict(X_test)
# score = metrics.accuracy_score(y_test, y_pred)
# abc_scores.append(score)
# print(n_est,':',score)
adb_classifier = AdaBoostClassifier(n_estimators=130, random_state=101)
adb_classifier.fit(X_train, y_train)
adb_predicted = adb_classifier.predict(X_test)
predictions["Survived_ADB"] = adb_predicted
## kaggle output
predictions[["PassengerId", "Survived_ADB"]].rename(
columns={"Survived_ADB": "Survived"}
).to_csv("ADB_Output.csv", index=False)
# Important features
for i, j in dict(zip(X_train.columns, adb_classifier.feature_importances_)).items():
print(i, ":", j)
# sex, age, Fare are the most important variables
# from numpy import mean
# from numpy import std
# from sklearn.datasets import make_classification
# from sklearn.model_selection import cross_val_score
# from sklearn.model_selection import RepeatedStratifiedKFold
# ### Gradient Boosting
from sklearn.ensemble import GradientBoostingClassifier
# define the model
GBM = GradientBoostingClassifier()
# define the evaluation method
# cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate the model on the dataset
# n_scores = cross_val_score(model, X, y, scoring='precision', n_jobs=-1)
# report performance
# print('Mean Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
GBM.fit(X_train, y_train.values.ravel())
gbm_predict = GBM.predict(X_test)
predictions["Survived_GBM"] = gbm_predict
## kaggle output
predictions[["PassengerId", "Survived_GBM"]].rename(
columns={"Survived_GBM": "Survived"}
).to_csv("GBM_Output.csv", index=False)
# Important features
for i, j in dict(zip(X_train.columns, GBM.feature_importances_)).items():
print(i, ":", j)
# sex, age, Fare are the most important variables
# Only taking good variables from all of the above modelling and analysis and applying XGBoost
X_train = X_train[["Sex_male", "Age_scaled", "Fare_scaled", "SibSp", "Cabin_flag"]]
X_test = X_test[["Sex_male", "Age_scaled", "Fare_scaled", "SibSp", "Cabin_flag"]]
import xgboost as xgb
from sklearn import metrics
xgclf = xgb.XGBClassifier()
xgclf.fit(X_train, y_train)
## This code takes a while to run hence commenting out-- the hyperparameters obtained through gridsearch are directly used in the next cell
# from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import model_selection
# 1st-Run for best hyperparameters
# parameters = {'learning_rate': [0.1, 0.2, 0.3, 0.4, 0.5],
# 'max_depth': [2, 4, 6, 8, 10],
# 'min_child_weight': [3, 7, 11, 19, 25],
# 'n_estimators': [50, 100, 150, 200]}
# clf_xgb = GridSearchCV(estimator=xgclf,
# param_grid=parameters,
# n_jobs=-1,
# cv=3,
# scoring="accuracy",
# refit=True)
# clf_xgb.fit(X_train, y_train)
clf_xgb_best = xgb.XGBClassifier(
base_score=0.5,
booster="gbtree",
colsample_bylevel=1,
colsample_bynode=1,
colsample_bytree=1,
gamma=0,
gpu_id=-1,
importance_type="gain",
interaction_constraints="",
learning_rate=0.1,
max_delta_step=0,
max_depth=6,
min_child_weight=7,
monotone_constraints="()",
n_estimators=50,
n_jobs=0,
num_parallel_tree=1,
random_state=0,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
subsample=1,
tree_method="exact",
validate_parameters=1,
verbosity=None,
)
clf_xgb_best.fit(X_train, y_train.values.ravel())
xgb_predict = clf_xgb_best.predict(X_test)
predictions["Survived_XGB"] = xgb_predict
## kaggle output
predictions[["PassengerId", "Survived_XGB"]].rename(
columns={"Survived_XGB": "Survived"}
).to_csv("XGB_Output.csv", index=False)
# Important features
for i, j in dict(zip(X_train.columns, clf_xgb_best.feature_importances_)).items():
print(i, ":", j)
# sex, age, Fare are the most important variables
|
# # What is this about?
# Analysis of splicing-related genes for the NIPS2021 CITE-seq data.
# # Preparations
import matplotlib.pyplot as plt
import seaborn as sns
import time
t0start = time.time()
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
if "research-project-01-around-multimodal-singlecell" not in os.path.join(
dirname, filename
):
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import scanpy as sc
# # Load data
# Here we use a more general setup that allows working with many datasets - from the Notebook: https://www.kaggle.com/code/alexandervc/many-citeseq-02-just-load-full-data
# Presently we consider only one dataset, so this is overkill, but it keeps the code reusable
dict_df_prot = {}
dict_df_rna = {}
dict_df_meta = {}
N_rows2take = int(
1e5
) # Work only with first rows of the data - to speed up, avoid RAM crashes , etc...
work_with_rna_data = "Yes" # If No - we will not store/work with protein data - only rna - For some fast analysis we need only protein data, which much more smaller
work_with_protein_data = "Yes"
dict_datasets2consider = {}
dict_datasets2consider["NIPS2021"] = "Yes"
import gc
if dict_datasets2consider["NIPS2021"] == "Yes":
key4dict = "NIPS2021"
fn = "/kaggle/input/citeseqscrnaseqproteins-challenge-neurips2021/GSE194122_openproblems_neurips2021_cite_BMMC_processed.h5ad"
adata = sc.read(fn)
adata.var_names_make_unique()
# print(adata)
if work_with_protein_data == "Yes":
mask = adata.var["feature_types"] == "ADT"
X = adata[:N_rows2take, mask].X.todense()
# type(X)
df = pd.DataFrame(X, columns=list(adata.var["feature_types"][mask].index))
l = [t.replace("-1", "") for t in df.columns]
df.columns = l
print(
[t for t in df.columns if "CD45" in t.upper()],
[t for t in df.columns if "CD53" in t.upper()],
)
print("Protein data shape:", df.shape)
dict_df_prot[key4dict] = df
display(df.head(2)) # df
if work_with_rna_data == "Yes":
mask = adata.var["feature_types"] != "ADT"
X = adata[:N_rows2take, mask].X.todense()
df = pd.DataFrame(X, columns=list(adata.var["feature_types"][mask].index))
print("RNA data shape:", df.shape)
dict_df_rna[key4dict] = df
display(df.head(2))
df = adata.obs.iloc[:N_rows2take, :]
print("Meta data shape:", df.shape)
dict_df_meta[key4dict] = df
display(df.head(2))
gc.collect()
for k in dict_df_meta:
df_prot = dict_df_prot[k]
df_rna = dict_df_rna[k]
df_meta = dict_df_meta[k]
print(df_prot.shape, df_rna.shape, df_meta.shape)
# # Genes related to splicing
# From Antonina Dolgorukova's notebook:
# https://www.kaggle.com/code/antoninadolgorukova/mmscel-cd45-iso-go-enrichment?scriptVersionId=122260628&cellId=124
#
list_genes_splicing1 = [
"PTPRC",
"UBL5",
"SNRPD3",
"POLR2F",
"SNRPG",
"YBX1",
"SF3B5",
"HSPA8",
"POLR2L",
"SNU13",
"SNRPB",
"SRSF7",
"SRSF2",
"SNRPD1",
"C1QBP",
]
l = list_genes_splicing1
print(len(l), l[:4])
s = set(df_rna.columns) & set(l)
print(len(s), list(s))
# Task 1 (from 15/03/2023)
# dict_df_prot[key4dict]['UBL5']
# dict_df_rna[key4dict]['UBL5']
splicing_rna = [col for col in dict_df_rna[key4dict].columns if col in s]
rna = dict_df_rna[key4dict][splicing_rna]
cd45_cols = [t for t in dict_df_prot[key4dict].columns if "CD45" in t.upper()]
prot = dict_df_prot[key4dict][cd45_cols]
result = pd.concat([rna, prot], axis=1, join="inner")
sns.clustermap(result.corr().round(2), annot=True)
plt.show()
# # hnRNP genes
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4947485/
#
# Heterogeneous ribonucleoprotein particle
# Heterogeneous nuclear ribonucleoproteins (hnRNPs) are complexes of RNA and protein present in the cell nucleus during gene transcription and subsequent post-transcriptional modification of the newly synthesized RNA (pre-mRNA). The presence of the proteins bound to a pre-mRNA molecule serves as a signal that the pre-mRNA is not yet fully processed and therefore not ready for export to the cytoplasm.[1] Since most mature RNA is exported from the nucleus relatively quickly, most RNA-binding protein in the nucleus exist as heterogeneous ribonucleoprotein particles. After splicing has occurred, the proteins remain bound to spliced introns and target them for degradation.
# hnRNPs are also integral to the 40s subunit of the ribosome and therefore important for the translation of mRNA in the cytoplasm.[2] However, hnRNPs also have their own nuclear localization sequences (NLS) and are therefore found mainly in the nucleus. Though it is known that a few hnRNPs shuttle between the cytoplasm and nucleus, immunofluorescence microscopy with hnRNP-specific antibodies shows nucleoplasmic localization of these proteins with little staining in the nucleolus or cytoplasm.[3] This is likely because of its major role in binding to newly transcribed RNAs. High-resolution immunoelectron microscopy has shown that hnRNPs localize predominantly to the border regions of chromatin, where it has access to these nascent RNAs.[4]
# The proteins involved in the hnRNP complexes are collectively known as heterogeneous ribonucleoproteins. They include protein K and polypyrimidine tract-binding protein (PTB), which is regulated by phosphorylation catalyzed by protein kinase A and is responsible for suppressing RNA splicing at a particular exon by blocking access of the spliceosome to the polypyrimidine tract.[5]: 326 hnRNPs are also responsible for strengthening and inhibiting splice sites by making such sites more or less accessible to the spliceosome.[6] Cooperative interactions between attached hnRNPs may encourage certain splicing combinations while inhibiting others.[7]
#
# https://t.me/c/1889733548/12776 Antonina Dolgorukova:
# here are the 14 RNAs that form a cluster in the 2022 data
# Most of them are covered in the review - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4947485/. For the rest, some information on their function was found in GeneCards or Wikipedia.
# 'HNRNPA0' - Involved in post-transcriptional regulation of cytokines mRNAs (https://www.genecards.org/cgi-bin/carddisp.pl?gene=HNRNPA0)
# 'HNRNPA2B1' - Splicing
# 'HNRNPA3' - paralog of HNRNPA0, Plays a role in cytoplasmic trafficking of RNA. Binds to the cis-acting response element, A2RE. May be involved in pre-mRNA splicing (https://www.genecards.org/cgi-bin/carddisp.pl?gene=HNRNPA3&keywords=HNRNPA3)
# 'HNRNPAB' - it is not a member of the HNRNP A/B subfamily of HNRNPs, but groups together closely with HNRNPD/AUF1 and HNRNPDL (https://en.wikipedia.org/wiki/HNRNPAB)
# 'HNRNPC' - May play a role in the early steps of spliceosome assembly and pre-mRNA splicing (https://www.genecards.org/cgi-bin/carddisp.pl?gene=HNRNPC&keywords=HNRNPC)
# 'HNRNPD' - mRNA decay, Telomere maintenance
# 'HNRNPDL' - Acts as a transcriptional regulator. Promotes transcription repression (https://www.genecards.org/cgi-bin/carddisp.pl?gene=HNRNPDL&keywords=HNRNPDL)
# 'HNRNPF' - Splicing
# 'HNRNPH2' - ?
# 'HNRNPH3' - Involved in the splicing process and participates in early heat shock-induced splicing arrest (https://www.genecards.org/cgi-bin/carddisp.pl?gene=HNRNPH3&keywords=HNRNPH3)
# 'HNRNPK' - Translational regulation, Transcriptional regulation, mRNA stability, Splicing
# 'HNRNPM' - Splicing
# 'HNRNPR' - Transcriptional regulation
# 'HNRNPU' - Splicing, Transcriptional regulation
#
df = dict_df_rna[key4dict]
list_hnRN_genes = []
for i in range(len(df.columns)):
if "HNRN" in df.columns[i]:
list_hnRN_genes.append(df.columns[i])
print(len(list_hnRN_genes), list_hnRN_genes)
sns.clustermap(df[list_hnRN_genes].corr().round(2), annot=True)
plt.suptitle("Pearson correlations")
plt.show()
sns.clustermap(df[list_hnRN_genes].corr(method="spearman").round(2), annot=True)
plt.suptitle("Spearman correlations")
plt.show()
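# Spearman correlation is just Pearson correlation computed on ranks, which makes it robust to
# monotone nonlinear relationships. A quick sanity check on the first two hnRNP genes found
# above (assumes at least two were found):
g1, g2 = list_hnRN_genes[0], list_hnRN_genes[1]
print(
    g1, g2,
    round(df[[g1, g2]].rank().corr().iloc[0, 1], 3),            # Pearson on ranks
    round(df[[g1, g2]].corr(method="spearman").iloc[0, 1], 3),  # Spearman directly
)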
# dict_df_prot[key4dict]['UBL5']
# dict_df_rna[key4dict]['UBL5']
list_rna_loc = list_hnRN_genes + ["PTPRC"]
list_rna_loc = [col for col in dict_df_rna[key4dict].columns if col in list_rna_loc]
rna = dict_df_rna[key4dict][list_rna_loc]
cd45_cols = [t for t in dict_df_prot[key4dict].columns if "CD45" in t.upper()]
prot = dict_df_prot[key4dict][cd45_cols]
result = pd.concat([rna, prot], axis=1, join="inner")
sns.clustermap(result.corr(method="spearman").round(1), annot=True)
plt.show()
list_rna_loc = list_hnRN_genes + ["PTPRC"] + list_genes_splicing1
list_rna_loc = [col for col in dict_df_rna[key4dict].columns if col in list_rna_loc]
rna = dict_df_rna[key4dict][list_rna_loc]
cd45_cols = [t for t in dict_df_prot[key4dict].columns if "CD45" in t.upper()]
prot = dict_df_prot[key4dict][cd45_cols]
result = pd.concat([rna, prot], axis=1, join="inner")
sns.clustermap(
result.corr(method="spearman").round(2), yticklabels=1
) # , ,annot=True)
plt.show()
df = dict_df_rna[key4dict]
list_hnRN_genes = []
for i in range(len(df.columns)):
if "RNU" in df.columns[i]:
list_hnRN_genes.append(df.columns[i])
print(len(list_hnRN_genes), list_hnRN_genes)
# # More splicing related genes
gene_symbols_splicing_chatGPT = [
"SNRNP70",
"RNU2-1",
"SNRPN",
"SNRNP200",
"RNU6-1",
"RNU6-2",
"RNU6-3",
]
list_rna_loc = (
gene_symbols_splicing_chatGPT # list_hnRN_genes + ['PTPRC'] + list_genes_splicing1
)
list_rna_loc = [col for col in dict_df_rna[key4dict].columns if col in list_rna_loc]
rna = dict_df_rna[key4dict][list_rna_loc]
cd45_cols = [t for t in dict_df_prot[key4dict].columns if "CD45" in t.upper()]
prot = dict_df_prot[key4dict][cd45_cols]
result = pd.concat([rna, prot], axis=1, join="inner")
sns.clustermap(result.corr(method="spearman").round(2), yticklabels=1, annot=True)
plt.show()
# gene_symbols_splicing_chatGPT = ['SNRNP70', 'RNU2-1', 'SNRPN', 'SNRNP200', 'RNU6-1', 'RNU6-2', 'RNU6-3']
# NOTE: spliceosome_gene_symbols_chatgpt is not defined in this script; it is presumably defined
# in an earlier cell or notebook version, otherwise the next line raises a NameError.
list_rna_loc = spliceosome_gene_symbols_chatgpt  # gene_symbols_splicing_chatGPT # list_hnRN_genes + ['PTPRC'] + list_genes_splicing1
list_rna_loc = [col for col in dict_df_rna[key4dict].columns if col in list_rna_loc]
rna = dict_df_rna[key4dict][list_rna_loc]
cd45_cols = [t for t in dict_df_prot[key4dict].columns if "CD45" in t.upper()]
prot = dict_df_prot[key4dict][cd45_cols]
result = pd.concat([rna, prot], axis=1, join="inner")
sns.clustermap(result.corr(method="spearman").round(2), yticklabels=1) # , annot=True)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # This notebook is for people who want to learn the basic concepts of a recommendation system.
# # I used the Amazon Food rating data (from 1996 to 2018)
# # Dataset used : https://nijianmo.github.io/amazon/index.html
# * There are many types of datasets on the website. The dataset I used was a simple version which only contains the ratings.
# * This 'Grocery and Gourmet food' data includes reviews in the range May 1996 - Oct 2018.
# * Please read the description on the website above for more details.
data = pd.read_csv("/kaggle/input/amazonfooddataset/Grocery_and_Gourmet_Food.csv")
data
# Since the raw data has no column names, I put the column names (based on the data description page) manually.
data = pd.read_csv(
"/kaggle/input/amazonfooddataset/Grocery_and_Gourmet_Food.csv",
header=None,
index_col=None,
)
data.columns = ["ProductId", "UserId", "Rating", "Timestamp"]
data
# Number of unique product id - 283,507
data["ProductId"].nunique()
# There could be two kinds of recommendation systems for the data like this:
# # 1. ProductId-based recommendation
# * We can use cosine_similarity package which is provided from sklearn (from sklearn.metrics.pairwise import cosine_similarity)
# * In this case, product similarity would be computed from the rating column alone (a tiny sketch of the cosine-similarity idea follows after this list). A ProductId-based recommendation is therefore not a good approach here, since a single rating doesn't capture a product's full characteristics.
# # 2. UserId-based recommendation
# * So we use 'UserId-based recommendation' for this data
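# A tiny, self-contained illustration of the cosine-similarity idea from option 1 above, on
# made-up rating vectors (hypothetical numbers, purely to show what
# sklearn.metrics.pairwise.cosine_similarity computes):
from sklearn.metrics.pairwise import cosine_similarity
toy_ratings = np.array([[5, 4, 0], [5, 5, 0], [0, 1, 5]])   # 3 products x 3 users
print(cosine_similarity(toy_ratings).round(2))              # products 0 and 1 come out as highly similar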
# Number of UserId - 2,695,974
data["UserId"].nunique()
# # Top 30 Amazon Grocery & Gourmet food (1996~2018)
# * ProductId is ASIN number of Amazon product. Since the 'metadata' is too large to open here (and also, this notebook is just for showing the concept of recommendation system) I manually show the product by typing the ProductId on www.amazon.com/dp/ProductId
product_count = data.groupby("ProductId")["UserId"].count()
product_count.sort_values(ascending=False).head(30)
# # FYI
# * Number 1 product : www.amazon.com/dp/B00BUKL666 (KIND healthy grains bar. Not surprised)
# * Number 2 product : www.amazon.com/dp/B00542YXFW (a bag of some tea. Interesting)
# * Number 3 product : www.amazon.com/dp/B008QMX2SG (another category for KIND healthy snack)
# * Number 4 product : www.amazon.com/dp/B00D3M2QP4 (again, another category for KIND healthy snack. Please note that I'm not related with the brand at all.)
# * Number 5 product : www.amazon.com/dp/B000YN2GVY (Organic unfiltered apple cider vinegar. Many people (especially Americans) like apple cider vinegar. Not surprised)
# # Now, I am adding myself to the data and will see how the model recommends foods for me
# First, I have to look up the product I want to add before adding it to the data (because the data doesn't include products launched after 2018)
# I used the ASIN code for Monster Energy Zero Ultra(B00MEFXEB6) and it was in the data.
np.where(data.ProductId == "B00MEFXEB6")
# My top 5 favorite foods converted to ASIN number (most of those selected from Top 30)
# Energy drink, Coconut Oil, Sparkling Juice, Matcha Green Tea Powder, Beef Jerky
my_favorite = ["B00MEFXEB6", "B000H2XXRS", "B0014WYXYW", "B00PFDH0IC", "B000GW0U9I"]
my_foodlist = pd.DataFrame(
{
"UserId": ["yohann"] * 5,
"Rating": [5] * 5,
"Timestamp": [12345678] * 5,
"ProductId": [
"B00MEFXEB6",
"B000H2XXRS",
"B0014WYXYW",
"B00PFDH0IC",
"B000GW0U9I",
],
}
)
if not data.isin({"UserId": ["yohann"]})["UserId"].any():
data = data.append(my_foodlist)
data.tail(10)
# Finding an unique user and product
user_unique = data["UserId"].unique()
product_unique = data["ProductId"].unique()
# indexing the users and products
user_to_idx = {v: k for k, v in enumerate(user_unique)}
product_to_idx = {v: k for k, v in enumerate(product_unique)}
print(user_to_idx["yohann"])
# # building a CSR Matrix
# Indexing
temp_user_data = data["UserId"].map(user_to_idx.get).dropna()
if len(temp_user_data) == len(data):
print("UserId Column Indexing Completed.")
data["UserId"] = temp_user_data
else:
print("UserId Column Indexing Failed.")
# Same on the product
temp_product_data = data["ProductId"].map(product_to_idx.get).dropna()
if len(temp_product_data) == len(data):
print("Product Column Indexing Completed.")
data["ProductId"] = temp_product_data
else:
print("Product Column Indexing Failed.")
data
# # Compressed Sparse Row Matrix
# A dense UserId x ProductId matrix would require a huge amount of memory.
# Furthermore, most of its entries would be empty, since each user rates only a tiny fraction of the products.
# So, to keep memory usage low, only the observed (user, product, rating) entries are stored in a compressed sparse row (CSR) matrix.
#
# CSR MATRIX
from scipy.sparse import csr_matrix
num_user = data["UserId"].nunique()
num_product = data["ProductId"].nunique()
csr_data = csr_matrix(
(data["Rating"], (data.UserId, data.ProductId)), shape=(num_user, num_product)
)
csr_data
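# To make the CSR structure concrete, a toy example (hypothetical numbers): only the non-zero
# ratings and their (row, column) coordinates are stored.
toy = csr_matrix(([5, 3, 4], ([0, 0, 2], [1, 3, 0])), shape=(3, 4))
print(toy.toarray())                      # the full 3x4 matrix, mostly zeros
print(toy.data, toy.indices, toy.indptr)  # the three arrays CSR actually keeps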
# # Using ALS (Alternating Least Squares) model
# I used the 'implicit' package (its ALS model)
# The two factor matrices produced by matrix factorization are hard to train simultaneously, so ALS fixes one side and trains the other, alternating between the two.
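# A minimal numpy sketch (not the implicit library's algorithm; plain ALS on a toy explicit-rating
# matrix with assumed values) of the alternating idea: with the item factors fixed, each user
# vector is the solution of a small ridge-regression problem, and vice versa.
import numpy as np
R_toy = np.array([[5.0, 0.0, 3.0], [4.0, 2.0, 0.0]])  # 2 users x 3 items, toy ratings
k, reg = 2, 0.01
rng = np.random.default_rng(0)
U, V = rng.normal(size=(2, k)), rng.normal(size=(3, k))
for _ in range(5):
    # fix V and solve (V^T V + reg*I) u_i = V^T r_i for every user i
    U = np.linalg.solve(V.T @ V + reg * np.eye(k), V.T @ R_toy.T).T
    # fix U and solve the analogous system for every item
    V = np.linalg.solve(U.T @ U + reg * np.eye(k), U.T @ R_toy).T
print(np.round(U @ V.T, 2))  # reconstructed rating matrix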
from implicit.als import AlternatingLeastSquares
import os
import numpy as np
# recommended part from the implicit package:
os.environ["OPENBLAS_NUM_THREAD"] = "1"
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
os.environ["MKL_NUM_THREADS"] = "1"
# ALS __init__ parameters:
# 1. factors : the dimension of the user/item latent vectors.
# 2. regularization : regularization strength to prevent overfitting; 0.01 is a reasonable default.
# 3. use_gpu : whether to use the GPU.
# 4. iterations : number of training passes (like 'epochs').
als_model = AlternatingLeastSquares(
factors=100, regularization=0.01, use_gpu=False, iterations=15, dtype=np.float32
)
# The ALS model expects an item x user matrix as input, so we transpose the CSR matrix.
csr_data_transpose = csr_data.T
csr_data_transpose
# # Train the ALS model (it will take a few minutes)
als_model.fit(csr_data_transpose)
# # The expected preference for a given product
yohann, monsterUltra = user_to_idx["yohann"], product_to_idx["B00MEFXEB6"]
yohann_vector, monster_vector = (
als_model.user_factors[yohann],
als_model.item_factors[monsterUltra],
)
yohann_vector
monster_vector
# the dot product of Yohann and Monster vector (my preference for Monster drink)
np.dot(yohann_vector, monster_vector)
# the expected preference of Yohann for the KIND bar (which was not in my favorites list)
kindbar = product_to_idx["B00BUKL666"]
kindbar_vector = als_model.item_factors[kindbar]
np.dot(yohann_vector, kindbar_vector)
# # Recommendation from the model 1 (based on the similar product)
# Get recommendation similar to 'Kindbar'
kindbar = "B00BUKL666"
product_id = product_to_idx[kindbar]
similar_food = als_model.similar_items(product_id, N=15)
similar_food
# Convert the indices to ASIN numbers
idx_to_product = {v: k for k, v in product_to_idx.items()}
[idx_to_product[i[0]] for i in similar_food]
# # --------------------------------------------------------
# # Some recommendations seem reasonable:
# * https://www.amazon.com/dp/B00F6UH8JK - Hershey Special Dark Chocolate Topping
# * https://www.amazon.com/dp/B00F78I7ZK - Candy coated almonds
# # Some are not:
# * https://www.amazon.com/dp/B007NJFQ1O - Badia Mojo Marina
# * https://www.amazon.com/dp/B001F0BEZE - Biscuit cut Ham
# # ---------------------------------------------------------
# # Recommendation from the model 2 (the food I might like)
user = user_to_idx["yohann"]
food_recommended = als_model.recommend(
user, csr_data, N=20, filter_already_liked_items=True
)
food_recommended
[idx_to_product[i[0]] for i in food_recommended]
# # --------------------------------------------------------
# # Some recommendations seem reasonable:
# * https://www.amazon.com/dp/B00HNTPF7E - organic coconut oil (different brand)
# * https://www.amazon.com/dp/B014LT0712 - organic matcha powder (different type)
# * https://www.amazon.com/dp/B001XUO8AY - tonic water
# # Some are not:
# * https://www.amazon.com/dp/B0015DGDR0 - Mentos mint flavor
# # --------------------------------------------------------
# Contribution of the Coconut Oil for this recommendation
coconut = product_to_idx["B000H2XXRS"]
explain = als_model.explain(user, csr_data, itemid=coconut)
[(idx_to_product[i[0]], i[1]) for i in explain[1]]
|
from collections import Counter
import cv2
import os
import glob
import skimage
import numpy as np
import pandas as pd
import seaborn as sn
# import preprocessing  # local helper module, not used below
from tqdm import tqdm
from PIL import Image
from os import listdir
import matplotlib.pyplot as plt
from skimage.transform import resize
from collections import Counter
sn.set()
from sklearn.svm import SVC # SVC
from sklearn import metrics
from sklearn.utils import shuffle
from xgboost import XGBClassifier # XGBClassifier
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import MinMaxScaler, LabelBinarizer
from sklearn.ensemble import AdaBoostClassifier # AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier # KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier # RandomForestClassifier
from sklearn.model_selection import train_test_split
import tensorflow
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.metrics import AUC
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.applications.vgg16 import VGG16 # VGG16
from tensorflow.keras.applications.vgg19 import VGG19 # VGG19
from tensorflow.keras.applications.resnet50 import ResNet50 # ResNet50
from tensorflow.keras.applications.xception import Xception # Xception
from tensorflow.keras.applications.mobilenet import MobileNet # MobileNet
from tensorflow.keras.applications.nasnet import NASNetMobile # NASNetMobile
from tensorflow.keras.applications.densenet import DenseNet169 # DenseNet169
from tensorflow.keras.applications.densenet import DenseNet121 # DenseNet121
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 # MobileNetV2
from tensorflow.keras.applications.inception_v3 import InceptionV3 # InceptionV3
from tensorflow.keras.layers import (
Input,
Dense,
Dropout,
BatchNormalization,
Flatten,
Activation,
GlobalAveragePooling2D,
Conv2D,
MaxPooling2D,
)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
link_t = "/kaggle/input/ddsm-yolo-512/ddsm_512/train/images/"
train_link = os.listdir("/kaggle/input/ddsm-yolo-512/ddsm_512/train/images")
train_link = pd.DataFrame(train_link, columns=["fileName"])
train_link["link"] = train_link["fileName"].apply(lambda x: link + x)
train_link
link_v = "/kaggle/input/ddsm-yolo-512/ddsm_512/val/images/"
val_link = os.listdir("/kaggle/input/ddsm-yolo-512/ddsm_512/val/images")
val_link = pd.DataFrame(val_link, columns=["fileName"])
val_link["link"] = val_link["fileName"].apply(lambda x: link_v + x)
val_link
link_image = pd.concat([train_link, val_link])
link_image["mask"] = link_image["fileName"].apply(lambda x: x.split("_")[-1:][0])
link_image
df = pd.read_excel("/kaggle/input/miniddsm2/MINI-DDSM-Complete-JPEG-8/DataWMask.xlsx")
df
df_concat = pd.merge(df, link_image, on="fileName")
df_concat = df_concat[["fileName", "Status", "link"]]
df_concat
label_map = {"Benign": 0, "Cancer": 1, "Normal": 2}
text_labels = df_concat["Status"].values
# map the text labels to integers
def apply_mapping(label):
return label_map.get(label, -1)
int_labels = list(map(apply_mapping, text_labels))
df_concat["labels"] = int_labels
df_concat
X = []
for i in df_concat["link"]:
X.append(cv2.imread(i))
Y = df_concat["labels"]
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import numpy as np
X = np.array(X)
Y = np.array(Y)
x_train, x_test, y_train, y_test = train_test_split(
X, Y, test_size=0.15, random_state=2021, shuffle=True
)
# cancer_prediction_cnn(x_train, y_train, x_test, y_test)
y_train
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPool2D, Flatten
from keras import optimizers
from keras import losses
from sklearn import metrics
rows, cols, color = x_train[0].shape
print(x_train[0].shape)
base_model = ResNet50(input_shape=(512, 512, 3), weights="imagenet", include_top=False)
model = Sequential()
model.add(base_model)
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(3, activation="softmax"))
for layer in base_model.layers:
layer.trainable = False
model.summary()
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
es = EarlyStopping(
monitor="val_loss", mode="min", patience=6, restore_best_weights=True, verbose=1
)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])  # integer labels with a 3-way softmax need sparse categorical cross-entropy
history = model.fit(
x_train,
y_train,
validation_split=0.15,
shuffle=True,
epochs=3,
batch_size=64,
callbacks=[es],
)
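# The three classes are usually not balanced; a hedged sketch of how per-class weights could
# have been computed with compute_class_weight (imported above) and passed to model.fit via its
# class_weight argument (not done in this run).
from sklearn.utils.class_weight import compute_class_weight
weights = compute_class_weight(class_weight="balanced", classes=np.unique(y_train), y=y_train)
class_weight_dict = dict(enumerate(weights))
print(class_weight_dict)
# model.fit(..., class_weight=class_weight_dict) would up-weight the rarer classes in the loss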
loss_value, accuracy = model.evaluate(x_test, y_test)
print("Test_loss_value = " + str(loss_value))
print("test_accuracy = " + str(accuracy))
# print(model.predict(x_test))
# model.save('breast_cance_model.h5')
import pickle
with open("history1.dat", "wb") as f:
    pickle.dump(history.history, f)  # save_dictionary was undefined; pickle is used instead
# %% PLOTTING RESULTS (Train vs Validation)
import matplotlib.pyplot as plt
def Train_Val_Plot(acc, val_acc, loss, val_loss):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 10))
fig.suptitle(" MODEL'S METRICS VISUALIZATION ")
ax1.plot(range(1, len(acc) + 1), acc)
ax1.plot(range(1, len(val_acc) + 1), val_acc)
ax1.set_title("History of Accuracy")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Accuracy")
ax1.legend(["training", "validation"])
ax2.plot(range(1, len(loss) + 1), loss)
ax2.plot(range(1, len(val_loss) + 1), val_loss)
ax2.set_title("History of Loss")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Loss")
ax2.legend(["training", "validation"])
plt.show()
Train_Val_Plot(
history.history["accuracy"],
history.history["val_accuracy"],
history.history["loss"],
history.history["val_loss"],
)
y_pred_prb = model.predict(x_test)
y_pred = np.argmax(y_pred_prb, axis=1)  # predict_classes/predict_proba were removed from tf.keras Sequential
# def print_performance_metrics(y_test,y_pred):
# """
# parameters
# ----------
# y_test : actual label (must be in non-one hot encoded form)
# y_pred_test : predicted labels (must be in non-one hot encoded form, common output of predict methods of classifers)
# returns
# -------
# prints the accuracy, precision, recall, F1 score, ROC AUC score, Cohen Kappa Score, Matthews Corrcoef and classification report
# """
target = ["B", "M"]
from sklearn import metrics
print("Accuracy:", np.round(metrics.accuracy_score(y_test, y_pred), 4))
print(
"Precision:",
np.round(metrics.precision_score(y_test, y_pred, average="weighted"), 4),
)
print("Recall:", np.round(metrics.recall_score(y_test, y_pred, average="weighted"), 4))
print("F1 Score:", np.round(metrics.f1_score(y_test, y_pred, average="weighted"), 4))
print(
"ROC AUC Score:",
np.round(
metrics.roc_auc_score(
y_test, y_pred_prb, multi_class="ovo", average="weighted"
),
4,
),
)
print("Cohen Kappa Score:", np.round(metrics.cohen_kappa_score(y_test, y_pred), 4))
print(
"\t\tClassification Report:\n",
metrics.classification_report(y_test, y_pred, target_names=target),
)
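# A quick visual check (a sketch using the confusion_matrix and seaborn imports from above)
# of where the per-class errors occur.
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(6, 5))
sn.heatmap(cm, annot=True, fmt="d", xticklabels=target, yticklabels=target)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()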
|
from pathlib import Path
import json
import math
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchinfo
from tqdm import tqdm
import onnx
import onnxruntime
import onnx_tf
import tensorflow as tf
import tflite_runtime.interpreter as tflite
INPUT_DIR = Path("/kaggle/input/")
ASL_DIR = INPUT_DIR / "asl-signs"
DATASET_DIR = INPUT_DIR / "asl-dataset"
device = (
"cuda"
if torch.cuda.is_available()
else "mps"
if torch.backends.mps.is_available()
else "cpu"
)
data = np.load(INPUT_DIR / "asl-dataset" / "data.npz")
tensor = torch.tensor(data[data.files[0]])
tensor
with (INPUT_DIR / "asl-dataset" / "landmarks.json").open() as f:
landmarks = json.load(f)
points = torch.cat(
[torch.tensor(value).unfold(0, 3, 1) for value in landmarks.values()]
)
view = tensor[:, points]
view
vectors = torch.stack(
(view[:, :, 1] - view[:, :, 0], view[:, :, 2] - view[:, :, 1]), dim=2
).float()
angles = torch.div(
vectors.prod(dim=2).sum(dim=2), vectors.square().sum(dim=3).sqrt().prod(dim=2)
).acos()
torch.cat((tensor.flatten(1), angles), 1).size()
signs_df = pd.read_csv(DATASET_DIR / "train.csv")
signs_df
train_df = signs_df.sample(frac=0.8)
test_df = signs_df.drop(train_df.index).sample(frac=1)
train_df.shape, test_df.shape
class ASLDataset(Dataset):
def __init__(self, dataset_df, agg, preload=None):
files = np.load(DATASET_DIR / "data.npz")
if len(preload or []) != len(dataset_df):
preload = None
self.items = preload or [
torch.Tensor(files[str(i)]).to(device)
for i in tqdm(
dataset_df.sequence_id, desc="Loading data", total=len(dataset_df)
)
]
self.labels = torch.Tensor(dataset_df.label.values).long().to(device)
self.agg = agg
def __len__(self):
return len(self.items)
def __getitem__(self, index):
return self.agg(self.items[index]).float(), self.labels[index]
ROWS_PER_FRAME = 543 # number of landmarks per frame
def load_relevant_data_subset(pq_path):
data_columns = ["x", "y", "z"]
data = pd.read_parquet(pq_path, columns=data_columns)
n_frames = int(len(data) / ROWS_PER_FRAME)
data = data.values.reshape(n_frames, ROWS_PER_FRAME, len(data_columns))
return data.astype(np.float32)
POINTS = torch.cat(
[torch.tensor(value).unfold(0, 3, 1) for value in landmarks.values()]
)
INDICES = np.load(DATASET_DIR / "indices.npy")
class Preprocess(nn.Module):
def __init__(self, agg):
super().__init__()
self.agg = agg
def forward(self, x):
x = x[:, INDICES]
x = self.agg(x)
return x
class PositionalEncoding(nn.Module):
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)
)
pe = torch.zeros(max_len, 1, d_model)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.pe[: x.size(0)]
return self.dropout(x).to(device)
# return self.pe
def pad_batch(batch):
max_frames = max([len(entry) for entry in batch])
size = (max_frames, len(batch), len(batch[0][0]))
padded = torch.zeros(size).to(device)
mask = torch.full((len(batch), max_frames), True).to(device)
for index, entry in enumerate(batch):
frames = len(entry)
padded[:frames, index] = entry
mask[index, :frames] = False
return padded, mask
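# A quick illustration (toy tensors with assumed shapes) of pad_batch: two sequences of
# different lengths are zero-padded to the longer one, and the mask marks padded frames with True.
toy_batch = [torch.ones((2, 534)).to(device), torch.ones((4, 534)).to(device)]
toy_padded, toy_mask = pad_batch(toy_batch)
print(toy_padded.shape)  # torch.Size([4, 2, 534]) -> [max_frames, batch, features]
print(toy_mask)  # [[False, False, True, True], [False, False, False, False]]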
class TransformerModel(nn.Module):
def __init__(
self,
in_tokens: int = 534,
d_model: int = 1024,
out_tokens: int = 250,
nhead: int = 8,
d_ff: int = 2048,
nlayers: int = 1,
dropout: float = 0.5,
):
super().__init__()
self.model_type = "Transformer"
        self.in_tokens = in_tokens
        self.d_model = d_model
        self.out_tokens = out_tokens
        self.nhead = nhead
        self.d_ff = d_ff
        self.nlayers = nlayers
        self.dropout = dropout
self.embed = nn.Linear(self.in_tokens, self.d_model)
self.pos_encoder = PositionalEncoding(self.d_model, self.dropout)
encoder_layers = nn.TransformerEncoderLayer(
self.d_model, self.nhead, self.d_ff, self.dropout
)
self.transformer_encoder = nn.TransformerEncoder(encoder_layers, self.nlayers)
self.decoder = nn.Linear(self.d_model, self.out_tokens)
def forward(self, X: [torch.Tensor]) -> torch.Tensor:
src, mask = pad_batch(X)
src = self.embed(src)
src = self.pos_encoder(src)
src = torch.cat([torch.zeros((1, src.size(1), self.d_model)).to(src), src], 0)
mask = torch.cat([torch.full((src.size(1), 1), False).to(mask), mask], 1)
output = self.transformer_encoder(src, src_key_padding_mask=mask)
output = output[0]
output = self.decoder(output)
return output
@staticmethod
def agg(x: torch.Tensor) -> torch.Tensor:
# Find "angles"
view = x[:, POINTS]
vectors = torch.stack(
(view[..., 1, :] - view[..., 0, :], view[..., 2, :] - view[..., 1, :]),
dim=-2,
).float()
angles = torch.div(
vectors.prod(dim=-2).sum(dim=-1),
vectors.square().sum(dim=-1).sqrt().prod(dim=-1),
) # .acos()
# Coordinate normalisation
coord_counts = (~x.isnan()).sum(dim=(0, 1))
coord_no_nan = x.clone()
coord_no_nan[coord_no_nan.isnan()] = 0
coord_mean = coord_no_nan.sum(dim=(0, 1)) / coord_counts
normed = x - coord_mean
# normed[normed.isnan()] = 0
# normed = nn.functional.normalize(normed, dim=-1)
# Coords + Angles
tensor = torch.cat((normed.flatten(-2), angles), 1)
tensor[tensor.isnan()] = 0
return tensor
TransformerModel.agg(tensor).size()
x = torch.Tensor([[1, 1, 0], [1, 2, 3], [3, 0, 0]])
mask = torch.Tensor(
[[False, False, True], [False, False, False], [False, True, True]]
).bool()
torch.rand((23, 32, 45)).sum(0).size()
(~mask).sum(1)
x.sum(1) / (~mask).sum(1)
preprocess = Preprocess(TransformerModel.agg)
preprocess(
torch.Tensor(
load_relevant_data_subset(
"/kaggle/input/asl-signs/train_landmark_files/16069/100015657.parquet"
)
)
)
if "train_preload" not in locals():
train_preload = None
if "test_preload" not in locals():
test_preload = None
train_dataset = ASLDataset(train_df, TransformerModel.agg, train_preload)
train_preload = train_dataset.items
test_dataset = ASLDataset(test_df, TransformerModel.agg, test_preload)
test_preload = test_dataset.items
len(train_dataset), len(test_dataset)
def collate(batch):
transposed = list(zip(*batch))
sequence = list(transposed[0])
X = [
torch.Tensor(x).nan_to_num(nan=0).flatten(1).float().to(device)
for x in sequence
]
y = torch.Tensor(transposed[1]).long().to(device)
return X, y
dataloader = DataLoader(train_dataset, batch_size=128, collate_fn=collate)
train_dataloader = DataLoader(
train_dataset, batch_size=128, shuffle=True, collate_fn=collate
)
test_dataloader = DataLoader(
test_dataset, batch_size=128, shuffle=True, collate_fn=collate
)
model = TransformerModel(
in_tokens=534,
d_model=1024,
out_tokens=250,
nhead=8,
d_ff=1024,
nlayers=1,
dropout=0.4,
).to(device)
learning_rate = 5.0
weight_decay = 1.5 * 1e-3
epochs = 32
loss_fn = nn.CrossEntropyLoss()
# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
def train_val_loop(epoch, train_dataloader, val_dataloader, model, loss_fn, optimizer):
total_batches = len(train_dataloader)
train_size, train_batches = 0, 0
train_loss, train_correct = 0, 0
with tqdm(desc=f"Epoch {epoch}", total=total_batches) as bar:
for batch, (X, y) in enumerate(train_dataloader):
start = time.time()
# Compute prediction and loss
pred = model(X)
loss = loss_fn(pred, y)
train_loss += loss.item()
train_correct += (pred.argmax(1) == y).type(torch.float).sum().item()
train_size += len(y)
train_batches += 1
# print(any(x.isnan().any() for x in X), pred.isnan().any())
# Backpropagation
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
# scheduler.step(epoch + batch / total_batches)
bar.update()
# if batch % 10 == 0:
bar.set_postfix(
accuracy=train_correct / train_size,
loss=train_loss / train_batches,
lr=scheduler.get_last_lr(),
)
bar.set_postfix(
accuracy=train_correct / train_size, loss=train_loss / train_batches
)
scheduler.step()
with torch.no_grad():
val_size, val_batches = 0, 0
val_loss, val_correct = 0, 0
for batch, (X, y) in enumerate(val_dataloader):
pred = model(X)
val_loss += loss_fn(pred, y).item()
val_correct += (pred.argmax(1) == y).type(torch.float).sum().item()
val_size += len(y)
val_batches += 1
if batch % 10 == 0 or batch + 1 == len(val_dataloader):
bar.set_postfix(
accuracy=train_correct / train_size,
loss=train_loss / train_batches,
val_accuracy=val_correct / val_size,
val_loss=val_loss / val_batches,
)
return (
train_correct / train_size,
train_loss / train_batches,
val_correct / val_size,
val_loss / val_batches,
)
epochs = 2
best_loss = float("inf")
saved_state = model.state_dict()
for epoch in range(epochs):
acc, loss, v_acc, v_loss = train_val_loop(
epoch, train_dataloader, test_dataloader, model, loss_fn, optimizer
)
if v_loss < best_loss:
best_loss = v_loss
saved_state = model.state_dict()
torch.save(saved_state, "model_weights.pth")
class EvalModel(TransformerModel):
# src should be a single sequence of frames of size [frames, 534]
def forward(self, src: torch.Tensor):
src = src.unsqueeze(1)
src = self.embed(src)
src = self.pos_encoder(src)
src = torch.cat([torch.zeros((1, 1, self.d_model)).to(device), src], 0)
output = self.transformer_encoder(src) # [frames+1,1,d_model]
output = output[0] # [1,d_model]
output = self.decoder(output) # [1,250]
output = output[0] # [250]
return output
eval_model = EvalModel(d_ff=1024)
eval_model.load_state_dict(saved_state)
eval_model.eval()
torchinfo.summary(eval_model, (105, 534))
# ### PyTorch → ONNX
preprocess.eval()
preprocess_sample = torch.rand((23, 543, 3)).to(
device
) # 23 is an arbitrary number of frames, 543 is the number of rows/landmarks, 3 is the x, y, z columns
onnx_preprocess_path = "preprocess.onnx"
torch.onnx.export(
preprocess,
preprocess_sample,
onnx_preprocess_path,
opset_version=12,
input_names=["inputs"],
output_names=["outputs"],
dynamic_axes={"inputs": {0: "frames"}, "outputs": {0: "frames"}},
)
eval_model.eval()
model_sample = torch.rand((23, 534)).to(device)  # 23 is an arbitrary number of frames, 534 is the per-frame feature size after preprocessing
onnx_model_path = "model.onnx"
torch.onnx.export(
eval_model,
model_sample,
onnx_model_path,
opset_version=12,
input_names=["inputs"],
output_names=["outputs"],
dynamic_axes={"inputs": {0: "frames"}},
)
# Will raise an exception if checks fail
onnx_preprocess = onnx.load(onnx_preprocess_path)
onnx.checker.check_model(onnx_preprocess)
onnx_model = onnx.load(onnx_model_path)
onnx.checker.check_model(onnx_model)
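# Optional sanity check (a sketch; onnxruntime is already imported above): run the exported
# preprocessing graph with onnxruntime and compare it against the PyTorch module on the same sample.
ort_session = onnxruntime.InferenceSession(onnx_preprocess_path)
ort_out = ort_session.run(["outputs"], {"inputs": preprocess_sample.cpu().numpy()})[0]
torch_out = preprocess(preprocess_sample).detach().cpu().numpy()
print(np.abs(ort_out - torch_out).max())  # should be close to zero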
# ### ONNX → Tensorflow
tf_preprocess_path = "tf_preprocess"
tf_preprocess = onnx_tf.backend.prepare(onnx_preprocess)
tf_preprocess.export_graph(tf_preprocess_path)
tf_model_path = "tf_model"
tf_model = onnx_tf.backend.prepare(onnx_model)
tf_model.export_graph(tf_model_path)
class InferenceModel(tf.Module):
def __init__(self):
super().__init__()
self.preprocess = tf.saved_model.load(tf_preprocess_path)
self.model = tf.saved_model.load(tf_model_path)
self.preprocess.trainable = False
self.model.trainable = False
@tf.function(
input_signature=[
tf.TensorSpec(shape=[None, 543, 3], dtype=tf.float32, name="inputs")
]
)
def call(self, x):
outputs = {}
preprocessed = self.preprocess(inputs=x)["outputs"]
pred = self.model(inputs=preprocessed)["outputs"]
# pred = tf.nn.softmax(pred)
return {"outputs": pred}
tf_inference = InferenceModel()
tf_inference_path = "tf_inference"
tf.saved_model.save(
tf_inference, tf_inference_path, signatures={"serving_default": tf_inference.call}
)
# ### Tensorflow → TFLite
model_converter = tf.lite.TFLiteConverter.from_saved_model(
tf_inference_path
) # path to the SavedModel directory
tflite_model = model_converter.convert()
# Save the model.
with open("model.tflite", "wb") as f:
f.write(tflite_model)
# The submission file (submission.zip) is created by compressing the TFLite model.
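# A minimal sketch of packaging the TFLite file (the archive name is assumed from the comment above).
import zipfile
with zipfile.ZipFile("submission.zip", "w") as zf:
    zf.write("model.tflite")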
# ## Evaluating the TFLite model
import tflite_runtime.interpreter as tflite
interpreter = tflite.Interpreter("model.tflite")
found_signatures = list(interpreter.get_signature_list().keys())
# if REQUIRED_SIGNATURE not in found_signatures:
# raise KernelEvalException('Required input signature not found.')
frames = load_relevant_data_subset(
"/kaggle/input/asl-signs/train_landmark_files/16069/100015657.parquet"
)
prediction_fn = interpreter.get_signature_runner("serving_default")
output = prediction_fn(inputs=frames)
sign = np.argmax(output["outputs"])
print(sign, output["outputs"].shape)
interpreter.get_input_details()
interpreter.get_output_details()
interpreter.get_signature_list()
tests = []
for index, row in signs_df.iloc[:50].iterrows():
frames = load_relevant_data_subset(ASL_DIR / row.path)
interpreter = tflite.Interpreter("model.tflite")
prediction_fn = interpreter.get_signature_runner("serving_default")
output = prediction_fn(inputs=frames)
# output = tf_inference.call(frames)
# output = model(preprocess(torch.Tensor(frames)).unsqueeze(0))
sign = np.argmax(output["outputs"])
# sign = torch.argmax(output)
tests.append((sign, row.label))
tests
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("../input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("../input/titanic/test.csv")
test_data.head()
train_data.shape
test_data.shape
train_data.dtypes
# * Numerical: PassengerId, Pclass, Age, SibSp, Parch, Fare
# * Categorical: Name, Sex, Ticket, Cabin, Embarked
# * Target: Survived
train_data.describe()
train_data.count()
# 1. Missing values: Age, Cabin and Embarked
# 2. Unwanted columns or features: PassengerId, Ticket (does not provide much information about survival) and Cabin (has a high number of missing values)
# # Dropping the unwanted columns - PassengerId, Ticket, Cabin
train_data.drop(["PassengerId", "Ticket", "Cabin"], axis=1, inplace=True)
train_data
# # Missing Value Imputation
# impute median(Age) for NAs in the Age column
train_data["Age"] = train_data["Age"].fillna(value=train_data["Age"].median())
# impute mode(Embarked), which is 'S', for NAs in the Embarked column
train_data["Embarked"] = train_data["Embarked"].fillna("S")
train_data.count()
train_data.head()
# impute median(Age) for NAs in the Age column
test_data["Age"] = test_data["Age"].fillna(value=test_data["Age"].median())
# impute median(Fare) for NAs in the Fare column
test_data["Fare"] = test_data["Fare"].fillna(value=test_data["Fare"].median())
test_data.count()
# feature selection
features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Embarked"]
train_X = train_data[features]
train_X
test_X = test_data[features]
test_X
train_y = train_data["Survived"]
train_y
# ## Decision Tree Model
from sklearn.tree import DecisionTreeClassifier
decision_tree_model = DecisionTreeClassifier(random_state=0)
X = pd.get_dummies(train_X)
decision_tree_model.fit(X, train_y)
tX = pd.get_dummies(test_X)
pred_y = decision_tree_model.predict(tX)
decision_tree_model_accuracy = round(decision_tree_model.score(X, train_y) * 100, 2)  # training accuracy
decision_tree_model_accuracy
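# Note: calling pd.get_dummies separately on the train and test frames can yield mismatched
# columns if a category appears in only one split. A small sketch (not applied above) of how
# the test dummies could be aligned to the training columns:
tX_aligned = tX.reindex(columns=X.columns, fill_value=0)
tX_aligned.shape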
# ## Random Forest Model
from sklearn.ensemble import RandomForestClassifier
random_forest_model = RandomForestClassifier(random_state=0)
random_forest_model.fit(X, train_y)
predictions = random_forest_model.predict(tX)
random_forest_model_accuracy = round(random_forest_model.score(X, train_y) * 100, 2)  # training accuracy
random_forest_model_accuracy
# ## Support Vector Model
from sklearn import svm
clf = svm.SVC(kernel="poly", random_state=0)
clf.fit(X, train_y)
pred_y = clf.predict(tX)
clf_accuracy = round(clf.score(X, train_y) * 100, 2)  # training accuracy
clf_accuracy
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import re
import chardet
import operator
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from nltk import pos_tag
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import LSTM, Dense, Bidirectional
from tensorflow.keras.layers import Dropout, SpatialDropout1D
from tqdm import tqdm
from wordcloud import WordCloud, STOPWORDS
from sklearn.model_selection import train_test_split
with open("../input/covid-19-nlp-text-classification/Corona_NLP_train.csv", "rb") as f:
result = chardet.detect(f.read())
print(result)
f.close()
# other encodings worth trying: cp1254, latin-1 and iso-8859-1
train_data = pd.read_csv(
"../input/covid-19-nlp-text-classification/Corona_NLP_train.csv",
encoding="iso-8859-1",
)
test_data = pd.read_csv(
"../input/covid-19-nlp-text-classification/Corona_NLP_test.csv",
encoding="iso-8859-1",
)
train_data.head()
train_data.tail()
test_data.head()
train_data.isna().sum()
train_data.shape, test_data.shape
train_data.drop(["UserName", "ScreenName", "Location", "TweetAt"], axis=1, inplace=True)
train_data.head()
train_data.Sentiment.value_counts().plot(kind="bar")
pd.set_option("display.max_colwidth", None)
train_data.OriginalTweet[0:5]
# extract hashtag
train_data["hashtag"] = train_data["OriginalTweet"].apply(
lambda x: re.findall(r"#(\w+)", x)
)
# data pre processing
# extract url used
train_data["uri"] = train_data["OriginalTweet"].apply(
lambda x: re.findall(
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+", x
)
)
train_data["handler"] = train_data["OriginalTweet"].apply(
lambda x: re.findall(r"@(\w+)", x)
)
# Lemmatize Words
def get_pos_tag(tag):
if tag.startswith("J"):
return wordnet.ADJ
elif tag.startswith("V"):
return wordnet.VERB
elif tag.startswith("N"):
return wordnet.NOUN
elif tag.startswith("R"):
return wordnet.ADV
    else:
        # the default POS for lemmatization is noun
        return wordnet.NOUN
lemmatizer = WordNetLemmatizer()
pos_tag(["going"])
# clean the data now
regex = [
r"<[^>]+>", # HTML tags
r"@(\w+)", # @-mentions
r"#(\w+)", # hashtags
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+", # URLs
r"[^0-9a-z #+_\\r\\n\\t]", # BAD SYMBOLS
]
REPLACE_URLS = re.compile(
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+"
)
REPLACE_HASH = re.compile(r"#(\w+)")
REPLACE_AT = re.compile(r"@(\w+)")
REPLACE_HTML_TAGS = re.compile(r"<[^>]+>")
# REPLACE_DIGITS = re.compile(r'\d+')
# REPLACE_BY = re.compile(r"[/(){}\[\]\|,;.:?\-\'\"$]")
REPLACE_BY = re.compile(r"[^a-z0-9\-]")
STOPWORDS = set(stopwords.words("english"))
# tokens_re = re.compile(r'('+'|'.join(regex)+')', re.VERBOSE | re.IGNORECASE)
def clean_text(text):
text = text.lower()
text = REPLACE_HTML_TAGS.sub(" ", text)
text = REPLACE_URLS.sub("", text)
text = REPLACE_HASH.sub("", text)
text = REPLACE_AT.sub("", text)
# text = REPLACE_DIGITS.sub(' ', text)
text = REPLACE_BY.sub(" ", text)
text = " ".join(
lemmatizer.lemmatize(word.strip(), get_pos_tag(pos_tag([word.strip()])[0][1]))
for word in text.split()
if word not in STOPWORDS and len(word) > 3
)
return text
train_data["Tweet"] = train_data["OriginalTweet"].apply(clean_text)
test_data["Tweet"] = test_data["OriginalTweet"].apply(clean_text)
# Now lets do some eda on the data
# how sentiments are related with the hashtag and user handler
# ext_pos = train_data[train_data['Sentiment'] == 'Extremely Positive']
pos = train_data[train_data["Sentiment"] == "Positive"]
neu = train_data[train_data["Sentiment"] == "Neutral"]
neg = train_data[train_data["Sentiment"] == "Negative"]
# ext_neg = train_data[train_data["Sentiment"] == "Extremely Negative"]
for param in ["hashtag", "handler", "uri"]:
for df in [pos, neu, neg]: # , ext_neg, ext_pos,]:
print(
"------------------------------TOP {0} IN {1}------------------------------".format(
param, df["Sentiment"].values[0]
)
)
hash_count = {}
for tag in df[param]:
for value in tag:
hash_count[value] = hash_count.get(value, 0) + 1
print(sorted(hash_count.items(), key=operator.itemgetter(1))[-10:])
print("\n")
pd.set_option("display.max_colwidth", None)
train_data.Tweet[50:55]
# word Cloud
# for color, df in {"green":ext_pos, "yellow":pos, "white":neu, "pink":neg, "orange":ext_neg}.items():
for color, df in {"yellow": pos, "white": neu, "pink": neg}.items():
plt.figure(figsize=(18, 18))
wc_pos = WordCloud(
width=400, height=250, min_font_size=5, background_color=color, max_words=10000
).generate(" ".join(df["Tweet"]))
plt.title("word cloud for {0}".format(df["Sentiment"].values[0]), fontsize=25)
plt.imshow(wc_pos, interpolation="bilinear")
# maximum number of tokens in the cleaned tweets (used as the padding length)
import numpy as np
max_len = np.max(train_data["Tweet"].apply(lambda x: len(x.split())))
max_len
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_data["Tweet"].values)
vocab_size = len(tokenizer.word_index) + 1
X = tokenizer.texts_to_sequences(train_data["Tweet"].values)
X = pad_sequences(X, maxlen=max_len, padding="post")
tokenizer.word_index
# pre process test data with param of train data
X_test = tokenizer.texts_to_sequences(test_data["Tweet"].values)
X_test = pad_sequences(X_test, maxlen=max_len, padding="post")
# load the GloVe vectors into a dictionary
# note: splitting each line on a single space avoids "ValueError: could not convert string to float: '.'" for tokens that contain whitespace
embeddings_index = {}
glovefile = open("../input/glove42b300dtxt/glove.42B.300d.txt", "r", encoding="utf-8")
for line in tqdm(glovefile):
values = line.split(" ")
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
glovefile.close()
print("Found %s word vectors." % len(embeddings_index))
# create an embedding matrix for the words we have in the dataset
embedding_matrix = np.zeros((len(tokenizer.word_index) + 1, 300))
for word, index in tqdm(tokenizer.word_index.items()):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
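# A quick coverage check (a sketch): how many vocabulary words actually received a pretrained GloVe vector.
covered = sum(1 for w in tokenizer.word_index if w in embeddings_index)
print("{}/{} vocabulary words covered by GloVe".format(covered, len(tokenizer.word_index)))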
X.shape
X_test.shape
encoding = {
"Extremely Negative": 0,
"Negative": 0,
"Neutral": 1,
"Positive": 2,
"Extremely Positive": 2,
}
labels = ["Negative", "Neutral", "Positive"]
train_data["Sentiment"].replace(encoding, inplace=True)
test_data["Sentiment"].replace(encoding, inplace=True)
labels = pd.get_dummies(train_data["Sentiment"]).columns
y = pd.get_dummies(train_data["Sentiment"]).values
y
y_test = pd.get_dummies(test_data["Sentiment"]).values
y_test.shape
# train valid split
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.20, random_state=12
)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
vector_features = 300
model = Sequential()
model.add(
Embedding(
vocab_size,
vector_features,
input_length=X.shape[1],
weights=[embedding_matrix],
trainable=False,
)
)
model.add(SpatialDropout1D(0.2))
model.add(
Bidirectional(
LSTM(300, activation="relu", dropout=0.3, recurrent_dropout=0.3),
input_shape=(vector_features, vocab_size),
)
)
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.8))
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.8))
model.add(Dense(3, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
epochs = 100
batch_size = 512
history = model.fit(
X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(X_valid, y_valid),
callbacks=[EarlyStopping(monitor="val_loss", patience=3, min_delta=0.0001)],
)
# Model Accuracy
accuracy = history.history["accuracy"]
loss = history.history["loss"]
val_accuracy = history.history["val_accuracy"]
val_loss = history.history["val_loss"]
print(
    "training accuracy {0:.2f}% and training loss {1:.4f}".format(
        accuracy[-1] * 100, loss[-1]
    )
)
print(
    "validation accuracy {0:.2f}% and validation loss {1:.4f}".format(
        val_accuracy[-1] * 100, val_loss[-1]
    )
)
# plot
plt.plot(accuracy, "g", label="training accuracy")
plt.plot(val_accuracy, "r", label="validation accuracy")
plt.legend()
plt.show()
plt.plot(loss, "g", label="training loss")
plt.plot(val_loss, "r", label="validation loss")
plt.legend()
plt.show()
# y_pred = model.predict_classes(X_test)
# y_pred = np.argmax(model.predict(X_test), axis=-1)
y_pred = model.predict(X_test)
print(labels[np.argmax(y_test, 1)][100:120])
print(labels[np.argmax(y_pred, 1)][100:120])
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(np.argmax(y_test, 1), np.argmax(y_pred, 1))
print(cm)
sns.heatmap(cm, annot=True)
|
import numpy as np
import scipy.io
import scipy
import numpy.matlib
import scipy.stats
from sklearn.decomposition import PCA
import sys
import scipy.sparse as sps
import time
import warnings
warnings.filterwarnings("ignore")
def Config(source=None, target=None):
"""Initiazation of necessary parametrs for compuatation
Input::
source(string) : source domain index (Default : webcam)
target(string) : target domain index (Default : dslr)
Output::
param : Output dictionary containing parameters"""
amazon = 0
webcam = 1
dslr = 2
caltech = 3
param = {}
param["domains"] = [amazon, webcam, dslr, caltech]
param["domain_names"] = ["amazon", "webcam", "dslr", "caltech"]
param["categories"] = [
"back_pack",
"bike",
"calculator",
"headphones",
"keyboard",
"laptop_computer",
"monitor",
"mouse",
"mug",
"projector",
]
param["DATA_DIR"] = "/kaggle/input/domain-adaptain-splits/data/original_data/"
param["held_out_categories"] = False
if source is None:
param["source"] = webcam
else:
param["source"] = source
if target is None:
param["target"] = dslr
else:
param["target"] = target
param["num_trials"] = 20
param["dim"] = 20
param["C_s"] = 0.05
param["C_t"] = 1
param["mmdt_iter"] = 2
if param["source"] == amazon:
param["num_train_source"] = 20
else:
param["num_train_source"] = 8
param["num_train_target"] = 3
param[
"result_filename"
] = "/kaggle/input/domain-adaptain-splits/data/DataSplitsOfficeCaltech/SameCategory_{0}-{1}_{2}RandomTrials_10Categories.mat".format(
param["domain_names"][param["source"]],
param["domain_names"][param["target"]],
param["num_trials"],
)
param["telapsed"] = {}
return param
def NormData(fts):
"""Normalizes the data and returns the zscore"""
sqr_fts = np.sqrt(np.sum(fts**2, 1))
sqr_fts = np.array(sqr_fts).reshape(1, -1).T
# print(fts.shape[1] , sqr_fts.shape)
sqr_fts = np.matlib.repmat(sqr_fts, 1, fts.shape[1])
fts = fts / sqr_fts
return scipy.stats.zscore(fts)
def LoadOfficePlusCaltechData(foldername):
"""Loads the Office and Caltech Dataset and returns the Data and corresponding Labels
Input::
foldername(string) : Path to the dataset
Output::
Data(list) : SURF features of the dataset.
Labels(list) : Corresponding labels of the images in the dataset.
"""
domain_names = [
"amazon_SURF_L10.mat",
"webcam_SURF_L10.mat",
"dslr_SURF_L10.mat",
"Caltech10_SURF_L10.mat",
]
Data = []
Labels = []
for idx, name in enumerate(domain_names):
fullfilename = foldername + name
obj = scipy.io.loadmat(fullfilename)
fts = obj["fts"]
labels = obj["labels"]
fts = NormData(fts)
Data.append(fts)
Labels.append(labels)
return Data, Labels
def princomp(A):
"""Returns the principal component of the data."""
M = (A - np.mean(A.T, axis=1).reshape(1, -1)).T
[latent, coeff] = np.linalg.eig(np.cov(M))
score = np.dot(coeff.T, M) # projection of the data in the new space
args = np.argsort(-latent)
coeff = coeff[:, args]
return coeff, score, latent
param = Config()
[Data, Labels] = LoadOfficePlusCaltechData(param["DATA_DIR"])
from sklearn.svm import LinearSVC
import numpy as np
from scipy.io import loadmat
from tqdm import tqdm
import warnings
from sklearn.decomposition import PCA
warnings.filterwarnings("ignore")
import torch
from sklearn.metrics import hinge_loss
from torch import nn
from tqdm import tqdm
import sklearn
def trainsvm(data, labels, gamma=1e-4, c=1):
svm = LinearSVC(tol=gamma, C=c, penalty="l2", loss="hinge")
svm.fit(data, labels)
return svm
def normalize(data):
return sklearn.preprocessing.normalize(data)
def step1(itr, data_a, label_a, data_b, label_b):
augmented_X = np.vstack([data_a, data_b])
augmented_y = np.hstack([label_a, label_b])
svm = trainsvm(augmented_X, augmented_y)
return svm
def step2(itr, transformation_matrix, target_x, target_y, theta, beta, lr):
target_x = torch.tensor(np.double(target_x))
target_y = torch.tensor(target_y).long()
ly = -torch.ones((target_x.shape[0], theta.shape[0]))
theta = torch.tensor(np.double(theta))
beta = torch.tensor(np.double(beta))
ly[np.arange(0, target_x.shape[0]), target_y] = 1
transformation_matrix = torch.tensor(transformation_matrix)
for _ in range(10):
w = torch.tensor(transformation_matrix, requires_grad=True)
target = target_x @ w
        res = target @ theta.T + beta  # per-sample, per-class margins
err = torch.sum(torch.fmax(torch.zeros((res.shape)), 1 - res * ly))
        out = 0.5 * torch.norm(w, p="fro") ** 2 + err  # regularised objective (note: only err is backpropagated below)
# print(err)
err.backward()
transformation_matrix -= lr * w.grad
return transformation_matrix.detach().numpy(), err.item()
def adapt(data_a, label_a, data_b, label_b, tx=None, ty=None, iterations=10, lr=0.015):
n_features_a = data_a.shape[1]
n_features_b = data_b.shape[1]
if n_features_a == n_features_b:
transformation_matrix = np.eye(n_features_b)
else:
# transformation_matrix = np.ones((n_features_b , n_features_a))
transformation_matrix = np.random.random((n_features_b, n_features_a))
error = []
scores = []
for x in range(iterations):
transformed_b = data_b @ transformation_matrix
model = step1(x, data_a, label_a, transformed_b, label_b)
theta = model.coef_
beta = model.intercept_
transformation_matrix, err = step2(
x, transformation_matrix, data_b, label_b, theta, beta, lr
)
# print(dw)
# transformation_matrix -= lr * dw
error.append(err)
        if tx is not None:
ttx = tx @ transformation_matrix
scores.append(model.score(ttx, ty))
return transformation_matrix, model, error, scores
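# A minimal smoke test of adapt() on random data (purely illustrative shapes and 3 fake classes,
# not the Office/Caltech SURF features), showing the expected inputs and outputs.
_rng = np.random.RandomState(0)
toy_src, toy_src_y = _rng.rand(40, 20), _rng.randint(0, 3, 40)
toy_tgt, toy_tgt_y = _rng.rand(12, 20), _rng.randint(0, 3, 12)
W_toy, svm_toy, err_toy, _ = adapt(toy_src, toy_src_y, toy_tgt, toy_tgt_y, iterations=2)
print(W_toy.shape, err_toy[-1])  # (20, 20) transformation and the last hinge-loss value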
def UpdateLabelValues(labels, param):
"""Updates labels for the training source, target and testing target dataset."""
if "all_categories" not in param:
return labels
all_categories = param["all_categories"]
categories = param["categories"]
labels["train"]["source"] = UpdateLabels(
labels["train"]["source"], all_categories, categories
)
labels["train"]["target"] = UpdateLabels(
labels["train"]["target"], all_categories, categories
)
labels["test"]["target"] = UpdateLabels(
labels["test"]["target"], all_categories, categories
)
return labels
d1 = [0, 1, 2, 3]
d2 = [0, 1, 2, 3]
all_models = []
# Try all possible combinations of source and target
mapping = {0: "a", 1: "w", 2: "d", 3: "c"}
for src in d1:
for tar in d2:
if tar == src:
continue
def run(src, tar, lr, itr):
global param
param = Config(source=src, target=tar)
source_domain = param["source"]
target_domain = param["target"]
# Load splits based on filename and update train and test ids
splits = scipy.io.loadmat(param["result_filename"])
train_ids = splits["train"]
test_ids = splits["test"]
train_ids_source = train_ids[0][0][0][0]
train_ids_target = train_ids[0][0][1][0]
test_ids_source = test_ids[0][0][0][0]
test_ids_target = test_ids[0][0][1][0]
    elaps = 20
    accuracy = []  # collect one score per random trial (previously re-initialised inside the loop, so the mean covered only the last trial)
    for i in tqdm(range(0, elaps)):
        # Data Loading
        data = {}
        data["train"] = {}
        data["test"] = {}
data["train"]["source"] = Data[source_domain][train_ids_source[i] - 1][0]
data["train"]["target"] = Data[target_domain][train_ids_target[i] - 1][0]
data["test"]["target"] = Data[target_domain][test_ids_target[i] - 1][0]
labels = {}
labels["train"] = {}
labels["test"] = {}
labels["train"]["source"] = (
Labels[source_domain][train_ids_source[i] - 1][0].ravel() - 1
)
labels["train"]["target"] = (
Labels[target_domain][train_ids_target[i] - 1][0].ravel() - 1
)
labels["test"]["target"] = (
Labels[target_domain][test_ids_target[i] - 1][0].ravel() - 1
)
# labels = UpdateLabelValues(labels, param)
# if param['dim'] < np.shape(data['train']['source'])[1]:
# arr = np.array(data['train']['source'])
# arr = np.vstack((arr, data['train']['target']))
# arr = np.vstack((arr, data['test']['target']))
# arr= sklearn.preprocessing.normalize(arr)
# P, _, _ = princomp(arr)
# data['train']['source'] = normalize(np.matmul(data['train']['source'], P[:, :20]).real)
# data['train']['target'] = normalize(np.matmul(data['train']['target'], P[:, :20]).real)
# data['test']['target'] = normalize(np.matmul(data['test']['target'], P[:, :20]).real)
# Main function call for getting the trained models
w, model, error, scores = adapt(
data["train"]["source"],
labels["train"]["source"],
data["train"]["target"],
labels["train"]["target"],
data["test"]["target"],
labels["test"]["target"],
lr=lr,
iterations=itr,
)
accuracy.append(max(scores))
# accuracy.append(model.score(data['test']['target']@w,labels['test']['target']))
print(
"Mean Accuracy = {} +/- {} ".format(
np.mean(accuracy), np.std(accuracy) / np.sqrt(elaps)
)
)
return np.mean(accuracy)
for s in range(4):
for t in range(4):
if s == t:
continue
print("=" * 55)
print("Source ", mapping[s], "Target", mapping[t])
for i in [[1, 5], [0.1, 10], [0.001, 20], [0.0001, 25]]:
print("lr : ", i[0])
run(s, t, i[0], i[1])
# outcome["{}->{}".format(mapping[src] , mapping[tar])] = np.mean(accuracy)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/DontGetKicked/training.csv")
train_data.head()
train_data.shape
test_data = pd.read_csv("/kaggle/input/DontGetKicked/test.csv")
test_data.head()
test_data.shape
train_data.isnull().sum()
test_data.isnull().sum()
train_data.keys()
train_data.IsBadBuy.value_counts()
train_data.Model.isnull().value_counts()
train_data.Model.shape
train_data.drop("Model", axis=1, inplace=True)
test_data.drop("Model", axis=1, inplace=True)
train_data.drop("Trim", axis=1, inplace=True)
test_data.drop("Trim", axis=1, inplace=True)
train_data.drop("SubModel", axis=1, inplace=True)
test_data.drop("SubModel", axis=1, inplace=True)
train_data.Color.value_counts()
train_data.Color.shape
test_data.Color.value_counts()
test_data.Color.shape
train_data["Color"].fillna(value="Color_Unknown", inplace=True)
test_data["Color"].fillna(value="Color_Unknown", inplace=True)
print(train_data.Color.isnull().sum())
print(test_data.Color.isnull().sum())
test_data.isnull().sum()
train_data["Transmission"].value_counts()
test_data["Transmission"].value_counts()
train_data[train_data.Transmission == "Manual"]
train_data.Transmission.replace("Manual", "MANUAL", inplace=True)
train_data.Transmission.value_counts()
train_data.Transmission.isnull().sum()
train_data.Transmission.fillna(value="Transmission_unk", inplace=True)
train_data.Transmission.isnull().sum()
train_data.WheelTypeID.value_counts()
test_data.WheelTypeID.value_counts()
train_data.drop("WheelTypeID", inplace=True, axis=1)
test_data.drop("WheelTypeID", inplace=True, axis=1)
train_data.WheelType.isnull().sum()
train_data.WheelType.value_counts()
train_data.WheelType.fillna(value="WheelType_unk", inplace=True)
test_data.WheelType.fillna(value="WheelType_unk", inplace=True)
train_data.WheelType.isnull().sum()
train_data.WheelType.value_counts()
train_data.Nationality.isnull().sum()
test_data.Nationality.isnull().sum()
train_data.Nationality.fillna("Nationality_unk", inplace=True)
test_data.Nationality.fillna("Nationality_unk", inplace=True)
train_data.Nationality.isnull().sum()
train_data.Size.isnull().sum()
train_data.Size.fillna("Size_unk", inplace=True)
test_data.Size.fillna("Size_unk", inplace=True)
train_data.Size.isnull().sum()
train_data.keys()
train_data.TopThreeAmericanName.value_counts()
train_data.TopThreeAmericanName.isnull().sum()
train_data.TopThreeAmericanName.fillna(value="Top_unk", inplace=True)
test_data.TopThreeAmericanName.fillna(value="Top_unk", inplace=True)
train_data.PRIMEUNIT.value_counts()
test_data.PRIMEUNIT.value_counts()
train_data.PRIMEUNIT.isnull().sum()
train_data.PRIMEUNIT.fillna(value="Prime_unk", inplace=True)
test_data.PRIMEUNIT.fillna(value="Prime_unk", inplace=True)
test_data.PRIMEUNIT.isnull().sum()
train_data.AUCGUART.value_counts()
test_data.AUCGUART.value_counts()
test_data.AUCGUART.isnull().sum()
train_data.AUCGUART.fillna(value="AUC_unk", inplace=True)
test_data.AUCGUART.fillna(value="AUC_unk", inplace=True)
train_data.AUCGUART.isnull().sum()
train_data.keys()
train_data.drop(
[
"MMRAcquisitionAuctionAveragePrice",
"MMRAcquisitionAuctionCleanPrice",
"MMRAcquisitionRetailAveragePrice",
"MMRAcquisitonRetailCleanPrice",
"MMRCurrentAuctionAveragePrice",
"MMRCurrentAuctionCleanPrice",
"MMRCurrentRetailAveragePrice",
"MMRCurrentRetailCleanPrice",
],
axis=1,
inplace=True,
)
test_data.drop(
[
"MMRAcquisitionAuctionAveragePrice",
"MMRAcquisitionAuctionCleanPrice",
"MMRAcquisitionRetailAveragePrice",
"MMRAcquisitonRetailCleanPrice",
"MMRCurrentAuctionAveragePrice",
"MMRCurrentAuctionCleanPrice",
"MMRCurrentRetailAveragePrice",
"MMRCurrentRetailCleanPrice",
],
axis=1,
inplace=True,
)
train_data.keys()
train_data.drop("PurchDate", axis=1, inplace=True)
test_data.drop("PurchDate", axis=1, inplace=True)
train_data.shape
test_data.shape
train_data.head()
test_data.head()
train_data.dtypes
train_data.columns
train_data.drop(["RefId", "IsBadBuy"], axis=1).dtypes != "object"
not_categorical = train_data.drop(["RefId", "IsBadBuy"], axis=1).columns[
train_data.drop(["RefId", "IsBadBuy"], axis=1).dtypes != "object"
]
for i in not_categorical:
maximum = np.max(train_data[i])
train_data[i] = train_data[i] / maximum
    maximum_test = np.max(test_data[i])
    test_data[i] = test_data[i] / maximum_test  # note: scales test by its own max; reusing the training max would keep the two splits on the same scale
train_data[not_categorical].head()
test_data[not_categorical].head()
categorical = train_data.drop(["RefId", "IsBadBuy"], axis=1).columns[
train_data.drop(["RefId", "IsBadBuy"], axis=1).dtypes == "object"
]
categorical
train_data[categorical[0]]
pd.get_dummies(train_data[categorical[0]])
for i in categorical:
dummies = pd.get_dummies(train_data[i])
dummies.columns = str(i) + "_" + dummies.columns
train_data = pd.concat([train_data, dummies], axis=1)
train_data.drop(i, inplace=True, axis=1)
dummies = pd.get_dummies(test_data[i])
dummies.columns = str(i) + "_" + dummies.columns
test_data = pd.concat([test_data, dummies], axis=1)
test_data.drop(i, inplace=True, axis=1)
train_data.head()
train_data.shape
test_data.shape
for i in train_data.drop("IsBadBuy", axis=1).columns:
if i not in test_data.columns:
test_data[i] = np.zeros(len(test_data))
for i in test_data.columns:
if i not in train_data.columns:
train_data[i] = np.zeros(len(train_data))
train_data.shape
test_data.shape
train_data.head()
test_data.head()
test_data = test_data[train_data.drop("IsBadBuy", axis=1).columns]
print(train_data.shape)
print(test_data.shape)
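# The manual alignment loops above can also be written in one call (a sketch of an equivalent
# alternative, computed into a new variable so nothing above changes):
aligned_test = test_data.reindex(columns=train_data.drop("IsBadBuy", axis=1).columns, fill_value=0)
aligned_test.shape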
train_data.columns
test_data.columns
from sklearn.model_selection import train_test_split
X = train_data.drop(["RefId", "IsBadBuy"], axis=1)
y = train_data["IsBadBuy"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(n_neighbors=11)
KNN.fit(X_train, y_train)
KNN.score(X_test, y_test)
predict = KNN.predict(test_data.drop("RefId", axis=1))
Submission = pd.DataFrame(data=predict, columns=["IsBadBuy"])
Submission.head()
Submission["RefId"] = test_data["RefId"]
Submission.set_index("RefId", inplace=True)
Submission.head()
Submission.to_csv("Submission.csv")
|
# Clone the YLCLS project and install its dependencies
# Use wandb for visualization; this requires a registered wandb account
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
wandb_api = user_secrets.get_secret("wandb_key")
wandb.login(key=wandb_api)
import warnings
warnings.filterwarnings("ignore")
from ylcls import cifar10_clsconfig_dict, get_config, CLSConfig
cifar10_clsconfig_dict
cifar10_clsconfig_dict["train_batch_size"] = 256
cifar10_clsconfig_dict["eval_batch_size"] = 256
cifar10_clsconfig_dict["model_args"] = dict(
model_type="mymodel", model_config="ylcls/configs/ResNet/ResNet50_dropout0.3.yaml"
)
# cifar10_clsconfig_dict['use_wandb'] = False  # can be set to True to use wandb, which requires the login above
cifar10_clsconfig_dict["workers"] = 16
config = get_config(cifar10_clsconfig_dict, CLSConfig)
config.model.info()
from ylcls import Classification
cls = Classification(config)
cls.train()
|
# # **Introduction**
# This is my first machine learning kernel. I used logistic regression.
# ## **Content:**
# 1. [Load and check data](#1)
# 1. [Variable Description](#2)
# 1. [Normalization](#3)
# 1. [Train Test Split](#4)
# 1. [Parameter Initialization and Sigmoid Function](#5)
# 1. [Forward and Backward Propagation](#6)
# 1. [Updating (Learning) Parameters](#7)
# 1. [Prediction](#8)
# 1. [Logistic Regression](#9)
# 1. [Sklearn with Logistic Regression](#10)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
hd = pd.read_csv("/kaggle/input/heart-disease-uci/heart.csv")
#
# ## **1.Load and Check Data**
hd.head(20)
hd.columns
#
# ## **2.Variable Description**
# * age: age
# * sex: sex
# * cp: chest pain type
# * trestbps: resting blood pressure (in mm Hg on admission to the hospital)
# * chol:serum cholestoral in mg/dl
# * fbs: fasting blood sugar > 120 mg/dl) (1 = true; 0 = false
# * restecg:resting electrocardiographic results
# * thalach: maximum heart rate achieved
# * exang: exercise induced angina
# * oldpeak: ST depression induced by exercise relative to rest
hd.info()
y = hd.sex.values
y
x_data = hd.drop(["sex"], axis=1)
x_data
#
# ## **3.Normalization**
# I scaled the values to the range [0, 1].
# Normalization formula: (x - min(x)) / (max(x) - min(x))
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
x
#
# ## **4.Train Test Split**
# the *_train splits are used for fitting; the *_test splits are held out for evaluation
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
print("x train: ", x_train.shape)
print("x test: ", x_test.shape)
print("y train: ", y_train.shape)
print("y test: ", y_test.shape)
#
# ## **5.Parameter Initialization and Sigmoid Function**
# * w = weights
# * b = bias
def initialize_w_and_b(dimension):
w = np.full((dimension, 1), 0.01)
b = 0.0
return w, b
def sigmoid(z):
y_head = 1 / (1 + np.exp(-z))
return y_head
print(sigmoid(0))
#
# ## **6.Forward and Backward Propagation**
# der: derivative (prefix used for the gradients below)
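# With m training examples, the cost and gradients computed below are:
#   cost = -(1/m) * sum( y*log(y_head) + (1-y)*log(1-y_head) )
#   der_w = (1/m) * X . (y_head - y)^T
#   der_b = (1/m) * sum(y_head - y)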
def forward_backward_propagation(w, b, x_train, y_train):
# forward
z = np.dot(w.T, x_train) + b
y_head = sigmoid(z)
loss = -y_train * np.log(y_head) - (1 - y_train) * np.log(1 - y_head)
cost = (np.sum(loss)) / x_train.shape[1]
# backward
der_w = (np.dot(x_train, ((y_head - y_train).T))) / x_train.shape[1]
der_b = np.sum(y_head - y_train) / x_train.shape[1]
gradients = {"der_w": der_w, "der_b": der_b}
return cost, gradients
#
# ## **7.Updating (Learning) Parameters**
def update(w, b, learning_rate, number_of_iteration):
cost_list1 = []
cost_list2 = []
index = []
for i in range(number_of_iteration):
cost, gradients = forward_backward_propagation(w, b, x_train, y_train)
cost_list1.append(cost)
w = w - learning_rate * gradients["der_w"]
b = b - learning_rate * gradients["der_b"]
if i % 10 == 0:
cost_list2.append(cost)
index.append(i)
print("Cost after iteration %i: %f" % (i, cost))
parameters = {"weigh": w, "bias": b}
plt.plot(index, cost_list2)
plt.xticks(index, rotation="vertical")
plt.xlabel("Number of Iteration")
plt.ylabel("Cost")
plt.show()
return parameters, gradients, cost_list1
#
# ## **8.Prediction**
def predict(w, b, x_test):
z = sigmoid(np.dot(w.T, x_test) + b)
Y_prediction = np.zeros((1, x_test.shape[1]))
for i in range(z.shape[1]):
if z[0, i] <= 0.5:
Y_prediction[0, i] = 0
else:
Y_prediction[0, i] = 1
return Y_prediction
#
# ## **9.Logistic Regression**
def logistic_regression(
x_train, y_train, x_test, y_test, learning_rate, num_iterations
):
# initialize
dimension = x_train.shape[0]
w, b = initialize_w_and_b(dimension)
parameters, gradients, cost_list1 = update(w, b, learning_rate, num_iterations)
y_prediction_test = predict(parameters["weigh"], parameters["bias"], x_test)
# print test errors
print(
"test accurary:{}".format(
100 - np.mean(np.abs(y_prediction_test - y_test)) * 100
)
)
logistic_regression(
x_train, y_train, x_test, y_test, learning_rate=0.01, num_iterations=500
)
#
# ## **10.Sklearn with Logistic Regression**
lr = LogisticRegression()
lr.fit(x_train.T, y_train.T)
print("test accuracy {}".format(lr.score(x_test.T, y_test.T)))
|
# Naive Bayes Classifier
# **Importing Important Libraries**
import pandas as pd
# **Reading CSV File**
df = pd.read_csv("/kaggle/input/titanic/train.csv")
df.head()
# **Removing unwanted Columns**
df.drop(
["PassengerId", "Name", "SibSp", "Parch", "Ticket", "Cabin", "Embarked"],
axis="columns",
inplace=True,
)
df.head()
# **Defining Dependent and Independent Variable**
X = df.drop(["Survived"], axis="columns")
y = df.Survived
# **Get Dummy Values for Sex column**
dummies = pd.get_dummies(X.Sex)
dummies.head()
# **Adding Dummies Column to the Dataframe**
X = pd.concat([X, dummies], axis="columns")
X
# **Dropping Sex Column after getting Dummies Value for Male and Female**
X.drop(["Sex"], axis="columns", inplace=True)
X.head()
# **Looking for Null Values in DataFrame**
X.isna().sum()
# **Replacing Null Values with Mean of Age**
X.Age = X.Age.fillna(X.Age.mean())
X.head()
# **Verifying that all Null Values are Gone**
X.isna().sum()
# ------
# **Train Test Split**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# **Creating Naive Bayes Model**
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
# **Fitting the Model with Data**
model.fit(X_train, y_train)
# **Score of the Model**
model.score(X_test, y_test)
# **Predicting the Data of First 10 Passengers**
model.predict(X_test[:10])
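# **A quick sanity check (a small sketch, not in the original): compare the first 10 predictions with the actual labels**
list(zip(model.predict(X_test[:10]), y_test[:10]))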
|
# #### 0) Import the numpy library. The most common alias is np. You could skip the alias, but then you would have to type numpy everywhere instead of np.
# 0
import numpy as np
# #### 1) Create a 1d array, 20 elements, with your favorite number in it.
# 1
np.full(20, 3)
# #### 2) Create a 2d array, 9 rows, 30 columns (9x30). Every value should be a 0, except the final column should be all 1s.
# 2
xs = np.zeros((9, 30))
xs[:, -1] = 1
xs
# #### 3) Create a 2d array, 8 rows, 3 columns (8x3). Put a random dice roll in each location.
# 3
np.random.randint(1, 7, size=(8, 3))
# #### 4) Create a 15x15 array with all 0s except the diagonal from the top left to the bottom right, which should be all 5s.
# 4
xs = np.zeros((15, 15))
np.fill_diagonal(xs, 5)
xs
# #### 5) Create a 10x10 array with random floats from [0, 1).
# 5
xs = np.random.rand(10, 10)
xs
# #### 5a) Determine the largest value of each column.
# 5a
np.max(xs, axis=0)
# #### 5b) Determine the smallest value of each row.
# 5b
np.min(xs, axis=1)
# #### 5c) Determine the mean of each row.
# 5c
np.mean(xs, axis=1)
# #### 5d) Determine the median of each column.
# 5d
np.median(xs, axis=0)
# #### 5e) Determine the 1st quartile value (25% value) of each column.
# 5e
np.percentile(xs, 25, axis=0)
# #### 6) Create a 20x10 array with random floats from [0, 1).
# 6
xs = np.random.rand(20, 10)
# #### 6a) Generate an array of all values in the last column which are greater than 0.5.
# 6a
xs[:, -1][xs[:, -1] > 0.5]
# #### 6b) Determine if there are any numbers greater than 0.99 in the entire array.
# 6b
np.any(xs > 0.99)
# #### 6c) Determine if there are any numbers less than 0.1 in the first row.
# 6c
np.any(xs[0, :] < 0.1)
# #### 7) Determine the solution to the following systems of equations using matrices with numpy.
# 4x + 9y + 0z = 8
# 8x + 0y + 6z = −1
# 0x + 6y + 6z = -1
# #### For those who have never done this, use the coefficients on the left to make one 3x3 matrix, A. Use the right of the equal sign to make a 3x1 matrix, B. This would give Ax = B, where x represents the various values of x/y/z. To solve this for x using matrices, you want to do x = A^-1 * B, which would be the inverse of A (a numpy function) times B using real matrix multiplication.
# ##### If you did this correctly, the answer should have x as 1/2, y as 2/3, and z as -5/6 (as decimals unless you look up how to convert them to fractions).
# 7
xs = np.array([[4, 9, 0], [8, 0, 6], [0, 6, 6]])
b = np.array([8, -1, -1])
ans = np.linalg.inv(xs) @ b  # x = A^-1 * B; np.linalg.solve(xs, b) would also work
ans
# #### 8) Create a 1d array with 30 random integers in the set of {0,1,2..100}, then sort the array from smallest to largest.
# 8
xs = np.sort(np.random.randint(0, 101, 30))
xs
# #### 8a) Determine the median value in the array.
# 8a
np.median(xs)
# #### 8b) Determine the mean value in the array.
# 8b
np.mean(xs)
# #### 8c) Modify each value in the array less than the mean to become a 0.
# 8c
xs[xs < np.mean(xs)] = 0
xs
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import (
MinMaxScaler,
RobustScaler,
StandardScaler,
Normalizer,
MaxAbsScaler,
)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint
df = pd.read_csv("/kaggle/input/diabetes-dataset/diabetes.csv")
df.head()
df.describe().T
df.info()
# Create a class for scatter plot visualization.
class ScatterPlot:
def __init__(self, x_data, y_data, color):
self.x_data = x_data
self.y_data = y_data
self.color = color
def create_plot(self, title, x_label, y_label):
plt.scatter(x=self.x_data, y=self.y_data, color=self.color)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.grid(axis="both", alpha=0.75)
plt.show()
# Create a class for hist plot visualization
class HistPlot:
def __init__(self, x_data):
self.x_data = x_data
def create_plot(self, title, x_label, y_label):
plt.figure(figsize=(8, 4))
plt.hist(x=self.x_data, linewidth=2, edgecolor="black")
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.grid(axis="both", alpha=0.75)
plt.show()
agerange = HistPlot(df["Age"])
agerange.create_plot("Age range of patients", "Age", "Count")
agevsblood = ScatterPlot(df["BloodPressure"], df["Age"], "green")
agevsblood.create_plot("Age vs blood pressure", "Blood pressure", "Age")
data = df[df["BloodPressure"] > 0]
print("Patients with blood pressure\n", data.shape) # Rows x Columns
# Separate the dataset
diabetic = data[data["Outcome"] == 1]
notdiabetic = data[data["Outcome"] == 0]
Diabeticagerange = ScatterPlot(diabetic["Age"], diabetic["BloodPressure"], "red")
Diabeticagerange.create_plot(
"Diabetic: Age range of patients v. Blood Pressure", "Age", "Blood pressure"
)
Notdiabeticagerange = ScatterPlot(
notdiabetic["Age"], notdiabetic["BloodPressure"], "blue"
)
Notdiabeticagerange.create_plot(
"Not diabetic: Age range of patients v. Blood Pressure", "Age", "Blood pressure"
)
# Visualize correlation matrix
corr_matrix = data.corr()
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.show()
# Create a RobustScaler object
scaler = RobustScaler()
# Split into features and target variable
X = data.drop("Outcome", axis=1)
y = data["Outcome"]
# Fit the scaler to your data and transform it
X_scaled = scaler.fit_transform(X)
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X_scaled, y, test_size=0.2, random_state=42
)
# Set the hyperparameter grid
param_gridLR = {
"penalty": ["l1", "l2"],
"C": [0.001, 0.01, 0.1, 10, 100],
}
# Create logistic regression model
logreg = LogisticRegression(max_iter=50, solver="liblinear")
# Create GridSearchCV object
grid_searchLR = GridSearchCV(logreg, param_grid=param_gridLR, cv=5)
# Fit the GridSearchCV object to the training data
grid_searchLR.fit(X_train, y_train)
# Evaluate the best model on the test data
score = grid_searchLR.score(X_test, y_test)
predictions = grid_searchLR.best_estimator_.predict(X_test)
print(f"Test accuracy: {score: .3f}")
param_grid = {"alpha": [0.001, 0.01, 0.1, 1, 10, 100]}
grid_search = GridSearchCV(RidgeClassifier(), param_grid, cv=5)
grid_search.fit(X_train, y_train)
best_model = grid_search.best_estimator_
score_rc = best_model.score(X_test, y_test)
print(f"Test accuracy: {score_rc:.3f}")
param_grid = {"n_neighbors": range(1, 101)}
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
grid_search.fit(X_train, y_train)
best_model = grid_search.best_estimator_
score_kn = best_model.score(X_test, y_test)
print(f"Test accuracy: {score_kn:.3f}")
param_grid = {
"max_depth": [2, 4, 6, 8],
"min_samples_split": [2, 4, 8, 16],
}
treemodel = DecisionTreeClassifier(random_state=42)
grid_search = GridSearchCV(treemodel, param_grid, cv=5)
grid_search.fit(X_train, y_train)
best_model = grid_search.best_estimator_
scoretree = best_model.score(X_test, y_test)
print(f"Test accuracy: {scoretree:.3f}")
param_distribs = {
"n_estimators": randint(low=50, high=200),
"max_depth": randint(low=2, high=10),
"min_samples_split": randint(low=2, high=10),
}
rnd_search = RandomizedSearchCV(
RandomForestClassifier(random_state=42),
param_distributions=param_distribs,
n_iter=10,
cv=5,
random_state=42,
)
rnd_search.fit(X_train, y_train)
best_model = rnd_search.best_estimator_
score_rf = best_model.score(X_test, y_test)
print(f"Test accuracy: {score_rf:.3f}")
|
# Welcome to this project. This is actually an excel project that I have decided to tackle with both Python and Excel. This part concerns the analysis of bike rides with Python.
# # Initializing Python packages
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# # Importing the data
dataframe = pd.read_excel(
"/kaggle/input/bike-rides-sale-dataset/Excel Project Dataset.xlsx"
)
# # Getting info about data
dataframe.head(n=5)
dataframe.tail(n=5)
# # General Information
dataframe.info()
dataframe.columns
dataframe.size
# # Data Wrangling Process
# Changing the M and S to Married and Single for better understanding
dataframe["Marital Status"] = dataframe["Marital Status"].replace(
{"M": "Married", "S": "Single"}
)
dataframe["Marital Status"].head(n=5)
# Changing the F and M to Female and Male for better understanding
dataframe["Gender"] = dataframe["Gender"].replace({"M": "Male", "F": "Female"})
dataframe["Gender"].head(n=5)
# Creating Age Groups
# create bins and labels for each age group
bins = [0, 19, 29, 59, dataframe["Age"].max()]
labels = ["Adolescent", "Young adult", "Adult", "Elderly"]
# create a new column with age groups
dataframe["Age Group"] = pd.cut(dataframe["Age"], bins=bins, labels=labels)
# # Exploratory Data Analysis
# * What is the average income of people who did or did not purchase a bike, broken down by gender?
data1 = round(dataframe.groupby(["Purchased Bike", "Gender"])["Income"].mean(), 2)
data1
# # Visualization
# set the Seaborn theme
sns.set_style("darkgrid")
# create a bar chart using Seaborn
sns.barplot(
x=data1.index.get_level_values(0) + " " + data1.index.get_level_values(1),
y=data1.values,
)
# add labels and title
plt.title("Average Income by Purchased Bike and Gender")
plt.xlabel("Purchased Bike and Gender")
plt.ylabel("Average Income")
# show the chart
plt.show()
# * What are the three most common occupations of people who purchased a bike?
# retrieving only the part of the dataframe that contains the Yes value in the Purchased Bike column
data2 = dataframe[dataframe["Purchased Bike"] == "Yes"]
data2.head(n=5)
result = data2["Occupation"].value_counts()
for occupation in result.index[:3]:
print(occupation)
# * How many bikes were purchased by each age group?
# initialising dataframe with counts of the values
data3 = data2["Age Group"].value_counts()
# loop over the age groups (the length of data3)
for i in range(len(data3)):
    # print the result for each group separately
    print(f"The {data3.index[i]} group bought {data3.iloc[i]} bikes.")
# The vast majority of bike sales comes from adults. Young adults and the elderly ordered exactly the same number of bikes. No adolescent bought a bike; maybe this is due to the price of the bikes or a design that is less appealing to younger people.
# * How does purchasing behavior vary with commute distance?
# Commute distance refers to the distance someone regularly travels between their home and their workplace or school.
data4 = dataframe.groupby(["Purchased Bike", "Commute Distance"])[
"Purchased Bike"
].value_counts()
data4
# We are only interested here in the people that actually bought bikes
# I use the xs method to select a single level of a multi-index DataFrame.
data5 = data4.xs("Yes", level=0)
data5
# # Visualization
# Deleting one level of multi-index levels columns
# data5 = data5.droplevel('Purchased Bike')
print(data5.values, data5.index)
# create a list of values
values = [207, 83, 95, 77, 33]
# create a list of index labels
index = ["0-1 Miles", "1-2 Miles", "2-5 Miles", "5-10 Miles", "10+ Miles"]
# create a DataFrame with the given values and index
data5 = pd.DataFrame({"Values": values}, index=index)
# Plot lineplot
sns.lineplot(data=data5)
# We can see that the greater the commute distance, the fewer people are willing to buy bikes. This may be because people who buy bikes use them to ride to school or work.
# * What are the percentages of purchased bikes per region ?
# The size() method returns a Series with the number of occurrences of each group
data6 = data2.groupby("Region").size()
data6
# # Visualization
# create a pie chart
plt.pie(data6, labels=data6.index, autopct="%1.1f%%", startangle=90)
# add a white circle to create a donut chart
centre_circle = plt.Circle((0, 0), 0.80, fc="white")
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# add a title
plt.title("Donut Chart of Sales per Region")
# display the chart
plt.show()
# There, we can see that the region with the most sales was North America.
# # Deciphering patterns
# # Heatmap
# select only columns with numeric values
num_data = dataframe.select_dtypes(include=[np.number])
num_data.head()
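# A minimal sketch of the heatmap this section builds toward, using the numeric columns selected above
plt.figure(figsize=(8, 6))
sns.heatmap(num_data.corr(), annot=True, fmt=".2f", cmap="coolwarm")
plt.title("Correlation heatmap of numeric columns")
plt.show()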
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
pd.set_option("display.max_columns", None) # показывать все колонки
pd.set_option("display.max_rows", 50) # показывать 50 строк
# Визуализация
import matplotlib.pyplot as plt
import seaborn as sns
# Statistics
from itertools import combinations
from scipy.stats import ttest_ind
# regular expressions for working with text
import re
# Machine learning models
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
DATA_DIR = "/kaggle/input/carprice/"
train = pd.read_csv(DATA_DIR + "train.csv")
test = pd.read_csv(DATA_DIR + "test.csv")
# Convert the color code into a color name:
def color_name(code):
if code == "040001":
return "чёрный"
if code == "FAFBFB":
return "белый"
if code == "97948F":
return "серый"
if code == "CACECB":
return "серебристый"
if code == "0000CC":
return "синий"
if code == "200204":
return "коричневый"
if code == "EE1D19":
return "красный"
if code == "007F00":
return "зелёный"
if code == "C49648":
return "бежевый"
if code == "22A0F8":
return "голубой"
if code == "DEA522":
return "золотистый" # color_group_2
if code == "660099":
return "пурпурный" # color_group_1
if code == "4A2197":
return "фиолетовый" # color_group_1
if code == "FFD600":
return "жёлтый" # color_group_2
if code == "FF8649":
return "оранжевый"
if code == "FFC0CB":
return "розовый"
# Function for grouping engine power values into bins represented by their mean value
def change_eng_pow(value, parts, min_value, max_value):
step = (max_value + 0.1 - min_value) / parts
    # add 0.1 so that the iterations are guaranteed to reach the maximum value
for i in range(parts):
# print('part', i+1)
# print('mv', min_value)
# print('xv', min_value+step)
# print('=============')
if (int(value) >= min_value) and (int(value) <= (min_value + step)):
return round(min_value + (step / 2))
else:
min_value += step
# Generalize rare categories of the fuelType feature
def change_fuelType(value):
if (value == "гибрид") or (value == "электро") or (value == "газ"):
return "other"
else:
return value
# Log-transform columns:
def log_column(value):
return np.log(int(value) + 1)
def change_pts(x):
if x == "ORIGINAL":
return "Оригинал"
if x == "DUPLICATE":
return "Дубликат"
else:
return x
def change_wheel(x):
if x == "LEFT":
return 1
if x == "RIGHT":
return 0
if x == "Левый":
return 1
if x == "Правый":
return 0
else:
return x
RANDOM_SEED = 1
train.head()
train.info()
test.head()
test.info()
train["sample"] = 1 # помечаем где у нас трейн
test["sample"] = 0 # помечаем где у нас тест
train.dropna(
subset=["price"], inplace=True
) # удалим данные без цены в тренировочном датасете, ввиду их бесполезности
# Оставим в тренировочном датасете только марки авто, которые есть в тестовом
train = train[train["brand"].isin(test["brand"].unique())]
# model_name в тесте и model в трейне имеют одинаковое значение, надо объединить в один столбец
train = train.rename(columns={"model": "model_name"})
# Переведем код цвета в название для train
train["color"] = train["color"].apply(lambda x: color_name(x))
data = test.append(train, sort=False).reset_index(drop=True) # объединяем датасеты
data.info()
# complectation_dict, equipment_dict, model_info, model_name, parsing_unixtime,
# priceCurrency, sell_id, super_gen, vendor, Состояние - present only in the test set
# start_date - present only in the train set
# car_url - just a link to the listing page
# image - link to the photo. With enough effort it could be processed with neural networks, which is out of scope for this project.
# Владение - too little data, unlikely to add any clarity.
# Комплектация - too few unique values (fewer than 3000).
# hidden - no values at all
# The target variable price is kept for later tests on the training dataset
data.drop(
[
"car_url",
"complectation_dict",
"equipment_dict",
"image",
"model_info",
"parsing_unixtime",
"priceCurrency",
"sell_id",
"super_gen",
"vendor",
"Владение",
"Состояние",
"Комплектация",
"hidden",
"start_date",
],
axis=1,
inplace=True,
)
data.head(1)
data.isna().sum()
data["bodyType"].isna().sum()
data["bodyType"].value_counts()
data["bodyType"] = data["bodyType"].apply(
lambda x: str(x).lower()
)  # convert everything to lowercase
# Some body types contain only digits in their names, so first cast everything to strings
data["bodyType"].value_counts()
row = data["bodyType"][
data["bodyType"].isin(data["bodyType"].value_counts()[10:].index)
]
row = row.apply(lambda x: (re.split(r"[\s, -]", x)[0] + "_oth"))
data["bodyType"][row.index] = row
data["bodyType"].value_counts()
data = data.drop(data[data["bodyType"] == "nan_oth"].index)
row = data["bodyType"][
data["bodyType"].isin(
(data["bodyType"].value_counts()[data["bodyType"].value_counts() < 100].index)
)
]
row = row.apply(lambda x: "other")
data["bodyType"][row.index] = row
data["bodyType"].value_counts()
data["bodyType"].hist(xrot=90)
data["brand"].value_counts()
data["brand"].hist(xrot=90)
data["color"].value_counts()
data["color"].hist(xrot=90)
data["description"].isna().sum()
data.drop(["description"], axis=1, inplace=True)
data["engineDisplacement"].value_counts()
# Drop the column, since correct information cannot be extracted from it.
# Alternatively, the affected rows could be removed from the training dataset.
data.drop(["engineDisplacement"], axis=1, inplace=True)
data["enginePower"].value_counts()
# Keep only the main value:
data["enginePower"] = data["enginePower"].apply(
lambda x: (re.split(r"[\s, -, .]", str(x))[0])
)
data["enginePower"].value_counts()
data["enginePower"].hist()
data["enginePower"] = data["enginePower"].apply(lambda x: log_column(x))
data["enginePower"].hist()
data["fuelType"].value_counts()
data["fuelType"].hist()
data["fuelType"] = data["fuelType"].apply(lambda x: change_fuelType(x))
data["fuelType"].hist()
data["mileage"].hist()
(data["mileage"] > 200000).sum()
data["mileage"] = data["mileage"].apply(lambda x: log_column(x))
data["mileage"].hist()
data["modelDate"].hist()
data["modelDate"].value_counts()
data["modelDate"].min()
data["modelDate"].max()
data["modelDate"].apply(lambda x: log_column(x)).hist()
data["modelDate"] -= data["modelDate"].min()
data["modelDate"].hist()
data["modelDate"] = data["modelDate"].apply(lambda x: log_column(x))
data["modelDate"].hist(bins=40)
data["model_name"].value_counts()
row = data["model_name"][
data["model_name"].isin(
(
data["model_name"]
.value_counts()[data["model_name"].value_counts() < 150]
.index
)
)
]
row = row.apply(lambda x: "other")
data["model_name"][row.index] = row
data["model_name"].hist()
data["name"].value_counts()
data["horse_power"] = data["name"].apply(
lambda x: (re.split(r"\D", (re.split(r"[(,)]", x)[1]))[0])
)
data["horse_power"].hist()
data["horse_power"] = data["horse_power"].apply(lambda x: log_column(x))
data["horse_power"].hist()
data["transmission"] = data["name"].apply(
lambda x: re.split(
r"\s",
x[re.search(r"\s", x).start() :][
: re.search(r"T ", x[re.search(r"\s", x).start() :]).start() + 1
],
)[-1]
)
data["transmission"].value_counts()
data["transmission"].hist()
data.drop(["name"], axis=1, inplace=True)
data["numberOfDoors"].value_counts()
data[data["numberOfDoors"] == 0]
data.loc[data["numberOfDoors"] == 0, "numberOfDoors"] = 2.0
data["numberOfDoors"].value_counts()
data["numberOfDoors"].hist()
data["productionDate"].value_counts()
data["productionDate"].hist()
data["productionDate"] -= data["productionDate"].min()
data["productionDate"] = data["productionDate"].apply(lambda x: log_column(x))
data["productionDate"].hist(bins=40)
data["vehicleConfiguration"].value_counts()
data["vehicleConfiguration"].apply(lambda x: re.split(r"\s", x)[0]).value_counts()
data.drop(["vehicleConfiguration"], axis=1, inplace=True)
data["vehicleTransmission"].value_counts()
data.drop(["vehicleTransmission"], axis=1, inplace=True)
data["Владельцы"].value_counts()
train["Владельцы"].isna().sum()
data.loc[data["Владельцы"].isna(), "Владельцы"] = 2
data["Владельцы"].isna().sum()
data["Владельцы"] = data["Владельцы"].apply(lambda x: (str(x)[0]))
data["Владельцы"].value_counts()
data["Владельцы"].hist()
data["ПТС"].value_counts()
data["ПТС"] = data["ПТС"].apply(lambda x: change_pts(x))
data["ПТС"].isna().sum()
data.loc[data["ПТС"].isna(), "ПТС"] = "Оригинал"
data["ПТС"].value_counts()
data["ПТС"].hist()
data["ПТС"] = data["ПТС"].apply(lambda x: 1 if x == "Оригинал" else 0)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames[:10]:
pass
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# /kaggle/input/111-2-uta-dl-hw2/tw_food_101_test_pred.csv
# /kaggle/input/taiwanese-food-101/tw_food_101_classes.csv
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/tw_food_101_classes.csv
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/tw_food_101_train.csv
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/tw_food_101_test_list.csv
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/1269.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/3863.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/623.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/2193.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/3750.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/2008.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/2081.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/3919.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/3757.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/test/4489.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/train/stinky_tofu/15653.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/train/stinky_tofu/15572.jpg
# /kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101/train/stinky_tofu/15578.jpg
import tensorflow
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import Dropout, LeakyReLU, ReLU
from tensorflow.keras.optimizers import Adam
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
import os
import csv
import numpy as np
from tensorflow.keras.models import load_model
import glob, tqdm
from PIL import Image
root_dir = "/kaggle/input/taiwanese-food-101/tw_food_101/tw_food_101"
train_dir = root_dir + "/train"
test_dir = root_dir + "/test"
# ### Run this block only once (it converts .bmp images to .jpg)
# pngs = glob.glob(train_dir + r'/*/*.bmp') # bmp png
# print(len(pngs))
# # print(pngs[0])
# for n in pngs[:]:
# print(n)
# img = Image.open(n)
# img = img.convert('RGB')
# nn = n[:-3]+'jpg'
# # print(nn)
# img.save(nn)
# pngs = glob.glob(test_dir + r'/*.bmp') # bmp png
# print(len(pngs))
# # print(pngs[0])
# for n in pngs[:]:
# print(n)
# img = Image.open(n)
# img = img.convert('RGB')
# nn = n[:-3]+'jpg'
# # print(nn)
# img.save(nn)
rows = 17
cols = 6
fig, ax = plt.subplots(rows, cols, frameon=False, figsize=(15, 25))
food_dirs = os.listdir(train_dir)
for i in range(rows):
for j in range(cols):
food_dir = food_dirs[(i * cols + j) % 101]
all_files = os.listdir(os.path.join(train_dir, food_dir))
img = plt.imread(os.path.join(train_dir, food_dir, all_files[3]))
ax[i][j].imshow(img)
ax[i][j].text(
0,
-20,
food_dir,
size=10,
rotation=0,
ha="left",
va="top",
bbox=dict(boxstyle="round", ec=(0, 0.6, 0.1), fc=(0, 0.7, 0.2)),
)
plt.setp(ax, xticks=[], yticks=[])
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
# train_datagen = ImageDataGenerator(
# rotation_range=30,
# fill_mode='nearest',
# shear_range=0.2,
# zoom_range=0.2,
# horizontal_flip=True,
# rescale=1./255,)
train_datagen = ImageDataGenerator(
rotation_range=90,
width_shift_range=0.2,
height_shift_range=0.2,
brightness_range=[0.0, 0.5],
shear_range=0.2,
zoom_range=0.2,
channel_shift_range=0.2,
fill_mode="nearest",
horizontal_flip=True,
vertical_flip=True,
rescale=1.0 / 255,
)
valid_datagen = ImageDataGenerator(
# rotation_range=90,
# width_shift_range=0.2,
# height_shift_range=0.2,
# brightness_range=[0.,0.5],
# shear_range=0.2,
# zoom_range=0.2,
# channel_shift_range=0.2,
# fill_mode='nearest',
# horizontal_flip=True,
# vertical_flip=True,
rescale=1.0
/ 255,
)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(221, 221),
color_mode="rgb",
batch_size=128,
class_mode="categorical",
shuffle=True,
)
# train_datagen = ImageDataGenerator(rescale=1./255)
valid_generator = valid_datagen.flow_from_directory(
train_dir,
target_size=(221, 221),
color_mode="rgb",
batch_size=128,
class_mode="categorical",
shuffle=True,
)
base_model = keras.applications.densenet.DenseNet121(
include_top=False, weights="imagenet", input_shape=(221, 221, 3)
)
# base_model = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(221, 221, 3))
# base_model = keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
# base_model = keras.applications.resnet.ResNet50(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
# base_model = keras.applications.mobilenet.MobileNet(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
LR_function = ReduceLROnPlateau(
monitor="val_accuracy",
patience=3,
    # adjust the LR if val_accuracy has not improved within 3 epochs
    verbose=1,
    factor=0.5,
    # multiply the LR by 0.5 each time
    min_lr=0.000005
    # do not reduce the LR below this minimum
)
adam = Adam(learning_rate=0.0001)
estop = tensorflow.keras.callbacks.EarlyStopping(monitor="loss", patience=6)
x = base_model.output
x = Flatten()(x)
x = Dense(2048)(x) # , activation='relu'
x = tfa.activations.mish(x)
x = Dense(1024)(x) # , activation=mish , activation='relu'
x = tfa.activations.mish(x)
x = Dropout(0.5)(x)
predictions = Dense(101, activation="softmax")(x)
model = Model(base_model.input, predictions)
model.compile(
optimizer=adam,
loss="categorical_crossentropy",
metrics=["accuracy", "val_accuracy"],
)
# model = tensorflow.saved_model.load('model')
# model = tensorflow.keras.models.load_model('model_DenseNet121.h5')
# model.summary()
history = model.fit(
train_generator,
epochs=10,
callbacks=[LR_function, estop],
validation_data=valid_generator,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
workers=32,
)
model.save("model_DenseNet121.h5")
# tensorflow.saved_model.save(model, 'model')
# Create {ID, filename} dictionary {'1269': '1269.jpg', '3863': '3863.jpg',.....}
test_dict = {}
for root, dirs, files in os.walk(root_dir + "/test"):
for filename in files:
test_id, file_ext = os.path.splitext(filename)
test_dict[test_id] = filename
from PIL import Image
import numpy as np
from skimage import transform
def load_img(filename, target_w=150, target_h=150):
np_image = Image.open(filename)
np_image = np.array(np_image).astype("float32") / 255
np_image = transform.resize(np_image, (target_w, target_h, 3))
np_image = np.expand_dims(np_image, axis=0)
return np_image
# Read images in order and make predictions
results = []
for i in tqdm.tqdm(range(len(test_dict))):
img = load_img(root_dir + "/test/" + test_dict[str(i)], 221, 221)
ret = model.predict(img, verbose=0)
results.append(np.argmax(ret))
# Print results in CSV format and upload to Kaggle
with open("pred_results.csv", "w") as f:
f.write("Id,Category\n")
for i in range(len(results)):
f.write(str(i) + "," + str(results[i]) + "\n")
# Download your results!
from IPython.display import FileLink
FileLink("pred_results.csv")
|
# # 1 | Lasso Regression
# First of all, let's understand what the hell this `Regression` thing actually is.
# **What** - `Regression` is like the lost brother of `classification`. In `classification` we have `discrete` or `particular values` that we want to `classify`; in `regression` we have `continuous values` that we want to `predict`.
# | Classification |Regression |
# | --- | --- |
# | We have discrete values| We have continuous values |
# |Usually we know these values in depth | We usually don't know these values in depth|
# |These are comparatively less in number| These are comparatively more in number |
# # 2 | What other things we will learn here
# * Slope of Function
# * Basic Differentiation
# * Intercept
# **Why** - Regression is a very useful method and is used in many places to predict values.
# **How** - There are many different techniques to perform this operation.
# **I highly encourage you to find different methods by yourself too, and can even try to build your own models, maybe your brain pushes the boundaries of machine learning**
# So now that we have a basic idea of what regression is, our next move is to learn about Lasso Regression
# So what is this `LassoRegressor`?
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import IFrame
import seaborn as sns
# Lets assume we have data like this
features = np.array([x for x in range(0, 200, 1)])
features
target = np.array([x for x in range(0, 400, 2)])
target
# Let's assume there is a connection between the `target` and the `features`. By human intuition we can see that every element in `target` is just double the corresponding element in `features`, or $target = 2 \times features$.
# Lets assume we change the target a little bit...
target = np.array([x + 1 for x in range(0, 400, 2)])
target
# Now what could be the trend here? Looking at the code above, we can say that each `target` value is just `double + 1` of the corresponding element in `features`, or $target = 2 \times features + 1$.
# So far the problem was easy enough to solve by eye, but these are just toy examples. As we move closer to the real world, the problems get more difficult and it becomes harder to spot proper trends between the two `arrays`. That is why we try to teach the machine how to find the trend in the data. The formula we had before, $target = 2 \times features + 1$, applies only to this one problem or a similar one. But it can be generalized by the equation of a `straight line`, which is $y = mx + b$.
# So what does this line means ???
# Lets first try to plot the data we had on a scatter plot
plt.scatter(features, target)
# You can see we got a sequence of dots that resembles a straight line.
# Let's assume we have a line that tries to capture most of these points, like this
plt.scatter(features, target)
plt.plot([0, 200], [0, 400], "yellow")
# Again, by human intuition we found the `best fit line`. But what if we want to generalize this so that we don't have to find the best fit line by eye?
# First of all, let's dig a little deeper into the equation $y = mx + b$
# So what do the terms in this equation represent?
# * `m` is the slope of the line
# # 2.1 | Slope of A function
# Slope of a function shows how steep a function is, or the direction of a function at a given point on the curve.
# Let's assume we have the curve $y = 4x^2$; its slope is given by $y' = 8x$
IFrame("https://www.desmos.com/calculator/zluqu5vyuh", 400, 400)
# So how do we calculate the `slope` of a line???
# Let's assume we have a function $y = f(x)$. To find the slope of the function, we simply differentiate it; thus, the slope is $y' = f'(x)$
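# A quick numerical sanity check (a small sketch): the slope of $y = 4x^2$ at $x = 3$ should be $8 \times 3 = 24$
f = lambda x: 4 * x**2
h = 1e-6
print((f(3 + h) - f(3 - h)) / (2 * h))  # central finite difference, approximately 24.0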
# # 2.2 | Differentiation
# Differentiation can be thought of as measuring how a function changes over an infinitesimally small step in its input.
# Let's assume we have a function `y = sin(x)`
# A small strip of that curve illustrates what taking the derivative of `sin(x)` means
# Coming back to the function we used before, $y = 4x^2$
# Taking its derivative we get $$y' = 8x$$ (using the power rule $(x^n)' = nx^{n-1}$)
# So the slope of $y = 4x^2$ can be represented as
IFrame("https://www.desmos.com/calculator/hrguwktg9q", 400, 400)
# **If you want to know more about differentiation, here is [3Blue1Brown](https://www.youtube.com/@3blue1brown/featured) => [Essence Of Calculus](https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr)**
# So now you have a basic idea of `slope`
# # 2.3 | Intercept
# Now, what does `b` represent? It is usually called the `intercept`. Consider this graph of the equation $y = x$, i.e. $y = 1x + 0$
IFrame("https://www.desmos.com/calculator/gai0veg5fh", 400, 400)
# This line crosses the axes at $(0, 0)$; these coordinates are called the `intercepts` of the line. If we set `b`, the `intercept`, to $1$, the line shifts up and now crosses the y-axis at $(0, 1)$ and the x-axis at $(-1, 0)$. Basically, the `intercept` translates the line in the plane. With that said, let's also understand how the `slope` changes the line. If we set `m` to $2$, the line rotates anti-clockwise. So as we increase the value of `m`, the `slope`, the line rotates anti-clockwise, and vice versa: if we decrease the value of `m`, the line rotates clockwise.
# In short, by tweaking the values of `m` and `b`, the `slope` and `intercept`, we can move the line any way we want, `as long as it remains a straight line`. We still cannot bend it. The small sketch below illustrates this.
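# A small sketch of how `m` and `b` move the line: same x values, three different (m, b) pairs
xs_demo = np.linspace(0, 10, 50)
for m, b in [(1, 0), (1, 3), (2, 0)]:
    plt.plot(xs_demo, m * xs_demo + b, label=f"y = {m}x + {b}")
plt.legend()
plt.show()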
# So now, given any data, we just need to define the values of the `slope` and `intercept` and we can get the best fit line. But the question remains: how do we generalize the values of these tuning parameters?
# In simple words: how can we find a relation between the data we have and these tuning parameters, so that we only need to define that relationship and can then easily predict the values?
# Let's suppose the line we pick is this one
plt.scatter(features, target)
plt.plot([0, 300], [0, 400], "yellow")
# If we test this line on the training data alone, we will find that it is not correct: it predicts many points poorly, while the best fit line we drew first makes far fewer mistakes. For example, if we ask the line we just defined for the value corresponding to $200$, it answers roughly $267$, while the actual value was $400$. There is some `error`, some `loss`, or some `cost` between the `actual` and `predicted` values.
# For measuring this loss, what we can do is find the difference between the `actual value` and the `predicted value`. A best fit line will give the lowest value of this difference.
# The word difference here is very difficult to say, so we can give this term a new fancy name, which is `The Loss`.
# One can define the loss as $$Loss = actual - predicted$$
# We only took the example of one value, but there is a whole group of values showing the same behaviour. For those we can extend the formula to
# Lets denote $actual$ as $a$ and $predicted$ as $p$
# $$Loss = (a_1 - p_1) + (a_2 - p_2) + (a_3 - p_3) + ... + (a_n - p_n)$$
# or $$Loss = \sum\limits_{i = 1}^{n}(a_i - p_i)$$ or $$Loss = \sum\limits_{i = 1}^{n}(y_i - \hat y_i)$$
# Whenever you see $\hat y$, think of it as the `predicted value`
# Now lets assume we have data like this and a random line is drawn like this
# If you look closely, a lot of the error terms will tend to cancel each other out. We can even end up with a line that is `not the best fit` but still gives $0$ total error. So with the `Loss` defined above we are not choosing a `best fit line`; rather, we are choosing a line that sits in the `middle` of the points. One way to counter this is to wrap the error in a `modulus` function, like this $$Loss = \sum\limits_{i = 1}^{n}|y - \hat y|$$
# But what is a modulus function? It simply converts any negative number to a positive one. For example
# $|-1| = 1$
IFrame("https://www.desmos.com/calculator/kamxotjra2", 400, 400)
# But there is a problem with this function: the `modulus` is not differentiable at zero. You might be wondering why we even care about that - why would you ever differentiate a loss function?
# We actually differentiate the loss function in later steps, which is why we will not use the modulus function.
# Another way is to square the error, like this: $Loss = (y - \hat y)^2$
# It is simple, it works, and we can even differentiate it. The small sketch below compares the three loss formulations on a toy example.
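# A tiny numeric sketch comparing the three loss formulations: signed errors can cancel out, while the absolute and squared versions cannot
actual = np.array([2.0, 4.0, 6.0])
predicted = np.array([3.0, 4.0, 5.0])  # one point above the truth, one below
print(np.sum(actual - predicted))  # 0.0 -> the signed errors cancel
print(np.sum(np.abs(actual - predicted)))  # 2.0 -> absolute (modulus) loss
print(np.sum((actual - predicted) ** 2))  # 2.0 -> squared loss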
# Now we have the basic idea: we need to find the `m` and `b` that give the lowest loss value. Next we should figure out how to do this.
# What if we could somehow relate the `loss` to `m` and `b`?
# There are several different methods to do this, but first let's move to a more complex problem.
# Let's say we have a target array like this
target = []
for i in range(200):
if i < 26:
target.append(i ** (1 / 2))
else:
target.append(i ** (1 / 3))
target = np.array(target)
target
# If we try to plot this, we get
plt.scatter(features, target)
# Also suppose we are given only a small amount of data for training, say only the first $30$ points.
# So there is a high chance that the model will not be able to make good predictions.
# To counter this, we add a term that depends on a hyperparameter and on the weights to the loss, so that the model is still penalized.
# So our loss will be $$Loss = (y - \hat y)^2 + \lambda m$$
# But what should the initial weights and biases be?
# Let's assume we initialize the parameters randomly, like this
weights = np.random.randn(1)
weights
biases = np.random.randn(1)
biases
pred = weights * 30 + biases
pred
loss = (pred - 60) + (1 * weights)
loss
# Our main goal is to reduce this loss as much as possible.
# What if we subtract a small fraction of the derivative of this loss from the parameters? The derivative of the loss tells us the steepness of the curve, so doing this repeatedly should move us towards the values of minimum loss. So how do we find the derivative of this function $Loss = (y - \hat y)^2 + \lambda m$? We know that $\hat y = mx + b$. Substituting this in, we get $$Loss = (y - mx - b)^2 + \lambda m$$ Now we can differentiate the function.
# ## Differentiating wrt `b`
# $$\frac{dLoss}{db} = \frac{d}{db}\left[(y - mx - b)^2 + \lambda m\right]$$
# $$= 2(y - mx - b)(-1) = -2(y - mx - b)$$
# ## Differentiating wrt `m`
# $$\frac{dLoss}{dm} = \frac{d}{dm}\left[(y - mx - b)^2 + \lambda m\right]$$
# $$= 2(y - mx - b)(-x) + \lambda = -2x(y - mx - b) + \lambda$$
# Lets assume $\lambda = 1$
weights -= ((-2 * (60 - weights * 30 - biases)) + 1) * 0.01
biases -= (2 * 30 * (60 - weights * 30 - biases)) * 0.01
print(weights)
print(biases)
loss = (60 - (weights * 30 + biases)) + 1
loss
# Our loss has decreased, so let's do it again
weights -= ((-2 * loss) + 1) * 0.01
biases -= -2 * 30 * loss * 0.01
print(weights)
print(biases)
loss = (60 - (weights * 30 + biases)) + (weights)
loss
# So now we know that if we do this iteratively, we will minimise the loss and gradually reach the optimal values of `weights` or `m` and `biases` or `b`
# Let's say we run this again and again, around 100 times
for _ in range(100):
weights -= ((-2 * loss) + 1) * 0.01
biases -= -2 * 30 * loss * 0.01
loss = (60 - (weights * 30 + biases)) + (weights)
# Lets now see the weights and biases
weights
biases
# Though the bias is still high, we have almost reached the right value of `weights`
# Let's do this all again, and this time we will also plot a graph
weights = abs(np.random.randn(1))
biases = abs(np.random.randn(1))
losses = []
for _ in range(100):
weights -= ((-2 * loss) + 1) * 0.01
biases -= -2 * 30 * loss * 0.01
loss = (60 - (weights * 30 + biases)) + (weights)
losses.append(loss)
sns.lineplot(np.array(losses))
# As we can see we have greatly decreased our losses
# Now we just need to put this all into a function
def LassoRegression():
weights = abs(np.random.randn(1))
biases = abs(np.random.randn(1))
predic = []
losses = []
for _ in range(300):
pred = weights * features + biases
loss = np.sum((60 - (weights * 30 + biases)) + (weights))
losses.append(loss)
weights -= ((-2 * loss) + 1) * 0.01
biases -= -2 * 30 * loss * 0.01
return weights, biases
# # 3 | Functionalities
# We have built our LassoRegression; now we need to add some functionality to it. We can borrow the interface from **[Scikit-Learn](https://scikit-learn.org/)=>[Stable](https://scikit-learn.org/stable/)=>[Linear Model](https://scikit-learn.org/stable/modules/linear_model.html)=>[Ridge](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html)**
# * ✅List Of columns
# * ✅`alpha : float, default=1.0` - Constant that multiplies the penalty term, controlling regularization strength. `alpha` must be a non-negative float, i.e. in `[0, inf)`.
# # 3.1 | List Of Columns
# The function above only works when there are just $2$ columns, one `feature` and one `target`. What if the user passes a list of feature columns? For this we need to take two separate arguments from the user and handle them differently.
def LassoRegression(X, y):
weights = np.random.randn(X.shape[0])
biases = np.random.randn(1)
predic = []
losses = []
for _ in range(300):
pred = weights * features + biases
loss = np.sum((60 - (weights * 30 + biases)) + (weights))
losses.append(loss)
weights -= ((-2 * loss) + 1) * 0.01
biases -= -2 * 30 * loss * 0.01
return weights, biases
# # 3.2 | Alpha
# This is the $\lambda$ term we multiply with the weights. We had kept it at $1$, and by luck the default is also $1$, so we just need to apply some tweaks here
def LassoRegression(X, y, alpha=0.1):
weights = np.random.randn(X.shape[0])
biases = np.random.randn(1)
predic = []
losses = []
for _ in range(300):
pred = weights * features + biases
loss = np.sum((60 - (weights * 30 + biases)) + (alpha * weights))
losses.append(loss)
weights -= ((-2 * loss) + alpha) * 0.01
biases -= -2 * 30 * loss * 0.01
return weights, biases
# # 4 | Lasso Regression Final Source Code
def LassoRegression(X, y, alpha=0.1):
weights = np.random.randn(X.shape[0])
biases = np.random.randn(1)
predic = []
losses = []
for _ in range(300):
pred = weights * features + biases
loss = np.sum((60 - (weights * 30 + biases)) + (alpha * weights))
losses.append(loss)
weights -= ((-2 * loss) + alpha) * 0.01
biases -= -2 * 30 * loss * 0.01
return weights, biases
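# For reference, a more general vectorized sketch (an alternative written under assumptions, not exactly the method above):
# it accepts a 2-D feature matrix X and a 1-D target y, and uses the L1 penalty alpha * |w| that gives Lasso its name.
# In practice the features should be scaled before running plain gradient descent like this.
def lasso_regression_sketch(X, y, alpha=0.1, lr=0.01, n_iter=300):
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    n_samples, n_features = X.shape
    weights = np.zeros(n_features)
    bias = 0.0
    losses = []
    for _ in range(n_iter):
        pred = X @ weights + bias  # predictions for all samples at once
        error = y - pred
        loss = np.mean(error**2) + alpha * np.sum(np.abs(weights))
        losses.append(loss)
        grad_w = -2 * (X.T @ error) / n_samples + alpha * np.sign(weights)  # subgradient of the L1 term
        grad_b = -2 * np.mean(error)
        weights -= lr * grad_w
        bias -= lr * grad_b
    return weights, bias, losses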
|
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv("/kaggle/input/cleaned-mentalhealth/dataset.csv")
df = df.dropna(subset=["Sentence"])
df.Sentence = [str(text) for text in df.Sentence]
df = df.sample(n=300000, random_state=0)
df.shape
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df["Subreddit"] = label_encoder.fit_transform(df["Subreddit"])
df["Subreddit"].unique()
labels = list(label_encoder.classes_)
df.sample(2)
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)
test_df, val_df = train_test_split(test_df, test_size=0.5, random_state=42)
print(train_df.shape)
print(val_df.shape)
print(test_df.shape)
from torch.utils.data import Dataset
import numpy as np
import re
import nltk
import string
class Dataset(Dataset):
def __init__(self, dataframe, tokenizer):
texts = dataframe.Sentence.values.tolist()
self._print_random_samples(texts)
self.texts = [
tokenizer(
text,
padding="max_length",
max_length=256,
truncation=True,
return_tensors="pt",
)
for text in texts
]
if "Subreddit" in dataframe:
classes = dataframe.Subreddit.values.tolist()
self.labels = classes
def _print_random_samples(self, texts):
np.random.seed(42)
random_entries = np.random.randint(0, len(texts), 2)
for i in random_entries:
print(f"Entry {i}: {texts[i]}")
print()
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = self.texts[idx]
label = -1
if hasattr(self, "labels"):
label = self.labels[idx]
return text, label
from torch import nn
class Classifier(nn.Module):
def __init__(self, base_model):
super(Classifier, self).__init__()
self.base_model = base_model
self.linear = nn.Linear(base_model.config.hidden_size, 6)
def forward(self, input_ids, attention_mask):
output = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
output = output.last_hidden_state[:, 0, :]
output = self.linear(output)
return output
def train(model, train_dataloader, val_dataloader, learning_rate, epochs):
best_val_loss = float("inf")
early_stopping_threshold_count = 0
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
criterion = torch.nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=learning_rate)
model = model.to(device)
criterion = criterion.to(device)
for epoch in range(epochs):
total_acc_train = 0
total_loss_train = 0
model.train()
for train_input, train_label in tqdm(train_dataloader):
attention_mask = train_input["attention_mask"].to(device)
input_ids = train_input["input_ids"].squeeze(1).to(device)
train_label = train_label.to(device)
output = model(input_ids, attention_mask)
loss = criterion(output, train_label)
total_loss_train += loss.item()
acc = (output.argmax(dim=1) == train_label).sum().item()
total_acc_train += acc
loss.backward()
optimizer.step()
optimizer.zero_grad()
with torch.no_grad():
total_acc_val = 0
total_loss_val = 0
model.eval()
for val_input, val_label in tqdm(val_dataloader):
attention_mask = val_input["attention_mask"].to(device)
input_ids = val_input["input_ids"].squeeze(1).to(device)
val_label = val_label.to(device)
output = model(input_ids, attention_mask)
loss = criterion(output, val_label)
total_loss_val += loss.item()
acc = (output.argmax(dim=1) == val_label).sum().item()
total_acc_val += acc
print(
f"Epochs: {epoch + 1} "
f"| Train Loss: {total_loss_train / len(train_dataloader): .3f} "
f"| Train Accuracy: {total_acc_train / (len(train_dataloader.dataset)): .3f} "
f"| Val Loss: {total_loss_val / len(val_dataloader): .3f} "
f"| Val Accuracy: {total_acc_val / len(val_dataloader.dataset): .3f}"
)
if best_val_loss > total_loss_val:
best_val_loss = total_loss_val
torch.save(model, f"best_model.pt")
print("Saved model")
early_stopping_threshold_count = 0
else:
early_stopping_threshold_count += 1
if early_stopping_threshold_count >= 1:
print("Early stopping")
break
import torch
from transformers import AutoTokenizer, AutoModel
from torch.utils.data import DataLoader
torch.manual_seed(0)
np.random.seed(0)
MODEL = "roberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
base_model = AutoModel.from_pretrained(MODEL)
train_dataloader = DataLoader(
Dataset(train_df, tokenizer), batch_size=8, shuffle=True, num_workers=2
)
val_dataloader = DataLoader(Dataset(val_df, tokenizer), batch_size=8, num_workers=2)
test_dataloader = DataLoader(
Dataset(test_df, tokenizer), batch_size=8, shuffle=False, num_workers=2
)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from torch.optim import Adam
from tqdm import tqdm
class Classifier(nn.Module):
def __init__(self, base_model):
super(Classifier, self).__init__()
self.base_model = base_model
self.num_labels = 6
self.dropout = nn.Dropout(p=0.3)
self.classifier = nn.Linear(self.base_model.config.hidden_size, self.num_labels)
def forward(self, input_ids, attention_mask):
outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits
model = Classifier(base_model)
learning_rate = 1e-5
epochs = 5
train(model, train_dataloader, val_dataloader, learning_rate, epochs)
def get_text_predictions(model, loader):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = model.to(device)
results_predictions = []
with torch.no_grad():
model.eval()
for data_input, _ in tqdm(loader):
attention_mask = data_input["attention_mask"].to(device)
input_ids = data_input["input_ids"].squeeze(1).to(device)
output = model(input_ids, attention_mask)
predictions = torch.argmax(output, dim=1).cpu().detach().numpy()
results_predictions.append(predictions)
return np.concatenate(results_predictions)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from sklearn.metrics import classification_report
# Assuming you have loaded your test set and the corresponding ground truth labels
true_labels = test_df["Subreddit"].tolist()
# Obtain predicted labels using your model and test dataloader
# model = torch.load("/kaggle/working/best_model.pt")
predictions = get_text_predictions(model, test_dataloader)
# Generate classification report
print(classification_report(true_labels, predictions, target_names=labels))
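# A small follow-up sketch: map the numeric predictions back to the original subreddit names
decoded_predictions = label_encoder.inverse_transform(predictions)
print(decoded_predictions[:10])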
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
# Load the dataset
df = pd.read_csv("/kaggle/input/ny-rental-properties-pricing/NY Realstate Pricing.csv")
# Encode categorical features
categorical_features = ["neighbourhood", "room_type"]
encoder = LabelEncoder()
for feature in categorical_features:
encoder.fit(df[feature])
df[feature] = encoder.transform(df[feature])
# Split the dataset into training and testing datasets
X_train, X_test, y_train, y_test = train_test_split(
df.drop("price", axis=1), df["price"], test_size=0.2, random_state=42
)
# Scale numerical features
scaler = StandardScaler()
numerical_features = [
"latitude",
"longitude",
"days_occupied_in_2019",
"minimum_nights",
"number_of_reviews",
"reviews_per_month",
"availability_2020",
]
X_train[numerical_features] = scaler.fit_transform(X_train[numerical_features])
X_test[numerical_features] = scaler.transform(X_test[numerical_features])
# Train the models
lr = LinearRegression()
lr.fit(X_train, y_train)
dt = DecisionTreeRegressor()
dt.fit(X_train, y_train)
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
gb = GradientBoostingRegressor()
gb.fit(X_train, y_train)
# Predict the prices of the testing dataset
lr_pred = lr.predict(X_test)
dt_pred = dt.predict(X_test)
rf_pred = rf.predict(X_test)
gb_pred = gb.predict(X_test)
# Evaluate the performance of the models
lr_mse = mean_squared_error(y_test, lr_pred)
lr_r2 = r2_score(y_test, lr_pred)
dt_mse = mean_squared_error(y_test, dt_pred)
dt_r2 = r2_score(y_test, dt_pred)
rf_mse = mean_squared_error(y_test, rf_pred)
rf_r2 = r2_score(y_test, rf_pred)
gb_mse = mean_squared_error(y_test, gb_pred)
gb_r2 = r2_score(y_test, gb_pred)
print(f"Linear Regression:\nMSE = {lr_mse:.2f}\nR2 score = {lr_r2:.2f}\n")
print(f"Decision Tree Regression:\nMSE = {dt_mse:.2f}\nR2 score = {dt_r2:.2f}\n")
print(f"Random Forest Regression:\nMSE = {rf_mse:.2f}\nR2 score = {rf_r2:.2f}\n")
print(f"Gradient Boosting Regression:\nMSE = {gb_mse:.2f}\nR2 score = {gb_r2:.2f}\n")
# Visualize the Linear Regression model
plt.scatter(y_test, lr_pred)
plt.plot([0, max(y_test)], [0, max(y_test)], "k-", lw=2)  # identity line: perfect predictions would fall on it
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title("Linear Regression")
plt.show()
|
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
# # **Applications of Complex Networks: Politics**
# In this work, roll-call voting data from the Brazilian Chamber of Deputies (Câmara dos Deputados) is used to model deputies according to their positions when voting on propositions.
# The published paper is available [here](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0229928).
#
"""
libraries required to run the code
"""
import glob
import json
import tqdm
import leidenalg
import numpy as np
import pandas as pd  # needed for pd.read_csv in net_year below
import igraph as ig
import xnetwork as xnet  # file format used to save graphs
from scipy import integrate
from itertools import combinations
from collections import defaultdict
path = "drive/MyDrive/BigData/" # esse é o caminho onde estão os arquivos para esse projeto
# se for rodar o código localmente, precisa atualizar essa variável conforme onde estão salvos seus arquivos
# ## **Creating the networks**
# ### What the data are and where to download them:
# The data are votes on propositions in the Brazilian Chamber of Deputies, containing: the deputies' names, party, and vote (yes, no, obstruction, absence).
# They can be downloaded from the open data portal [here](https://dadosabertos.camara.leg.br/swagger/api.html), in the *Voto de cada parlamentar* section.
# ### How we built the networks:
# Similarity between the deputies' votes: for each proposition, when two deputies vote the same way we add +1, and when they diverge we add -1. This value is normalized by the total number of voted propositions used to build the network. Only positive connections are considered.
# The files are already available in *datasets/politica*.
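# A minimal sketch (toy data, not the real dataset) of the weighting rule
# described above: +1 when two deputies vote the same way, -1 when they
# diverge, normalized by the number of propositions; only positive weights
# would become edges.
toy_votes_dep_a = ["Sim", "Não", "Sim"]
toy_votes_dep_b = ["Sim", "Sim", "Sim"]
toy_weight = sum(
    1 if va == vb else -1 for va, vb in zip(toy_votes_dep_a, toy_votes_dep_b)
) / len(toy_votes_dep_a)
print("toy edge weight:", toy_weight)  # (+1 - 1 + 1) / 3 = 0.33...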
def create_net(nodes_names, edges, norm, dinfos):
g = ig.Graph()
g.add_vertices(len(nodes_names))
g.vs["name"] = [str(name) for name in nodes_names]
g.vs["fullname"] = [dinfos[int(idname)][1] for idname in g.vs["name"]]
g.vs["pp"] = [dinfos[int(idname)][2] for idname in g.vs["name"]]
elist = []
wlist = []
for pair, weight in edges.items():
if weight > 0:
a, b = tuple(pair)
a = str(a)
b = str(b)
elist.append((a, b))
wlist.append(weight / norm)
g.add_edges(elist)
g.es["weight"] = wlist
return g
def net_year(file):
print(file)
votes = pd.read_csv(file, sep=";")[
[
"idVotacao",
"dataHoraVoto",
"voto",
"deputado_id",
"deputado_nome",
"deputado_siglaPartido",
]
]
votes["ano"] = votes["dataHoraVoto"].apply(lambda row: row[:4])
del votes["dataHoraVoto"]
"""
    the parliamentarians' votes are processed year by year
"""
for yyyy, window in votes.groupby("ano"):
dinfos = defaultdict(
lambda: (-1, -1, -1)
        ) # this dictionary stores the vertex info (the deputies)
pairs = defaultdict(
lambda: 0
        ) # this dictionary stores the edges (pairs of deputies) and the connection weights
pools = 0
dlist = set()
for id_votacao, voto in tqdm.tqdm(window.groupby("idVotacao")):
pools += 1
for d1, d1_info in voto.iterrows():
dinfos[d1_info["deputado_id"]] = (
d1_info["deputado_id"],
d1_info["deputado_nome"],
d1_info["deputado_siglaPartido"],
)
dlist.add(d1_info["deputado_id"])
for d2, d2_info in voto.iterrows():
if d1 == d2:
break
pair = frozenset({d1_info["deputado_id"], d2_info["deputado_id"]})
if d1_info["voto"] == d2_info["voto"]:
pairs[pair] += 1
else:
pairs[pair] += -1
net = create_net(
dlist, pairs, pools, dinfos
        ) # with the data preprocessed, the corresponding network is then created
xnet.igraph2xnet(
net, path + "networks/politica/covote_deps_%s_original.xnet" % yyyy
        ) # save the network
files = glob.glob(
path + "datasets/politica/votacoesVotos*.csv"
) # these are the data obtained from the Chamber of Deputies website
print(files)
for file in files:
    net_year(file)
# ## **Edge filtering**
# Implementation of the method proposed in [Extracting the multiscale backbone of complex weighted networks](https://www.pnas.org/content/106/16/6483.short).
# The general idea is to preserve the most relevant connections of the network. In this implementation, we find the cut that preserves a given percentage of the size of the original giant component.
from igraph import *
# source: https://github.com/aekpalakorn/python-backbone-network/blob/master/backbone.py
def disparity_filter(g):
total_vtx = g.vcount()
g.es["alpha_ij"] = 1
for v in range(total_vtx):
edges = g.incident(v)
k = len(edges)
if k > 1:
sum_w = sum([g.es[e]["weight"] for e in edges])
for e in edges:
w = g.es[e]["weight"]
p_ij = w / sum_w
alpha_ij = (
1
- (k - 1) * integrate.quad(lambda x: (1 - x) ** (k - 2), 0, p_ij)[0]
)
g.es[e]["alpha_ij"] = min(alpha_ij, g.es[e]["alpha_ij"])
def alpha_cut(alpha, g):
g_copy = g.copy()
to_delete = g_copy.es.select(alpha_ij_ge=alpha)
g_copy.delete_edges(to_delete)
return g_copy
def get_largest_component_size(g):
components = g.components()
giant = components.giant()
return giant.vcount()
"""
this function implements a binary search to find the
alpha that preserves about 80% of the network's original giant component
"""
def get_best_cut(net, preserve_percent, a_min, a_max):
error = 0.015
largest_size = get_largest_component_size(net)
min_erro = 1000
a_min_erro = 0.0
def get_current_percent(a):
nonlocal min_erro, a_min_erro, a_min, a_max
cuted_net = alpha_cut(a, net)
# print('number of edges',cuted_net.ecount())
preserved_size = get_largest_component_size(cuted_net)
# print('preserved size',preserved_size)
current_percent = preserved_size / largest_size
if min_erro > abs(current_percent - preserve_percent):
min_erro = abs(current_percent - preserve_percent)
a_min_erro = a
return cuted_net, current_percent, a
i = 0
a_min_perc = get_largest_component_size(alpha_cut(a_min, net)) / largest_size
a_max_perc = get_largest_component_size(alpha_cut(a_max, net)) / largest_size
a = 0.0
while True:
if i > 100:
cuted_net = alpha_cut(a_min_erro, net)
print("error infinity loop")
print(
"alpha %.2f; preserved %.2f" % (a_min_erro, min_erro + preserve_percent)
)
print()
return cuted_net
i += 1
a = (a_min + a_max) / 2
cuted_net, current_percent, a = get_current_percent(a)
current_erro = current_percent - preserve_percent
if abs(current_erro) < error:
print("total iterations", i)
print("alpha %.2f; preserved %.2f" % (a, current_percent))
print()
return cuted_net
if (a_min_perc - preserve_percent) * (current_percent - preserve_percent) > 0:
a_min = a
a_min_perc = current_percent
else:
a_max = a
a_max_perc = current_percent
def apply_backbone(net, a_min, a_max, preserve=0.8):
disparity_filter(net)
best = get_best_cut(net, preserve, a_min, a_max)
return best
files = glob.glob(path + "networks/politica/covote_deps_*_original.xnet")
print(files)
for file in files:
net = xnet.xnet2igraph(file)
net = apply_backbone(net, 0.0001, 1, preserve=0.8)
xnet.igraph2xnet(net, file.replace("original", "filtered"))
# ## **Community detection**
# Detecting communities in this context can show us which deputies are most connected to one another, which is interesting to compare against their political parties.
# The method used is available in the [leidenalg](https://github.com/vtraag/leidenalg) package.
def get_largest_component(g):
components = g.components()
giant = components.giant()
return giant
"""
applies the leidenalg algorithm and stores the communities in a vertex property
called "community"
"""
def identify_communities_leidenalg(net):
giant = get_largest_component(net)
comms = leidenalg.find_partition(giant, leidenalg.ModularityVertexPartition)
comm_list = comms.subgraphs() # communities in current level
print("Number of communities identified:", len(comm_list))
net_copy = net.copy()
net_copy.vs["community"] = "-1"
for idx, comm in enumerate(comm_list):
for v1 in comm.vs:
v2 = net_copy.vs.find(name=v1["name"])
v2["community"] = str(idx + 1)
return net_copy
files = glob.glob(path + "networks/politica/covote_deps_*filtered.xnet")
print(files)
for file in files:
net = xnet.xnet2igraph(file)
net = identify_communities_leidenalg(net)
xnet.igraph2xnet(net, file.replace("filtered", "filtered_comm"))
# ## **Networks per year**
# To visualize the data we will use the [Helios-web](https://filipinascimento.github.io/helios-web/) visualizer.
# ## **Global analyses**
# ### Diversity
# \begin{equation}
# D = \exp\left(-\sum_{i=1}^{R} p_i \ln p_i\right)
# \end{equation}
# The diversity measure reflects the effective number of classes that exist in a set, for example the deputies distributed among the various parties. The effective number of classes reflects how evenly the elements of the group are distributed.
# In this work, we use diversity to compare the number of existing parties with the effective number of parties.
# direct implementation of the equation defined above
def div_by_param(nets, param):
divs = []
totals = []
for net in nets:
pps = net.vs[param]
unique, count = np.unique(pps, return_counts=True)
total = sum(count)
probs = count / total
entropy = -sum(np.log(probs) * probs)
div = np.exp(entropy)
divs.append(div)
totals.append(len(unique))
return divs, totals
files = glob.glob(path + "networks/politica/covote_deps_*filtered_comm.xnet")
print(files)
nets = [xnet.xnet2igraph(file) for file in files]
divs, totals = div_by_param(nets, "pp")
print(divs)
print(totals)
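# A small worked example of the diversity measure (toy numbers, not taken from
# the networks above): with party proportions [0.5, 0.3, 0.2], the effective
# number of parties is exp(-(0.5*ln 0.5 + 0.3*ln 0.3 + 0.2*ln 0.2)) ~= 2.8,
# i.e. fewer than the 3 nominal parties because the distribution is uneven.
toy_probs = np.array([0.5, 0.3, 0.2])
toy_diversity = np.exp(-np.sum(toy_probs * np.log(toy_probs)))
print("effective number of parties:", toy_diversity)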
# ### **Diversity, number of communities, and number of parties**
# ### Similarity to distance
# \begin{equation}
# \Delta(w_{ij}) = \sqrt{2 \cdot (1 - w_{ij})}
# \end{equation}
import math
# computes the distance and stores it as an edge attribute
def calculate_dist(filenames):
for filename in filenames:
# print(filename)
net = xnet.xnet2igraph(filename)
weights = net.es["weight"]
weights = [math.sqrt(2 * (1 - w)) for w in weights]
if len(weights) > 0:
net.es["distance"] = weights
xnet.igraph2xnet(net, filename[:-5] + "_dist.xnet")
else:
print("error", filename)
files = glob.glob(path + "networks/politica/covote_deps_*filtered_comm.xnet")
calculate_dist(files)
# ### **Shortest paths between political parties**
import matplotlib.pyplot as plt
def get_freqs(summaries, dates):
ys = defaultdict(lambda: defaultdict(lambda: []))
freq_dict = defaultdict(lambda: [])
for d in dates:
year_summary = summaries[d]
for pp1, summary_pp1 in year_summary.items():
if summary_pp1:
for pp2, (mean, std, f) in summary_pp1.items():
ys[pp1][pp2].append((d, mean, std, f))
freq_dict[pp2].append(f)
freq = [(np.nanmean(fs), pp) for pp, fs in freq_dict.items()]
freq = sorted(freq, reverse=True)
i = 0
f_max = freq[i][0]
while np.isnan(freq[i][0]):
i += 1
f_max = freq[i][0]
return ys, freq, f_max
# computes the shortest paths between parties and stores them in the summary dictionary
def calculate_shortest_paths(net, pps):
summary = defaultdict(lambda: defaultdict(lambda: 0))
all_paths = []
for pp1 in pps:
sources = net.vs.select(pp_eq=pp1)
for pp2 in pps:
targets = net.vs.select(pp_eq=pp2)
targets = [v.index for v in targets]
paths = []
for s in sources:
path_lens = net.get_shortest_paths(
s, to=targets, weights="distance", output="epath"
)
for p in path_lens:
x = sum(net.es[idx]["distance"] for idx in p)
if x > 0:
paths.append(x)
all_paths.append(x)
if len(paths) == 0:
summary[pp1][pp2] = (np.nan, np.nan, np.nan)
summary[pp2][pp1] = (np.nan, np.nan, np.nan)
else:
mean = np.mean(paths)
std_dev = np.std(paths)
summary[pp1][pp2] = (mean, std_dev, len(targets))
summary[pp2][pp1] = (mean, std_dev, len(sources))
if pp1 == pp2:
break
all_paths_mean = np.mean(
all_paths
    ) # the overall mean over all deputies against all deputies
all_paths_std = np.std(all_paths)
return summary, (all_paths_mean, all_paths_std)
# prepara os dados para o formato que a função de plot recebe
def shortest_path_by_pp(freq, pp2_means, f_max):
to_plot = dict()
for f, pp2 in freq:
means_std = pp2_means[pp2]
means_std = np.asarray(means_std)
means = means_std[:, 1]
std = means_std[:, 2]
xs = means_std[:, 0]
fraq = f / f_max
if not np.isnan(means).all():
to_plot[pp2] = (means, std, fraq, xs)
return to_plot
# recebe os dados processados para gerar o plot
def plot_metric(pp, to_plot, color_map):
plt.figure(figsize=(8, 3))
xs2 = []
for pp1, (means, total_std, fraq, xs) in to_plot.items():
if len(xs) > len(xs2):
xs2 = xs
fraq = max(
fraq, 0.45
        ) # the line thickness changes according to the party size;
        # if the party is very small, we set a minimum value so the line is still visible
plt.errorbar(
xs,
means,
total_std,
linestyle="-",
label=pp1.upper(),
fmt="o",
elinewidth=1.5 * fraq,
linewidth=2 * fraq,
markersize=2 * fraq,
alpha=max(0.6, fraq),
color=color_map[pp1],
)
plt.legend(loc="upper right", bbox_to_anchor=(1.05, 1.0))
plt.title(pp)
plt.xlabel("year")
plt.ylabel("average shortest path")
plt.show()
def plot_shortest_paths(dates, nets, valid_pps, color_map):
summaries = dict()
all_paths_summary = []
for date, net in zip(dates, nets):
summaries[date], all_paths = calculate_shortest_paths(net, valid_pps)
all_paths_summary.append(all_paths)
all_paths_summary = np.asarray(all_paths_summary)
ys, _, _ = get_freqs(summaries, dates)
    # for each party being analyzed, a plot is generated
for pp1, pp2_means in ys.items():
if not pp1 in valid_pps:
continue
freq = []
for pp2, means_std in pp2_means.items():
means_std = np.array(means_std)
freq.append((np.nanmean(means_std[:, 3]), pp2))
freq = sorted(freq, reverse=True)
f_max = freq[0][0]
to_plot = shortest_path_by_pp(freq, pp2_means, f_max)
to_plot["all"] = (all_paths_summary[:, 0], all_paths_summary[:, 1], 0.3, dates)
plot_metric(pp1, to_plot, color_map)
def plot_shortest_paths_all_years(dates, nets, valid_pps, color_map):
plot_shortest_paths(dates, nets, valid_pps, color_map)
files = glob.glob(path + "networks/politica/covote_deps_*filtered_comm_dist.xnet")
nets = [xnet.xnet2igraph(file) for file in files]
dates = [2016, 2017, 2018, 2019]
valid_pps = ["PSDB", "PP", "PMDB", "PT", "DEM"]
color_map = {
"PSDB": "blue",
"PP": "purple",
"PMDB": "green",
"PT": "red",
"DEM": "orange",
"all": "gray",
}
plot_shortest_paths_all_years(dates, nets, valid_pps, color_map)
|
import os
import glob
from collections import namedtuple
import functools
import csv
CandidateInfoTuple = namedtuple(
"CandidateInfoTuple",
"isNodule_bool, diameter_mm, series_uid, center_xyz",
)
# cache the results of the function call in memory
@functools.lru_cache()
def getCandidateInfoList(requireOnDisk_bool=True):
mhd_list = glob.glob("data-unversioned/part2/luna/subset*/*.mhd")
presentOnDisk_set = {os.path.split(p)[-1][:-4] for p in mhd_list}
diameter_dict = {}
with open("data/part2/luna/annotations.csv", "r") as f:
for row in list(csv.reader(f))[1:]:
series_uid = row[0]
annotationCenter_xyz = tuple([float(x) for x in row[1:4]])
annotationDiameter_mm = float(row[4])
diameter_dict.setdefault(series_uid, []).append(
(annotationCenter_xyz, annotationDiameter_mm)
)
candidatInfo_list = []
with open("data/part2/luna/candidates.csv", "r") as f:
for row in list(csv.reader(f))[1:]:
series_uid = row[0]
            # if series_uid isn't present, it's in a subset we don't have on disk, so skip it
if series_uid not in presentOnDisk_set and requireOnDisk_bool:
continue
isNodule_bool = bool(int(row[4]))
candidateCenter_xyz = tuple([float(x) for x in row[1:4]])
candidateDiameter_mm = 0.0
for annotation_tup in diameter_dict.get(series_uid, []):
annotationCenter_xyz, annotationDiameter_mm = annotation_tup
for i in range(3):
delta_mm = abs(candidateCenter_xyz[i] - annotationCenter_xyz[i])
                # to require that two nodule center points not be too far apart relative to the size of the nodule
if delta_mm > annotationDiameter_mm / 4:
break
else:
candidateDiameter_mm = annotationDiameter_mm
break
candidatInfo_list.append(
CandidateInfoTuple(
isNodule_bool, candidateDiameter_mm, series_uid, candidateCenter_xyz
)
)
candidatInfo_list.sort(reverse=True)
return candidatInfo_list
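# Minimal usage sketch (assumes the annotations.csv and candidates.csv files
# exist at the paths used above; with requireOnDisk_bool=False the .mhd files
# are not required).
if __name__ == "__main__":
    candidateInfo_list = getCandidateInfoList(requireOnDisk_bool=False)
    print(len(candidateInfo_list), "candidates")
    print(candidateInfo_list[:3])  # actual nodules with the largest diameters sort first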
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
# helpful character encoding module
import chardet
# set seed for reproducibility
np.random.seed(0)
with open("../input/budgetdataset1/FlatTable_1606228231675.csv", "rb") as rawdata:
result = chardet.detect(rawdata.read(20000))
# check what the character encoding might be
print(result)
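# Optional follow-up (an assumption, not part of the original flow): the
# encoding reported by chardet could be reused directly instead of hard-coding
# it in the read_csv call below.
detected_encoding = result["encoding"]
print("detected encoding:", detected_encoding)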
cols = [3, 4, 5]
Spending = pd.read_csv(
"../input/budgetdataset1/FlatTable_1606228231675.csv",
sep=";",
encoding="ISO-8859-1",
usecols=cols,
)
Spending.head()
# save the file
Spending.to_csv("FlatTable_1606228231675-utf8.csv")
missing_values_count = Spending.isnull().sum()
missing_values_count[0:10]
# fill the NA values with zeros (assign the result; fillna does not modify in place by default)
Spending = Spending.fillna(0)
# I found out that I could simply drop the rows and columns containing 0, as they are not relevant
Spending.shape
df = pd.DataFrame(Spending)
print(df)
df.drop([430, 432], axis=0, inplace=True)
df.drop([433, 434], axis=0, inplace=True)
df.drop([426, 427, 428, 429, 431], axis=0, inplace=True)
df.drop([424, 425], axis=0, inplace=True)
# df.drop(['notas','fuente'], axis=1, inplace=True)
df.head()
# df.columns.str.match("Unnamed")
# df.loc[:,~df.columns.str.match("Unnamed")]
# save the file
df.to_csv("FlatTable_1606228231675-utf8_2.csv")
df.info()
df.valor.describe()
df.valor.sum()
df["year"] = df["Años"].astype(str).str.extract("(\d{4})").astype(int)
print(df)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import holidays
from datetime import datetime
from sklearn.feature_selection import f_regression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, TimeSeriesSplit
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from colorama import Style, Fore
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
mgt = Style.BRIGHT + Fore.MAGENTA
grn = Style.BRIGHT + Fore.GREEN
gld = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL
import os
import re
import sys
from xml.etree import ElementTree
from collections import Counter, OrderedDict
from tqdm import tqdm
import optuna
import plotly.express as px
train = pd.read_csv("/kaggle/input/kaggle-pog-series-s01e04/train.csv")
test = pd.read_csv("/kaggle/input/kaggle-pog-series-s01e04/test.csv")
train_detailed = pd.read_csv(
"/kaggle/input/kaggle-pog-series-s01e04/train_detailed.csv"
)
submission = pd.read_csv("/kaggle/input/kaggle-pog-series-s01e04/sample_submission.csv")
__version__ = "1.3"
RECORD_FIELDS = OrderedDict(
(
("sourceName", "s"),
("sourceVersion", "s"),
("device", "s"),
("type", "s"),
("unit", "s"),
("creationDate", "d"),
("startDate", "d"),
("endDate", "d"),
("value", "n"),
)
)
ACTIVITY_SUMMARY_FIELDS = OrderedDict(
(
("dateComponents", "d"),
("activeEnergyBurned", "n"),
("activeEnergyBurnedGoal", "n"),
("activeEnergyBurnedUnit", "s"),
("appleExerciseTime", "s"),
("appleExerciseTimeGoal", "s"),
("appleStandHours", "n"),
("appleStandHoursGoal", "n"),
)
)
WORKOUT_FIELDS = OrderedDict(
(
("sourceName", "s"),
("sourceVersion", "s"),
("device", "s"),
("creationDate", "d"),
("startDate", "d"),
("endDate", "d"),
("workoutActivityType", "s"),
("duration", "n"),
("durationUnit", "s"),
("totalDistance", "n"),
("totalDistanceUnit", "s"),
("totalEnergyBurned", "n"),
("totalEnergyBurnedUnit", "s"),
)
)
FIELDS = {
"Record": RECORD_FIELDS,
"ActivitySummary": ACTIVITY_SUMMARY_FIELDS,
"Workout": WORKOUT_FIELDS,
}
PREFIX_RE = re.compile("^HK.*TypeIdentifier(.+)$")
ABBREVIATE = True
VERBOSE = True
def format_freqs(counter):
"""
Format a counter object for display.
"""
return "\n".join("%s: %d" % (tag, counter[tag]) for tag in sorted(counter.keys()))
def format_value(value, datatype):
"""
Format a value for a CSV file, escaping double quotes and backslashes.
None maps to empty.
datatype should be
's' for string (escaped)
'n' for number
'd' for datetime
"""
if value is None:
return ""
elif datatype == "s": # string
return '"%s"' % value.replace("\\", "\\\\").replace('"', '\\"')
elif datatype in ("n", "d"): # number or date
return value
else:
raise KeyError("Unexpected format value: %s" % datatype)
def abbreviate(s, enabled=ABBREVIATE):
"""
Abbreviate particularly verbose strings based on a regular expression
"""
m = re.match(PREFIX_RE, s)
return m.group(1) if enabled and m else s
class HealthDataExtractor(object):
"""
Extract health data from Apple Health App's XML export, export.xml.
Inputs:
path: Relative or absolute path to export.xml
verbose: Set to False for less verbose output
Outputs:
Writes a CSV file for each record type found, in the same
directory as the input export.xml. Reports each file written
unless verbose has been set to False.
"""
def __init__(self, path, dest_path, verbose=VERBOSE):
self.in_path = path
self.verbose = verbose
self.directory = os.path.abspath(os.path.split(path)[0])
self.dest = dest_path
with open(path) as f:
self.report("Reading data from %s . . . " % path, end="")
self.data = ElementTree.parse(f)
self.report("done")
self.root = self.data._root
self.nodes = list(self.root)
self.n_nodes = len(self.nodes)
self.abbreviate_types()
self.collect_stats()
def report(self, msg, end="\n"):
if self.verbose:
print(msg, end=end)
sys.stdout.flush()
def count_tags_and_fields(self):
self.tags = Counter()
self.fields = Counter()
for record in self.nodes:
self.tags[record.tag] += 1
for k in record.keys():
self.fields[k] += 1
def count_record_types(self):
"""
Counts occurrences of each type of (conceptual) "record" in the data.
In the case of nodes of type 'Record', this counts the number of
occurrences of each 'type' or record in self.record_types.
In the case of nodes of type 'ActivitySummary' and 'Workout',
it just counts those in self.other_types.
The slightly different handling reflects the fact that 'Record'
nodes come in a variety of different subtypes that we want to write
to different data files, whereas (for now) we are going to write
all Workout entries to a single file, and all ActivitySummary
entries to another single file.
"""
self.record_types = Counter()
self.other_types = Counter()
for record in self.nodes:
if record.tag == "Record":
self.record_types[record.attrib["type"]] += 1
elif record.tag in ("ActivitySummary", "Workout"):
self.other_types[record.tag] += 1
elif record.tag in ("Export", "Me"):
pass
else:
self.report("Unexpected node of type %s." % record.tag)
def collect_stats(self):
self.count_record_types()
self.count_tags_and_fields()
def open_for_writing(self):
self.handles = {}
self.paths = []
for kind in list(self.record_types) + list(self.other_types):
path = os.path.join(self.dest, "%s.csv" % abbreviate(kind))
f = open(path, "w")
headerType = kind if kind in ("Workout", "ActivitySummary") else "Record"
f.write(",".join(FIELDS[headerType].keys()) + "\n")
self.handles[kind] = f
self.report("Opening %s for writing" % path)
def abbreviate_types(self):
"""
Shorten types by removing common boilerplate text.
"""
for node in self.nodes:
if node.tag == "Record":
if "type" in node.attrib:
node.attrib["type"] = abbreviate(node.attrib["type"])
def write_records(self):
kinds = FIELDS.keys()
for node in self.nodes:
if node.tag in kinds:
attributes = node.attrib
kind = attributes["type"] if node.tag == "Record" else node.tag
values = [
format_value(attributes.get(field), datatype)
for (field, datatype) in FIELDS[node.tag].items()
]
line = ",".join(values) + "\n"
self.handles[kind].write(line)
def close_files(self):
for kind, f in self.handles.items():
f.close()
self.report("Written %s data." % abbreviate(kind))
def extract(self):
self.open_for_writing()
self.write_records()
self.close_files()
def report_stats(self):
print("\nTags:\n%s\n" % format_freqs(self.tags))
print("Fields:\n%s\n" % format_freqs(self.fields))
print("Record types:\n%s\n" % format_freqs(self.record_types))
data = HealthDataExtractor(
path="/kaggle/input/kaggle-pog-series-s01e04/raw_health_export.xml",
dest_path="/kaggle/working/",
verbose=False,
)
data.report_stats()
data.extract()
# files = os.listdir()
# files_to_use = list()
# for file in files:
# if(file.split('.')[1] == 'csv'):
# data = pd.read_csv('/kaggle/working/'+file)
# if(data.shape[0]>1000):
# files_to_use.append(file)
# print(files_to_use)
# for file in files_to_use:
# # data = pd.read_csv('/kaggle/working/'+file,usecols = ['creationDate','startDate','endDate','value'],, )
# date_columns = ['creationDate','startDate','endDate']
# data = pd.read_csv('/kaggle/working/'+file,parse_dates=date_columns)
# data.drop(['sourceName','sourceVersion','device'],axis=1,inplace=True)
# for col in date_columns:
# data[col] = data[col].dt.tz_convert(None)
# data[col] = data[col].apply(lambda x:x.strftime('%Y-%m-%d'))
# print(f'File Name: {file} \nShape : {data.shape} \nNull Values :\n ')
# print('Min Creation Date:',data['creationDate'].min())
# print('Max Creation Date:',data['creationDate'].max())
# print('Min start Date:',data['startDate'].min())
# print('Max start Date:',data['startDate'].max())
# print('Min end Date:',data['endDate'].min())
# print('Max end Date:',data['endDate'].max())
# useful_files = ['StepCount.csv','HeartRate.csv','DistanceWalkingRunning.csv','BasalEnergyBurned.csv','FlightsClimbed.csv','ActiveEnergyBurned.csv','BodyMassIndex.csv','Workout.csv','BodyMass.csv']
useful_files = [
"FlightsClimbed.csv",
"Workout.csv",
"DistanceWalkingRunning.csv",
"StepCount.csv",
"BodyMassIndex.csv",
]
df = train.copy()
test_data = test.copy()
for file in useful_files:
# file = pd.read_csv('/kaggle/working/'+file)
# date_columns = ['creationDate','startDate','endDate']
date_columns = ["startDate"]
if file == "Workout.csv":
data = pd.read_csv("/kaggle/working/" + file, usecols=["startDate", "duration"])
else:
data = pd.read_csv("/kaggle/working/" + file, usecols=["startDate", "value"])
data = data.sort_values("startDate").reset_index(drop=True)
for col in date_columns:
# data[col] = pd.to_datetime(data[col])
data[col] = pd.to_datetime(data[col]).dt.tz_convert(None)
data[col] = data[col].apply(lambda x: x.strftime("%Y-%m-%d"))
trail = pd.merge(train, data, how="inner", left_on="date", right_on="startDate")
if file == "Workout.csv":
df[file.split(".")[0]] = trail["duration"]
test_data[file.split(".")[0]] = trail["duration"]
else:
df[file.split(".")[0]] = trail["value"]
test_data[file.split(".")[0]] = trail["value"]
print(f"{gld} {file}")
display(data.head(3))
df.head()
to_train = df.copy()
to_test = test_data.copy()
fig = px.line(x=df.date, y=df.sleep_hours)
fig.show()
df.loc[((df["date"] >= "2017-09-27") & (df["date"] <= "2018-06-12")), "sleep_hours"] = (
df.loc[((df["date"] >= "2017-09-27") & (df["date"] <= "2018-06-12")), "sleep_hours"]
/ 2
)
# sns.pairplot(df)
# df.hist(bins = 50,figsize = (15,15))
# https://www.kaggle.com/code/rolanderdei/predict-my-sleep-patterns-with-simple-ensembling/notebook
q1 = df["sleep_hours"].quantile(0.25)
q3 = df["sleep_hours"].quantile(0.75)
IQR = q3 - q1
lb = q1 - IQR * 1.5
ub = q3 + IQR * 1.5
df = df[(df["sleep_hours"] >= lb) & (df["sleep_hours"] <= ub)]
df = df[df["date"] >= "2015-07-20"]
fig = px.line(x=df.date, y=df.sleep_hours)
fig.show()
# #### Note the max value of sleep_hours
# # Credits
# #### As noted in the notebook below, the data at the start is noisy, so we remove it
# https://www.kaggle.com/code/airqualityanthony/pogchamps4-prophet-arima-xgboost-ensemble
# This is my first time using Facebook Prophet
# plt.figure(figsize = (10,5))
# sns.lineplot(data = df,x = 'date',y = 'sleep_hours')
# plt.axvline('2015-07-20',c='red')
# df = df[df['date']>='2015-07-20']
# sns.lineplot(data = df,x = 'date',y = 'sleep_hours')
# ## Facebook Prophet
years = [2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023]
us_holidays = holidays.CountryHoliday(country="US", years=years)
df.info()
def add_holidays(df):
df["holiday"] = df["date"].map(us_holidays)
df["is_holiday"] = np.where(df["holiday"].notnull(), 1, 0)
df["holiday"] = df["holiday"].fillna("Not a holiday")
return df.drop("holiday", axis=1)
def fe(df):
df["date"] = pd.to_datetime(df["date"])
df["dayofweek"] = df["date"].dt.dayofweek
df["quarter"] = df["date"].dt.quarter
df["day"] = df["date"].dt.day
df["month"] = df["date"].dt.month
df["year"] = df["date"].dt.year
df["dayofyear"] = df["date"].dt.dayofyear
df["weekofyear"] = df["date"].dt.weekofyear
df["weekday"] = df["date"].dt.weekday
# df['weekday'] = df['date'].apply(lambda x: x.weekday())
# df['day'] = df['date'].apply(lambda x: x.day)
# df['month'] = df['date'].apply(lambda x: x.month)
# df['year'] = df['date'].apply(lambda x: x.year)
return df
df = add_holidays(df)
test_data = add_holidays(test_data)
df = fe(df)
test_data = fe(test_data)
df.head()
# prophet_predictions = predicted[(predicted['ds']>=pd.to_datetime('2022-01-01')) & (predicted['ds']<=pd.to_datetime('2023-03-16'))]['yhat'].values
# prophet_predictions = predicted[(predicted['ds']>=pd.to_datetime('2022-01-01')) & (predicted['ds']<=pd.to_datetime('2023-03-16'))]['yhat'].values
# prophet_predictions = predicted[(predicted['ds']>=pd.to_datetime('2022-01-01')) & (predicted['ds']<=pd.to_datetime('2023-03-16'))]['yhat'].values
# train = train[~((train['date'] >= '2017-09-27')&(train['date']<='2018-06-12'))]
to_be_dropped = ["day", "date", "sleep_hours"]
x = df.drop(to_be_dropped, axis=1)
y = df.sleep_hours
features = x.columns
x.fillna(method="ffill", inplace=True)
result = f_regression(x, y)
x.columns
p_values = result[1].round(3)
print(p_values)
# x.drop(,axis = 1,inplace=True)
# x.drop(['is_holiday','day'],axis = 1,inplace=True)
test_data = test_data.drop(to_be_dropped, axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x = pd.DataFrame(scaler.fit_transform(x), columns=features)
test_data = pd.DataFrame(scaler.transform(test_data), columns=features)
test_data.shape
# len(prophet_predictions)
print(x.columns, test_data.columns)
# def objective(trial):
# # Define search space for hyperparameters
# param = {
# 'objective': 'reg:squarederror',
# 'colsample_bytree': trial.suggest_uniform('colsample_bytree', 0.3, 1.0),
# 'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 0.3),
# 'max_depth': trial.suggest_int('max_depth', 3, 10),
# 'alpha': trial.suggest_loguniform('alpha', 1e-5, 10),
# 'n_estimators': trial.suggest_int('n_estimators', 100, 1000, step=100),
# 'subsample': trial.suggest_uniform('subsample', 0.6, 1.0),
# 'min_child_weight': trial.suggest_int('min_child_weight', 1, 10)
# }
# tss = TimeSeriesSplit(n_splits=5,test_size=50,gap=0)
# rmse=[] # list contains rmse for each fold
# n=0
# for trn_idx, test_idx in tss.split(x,y):
# X_tr,X_val=x.iloc[trn_idx],x.iloc[test_idx]
# y_tr,y_val=y.iloc[trn_idx],y.iloc[test_idx]
# model = XGBRegressor(n_estimators = 1000)
# model.fit(X_tr,y_tr,eval_set=[(X_val,y_val)],early_stopping_rounds=100,verbose=False)
# RMSE = mean_squared_error(y_val, model.predict(X_val))
# rmse.append(np.sqrt(mean_squared_error(y_val, model.predict(X_val))))
# print(f"fold: {n+1} ==> rmse: {rmse[n]}")
# n+=1
# study = optuna.create_study(direction = 'minimize')
# study.optimize(objective,n_trials=10)
# print(f'Number of trials finished: {len(study.trials)}')
# print(f'Best Params : {study.best_trial.params}')
# study = optuna.create_study(direction = 'minimize')
# study.optimize(objective,n_trials=10)
# print(f'Number of trials finished: {len(study.trials)}')
# print(f'Best Params : {study.best_trial.params}')
cat_params = {
"subsample": 0.7,
"learning_rate": 0.04774587972153831,
"early_stopping_rounds": 50,
"max_depth": 11,
"n_estimators": 999,
}
xgb_params = {
"colsample_bytree": 0.5544110677821752,
"learning_rate": 0.01849622913339397,
"max_depth": 4,
"alpha": 1.2501659176615678,
"n_estimators": 800,
"subsample": 0.9494317563361678,
"min_child_weight": 5,
}
lgbm_params = {
"subsample": 0.8,
"learning_rate": 0.08559145786845979,
"early_stopping_rounds": 50,
"max_depth": 15,
"n_estimators": 999,
}
preds = list()
kf = KFold(n_splits=10, random_state=48, shuffle=True)
rmse = [] # list contains rmse for each fold
n = 0
for trn_idx, test_idx in kf.split(x, y):
X_tr, X_val = x.iloc[trn_idx], x.iloc[test_idx]
y_tr, y_val = y.iloc[trn_idx], y.iloc[test_idx]
model = XGBRegressor(**xgb_params)
model.fit(
X_tr, y_tr, eval_set=[(X_val, y_val)], early_stopping_rounds=100, verbose=False
)
preds.append(model.predict(test_data))
RMSE = mean_squared_error(y_val, model.predict(X_val))
rmse.append(np.sqrt(mean_squared_error(y_val, model.predict(X_val))))
print(f"fold: {n+1} ==> rmse: {rmse[n]}")
n += 1
xgb_pred = np.mean(preds, axis=0)
preds = list()
kf = KFold(n_splits=10, random_state=48, shuffle=True)
rmse = [] # list contains rmse for each fold
n = 0
for trn_idx, test_idx in kf.split(x, y):
X_tr, X_val = x.iloc[trn_idx], x.iloc[test_idx]
y_tr, y_val = y.iloc[trn_idx], y.iloc[test_idx]
model = LGBMRegressor(**lgbm_params)
model.fit(
X_tr, y_tr, eval_set=[(X_val, y_val)], early_stopping_rounds=100, verbose=False
)
preds.append(model.predict(test_data))
RMSE = mean_squared_error(y_val, model.predict(X_val))
rmse.append(np.sqrt(mean_squared_error(y_val, model.predict(X_val))))
print(f"fold: {n+1} ==> rmse: {rmse[n]}")
n += 1
lgbm_pred = np.mean(preds, axis=0)
preds = list()
kf = KFold(n_splits=10, random_state=48, shuffle=True)
rmse = [] # list contains rmse for each fold
n = 0
for trn_idx, test_idx in kf.split(x, y):
X_tr, X_val = x.iloc[trn_idx], x.iloc[test_idx]
y_tr, y_val = y.iloc[trn_idx], y.iloc[test_idx]
model = CatBoostRegressor(**cat_params)
model.fit(
X_tr, y_tr, eval_set=[(X_val, y_val)], early_stopping_rounds=100, verbose=False
)
preds.append(model.predict(test_data))
RMSE = mean_squared_error(y_val, model.predict(X_val))
rmse.append(np.sqrt(mean_squared_error(y_val, model.predict(X_val))))
print(f"fold: {n+1} ==> rmse: {rmse[n]}")
n += 1
cat_pred = np.mean(preds, axis=0)
out = submission.copy()
final = (xgb_pred + cat_pred + lgbm_pred) / 3
out["sleep_hours"] = final
out.head()
out.to_csv("Output", index=False)
|
# # 1. Introduction
# Name: Tomasz Abels and Jack Chen
# Username: JackChenXJ
# Score:
# Leaderbord rank:
# # 2. Data
# ### 2.1 Dataset
# In this section, we load and explore the dataset.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import gc
import os
print(os.listdir("../input/LANL-Earthquake-Prediction"))
train = pd.read_csv(
"../input/LANL-Earthquake-Prediction/train.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
print(train.shape)
acoustic_data = train["acoustic_data"].values[::100]
time_data = train["time_to_failure"].values[::100]
print(acoustic_data.shape)
print(time_data.shape)
fig, ax1 = plt.subplots(figsize=(16, 4))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(acoustic_data, color="b")
ax1.set_ylabel("acoustic_data", color="b")
plt.legend(["acoustic_data"])
ax2 = ax1.twinx()
plt.plot(time_data, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
test1 = pd.read_csv(
"/kaggle/input/LANL-Earthquake-Prediction/test/seg_00030f.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
test2 = pd.read_csv(
"/kaggle/input/LANL-Earthquake-Prediction/test/seg_0012b5.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
test3 = pd.read_csv(
"/kaggle/input/LANL-Earthquake-Prediction/test/seg_00184e.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
# Initialize the subplots
fig, ax = plt.subplots(3, 1, figsize=(16, 6))
# Plot the time domain, label the graph and limit the axis
ax[0].plot(test1, color="b")
ax[0].legend(["acoustic_data"])
ax[0].set_ylabel("acoustic_data")
ax[1].plot(test2, color="b")
ax[1].legend(["acoustic_data"])
ax[1].set_ylabel("acoustic_data")
ax[2].plot(test3, color="b")
ax[2].legend(["acoustic_data"])
ax[2].set_ylabel("acoustic_data")
# ### 2.1.1 Train-test split
# In the cell below, we split the train data into a train and a test set. Set a value for the `test_size` yourself. Argue why the test size cannot be too small or too large. You can also use k-fold cross validation.
# Secondly, we have set the `random_state` to 102. Can you think of a reason why we set a `random_state` at all?
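# A minimal sketch of the two options discussed above (illustrative values
# only, not the settings used later): a fixed random_state makes the split
# reproducible across runs, and k-fold cross-validation lets every sample be
# used for both training and validation.
from sklearn.model_selection import train_test_split, KFold
_demo_X = np.arange(20).reshape(-1, 1)
_demo_y = np.arange(20)
_X_tr, _X_te, _y_tr, _y_te = train_test_split(
    _demo_X, _demo_y, test_size=0.2, random_state=102
)
for _fold, (_tr_idx, _val_idx) in enumerate(KFold(n_splits=5).split(_demo_X)):
    print("fold", _fold, "train size", len(_tr_idx), "val size", len(_val_idx))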
# ### 2.2 Data Exploration
# Explore the features and target variables of the dataset. Think about making some scatter plots, box plots, histograms or printing the data, but feel free to choose any method that suits you.
# What do you think is the right performance
# metric to use for this dataset? Clearly explain which performance metric you
# choose and why.
# Algorithmic bias can be a real problem in Machine Learning. So based on this,
# should we use the Race and the Sex features in our machine learning algorithm? Explain what you believe.
# Code from Basic Feature Benchmark by INVERSION, https://www.kaggle.com/code/inversion/basic-feature-benchmark
# Code from Earthquakes FE. More features and samples by ANDREW LUKYANENKO, https://www.kaggle.com/code/artgor/earthquakes-fe-more-features-and-samples
from tqdm import tqdm
from scipy.signal import hilbert, convolve, hann
from scipy import stats
def calc_change_rate(x):
change = (np.diff(x) / x[:-1]).values
change = change[np.nonzero(change)[0]]
change = change[~np.isnan(change)]
change = change[change != -np.inf]
change = change[change != np.inf]
return np.mean(change)
rows = 150000
segments = int(np.floor(train.shape[0] / rows))
X_train = pd.DataFrame(index=range(segments), dtype=np.float64)
# columns=['ave', 'std', 'max', 'min'])
y_train = pd.DataFrame(
index=range(segments), dtype=np.float64, columns=["time_to_failure"]
)
for segment in tqdm(range(segments)):
seg = train.iloc[segment * rows : segment * rows + rows]
x = seg["acoustic_data"].values
y = seg["time_to_failure"].values[-1]
y_train.loc[segment, "time_to_failure"] = y
X_train.loc[segment, "ave"] = x.mean()
X_train.loc[segment, "std"] = x.std()
X_train.loc[segment, "max"] = x.max()
X_train.loc[segment, "min"] = x.min()
X_train.loc[segment, "mean_change_abs"] = np.mean(np.diff(x))
# X_train.loc[segment, 'mean_change_rate'] = calc_change_rate(x)
X_train.loc[segment, "abs_max"] = np.abs(x).max()
X_train.loc[segment, "abs_min"] = np.abs(x).min()
X_train.loc[segment, "std_first_50000"] = x[:50000].std()
X_train.loc[segment, "std_last_50000"] = x[-50000:].std()
X_train.loc[segment, "std_first_10000"] = x[:10000].std()
X_train.loc[segment, "std_last_10000"] = x[-10000:].std()
X_train.loc[segment, "avg_first_50000"] = x[:50000].mean()
X_train.loc[segment, "avg_last_50000"] = x[-50000:].mean()
X_train.loc[segment, "avg_first_10000"] = x[:10000].mean()
X_train.loc[segment, "avg_last_10000"] = x[-10000:].mean()
X_train.loc[segment, "min_first_50000"] = x[:50000].min()
X_train.loc[segment, "min_last_50000"] = x[-50000:].min()
X_train.loc[segment, "min_first_10000"] = x[:10000].min()
X_train.loc[segment, "min_last_10000"] = x[-10000:].min()
X_train.loc[segment, "max_first_50000"] = x[:50000].max()
X_train.loc[segment, "max_last_50000"] = x[-50000:].max()
X_train.loc[segment, "max_first_10000"] = x[:10000].max()
X_train.loc[segment, "max_last_10000"] = x[-10000:].max()
X_train.loc[segment, "max_to_min"] = x.max() / np.abs(x.min())
X_train.loc[segment, "max_to_min_diff"] = x.max() - np.abs(x.min())
X_train.loc[segment, "count_big"] = len(x[np.abs(x) > 500])
X_train.loc[segment, "sum"] = x.sum()
# X_train.loc[segment, 'mean_change_rate_first_50000'] = calc_change_rate(x[:50000])
# X_train.loc[segment, 'mean_change_rate_last_50000'] = calc_change_rate(x[-50000:])
# X_train.loc[segment, 'mean_change_rate_first_10000'] = calc_change_rate(x[:10000])
# X_train.loc[segment, 'mean_change_rate_last_10000'] = calc_change_rate(x[-10000:])
X_train.loc[segment, "q95"] = np.quantile(x, 0.95)
X_train.loc[segment, "q99"] = np.quantile(x, 0.99)
X_train.loc[segment, "q05"] = np.quantile(x, 0.05)
X_train.loc[segment, "q01"] = np.quantile(x, 0.01)
X_train.loc[segment, "abs_q95"] = np.quantile(np.abs(x), 0.95)
X_train.loc[segment, "abs_q99"] = np.quantile(np.abs(x), 0.99)
X_train.loc[segment, "abs_q05"] = np.quantile(np.abs(x), 0.05)
X_train.loc[segment, "abs_q01"] = np.quantile(np.abs(x), 0.01)
print(X_train.shape)
print(y_train.shape)
print(X_train)
# standard normalize the data
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
# train pca
from sklearn.decomposition import PCA
pca = PCA(
n_components=20
) # doing pca and keeping only n_components, shows the first 20 components
pca = pca.fit(
X_train
) # the correct dimension of X for sklearn is P*N (samples*features)
X_pca_skl = pca.transform(X_train)
# perform pca on features
plt.bar(range(0, 20), pca.explained_variance_ratio_, label="individual var")
plt.step(
range(0, 20), np.cumsum(pca.explained_variance_ratio_), "r", label="cumulative var"
)
plt.xlabel("Principal component index")
plt.ylabel("explained variance ratio %")
plt.legend()
# fig, ax1 = plt.subplots(figsize=(16, 8))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(X_pca_skl[:, 0], color="b")
ax1.set_ylabel("acoustic_data", color="b")
plt.legend(["acoustic_data"])
fig, ax1 = plt.subplots(figsize=(16, 8))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(X_pca_skl[:, 0], color="b")
ax1.set_ylabel("acoustic_data", color="b")
plt.legend(["acoustic_data"])
ax2 = ax1.twinx()
plt.plot(y_train, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
# ### 2.3 Data Preparation
# This dataset hasn't been cleaned yet, meaning that some attributes (features) are in numerical format and some are in categorical format. Moreover, there are missing values as well. However, all of Scikit-learn's implementations of these algorithms expect numerical features. Check for all features whether they are categorical and use a method to transform them to numerical values. For the numerical data, handle the missing values and normalize the data.
# Note that you are only allowed to use training data for preprocessing but you then need to perform similar changes on test data too.
# You can use [pipelining](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) to help with the preprocessing.
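# A minimal pipelining sketch (illustrative only, not the exact preprocessing
# used below): imputation and scaling chained into one estimator, so the
# transformations fitted on the training data can be reapplied to test data.
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline(
    [
        ("imputer", SimpleImputer(strategy="median")),  # handle missing values
        ("scaler", StandardScaler()),  # normalize the data
    ]
)
# usage: num_pipeline.fit_transform(train_features); num_pipeline.transform(test_features)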
from sklearn.model_selection import train_test_split
X_train = X_pca_skl[:, 0:2]
# Randomize the set and split it into a training and test set
x_train, x_test, Y_train, Y_test = train_test_split(
X_train, y_train, test_size=0.33, shuffle=False, random_state=102
)
# standard normalize the data
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(x_train)
x_train_norm = scaler.transform(x_train)
x_test_norm = scaler.transform(x_test)
# #convert y values to categorical values
# Y_train = np.ravel(Y_train)
# Y_test = np.ravel(Y_test)
# lab = preprocessing.LabelEncoder()
# Y_train_t = lab.fit_transform(Y_train)
# Y_test_t = lab.fit_transform(Y_test)
# ## 3. Training and Results
# Briefly introduce the classification algorithms you choose.
# Present your final confusion matrices (2 by 2) and balanced accuracies for both test and training data for all classifiers. Analyse the performance on test and training in terms of bias and variance. Give one advantage and one drawback of the method you use.
print(x_train_norm.shape)
print(x_test_norm.shape)
print(Y_train.shape)
print(Y_test.shape)
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import NuSVR
model = NuSVR()
# fit on the training split only (fitting again on the test split would
# overwrite the model and leak test data)
model.fit(x_train_norm, Y_train.values.flatten())
# print(model.score(x_train_norm, Y_train_t))
# Predict
Y_test_pred = model.predict(x_test_norm)
Y_train_pred = model.predict(x_train_norm)
fig, ax1 = plt.subplots(figsize=(20, 4))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(Y_train_pred, color="b")
ax1.set_ylabel("time_to_failure", color="g")
plt.legend(
["time_to_failure predicted"],
)
ax2 = ax1.twinx()
plt.plot(Y_train, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
fig, ax1 = plt.subplots(figsize=(20, 4))
x = np.linspace(0, 1385, 1385)
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(Y_test_pred, color="b")
ax1.set_ylabel("time_to_failure", color="g")
plt.legend(
["time_to_failure predicted"],
)
ax2 = ax1.twinx()
plt.plot(x, Y_test, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.8))
scaler = preprocessing.StandardScaler().fit(X_train)
X_train_norm = scaler.transform(X_train)
model.fit(X_train_norm, y_train.values.flatten())
y_train_pred = model.predict(X_train_norm)
fig, ax1 = plt.subplots(figsize=(20, 4))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(y_train_pred, color="b")
ax1.set_ylabel("time_to_failure", color="g")
plt.legend(
["time_to_failure predicted"],
)
ax2 = ax1.twinx()
plt.plot(y_train, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
# accuracy
score1 = model.score(x_train_norm, Y_train)
score2 = model.score(x_test_norm, Y_test)
print(score1)
print(score2)
from sklearn.metrics import mean_squared_error
print(mean_squared_error(y_train, y_train_pred))
submission = pd.read_csv(
"../input/LANL-Earthquake-Prediction/sample_submission.csv", index_col="seg_id"
)
X_test = pd.DataFrame(dtype=np.float64, index=submission.index)
for seg_id in X_test.index:
seg = pd.read_csv("../input/LANL-Earthquake-Prediction/test/" + seg_id + ".csv")
x = seg["acoustic_data"].values
X_test.loc[seg_id, "ave"] = x.mean()
X_test.loc[seg_id, "std"] = x.std()
X_test.loc[seg_id, "max"] = x.max()
X_test.loc[seg_id, "min"] = x.min()
X_test.loc[seg_id, "mean_change_abs"] = np.mean(np.diff(x))
# X_train.loc[segment, 'mean_change_rate'] = calc_change_rate(x)
X_test.loc[seg_id, "abs_max"] = np.abs(x).max()
X_test.loc[seg_id, "abs_min"] = np.abs(x).min()
X_test.loc[seg_id, "std_first_50000"] = x[:50000].std()
X_test.loc[seg_id, "std_last_50000"] = x[-50000:].std()
X_test.loc[seg_id, "std_first_10000"] = x[:10000].std()
X_test.loc[seg_id, "std_last_10000"] = x[-10000:].std()
X_test.loc[seg_id, "avg_first_50000"] = x[:50000].mean()
X_test.loc[seg_id, "avg_last_50000"] = x[-50000:].mean()
X_test.loc[seg_id, "avg_first_10000"] = x[:10000].mean()
X_test.loc[seg_id, "avg_last_10000"] = x[-10000:].mean()
X_test.loc[seg_id, "min_first_50000"] = x[:50000].min()
X_test.loc[seg_id, "min_last_50000"] = x[-50000:].min()
X_test.loc[seg_id, "min_first_10000"] = x[:10000].min()
X_test.loc[seg_id, "min_last_10000"] = x[-10000:].min()
X_test.loc[seg_id, "max_first_50000"] = x[:50000].max()
X_test.loc[seg_id, "max_last_50000"] = x[-50000:].max()
X_test.loc[seg_id, "max_first_10000"] = x[:10000].max()
X_test.loc[seg_id, "max_last_10000"] = x[-10000:].max()
X_test.loc[seg_id, "max_to_min"] = x.max() / np.abs(x.min())
X_test.loc[seg_id, "max_to_min_diff"] = x.max() - np.abs(x.min())
X_test.loc[seg_id, "count_big"] = len(x[np.abs(x) > 500])
X_test.loc[seg_id, "sum"] = x.sum()
# X_train.loc[segment, 'mean_change_rate_first_50000'] = calc_change_rate(x[:50000])
# X_train.loc[segment, 'mean_change_rate_last_50000'] = calc_change_rate(x[-50000:])
# X_train.loc[segment, 'mean_change_rate_first_10000'] = calc_change_rate(x[:10000])
# X_train.loc[segment, 'mean_change_rate_last_10000'] = calc_change_rate(x[-10000:])
X_test.loc[seg_id, "q95"] = np.quantile(x, 0.95)
X_test.loc[seg_id, "q99"] = np.quantile(x, 0.99)
X_test.loc[seg_id, "q05"] = np.quantile(x, 0.05)
X_test.loc[seg_id, "q01"] = np.quantile(x, 0.01)
X_test.loc[seg_id, "abs_q95"] = np.quantile(np.abs(x), 0.95)
X_test.loc[seg_id, "abs_q99"] = np.quantile(np.abs(x), 0.99)
X_test.loc[seg_id, "abs_q05"] = np.quantile(np.abs(x), 0.05)
X_test.loc[seg_id, "abs_q01"] = np.quantile(np.abs(x), 0.01)
# standard normalize the data
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X_test)
X_test_norm = scaler.transform(X_test)
# train pca
from sklearn.decomposition import PCA
pca = PCA(
n_components=20
) # doing pca and keeping only n_components, shows the first 20 components
pca = pca.fit(
X_test_norm
) # the correct dimension of X for sklearn is P*N (samples*features)
X_pca_skl = pca.transform(X_test_norm)
X_test_pca = X_pca_skl[:, 0:2]
scaler = preprocessing.StandardScaler().fit(X_test_pca)
X_test_pca_norm = scaler.transform(X_test_pca)
submission["time_to_failure"] = model.predict(X_test_pca_norm)
submission.to_csv("submission.csv")
gc.collect()
print(submission)
|
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("../input/diabetes-data/pima-indians-diabetes.csv")
df.columns
column_name = [
"Pregnancies",
"Glucose",
"BloodPressure",
"SkinThickness",
"Insulin",
"BMI",
"DiabetesPedigreeFunction",
"Age",
"Class",
]
df = pd.read_csv(
"../input/diabetes-data/pima-indians-diabetes.csv", header=0, names=column_name
)
df.head(5)
column_norm = [
"Pregnancies",
"Glucose",
"BloodPressure",
"SkinThickness",
"Insulin",
"BMI",
"DiabetesPedigreeFunction",
]
df_norm = df[column_norm]
df_norm.head(3)
df1_norm = df[column_norm].apply(lambda x: ((x - x.min()) / (x.max() - x.min())))
df1_norm.head(3)
feat_Pregnancies = tf.feature_column.numeric_column("Pregnancies")
feat_Glucose = tf.feature_column.numeric_column("Glucose")
feat_BloodPressure = tf.feature_column.numeric_column("BloodPressure")
feat_SkinThickness = tf.feature_column.numeric_column("SkinThickness")
feat_Insulin = tf.feature_column.numeric_column("Insulin")
feat_BMI = tf.feature_column.numeric_column("BMI")
feat_DiabetesPedigreeFunction = tf.feature_column.numeric_column(
"DiabetesPedigreeFunction"
)
feature_column = [
feat_Pregnancies,
feat_Glucose,
feat_BloodPressure,
feat_SkinThickness,
feat_Insulin,
feat_BMI,
feat_DiabetesPedigreeFunction,
]
# Train and test split
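# A minimal sketch of the elided step (assumptions: "Class" is the target and
# the normalized features above are the inputs; test_size and random_state are
# illustrative).
x_data = df1_norm
y_data = df["Class"]
x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.3, random_state=101
)
print(x_train.shape, x_test.shape)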
|
# <div style="color:#485053;
# display:fill;
# border-radius:0px;
# background-color:#86C2DE;
# font-size:200%;
# padding-left:40px;
# font-family:Verdana;
# font-weight:600;
# letter-spacing:0.5px;
# ">
# <p style="padding: 15px;
# color:white;
# text-align: center;">
# DATA WAREHOUSING AND MINING (CSE5021)
# ## Assignment 2: Association Rule Mining
# ## Krupa Gajjar: 20MAI0014
# ## Scope: VIT, Vellore
# ### The Link of dataset used here: https://www.kaggle.com/c/instacart-market-basket-analysis
# ***Introduction to association rule mining:***
# This notebook demonstrates how the Apriori algorithm works. Apriori is used in many applications today for mining frequent itemsets and deriving association rules from a transactional database. It relies on the parameters "support" and "confidence": support refers to an item's frequency of occurrence, while confidence is a conditional probability. The items in a transaction form an itemset.
# The main aim is to create association rules that identify relations between the products in the dataset. Here we show an implementation that leverages the Apriori algorithm to generate simple {A} -> {B} association rules.
# 
# ## Association Rule Mining:
# Once the itemsets have been generated with the Apriori algorithm, we can start mining association rules. Given that we are only looking at itemsets of size 2, the association rules we generate will be of the form {A} -> {B}. One common application of these rules is in recommender systems such as the Amazon or Big Basket shopping baskets, where customers who purchased item A are recommended item B.
# Here are 3 key metrics to consider when evaluating association rules:
# 
# # Import Library
from IPython.display import display
import pandas as pd
import numpy as np
import sys
from itertools import combinations, groupby
from collections import Counter
def size(obj):
return "{0:.2f} MB".format(sys.getsizeof(obj) / (1000 * 1000))
# # Load Dataset
import pandas as pd
orders = pd.read_csv(
"../input/instacart-market-basket-analysis/order_products__prior.csv"
)
display(orders.head())
# 
# ## Transform into association rule function format
orders = orders.set_index("order_id")["product_id"].rename("item_id")
display(orders.head(10))
type(orders)
# Helper functions used below:
# * `freq` returns the frequency of each item
# * `order_count` returns the number of unique orders
# * `get_item_pairs` is a generator that yields item pairs, one at a time
# * `merge_item_name` returns the name associated with each item
def freq(iterable):
if type(iterable) == pd.core.series.Series:
return iterable.value_counts().rename("freq")
else:
return pd.Series(Counter(iterable)).rename("freq")
def order_count(order_item):
return len(set(order_item.index))
def get_item_pairs(order_item):
order_item = order_item.reset_index().to_numpy()
for order_id, order_object in groupby(order_item, lambda x: x[0]):
item_list = [item[1] for item in order_object]
for item_pair in combinations(item_list, 2):
yield item_pair
def merge_item_stats(item_pairs, item_stats):
return item_pairs.merge(
item_stats.rename(columns={"freq": "freqA", "support": "supportA"}),
left_on="item_A",
right_index=True,
).merge(
item_stats.rename(columns={"freq": "freqB", "support": "supportB"}),
left_on="item_B",
right_index=True,
)
def merge_item_name(rules, item_name):
columns = [
"itemA",
"itemB",
"confidenceBtoA",
"supportAB",
"supportA",
"supportB",
"freqA",
"freqAB",
"freqB",
"confidenceAtoB",
"lift",
]
rules = rules.merge(
item_name.rename(columns={"item_name": "itemA"}),
left_on="item_A",
right_on="item_id",
).merge(
item_name.rename(columns={"item_name": "itemB"}),
left_on="item_B",
right_on="item_id",
)
return rules[columns]
# **This function does the following:**
# * Calculates the item frequency and support
# * Filters out of order_item the items below the minimum support
# * Recalculates the item frequency and support
# * Gets the item pairs generator
def association_rules(order_item, min_support):
print("Starting order_item: {:22d}".format(len(order_item)))
item_stats = freq(order_item).to_frame("freq")
item_stats["support"] = item_stats["freq"] / order_count(order_item) * 100
qualifying_items = item_stats[item_stats["support"] >= min_support].index
order_item = order_item[order_item.isin(qualifying_items)]
print("Items with support >= {}: {:15d}".format(min_support, len(qualifying_items)))
print("Remaining order_item: {:21d}".format(len(order_item)))
order_size = freq(order_item.index)
qualifying_orders = order_size[order_size >= 2].index
order_item = order_item[order_item.index.isin(qualifying_orders)]
print("Remaining orders with 2+ items: {:11d}".format(len(qualifying_orders)))
print("Remaining order_item: {:21d}".format(len(order_item)))
item_stats = freq(order_item).to_frame("freq")
item_stats["support"] = item_stats["freq"] / order_count(order_item) * 100
item_pair_gen = get_item_pairs(order_item)
# Calculate item pair frequency and support
item_pairs = freq(item_pair_gen).to_frame("freqAB")
item_pairs["supportAB"] = item_pairs["freqAB"] / len(qualifying_orders) * 100
print("Item pairs: {:31d}".format(len(item_pairs)))
item_pairs = item_pairs[item_pairs["supportAB"] >= min_support]
print(
"Item pairs with support >= {}: {:10d}\n".format(min_support, len(item_pairs))
)
item_pairs = item_pairs.reset_index().rename(
columns={"level_0": "item_A", "level_1": "item_B"}
)
item_pairs = merge_item_stats(item_pairs, item_stats)
item_pairs["confidenceAtoB"] = item_pairs["supportAB"] / item_pairs["supportA"]
item_pairs["confidenceBtoA"] = item_pairs["supportAB"] / item_pairs["supportB"]
item_pairs["lift"] = item_pairs["supportAB"] / (
item_pairs["supportA"] * item_pairs["supportB"]
)
return item_pairs.sort_values("lift", ascending=False)
rules = association_rules(orders, 0.01)
# * Replace item ID with item name and display association rules
item_name = pd.read_csv("../input/instacart-market-basket-analysis/products.csv")
item_name = item_name.rename(
columns={"product_id": "item_id", "product_name": "item_name"}
)
rules_final = merge_item_name(rules, item_name).sort_values("lift", ascending=False)
df1 = rules_final
df1 = df1.drop(columns=["lift", "freqA", "freqB"])
df1.head(11)
|
# # For Illustration | Originally Run Locally with a Jupyter Notebook
# import shutil
import os
# import glob
# import cv2
# import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
# from PIL import Image
# import mahotas
import numpy as np
# import pandas as pd
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
# Generating the Minimum Sample
datagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
brightness_range=[0.1, 0.9],
zoom_range=0.2,
horizontal_flip=True,
fill_mode="constant",
)
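# A minimal preview sketch (synthetic random image, only to illustrate the
# generator configured above; not part of the original workflow).
_dummy = np.random.randint(0, 255, size=(1, 64, 64, 3)).astype("float32")
_augmented = next(datagen.flow(_dummy, batch_size=1))
print(_augmented.shape)  # (1, 64, 64, 3): one augmented variant of the input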
def gerar_amostra_minima(dir, min):
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        ) # list keeping only the files in the path.
arquivos = [arq for arq in arquivos if arq.lower().endswith(".jpg")]
qtd = len(arquivos)
qtd_gerar = min - qtd
geradas = 0
if qtd_gerar >= qtd:
qtd_amostra = qtd
if (qtd_gerar / qtd).is_integer():
qtd_gerar_por_amostra = int(qtd_gerar / qtd)
else:
qtd_gerar_por_amostra = int(qtd_gerar / qtd) + 1
else:
qtd_amostra = qtd_gerar
qtd_gerar_por_amostra = 1
if qtd_gerar < 1:
print(
"A classe", diretorios[i], "já possui a quantidade mínima de imagens."
)
continue
else:
for x in range(qtd_amostra):
geradas_nesta_amostra = 0
img_caminho = dir + "\\" + diretorios[i] + "\\" + arquivos[x]
# print("Gerando imagens a partir de", img_caminho)
img = load_img(img_caminho)
ximg = img_to_array(img)
ximg = ximg.reshape((1,) + ximg.shape)
for batch in datagen.flow(
ximg,
batch_size=1,
save_to_dir=(dir + "\\" + diretorios[i]),
save_prefix="DataGen-",
save_format="jpg",
):
geradas_nesta_amostra += 1
geradas += 1
if (
geradas >= qtd_gerar
or geradas_nesta_amostra >= qtd_gerar_por_amostra
or (qtd + geradas) == min
):
# print(geradas_nesta_amostra, 'imagens geradas para esta amostra.')
break # otherwise the generator would loop indefinitely
print(geradas, "imagens geradas para a classe", diretorios[i])
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - meansampling"
# dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\teste"
gerar_amostra_minima(dir, 522)
import random
def limitar_amostra_prefixo(dir, inicio, max):
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        )  # list of only the files under this path.
total = len(arquivos)
arquivos_datagen = [arq for arq in arquivos if arq.lower().startswith(inicio)]
qtd_datagen = len(arquivos_datagen)
removidos = []
while (total - len(removidos) > max) and (len(removidos) < qtd_datagen):
x = random.randint(0, qtd_datagen - 1)
if x in removidos:
continue
else:
removidos.append(x)
path_remove = dir + "\\" + diretorios[i] + "\\" + arquivos_datagen[x]
os.remove(path_remove)
print(
len(removidos),
"arquivos iniciados em '",
inicio,
"' removidos da classe",
diretorios[i],
)
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - oversampling"
limitar_amostra_prefixo(dir, "datagen", 2297)
def limitar_amostra(dir, max):
diretorios = [
filename
for filename in os.listdir(dir)
if os.path.isdir(os.path.join(dir, filename))
]
for i in range(len(diretorios)):
arquivos = os.listdir(
dir + "\\" + diretorios[i]
        )  # list of only the files under this path.
total = len(arquivos)
removidos = []
if total <= max:
qtd_remover = 0
print("Nenhum arquivo removido da classe", diretorios[i])
else:
qtd_remover = total - max
while total - len(removidos) > max:
x = random.randint(0, total - 1)
if x in removidos:
continue
else:
removidos.append(x)
path_remove = dir + "\\" + diretorios[i] + "\\" + arquivos[x]
os.remove(path_remove)
print(len(removidos), "arquivos removidos da classe", diretorios[i])
dir = "D:\\DataSets\\Simpsons-Kaggle-jfgm2018\\simpsons_enriquecido_manualmente - meansampling"
limitar_amostra(dir, 522)
|
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Bread and butter
import numpy as np
import pandas as pd
# Graphs
import seaborn as sns
import matplotlib.pyplot as plt
import missingno
# Models
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
# Tools & Validation
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from nltk import tokenize
# Clean code
import warnings
warnings.filterwarnings("ignore")
# # Titanic: predicting a tragedy
# ___
# *The goal of this analysis is to correctly predict whether a given passenger did or did not survive the Titanic catastrophe based on the variables provided:*
# Variable | meaning
# --- | ---
# Survival | Survival
# PassengerId | Unique Id of a passenger.
# Pclass | Ticket class
# Sex | Sex
# Age | Age in years
# Sibsp | Number of siblings / spouses aboard the Titanic
# Parch | Number of parents / children aboard the Titanic
# Ticket | Ticket number
# Fare | Passenger fare
# Cabin | Cabin number
# Embarked | Port of Embarkation
# We'll start by importing our data set and snooping around its variables and values.
# We'll use Pandas to transform the provided .csv files into easily manipulable data frames.
# Data we'll use to train our model
df_train = pd.read_csv("/kaggle/input/titanic/train.csv")
# Data we'll use to test our model
df_test = pd.read_csv("/kaggle/input/titanic/test.csv")
# What our final submission dataframe should look like
submission_reference = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
# Let's have a taste of what's inside our df
df_train.head()
# We can observe that most of our variables are categorical with the exceptions being: age and fare.
# Knowing this will have an impact on the way we analyze and describe our data. In the next step we'll have a quick look at the main statistically relevant info for both of these types of variables.
#
df_train.describe()
df_train.describe(include=["O"])
# One of the first things we observe is that our count is a little off. In an ideal scenario we'd have a complete dataset without missing data, but I don't believe that is the case here.
df_train.isna().sum()
# Plot graphic of missing values for our train set
missingno.matrix(df_train, figsize=(30, 10))
# Plot graphic of missing values for our test set
missingno.matrix(df_test, figsize=(30, 10))
# We observe some important holes in our data in both the Age and Cabin variables. We also have two missing values in Embarked that we should take into account.
# In this analysis we'll disregard our Cabin variable given its amount of missing values and it not being descriptive enough.
df_train = df_train.drop(columns=["Cabin"])
df_test = df_test.drop(columns=["Cabin"])
# # 1. Quantitative variables
# ___
# We'll proceed to take a look at our quantitative variables: age and fare
sns.set(color_codes=True)
colors = sns.color_palette("muted")
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
sns.distplot(df_train["Age"], color=colors[0], ax=axes[0])
sns.distplot(df_train["Fare"], color=colors[1], ax=axes[1])
plt.suptitle("Distribution of Quantitative Variables", size=14)
plt.tight_layout()
plt.show()
# * Age could've been a normal distribution but there is a notable amount of (I'm assuming) babies on board. We should look into that.
# * The fare graph seems to indicate that most of the passengers rode the Titanic for free. Is this true?
# Before moving on, let's look at **possible outliers**
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
sns.boxplot(y=df_train["Age"], color=colors[0], ax=axes[0])
sns.boxplot(y=df_train["Fare"], color=colors[1], ax=axes[1])
plt.suptitle("Distribution of Quantitative Variables", size=14)
plt.tight_layout()
plt.show()
# It seems like someone went overboard (pun intended) with the fare price. Let's look into it.
df_train[df_train["Fare"] > 500]
# Interestingly enough we have three persons, all aged 35, who paid over $500 for their tickets.
# What's even more interesting: they all survived.
# **Is there a correlation between survival and fare price?**
print("Number of passangers that survived")
print(df_train["Survived"].value_counts())
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
sns.boxplot(x=df_train["Survived"], y=df_train["Age"], color=colors[0], ax=axes[0])
sns.boxplot(x=df_train["Survived"], y=df_train["Fare"], color=colors[1], ax=axes[1])
plt.suptitle("Correlation between age/fare paid and survival", size=14)
plt.tight_layout()
plt.show()
# Maybe by a slight margin but, as a general trend, you had to be younger and richer to survive...
# Nothing new under the sun.
# We should finally deal with the **missing values in our test set**. In this case I will just set a mean fare depending on the class to which the passenger belonged.
mean_fare_third = df_train["Fare"][df_train["Pclass"] == 3].mean()
df_test["Fare"][df_test["Fare"].isna()] = mean_fare_third
# ____
# **We still have a problem regarding missing values though.**
# How should we approach filling them?
# We can try using the mean or median but it will be interesting to narrow the arbitrariness a little. Maybe we can use sex as a trend indicator for age?
# Let's have a look.
print(df_train["Age"][df_train["Sex"] == "female"].mean())
print(df_train["Age"][df_train["Sex"] == "male"].mean())
# Is there something else we could use to **narrow this down** a little more?
# Is there something in the **name variable** that could help us out?
# ## Working with our Name variable
# ____
df_train["Name"].describe()
# * **Good news is:** we have all the names of our passengers! Hooray!
# * **Bad news is:** we have all the names of our passengers. This means 891 unique values. What can we do with them?
# #### If we look for something in common between them, their **titles** (which we'll store in a `suffix` column) stand out.
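# The regex below grabs tokens that end with a period (e.g. "Mr.", "Mrs.", "Master."), which are the titles embedded in each passenger's name.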
tokenizer = tokenize.RegexpTokenizer(r"\w+[.]")
df_train["suffix"] = df_train["Name"].apply(
lambda name: "".join(tokenizer.tokenize(name))
)
df_train["suffix"] = df_train["suffix"].str.strip(".")
# We do the same process to test so we can maintain uniform datasets
df_test["suffix"] = df_test["Name"].apply(
lambda name: "".join(tokenizer.tokenize(name))
)
df_test["suffix"] = df_test["suffix"].str.strip(".")
df_test["suffix"].value_counts()
# #### Extracting suffixes makes the name variable much more manageable.
# We can now use this feature to see if we can further narrow down our age filling problem.
print("Mr. mean age: ", df_train["Age"][df_train["suffix"] == "Mr"].mean())
print("Mrs. mean age: ", df_train["Age"][df_train["suffix"] == "Mrs"].mean())
print("Miss. mean age: ", df_train["Age"][df_train["suffix"] == "Miss"].mean())
# Let's look at the suffixes of the people without age
df_train["suffix"][df_train["Age"].isna()].value_counts()
# #### We have four Masters and one Dr without age.
# We can try averaging the ages for these suffixes and see what we find. In the case of "Master" we have forty entries; this should be enough to give us a broad picture.
# On the other hand our "Dr" suppositions might not be as strong given that we only have seven entries in our dataset.
print("Mr. mean age: ", df_train["Age"][df_train["suffix"] == "Mr"].mean())
print("Mrs. mean age: ", df_train["Age"][df_train["suffix"] == "Mrs"].mean())
print("Miss. mean age: ", df_train["Age"][df_train["suffix"] == "Miss"].mean())
print("Master. mean age: ", df_train["Age"][df_train["suffix"] == "Master"].mean())
print("Dr. mean age: ", df_train["Age"][df_train["suffix"] == "Dr"].mean())
# It seems that "Master" is a suffix used to refer to young people. Who would've thought of that?!
# If we had used the overall male mean age for the Masters, we would've been **WAY off**.
# Regarding "Dr" we won't use this mean because the number of entries is too low to be sure.
# **Let's proceed to fill our missing values.**
def apply_mean(df):
df.loc[(df["suffix"] == "Mr") & (df["Age"].isna()), "Age"] = 32
df.loc[(df["suffix"] == "Dr") & (df["Age"].isna()), "Age"] = 32
df.loc[(df["suffix"] == "Mrs") & (df["Age"].isna()), "Age"] = 35
df.loc[(df["suffix"] == "Ms") & (df["Age"].isna()), "Age"] = 35
df.loc[(df["suffix"] == "Miss") & (df["Age"].isna()), "Age"] = 21
df.loc[(df["suffix"] == "Master") & (df["Age"].isna()), "Age"] = 4
return df
df_train = apply_mean(df_train)
df_test = apply_mean(df_test)
# ### We managed to fill our missing data!
# #### Before finalizing this part of the analysis I would like to get rid of the suffixes that are too scarce to add information.
# In order to do so we'll replace them with "Mr" or "Mrs" accordingly.
filter1 = (
(df_train["suffix"] != "Master")
& (df_train["suffix"] != "Mr")
& (df_train["suffix"] != "Mrs")
& (df_train["suffix"] != "Miss")
)
df_others = df_train[filter1]
df_ok = df_train[~filter1]
df_others["suffix"][df_others["Sex"] == "male"] = "Mr"
df_others["suffix"][df_others["Sex"] == "female"] = "Mrs"
df_train = df_ok.merge(df_others, how="outer")
# Now again we apply the changes to test
filter2 = (
(df_test["suffix"] != "Master")
& (df_test["suffix"] != "Mr")
& (df_test["suffix"] != "Mrs")
& (df_test["suffix"] != "Miss")
)
df_others2 = df_test[filter2]
df_ok2 = df_test[~filter2]
df_others2["suffix"][df_others2["Sex"] == "male"] = "Mr"
df_others2["suffix"][df_others2["Sex"] == "female"] = "Mrs"
df_test = df_ok2.merge(df_others2, how="outer")
# ### We are good to go! It's time to review our qualitative variables
# # 2. Qualitative Variables
# ___
# First let's refresh our variables.
df_train.describe(include=["O"])
# We already worked with "Name" so we are going to drop it.
# For the rest of the variables our task at hand is **encoding** them so our model can work with them.
# We should also remember that we have some **missing values** in our "Embarked" variable.
# Let's take a look at the distribution of our variables.
sns.set(color_codes=True)
colors = sns.color_palette("muted")
fig, axes = plt.subplots(1, 3, figsize=(14, 7))
sns.countplot(df_train["Sex"], ax=axes[0])
sns.countplot(df_train["Embarked"], ax=axes[1])
sns.countplot(df_train["suffix"], ax=axes[2])
plt.suptitle("Categorical Variables Frequency", size=15)
plt.tight_layout()
plt.show()
df_train["Embarked"][df_train["Embarked"].isna()] = "S"
df_train.isna().sum()
# Now that we finally got rid of all our null values, let's do the encoding.
# We'll create a function to drop and encode variables.
# This will allow us to apply the same process to the train and test sets.
def dummies(
df, drop_col=["Sex", "Embarked", "suffix", "Name", "Ticket", "PassengerId"]
):
# Get dummies.
df_sex = pd.get_dummies(df["Sex"])
df_embark = pd.get_dummies(df["Embarked"])
df_suffix = pd.get_dummies(df["suffix"])
    # We drop all the columns that aren't useful, including PassengerId, Ticket, and Name.
df_encoded = df.drop(columns=drop_col, axis=0)
# We add all the encoded df to our new df
df_encoded = pd.concat([df_encoded, df_sex, df_embark, df_suffix], axis=1)
return df_encoded
# Apply fun to our dfs
df_train_enc = dummies(df_train)
df_test_enc = dummies(df_test)
# ## Let's take a look at our final dataframe!
df_train_enc.head()
# To do our last analysis we'll change Sex to 0 for male and 1 for female
df_train["Sex_binary"] = np.where(df_train["Sex"] == "female", 1, 0)
fig, axes = plt.subplots(1, 3, figsize=(14, 7))
sns.distplot(
df_train.loc[df_train["Survived"] == 1]["SibSp"],
kde_kws={"label": "Survived"},
ax=axes[0],
)
sns.distplot(
    df_train.loc[df_train["Survived"] == 0]["SibSp"],
kde_kws={"label": "Did not survive"},
ax=axes[0],
)
sns.distplot(
df_train.loc[df_train["Survived"] == 1]["Parch"],
kde_kws={"label": "Survived"},
ax=axes[1],
)
sns.distplot(
df_train.loc[df_train["Survived"] == 0]["Parch"],
kde_kws={"label": "Did not survive"},
ax=axes[1],
)
sns.distplot(
df_train.loc[df_train["Survived"] == 1]["Sex_binary"],
kde_kws={"label": "Survived"},
ax=axes[2],
)
sns.distplot(
df_train.loc[df_train["Survived"] == 0]["Sex_binary"],
kde_kws={"label": "Did not survive"},
ax=axes[2],
)
plt.suptitle("Survival rates for SibSp/Parch/Sex", size=15)
plt.legend()
plt.tight_layout()
plt.show()
# Here we can observe the relation between some of our unexplored variables and survival. At first glance we can easily see that men were less likely to survive, while women had an almost 50/50 chance.
# It is also interesting to see that those with one or more sibling/spouse or parent/child relations were more likely to survive.
# # Model training
# ___
# First we should divide our set into train and test set.
X_train = df_train_enc.drop(columns=["Survived"], axis=1)
y_train = df_train_enc["Survived"]
X_test = df_test_enc
# Our test set doesn't include a "Survived" variable so there's no need for a split.
print(y_train.shape)
print(X_train.shape)
print(X_test.shape)
X_train
# Looks like we're good to go.
# Now let's look at the models we'll be using.
# * Multinomial Naive Bayes
# * Logistic Regression
# * K-Nearest Neighbors
# * Support Vector Machines
# * Random Forest Classifier
# * Decision Tree Classifier
# (It isn't the goal of this notebook to give a full review on the nature of these models, only a way in which they can be used.)
features = X_train.copy()
targets = y_train.copy()
# For this particular project we'll be comparing these models.
models = [
MultinomialNB(),
LogisticRegression(multi_class="multinomial", max_iter=10000),
KNeighborsClassifier(),
SVC(),
RandomForestClassifier(),
DecisionTreeClassifier(),
]
# Determines the cross-validation splitting strategy. 5 is the default.
CV = 5
# Df we'll use to store our results
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
# Here we loop over the models and fit our cross validation model to find the one that performs the best
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, targets, scoring="accuracy", cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
# We proceed to make a df with all the info we collected.
cv_df = pd.DataFrame(entries, columns=["model_name", "fold_idx", "accuracy"])
# visualizing the results
sns.set(color_codes=True)
sns.boxplot(x="model_name", y="accuracy", data=cv_df)
sns.stripplot(
x="model_name",
y="accuracy",
data=cv_df,
size=8,
jitter=True,
edgecolor="gray",
linewidth=2,
)
plt.title("Classification Models", fontsize=20)
plt.ylabel("Accuracy", fontsize=15)
plt.xlabel("")
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15, rotation=0)
plt.show()
# Looking at average accuracy per model
final_comp = (
cv_df.groupby("model_name")
.accuracy.mean()
.reset_index()
.sort_values(by="accuracy", ascending=False)
)
final_comp
# ### Not bad!
# We should also try finding which were the **most important features** for our model. Let's look into it using the **Random Forest Classifier** as an example.
# The following function allows us to get the most important features and plot them
def feature_importance(model, data):
fea_imp = pd.DataFrame({"imp": model.feature_importances_, "col": data.columns})
fea_imp = fea_imp.sort_values(["imp", "col"], ascending=[True, False]).iloc[-30:]
_ = fea_imp.plot(kind="barh", x="col", y="imp", figsize=(20, 10))
model_rfc = RandomForestClassifier()
model_rfc.fit(X_train, y_train)
# Plot the feature importance scores
feature_importance(model_rfc, X_train)
# #### It seems **"Fare"** is a very important feature for our model. We had theorized that those with money were more likely to survive. Maybe that has something to do with it.
# #### **"Age"** also seems to be an important feature; maybe the famous phrase checks out:
# ## **"Women and children first"...**
# # Fine-tuning our model
# ___
# Knowing which models are more effective is great, but we always want more.
# It's time to apply our tuning and optimization algorithms in order to get the best possible prediction.
# #### We'll apply this method to our two best performing models:
# * Random Forest Classifier
# * Logistic Regression
# Create first pipeline for base without reducing features.
pipe = Pipeline([("classifier", RandomForestClassifier())])
# Create param grid.
param_grid = [
{
"classifier": [LogisticRegression()],
"classifier__penalty": ["l1", "l2"],
"classifier__C": np.logspace(-4, 4, 20),
"classifier__solver": ["liblinear"],
},
{
"classifier": [RandomForestClassifier()],
"classifier__n_estimators": list(range(10, 101, 10)),
"classifier__max_features": list(range(6, 32, 5)),
},
]
# Create grid search object
model = GridSearchCV(pipe, param_grid=param_grid, cv=5, verbose=2, n_jobs=-1)
# Fit on data
model.fit(X_train, y_train)
best_performer = model.best_score_
best_model = model.best_estimator_
print(f"Best accuracy: {str(best_performer)}")
print()
print(f"Best model: {str(best_model)}")
# ### It looks like **Logistic Regression** is our best bet!
# Let's use this model to make our prediction.
# # Final Prediction
# ___
y_pred = model.predict(X_test)
# We'll look into our predicted df and the example to make sure the shape is the same.
print(f"Example prediction: {submission_reference.shape}")
print(f"My prediction: {y_pred.shape}")
# Generate Submission File
StackingSubmission = pd.DataFrame(
    {"PassengerId": df_test["PassengerId"], "Survived": y_pred}
)
StackingSubmission.to_csv("my_titanic_pred.csv", index=False)
|
# techgabyte.com
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df1 = pd.read_csv(
"../input/python-developers-survey-2020/Python Developers Survey questions_outside.csv",
encoding="utf8",
)
pd.set_option("display.max_columns", None)
df1.head()
df = pd.read_csv(
"../input/python-developers-survey-2020/2020_sharing_data_outside.csv",
encoding="utf8",
)
pd.set_option("display.max_columns", None)
df.head()
df.isnull().sum()
df["are.you.datascientist"].value_counts()
# #Are you Data Scientist? Duh!
df["is.python.main"].value_counts()
# #Is Python Main? I don't use Python. Python uses Me.
df["years.of.coding"].value_counts()
df["years.of.coding"].value_counts().plot.barh(
color=["blue", "red", "lime", "purple", "teal"], title="Years of Coding"
)
# #Years of coding? Zero, Null, Zip, Nulla, None.
df["python.years"].value_counts()
df["python.years"].value_counts().plot.barh(
color=["blue", "red", "lime", "purple", "teal"], title="Python Years"
)
# #Python years? I think 2 years Kaggling in Python.
df["other.purposes.Data analysis"].value_counts()
# #Other purposes than DS? Fun
##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook
from wordcloud import WordCloud
from wordcloud import STOPWORDS
stopwords = set(STOPWORDS)
wordcloud = WordCloud(background_color="black", height=2000, width=2000).generate(
str(df["main.purposes"])
)
plt.rcParams["figure.figsize"] = (12, 12)
plt.axis("off")
plt.imshow(wordcloud)
plt.title("Python Main Purposes")
plt.show()
# #Python Purposes: Personal Educational Work Projects. Perfect WordCloud.
df["other.purposes.Machine learning"].value_counts()
# #Machine Learning? Epochs In MPWolke Out. Maybe in the future with Rapids.
# blog.4linux.com.br
df["how.involved.Educational purposes"].value_counts()
df["data.frameworks.Matplotlib"].value_counts()
# kdnuggets.com
df["data.frameworks.Seaborn"].value_counts()
df["data.frameworks.Keras"].value_counts()
# deepsense.ai
df["age"].value_counts()
df["age"].value_counts().plot.barh(
color=["blue", "red", "lime", "purple", "teal", "cyan"],
title="Age Group",
)
df["country.live"].value_counts()
|
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
data_dir = "/kaggle/input/preprocessed-brain-mri-images/brain-tumor/processed-images"
batch_size = 32
img_height = 224
img_width = 224
data = []
labels = []
for subdir in os.listdir(data_dir):
subdir_path = os.path.join(data_dir, subdir)
if os.path.isdir(subdir_path):
for filename in os.listdir(subdir_path):
file_path = os.path.join(subdir_path, filename)
if file_path.endswith(".jpg") or file_path.endswith(".png"):
data.append(file_path)
labels.append(subdir)
print(len(data))
print(len(labels))
df = pd.DataFrame({"filename": data, "class": labels})
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(df, test_size=0.2)
datagen = keras.preprocessing.image.ImageDataGenerator()
def initial_training(
img_height, img_width, batch_size, datagen, train_df, test_df, dropout_rate
):
train_data = datagen.flow_from_dataframe(
train_df,
x_col="filename",
y_col="class",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
)
test_data = datagen.flow_from_dataframe(
test_df,
x_col="filename",
y_col="class",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
)
base_model = tf.keras.applications.resnet50.ResNet50(
include_top=False, weights="imagenet", input_shape=(img_height, img_width, 3)
)
base_model.trainable = False
image_batch, label_batch = next(iter(train_data))
feature_batch = base_model(image_batch)
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
prediction_layer = keras.Sequential(
[
tf.keras.layers.Dense(128),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dense(3),
]
)
prediction_batch = prediction_layer(feature_batch_average)
preprocess_input = tf.keras.applications.resnet50.preprocess_input
inputs = tf.keras.Input(shape=(img_height, img_width, 3))
x = preprocess_input(inputs)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(dropout_rate + 0.1)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
base_learning_rate = 0.0001
model.compile(
optimizer=tf.keras.optimizers.experimental.Adam(
learning_rate=base_learning_rate
),
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
initial_epochs = 7
history = model.fit(train_data, epochs=initial_epochs, validation_data=test_data)
return model
def create_model(model, fine_tune_start):
base_model = model.get_layer(index=3)
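    # Note: get_layer(index=3) assumes the frozen ResNet50 sub-model sits at index 3
    # of the functional model built in initial_training (after the input and
    # preprocessing layers); if the architecture changes, verify with model.summary().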
base_model.trainable = True
fine_tune_at = fine_tune_start
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
base_learning_rate = 0.0001
model.compile(
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate / 10),
metrics=["accuracy"],
)
return model
def fine_tuning(
img_height,
img_width,
batch_size,
datagen,
train_df,
test_df,
model,
fine_tune_start,
acc_per_comb,
):
fine_tuning_epoch = 12
train_data = datagen.flow_from_dataframe(
train_df,
x_col="filename",
y_col="class",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
)
test_data = datagen.flow_from_dataframe(
test_df,
x_col="filename",
y_col="class",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
)
fine_tuning_model = create_model(model, fine_tune_start)
history = fine_tuning_model.fit(
train_data, validation_data=test_data, epochs=fine_tuning_epoch
)
scores = fine_tuning_model.evaluate(test_data)
acc_per_comb.append(scores[1])
return fine_tuning_model
fine_tune_start_list = [100, 120, 140]
dropout_rate_list = [0.2, 0.3, 0.4]
combination = []
acc_per_comb = []
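# Note: acc_per_comb is appended to inside fine_tuning(); each entry is the test
# accuracy for the corresponding [fine_tune_start, dropout_rate] combination below.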
def grid_search(fine_tune_start, dropout_rate, acc_per_comb):
model = initial_training(
img_height, img_width, batch_size, datagen, train_df, test_df, dropout_rate
)
fine_tuned_model = fine_tuning(
img_height,
img_width,
batch_size,
datagen,
train_df,
test_df,
model,
fine_tune_start,
acc_per_comb,
)
return
for fine_tune_start in fine_tune_start_list:
for dropout_rate in dropout_rate_list:
combination.append([fine_tune_start, dropout_rate])
grid_search(fine_tune_start, dropout_rate, acc_per_comb)
print(combination)
print(acc_per_comb)
best_comb_index = 0
best_acc = 0
for i in range(len(acc_per_comb)):
if i == 0:
best_acc = acc_per_comb[i]
elif best_acc < acc_per_comb[i]:
best_acc = acc_per_comb[i]
best_comb_index = i
print(combination[best_comb_index])
|
# * [Introduction](#chapter1)
# * [Theory](#chapter2)
# * [Importing Data & Libraries](#chapter3)
# * [Extracting Tweet Metadata](#chapter4)
# * [Cleaning Tweets](#chapter5)
# * [EDA](#chapter6)
# * [BERT Model](#chapter7)
# * [Building Model Architecture](#section_7_1)
# * [Defining Pre-Processing and Training Functions](#section_7_2)
# * [Training and Validation](#section_7_3)
# * [RoBERTa Model](#chapter8)
# * [Building Model Architecture](#section_8_1)
# * [Training and Validation](#section_8_2)
# * [FNN Metadata Model](#chapter9)
# * [Building Model Architecture](#section_9_1)
# * [Training and Validation](#section_9_2)
# * [Model Comparison](#chapter10)
# * [Final Prediction](#chapter11)
# ## Introduction
# The dataset of Disaster Tweets comprises several thousand tweets that have been manually categorized by humans as either being relevant to an actual disaster or not having anything to do with a disaster. For example, someone may colloquially use the word "fire" to describe a song, while other tweets containing the word "fire" may actually be describing a real fire.
# In this notebook, I will be building NLP classification models to predict whether tweets in the test dataset are actual disasters or just false alarms. The models will rely mostly on BERT and RoBERTa, two powerful NLP models created by researchers at Google AI Language and at Facebook AI, respectively. I will also be comparing the performance of BERT and RoBERTa against a simple Feed-forward Neural Network (FNN) with metadata features that will be extracted from the text.
# To build my models, I will be using the transformers library from HuggingFace with PyTorch. The transformers library from HuggingFace is a widely used open-source library for working with transformer-based models such as BERT, GPT-2, and RoBERTa. It provides a comprehensive set of pre-trained models as well as tools for pre-processing, fine-tuning, and evaluation, making it easy to implement and experiment with these NLP models.
# ## Theory
# BERT is known for its ability to perform well on a wide range of NLP tasks, including text classification, question answering, and language translation. BERT is particularly notable for its ability to understand the context and meaning of words in a sentence or paragraph, which has led to its use in search engines to more accurately produce the closest results for a given search.
# BERT stands for Bidirectional Encoder Representations from Transformers, and was first published in 2018. It is a neural network model that is differentiated among other existing models by its ability to extract context from reading a sentence both forwards and backwards (i.e., bidirectionally).
# Take the classic example below:
# (Figure: the classic example sentences in which "bank" refers to a river bank in one and to a financial institution in the other.)
# Previous NLP models would likely correctly predict that the word "bank" in the first sentence relates to a river bank, as it is reading the sentence front to back. However, it would be more likely to misclassify the meaning of "bank" in the second sentence, as the context needed to make the correct prediction (financial institution) comes after the word itself. BERT is differentiated in that it will perform both a forwards and a backwards pass in order to infer the context of the word.
# One of the key components of the BERT model is masked language modeling (MLM). MLM is a technique used during pre-training where random words in a sentence are masked, and the model is trained to predict the missing word. By doing so, BERT learns to understand the context of the masked word and its relationship to the rest of the sentence. MLM is particularly useful in handling the problem of polysemy, where a single word can have multiple meanings depending on the context in which it appears, as we saw above. By training on a large corpus of text using MLM, BERT is able to develop a robust understanding of the contextual nuances of language.
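# As a quick illustration of MLM in action, here is a minimal sketch; the 'fill-mask' pipeline call and the example sentence are my own assumptions for demonstration, not part of the analysis below:
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")
for prediction in fill_mask("The fisherman sat down on the river [MASK]."):
    # Each prediction holds a candidate token for the mask and its probability
    print(prediction["token_str"], round(prediction["score"], 3))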
# Another important aspect of the BERT model is next sentence prediction (NSP). This is a pre-training task where the model is presented with pairs of sentences and is trained to predict whether they are logically connected or not. By training on this task, BERT learns to understand the relationships between sentences, which is important in tasks such as question answering and textual entailment. NSP also helps the model develop a general understanding of the structure of language, which allows it to handle a wide range of natural language processing tasks effectively.
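# A minimal sketch of next sentence prediction using the pre-trained NSP head (the two sentences are made up purely for illustration):
import torch
from transformers import BertTokenizer, BertForNextSentencePrediction

nsp_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
nsp_model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")
encoding = nsp_tokenizer(
    "A wildfire broke out near the highway.",
    "Residents were asked to evacuate immediately.",
    return_tensors="pt",
)
nsp_logits = nsp_model(**encoding).logits
# Index 0 = "sentence B follows sentence A", index 1 = "sentence B is random"
print(torch.softmax(nsp_logits, dim=1))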
# RoBERTa (Robustly Optimized BERT approach) is an extension of the BERT model that was introduced by Facebook AI in 2019. It is essentially an improved version of BERT that addresses some of the weaknesses of the original model. While BERT achieved state-of-the-art results in several NLP tasks, RoBERTa has surpassed it on several benchmarks.
# One of the key differences between RoBERTa and BERT is in the way they are pre-trained. BERT was trained on a large corpus of text using MLM and NSP, as described above. RoBERTa, on the other hand, was trained using the same masked language modeling objective but with several modifications. The authors of RoBERTa increased the batch size, training data and training time, and also removed the next sentence prediction objective. RoBERTa also uses a different tokenization method which allows it to better handle rare words and tasks where a large vocabulary is important. These modifications allowed RoBERTa to achieve better performance than BERT on several NLP tasks.
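# To make the tokenization difference concrete, here is a small sketch comparing WordPiece (BERT) with byte-level BPE (RoBERTa); 'roberta-base' and the example word are assumptions for illustration only:
from transformers import BertTokenizerFast, RobertaTokenizerFast

bert_tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
roberta_tok = RobertaTokenizerFast.from_pretrained("roberta-base")
print(bert_tok.tokenize("hailstorm"))  # WordPiece sub-words (continuations prefixed with '##')
print(roberta_tok.tokenize("hailstorm"))  # byte-level BPE pieces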
# Importing necessary libraries
# Data cleaning, EDA, and visualization
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
from spellchecker import SpellChecker
# For building NN with BERT, RoBERTa, and metadata
import torch
import tensorflow as tf
from transformers import BertTokenizerFast, BertForSequenceClassification
from transformers import RobertaTokenizerFast, RobertaForSequenceClassification
from torch.utils.data import DataLoader, TensorDataset, random_split
from tensorflow.keras.losses import BinaryCrossentropy
from sklearn.metrics import confusion_matrix
import torch.nn as nn
import torch.nn.functional as F
from torchinfo import summary
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import torch.optim as optim
from sklearn.model_selection import train_test_split
import warnings
# Ignore warnings
warnings.filterwarnings("ignore")
from torch.utils.data import Dataset
from sklearn.preprocessing import StandardScaler
# Set random seed for reproducibility
import random
random_seed = 42
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
# Set cuDNN flags for deterministic behavior and disable benchmarking
torch.backends.cudnn.deterministic = (
True # Ensure deterministic convolution algorithms are used
)
torch.backends.cudnn.benchmark = (
False # Disable automatic tuning of convolution algorithms
)
# Checks if GPU is available for use
device = "cuda" if torch.cuda.is_available() else "cpu"
# ## Importing Data & Libraries
# Import datasets
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train.to_csv("train.csv")
test.to_csv("test.csv")
# Combine for cleaning and text pre-processing
df = pd.concat([train, test], axis=0)
# Inspecting data
display(df.head())
# ## Extracting Tweet Metadata
def add_tweet_metadata(df: pd.DataFrame) -> pd.DataFrame:
"""
Extracts metadata from a DataFrame of tweets and adds the information as columns to the DataFrame.
Parameters:
df (pandas.DataFrame): The DataFrame of tweets to add metadata to.
Returns:
pandas.DataFrame: The original DataFrame with added columns for tweet metadata.
"""
# Number of words in each tweet (excluding mentions, URLs, and hashtags)
df["word_count"] = df["text"].apply(
lambda x: len(re.findall(r"\b(?<![#@])\w+\b", x))
)
# Number of unique words in each tweet (excluding mentions, URLs, and hashtags)
df["unique_word_count"] = df["text"].apply(
lambda x: len(set(re.findall(r"\b(?<![#@])\w+\b", x)))
)
# Number of characters
df["character_count"] = df["text"].apply(lambda x: len(x))
# Number of hashtags
df["hashtag_count"] = df["text"].apply(lambda x: len(re.findall(r"#\w+", x)))
# Number of mentions
df["mention_count"] = df["text"].apply(lambda x: len(re.findall(r"@\w+", x)))
# Number of URLs
df["url_count"] = df["text"].apply(
lambda x: len(
re.findall(
r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
x,
)
)
)
# Count the number of capitalized words
df["capitalized_word_count"] = df["text"].apply(
lambda x: len(re.findall(r"\b[A-Z][a-z]*\b", x))
)
# Count the proportion of capitalized words
df["capitalized_word_proportion"] = df["capitalized_word_count"] / df["word_count"]
return df
# Create new columns in dataframe with metadata
add_tweet_metadata(df)
# ## Cleaning Tweets
# Below, I will define a function that will clean the tweets by removing URLs, mentions, and punctuation, as well as splitting up hashtags into individual words. The function will also lowercase all words.
# There are many tweets in the dataset containing the word 'amp'. Upon doing some further research, I found that 'amp' is just a stand-in for the ampersand symbol (&), so that will be cleaned from the text as well.
# Cleaning data to remove punctuation, URLs, hashtags, mentions, and splitting up words
# that are strung together
def clean_text(text: str) -> str:
"""
This function cleans text to remove punctuation, URLs, mentions, and hashtags, occurrence
of the word 'amp', and splits up words that are strung together into individual words.
The function also lowercases all words.
Parameters:
text (str): Text to be cleaned.
Returns:
str: Cleaned text.
"""
# Remove URLs
text = re.sub(r"http\S+", "", text)
# Remove mentions
text = re.sub(r"@\w+", "", text)
# Remove punctuation except for hashtags
text = re.sub(r"[^\w\s#]", "", text)
# Split up hashtags into individual words and remove #
text = re.sub(
r"#(\w+)",
lambda x: " ".join(
re.findall(r"[A-Z]?[a-z]+|[A-Z]+(?=[A-Z][a-z])|[\'\w]+", x.group(1))
),
text,
)
# Convert to lowercase
text = text.lower()
# Remove 'amp'
text = re.sub(r"\bamp\b", "", text)
return text
# Create new column with cleaned text
df["text_cleaned"] = df["text"].apply(clean_text)
display(df.isna().sum())
# clean keyword column
df["keyword"] = df["keyword"].str.replace("%20", " ")
display(df["keyword"].unique())
# The 'location' column contains many missing values, and seems to contain information that is not even location-specific, so I will be dropping it as I don't think it will be useful for the metadata model.
# The 'keyword' column on the other hand may be helpful, as some words may be more likely to refer to disasters than others. For example, it's pretty easy to use "fire" colloquially, but less easy to use "bomber".
# Below, I will define a function that should help extract keywords from the cleaned text.
def assign_keyword(df, keywords):
for index, row in df.iterrows():
if pd.isna(row["keyword"]) or row["keyword"] == "":
for keyword in keywords:
if keyword in row["text_cleaned"]:
df.at[index, "keyword"] = keyword
break
return df
keywords_array = [
"crash",
"earthquake",
"fire",
"disaster",
"emergency",
"tornado",
"flood",
"ablaze",
"accident",
"aftershock",
"airplane accident",
"ambulance",
"annihilated",
"annihilation",
"apocalypse",
"armageddon",
"army",
"arson",
"arsonist",
"attack",
"attacked",
"avalanche",
"battle",
"bioterror",
"bioterrorism",
"blaze",
"blazing",
"bleeding",
"blew up",
"blight",
"blizzard",
"blood",
"bloody",
"blown up",
"body bag",
"body bagging",
"body bags",
"bomb",
"bombed",
"bombing",
"bridge collapse",
"buildings burning",
"buildings on fire",
"burned",
"burning",
"burning buildings",
"bush fires",
"casualties",
"casualty",
"catastrophe",
"catastrophic",
"chemical emergency",
"cliff fall",
"collapse",
"collapsed",
"collide",
"collided",
"collision",
"crashed",
"crush",
"crushed",
"curfew",
"cyclone",
"damage",
"danger",
"dead",
"death",
"deaths",
"debris",
"deluge",
"deluged",
"demolish",
"demolished",
"demolition",
"derail",
"derailed",
"derailment",
"desolate",
"desolation",
"destroy",
"destroyed",
"destruction",
"detonate",
"detonation",
"devastated",
"devastation",
"displaced",
"drought",
"drown",
"drowned",
"drowning",
"dust storm",
"electrocute",
"electrocuted",
"emergency plan",
"emergency services",
"engulfed",
"quarantine",
"hazard",
"storm",
"epicentre",
"evacuate",
"evacuated",
"evacuation",
"explode",
"exploded",
"explosion",
"eyewitness",
"famine",
"fatal",
"fatalities",
"fatality",
"fear",
"fire truck",
"first responders",
"flames",
"flattened",
"flooding",
"floods",
"forest fire",
"forest fires",
"hail",
"hailstorm",
"harm",
"hazardous",
"heat wave",
"hellfire",
"hijack",
"hijacker",
"hijacking",
"hostage",
"hostages",
"hurricane",
"injured",
"injuries",
"injury",
"inundated",
"inundation",
"landslide",
"lava",
"lightning",
"loud bang",
"mass murder",
"mass murderer",
"massacre",
"mayhem",
"meltdown",
"military",
"mudslide",
"natural disaster",
"nuclear disaster",
"nuclear reactor",
"obliterate",
"obliterated",
"obliteration",
"oil spill",
"outbreak",
"pandemonium",
"panic",
"panicking",
"police",
"quarantined",
"radiation emergency",
"rainstorm",
"razed",
"refugees",
"rescue",
"rescued",
"rescuers",
"riot",
"rioting",
"rubble",
"ruin",
"sandstorm",
"screamed",
"screaming",
"screams",
"seismic",
"sinkhole",
"sinking",
"siren",
"sirens",
"smoke",
"snowstorm",
"stretcher",
"structural failure",
"suicide bomb",
"suicide bomber",
"suicide bombing",
"sunk",
"survive",
"survived",
"survivors",
"terrorism",
"terrorist",
"threat",
"thunder",
"thunderstorm",
"tragedy",
"trapped",
"trauma",
"traumatised",
"trouble",
"tsunami",
"twister",
"typhoon",
"upheaval",
"violent storm",
"volcano",
"war zone",
"weapon",
"weapons",
"whirlwind",
"wild fires",
"wildfire",
"windstorm",
"wounded",
"wounds",
"wreck",
"wreckage",
"wrecked",
]
df = assign_keyword(df, keywords_array)
display(df["keyword"].isna().sum())
# There are still 16 tweets with missing keywords. I will visualize them below to see if we can manually extract them.
display(df[df["keyword"].isna()]["text_cleaned"].values)
display(df[df["keyword"].isna()]["text"].values)
df["keyword"] = df["keyword"].fillna("none")
# Interestingly, none of the above tweets seem to have keywords in them that could be mistaken for a disaster. They might be in the dataset erroneously. I will group them all into a keyword 'none' that indicates that they don't have any disaster words in them. The neural network should be able to pick up that tweets with this keyword should all belong to the 'Non-Disaster' class.
# Below, I will define a function to count spelling errors in the cleaned tweets. Spelling errors could potentially be an indication of class, as news outlets and those reporting real disasters may proofread their tweets more carefully than people casually tweeting about their daily lives. We will visualize the difference between classes in a bit.
# Create an instance of SpellChecker
spell = SpellChecker()
# Define a function to count spelling errors in a tweet
def count_spelling_errors(text: str) -> int:
"""
Count the number of spelling errors in a tweet.
Parameters:
text (str): The text of the tweet to check.
Returns:
int: The number of spelling errors in the tweet.
"""
# Split the tweet into words
words = text.split()
# Count the number of spelling errors
num_errors = 0
for word in words:
# Check if the word is misspelled
if not spell.correction(word) == word:
num_errors += 1
return num_errors
# Apply the function to the 'text' column and store the result in a new column called 'spelling_error_count'
df["spelling_error_count"] = df["text_cleaned"].apply(count_spelling_errors)
# Binary feature that shows presence of spelling errors
df["has_spelling_errors"] = df["spelling_error_count"].apply(
lambda x: 1 if x > 0 else 0
)
# ## Exploratory Data Analysis
# Distribution of binary classes in training set
# Set palette
colors = sns.cubehelix_palette()
# Pie chart to show distribution
fig = plt.figure(figsize=(10, 7))
labels = ["0", "1"]
sizes = train["target"].value_counts()
plt.pie(sizes, labels=labels, colors=colors, autopct="%1.1f%%")
plt.legend()
plt.title("Distribution of Classes")
plt.show()
# Based on the above, the distribution of classes is fairly balanced, with 57% of tweets belonging to the 'Non-Disaster' class and 43% belonging to the 'Disaster' class. This means we won't have to worry too much about class imbalance in the dataset, which would otherwise require resampling or other techniques to mitigate the effect of a majority class.
# We already extracted metadata from the tweets above. Below, we will visualize this metadata for both Disaster and Non-Disaster tweets to see if there are differences between classes in features such as tweet length, number of hashtags, and the presence of spelling errors.
# Define a function to plot features by class
def distr_by_class(data: pd.DataFrame, column: str, ax: plt.Axes):
"""
Creates an overlaid histogram/distribution graph for features in a training set given their class.
Parameters:
- data (pd.DataFrame): The dataframe containing data to plot.
- column (str): The column to plot.
- ax (plt.Axes): The axes to plot on.
"""
# Create a histogram with overlaid density plot
sns.histplot(
data=data,
x=column,
hue="target",
kde=True,
stat="density",
common_norm=False,
ax=ax,
)
# Set the title of the plot
ax.set_title(f"{column} distribution by class")
# Visualizing distribution of pre-cleaned dataset
fig, axs = plt.subplots(nrows=5, ncols=2, figsize=(12, 24))
columns = [
"word_count",
"unique_word_count",
"character_count",
"hashtag_count",
"mention_count",
"url_count",
"capitalized_word_count",
"capitalized_word_proportion",
"spelling_error_count",
"has_spelling_errors",
]
fig.subplots_adjust(hspace=0.3)
for i, column in enumerate(columns):
row = i // 2
col = i % 2
distr_by_class(df, column, ax=axs[row, col])
plt.tight_layout()
plt.show()
# Based on the above, it appears like there are some differences between Disaster and Non-Disaster tweets. Specifically, disaster tweets tend to be longer, contain more URLs, and contain more capitalized words, among other features that can be seen above.
# Create metadata dataframe that contains metadata
metadata = df[
[
"word_count",
"unique_word_count",
"character_count",
"hashtag_count",
"mention_count",
"url_count",
"capitalized_word_count",
"capitalized_word_proportion",
"spelling_error_count",
"has_spelling_errors",
]
]
# We will also visualize the most common n-grams in tweets using the function defined below. n-grams are sequences of n consecutive words: a unigram is a single word, while bigrams and trigrams are sequences of two or three consecutive words, respectively.
# Define function that counts occurrence of n-grams
def ngram_occurrence(df: pd.DataFrame, n: int = 1) -> pd.DataFrame:
"""
This function counts the occurrence of n-grams in the 'text_cleaned' column of a given pandas DataFrame,
and returns a DataFrame with two columns: 'ngram' and 'count'.
Parameters:
- df: pandas DataFrame with a 'text_cleaned' column
- n: integer specifying the number of words in each n-gram
Returns:
- counts_df: pandas DataFrame with two columns: 'ngram' (containing the n-grams) and 'count'
(containing the number of occurrences of each n-gram)
"""
# Initialize a CountVectorizer with stop words and n-gram range
vectorizer = CountVectorizer(stop_words="english", ngram_range=(n, n))
# Convert cleaned text data to a matrix of n-gram counts
matrix = vectorizer.fit_transform(df["text_cleaned"])
# Sum the counts of each n-gram across all tweets
counts = matrix.sum(axis=0).tolist()[0]
# Get the feature names (i.e., the n-grams) from the vectorizer
feature_names = vectorizer.get_feature_names_out()
# Create a DataFrame with the n-grams and their counts
counts_df = pd.DataFrame({"ngram": feature_names, "count": counts})
# Reset the index of the DataFrame to start at 0
counts_df = counts_df.reset_index(drop=True)
# Return the DataFrame with n-grams and their counts
return counts_df
# Creating dataframes with n-grams and their counts for n=1, 2, and 3
disaster_counts_df = ngram_occurrence(df[df["target"] == 1], n=1)
non_disaster_counts_df = ngram_occurrence(df[df["target"] == 0], n=1)
disaster_bigram_counts_df = ngram_occurrence(df[df["target"] == 1], n=2)
non_disaster_bigram_counts_df = ngram_occurrence(df[df["target"] == 0], n=2)
disaster_trigram_counts_df = ngram_occurrence(df[df["target"] == 1], n=3)
non_disaster_trigram_counts_df = ngram_occurrence(df[df["target"] == 0], n=3)
# Below, I will define a function to plot the 10 most common n-grams for Disaster and Non-Disaster tweets.
def plot_top_ngrams(data: pd.DataFrame, column: str, ngram: str, disaster_class: str):
"""
Given a DataFrame of ngram counts, plot the top 10 most common ngrams for a given class
(e.g., disaster or non-disaster) and ngram type (e.g., unigrams, bigrams, or trigrams).
Parameters:
data (pd.DataFrame): DataFrame of ngram counts, with columns for 'ngram' and 'count'.
column (str): Name of column in `data` that contains the ngrams.
ngram (str): Type of ngram to plot (e.g., 'unigram', 'bigram', or 'trigram').
disaster_class (str): Name of class to plot (e.g., 'Disaster' or 'Non-Disaster').
"""
top_ngrams = data.sort_values(ascending=False, by="count").head(10)
fig, ax = plt.subplots(figsize=(10, 6))
colors = sns.cubehelix_palette(n_colors=10, reverse=True)
ax.barh(top_ngrams["ngram"], top_ngrams["count"], color=colors)
ax.set_title(f"Top 10 Most Common {ngram}s in {disaster_class} Tweets")
ax.set_ylabel(f"{ngram}")
ax.set_xlabel("Count")
plt.show()
plot_top_ngrams(disaster_counts_df, "ngram", "Word", "Disaster")
plot_top_ngrams(non_disaster_counts_df, "ngram", "Word", "Non-Disaster")
plot_top_ngrams(disaster_bigram_counts_df, "ngram", "Bigram", "Disaster")
plot_top_ngrams(non_disaster_bigram_counts_df, "ngram", "Bigram", "Non-Disaster")
plot_top_ngrams(disaster_trigram_counts_df, "ngram", "Trigram", "Disaster")
plot_top_ngrams(non_disaster_trigram_counts_df, "ngram", "Trigram", "Non-Disaster")
# Most of the common n-grams shown above make sense. For Disaster tweets, the n-grams tend to be related to actual disasters, such as "hiroshima", "california wildfire", and "bomber detonated bomb". On the other hand, Non-Disaster tweets seem to be common language used by everyday tweeters or common words that appear in non-disaster headlines, such as "like", "looks like", and "stock market crash".
# I did find the presence of "body bag" in all its forms in Non-Disaster tweets to be quite shocking. Upon inspecting the tweets containing these words, I realized that "body bag" most commonly referred to the accessory (like a cross-body bag). In rare cases, the bigram "body bagging" is used as a slang term for overwhelmingly defeating another person in a competition, argument, or conflict. These tweets are not referring to actual body bags!
# Just because it's fun to visualize the most common words in a word cloud shaped like a twitter logo, I will do so below :)
import matplotlib.colors as mcolors
# Create list of all tweets by class
disaster_text = df[df["target"] == 1]["text_cleaned"].tolist()
non_disaster_text = df[df["target"] == 0]["text_cleaned"].tolist()
# Join words and lowercase
disaster_text = " ".join(disaster_text).lower()
non_disaster_text = " ".join(non_disaster_text).lower()
# Create mask for word cloud
mask = np.array(Image.open("/kaggle/input/twitter-mask/twitter_mask.png"))
# Convert cubehelix_palette colors to hexadecimal color codes
hex_codes = [mcolors.rgb2hex(color) for color in colors]
# Define color function for word cloud
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
return hex_codes[np.random.randint(len(hex_codes))]
# Create wordclouds in shape of twitter logo
wordcloud = WordCloud(
stopwords=STOPWORDS,
collocations=True,
background_color="white",
mask=mask,
color_func=color_func,
).generate(disaster_text)
# Plot wordcloud
plt.figure(figsize=(10, 6))
plt.imshow(wordcloud)
plt.title("Most Common Words in Disaster Tweets")
plt.axis("off")
plt.show()
# Create wordclouds in shape of twitter logo
wordcloud = WordCloud(
stopwords=STOPWORDS,
collocations=True,
background_color="white",
mask=mask,
color_func=color_func,
).generate(non_disaster_text)
# Plot wordcloud
plt.figure(figsize=(10, 6))
plt.imshow(wordcloud)
plt.title("Most Common Words in Non-Disaster Tweets")
plt.axis("off")
plt.show()
# ## BERT Model
# ### Building BERT Model Architecture
# For my BERT model, I will use the pre-trained model BertForSequenceClassification. The BertForSequenceClassification model comes default with one output layer to predict the sequence classification task, but I have added additional layers on top of it to optimize its performance.
# The 'bert-base-uncased' is used as the base pre-trained model. It is a smaller version of BERT that has about 110 million parameters in total, while 'bert-large' contains around 340 million parameters. 'bert-large' is more powerful and would likely be more accurate, but it is also computationally expensive, and the smaller BERT model seems to work fine here.
# The additional layers in the model can help in fine-tuning the pretrained model to better adapt to our dataset and the classification task at hand. The additional layers are as follows:
# - Linear layer: Fully connected layer that takes input features from the BERT model, and applies a linear transformation to produce new feature representations
# - ReLU activation layer: Introduces non-linearity to the model, useful in capturing complex patterns and improving model's ability to generalize
# - Dropout layer: Regularization layer that randomly sets a fraction of input units to zero during training, helps prevent overfitting by reducing reliance of model on any particular input feature and promoting the learning of more robust representations
# - Linear layer: Final layer that takes feature representations from the previous layers and applies a linear transformation to produce the final output logits, then we will use argmax() to convert predicted probabilities into one of two discrete class labels (1 or 0 for 'Disaster' or 'Non-Disaster' tweets, respectively).
# Split cleaned combined dataframe back into train and test sets
train_cleaned = df[df["target"].notna()]
test_cleaned = df[df["target"].isna()]
# Set device to use GPU in case it isn't already
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Check the maximum tweet length (in words) in the cleaned text
print(
"Max length of tweets", max([len(x.split()) for x in train_cleaned["text_cleaned"]])
)
model_name = "bert-base-uncased"
max_length = 50
def create_bert():
# Load the BERT model for sequence classification
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
num_labels=2,
output_attentions=False,
output_hidden_states=False,
force_download=True,
)
model.to(device)
# Add activation layer to pre-trained transformer model
# Creates linear layer, ReLU activation function, dropout layer, and another linear layer
model.classifier = nn.Sequential(
nn.Linear(model.config.hidden_size, model.config.hidden_size),
nn.ReLU(),
nn.Dropout(model.config.hidden_dropout_prob),
nn.Linear(model.config.hidden_size, model.config.num_labels),
)
# Move model to GPU
model.cuda()
return model
model = create_bert()
# Visualize model architecture
batch_size = 16
input_data = torch.zeros(batch_size, max_length).long()
summary(model, input_data=input_data, device="cuda")
# ### Defining Pre-Processing and Training Functions
# Below, I will define two functions, one that can be used to tokenize and preprocess the data and returns tensor datasets, and another which trains and validates the tensor datasets.
# Based on the original BERT publication by Google AI in 2018, the authors recommended that the following range of hyperparameters works well for most tasks:
# * Batch size: 16, 32
# * Learning rate (Adam): 5e-5, 3e-5, 2e-5
# * Number of epochs: 2, 3, 4
# I will be training over 4 epochs, but implementing early stopping in case the accuracy of the model does not improve for a certain number of epochs. I will also be using a learning rate of 5e-5 and batch size of 16.
def preprocess_train_data(
df,
text_col,
target_col,
model_name="bert-base-uncased",
tokenizer="BERT",
max_length=50,
train_ratio=0.8,
device="cuda",
):
"""
Preprocesses data for BERT model training and validation.
Args:
df (pd.DataFrame): DataFrame containing the text and target columns.
text_col (str): Name of the text column in the DataFrame.
target_col (str): Name of the target column in the DataFrame.
model_name (str): Name of the BERT model to use (default: 'bert-base-uncased').
tokenizer (str): Name of model, will specify which Tokenizer to instantiate (default: 'BERT')
max_length (int): Maximum length of input texts after tokenization (default: 50).
train_ratio (float): Ratio of training set size to total dataset size (default: 0.8).
device (str): Device to use for tensor computations (default: 'cuda').
Returns:
train_dataset (torch.utils.data.TensorDataset): Training dataset.
val_dataset (torch.utils.data.TensorDataset): Validation dataset.
tokenizer (transformers.BertTokenizerFast): BERT tokenizer.
"""
# Split training set into text and labels
dataset = df[[text_col, target_col]]
text = dataset[text_col].values
labels = dataset[target_col].values
# Load the BERT tokenizer
if tokenizer == "BERT":
tokenizer = BertTokenizerFast.from_pretrained(model_name, do_lower_case=True)
else:
tokenizer = RobertaTokenizerFast.from_pretrained(model_name, do_lower_case=True)
# Tokenize the texts
encoded_dict = tokenizer(
text=text.tolist(),
add_special_tokens=True,
max_length=max_length,
truncation=True,
padding=True,
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
# Convert the encoded text to PyTorch tensors
input_ids = torch.tensor(encoded_dict["input_ids"], device=device)
attention_mask = torch.tensor(encoded_dict["attention_mask"], device=device)
labels = torch.tensor(labels, device=device)
labels = labels.to(torch.int64)
# Combine the inputs and labels into a TensorDataset
dataset = TensorDataset(input_ids, attention_mask, labels)
# Define the sizes of the training and validation sets
train_size = int(train_ratio * len(dataset))
val_size = len(dataset) - train_size
# Split training set into training and validation set
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
print("{} training samples".format(train_size))
print("{} validation samples".format(val_size))
return train_dataset, val_dataset, tokenizer
def train_validate_test(
model_name,
train_dataset,
val_dataset,
batch_size=16,
lr=5.9e-6,
weight_decay=0.01,
epochs=4,
patience=2,
device="cuda",
validation=True,
):
"""
Train, validate, and test a PyTorch model, compute accuracy, precision, recall, F1 score.
Args:
        model_name (str): Name of the model from the Hugging Face library.
train_dataset (pd.DataFrame): The training dataset.
val_dataset (pd.DataFrame): The validation dataset.
batch_size (int): The batch size for training and validation (default: 16).
lr (float): The learning rate for the optimizer (default 5.9e-6).
weight_decay (float): The weight decay for the optimizer (default: 0.01).
epochs (int): The number of epochs for training (default: 4).
patience (int): The number of epochs to wait for early stopping during validation (default: 2).
device (str): The device to be used for training and inference ('cuda' or 'cpu') (default: 'cuda').
validation (bool): Whether to perform validation during training or not, if False,
will predict labels of test set (default: True).
Returns:
predictions(pd.DataFrame): DataFrame of predictions on test set.
"""
# Create a new instance of the model
if model_name == "bert-base-uncased":
model = create_bert()
else:
model = create_roberta()
# Define the dataloaders for the training and validation sets
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size)
    # Define the optimizer; the Hugging Face model computes its own cross-entropy loss
    # internally and returns it as outputs.loss, so no separate loss object is needed
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
# Training loop
best_val_loss = float("inf")
early_stop_count = 0
# Lists to store training and validation loss
train_losses = []
val_losses = []
for epoch in range(epochs):
# Training
train_loss = 0
train_steps = 0
model.train() # Set to training mode
for batch in train_dataloader:
# Load the batch onto the device
batch = [item.to(device) for item in batch]
input_ids, attention_mask, labels = batch
# Zero the gradients from previous batch
optimizer.zero_grad()
model.zero_grad()
# Forward pass
outputs = model(
input_ids=input_ids, attention_mask=attention_mask, labels=labels
)
# Compute the loss
batch_loss = outputs.loss
logits = outputs.logits
train_loss += batch_loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = labels.to("cpu").numpy()
# Backward pass
batch_loss.backward()
# Update the parameters
optimizer.step()
# Increment the training steps
train_steps += 1
# Compute the average training loss for the epoch
avg_train_loss = train_loss / train_steps
train_losses.append(avg_train_loss)
if validation:
# Validation
val_true_labels = []
val_pred_labels = []
total_f1 = 0
total_precision = 0
total_recall = 0
val_loss = 0
val_steps = 0
model.eval() # Set to evaluation mode
with torch.no_grad(): # Disable gradient calculation
for batch in val_dataloader:
# Load the batch onto the device
batch = [item.to(device) for item in batch]
input_ids, attention_mask, labels = batch
# Forward pass
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
)
# Compute the loss
batch_loss = outputs.loss
logits = outputs.logits
val_loss += batch_loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = labels.to("cpu").numpy()
# Calculate F1 score, precision, and recall
f1 = f1_score(label_ids, np.argmax(logits, axis=1), zero_division=1)
precision = precision_score(
label_ids, np.argmax(logits, axis=1), zero_division=1
)
recall = recall_score(
label_ids, np.argmax(logits, axis=1), zero_division=1
)
total_f1 += f1
total_precision += precision
total_recall += recall
# Compute the predictions and append to lists
pred_labels = np.argmax(logits, axis=1)
val_pred_labels.extend(pred_labels)
val_true_labels.extend(label_ids)
# Increment the validation steps
val_steps += 1
# Compute the average validation loss and metrics for the epoch
val_accuracy = accuracy_score(val_true_labels, val_pred_labels)
avg_val_loss = val_loss / val_steps
val_losses.append(avg_val_loss)
            avg_val_accuracy = val_accuracy  # accuracy_score is already averaged over all samples
avg_f1 = total_f1 / val_steps
avg_precision = total_precision / val_steps
avg_recall = total_recall / val_steps
print(
f"Epoch {epoch+1} - Avg Train Loss: {avg_train_loss:.4f} | Avg Val Loss: {avg_val_loss:.4f}"
)
print(" Accuracy: {0:.2f}".format(val_accuracy))
print(" F1 Score: {0:.2f}".format(avg_f1))
print(" Precision: {0:.2f}".format(avg_precision))
print(" Recall: {0:.2f}".format(avg_recall))
# Check if validation loss improved
if avg_val_loss < best_val_loss:
best_val_loss = avg_val_loss
early_stop_count = 0
else:
early_stop_count += 1
# Check if we should early stop
if early_stop_count >= patience:
print(
f"No improvement in validation loss for {patience} epochs. Stopping early."
)
break
else:
# Create an empty list to store the predictions
predictions = []
# Set the model to evaluation mode
model.eval()
# Disable gradient calculation
with torch.no_grad():
                # Loop through the test dataloader (defined globally by preprocess_final below)
                for batch in test_dataloader:
# Load the batch onto the device
batch = [item.to(device) for item in batch]
input_ids, attention_mask = batch
# Make predictions on the batch
outputs = model(input_ids=input_ids, attention_mask=attention_mask)
logits = outputs.logits
# Get the index of the class with the highest probability
preds = torch.argmax(logits, dim=1)
# Move predictions to CPU and append to the list
preds = preds.detach().cpu().numpy()
predictions.append(preds)
return predictions
print(f"Epoch {epoch+1} - Avg Train Loss: {avg_train_loss:.4f}")
if validation:
# Plot the training and validation losses
plt.figure()
plt.plot(
range(1, len(train_losses) + 1),
train_losses,
label="Train Loss",
color=colors[1],
)
plt.plot(
range(1, len(val_losses) + 1),
val_losses,
label="Validation Loss",
color=colors[4],
)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.xticks(range(1, len(train_losses) + 1), range(1, len(train_losses) + 1))
plt.legend()
plt.show()
cm = confusion_matrix(val_true_labels, val_pred_labels)
sns.heatmap(cm, annot=True, fmt="d", cmap=colors)
plt.title("Confusion Matrix")
plt.xlabel("Predicted Class")
plt.ylabel("True Class")
plt.show()
# ### Training and Validation
text_col = "text"
target_col = "target"
train_dataset, val_dataset, tokenizer = preprocess_train_data(
train_cleaned,
text_col,
target_col,
model_name="bert-base-uncased",
tokenizer="BERT",
)
train_validate_test("bert-base-uncased", train_dataset, val_dataset, validation=True)
text_col = "text_cleaned"
target_col = "target"
train_dataset, val_dataset, tokenizer = preprocess_train_data(
train_cleaned,
text_col,
target_col,
model_name="bert-base-uncased",
tokenizer="BERT",
)
train_validate_test("bert-base-uncased", train_dataset, val_dataset, validation=True)
# ## RoBERTa Model
# ### Building RoBERTa Model Architecture
# In the BERT model, I added some additional layers on top of the pre-trained model to improve its performance for the text classification task. RoBERTa's classification model already comes with a more robust classification head containing two fully connected linear layers, so I will not be adding any additional layers below.
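# Optional sanity check (a minimal sketch, not part of the training pipeline): printing the
# built-in classification head of RobertaForSequenceClassification shows it already contains
# a dense Linear layer, dropout, and an out_proj Linear layer mapping hidden_size to num_labels.
from transformers import RobertaForSequenceClassification

print(
    RobertaForSequenceClassification.from_pretrained(
        "roberta-base", num_labels=2
    ).classifier
)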
# Set device to use GPU in case it isn't already
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
max_length = 50
def create_roberta():
    # Load the RoBERTa model for sequence classification
model = RobertaForSequenceClassification.from_pretrained(
"roberta-base",
num_labels=2,
output_attentions=False,
output_hidden_states=False,
force_download=True,
)
# Move the model to the desired device (e.g. GPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
return model
model = create_roberta()
# Visualize model architecture
batch_size = 16
# Define an example input size that matches your input data
input_data = torch.zeros(batch_size, max_length).long()
# Print the summary of the model architecture
summary(model, input_data=input_data, device="cuda")
# ### Training and Validation
text_col = "text"
target_col = "target"
train_dataset, val_dataset, tokenizer = preprocess_train_data(
train_cleaned, text_col, target_col, model_name="roberta-base", tokenizer="ROBERTA"
)
train_validate_test("roberta-base", train_dataset, val_dataset, validation=True)
text_col = "text_cleaned"
target_col = "target"
train_dataset, val_dataset, tokenizer = preprocess_train_data(
train_cleaned, text_col, target_col, model_name="roberta-base", tokenizer="ROBERTA"
)
train_validate_test("roberta-base", train_dataset, val_dataset, validation=True)
# ## FNN Metadata Model
# ### Building FNN Model Architecture
# Define the neural network model
class TextClassifier(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(TextClassifier, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, hidden_size) # Add an additional linear layer
self.relu2 = nn.ReLU() # Add an additional ReLU activation
self.fc3 = nn.Linear(
hidden_size, output_size
) # Adjust the input size of the last linear layer
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x) # Pass through the additional linear layer
x = self.relu2(x) # Pass through the additional ReLU activation
x = self.fc3(x) # Pass through the adjusted last linear layer
return x
# Define the custom dataset for text classification
class TextDataset(Dataset):
def __init__(self, features, labels):
self.features = torch.tensor(features, dtype=torch.float32)
self.labels = torch.tensor(labels, dtype=torch.long)
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
return self.features[idx], self.labels[idx]
# Split the data into train and validation sets
train_features, val_features, train_labels, val_labels = train_test_split(
train_cleaned[
[
"word_count",
"unique_word_count",
"character_count",
"hashtag_count",
"mention_count",
"url_count",
"capitalized_word_count",
"capitalized_word_proportion",
"spelling_error_count",
"has_spelling_errors",
]
].values,
train_cleaned["target"].values,
test_size=0.2,
random_state=42,
)
# Scale the features using StandardScaler
scaler = StandardScaler()
scaler.fit(train_features)
train_features_scaled = scaler.transform(train_features)
val_features_scaled = scaler.transform(val_features)
# Create custom datasets for train and validation sets with scaled features
train_dataset = TextDataset(train_features_scaled, train_labels)
val_dataset = TextDataset(val_features_scaled, val_labels)
batch_size = 16
# Define hyperparameters
input_size = train_features.shape[
1
] # Update input_size to match number of features after scaling
hidden_size = 256
output_size = 2 # Set output_size to 2 for binary classification with 2 classes
# Create an instance of the text classifier model
model = TextClassifier(input_size, hidden_size, output_size)
# Move the model to the desired device (e.g. GPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Use torchinfo to visualize model architecture and output shapes
from torchinfo import summary
summary(model, input_size=(batch_size, input_size), device="cuda")
# ### Training and Validation
# Define loss function and optimizer
loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size)
# Lists to store training and validation loss
train_losses = []
val_losses = []
epochs = 20
best_val_loss = float("inf") # Store the best validation loss
early_stop_count = 0 # Counter for early stopping
patience = 3 # Number of epochs with no improvement in validation loss to trigger early stopping
for epoch in range(epochs):
# Training
train_loss = 0
train_steps = 0
model.train() # Set to training mode
for i, (inputs, labels) in enumerate(train_dataloader):
# Load the batch onto the device
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the gradients from previous batch
optimizer.zero_grad()
# Forward pass
outputs = model(inputs)
# Compute the loss
batch_loss = loss(outputs, labels)
train_loss += batch_loss.item()
# Backward pass
batch_loss.backward()
# Update the parameters
optimizer.step()
# Increment the training steps
train_steps += 1
# Compute the average training loss for the epoch
avg_train_loss = train_loss / train_steps
train_losses.append(avg_train_loss)
# Validation
val_true_labels = []
val_pred_labels = []
total_f1 = 0
total_precision = 0
total_recall = 0
val_loss = 0
val_steps = 0
model.eval() # Set to evaluation mode
with torch.no_grad(): # Disable gradient calculation
for i, (inputs, labels) in enumerate(val_dataloader):
# Load the batch onto the device
inputs = inputs.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(inputs)
# Compute the loss
batch_loss = loss(outputs, labels)
val_loss += batch_loss.item()
# Move logits and labels to CPU
logits = outputs.detach().cpu().numpy()
label_ids = labels.to("cpu").numpy()
# Calculate F1 score, precision, and recall
f1 = f1_score(label_ids, np.argmax(logits, axis=1), zero_division=1)
precision = precision_score(
label_ids, np.argmax(logits, axis=1), zero_division=1
)
recall = recall_score(label_ids, np.argmax(logits, axis=1), zero_division=1)
total_f1 += f1
total_precision += precision
total_recall += recall
# Compute the predictions and append to lists
pred_labels = np.argmax(logits, axis=1)
val_pred_labels.extend(pred_labels)
val_true_labels.extend(label_ids)
# Increment the validation steps
val_steps += 1
# Compute the average validation loss and metrics for the epoch
val_accuracy = accuracy_score(val_true_labels, val_pred_labels)
avg_val_loss = val_loss / val_steps
val_losses.append(avg_val_loss)
avg_val_accuracy = accuracy_score(val_true_labels, val_pred_labels)
avg_f1 = total_f1 / val_steps
avg_precision = total_precision / val_steps
avg_recall = total_recall / val_steps
print(
f"Epoch {epoch+1} - Avg Train Loss: {avg_train_loss:.4f} | Avg Val Loss: {avg_val_loss:.4f}"
)
print(" Accuracy: {0:.2f}".format(val_accuracy))
print(" F1 Score: {0:.2f}".format(avg_f1))
print(" Precision: {0:.2f}".format(avg_precision))
print(" Recall: {0:.2f}".format(avg_recall))
# Check if validation loss improved
if avg_val_loss < best_val_loss:
best_val_loss = avg_val_loss
early_stop_count = 0
else:
early_stop_count += 1
# Check if we should early stop
if early_stop_count >= patience:
print(
f"No improvement in validation loss for {patience} epochs. Stopping early."
)
break
# Plot the training and validation losses
plt.figure()
plt.plot(
range(1, len(train_losses) + 1), train_losses, label="Train Loss", color=colors[1]
)
plt.plot(
range(1, len(val_losses) + 1), val_losses, label="Validation Loss", color=colors[4]
)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.xticks(range(1, len(train_losses) + 1), range(1, len(train_losses) + 1))
plt.legend()
plt.show()
cm = confusion_matrix(val_true_labels, val_pred_labels)
sns.heatmap(cm, annot=True, fmt="d", cmap=colors)
plt.title("Confusion Matrix")
plt.xlabel("Predicted Class")
plt.ylabel("True Class")
plt.show()
# ## Model Comparison
# I have now trained five neural networks to predict Disaster/Non-Disaster tweets:
# * BERT with uncleaned data
# * BERT with cleaned data
# * RoBERTa with uncleaned data
# * RoBERTa with cleaned data
# * Simple FNN with metadata
# Among the models evaluated, the two that consistently have the highest accuracy, F1 score, precision, and recall, are BERT trained on uncleaned text, and RoBERTa trained on cleaned text. It's noteworthy that BERT seems to perform better on uncleaned text while RoBERTa performs better on cleaned text, although the effect is pretty minimal. All four of the BERT and RoBERTa models perform at or above 80% consistently.
# It's possible that BERT performs better on uncleaned data because it is better able to capture patterns from metadata such as punctuation, hashtags, and mentions that were removed during data cleaning. The predictive power of metadata is further evident in the FNN metadata model, which includes features such as tweet word count, number of hashtags, number of mentions, and number of spelling errors, but does not include the text of the tweets itself. Despite this limitation, the FNN model achieves an accuracy of up to 70% on some epochs, which is not as high as the BERT and RoBERTa models' accuracies of around 85%, but still notable considering it only relies on metadata.
# RoBERTa performs slightly better on cleaned versus uncleaned text. Based on my research, RoBERTa, which stands for "Robustly Optimized BERT," is specifically designed to improve the pre-training process of BERT by using additional data and training techniques. However, it's possible that this increased robustness actually makes RoBERTa more sensitive to noise in the data, resulting in decreased performance on the uncleaned data compared to the cleaned data. I would be interested to explore more of what makes BERT and RoBERTa perform differently on cleaned and uncleaned text!
# ## Final Prediction
# I will use the BERT model with uncleaned data to predict the test set and submit my final prediction. Its performance is very similar to the RoBERTa model with cleaned data.
def preprocess_final(
df, text_column, target_column, model_name, max_length=50, device="cuda"
):
train_cleaned = df[df[target_column].notna()]
test_cleaned = df[df[target_column].isna()]
train_dataset = train_cleaned[[text_column, target_column]]
text = train_dataset[text_column].values
labels = train_dataset[target_column].values
test_dataset = test_cleaned[text_column].values
# Set the device
device = torch.device(device)
# Load the BERT tokenizer
tokenizer = BertTokenizerFast.from_pretrained(model_name, do_lower_case=True)
# Tokenize the training texts
encoded_dict = tokenizer(
text=text.tolist(),
add_special_tokens=True,
max_length=max_length,
truncation=True,
padding=True,
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
# Tokenize the test texts
encoded_test = tokenizer(
text=test_dataset.tolist(),
add_special_tokens=True,
max_length=max_length,
truncation=True,
padding=True,
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
    # Convert the tokenized training text to PyTorch tensors
input_ids_train = torch.tensor(encoded_dict["input_ids"])
attention_mask_train = torch.tensor(encoded_dict["attention_mask"])
labels = torch.tensor(labels)
labels = labels.to(torch.int64)
# Test set
input_ids_test = torch.tensor(encoded_test["input_ids"])
attention_mask_test = torch.tensor(encoded_test["attention_mask"])
# Combine the inputs and labels into a TensorDataset
train_dataset = TensorDataset(input_ids_train, attention_mask_train, labels)
test_dataset = TensorDataset(input_ids_test, attention_mask_test)
# Define the dataloaders for the training and test sets
batch_size = 16
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size)
return train_dataloader, test_dataloader, train_dataset, test_dataset
train_dataloader, test_dataloader, train_dataset, test_dataset = preprocess_final(
df, "text", "target", "bert-base-uncased"
)
predictions = train_validate_test(
"bert-base-uncased", train_dataset, test_dataset, epochs=2, validation=False
)
# Convert the list of predictions to a numpy array and flatten it
predictions = np.concatenate(predictions).flatten()
# Create a pandas dataframe with the test ids and the predicted targets
submission_df = pd.DataFrame({"id": test_cleaned["id"], "target": predictions})
# Output the dataframe
submission_df.head()
submission_df.to_csv("submission.csv", index=False)
|
# - author : Sitanan Damrongkaviriyapan
# - studentID : 6341223226
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print(f"pandas version = {pd.__version__}")
print(f"numpy version = {np.__version__}")
print(f"seaborn version = {sns.__version__}")
pd.Timestamp.now()
url = "/kaggle/input/the-best-cities-for-a-workation/best cities for a workation.csv"
df = pd.read_csv(url)
df
df.columns
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
df[cols].hist(layout=(2, 5), figsize=(35, 10), alpha=0.5)
# # Scikit-learn: KMeans Clustering
# ## 1. scale data
# ### - yeo_johnson
# - The Yeo-Johnson transform is used to make the data more normally distributed, since it can handle both positive and negative values
# ---
from sklearn import preprocessing
pre = preprocessing.PowerTransformer(method="yeo-johnson", standardize=False)
tran = pre.fit_transform(df[cols])
tran[:5]
yeo_cols = [f"yeo_{i}" for i in cols]
yeo_cols
ds = pd.concat([df, pd.DataFrame(tran, columns=yeo_cols)], axis="columns")
ds.head()
# ## Data distribution before cleaning
# ---
ds[cols].hist(layout=(2, 5), figsize=(35, 10), alpha=0.5)
# ## Data distribution after cleaning --> using the Yeo-Johnson method
# - The Yeo-Johnson transform is used to make the data more normally distributed, since it can handle both positive and negative values
# ---
ds[yeo_cols].hist(layout=(2, 5), figsize=(35, 10), color="orange", alpha=0.5)
# ## 2. Optimal number of clusters
# ### - Elbow method
# - This method is chosen because it is simple, easy to use, and gives a clear picture for determining the best number of clusters in the dataset
# ---
from sklearn.cluster import KMeans
X = ds[yeo_cols]
X.head()
ssd = []
for k in range(2, 10):
m = KMeans(n_clusters=k)
m.fit(X)
ssd.append([k, m.inertia_])
ssd
dm = pd.DataFrame(ssd, columns=["k", "ssd"])
dm["percent_change"] = dm["ssd"].pct_change() * 100
dm
xy = np.array(ssd)
print(xy)
plt.plot(xy[:, 0], xy[:, 1], "--o")
for index, row in dm.iterrows():
plt.text(
row["k"] + 0.02, row["ssd"] + 0.02, f'{row["percent_change"]:.2f}', fontsize=10
)
# ## 3. Compute and name clusters
# - k = 5 is chosen
# ---
model = KMeans(n_clusters=5)
model
model.fit(X)
model.cluster_centers_.round(4)
model.transform(X)
model.labels_
df["cluster"] = model.labels_
df
X = ds[yeo_cols].copy()
X.loc[:, "cluster"] = model.labels_
X
sns.countplot(x="cluster", data=df)
df.groupby("cluster").head(3).sort_values("cluster")
X.groupby("cluster").median()
# ### Heatmap
# - A heatmap is used because it can present a large amount of data in an easy-to-understand form; colour is used to encode the values, so areas with high or low values can be spotted quickly, which helps reveal trends, outliers, and patterns
# ---
fig, ax = plt.subplots(ncols=2, figsize=(35, 6))
ax = ax.ravel()
sns.heatmap(
X.groupby("cluster").median(),
cmap="Oranges",
linewidths=1,
square=True,
annot=True,
fmt=".2f",
ax=ax[0],
)
sns.heatmap(
df.groupby("cluster").median(),
cmap="Blues",
linewidths=1,
square=True,
annot=True,
fmt=".2f",
ax=ax[1],
)
|
# # Prediction of Sales ( Based on advertisement cost ) EDA & ML modeling
# ## 1) Importing Dataset and Libraries
# ### 1- 1) Importing Libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# data processing
from sklearn.preprocessing import Normalizer, StandardScaler
# data splitting
from sklearn.model_selection import train_test_split, KFold, cross_validate
# data evaluation
from sklearn.metrics import r2_score, mean_squared_error
# data modeling
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.preprocessing import PolynomialFeatures
# set seed
SEED = 123
# ### 1-2) Download and Loading Dataset as DataFrame
# #### To download and import the data into the notebook, we use two methods
# 1. Direct method: using pandas to read the CSV directly from its URL.
# 2. Indirect method: downloading the file with "wget" and then reading it locally.
# First method
df = pd.read_csv(
"https://raw.githubusercontent.com/HadiTajari/Sales-Prediction/master/advertising.csv"
)
# Second method
# !wget https://raw.githubusercontent.com/HadiTajari/Sales-Prediction/master/advertising.csv
# df = pd.read_csv("advertising.csv")
df.head() # Looking top 5 row
# ## 2) Exploratory Data Analysis
# ### 2-1) Analyzing Individual Feature Patterns Using Visualization
# #### How to choose the right visualization method?
# When visualizing individual variables, it is important to first understand what type of variable you are dealing with. This will help us find the right visualization method for that variable.
df.dtypes
# #### we can calculate the correlation between variables of type "int64" or "float64" using the method "corr":
df.corr()
# ##### A great way to visualize these variables is by using scatterplots
sns.scatterplot(x="Radio", y="Sales", data=df)
sns.scatterplot(x="TV", y="Sales", data=df)
sns.scatterplot(x="Newspaper", y="Sales", data=df)
plt.legend(labels=["Radio", "TV", "Newspaper"])
plt.xlabel("Advertising Costs")
plt.ylabel("Mount of Sales")
# In order to start understanding the (linear) relationship between an individual variable and sales, we can use "regplot", which plots the scatterplot plus the fitted regression line for the data. This will be useful later on for visualizing the fit of the simple linear regression model as well
# Radio advs cost and sales
plt.figure(figsize=(6, 3))
sns.regplot(x="Radio", y="Sales", data=df, color="red")
plt.title("Radio advs")
# TV advs cost and sales
plt.figure(figsize=(6, 3))
sns.regplot(x="TV", y="Sales", data=df, color="orange")
plt.title("TV advs")
df[["TV", "Sales"]].corr()
# Newspaper advs cost and sales
plt.figure(figsize=(6, 3))
sns.regplot(
x="Newspaper",
y="Sales",
data=df,
color="green",
)
plt.title("Newspaper advs")
# ### 2-2) Descriptive Statistical Analysis
df.describe()  # description of numeric features
df.info() # A summary of the dataset and columns
# Observations:
# 1) All of the features are numeric
# 2) The target variable is a continuous numerical type
# 3) So we must use regression models
# The main question we want to answer in this section is, "What are the main characteristics which have the most impact on `Sales`?"
# To get a better measure of the important characteristics, we look at the correlation of the features with sales. In other words: how dependent are sales on each feature?
# ### 2-3) Correlation and Causation
# Correlation: a measure of the extent of interdependence between variables.
# Causation: the relationship of cause and effect between two variables.
#
df.corr()
# ##### We use the p-value to assess the significance of the correlation estimate
# if :
# 1. p-value is $<$ 0.001: we say there is strong evidence that the correlation is significant.
# 2. the p-value is $<$ 0.05: there is moderate evidence that the correlation is significant.
# 3. the p-value is $<$ 0.1: there is weak evidence that the correlation is significant.
# 4. the p-value is $>$ 0.1: there is no evidence that the correlation is significant.
# We can obtain this information using "stats" module in the "scipy" library.
from scipy import stats
pearson_coef, P_value = stats.pearsonr(df["Radio"], df["Sales"])
print(
"The pearson correlation coefficient is: " f"{pearson_coef:0.4}",
"P_vlaue: " f"{P_value:0.3%}",
)
pearson_coef, P_value = stats.pearsonr(df["TV"], df["Sales"])
print(
"The pearson correlation coefficient is: " f"{pearson_coef:0.4}",
"P_vlaue: " f"{P_value:0.3%}",
)
pearson_coef, P_value = stats.pearsonr(df["Newspaper"], df["Sales"])
print(
"The pearson correlation coefficient is: " f"{pearson_coef:0.4}",
"P_vlaue: " f"{P_value:0.5}",
)
# ## 3) Modeling
# ### Splitting data into features and target variable
X = df.drop("Sales", axis=1) # The Features
y = df["Sales"] # The Target
# ### Splitting Data and making KFOLD
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=SEED, shuffle=True
)
# The shape of train and validation data
y_train.shape, y_val.shape
kf = KFold(n_splits=5, shuffle=True, random_state=SEED)
# ### Linear Regression
lin_reg = LinearRegression()
cv = cross_validate(
lin_reg,
X_train,
y_train,
cv=kf,
scoring=("r2", "neg_mean_squared_error"),
return_estimator=True,
)
# see more scoring metrics at https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
model_lin = cv["estimator"]
y_hats = [model.predict(X_val) for model in model_lin]
r2_lin = np.mean([r2_score(y_val, y_hat) for y_hat in y_hats])
print("R2 of linearregression: " f"{r2_lin:0.2%}")
# ### DecisionTree
dec = DecisionTreeRegressor()
cv = cross_validate(
dec,
X_train,
y_train,
cv=kf,
scoring=("r2", "neg_mean_squared_error"),
return_estimator=True,
)
model_dec = cv["estimator"]
y_hats = [model.predict(X_val) for model in model_dec]
r2_dec = np.mean([r2_score(y_val, y_hat) for y_hat in y_hats])
print("R2 of DecisionTree: " f"{r2_dec:0.2%}")
# ### Random Forest
rf = RandomForestRegressor()
cv = cross_validate(
rf,
X_train,
y_train,
cv=kf,
scoring=("r2", "neg_mean_squared_error"),
return_estimator=True,
)
model_rf = cv["estimator"]
y_hats = [model.predict(X_val) for model in model_rf]
r2_rf = np.mean([r2_score(y_val, y_hat) for y_hat in y_hats])
print("R2 of Randomforest: " f"{r2_rf:0.2%}")
# ### Light Gradient Boosting Model (LightGBM)
lgb = LGBMRegressor()
cv = cross_validate(
lgb,
X_train,
y_train,
cv=kf,
scoring=("r2", "neg_mean_squared_error"),
return_estimator=True,
)
model_lgb = cv["estimator"]
y_hats = [model.predict(X_val) for model in model_lgb]
r2_lgb = np.mean([r2_score(y_val, y_hat) for y_hat in y_hats])
print("R2 of LightGBM: " f"{r2_lgb:0.2%}")
# ### Extreme Gradient Boosting (XGBoost)
xgb = XGBRegressor()
cv = cross_validate(
xgb,
X_train,
y_train,
cv=kf,
scoring=("r2", "neg_mean_squared_error"),
return_estimator=True,
)
model_xgb = cv["estimator"]
y_hats = [model.predict(X_val) for model in model_xgb]
r2_xgb = np.mean([r2_score(y_val, y_hat) for y_hat in y_hats])
print("R2 of XGB: " f"{r2_xgb:0.2%}")
# ### Polynomial Regression
poly = PolynomialFeatures(degree=2)
X_poly = poly.fit_transform(X)
X_train, X_val, y_train, y_val = train_test_split(
X_poly, y, test_size=0.2, random_state=SEED, shuffle=True
)
model = LinearRegression()
poly_model = model.fit(X_train, y_train)
pred = poly_model.predict(X_val)
r2_poly = r2_score(y_val, pred)  # true values first, then predictions
print("R2 of Polynomial Regression: " f"{r2_poly:0.2%}")
# Table of Results
print("R2 of linearregression: " f"{r2_lin:0.2%}")
print("R2 of DecisionTree: " f"{r2_dec:0.2%}")
print("R2 of Randomforest: " f"{r2_rf:0.2%}")
print("R2 of LightGBM: " f"{r2_lgb:0.2%}")
print("R2 of XGB: " f"{r2_xgb:0.2%}")
print("R2 of Polynomial Regression: " f"{r2_poly:0.2%}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Before we start, thank you for your time walking through this analysis with me!**
# **Here is a brief intro of this analysis:**
# This directory contains data on over **4.5 million Uber pickups in New York City from April to September 2014**.
# There are six files of raw data on Uber pickups in New York City from April to September 2014. The files are separated by month and each has the following columns:
# Date/Time : The date and time of the Uber pickup
# Lat : The latitude of the Uber pickup
# Lon : The longitude of the Uber pickup
# Base : The TLC base company code affiliated with the Uber pickup
# **In this analysis, we will first examine the data for the month of April 2014 to identify any patterns in the Uber rides.
# We will then import and concatenate data from multiple files into a master file for a comprehensive assessment of the overall trends and insights.**
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data_apr = pd.read_csv(
"/kaggle/input/uber-pickups-in-new-york-city/uber-raw-data-apr14.csv"
)
data_may = pd.read_csv(
"/kaggle/input/uber-pickups-in-new-york-city/uber-raw-data-may14.csv"
)
data_jun = pd.read_csv(
"/kaggle/input/uber-pickups-in-new-york-city/uber-raw-data-jun14.csv"
)
data_jul = pd.read_csv(
"/kaggle/input/uber-pickups-in-new-york-city/uber-raw-data-jul14.csv"
)
data_aug = pd.read_csv(
"/kaggle/input/uber-pickups-in-new-york-city/uber-raw-data-aug14.csv"
)
data_sep = pd.read_csv(
"/kaggle/input/uber-pickups-in-new-york-city/uber-raw-data-sep14.csv"
)
data_concat = pd.concat([data_apr, data_may, data_jun, data_jul, data_aug, data_sep])
# # Examining data for April 2014
# Let's start by examining the data for April 2014 in order to visualize the pattern of Uber rides in NYC.
# Before creating plots, we need to create some new columns in the original dataset 'data_apr'.
# Convert Date/Time column to datetime type:
data_apr["Date/Time"] = pd.to_datetime(data_apr["Date/Time"])
# Add two new columns of Date and Hours:
data_apr["Date"] = data_apr["Date/Time"].dt.date
data_apr["Hour"] = data_apr["Date/Time"].dt.hour
data_apr["Day_of_Week"] = data_apr["Date/Time"].dt.strftime("a%A")
# Calculate counts of occurrences of Dates and Hours:
date_counts = data_apr["Date"].value_counts().sort_index()
hour_counts = data_apr["Hour"].value_counts().sort_index()
day_counts = data_apr["Day_of_Week"].value_counts()
date_counts
# # Plot ride volume by Date
# We will start by creating a bar plot to visualize the volume of rides throughout the dates of the month.
# From the plot, it is evident that the daily ride volumes follow a cyclical pattern.
# Create a bar plot of Dates:
plt.bar(date_counts.index, date_counts.values)
plt.xlabel("Date")
plt.xticks(
date_counts.index, [i.strftime("%d") for i in date_counts.index], rotation=60
)
plt.ylabel("Counts of Dates")
# Add annotation on y-axis:
for i in range(len(date_counts)):
plt.text(
date_counts.index[i],
date_counts.values[i],
f"{date_counts.values[i]/1000:.1f}K",
rotation=90,
ha="center",
va="bottom",
)
plt.title("Counts of Uber Rides in April by Date")
plt.show()
# # Plot ride volume by Day of the Week
# Based on the previous plot, it seems like there might also be a pattern throughout the weeks.
# To confirm this, we can create another plot based on the days of the week.
# Please note that we will need to reindex the x-ticks to display the days of the week from Monday to Sunday.
# Sort the counts of days from Monday to Sunday:
days_in_order = [
"aMonday",
"aTuesday",
"aWednesday",
"aThursday",
"aFriday",
"aSaturday",
"aSunday",
]
days_label = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
day_counts = day_counts.reindex(days_in_order)
# Make a bar plot:
plt.bar(day_counts.index, day_counts.values)
# Reset the x-axis label:
plt.xlabel("Day of the Week")
plt.xticks(ticks=day_counts.index, labels=days_label, rotation=45)
plt.ylabel("Count")
# Add annotation on y-axis:
for i in range(len(day_counts)):
plt.text(
day_counts.index[i],
day_counts.values[i],
str(day_counts.values[i]),
ha="center",
va="bottom",
)
plt.title("Count of Occurrences by Day of the Week")
plt.show()
# # Plot ride volume by Hour
# To dive deeper into our data, we can also plot the ride occurrences by hour of the day
# Create a bar plot for Hours:
plt.bar(hour_counts.index, hour_counts.values)
plt.xlabel("Hours")
plt.xticks(hour_counts.index, rotation=60)
plt.ylabel("Counts of Hours")
# Add annotation on y-axis:
for i in range(len(hour_counts)):
plt.text(
hour_counts.index[i],
hour_counts.values[i],
f"{hour_counts.values[i]/1000:.1f}K",
rotation=90,
ha="center",
va="bottom",
)
plt.title("Counts of Uber Rides by Hours")
plt.show()
# # Observations and Proposals
# Based on the plot of count of Uber rides by day of the week in April 2014 in NYC,
# it appears that Monday and Sunday have the least counts, while Tuesday and Wednesday have the most rides.
# Additionally, the plot indicates that approximately 60% of rides occurred between 14:00 - 21:00.
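# A quick sanity check of that share (a minimal sketch reusing the 'Hour' column created
# above; the exact figure may differ somewhat from the approximate 60% quoted):
afternoon_share = data_apr["Hour"].between(14, 21).mean()
print(f"Share of April rides between 14:00 and 21:00: {afternoon_share:.1%}")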
# **Based on this observation, some possible proposals for the reasons behind this pattern could be:**
# **1. Commuting patterns**: Tuesdays and Wednesdays are typically busy weekdays when people commute to work and go about their regular activities. This could explain the higher ride counts on these days. In contrast, Mondays and Sundays may see fewer rides as people may be off work or have more flexible schedules.
# **2. Business travel:** Tuesdays and Wednesdays are often popular days for business travel, as professionals may travel to different locations for meetings, conferences, or other work-related events. This could contribute to the higher ride counts on these days compared to Mondays and Sundays.
# **3. Weekend leisure activities:** Mondays and Sundays may see fewer rides as people may prefer to stay at home or engage in leisure activities closer to their residences during weekends, resulting in lower ride counts.
# # Examining Data from April to September
# And create a **line plot** to show pattern!
data_concat["Date/Time"] = pd.to_datetime(data_concat["Date/Time"])
# Add two new columns of Date and Hours:
data_concat["Date"] = data_concat["Date/Time"].dt.date
data_concat["Hour"] = data_concat["Date/Time"].dt.hour
data_concat["Day_of_Week"] = data_concat["Date/Time"].dt.strftime("a%A")
# Calculate counts of occurrences of Dates and Hours:
date_counts_concat = data_concat["Date"].value_counts().sort_index()
hour_counts_concat = data_concat["Hour"].value_counts().sort_index()
day_counts_concat = data_concat["Day_of_Week"].value_counts()
# Create a line plot of Dates:
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(date_counts_concat.index, date_counts_concat.values)
ax.set_xlabel("Date")
ax.set_ylabel("Counts of Dates")
ax.set_title("Counts of Uber Rides from April to September by Date")
plt.tight_layout()
plt.show()
# # Peaks and Pits
# By creating a line plot, it appears that Uber ride volumes follow **cyclical patterns**, with **peaks and pits**.
# Let's add **annotations** to highlight these peaks and pits.
# Create a line plot of Dates:
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(date_counts_concat.index, date_counts_concat.values)
ax.set_xlabel("Date")
ax.set_ylabel("Counts of Dates")
ax.set_title("Counts of Uber Rides from April to September by Date")
# Get the indices of peaks and pits in the data:
from scipy.signal import find_peaks
peaks, _ = find_peaks(date_counts_concat.values)
pits, _ = find_peaks(-date_counts_concat.values)
peaks_and_pits = np.sort(np.concatenate([peaks, pits]))
# Add markers at the peaks and pits:
ax.plot(
date_counts_concat.index[peaks],
date_counts_concat.values[peaks],
"o",
markersize=10,
label="Peaks",
)
ax.plot(
date_counts_concat.index[pits],
date_counts_concat.values[pits],
"<",
markersize=10,
label="Pits",
)
# Add annotations with the corresponding dates at the peaks and pits:
for i in range(len(peaks_and_pits)):
ax.annotate(
date_counts_concat.index[peaks_and_pits[i]].strftime("%m%d"),
xy=(
date_counts_concat.index[peaks_and_pits[i]],
date_counts_concat.values[peaks_and_pits[i]],
),
xytext=(10, 6),
textcoords="offset points",
ha="center",
va="bottom",
fontsize=8,
)
ax.set_xticks([])
plt.tight_layout()
plt.show()
peaks_and_pits
peaks
peak_days_of_week = []
for i in peaks:
peak_date = date_counts_concat.index[i]
peak_day_of_week = peak_date.strftime("%A")
peak_days_of_week.append(peak_day_of_week)
peak_days, counts = np.unique(peak_days_of_week, return_counts=True)
plt.bar(peak_days, counts)
plt.xlabel("Peak Days of the Week")
plt.show()
|
# # I reached 96% accuracy on binary classification using the SVC algorithm
# # Gathering Data
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC
from sklearn.metrics import (
classification_report,
accuracy_score,
zero_one_loss,
roc_auc_score,
confusion_matrix,
ConfusionMatrixDisplay,
)
# get data
Data = pd.read_csv(
"/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv"
)
# represent data first 5 rows
Data.head()
# last 5 rows
Data.tail()
# # Exploring Data
# data columns
Data.columns
# explain each column
data_agenda = {
"fixed acidity": "most acids involved with wine or fixed or nonvolatile (do not evaporate readily)",
"volatile acidity": "the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste",
"citric acid": "found in small quantities, citric acid can add 'freshness' and flavor to wines",
"residual sugar": "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter ",
"chlorides": "the amount of salt in the wine",
"free sulfur dioxide": "the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents",
"total sulfur dioxide": "amount of free and bound forms of S02; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2",
"density": "the density of water is close to that of water depending on the percent alcohol and sugar content",
"pH": "describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4",
"sulphates": "a wine additive which can contribute to sulfur dioxide gas (S02) levels, wich acts as an antimicrobial",
"alcohol": "the percent alcohol content of the wine",
"quality": "output variable (based on sensory data, score between 0 and 8)",
}
print("The PH is : {0}".format(data_agenda["pH"]))
# describe data
Data.describe()
# shape of data
Data.shape
# represent data null values
Data.isnull().sum().sum()
# represent Class output
Data.quality.value_counts()
# visualize class percentage
explode = (0, 0, 0, 0, 0, 0.9)
plt.pie(
list(Data.quality.value_counts()),
labels=[5, 6, 7, 4, 8, 3],
autopct="%1.f%%",
pctdistance=0.8,
startangle=180,
explode=explode,
)
# visualize data range
Data.hist(figsize=(30, 30), bins=30)
# visualize each data of the columns
Data.plot()
# data correlation
Data.corr()
# visualize correlation of data
sns.heatmap(Data.corr())
# visualize quartiles and means of the data's features
plt.boxplot(Data.iloc[:, :-1], showmeans=True, meanline=True)
# relation between each feature and output
data_feature = Data.columns[:-1]
plt.figure(figsize=(30, 30))
for i in range(len(data_feature)):
plt.subplot(6, 3, i + 1)
sns.lineplot(x=Data[str(data_feature[i])], y=Data["quality"], c="g")
# # Data Preparation
# Data already cleaned
# scaling Data
Data.iloc[:, :-1] = MinMaxScaler(copy=False).fit_transform(Data.iloc[:, :-1])
# describe data to see scaling
Data.describe()
Data["quality"].value_counts()
# determine X and y
x = Data.iloc[:, :-1]
y = Data["quality"]
# first smote
x, y = SMOTE().fit_resample(x, y)
y.value_counts()
# binarize the target: quality >= 6.5 -> 1 (good wine), otherwise 0 (bad wine)
for i in range(y.shape[0]):
if y.loc[i] >= 6.5:
y.loc[i] = 1
else:
y.loc[i] = 0
y.value_counts()
# second smote
x, y = SMOTE().fit_resample(x, y)
y.value_counts()
# split data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
x_train.shape
x_test.shape
# # Build Model
LR = LogisticRegression()
SGDC = SGDClassifier()
svc = SVC(C=120)
# # Train Model
LR.fit(x_train, y_train)
SGDC.fit(x_train, y_train)
svc.fit(x_train, y_train)
# # Test Model
LR.predict(x_test)
SGDC.predict(x_test)
svc.predict(x_test)
# # Evaluation
LR.score(x_train, y_train)
accuracy_score(y_test, LR.predict(x_test))
"""
precision recall f1-score support
0 0.92 0.87 0.89 727
1 0.86 0.91 0.88 635
accuracy 0.89 1362
macro avg 0.89 0.89 0.89 1362
weighted avg 0.89 0.89 0.89 1362
"""
classification_report(y_test, LR.predict(x_test))
confusion_matrix(y_test, LR.predict(x_test))
# visualize confusion Matrix
ConfusionMatrixDisplay(
confusion_matrix(y_test, LR.predict(x_test)),
display_labels=["Bad_Wine", "Good_Wine"],
).plot()
zero_one_loss(y_test, LR.predict(x_test))
SGDC.score(x_train, y_train)
accuracy_score(y_test, SGDC.predict(x_test))
"""
precision recall f1-score support
0 0.92 0.86 0.89 727
1 0.85 0.92 0.88 635
accuracy 0.89 1362
macro avg 0.89 0.89 0.89 1362
weighted avg 0.89 0.89 0.89 1362
"""
classification_report(y_test, SGDC.predict(x_test))
confusion_matrix(y_test, SGDC.predict(x_test))
# visualize confusion Matrix
ConfusionMatrixDisplay(
confusion_matrix(y_test, SGDC.predict(x_test)),
display_labels=["Bad_Wine", "Good_Wine"],
).plot()
zero_one_loss(y_test, SGDC.predict(x_test))
svc.score(x_train, y_train)
accuracy_score(y_test, svc.predict(x_test))
"""
precision recall f1-score support
0 0.95 0.89 0.92 727
1 0.88 0.94 0.91 635
accuracy 0.91 1362
macro avg 0.91 0.92 0.91 1362
weighted avg 0.92 0.91 0.91 1362
"""
classification_report(y_test, svc.predict(x_test))
confusion_matrix(y_test, svc.predict(x_test))
# visualize confusion Matrix
ConfusionMatrixDisplay(
confusion_matrix(y_test, svc.predict(x_test)),
display_labels=["Bad_Wine", "Good_Wine"],
).plot()
zero_one_loss(y_test, svc.predict(x_test))
|
# Let's make all the csv files in our folder into a single dataframe.
import pandas as pd
import os
from os import listdir
from os.path import isfile, join
import glob
df = pd.concat(
map(
pd.read_csv,
glob.glob(
os.path.join(
"",
"../input/borsa-istanbul-bist100-index-20102020/BİST100-2010.01.01-2020.07.01/*.csv",
)
),
)
)
df
# Let's get the ticker names from our folder. And remove .csv from ticker names
ticker_names = [
f
for f in listdir(
"../input/borsa-istanbul-bist100-index-20102020/BİST100-2010.01.01-2020.07.01/"
)
if isfile(
join(
"../input/borsa-istanbul-bist100-index-20102020/BİST100-2010.01.01-2020.07.01/",
f,
)
)
]
ticker_names
# remove the .csv extension from the ticker names (str.strip removes characters, not a
# suffix, so slicing off the extension is safer)
ticker_names = [y[:-4] if y.endswith(".csv") else y for y in ticker_names]
ticker_names
# Let's take the 'Close' column of each stock in the folder, match the filenames of the stocks with the columns.
all_files = glob.glob(
os.path.join(
"../input/borsa-istanbul-bist100-index-20102020/BİST100-2010.01.01-2020.07.01/",
"*.csv",
)
) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (
pd.read_csv(f, parse_dates=True, index_col="Date").assign(filename=f)
for f in all_files
)
concat = pd.concat(df_from_each_file, axis=1)
df = concat["Close"]
df.columns = all_files
df
# Let's replace column names with our ticker list instead of folder paths.
bist_100 = df
bist_100.columns = ticker_names
bist_100
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
df.head()  # READING THE DATASET
df.columns[-1]
df.drop(
    labels=df.columns[-1], axis=1, inplace=True
)  # REMOVAL OF THE LAST COLUMN, WHICH CONTAINS MISSING VALUES
df.shape  # 569 instances and 33 attributes
print("{} records and {} attributes".format(df.shape[0], df.shape[1]))
df.info()  # DATASET INFORMATION - CHECK WHETHER THERE ARE NULL VALUES
df.dtypes.value_counts()  # CATEGORICAL ATTRIBUTES = 1 AND NUMERIC = 31
df["diagnosis"].value_counts()  # SHOW THE DIAGNOSIS VALUE COUNTS
df.describe().T  # THE FEATURES ARE NOT ON THE SAME SCALE
df.isnull().sum()  # CHECKING WHETHER THERE ARE NULL VALUES IN THE ATTRIBUTE COLUMNS
# QUESTION 2: Yes. To organise the dataset, the attribute (Unnamed: 32) was removed first,
# since it contains missing values and will not influence the result.
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html Q 2
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler
scale = MinMaxScaler()
import matplotlib.pyplot as plt
import numpy as np
# Create the boxplot (numeric columns only, since 'diagnosis' is categorical)
fig, ax = plt.subplots()
ax.boxplot(df.select_dtypes(include="number").values)
# Add a title and axis label
ax.set_title("Boxplot")
ax.set_ylabel("Values")
plt.show()
# Boxplot of a single attribute
plt.boxplot(df["radius_se"])
plt.show()
cols = ["compactness_se", "concavity_se", "concave points_se"]
df.boxplot(column=cols)
# VERIFICANDO OS BOXPLOTS DE MÚLTIPLOS ATRIBUTOS
plt.subplot(131)
plt.boxplot(x=df["compactness_se"])
plt.subplot(132)
plt.boxplot(x=df["concavity_se"])
plt.subplot(133)
plt.boxplot(x=df["concave points_se"])
|
# # The Relationship Between GDP and Life Expectancy
# ### Table of Contents
# * [Goals](#goals)
# * [Scoping](#scoping)
# * [Data](#data)
# * [Time Series Analysis](#tsa)
# - [Life Expectancy](#le)
# - [GDP](#gdp)
# - [Average GDP vs Life Expectancy](#gdp-le)
# * [Time Series Multivariate Analysis](#ts-ma)
# - [Zimbabwe](#zimbabwe)
# - [Medium GDP Countries](#mediumgdp)
# - [High GDP Countries](#highgdp)
# * [Exploratory Data Analysis: Correlation](#eda:corr)
# * [Discussion](#discussion)
# ## Project Goals
# The main goal of the current project will be to analyze data on GDP and life expectancy from the World Health Organization and the World Bank, in order to identify the **relationship between GDP and Life Expectancy** of six countries.
# To this end, the data will be analysed, prepared, and visualised in order to answer this question in a meaningful way.
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
import numpy as np
import csv
import statsmodels.api as sm
import math
import statsmodels
#
# ## Project Scoping
# With the goal of identifying the relationship between a country's GDP and its population's life expectancy, the following analytical sets will be required, in order:
# 1. Univariate Analysis of each country's life expectancy (different parts of the population will have different expectancies) and GDP - in order to observe how they evolved across the time span seen in the data (time series analysis)
# 2. Bivariate Analysis between the average GDP and Life Expectancy for each year of the dataset - What type of relationship (if any) is there between the two overall?
# 3. Bivariate Analysis of the relationship between each country's GDP and Life Expectancy - How do the two change, comparatively to each other? Does there seem to be a relationship between the two, in each country specifically?
# 4. Multivariate Analysis of the 6 countries' GDP and life expectancy. Observing the relationship between these two measures for each country, is there any other insight revealed?
# 5. Pearson's Correlation. This will allow us to gain a deeper understanding of the relationship between the two variables (a minimal sketch of this computation is shown below).
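# A minimal sketch of step 5 (Pearson's correlation), using the same CSV that is loaded in
# the Data section below; the full correlation analysis appears later in the notebook.
_sketch_df = pd.read_csv("/kaggle/input/codecademy-portfolio-2-data/all_data.csv")
_r, _p = stats.pearsonr(_sketch_df["GDP"], _sketch_df["Life expectancy at birth (years)"])
print(f"Pearson's r = {_r:.2f}, p-value = {_p:.3g}")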
# ### Hypothesis for the EDA:
# 1. Countries with a higher GDP will tend to have a higher life expectancy.
# 2. Life expectancy will tend to grow over time in all countries
# 3. GDP will tend to grow over time for all countries. No reduction in GDP is expected for any of the countries present in the dataset.
# Finally, with relation to the relationship between the two measures (GDP and Life Expectancy), a significant positive relationship is expected, although, it might be more difficult to understand the direction of said relationship. Not only this, but, although significant and positive, it might well be the case that there isn't a strong correlation between the two, due to the complexity of each specific country's economic, health and societal situations rarely allowing for this sort of relationship between two single measures.
# # Data
# Before analysing the data, an examination of its structure, columns, number of datapoints, missing data and summary statistics will be run, to give us a better insight into the dataset.
data = pd.read_csv("/kaggle/input/codecademy-portfolio-2-data/all_data.csv")
print(data.head(-20))
print(data.info())
print(data.Country.unique())
print(data.describe())
# The column "Country" seems to be comprised of the countries:
# 'Chile' 'China' 'Germany' 'Mexico' 'United States of America' 'Zimbabwe'.
# As such, these will be the countries examined in the report.
# There are no Null values in the data and we have a total of 96 entries. Furthermore, the data was collected between the year 2000 and 2015.
# Across this time span, the minimum Life Expectancy for a country was 44 years, and the maximum was 81. In terms of GDP, the lowest amount was that of 441 Million Dollars, whereas the maximum is equal to 1.81 Trillion. From this, one can expect to find a very wide range of variation between these 6 countries, in both measures.
# For the next step in the current report, a Time Series analysis will be conducted to explore the evolution of the two measures across the time span in the dataset.
# ## Exploratory Data Analysis: Time Series Analysis
# ### Life Expectancy
data_copy = data.rename(columns={"Life expectancy at birth (years)": "Life_expectancy"})
data_copy.replace("United States of America", "USA", inplace=True)
years = data_copy.Year[0:16]
for i, country in enumerate(data_copy.Country.unique()):
    # Life Expectancy plot for each country
ax = plt.subplot(3, 2, i + 1)
plt.plot(years, data_copy.Life_expectancy[data_copy.Country == country], marker="s")
plt.title("Life Expectancy in " + country)
plt.xlabel("Years (2000 - 2015)")
plt.ylabel("Expected Age")
plt.show()
# It seems that, although with some variation across time, all 6 countries have seen a positive growth in their population's Life Expectancy. This wasn't true through all the years in the dataset however, as every country - except perhaps China - saw a period of either a decrease or stagnation. However, overall, there seems to be a trend towards growth.
# ### GDP
for i, country in enumerate(data_copy.Country.unique()):
    # plot GDP for the current country
ax = plt.subplot(3, 2, i + 1)
plt.plot(years, data_copy.GDP[data_copy.Country == country], marker="s")
plt.title(country + "'s GDP")
plt.xlabel("Years (2000 - 2016)")
if country == "Zimbabwe":
plt.ylabel("Billions")
elif (country == "USA") or (country == "China"):
plt.ylabel("Trilions")
elif country == "Chile":
plt.ylabel("(10s of) Billions")
else:
plt.ylabel("(100s of) Billions")
plt.show()
# Through the above visualisations, one can observe that, although the trend is once again growth, this isn't as clear as with Life Expectancy. Specifically, Germany, Chile and Mexico saw their GDP stagnate or fall in the final years of the dataset, whereas the USA, China and Zimbabwe have seen growth in recent years, although Zimbabwe's GDP was falling across time until around 2008.
# **From both of these visualisations, a positive trend can be identified on the two measures. As such, the next step in the analysis would be to explore how the two match up to each other year-on-year, in an effort to explore their relationship.**
# ### Time Series Analysis: Average Life Span & GDP
# obtaining Series of the average of the two measures across all years
average_GDP = data_copy.GDP.groupby(data_copy.Year).mean()
average_life_span = data_copy.Life_expectancy.groupby(data_copy.Year).mean()
# plotting both in a scatter plot
sns.scatterplot(x=average_life_span, y=average_GDP)
plt.title("GDP vs Life Expectancy (Average)")
plt.show()
# As can be seen, the relationship between these two measures seems to be linear, with the two measures growing together. Of course, finding the direction of this relationship might be outside the scope of the current project, but it seems that, as one grows, so does the other.
# **In order to better visualise this relationship, the current project will explore how the two measures map onto each other, for each country individually. This might provide us with some more insight, not only in the relationship between the two measures overall, but also in the relationship that exist between countries.**
# ### Time Series - Multivariate Analysis
sns.scatterplot(data=data_copy, x="Life_expectancy", y="GDP", hue="Country")
plt.ylim()
plt.show()
# The disparity between countries' GDP and Life Expectancy does not allow us to properly observe their individual cases. For example, Zimbabwe, due to having a GDP much lower than the USA and China, and a much lower life expectancy than the rest of the countries, does not map well onto the scatter plot.
# Similarly, Germany, Chile and Mexico, due to having a lower GDP than the other two countries, are hard to visualise, despite having some of the highest Life Expectancies in the dataset.
# **As such, Zimbabwe will be plotted by itself, Germany, Chile and Mexico together, and the USA and China in a separate plot, as their GDP figures otherwise prevent a good examination.**
# #### Zimbabwe
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Zimbabwe"],
y=data_copy.GDP[data_copy.Country == "Zimbabwe"],
)
plt.title("Zimbabwe's GDP vs Life Expectancy")
plt.show()
# Here, we can see that, although the relationship is not linear in the earlier years of the dataset (Zimbabwe's Life Expectancy actually went down in the early-to-mid 2000s, whilst its GDP was somewhat stable), it becomes very linear in the late 2000s and early-to-mid 2010s, when both measures start to increase consistently.
# **Moving on, this relationship will be examined for the three "middle" countries of the dataset.**
# #### Medium GDP countries
# plotting Germany, Chile and Mexico
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Germany"],
y=data_copy.GDP[data_copy.Country == "Germany"],
label="Germany",
)
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Chile"],
y=data_copy.GDP[data_copy.Country == "Chile"],
label="Chile",
)
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Mexico"],
y=data_copy.GDP[data_copy.Country == "Mexico"],
label="Mexico",
)
plt.legend()
plt.title("Medium GDP Countries")
plt.show()
# By plotting it this way, we can now see that the relationship is somewhat linear for these three countries as well. The grouping is not perfect, however: Chile's points look quite flat, indicating that Germany's significantly higher GDP prevents this plot from fully showing the relationship in Chile's case.
# However, this is good enough to reach a good understanding of what this relationship looks like in each of these case-studies.
# Finally, let's explore this relationship in High-GDP countries.
# #### High GDP countries
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "China"],
y=data_copy.GDP[data_copy.Country == "China"],
label="China",
)
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "USA"],
y=data_copy.GDP[data_copy.Country == "USA"],
label="USA",
)
plt.legend()
plt.title("High GDP Countries")
plt.show()
# The USA's Life Expectancy and GDP match almost perfectly in a linear relationship, whereas China's looks more like an exponential relationship rather than linear.
# To finalise this exploration of the data, however, a Pearson's correlation between the two will be run, in order to obtain a single measure.
# ### EDA: Correlation
from scipy.stats import pearsonr
corr, p = pearsonr(data_copy.GDP, data_copy.Life_expectancy)
print(corr, p)
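# To complement the overall coefficient, the same correlation can be broken down per country. The short sketch below is an illustrative addition (it reuses the `data_copy` frame and the imported `pearsonr` from above) and is not part of the original report.
for country in data_copy.Country.unique():
    subset = data_copy[data_copy.Country == country]
    corr_c, p_c = pearsonr(subset.GDP, subset.Life_expectancy)
    print(f"{country}: r = {corr_c:.3f}, p = {p_c:.4f}")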
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### 💖 **Introduction**
# ***
# According to the CDC, heart disease is one of the leading causes of death for people of most races in the US (African Americans, American Indians and Alaska Natives, and white people). About half of all Americans (47%) have at least 1 of 3 key risk factors for heart disease: high blood pressure, high cholesterol, and smoking. Other key indicators include diabetic status, obesity (high BMI), not getting enough physical activity or drinking too much alcohol. Detecting and preventing the factors that have the greatest impact on heart disease is very important in healthcare. Computational developments, in turn, allow the application of machine learning methods to detect "patterns" from the data that can predict a patient's condition.
# Coronary heart disease (CHD) is a major public health concern that affects millions of individuals worldwide. The prevalence of CHD is influenced by multiple lifestyles and behavioral factors, such as sleep cycle, gender, race, age, physical health, BMI, diseases such as diabetes, asthma, and skin cancer, and addictive habits like alcohol addiction, and smoking habits. Several such lifestyle-related factors are known to influence the risk of developing CHD; however, the extent to which these factors contribute is not well understood. Therefore, it is essential to investigate the association of various lifestyle factors. Many factors such as hyperlipidemia, type 2 diabetes, and hypertension are known to be associated with CHD.
# Smoking and high alcohol consumption are important modifiable lifestyle-related factors linked to increased CHD risk. Smoking cessation has been shown to have a long-term impact on CHD prevention. Chronic alcohol use can increase CHD risk by promoting high blood pressure and increasing harmful lipid levels. Sleep disturbances, such as sleep apnea, have also been linked to increased CHD risk. Sleep apnea can lead to hypertension, inflammation, and changes in the autonomic nervous system that may increase the incidence of arrhythmias, heart failure, and CHD. Obesity, diabetes, hypertension, and high cholesterol levels are metabolic risk factors that have been identified as potent contributors to CHD. These factors increase CHD risk by promoting inflammation, insulin resistance, and endothelial dysfunction, which can lead to atherosclerosis. In conclusion, the lifestyle-related factors discussed above have been shown to increase the risk of CHD through different mechanisms. Adopting a healthy lifestyle and modifying these factors can help to reduce the incidence of CHD.
# A cross-sectional, observational study on a larger population would provide insight into the correlation of these factors to CHD. Patients’ medical histories will be utilized to carry out this study and various statistical analyses would reveal the significance of the association. Broadly, patients’ data will be categorized according to their lifestyle habits, community, and environment (habitat), underlying diseases and addictive habits.
# Originally, the dataset come from the CDC and is a major part of the Behavioral Risk Factor Surveillance System (BRFSS), which conducts annual telephone surveys to gather data on the health status of U.S. residents. As the CDC describes: "Established in 1984 with 15 states, BRFSS now collects data in all 50 states as well as the District of Columbia and three U.S. territories. BRFSS completes more than 400,000 adult interviews each year, making it the largest continuously conducted health survey system in the world.". The most recent dataset (as of February 15, 2022) includes data from 2022. It consists of 401,958 rows and 279 columns. The vast majority of columns are questions asked to respondents about their health status, such as "Do you have serious difficulty walking or climbing stairs?" or "Have you smoked at least 100 cigarettes in your entire life?".
# Note that the original dataset of nearly 300 variables has been reduced to just about 20 variables.
# 📌 Description of variables
# - Heart Disease: Respondents that have ever reported having coronary heart disease (CHD) or myocardial infarction (MI).
# - BMI : Body Mass Index (BMI).
# - Smoking : Have you smoked at least 100 cigarettes in your entire life? (Yes or No)
# - AlcoholDrinking : Heavy drinkers (adult men having more than 14 drinks per week and adult women having more than 7 drinks per week)
# - Stroke : (Ever told) (you had) a stroke?
# - PhysicalHealth : Now thinking about your physical health, which includes physical illness and injury, for how many days during the past 30 days was your physical health not good? (0-30 days).
# - MentalHealth : Thinking about your mental health, for how many days during the past 30 days was your mental health not good? (0-30 days).
# - DiffWalking : Do you have serious difficulty walking or climbing stairs?
# - Sex : Are you male or female?
# - AgeCategory: Fourteen-level age category.
# - Race : Imputed race/ethnicity value.
# - Diabetic : (Ever told) (you had) diabetes?
# - PhysicalActivity : Adults who reported doing physical activity or exercise during the past 30 days other than their regular job.
# - GenHealth : Would you say that in general your health is…
# - SleepTime : On average, how many hours of sleep do you get in a 24-hour period?
# - Asthma : (Ever told) (you had) asthma?
# - KidneyDisease : Not including kidney stones, bladder infection or incontinence, were you ever told you had kidney disease?
# - SkinCancer : (Ever told) (you had) skin cancer?
# #### 💖 **Import dataset and libraries**
# ***
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
import statsmodels.api as sm
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.metrics import (
classification_report,
confusion_matrix,
roc_curve,
roc_auc_score,
ConfusionMatrixDisplay,
RocCurveDisplay,
)
import warnings
warnings.filterwarnings("ignore")
rc = {
"axes.facecolor": "#FFF9ED",
"figure.facecolor": "#FFF9ED",
"axes.edgecolor": "#000000",
"grid.color": "#EBEBE7",
"font.family": "serif",
"axes.labelcolor": "#000000",
"xtick.color": "#000000",
"ytick.color": "#000000",
"grid.alpha": 0.4,
}
sns.set(rc=rc)
from colorama import Style, Fore
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
mgt = Style.BRIGHT + Fore.MAGENTA
gld = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL
df = pd.read_csv(
"/kaggle/input/key-indicators-of-heart-disease/heart_2022_Key_indicators.csv"
)
df.head()
df["AgeCategory"].value_counts()
print(df.info())
# #### 💖 **Basic EDA**
# ***
# 🔎 Basic information of data
# - There are 17 predictor variables and 1 target variable.
# - Out of the 17 predictor variables, 4 are numeric.
# - There are 319795 observations.
# - No variables have missing values.
# - The numeric variables are all right skewed.
# 📈 Summary table
# * The summary table shows the data type, the number and percentage of missing values, and the minimum, mean and maximum value for each variable.
desc = pd.DataFrame(df.describe(include="all").transpose())
def summary_stats(df):
print(f"The shape of the data is: {df.shape}")
summary = pd.DataFrame(df.dtypes, columns=["data type"])
summary["Number of missing values"] = df.isnull().sum().values
summary["% of missing values"] = df.isnull().sum().values / len(df) * 100
summary["min value"] = desc["min"].values
summary["mean value"] = desc["mean"].values
summary["max value"] = desc["max"].values
return summary
summary_stats(df)
#
# - There are no null values.
# - The numeric variables seem to be right skewed.
#
skewness = df.skew()
skew_df = pd.DataFrame({"Variable": skewness.index, "Skewness": skewness.values})
skew_df
# * Since the values are all positive, the numeric variables are all right skewed.
# #### 💖 **Exploratory data analysis**
# ***
def plot_count(df: pd.core.frame.DataFrame, col_list: list) -> None:
"""Draws the pie and count plots for categorical variables.
Args:
df: train or test dataframes
col_list: a list of the selected categorical variables.
title_name: 'Train' or 'Test' (default 'Train')
Returns:
subplots of size (len(col_list), 2)
"""
f, ax = plt.subplots(len(col_list), 2, figsize=(10, 4))
plt.subplots_adjust(wspace=0)
s1 = df[col_list].value_counts()
N = len(s1)
outer_sizes = s1
inner_sizes = s1 / N
outer_colors = ["#9E3F00", "#eb5e00", "#ff781f", "#ff9752", "#ff9752"]
inner_colors = ["#ff6905", "#ff8838", "#ffa66b"]
ax[0].pie(
outer_sizes,
colors=outer_colors,
labels=s1.index.tolist(),
startangle=90,
frame=True,
radius=1.3,
explode=([0.05] * (N - 1) + [0.3]),
wedgeprops={"linewidth": 1, "edgecolor": "white"},
textprops={"fontsize": 12, "weight": "bold"},
)
textprops = {"size": 13, "weight": "bold", "color": "white"}
ax[0].pie(
inner_sizes,
colors=inner_colors,
radius=1,
startangle=90,
autopct="%1.f%%",
explode=([0.1] * (N - 1) + [0.3]),
pctdistance=0.8,
textprops=textprops,
)
center_circle = plt.Circle((0, 0), 0.68, color="black", fc="white", linewidth=0)
ax[0].add_artist(center_circle)
x = s1
y = [0, 1]
sns.barplot(x=x, y=y, ax=ax[1], palette="YlOrBr_r", orient="horizontal")
ax[1].spines["top"].set_visible(False)
ax[1].spines["right"].set_visible(False)
ax[1].tick_params(axis="x", which="both", bottom=False, labelbottom=False)
for i, v in enumerate(s1):
ax[1].text(v, i + 0.1, str(v), color="black", fontweight="bold", fontsize=12)
# plt.title(col_list)
plt.setp(ax[1].get_yticklabels(), fontweight="bold")
plt.setp(ax[1].get_xticklabels(), fontweight="bold")
ax[1].set_xlabel(col_list, fontweight="bold", color="black")
ax[1].set_ylabel("count", fontweight="bold", color="black")
plt.tight_layout()
plt.show()
plot_count(df, ["HeartDisease"])
len(df[df["HeartDisease"] == "No"]) / len(df[df["HeartDisease"] == "Yes"])
#
# - The dataset is imbalanced. There are 10 times more negative cases than positive cases.
# - We can apply techniques such as SMOTE and over random sampling to deal with this imbalance.
#
fig, axs = plt.subplots(1, 2, figsize=(12, 6))
sns.countplot(x="Sex", data=df, ax=axs[0])
ax1 = sns.countplot(x="HeartDisease", hue="Sex", data=df, ax=axs[1])
for container in ax1.containers:
ax1.bar_label(container)
ax1.set_ylabel("")
plt.show()
#
# - Roughly equal numbers of males and females.
# - There are around 5000 more males than females with heart disease.
# - According to (1), men have a higher risk of coronary heart disease than women.
#
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
sns.histplot(x="BMI", data=df, kde=True, ax=axs[0, 0])
sns.histplot(x="PhysicalHealth", data=df, kde=True, ax=axs[0, 1])
sns.histplot(x="MentalHealth", data=df, kde=True, ax=axs[1, 0])
sns.histplot(x="SleepTime", data=df, kde=True, ax=axs[1, 1])
axs[0, 0].set(ylabel="")
axs[0, 1].set(ylabel="")
axs[1, 0].set(ylabel="")
axs[1, 1].set(ylabel="")
fig.suptitle("Histogram for all numeric variables")
plt.show()
len(df[df["BMI"] >= 25]) / len(df[df["BMI"] < 25])
#
# - The distribution of BMI resembles a right skewed normal distribution.
# - The BMI of most respondents lie in the overweight and obese range of 25.0 and higher. There are twice as many overweight and obese individuals as underweight and healthy individuals.
# - Most people reported that their physical and mental health were good every day for the past 30 days.
# - Most people sleep between 6 to 9 hours.
#
num_cols = df.select_dtypes(include=["float64", "int64"]).columns.tolist()
figsize = (16, 10)
fig = plt.figure(figsize=figsize)
for idx, col in enumerate(num_cols):
ax = plt.subplot(2, 2, idx + 1)
sns.kdeplot(
data=df,
hue="HeartDisease",
fill=True,
x=col,
palette=["#9E3F00", "red"],
legend=False,
)
ax.set_ylabel("")
ax.spines["top"].set_visible(False),
ax.set_xlabel("")
ax.spines["right"].set_visible(False)
ax.set_title(f"{col}", loc="right", weight="bold", fontsize=20)
fig.suptitle(f"Features vs Target\n\n\n", ha="center", fontweight="bold", fontsize=25)
fig.legend([1, 0], loc="upper center", bbox_to_anchor=(0.5, 0.96), fontsize=25, ncol=3)
plt.tight_layout()
plt.show()
#
# - The distribution between the presence and absence of heart disease differs for each of these variables.
# - These variables therefore have predictive power.
#
def plot_correlation_heatmap(
df: pd.core.frame.DataFrame, title_name: str = "Correlation"
) -> None:
corr = df.corr()
fig, axes = plt.subplots(figsize=(10, 6))
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, mask=mask, linewidths=0.5, cmap="YlOrRd", annot=True)
plt.title(title_name)
plt.show()
plot_correlation_heatmap(df)
#
# - There is no strong correlation between the four numeric variables.
#
cat_cols = df.select_dtypes(include=["object"]).columns.tolist()
plt.figure(figsize=(30, 20))
for idx, column in enumerate(cat_cols):
plt.subplot(3, 5, idx + 1)
sns.countplot(x=column, hue="HeartDisease", data=df, palette="YlOrRd")
plt.title(f"{column} Distribution")
plt.tight_layout()
#
# - Smoking increases the chance of getting heart disease.
# - Surprisingly, there are more heart disease cases among respondents who are not heavy drinkers than among heavy drinkers.
# - White people have a higher chance of getting heart disease.
# #### 💖 **Statistical analysis (ANOVA)**
# ***
# * ANOVA can be used to determine if there is any association between a numeric and a categorical variable.
# - $H_0$: There is no association between the two variables.
# - $H_1$: There is association between the two variables.
# Assumptions for ANOVA model
# - Independence of observations
# - Normal distribution
# - Equal variances
# - The Shapiro-Wilk test can be used to test for normality; a sketch of the equal-variance check (Levene's test) follows the result below.
stats.shapiro(df["BMI"])
#
# - Under a significance level of 0.05, BMI is not normally distributed and hence, ANOVA cannot be used.
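# As a further illustration of the assumptions listed above, the equal-variance assumption can be checked with Levene's test from scipy.stats. This is a hedged sketch added for illustration (it reuses `df` and `stats` from above) and is not part of the original analysis.
stat_lev, p_lev = stats.levene(
    df[df["HeartDisease"] == "Yes"]["BMI"], df[df["HeartDisease"] == "No"]["BMI"]
)
print("Levene's test p-value:", p_lev)
# A small p-value would indicate unequal variances, reinforcing the move to a
# non-parametric test in the next section.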
# #### 💖 **Statistical analysis (Wilcoxon rank sum test)**
# ***
# - A non-parametric statistical test used to compare the medians of two groups.
# - An alternative to the t-test or ANOVA when the assumptions of normality or equal variances are not met.
statistic, p_value = stats.mannwhitneyu(
df[df["HeartDisease"] == "Yes"]["SleepTime"],
df[df["HeartDisease"] == "No"]["SleepTime"],
)
print("p-value:", p_value)
statistic, p_value = stats.mannwhitneyu(
df[df["HeartDisease"] == "Yes"]["BMI"], df[df["HeartDisease"] == "No"]["BMI"]
)
print("p-value:", p_value)
statistic, p_value = stats.mannwhitneyu(
df[df["HeartDisease"] == "Yes"]["PhysicalHealth"],
df[df["HeartDisease"] == "No"]["PhysicalHealth"],
)
print("p-value:", p_value)
statistic, p_value = stats.mannwhitneyu(
df[df["HeartDisease"] == "Yes"]["MentalHealth"],
df[df["HeartDisease"] == "No"]["MentalHealth"],
)
print("p-value:", p_value)
#
# - Under a significance level of 0.1, there exists an association between each numeric variable and the variable "HeartDisease".
# - We will use all the numeric variables in our modelling.
# #### 💖 **Statistical analysis (Two-way contingency table)**
# ***
# - A contingency table uses Pearson's chi-squared test to determine whether there is an association between two categorical variables.
# - $H_0$: There is no association between the two variables.
# - $H_1$: There is association between the two variables.
# - Under a significance level of 0.05, there exists an association between each categorical variable and the variable "HeartDisease".
# - We will use all the categorical variables in our modelling.
#
for cols in cat_cols:
tabs = pd.crosstab(df[cols], df["HeartDisease"])
chi2, p, dof, con_table = stats.chi2_contingency(tabs)
print(f"Analysis for", cols)
print(f"chi-squared = {chi2}\np value= {p}\ndegrees of freedom = {dof}")
print(" ")
# #### 💖 **Feature engineering**
# ***
le = LabelEncoder()
for cols in cat_cols:
df[cols] = le.fit_transform(df[cols])
df.head()
X = df.drop(["HeartDisease"], axis=1)
y = df["HeartDisease"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# #### 💖 **SMOTE**
# ***
# - SMOTE (Synthetic Minority Oversampling Technique) is an oversampling technique that uses a k-nearest-neighbour algorithm to create synthetic data.
# Simply speaking, it works as follows (a small numeric sketch follows this list):
# - Identify the feature vector and its nearest neighbor
# - Compute the distance between the two sample points
# - Multiply the distance with a random number between 0 and 1
# - Identify a new point on the line segment at the computed distance.
# - Repeat the process for identified feature vectors.
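# The tiny NumPy sketch below illustrates the interpolation step described above (steps 2-4). It is a plain illustrative example, not the imblearn implementation used in the next cell.
import numpy as np

x_i = np.array([2.0, 3.0])  # a minority-class sample
x_nn = np.array([4.0, 5.0])  # one of its k nearest minority-class neighbours
gap = np.random.rand()  # random number between 0 and 1
x_synthetic = x_i + gap * (x_nn - x_i)  # new point on the segment between them
print(x_synthetic)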
sm = SMOTE(
sampling_strategy=y_train.mean() / (1 - y_train.mean()) * 10,
random_state=1004,
k_neighbors=5,
n_jobs=-1,
)
X_train, y_train = sm.fit_resample(X_train, y_train.ravel())
print("After OverSampling, the shape of train_X: {}".format(X_train_res.shape))
print("After OverSampling, the shape of train_y: {} \n".format(y_train_res.shape))
print("After OverSampling, counts of label '1': {}".format(sum(y_train == 1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train == 0)))
# #### 💖 **Modelling (Logistic regression)**
# ***
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
# #### 💖 **Evaluation metrics (Logistic regression)**
# ***
# - Recall measures how many of the actual positives the model captures by labelling them as positive (true positives).
# - Recall will be the metric used to select the best model, since there is a high cost associated with false negatives.
# - The macro-averaged metric is computed as the arithmetic mean of the per-class metrics.
# - The weighted-averaged metric is calculated by taking the mean of the per-class metrics while taking each class's support into account. Support refers to the number of actual occurrences of the class in the dataset. (See the sketch below.)
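# The toy example below illustrates the difference between the macro and weighted averages of recall on a small made-up label set; it is purely illustrative and separate from the model evaluation that follows.
from sklearn.metrics import recall_score

toy_true = [0, 0, 0, 0, 1, 1]  # imbalanced toy labels (4 negatives, 2 positives)
toy_pred = [0, 0, 0, 1, 1, 0]
print(recall_score(toy_true, toy_pred, average="macro"))  # unweighted mean of per-class recalls
print(recall_score(toy_true, toy_pred, average="weighted"))  # per-class recalls weighted by support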
print(classification_report(y_test, y_pred))
#
# - The macro average recall is 0.56, which is quite low.
#
confusion_lr = confusion_matrix(y_test, y_pred)
sns.heatmap(confusion_lr, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Actual")
#
# - Many positive cases are classified as negative.
#
roc_auc_score(y_test, y_pred)
#
# - The higher the AUC, the better the model's performance at distinguishing between the positive and negative classes.
# - Logistic regression is not a good model.
# #### 💖 **Modelling (XGBoost)**
# ***
def get_mean_auc(oof: np.array):
"""oof: ['val_idx', 'preds', 'target']"""
oof = pd.DataFrame(
np.concatenate(oof), columns=["id", "preds", "target"]
).set_index("id")
oof.index = oof.index.astype(int)
mean_val_auc = roc_auc_score(oof.target, oof.preds)
return mean_val_auc
FOLDS = 5
SEED = 1004
xgb_models = []
xgb_oof = []
predictions = np.zeros(len(X_test))
f_imp = []
counter = 1
skf = StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=SEED)
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
if (fold + 1) % 5 == 0 or (fold + 1) == 1:
print(f'{"#"*24} Training FOLD {fold+1} {"#"*24}')
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_valid, y_valid = X.iloc[val_idx], y.iloc[val_idx]
watchlist = [(X_train, y_train), (X_valid, y_valid)]
# XGboost model and fit
model = XGBClassifier(
n_estimators=1000, n_jobs=-1, max_depth=4, eta=0.2, colsample_bytree=0.67
)
model.fit(
X_train, y_train, eval_set=watchlist, early_stopping_rounds=300, verbose=0
)
val_preds = model.predict_proba(X_valid)[:, 1]
val_score = roc_auc_score(y_valid, val_preds)
best_iter = model.best_iteration
idx_pred_target = np.vstack(
[val_idx, val_preds, y_valid]
).T # shape(len(val_idx), 3)
    f_imp.append(dict(zip(model.feature_names_in_, model.feature_importances_)))
print(
f'{" "*20} auc:{blu}{val_score:.5f}{res} {" "*6} best iteration :{blu}{best_iter}{res}'
)
xgb_oof.append(idx_pred_target)
xgb_models.append(model)
if val_score > 0.80:
test_preds = model.predict_proba(X_test)[:, 1]
predictions += test_preds
counter += 1
predictions /= counter
mean_val_auc = get_mean_auc(xgb_oof)
print("*" * 45)
print(f"{red}Mean{res} AUC: {red}{mean_val_auc:.5f}{res}")
|
from glob import glob
from sklearn.model_selection import GroupKFold, StratifiedKFold
import cv2
from skimage import io
import torch
from torch import nn
import os
from datetime import datetime
import time
import random
import cv2
import torchvision
from torchvision import transforms
import pandas as pd
import numpy as np
from tqdm import tqdm
from IPython.display import display
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.cuda.amp import autocast, GradScaler
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
import warnings
warnings.filterwarnings("ignore")
import cv2
import pydicom
# from efficientnet_pytorch import EfficientNet
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, confusion_matrix
from pylab import rcParams
rcParams["figure.figsize"] = 20, 5
train_df = pd.read_csv(
"../input/ptb-xl-dataset-reformatted/train_table.csv", index_col=0
)
train_signal = pd.read_pickle(
"../input/ptb-xl-dataset-reformatted/train_12_lead_ecgs.pkl"
)
valid_df = pd.read_csv(
"../input/ptb-xl-dataset-reformatted/valid_table.csv", index_col=0
)
valid_signal = pd.read_pickle(
"../input/ptb-xl-dataset-reformatted/valid_12_lead_ecgs.pkl"
)
test_df = pd.read_csv("../input/ptb-xl-dataset-reformatted/test_table.csv", index_col=0)
test_signal = pd.read_pickle(
"../input/ptb-xl-dataset-reformatted/test_12_lead_ecgs.pkl"
)
print(train_df.shape)
train_df
train_df.shape
class PTBXLDatasetPreprocesser:
def __init__(self):
pass
def fit(self, x, y):
self.superclass_cols = ["NORM", "MI", "STTC", "CD", "HYP"]
self.subclass_cols = [col for col in y.columns if "sub_" in col]
self.meta_num_cols = ["age", "height", "weight"]
self.min_max_scaler = MinMaxScaler().fit(y[self.meta_num_cols])
self.meta_cat_cols = ["sex", "nurse", "device"]
self.cat_lablers = [
LabelEncoder().fit(y[col].fillna("none").astype(str))
for col in self.meta_cat_cols
]
def transform(self, x, y):
ret = []
ret += [x] # signal
y_ = y.copy()
y_[self.meta_num_cols] = self.min_max_scaler.transform(y_[self.meta_num_cols])
ret += [y_[self.meta_num_cols].fillna(-1.0)] # meta num features
for i, col in enumerate(self.meta_cat_cols):
y_[col] = y_[col].fillna("none").astype(str)
y_[col] = self.cat_lablers[i].transform(y_[col])
ret += [y_[self.meta_cat_cols]] # meta cat features
ret += [y[self.superclass_cols].fillna(0).astype(int)] # superclass targets
ret += [y[self.subclass_cols].fillna(0).astype(int)] # subclass targets
return ret
data_preprocessor = PTBXLDatasetPreprocesser()
data_preprocessor.fit(train_signal, train_df)
(
train_signal,
train_meta_num_feats,
train_meta_cat_feats,
train_superclass,
train_subclass,
) = data_preprocessor.transform(train_signal, train_df)
train_signal.shape
display(train_meta_num_feats)
display(train_meta_cat_feats)
display(train_superclass)
display(train_subclass)
(
valid_signal,
valid_meta_num_feats,
valid_meta_cat_feats,
valid_superclass,
valid_subclass,
) = data_preprocessor.transform(valid_signal, valid_df)
(
test_signal,
test_meta_num_feats,
test_meta_cat_feats,
test_superclass,
test_subclass,
) = data_preprocessor.transform(test_signal, test_df)
# ### Train a Pytorch RNN Model
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # keep False when deterministic behaviour is requested
class ECGDataset(Dataset):
def __init__(self, signals, labels=None):
self.signals = signals
self.labels = labels
def __len__(self):
return len(self.signals[:, 0])
def __getitem__(self, idx):
if self.labels is not None:
return self.signals[idx, :], self.labels[idx]
else:
return self.signals[idx, :]
class ECGClassifier(nn.Module):
def __init__(self, hidden_size, n_class):
super().__init__()
self.gru = nn.GRU(1, hidden_size, batch_first=True, bidirectional=True)
self.out = nn.Linear(hidden_size * 4, n_class)
def forward(self, x):
x, _ = self.gru(x.view(x.shape[0], x.shape[1], 1))
avg_pool = torch.mean(x, 1)
max_pool, _ = torch.max(x, 1)
x = torch.cat([avg_pool, max_pool], 1)
x = self.out(x)
return x
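# Quick shape check of the classifier defined above (an illustrative sketch, not part of the original training flow). A batch of 1-D signals of length 187 is assumed here, matching the 187 features used in prepare_dataloader below.
_demo_model = ECGClassifier(hidden_size=64, n_class=5)
_demo_batch = torch.randn(8, 187)  # 8 signals, 187 samples each
print(_demo_model(_demo_batch).shape)  # expected: torch.Size([8, 5])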
def prepare_dataloader(df, trn_idx, val_idx):
from catalyst.data.sampler import BalanceClassSampler
x_train, y_train = (
df.loc[trn_idx].values[:, :187],
df.loc[trn_idx].astype(int).values[:, 187],
)
x_valid, y_valid = (
df.loc[val_idx].values[:, :187],
df.loc[val_idx].astype(int).values[:, 187],
)
print(x_train.shape)
train_ds = ECGDataset(x_train, y_train)
valid_ds = ECGDataset(x_valid, y_valid)
train_loader = torch.utils.data.DataLoader(
train_ds,
batch_size=128,
pin_memory=False,
drop_last=False,
shuffle=True,
num_workers=4,
# sampler=BalanceClassSampler(labels=y_train, mode='downsampling'),
)
val_loader = torch.utils.data.DataLoader(
valid_ds,
batch_size=128,
num_workers=0,
shuffle=False,
pin_memory=False,
)
return train_loader, val_loader
def train_one_epoch(
epoch,
model,
loss_fn,
optimizer,
train_loader,
device,
scheduler=None,
schd_batch_update=False,
):
model.train()
t = time.time()
running_loss = None
pbar = tqdm(enumerate(train_loader), total=len(train_loader))
for step, (signal, labels) in pbar:
signal = signal.to(device).float()
labels = labels.to(device).long()
# print(image_labels.shape, exam_label.shape)
with autocast():
preds = model(signal) # output = model(input)
# print(image_preds.shape, exam_pred.shape)
loss = loss_fn(preds, labels)
scaler.scale(loss).backward()
if running_loss is None:
running_loss = loss.item()
else:
running_loss = running_loss * 0.99 + loss.item() * 0.01
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if scheduler is not None and schd_batch_update:
scheduler.step()
description = f"epoch {epoch} loss: {running_loss:.4f}"
pbar.set_description(description)
if scheduler is not None and not schd_batch_update:
scheduler.step()
def valid_one_epoch(
epoch, model, loss_fn, val_loader, device, scheduler=None, schd_loss_update=False
):
model.eval()
t = time.time()
loss_sum = 0
sample_num = 0
preds_all = []
targets_all = []
pbar = tqdm(enumerate(val_loader), total=len(val_loader))
for step, (signal, labels) in pbar:
signal = signal.to(device).float()
labels = labels.to(device).long()
preds = model(signal) # output = model(input)
# print(image_preds.shape, exam_pred.shape)
preds_all += [torch.argmax(preds, 1).detach().cpu().numpy()]
targets_all += [labels.detach().cpu().numpy()]
loss = loss_fn(preds, labels)
loss_sum += loss.item() * labels.shape[0]
sample_num += labels.shape[0]
description = f"epoch {epoch} loss: {loss_sum/sample_num:.4f}"
pbar.set_description(description)
preds_all = np.concatenate(preds_all)
targets_all = np.concatenate(targets_all)
print(
"validation multi-class accuracy = {:.4f}".format(
(targets_all == preds_all).mean()
)
)
print("validation F1_score = ", f1_score(targets_all, preds_all, average="micro"))
if scheduler is not None:
if schd_loss_update:
scheduler.step(loss_sum / sample_num)
else:
scheduler.step()
return targets_all, preds_all
if __name__ == "__main__":
# for training only, need nightly build pytorch
seed = 719
epochs = 6
stepsize = 2
hidden_sie = 64
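    # NOTE: `labels` below is assumed to be a pandas Series of integer class targets
    # aligned with the rows of train_df; it is not defined in this snippet.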
nclass = labels.nunique()
seed_everything(719)
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed).split(
np.arange(train_df.shape[0]), labels.values
)
for fold, (trn_idx, val_idx) in enumerate(folds):
# we'll train fold 0 first
if fold > 0:
break
print("Training with {} started".format(fold))
print(len(trn_idx), len(val_idx))
train_loader, val_loader = prepare_dataloader(train_df, trn_idx, val_idx)
device = torch.device("cuda:0")
model = ECGClassifier(hidden_sie, nclass).to(device)
scaler = GradScaler()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, gamma=0.1, step_size=stepsize
)
loss_tr = nn.CrossEntropyLoss().to(device)
loss_fn = nn.CrossEntropyLoss().to(device)
for epoch in range(epochs):
train_one_epoch(
epoch,
model,
loss_tr,
optimizer,
train_loader,
device,
scheduler=scheduler,
schd_batch_update=False,
)
with torch.no_grad():
val_targets, val_preds = valid_one_epoch(
epoch,
model,
loss_fn,
val_loader,
device,
scheduler=None,
schd_loss_update=False,
)
torch.save(model.state_dict(), "pytorch_ecg_rnn.pth")
# torch.save(model.cnn_model.state_dict(),'{}/cnn_model_fold_{}_{}'.format(CFG['model_path'], fold, CFG['tag']))
del model, optimizer, train_loader, val_loader, scaler, scheduler
torch.cuda.empty_cache()
pd.Series(val_targets).value_counts(), pd.Series(val_preds).value_counts(),
# ### Predict on Test Set
def predict(df):
x_test = df.values[:, :187]
test_ds = ECGDataset(x_test)
tst_loader = torch.utils.data.DataLoader(
test_ds,
batch_size=128,
num_workers=0,
shuffle=False,
pin_memory=False,
)
device = torch.device("cpu")
model = ECGClassifier(hidden_sie, nclass).to(device)
model.load_state_dict(torch.load("pytorch_ecg_rnn.pth", map_location="cpu"))
preds_all = []
pbar = tqdm(enumerate(tst_loader), total=len(tst_loader))
for step, (signal) in pbar:
signal = signal.to(device).float()
preds = model(signal)
preds_all += [torch.softmax(preds, 1).detach().cpu().numpy()]
return np.concatenate(preds_all, axis=0)
test_df = pd.read_csv("../input/heartbeat/mitbih_test.csv", header=None)
test_labels = test_df[187].astype(int)
test_prob_preds = predict(test_df)
test_pred = np.argmax(test_prob_preds, axis=1)
cm = confusion_matrix(test_labels, test_pred)
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
for i in range(cm.shape[1]):
for j in range(cm.shape[0]):
plt.text(
j, i, format(cm[i, j], ".2f"), horizontalalignment="center", color="black"
)
plt.imshow(cm, cmap=plt.cm.Blues)
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.colorbar()
print("F1_score = ", f1_score(test_labels, test_pred, average="micro"))
# ### Export to onnx
x_test = test_df.values[:, :187]
test_ds = ECGDataset(x_test)
tst_loader = torch.utils.data.DataLoader(
test_ds,
batch_size=1,
num_workers=0,
shuffle=False,
pin_memory=False,
)
device = torch.device("cpu")
model = ECGClassifier(hidden_sie, nclass).to(device)
model.load_state_dict(torch.load("pytorch_ecg_rnn.pth", map_location="cpu"))
pbar = tqdm(enumerate(tst_loader), total=len(tst_loader))
for step, (signal) in pbar:
signal = signal.to(device).float()
preds = model(signal)
break
torch.onnx.export(
model, # model being run
signal, # model input (or a tuple for multiple inputs)
"torch_rnn_ecg_classifier.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=["input"], # the model's input names
output_names=["output"], # the model's output names
dynamic_axes={
"input": {0: "batch_size"}, # variable lenght axes
"output": {0: "batch_size"},
},
)
# ### Test runtime with onnx model
import onnxruntime
ort_session = onnxruntime.InferenceSession("torch_rnn_ecg_classifier.onnx")
def to_numpy(tensor):
return (
tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
)
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(signal)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(preds), ort_outs[0], rtol=1e-03, atol=1e-05)
preds, ort_outs
|
# ## Read Data
import numpy as np
import pandas as pd
train = pd.read_csv(
r"/kaggle/input/test-competition-2783456756923/airline_tweets_train.csv"
)
test = pd.read_csv(
r"/kaggle/input/test-competition-2783456756923/airline_tweets_test.csv"
)
# ## Feature Engineering
# * Feature Extraction
# * Data Cleaning & Preprocessing
# * Feature Selection
# * Feature Encoding
#
# Feature Extraction
train["tweet_length"] = train.text.apply(lambda x: len(x))
train["has_retweets"] = np.where(train.retweet_count > 0, 1, 0)
train["user_timezone"] = np.where(
train.user_timezone.isnull(), "unknown", train.user_timezone
)
# **TASK:**
# 1. Think of an example of a new variable that may help identify the sentiment, for example:
# * 'Does the text contain numbers?'
# * 'How many words are there?'
# * 'How many Twitter accounts are mentioned, e.g. @ignacio?'
# 2. Code this new variable (with the help of GPT if needed)
import re
def contains_numbers(string):
match = re.search(r"\d", string)
if match:
return 1
else:
return 0
contains_numbers("My flight was delayed 5 hours")
train["has_numbers"] = train["text"].apply(contains_numbers)
train.head()
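# As a further illustration of the task above, the hedged sketch below adds two more candidate features from the examples (word count and number of @-mentions). The column names are illustrative assumptions, not part of the original notebook.
train["word_count"] = train["text"].apply(lambda x: len(str(x).split()))
train["mention_count"] = train["text"].apply(lambda x: len(re.findall(r"@\w+", str(x))))
train[["text", "word_count", "mention_count"]].head()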
# ## Models
#
test["Category"][:1000] = "neutral"
test["Category"][1000:2000] = "positive"
test["Category"][2000:] = "negative"
test = test.rename(columns={"airline_sentiment": "Category"})
test.to_csv(r"test_submission.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"../input/edgeiiotset-cyber-security-dataset-of-iot-iiot/Edge-IIoTset dataset/Selected dataset for ML and DL/DNN-EdgeIIoT-dataset.csv",
low_memory=False,
)
from sklearn.utils import shuffle
drop_columns = [
"frame.time",
"ip.src_host",
"ip.dst_host",
"arp.src.proto_ipv4",
"arp.dst.proto_ipv4",
"http.file_data",
"http.request.full_uri",
"icmp.transmit_timestamp",
"http.request.uri.query",
"tcp.options",
"tcp.payload",
"tcp.srcport",
"tcp.dstport",
"udp.port",
"mqtt.msg",
]
df.drop(drop_columns, axis=1, inplace=True)
df.dropna(axis=0, how="any", inplace=True)
df.drop_duplicates(subset=None, keep="first", inplace=True)
df = shuffle(df)
df.isna().sum()
print(df["Attack_type"].value_counts())
print(len(df))
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
def encode_text_dummy(df, name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = f"{name}-{x}"
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
encode_text_dummy(df, "http.request.method")
encode_text_dummy(df, "http.referer")
encode_text_dummy(df, "http.request.version")
encode_text_dummy(df, "dns.qry.name.len")
encode_text_dummy(df, "mqtt.conack.flags")
encode_text_dummy(df, "mqtt.protoname")
encode_text_dummy(df, "mqtt.topic")
df.to_csv("preprocessed_DNN.csv", encoding="utf-8", index=False)
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv("./preprocessed_DNN.csv", low_memory=False)
# df
feat_cols = list(df.columns)
label_col = "Attack_type"
feat_cols.remove(label_col)
# feat_cols
empty_cols = [col for col in df.columns if df[col].isnull().all()]
empty_cols
skip_list = ["icmp.unused", "http.tls_port", "dns.qry.type", "mqtt.msg_decoded_as"]
df[skip_list[3]].value_counts()
X = df.drop([label_col], axis=1)
y = df[label_col]
y.value_counts()
print("Number of samples in X:", X.shape[0])
print("Number of samples in y:", y.shape[0])
from imblearn.over_sampling import RandomOverSampler
import warnings
warnings.filterwarnings("ignore")
minority_classes = ["Port_Scanning", "XSS", "Ransomware", "Fingerprinting", "MITM"]
desired_samples = {
"Port_Scanning": 20000,
"XSS": 20000,
"Ransomware": 20000,
"Fingerprinting": 20000,
"MITM": 20000,
}
mask = df[label_col].isin(minority_classes)
minority_mask = df[label_col].isin(minority_classes)
X_minority = X[minority_mask]
y_minority = y[minority_mask]
oversample = RandomOverSampler(
sampling_strategy={
k: desired_samples[k] if k in desired_samples else "auto"
for k in minority_classes
},
random_state=42,
)
X_oversampled, y_oversampled = oversample.fit_resample(X[mask], y[mask])
# Concatenate the oversampled data with the original data
X_balanced = pd.concat([X, X_oversampled])
y_balanced = pd.concat([y, y_oversampled])
y.value_counts()
y_balanced.value_counts()
# under-sampling didn't work as the number of samples in the minority class is too low
# from sklearn.utils import resample
# import numpy as np
# X_resampled, y_resampled = resample(X[y == 0], y[y == 0],
# replace=False, n_samples=X[y == 1].shape[0],
# random_state=42)
# # Combine majority class with downsampled minority class
# X_balanced = pd.concat([X_resampled, X[y == 1]])
# y_balanced = pd.concat([y_resampled, y[y == 1]])
# print("Number of samples in X_balanced:", X_balanced.shape[0])
# print("Number of samples in y_balanced:", y_balanced.shape[0])
######## working only on MITM ########
# from imblearn.over_sampling import RandomOverSampler
# oversample = RandomOverSampler(sampling_strategy='minority')
# X_oversample,Y_oversample=oversample.fit_resample(X,y)
# # Combine majority class with downsampled minority class
# X_balanced = pd.concat([X_resampled, X[y == 1]])
# y_balanced = pd.concat([y_resampled, y[y == 1]])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X_balanced, y_balanced, test_size=0.2, random_state=1, stratify=y_balanced
)
# del X
# del y
df.shape[0]
odf = df_balanced = pd.concat([X_balanced, y_balanced], axis=1)
odf.shape[0]
print("X_train", X_train)
print("X_test", X_test)
print("y_train", y_train)
print("y_test", y_test)
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(y_train)
y_test = label_encoder.transform(y_test)
label_encoder.classes_
# X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
# X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
import numpy as np
# assuming X_train and X_test are DataFrames
X_train = X_train.values.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test = X_test.values.reshape(X_test.shape[0], X_test.shape[1], 1)
input_shape = X_train.shape[1:]
print(X_train.shape)
print(X_test.shape)
num_classes = len(np.unique(y_train))
num_classes
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, num_classes=num_classes)
y_test = to_categorical(y_test, num_classes=num_classes)
print(y_train.shape, y_test.shape)
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Input, ZeroPadding1D
from tensorflow.keras.layers import MaxPooling1D, Add, AveragePooling1D
from tensorflow.keras.layers import Dense, BatchNormalization, Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model
from keras.initializers import glorot_uniform
import keras.backend as K
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
# input_shape = (96)
print(input_shape)
from keras.layers import Input, Conv1D, MaxPooling1D, UpSampling1D
from keras.models import Model
from keras.layers import concatenate
from keras.layers import Reshape
from keras.layers import Dropout
from keras.models import Model
from keras.layers import (
Input,
Conv1D,
MaxPooling1D,
Flatten,
Dense,
Dropout,
UpSampling1D,
concatenate,
)
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Input, Reshape
from keras.models import Model
# Define input shape
# input_shape = (96, 1)
# Define the encoder model
encoder = Sequential()
encoder.add(Conv1D(32, 3, activation="relu", input_shape=input_shape, padding="same"))
encoder.add(MaxPooling1D(2, padding="same"))
encoder.add(Conv1D(64, 3, activation="relu", padding="same"))
encoder.add(MaxPooling1D(2, padding="same"))
encoder.add(Conv1D(128, 3, activation="relu", padding="same"))
encoder.add(MaxPooling1D(2, padding="same"))
encoder.add(Flatten())
# Define the decoder model
decoder = Sequential()
decoder.add(Reshape((12, 128), input_shape=(1536,)))
decoder.add(Conv1D(128, 3, activation="relu", padding="same"))
decoder.add(UpSampling1D(2))
decoder.add(Conv1D(64, 3, activation="relu", padding="same"))
decoder.add(UpSampling1D(2))
decoder.add(Conv1D(32, 3, activation="relu", padding="same"))
decoder.add(UpSampling1D(2))
decoder.add(Conv1D(1, 3, activation="sigmoid", padding="same"))
# Combine the encoder and decoder models to form the autoencoder
autoencoder = Sequential()
autoencoder.add(encoder)
autoencoder.add(decoder)
# Define the CNN model using the encoder layers
# cnn_model = Sequential()
# cnn_model.add(encoder)
# cnn_model.add(Dense(64, activation='relu'))
# cnn_model.add(Dense(num_classes, activation='softmax'))
# cnn_model.add(Conv1D(32, 3, activation='relu', input_shape=(input_shape)))
cnn_model = Sequential()
cnn_model.add(Conv1D(32, 3, activation="relu", input_shape=input_shape))
cnn_model.add(MaxPooling1D(2))
# cnn_model.add(Conv1D(64, 3, activation='relu', padding='same'))
# cnn_model.add(MaxPooling1D(2, padding='same'))
# cnn_model.add(Conv1D(128, 3, activation='relu', padding='same'))
# cnn_model.add(MaxPooling1D(2, padding='same'))
cnn_model.add(Conv1D(256, 3, activation="relu"))
cnn_model.add(MaxPooling1D(2))
cnn_model.add(Flatten())
cnn_model.add(Dense(64, activation="relu"))
cnn_model.add(Dense(num_classes, activation="softmax"))
# Combine the autoencoder and CNN models
model = Sequential()
model.add(autoencoder)
model.add(cnn_model)
# cnn_input = Input(shape=input_shape)
# #print(cnn_input)
# cnn_layer = Reshape(input_shape)(cnn_input)
# cnn_layer = Conv1D(32, kernel_size=3, activation='relu')(cnn_layer)
# cnn_layer = MaxPooling1D(pool_size=2)(cnn_layer)
# cnn_layer = Flatten()(cnn_layer)
# autoencoder_input = Input(shape=input_shape)
# autoencoder_layer = autoencoder(autoencoder_input)
# autoencoder_layer = Flatten()(autoencoder_layer)
# combined_layer = concatenate([cnn_layer, autoencoder_layer])
# combined_layer = Dense(64, activation='relu')(combined_layer)
# combined_layer = Dropout(0.5)(combined_layer)
# combined_layer = Dense(num_classes, activation='softmax')(combined_layer)
# model = Model(inputs=[cnn_input, autoencoder_input], outputs=combined_layer)
opt = Adam(learning_rate=0.001)
model.compile(
    optimizer=opt, loss=tf.keras.losses.categorical_crossentropy, metrics=["accuracy"]
)
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
early_stopping = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=10)
lr_reduce = ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=5, mode="min", verbose=1, min_lr=0
)
# X_train.shape
EPOCHS = 15
BATCH_SIZE = 256
call_backs = [early_stopping, lr_reduce]
history = model.fit(
X_train,
y_train,
validation_data=(X_test, y_test),
validation_split=0.1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
callbacks=call_backs,
# class_weight=class_weights,
verbose=1,
)
history_df = pd.DataFrame(history.history)
history_df.loc[:, ["loss", "val_loss"]].plot()
print("Minimum validation loss: {}".format(history_df["val_loss"].min()))
# #w,b = model.weights
# print("Weights\n{}\n\nBias\n{}".format(model.weights))
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("Bengaluru_House_Data.csv")
df.head()
df.info()
df.shape
df.isnull().sum()
df.isnull().sum() / df.isnull().sum().sum() * 100
df.describe().T
# Dropping the Society column as it accounts for about 88% of all null values in the dataset and won't add any value to the analysis
df.drop(columns=["society"], axis=1, inplace=True)
df.head(5)
df.groupby("area_type")["area_type"].agg("count").sort_values()
df.groupby("availability")["availability"].agg("count").sort_values()
df.groupby("location")["location"].agg("count").sort_values()
df.groupby("size")["size"].agg("count").sort_values()
df.groupby("balcony")["balcony"].agg("count").sort_values()
df.groupby("bath")["bath"].agg("count").sort_values()
# Univariate Analysis
df.boxplot()
plt.show()
sns.boxplot(data=df, x="bath")
df.info()
sns.pairplot(df)
sns.heatmap(df.corr(), annot=True)
df.duplicated().sum()
df[df.duplicated()]
# dataframe[dataframe['Percentage'] > 80]
df[
(df["area_type"] == "Super built-up Area")
& (df["location"] == "Haralur Road")
& (df["size"] == "3 BHK")
& (df["total_sqft"] == "1464")
]
df.drop_duplicates(inplace=True)
df.duplicated().sum()
# Treating outliers
q1 = df.quantile(0.25)
q3 = df.quantile(0.75)
iqr = q3 - q1
ul = q3 + 1.5 * (iqr)
ll = q1 - 1.5 * (iqr)
print(((df < ll) | (df > ul)).sum())
df.isnull().sum()
# Converting all outliers to null values
df[(df < ll) | (df > ul)] = np.nan
df.isnull().sum()
# Feature engineering and then outlier treatment
df.head()
df1 = df.copy()
df1 = pd.get_dummies(df1, columns=["area_type"], drop_first=True)
df1.head(5)
df1.availability.unique()
df1["availability"] = np.where(
df1["availability"] == "Immediate Possession", "Ready To Move", df1["availability"]
)
df1.availability.unique()
df1["availability"] = np.where(
df1["availability"] != "Ready To Move", "Not Ready To Move", df1["availability"]
)
df1.availability.unique()
df1.head(5)
df1 = pd.get_dummies(df1, columns=["availability"], drop_first=True)
df1.head(5)
df1["bhk"] = df1["size"].apply(lambda x: str(x).split(" ")[0])
df1.head(5)
df1["total_sqft"].unique()
# find any unit/metric changes in the total_sqft column
def is_float(x):
try:
float(x)
except:
return False
return True
df1[~df1["total_sqft"].apply(is_float)]
def fix_range(x):
num = x.split("-")
if len(num) == 2:
return float((float(num[0]) + float(num[1])) / 2)
else:
return x
df1["total_sqft"] = df1["total_sqft"].apply(fix_range)
df1[~df1["total_sqft"].apply(is_float)]
import re
def metrics_conversion(x):
try:
float(x)
except:
num = "".join(re.split(r"[^a-zA-Z]*", x))
if num == "SqMeter":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 10.7639
elif num == "Perch":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 272.25
elif num == "SqYards":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 9
elif num == "Acres":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 43560
elif num == "Cents":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 435.6
elif num == "Guntha":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 1089
elif num == "Grounds":
return float(str("".join(re.split(r"[^0-9_.]*", x))).strip(".")) * 2400.3500
else:
return x
return x
df1["total_sqft"] = df1["total_sqft"].apply(metrics_conversion)
df1[~df1["total_sqft"].apply(is_float)]
df1.head(5)
df2 = df1.copy()
df2.head(5)
df2.drop(columns=["size"], axis=1, inplace=True)
df2.head(5)
df2["location"].value_counts().sort_values()
df2["location"] = df["location"].apply(lambda x: str(x).strip(" "))
df2.head(5)
location_below_10 = df2["location"].value_counts()
location_below_10
location_below_10_list = location_below_10[location_below_10 < 10]
location_below_10_list
df2["location"] = df2["location"].apply(
lambda x: "others" if x in location_below_10_list else x
)
df2["location"].value_counts()
df3 = df2.copy()
df3.head(5)
df3 = pd.get_dummies(df3, columns=["location"], drop_first=True)
df3.head(5)
df3.isnull().sum()
df3.head()
sns.countplot(df["area_type"])
sns.countplot(df["availability"])
sns.countplot(df["bath"])
sns.countplot(df3["bhk"])
sns.countplot(df["balcony"])
# Treating Null values + outliers using imputation method
# Scaling the variables
from sklearn.preprocessing import StandardScaler
Comp_df = df3.copy()
Comp_df.head(5)
Company_X = df3.drop("price", axis=1)
Company_Y = df3["price"]
Company_X.head(5)
df3.isnull().sum().sort_values(ascending=False) / df3.shape[0] * 100
std = StandardScaler()
df3[["total_sqft", "bath", "balcony", "bhk", "price"]] = pd.DataFrame(
std.fit_transform(df3[["total_sqft", "bath", "balcony", "bhk", "price"]])
)
# Scaled_X = pd.DataFrame(scaler.fit_transform(Company_X), columns = Company_X.columns)
df3.head(5)
from sklearn.model_selection import train_test_split
train, test = train_test_split(df3, test_size=0.30, random_state=123)
train.head()
from sklearn.impute import KNNImputer
imput = KNNImputer(n_neighbors=5)
Company_imputed_train = pd.DataFrame(imput.fit_transform(train), columns=train.columns)
Company_imputed_test = pd.DataFrame(imput.transform(test), columns=test.columns)
print(Company_imputed_train.isnull().sum().sum())
print(Company_imputed_test.isnull().sum().sum())
Company_imputed_train.head()
# Data is ready for model development
Company_imputed_train_x = Company_imputed_train.drop(columns=["price"], axis=1)
Company_imputed_train_y = Company_imputed_train["price"]
Company_imputed_test_x = Company_imputed_test.drop(columns=["price"], axis=1)
Company_imputed_test_y = Company_imputed_test["price"]
Company_imputed_train_x.shape
Company_imputed_test_x.shape
Company_imputed_train_y.shape
Company_imputed_test_y.shape
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree
# DecisionTreeRegressor from sklearn.tree
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
lr = LinearRegression()
rf = RandomForestRegressor(random_state=42)
ann = MLPRegressor()
dt = tree.DecisionTreeRegressor(random_state=42)
llr = Lasso()
rdg = Ridge()
models = [lr, rf, ann, dt, llr, rdg]
rmse_train = []
rmse_test = []
scores_train = []
scores_test = []
for i in models:
model = i.fit(Company_imputed_train_x, Company_imputed_train_y)
scores_train.append(model.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test.append(model.score(Company_imputed_test_x, Company_imputed_test_y))
y_train_predict = model.predict(Company_imputed_train_x)
y_test_predict = model.predict(Company_imputed_test_x)
rmse_train.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, i.predict(Company_imputed_train_x)
)
)
)
rmse_test.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, i.predict(Company_imputed_test_x)
)
)
)
results_dataframe = pd.DataFrame(
{
"Train RMSE": rmse_train,
"Test RMSE": rmse_test,
"Train Scores": scores_train,
"Test Scores": scores_test,
},
index=[
"Linear Regression",
"Random Forest Regressor",
"MLP Regressor",
"Decision Tree Regressor",
"Lasso Regressor",
"Ridge Regressor",
],
)
results_dataframe
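# For a quicker read of the baseline results above, the same frame can be ranked by test
# error (a small convenience step, mirroring the sort used later for the tuned models).
results_dataframe.sort_values(by="Test RMSE")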
# ## Hyper tuning parameters
grid_lr = {"normalize": [True, False]}
lr = LinearRegression()
grid_search = GridSearchCV(estimator=lr, param_grid=grid_lr, cv=3)
model = grid_search.fit(Company_imputed_train_x, Company_imputed_train_y)
model.best_params_
rmse_train_t = []
rmse_test_t = []
scores_train_t = []
scores_test_t = []
scores_train_t.append(model.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test_t.append(model.score(Company_imputed_test_x, Company_imputed_test_y))
rmse_train_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, model.predict(Company_imputed_train_x)
)
)
)
rmse_test_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, model.predict(Company_imputed_test_x)
)
)
)
rf = RandomForestRegressor(random_state=42)
grid_rf = {
#'criterion':['squared_error', 'absolute_error', 'friedman_mse', 'poisson'],
"n_estimators": [50, 100, 150],
# 'max_features': ['sqrt', 'log2', None],
# 'max_depth': [3, 6, 9],
"max_leaf_nodes": [3, 6, 9],
}
grid_search = GridSearchCV(estimator=rf, param_grid=grid_rf, cv=3)
modelrf = grid_search.fit(Company_imputed_train_x, Company_imputed_train_y)
modelrf.best_params_
rf = RandomForestRegressor(random_state=42, max_leaf_nodes=9, n_estimators=100)
grid_rf = {
# 'criterion':['squared_error', 'absolute_error', 'friedman_mse', 'poisson'],
# 'n_estimators':[50,100,150],
"max_features": ["sqrt", "log2", None],
# 'max_depth': [3, 6, 9],
# 'max_leaf_nodes': [3, 6, 9],
}
grid_search = GridSearchCV(estimator=rf, param_grid=grid_rf, cv=3)
modelrf = grid_search.fit(Company_imputed_train_x, Company_imputed_train_y)
modelrf.best_params_
rf = RandomForestRegressor(
random_state=42, max_leaf_nodes=9, n_estimators=100, max_features=None
)
grid_rf = {
# 'criterion':['squared_error', 'absolute_error', 'friedman_mse', 'poisson'],
# 'n_estimators':[50,100,150],
# 'max_features': ['sqrt', 'log2', None],
"max_depth": [3, 6, 9],
# 'max_leaf_nodes': [3, 6, 9],
}
grid_search = GridSearchCV(estimator=rf, param_grid=grid_rf, cv=3)
modelrf = grid_search.fit(Company_imputed_train_x, Company_imputed_train_y)
modelrf.best_params_
rf = RandomForestRegressor(
random_state=42, max_leaf_nodes=9, n_estimators=100, max_features=None, max_depth=6
)
grid_rf = {
"criterion": ["squared_error"],
# 'n_estimators':[50,100,150],
# 'max_features': ['sqrt', 'log2', None],
# 'max_depth': [3, 6, 9],
# 'max_leaf_nodes': [3, 6, 9],
}
grid_search = GridSearchCV(estimator=rf, param_grid=grid_rf, cv=3)
modelrf = grid_search.fit(Company_imputed_train_x, Company_imputed_train_y)
modelrf.best_params_
modelrf.score(Company_imputed_train_x, Company_imputed_train_y)
scores_train_t.append(modelrf.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test_t.append(modelrf.score(Company_imputed_test_x, Company_imputed_test_y))
rmse_train_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, modelrf.predict(Company_imputed_train_x)
)
)
)
rmse_test_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, modelrf.predict(Company_imputed_test_x)
)
)
)
param_grid = {
"hidden_layer_sizes": [100, 200, 300, 500],
#'max_iter': [5000,2500,7000,6000],
#'solver': ['sgd','adam'],
#'tol': [0.01]
}
annr = MLPRegressor(
random_state=123,
)
grid_search_mlp = GridSearchCV(estimator=annr, param_grid=param_grid, cv=3)
model_mlp = grid_search_mlp.fit(Company_imputed_train_x, Company_imputed_train_y)
print(grid_search_mlp.best_params_)
param_grid = {
#'hidden_layer_sizes': [100,200,300,500],
#'max_iter': [5000,2500,7000,6000],
"solver": ["sgd", "adam"],
#'tol': [0.01]
}
annr = MLPRegressor(random_state=123, hidden_layer_sizes=100)
grid_search_mlp = GridSearchCV(estimator=annr, param_grid=param_grid, cv=3)
model_mlp = grid_search_mlp.fit(Company_imputed_train_x, Company_imputed_train_y)
print(grid_search_mlp.best_params_)
param_grid = {
#'hidden_layer_sizes': [100,200,300,500],
"max_iter": [5000, 2500, 7000, 6000],
# 'solver': ['sgd','adam'],
#'tol': [0.01]
}
annr = MLPRegressor(random_state=123, hidden_layer_sizes=100, solver="sgd", tol=0.01)
grid_search_mlp = GridSearchCV(estimator=annr, param_grid=param_grid, cv=3)
model_mlp = grid_search_mlp.fit(Company_imputed_train_x, Company_imputed_train_y)
print(grid_search_mlp.best_params_)
scores_train_t.append(model_mlp.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test_t.append(model_mlp.score(Company_imputed_test_x, Company_imputed_test_y))
rmse_train_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, model_mlp.predict(Company_imputed_train_x)
)
)
)
rmse_test_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, model_mlp.predict(Company_imputed_test_x)
)
)
)
param_grid = {
"max_depth": [10, 15, 20, 25, 30],
"min_samples_leaf": [3, 15, 30],
"min_samples_split": [15, 30, 35, 40, 50],
}
dtr = tree.DecisionTreeRegressor(random_state=123)
grid_search_dt = GridSearchCV(estimator=dtr, param_grid=param_grid, cv=3)
model_dt = grid_search_dt.fit(Company_imputed_train_x, Company_imputed_train_y)
print(model_dt.best_params_)
scores_train_t.append(model_dt.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test_t.append(model_dt.score(Company_imputed_test_x, Company_imputed_test_y))
rmse_train_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, model_dt.predict(Company_imputed_train_x)
)
)
)
rmse_test_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, model_dt.predict(Company_imputed_test_x)
)
)
)
param_grid = {"alpha": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]}
lasso = Lasso()
gridsearch_ll = GridSearchCV(estimator=lasso, param_grid=param_grid, cv=3)
model_ll = gridsearch_ll.fit(Company_imputed_train_x, Company_imputed_train_y)
print(model_ll.best_params_)
scores_train_t.append(model_ll.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test_t.append(model_ll.score(Company_imputed_test_x, Company_imputed_test_y))
rmse_train_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, model_ll.predict(Company_imputed_train_x)
)
)
)
rmse_test_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, model_ll.predict(Company_imputed_test_x)
)
)
)
param_grid = {"alpha": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]}
ridge = Ridge(random_state=42)
gridsearch_rd = GridSearchCV(estimator=ridge, param_grid=param_grid, cv=3)
model_rd = gridsearch_rd.fit(Company_imputed_train_x, Company_imputed_train_y)
print(model_rd.best_params_)
scores_train_t.append(model_rd.score(Company_imputed_train_x, Company_imputed_train_y))
scores_test_t.append(model_rd.score(Company_imputed_test_x, Company_imputed_test_y))
rmse_train_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_train_y, model_rd.predict(Company_imputed_train_x)
)
)
)
rmse_test_t.append(
np.sqrt(
mean_squared_error(
Company_imputed_test_y, model_rd.predict(Company_imputed_test_x)
)
)
)
results_dataframe_Tuned = pd.DataFrame(
{
"Train RMSE": rmse_train_t,
"Test RMSE": rmse_test_t,
"Train Scores": scores_train_t,
"Test Scores": scores_test_t,
},
index=[
"Linear Regression-Tuned",
"Random Forest Regressor-Tuned",
"MLP Regressor-Tuned",
"Decision Tree Regressor-Tuned",
"Lasso Regressor-Tuned",
"Ridge Regressor-Tuned",
],
)
results_dataframe_Tuned.sort_values(by="Train RMSE")
model_dt.score(Company_imputed_test_x, Company_imputed_test_y)
dtr = tree.DecisionTreeRegressor(
max_depth=10, min_samples_leaf=30, min_samples_split=15, random_state=123
)
model_dt = dtr.fit(Company_imputed_train_x, Company_imputed_train_y)
Feature_Imp = pd.DataFrame(
model_dt.feature_importances_,
columns=["Imp"],
index=Company_imputed_train_x.columns,
).sort_values("Imp", ascending=False)
Feature_Imp[Feature_Imp["Imp"] > 0].sort_values(by="Imp", ascending=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
# print(os.path.join(dirname, filename))
pass
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# from .cycle_gan_model import CycleGANModel
# https://youtu.be/VzIO5_R9XEM
# https://youtu.be/2MSGnkir9ew
"""
cycleGAN model
Based on the code by Jason Brownlee from his blogs on https://machinelearningmastery.com/
I am adapting his code to various applications but original credit goes to Jason.
The model uses instance normalization layer:
Normalize the activations of the previous layer at each step,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Standardizes values on each output feature map rather than across features in a batch.
Download instance normalization code from here: https://github.com/keras-team/keras-contrib/blob/master/keras_contrib/layers/normalization/instancenormalization.py
Or install keras_contrib using guidelines here: https://github.com/keras-team/keras-contrib
"""
#
from IPython.display import FileLink
from random import random
from numpy import load
from numpy import zeros
from numpy import ones
from numpy import asarray
from numpy.random import randint
from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from tensorflow.keras.layers import Input
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
# from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
# Download instance norm. code from the link above.
# Or install keras_contrib using guidelines here: https://github.com/keras-team/keras-contrib
# from instancenormalization import InstanceNormalization
from keras_contrib.layers.normalization.instancenormalization import (
InstanceNormalization,
)
from matplotlib import pyplot
# discriminator model (70x70 patchGAN)
# C64-C128-C256-C512
# After the last layer, conv to 1-dimensional output, followed by a Sigmoid function.
# The “axis” argument is set to -1 for instance norm. to ensure that features are normalized per feature map.
def define_discriminator(image_shape):
# weight initialization
init = RandomNormal(stddev=0.02)
# source image input
in_image = Input(shape=image_shape)
# C64: 4x4 kernel Stride 2x2
d = Conv2D(64, (4, 4), strides=(2, 2), padding="same", kernel_initializer=init)(
in_image
)
d = LeakyReLU(alpha=0.2)(d)
# C128: 4x4 kernel Stride 2x2
d = Conv2D(128, (4, 4), strides=(2, 2), padding="same", kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# C256: 4x4 kernel Stride 2x2
d = Conv2D(256, (4, 4), strides=(2, 2), padding="same", kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# C512: 4x4 kernel Stride 2x2
# Not in the original paper. Comment this block if you want.
d = Conv2D(512, (4, 4), strides=(2, 2), padding="same", kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# second last output layer : 4x4 kernel but Stride 1x1
d = Conv2D(512, (4, 4), padding="same", kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# patch output
patch_out = Conv2D(1, (4, 4), padding="same", kernel_initializer=init)(d)
# define model
model = Model(in_image, patch_out)
# compile model
# The model is trained with a batch size of one image and Adam opt.
# with a small learning rate and 0.5 beta.
# The loss for the discriminator is weighted by 50% for each model update.
# This slows down changes to the discriminator relative to the generator model during training.
model.compile(loss="mse", optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5])
return model
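# Quick sanity check (illustrative and optional): with the four stride-2 blocks above, a
# 256x256x3 input should produce a 16x16x1 patch output.
_d_check = define_discriminator((256, 256, 3))
print(_d_check.output_shape)  # expected: (None, 16, 16, 1)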
# generator a resnet block to be used in the generator
# residual block that contains two 3 × 3 convolutional layers with the same number of filters on both layers.
def resnet_block(n_filters, input_layer):
# weight initialization
init = RandomNormal(stddev=0.02)
# first convolutional layer
g = Conv2D(n_filters, (3, 3), padding="same", kernel_initializer=init)(input_layer)
g = InstanceNormalization(axis=-1)(g)
g = Activation("relu")(g)
# second convolutional layer
g = Conv2D(n_filters, (3, 3), padding="same", kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
# concatenate merge channel-wise with input layer
g = Concatenate()([g, input_layer])
return g
# define the generator model - encoder-decoder type architecture
# c7s1-k denote a 7×7 Convolution-InstanceNorm-ReLU layer with k filters and stride 1.
# dk denotes a 3 × 3 Convolution-InstanceNorm-ReLU layer with k filters and stride 2.
# Rk denotes a residual block that contains two 3 × 3 convolutional layers
# uk denotes a 3 × 3 fractional-strided-Convolution InstanceNorm-ReLU layer with k filters and stride 1/2
# The network with 6 residual blocks consists of:
# c7s1-64,d128,d256,R256,R256,R256,R256,R256,R256,u128,u64,c7s1-3
# The network with 9 residual blocks consists of:
# c7s1-64,d128,d256,R256,R256,R256,R256,R256,R256,R256,R256,R256,u128, u64,c7s1-3
def define_generator(image_shape, n_resnet=9):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# c7s1-64
g = Conv2D(64, (7, 7), padding="same", kernel_initializer=init)(in_image)
g = InstanceNormalization(axis=-1)(g)
g = Activation("relu")(g)
# d128
g = Conv2D(128, (3, 3), strides=(2, 2), padding="same", kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation("relu")(g)
# d256
g = Conv2D(256, (3, 3), strides=(2, 2), padding="same", kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation("relu")(g)
# R256
for _ in range(n_resnet):
g = resnet_block(256, g)
# u128
g = Conv2DTranspose(
128, (3, 3), strides=(2, 2), padding="same", kernel_initializer=init
)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation("relu")(g)
# u64
g = Conv2DTranspose(
64, (3, 3), strides=(2, 2), padding="same", kernel_initializer=init
)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation("relu")(g)
# c7s1-3
g = Conv2D(3, (7, 7), padding="same", kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
out_image = Activation("tanh")(g)
# define model
model = Model(in_image, out_image)
return model
# define a composite model for updating generators by adversarial and cycle loss
# We define a composite model that will be used to train each generator separately.
def define_composite_model(g_model_1, d_model, g_model_2, image_shape):
# Make the generator of interest trainable as we will be updating these weights.
# by keeping other models constant.
# Remember that we use this same function to train both generators,
# one generator at a time.
g_model_1.trainable = True
# mark discriminator and second generator as non-trainable
d_model.trainable = False
g_model_2.trainable = False
# adversarial loss
input_gen = Input(shape=image_shape)
gen1_out = g_model_1(input_gen)
output_d = d_model(gen1_out)
# identity loss
input_id = Input(shape=image_shape)
output_id = g_model_1(input_id)
# cycle loss - forward
output_f = g_model_2(gen1_out)
# cycle loss - backward
gen2_out = g_model_2(input_id)
output_b = g_model_1(gen2_out)
# define model graph
model = Model([input_gen, input_id], [output_d, output_id, output_f, output_b])
# define the optimizer
opt = Adam(lr=0.0002, beta_1=0.5)
# compile model with weighting of least squares loss and L1 loss
model.compile(
loss=["mse", "mae", "mae", "mae"], loss_weights=[1, 5, 10, 10], optimizer=opt
)
return model
# load and prepare training images
def load_real_samples(filename):
# load the dataset
data = load(filename)
# unpack arrays
X1, X2 = data["arr_0"], data["arr_1"]
# scale from [0,255] to [-1,1]
X1 = (X1 - 127.5) / 127.5
X2 = (X2 - 127.5) / 127.5
return [X1, X2]
# select a batch of random samples, returns images and target
# Remember that for real images the label (y) is 1.
def generate_real_samples(dataset, n_samples, patch_shape):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = ones((n_samples, patch_shape, patch_shape, 1))
return X, y
# generate a batch of images, returns images and targets
# Remember that for fake images the label (y) is 0.
def generate_fake_samples(g_model, dataset, patch_shape):
# generate fake images
X = g_model.predict(dataset)
# create 'fake' class labels (0)
y = zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# periodically save the generator models to file
def save_models(step, g_model_AtoB, g_model_BtoA):
# save the first generator model
filename1 = "g_model_AtoB_%06d.h5" % (step + 1)
g_model_AtoB.save(filename1)
# save the second generator model
filename2 = "g_model_BtoA_%06d.h5" % (step + 1)
g_model_BtoA.save(filename2)
print(">Saved: %s and %s" % (filename1, filename2))
# periodically generate images using the save model and plot input and output images
def summarize_performance(step, g_model, trainX, name, n_samples=5):
# select a sample of input images
X_in, _ = generate_real_samples(trainX, n_samples, 0)
# generate translated images
X_out, _ = generate_fake_samples(g_model, X_in, 0)
# scale all pixels from [-1,1] to [0,1]
X_in = (X_in + 1) / 2.0
X_out = (X_out + 1) / 2.0
# plot real images
for i in range(n_samples):
pyplot.subplot(2, n_samples, 1 + i)
pyplot.axis("off")
pyplot.imshow(X_in[i])
# plot translated image
for i in range(n_samples):
pyplot.subplot(2, n_samples, 1 + n_samples + i)
pyplot.axis("off")
pyplot.imshow(X_out[i])
# save plot to file
filename1 = "%s_generated_plot_%06d.png" % (name, (step + 1))
pyplot.savefig(filename1)
pyplot.close()
# update image pool for fake images to reduce model oscillation
# update discriminators using a history of generated images
# rather than the ones produced by the latest generators.
# Original paper recommended keeping an image buffer that stores
# the 50 previously created images.
def update_image_pool(pool, images, max_size=50):
selected = list()
for image in images:
if len(pool) < max_size:
# stock the pool
pool.append(image)
selected.append(image)
elif random() < 0.5:
# use image, but don't add it to the pool
selected.append(image)
else:
# replace an existing image and use replaced image
ix = randint(0, len(pool))
selected.append(pool[ix])
pool[ix] = image
return asarray(selected)
# train cyclegan models
def train(
d_model_A,
d_model_B,
g_model_AtoB,
g_model_BtoA,
c_model_AtoB,
c_model_BtoA,
dataset,
epochs=1,
):
# define properties of the training run
(
n_epochs,
n_batch,
) = (
epochs,
1,
) # batch size fixed to 1 as suggested in the paper
# determine the output square shape of the discriminator
n_patch = d_model_A.output_shape[1]
# unpack dataset
trainA, trainB = dataset
# prepare image pool for fake images
poolA, poolB = list(), list()
# calculate the number of batches per training epoch
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
for i in range(n_steps):
# select a batch of real samples from each domain (A and B)
X_realA, y_realA = generate_real_samples(trainA, n_batch, n_patch)
X_realB, y_realB = generate_real_samples(trainB, n_batch, n_patch)
# generate a batch of fake samples using both B to A and A to B generators.
X_fakeA, y_fakeA = generate_fake_samples(g_model_BtoA, X_realB, n_patch)
X_fakeB, y_fakeB = generate_fake_samples(g_model_AtoB, X_realA, n_patch)
        # update fake images in the pool. Remember that the paper suggests a buffer of 50 images
X_fakeA = update_image_pool(poolA, X_fakeA)
X_fakeB = update_image_pool(poolB, X_fakeB)
# update generator B->A via the composite model
g_loss2, _, _, _, _ = c_model_BtoA.train_on_batch(
[X_realB, X_realA], [y_realA, X_realA, X_realB, X_realA]
)
# update discriminator for A -> [real/fake]
dA_loss1 = d_model_A.train_on_batch(X_realA, y_realA)
dA_loss2 = d_model_A.train_on_batch(X_fakeA, y_fakeA)
# update generator A->B via the composite model
g_loss1, _, _, _, _ = c_model_AtoB.train_on_batch(
[X_realA, X_realB], [y_realB, X_realB, X_realA, X_realB]
)
# update discriminator for B -> [real/fake]
dB_loss1 = d_model_B.train_on_batch(X_realB, y_realB)
dB_loss2 = d_model_B.train_on_batch(X_fakeB, y_fakeB)
# summarize performance
# Since our batch size =1, the number of iterations would be same as the size of our dataset.
# In one epoch you'd have iterations equal to the number of images.
# If you have 100 images then 1 epoch would be 100 iterations
print(
"Iteration>%d, dA[%.3f,%.3f] dB[%.3f,%.3f] g[%.3f,%.3f]"
% (i + 1, dA_loss1, dA_loss2, dB_loss1, dB_loss2, g_loss1, g_loss2)
)
        # evaluate the model performance periodically
        # With batch size 1, one epoch equals one pass over the dataset, so this summary
        # runs once per epoch (e.g. every 100 iterations if the dataset has 100 images).
if (i + 1) % (bat_per_epo * 1) == 0:
# plot A->B translation
summarize_performance(i, g_model_AtoB, trainA, "AtoB")
# plot B->A translation
summarize_performance(i, g_model_BtoA, trainB, "BtoA")
if (i + 1) % (bat_per_epo * 5) == 0:
            # save the models
            # With batch size 1, the models are saved every 5 epochs
            # (e.g. every 500 iterations if the dataset has 100 images).
save_models(i, g_model_AtoB, g_model_BtoA)
# https://youtu.be/VzIO5_R9XEM
# https://youtu.be/2MSGnkir9ew
"""
Cycle GAN: Sreeni2Zombie
Based on the code by Jason Brownlee from his blogs on https://machinelearningmastery.com/
I am adapting his code to various applications but original credit goes to Jason.
Dataset from https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/
"""
# monet2photo
from os import listdir
from numpy import asarray
from numpy import vstack
from tensorflow.keras.utils import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from matplotlib import pyplot as plt
# load all images in a directory into memory
def load_images(path, size=(256, 256)):
data_list = list()
# enumerate filenames in directory, assume all are images
for filename in listdir(path):
# load and resize the image
pixels = load_img(path + filename, target_size=size)
# convert to numpy array
pixels = img_to_array(pixels)
# store
data_list.append(pixels)
return asarray(data_list)
# dataset path
path = "/kaggle/input/plant-village/PlantVillage"
# load dataset A - healthy potato leaf images
dataA_all = load_images(path + "/Potato___healthy/")
print("Loaded dataA: ", dataA_all.shape)
from sklearn.utils import resample
# To get a subset of all images, for faster training during demonstration
dataA = resample(dataA_all, replace=True, n_samples=75, random_state=42)
# load dataset B - late blight potato leaf images
dataB_all = load_images(path + "/Potato___Late_blight/")
print("Loaded dataB: ", dataB_all.shape)
# Get a subset of all images, for faster training during demonstration
# We could have just read the list of files and only load a subset, better memory management.
dataB = resample(dataB_all, replace=True, n_samples=75, random_state=42)
# plot source images
n_samples = 3
for i in range(n_samples):
plt.subplot(2, n_samples, 1 + i)
plt.axis("off")
plt.imshow(dataA[i].astype("uint8"))
# plot target image
for i in range(n_samples):
plt.subplot(2, n_samples, 1 + n_samples + i)
plt.axis("off")
plt.imshow(dataB[i].astype("uint8"))
plt.show()
# load image data
data = [dataA, dataB]
print("Loaded", data[0].shape, data[1].shape)
# Preprocess data to change input range to values between -1 and 1
# This is because the generator uses tanh activation in the output layer
# And tanh ranges between -1 and 1
def preprocess_data(data):
# load compressed arrays
# unpack arrays
X1, X2 = data[0], data[1]
# scale from [0,255] to [-1,1]
X1 = (X1 - 127.5) / 127.5
X2 = (X2 - 127.5) / 127.5
return [X1, X2]
dataset = preprocess_data(data)
# from cycleGAN_model import define_generator, define_discriminator, define_composite_model, train
# define input shape based on the loaded dataset
image_shape = dataset[0].shape[1:]
# generator: A -> B
g_model_AtoB = define_generator(image_shape)
# generator: B -> A
g_model_BtoA = define_generator(image_shape)
# discriminator: A -> [real/fake]
d_model_A = define_discriminator(image_shape)
# discriminator: B -> [real/fake]
d_model_B = define_discriminator(image_shape)
# composite: A -> B -> [real/fake, A]
c_model_AtoB = define_composite_model(
g_model_AtoB, d_model_B, g_model_BtoA, image_shape
)
# composite: B -> A -> [real/fake, B]
c_model_BtoA = define_composite_model(
g_model_BtoA, d_model_A, g_model_AtoB, image_shape
)
from datetime import datetime
start1 = datetime.now()
# train models
train(
d_model_A,
d_model_B,
g_model_AtoB,
g_model_BtoA,
c_model_AtoB,
c_model_BtoA,
dataset,
epochs=50,
)
stop1 = datetime.now()
# Execution time of the model
execution_time = stop1 - start1
print("Execution time is: ", execution_time)
# FileLink(r'/kaggle/working/g_model_AtoB_000002.h5')
############################################
from keras_contrib.layers.normalization.instancenormalization import (
InstanceNormalization,
)
# Use the saved cyclegan models for image translation
from keras.models import load_model
from matplotlib import pyplot
from numpy.random import randint
# select a random sample of images from the dataset
def select_sample(dataset, n_samples):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
return X
# plot the image, its translation, and the reconstruction
def show_plot(imagesX, imagesY1, imagesY2):
images = vstack((imagesX, imagesY1, imagesY2))
titles = ["Real", "Generated", "Reconstructed"]
# scale from [-1,1] to [0,1]
images = (images + 1) / 2.0
# plot images row by row
for i in range(len(images)):
# define subplot
pyplot.subplot(1, len(images), 1 + i)
# turn off axis
pyplot.axis("off")
# plot raw pixel data
pyplot.imshow(images[i])
# title
pyplot.title(titles[i])
pyplot.show()
# load dataset
A_data = resample(
dataA_all, replace=True, n_samples=50, random_state=42
) # reproducible results
B_data = resample(
dataB_all, replace=True, n_samples=50, random_state=42
) # reproducible results
A_data = (A_data - 127.5) / 127.5
B_data = (B_data - 127.5) / 127.5
# load the models
cust = {"InstanceNormalization": InstanceNormalization}
model_AtoB = load_model("/kaggle/working/g_model_AtoB_003375.h5", cust)
model_BtoA = load_model("/kaggle/working/g_model_BtoA_003375.h5", cust)
# plot A->B->A (healthy to late blight to healthy)
A_real = select_sample(A_data, 1)
B_generated = model_AtoB.predict(A_real)
A_reconstructed = model_BtoA.predict(B_generated)
show_plot(A_real, B_generated, A_reconstructed)
# plot B->A->B (late blight to healthy to late blight)
B_real = select_sample(B_data, 1)
A_generated = model_BtoA.predict(B_real)
B_reconstructed = model_AtoB.predict(A_generated)
show_plot(B_real, A_generated, B_reconstructed)
|
import numpy as np
import pandas as pd
from time import time
# import pytorch and set dgl backend to pytorch
import os
os.environ["DGLBACKEND"] = "pytorch"
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
    import dgl
except ModuleNotFoundError:
    # the original notebook presumably installed dgl at this point before importing it again
    import dgl
import networkx as nx
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
# try:
# from torchsummary import summary
# except ModuleNotFoundError:
# !pip install torchsummary
# from torchsummary import summary
transactions_df = pd.read_csv(
"/kaggle/input/ieee-fraud-detection/train_transaction.csv"
)
num_transactions = transactions_df.shape[0]
print(num_transactions)
transactions_df.head()
transactions_df.isFraud.value_counts(normalize=True)
identity_df = pd.read_csv("/kaggle/input/ieee-fraud-detection/train_identity.csv")
# create validation set
# user set the ratio
TRAIN_VAL_RATIO = 0.75
# determine number of training records
n_train = int(transactions_df.shape[0] * TRAIN_VAL_RATIO)
# train/val split : split by time, training set precedes val set.
train_ids = transactions_df.TransactionID[:n_train]
val_ids = transactions_df.TransactionID[n_train:]
id_cols = [
"card1",
"card2",
"card3",
"card4",
"card5",
"card6",
"ProductCD",
"addr1",
"addr2",
"P_emaildomain",
"R_emaildomain",
]
cat_cols = ["M1", "M2", "M3", "M4", "M5", "M6", "M7", "M8", "M9"]
# get features and labels
transactions_non_features = ["isFraud", "TransactionDT"] + id_cols
features_cols = [
col for col in transactions_df.columns if col not in transactions_non_features
]
features_df = pd.get_dummies(transactions_df[features_cols], columns=cat_cols).fillna(0)
# take log of transaction amount
features_df["TransactionAmt"] = features_df["TransactionAmt"].apply(np.log10)
features_df["TransactionAmt"].plot(
kind="hist", bins=[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4]
)
plt.show()
labels_df = transactions_df[["TransactionID", "isFraud"]]
node_types = id_cols + list(identity_df.columns)
node_types.remove("TransactionID")
full_identity_df = identity_df.merge(
transactions_df[id_cols + ["TransactionID"]], on="TransactionID", how="right"
)
edge_dfs = {}
for ntype in node_types:
edge_dfs[ntype] = full_identity_df[["TransactionID", ntype]].dropna()
edge_dfs["card4"]["card4"].unique()
# # General idea
# Our graph will have nodes of several types. The 'target' nodes will store the `TransactionID`, while the remaining node types will be the other columns of `full_identity_df`, or equivalently, the entries in the list `node_types`.
# We add edges (bidirectional) linking a TransactionID with a corresponding ID feature.
# The target nodes will have associated features, coming from `features_df`.
# ## Build dictionary of ID to dgl node index.
# Will build a dictionary of dicts, one dict for each node type.
# initialize the dictionary to store each ID to dgl node index dictionary
id_to_node = {}
# First get dgl indices for TransactionID nodes
id_to_node["target"] = dict(
[(v, k) for k, v in dict(transactions_df["TransactionID"]).items()]
)
# current_max = max(id_to_node['target'].values())
# Then cycle through the other ID types and add those to the list (dict)
for ntype in node_types:
new_nodes_ids = edge_dfs[ntype][ntype].unique()
    new_nodes_dgl = np.arange(len(new_nodes_ids))
id_to_node[ntype] = {a: b for a, b in zip(new_nodes_ids, new_nodes_dgl)}
# current_max = max(id_to_node[ntype].values())
id_to_node["card4"]
# ## Build edge lists for each pair of node types
# These will come from the `edge_dfs`. Whenever a transaction is linked to a particular value in a given id column, we connect the corresponding nodes by an edge.
# For DGL, we need to express the edge type as triples:
# * (source type, relation type, destination type).
# To list the edges of each type there are two options; the one used here is to provide two lists of equal length, the source nodes and the corresponding destination nodes (a tiny toy example follows below):
# * [(source nodes, destination nodes)]
# # ??? do we want to add self-loops ???
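# A tiny toy example of the structure built below (illustration only): two "target"
# transactions that share a single "card4" value, connected by edges in both directions.
_toy_g = dgl.heterograph(
    {
        ("target", "target<>card4", "card4"): ([0, 1], [0, 0]),
        ("card4", "card4<>target", "target"): ([0, 0], [0, 1]),
    },
    num_nodes_dict={"target": 2, "card4": 1},
)
print(_toy_g)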
# instantiate the edge list dictionary
edgelists = {}
num_nodes_dict = {}
for ntype in node_types:
# prepare each edge type triple and its reverse
edge_type = ("target", "target<>" + ntype, ntype)
rev_edge_type = (ntype, ntype + "<>target", "target")
# get list of initial nodes and destination nodes
source_nodes = (
edge_dfs[ntype]["TransactionID"]
.apply(lambda a: id_to_node["target"][a])
.to_numpy()
)
destination_nodes = (
edge_dfs[ntype][ntype].apply(lambda a: id_to_node[ntype][a]).to_numpy()
)
# add to dict
edgelists[edge_type] = (source_nodes, destination_nodes)
edgelists[rev_edge_type] = (destination_nodes, source_nodes)
# get number of nodes of this type
num_nodes_dict[ntype] = len(np.unique(destination_nodes))
num_nodes_dict["target"] = num_transactions
# create the graph
g = dgl.heterograph(edgelists, num_nodes_dict)
# Visualizing the metagraph (sanity check: it should be star-shaped, with the target node in the center and a node for each id feature)
meta = g.metagraph()
nx.draw(meta)
# ## Adding features
# The graph currently has no features associated to nodes. We will use `features_df` created above to add a feature vector to each target node.
features_df.head()
# create pytorch tensor consisting of features for each node
feature_tensor = torch.from_numpy(features_df.drop("TransactionID", axis=1).to_numpy())
# add feature data to graph
g.nodes["target"].data["features"] = feature_tensor
# sanity check
# for random target node, compare expected features with the feature of the node
trial = 1515
# data from the table
orig_feat = torch.from_numpy(features_df.iloc[trial, 1:].to_numpy())
# data from the graph
graph_feat = g.ndata["features"]["target"][trial]
assert max(orig_feat - graph_feat) == 0
print("Feature vectors match. Check is good!")
# ## Get the masks and labels ready
# * Convert the training and validation id lists to lists of dgl node indices.
# * Convert the label df to a list.
train_mask = [id_to_node["target"][x] for x in train_ids]
val_mask = [id_to_node["target"][x] for x in val_ids]
labels = torch.tensor(labels_df["isFraud"].to_numpy()).float()
# ## Feature normalization
mean = torch.mean(g.ndata["features"]["target"], axis=0)
std = torch.sqrt(
torch.sum((g.ndata["features"]["target"] - mean) ** 2, axis=0)
/ g.ndata["features"]["target"].shape[0]
)
g.ndata["features"]["target"] = (g.ndata["features"]["target"] - mean) / std
# # Build the model
# For now, keep it as simple as possible.
from dgl.nn.pytorch import HeteroGraphConv, HeteroEmbedding, GraphConv
from torch.nn import Linear
target_feature_dim = g.ndata["features"]["target"].shape[1]
# create linear embeddings for the non-target nodes into R^in_dim space
num_embeddings_dict = {
src: g.num_nodes(src)
for (src, etype, dst) in g.canonical_etypes
if (dst == "target" and src != "target")
}
# define a NN for preprocessing the target data
class target_preprocessing(nn.Module):
def __init__(self, in_dim, hidden_dim, out_dim, n_layers):
super().__init__()
self.input_layer = Linear(in_dim, hidden_dim)
self.hidden_layer = Linear(hidden_dim, hidden_dim)
self.output_layer = Linear(hidden_dim, out_dim)
self.n_layers = n_layers
def forward(self, in_feats):
h = self.input_layer(in_feats)
h = nn.ReLU()(h)
for i in range(1, self.n_layers):
h = self.hidden_layer(h)
h = nn.ReLU()(h)
h = self.output_layer(h)
return h
# define the model class
class RGCN(nn.Module):
def __init__(
self,
target_feature_dim,
in_dim,
hidden_dim,
out_dim,
num_conv_layers,
num_embeddings,
target_h_dim,
target_pp_layers,
):
super().__init__()
# create dictionaries for HeteroGraphConv
entry_module_dict = {
etype: GraphConv(in_feats=in_dim, out_feats=hidden_dim)
for etype in g.etypes
}
hidden_model_dict = {
etype: GraphConv(in_feats=hidden_dim, out_feats=hidden_dim)
for etype in g.etypes
}
final_model_dict1 = {
etype: GraphConv(in_feats=hidden_dim, out_feats=out_dim)
for src, etype, dst in g.canonical_etypes
if dst == "target"
}
final_model_dict2 = {
etype: GraphConv(in_feats=hidden_dim, out_feats=1)
for src, etype, dst in g.canonical_etypes
if dst != "target"
}
final_model_dict = {**final_model_dict1, **final_model_dict2}
self.embed_layer = HeteroEmbedding(num_embeddings, in_dim)
self.target_preprocessing = target_preprocessing(
target_feature_dim, target_h_dim, in_dim, target_pp_layers
)
self.conv1 = HeteroGraphConv(entry_module_dict, aggregate="sum")
self.conv2s = nn.ModuleList(
[
HeteroGraphConv(hidden_model_dict, aggregate="sum")
for i in range(num_conv_layers - 2)
]
)
self.conv3 = HeteroGraphConv(final_model_dict, aggregate="sum")
def forward(self, graph, input_features):
embeds = self.embed_layer({ntype: graph.nodes(ntype) for ntype in node_types})
input_features = input_features.to(dtype=torch.float32)
target_features = self.target_preprocessing(input_features)
embeds["target"] = target_features
h = self.conv1(graph, embeds)
h = {k: F.relu(v) for k, v in h.items()}
for layer in self.conv2s:
h = layer(graph, h)
h = {k: F.relu(v) for k, v in h.items()}
h = self.conv3(graph, h)
return h
model = RGCN(
target_feature_dim=target_feature_dim,
in_dim=8,
hidden_dim=32,
out_dim=1,
num_conv_layers=4,
num_embeddings=num_embeddings_dict,
target_h_dim=64,
target_pp_layers=3,
)
print("Number of parameters in the model: ")
print(" " * 33, sum(param.numel() for param in model.parameters()))
# prepare per-sample weight vector for the loss fn: up-weight the rare fraud class (x12) to offset class imbalance
weight_vector = (
torch.ones(labels[train_mask].shape) + labels[train_mask] * 11
).reshape((labels[train_mask].shape[0], 1))
# Set optimizer and loss function
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
loss_fn = torch.nn.BCELoss(weight_vector)
def train_one_epoch(
epoch_no,
model,
g,
features,
labels,
train_mask,
val_mask,
threshold,
return_probs=False,
):
t0 = time()
# Forward pass
logits_dict = model(g, features)
logits = logits_dict["target"]
del logits_dict
probs = torch.sigmoid(logits)
preds = (probs > threshold).float()
labels = labels.reshape_as(preds)
# compute training loss
loss = loss_fn(probs[train_mask], labels[train_mask])
# compute accuracies
train_acc = (preds[train_mask] == labels[train_mask]).float().mean()
val_acc = (preds[val_mask] == labels[val_mask]).float().mean()
# backprop
optimizer.zero_grad()
loss.backward()
optimizer.step()
if return_probs:
return loss, train_acc, val_acc, time() - t0, probs, preds
else:
return loss, train_acc, val_acc, time() - t0
def loss_history_plot(history, small=False):
if small:
size = (5, 2)
else:
size = (7, 4)
fig, ax = plt.subplots(1, 1, figsize=size)
ax.plot(history)
plt.show()
def train(model, g, num_epochs, labels, train_mask, val_mask, threshold):
best_val_acc = 0
epoch_times = []
loss_history = []
features = g.nodes["target"].data["features"]
for epoch in range(num_epochs):
if epoch < num_epochs - 1:
loss, train_acc, val_acc, epoch_time = train_one_epoch(
epoch, model, g, features, labels, train_mask, val_mask, threshold
)
else:
(
loss,
train_acc,
val_acc,
epoch_time,
final_probs,
final_preds,
) = train_one_epoch(
epoch,
model,
g,
features,
labels,
train_mask,
val_mask,
threshold,
return_probs=True,
)
loss_history.append(loss.detach().numpy())
epoch_times.append(epoch_time)
if best_val_acc < val_acc:
best_val_acc = val_acc
if epoch % 10 == 10 - 1:
loss_rate_of_change = (loss_history[-1] - loss_history[-5]) / 5
print(
f"Epoch {epoch+1} loss: {loss:.3f}, (rate of change: {loss_rate_of_change:.4f}), val accuracy: {val_acc:.3f} (best: {best_val_acc:.3f})\n\
-- Average time per epoch: {np.mean(epoch_times):.1f}sec (last 5: {np.mean(epoch_times[-5:]):.1f}sec).\
Estimated time to end: {(num_epochs-epoch-1)*np.mean(epoch_times[-5:])/60:.0f} mins"
)
if epoch % 25 == 25 - 1:
loss_history_plot(loss_history, small=True)
print("-" * 60)
print(
f"Training complete. \
Final loss: {loss:.3f}, \
final val accuracy: {val_acc:.3f}, (best: {best_val_acc:.3f})."
)
return final_probs, final_preds, loss_history
probs, preds, history = train(model, g, 200, labels, train_mask, val_mask, 0.5)
print(probs.mean())
print(preds.mean())
print(labels.mean())
print(f"Proportion of transactions predicted as fraud: {torch.mean(preds)*100:.2f} %.")
cm = confusion_matrix(preds, labels)
print(cm)
fp_rate = cm[1, 0] / (cm[1, 0] + cm[1, 1])
print(f"False positive rate: {fp_rate*100:.1f} %.")
fn_rate = cm[0, 1] / (cm[0, 1] + cm[0, 0])
print(f"False negative rate: {fn_rate*100:.2f} %.")
tn_rate = 1 - fp_rate
print(f"True negative rate: {tn_rate*100:.2f} % (specificity).")
tp_rate = 1 - fn_rate
print(f"True positive rate: {tp_rate*100:.1f} % (sensitivity).")
loss_history_plot(history)
|
# # Introduction
# The dataset includes details of 500 people who opted for a loan. It also records whether each person paid the loan back and, if so, how many days the repayment took. In this project, we will draw a few insights from this sample loan data.
# The feature descriptions below will help in understanding the dataset:
# 1. Loan_id : A unique loan ID assigned to each loan customer (system generated)
# 2. Loan_status : Tells us whether a loan is paid off, in the collection process (customer yet to pay off), or paid off after collection efforts
# 3. Principal : Principal loan amount at the case origination OR Amount of Loan Applied
# 4. Terms : Schedule(time period to repay)
# 5. Effective_date : When the loan got originated (started)
# 6. Due_date : Due date by which loan should be paid off
# 7. Paidoff_time : Actual time when the loan was paid off; null means yet to be paid
# 8. Past_due_days : How many days a loan is past its due date
# 9. Age : Age of customer
# 10. Education : Education level of customer applied for loan
# 11. Gender : Customer Gender (Male/Female)
# Loading the initial libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# Let us load the data set
loan = pd.read_csv("../input/loandata/Loan payments data.csv")
# ### **Data Analysis On Loan Data Set**
# **Checking first 5 and last 5 records from the datasets**
loan.head(5)
loan.tail(5)
# Let's check the duplicate data in data set
loan.duplicated().sum()
loan.shape
loan.info()
loan.isnull().sum()
# From Analysis:
# 1. There are no duplicated values.
# 2. Loan data set have 500 records in 11 columns/features.
# 3. There are 100 null values in "paid_off_time" feature and 300 null values in "past_due_days"
# 4. Also we will need to convert some columns to respective datetime datatype
# Let's convert following columns to the Datetime format
loan["effective_date"] = pd.to_datetime(loan["effective_date"])
loan["due_date"] = pd.to_datetime(loan["due_date"])
loan["paid_off_time"] = pd.to_datetime(loan["paid_off_time"]).dt.date
loan["paid_off_time"] = pd.to_datetime(loan["paid_off_time"])
loan.info()
# **Let's aim to replace NaN values for the columns in accordance with their distribution**
loan.hist(figsize=(15, 11), color="#008080")
loan["past_due_days"].fillna(loan["past_due_days"].mean(), inplace=True)
loan["paid_off_time"] = loan["paid_off_time"].fillna(-1)
loan.isnull().sum()
# Also, there is one Spelling Correction
loan["education"] = loan["education"].replace("Bechalor", "Bachelor")
# Now, it seems we are good to go ahead
# ### **Exploratory Data Analysis**
# ### **Loan Status Analysis**
loan_stat = loan["loan_status"].value_counts()
pd.DataFrame(loan_stat)
plt.figure(figsize=[10, 5])
plt.pie(
loan["loan_status"].value_counts(),
labels=loan["loan_status"].unique(),
explode=[0, 0.1, 0],
startangle=145,
autopct="%1.f%%",
colors=["#1e847f", "#ecc19c", "#000000"],
)
plt.title("Loan Status Distribution", fontsize=15)
plt.show()
# We can see here,
# * Out of 500 people, 300 repaid the full amount on time.
# * "Collection paid off" shows that 100 people repaid the loan, but late, after the due date.
# * "Collection" shows that 100 people have not repaid the loan.
# ### **Gender v/s Loan Status Analysis**
loan["Gender"].value_counts().sort_index()
# **Out of 500, there are 423 males and 77 females present**
loan.groupby(["Gender"])["loan_status"].value_counts().to_frame()
plt.figure(figsize=[10, 5])
sns.countplot(
loan["Gender"], hue=loan["loan_status"], palette=("#1e847f", "#ecc19c", "#000000")
)
plt.legend(loc="upper right")
plt.title("Gender vs Loan Status", fontsize=20)
plt.xlabel("Gender", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.show()
# From above analysis:
# * Out of 500, there are 423 males and 77 females present
# * Around 40% of the male population repaid their loan late (or are yet to pay)
# * Around 30% of the female population repaid their loan late (or are yet to pay)
# * Irrespective of gender, most of the population tends to pay the loan on time
# ### **Education v/s Loan Status Analysis**
loan["education"].value_counts().to_frame()
loan.groupby(["education"])["loan_status"].value_counts().to_frame()
plt.figure(figsize=[10, 5])
sns.countplot(
loan["education"],
hue=loan["loan_status"],
palette=("#1e847f", "#ecc19c", "#000000"),
)
plt.legend(loc="upper right")
plt.title("Education vs Loan Status", fontsize=20)
plt.xlabel("Education", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.show()
# From above analysis:
# * The majority of loan takers have a High School or College background
# * Very few people with a Masters or Above background took a loan.
# * Irrespective of education category, most of them repaid their loan
# ### **Age v/s Loan Status Analysis**
loan["age"].value_counts().to_frame()
plt.figure(figsize=[18, 7])
sns.countplot(
loan["age"], hue=loan["loan_status"], palette=("#1e847f", "#ecc19c", "#000000")
)
plt.legend(loc="upper left")
plt.title("Age vs Loan Status", fontsize=20)
plt.xlabel("Age", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.show()
# From above analysis:
# * The majority of people who took a loan are aged between 24 and 38 years
# * The majority of people repaid their loan
# ### **Principal v/s Loan Status Analysis**
loan["Principal"].value_counts().to_frame()
loan.groupby(["Principal"])["loan_status"].value_counts().to_frame()
plt.figure(figsize=[10, 5])
sns.countplot(
loan["Principal"],
hue=loan["loan_status"],
palette=("#1e847f", "#ecc19c", "#000000"),
)
plt.legend(loc="upper left")
plt.title("Principal vs Loan Status", fontsize=20)
plt.xlabel("Principal", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.show()
# From above analysis:
# * The majority of people opted for a principal of 800 or 1000
# * Out of those, the majority repaid their loan
# ### **Term v/s Loan Status Analysis**
loan["terms"].value_counts().to_frame()
loan.groupby(["terms"])["loan_status"].value_counts().to_frame()
plt.figure(figsize=[10, 5])
sns.countplot(
loan["terms"], hue=loan["loan_status"], palette=("#1e847f", "#ecc19c", "#000000")
)
plt.legend(loc="upper left")
plt.title("Terms vs Loan Status", fontsize=20)
plt.xlabel("Terms", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.show()
# From above analysis:
# * Only a few people opted for a 7-day loan term
# * The majority of late payments come from people with 15-day and 30-day loan terms
# ### **Effective Date v/s Loan Status Analysis**
loan.groupby(["effective_date"])["loan_status"].value_counts().to_frame()
plt.figure(figsize=[10, 5])
dates = loan["effective_date"].dt.date
sns.countplot(
x=dates, hue=loan["loan_status"], palette=("#1e847f", "#ecc19c", "#000000")
)
plt.legend(loc="upper right")
plt.title("Effective Date vs Loan Status", fontsize=20)
plt.xlabel("Effective Date", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.show()
# From above analysis:
# * On 11th and 12th September, loans were given to many people
# * This may have been part of a loan event drive
# Let's see correlation between the features
correlation = loan.corr()
plt.figure(figsize=(10, 7))
plot = sns.heatmap(correlation, vmin=-1, vmax=1, annot=True, annot_kws={"size": 10})
plot.set_xticklabels(plot.get_xticklabels(), rotation=30)
|
import pandas as pd
import numpy as np
from collections import Counter
import nltk
from nltk.corpus import stopwords
nltk.download("stopwords")
from nltk.tokenize import word_tokenize
import re
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
def plot_words(date="today"):
df = pd.read_csv("../input/reddit-wallstreetsbets-posts/reddit_wsb.csv")
stock = pd.read_csv("../input/stock-market-dataset/symbols_valid_meta.csv")
stock = stock["Symbol"].tolist()
df = df.drop(columns=["created", "id", "url", "comms_num"])
df["body"] = df["body"].fillna("")
df["text"] = df["title"] + " " + df["body"]
df = df.drop(columns=["body", "title"])
df["timestamp"] = df["timestamp"].apply(lambda x: x[0:10])
sorted_values = np.sort(df["timestamp"].unique())
lista_valori = []
if date == "today":
df = df[df["timestamp"] == sorted_values[-1]]
elif date == "all":
pass
else:
df = df[df["timestamp"] == date]
jx = []
ix = []
for j in range(0, len(df["text"])):
for i in (df["text"].iloc[j]).split():
if i in stock:
jx.append(j)
ix.append(i)
df_termini = pd.DataFrame({"indici": jx, "valori": ix})
lista_termini = []
for i in range(0, df_termini["indici"].max() + 1):
lista_termini.append(df_termini["valori"][df_termini["indici"] == i].tolist())
df["terms"] = lista_termini
df["terms"] = df["terms"].apply(lambda x: list(set(x)))
df["terms"] = df["terms"].apply(lambda x: " ".join(map(str, x)))
df["terms"] = df["terms"].apply(
lambda x: " ".join([word for word in x.split() if word not in ("I")])
)
df["text"] = df["text"].apply(lambda x: x.lower())
stop = stopwords.words("english")
df["text"] = df["text"].apply(
lambda x: " ".join([word for word in x.split() if word not in (stop)])
)
df["text"] = df["text"].apply(lambda x: re.sub(r"http\S+", "", x))
df["text"] = df["text"].apply(
lambda x: " ".join(
[
word
for word in x.split()
if word
not in (
"[",
"]",
"array",
"will",
"######(**[click",
"–",
"i'm",
"​",
" ",
"-",
"FOR",
"To",
"it.",
"/",
"would",
"for",
"HERE",
"​",
"Array",
"*****",
"-",
"So",
"If",
"since",
"In",
"######(**[CLICK",
"It",
"You",
"What",
"And",
"lot",
"Some",
"got",
"it’s",
"#",
"This",
">",
"*",
"Is",
"They",
"My",
"Why",
"How",
"THIS",
"going",
"I'm",
"I’m",
"get",
"IS",
"We",
"WE",
"-",
"I",
"THE",
"The",
"TO",
"A",
"AND",
"NOT",
"🚀🚀🚀",
"🚀",
"🚀🚀",
)
]
)
)
return df
# counter = Counter(" ".join(text_clean).split()).most_common(100)
# wordcloud = WordCloud(collocations=True).generate(' '.join(text_clean))
# #plot the wordcloud object
# plt.figure(figsize=(14,14))
# plt.imshow(wordcloud)
# plt.axis('off')
# plt.show()
# return(counter)
df = plot_words()
for i in df["terms"]:
print(i)
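# A minimal follow-up sketch: count how often each detected ticker appears across posts,
# using the Counter imported above (this mirrors the commented-out word-cloud idea).
ticker_counts = Counter(" ".join(df["terms"]).split()).most_common(10)
print(ticker_counts)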
|
# # Welcome to my first model, and my graduation project too.
# # This model classifies whether a child is normal or shows facial symptoms of a disease.
# # Special thanks to my friend Omar Salah for helping me with this model.
# # Let's start by importing the required modules.
# import modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
import os, shutil, stat
import glob as gb
import cv2
import tensorflow as tf
import keras
from tqdm import tqdm
# # Now we get the dataset (my first dataset too)
path = "../input/child-binary-class/" # to get the path of folders
Normalimages = os.listdir(path + "/normal")
AbnormalImages = os.listdir(path + "/abnormal")
# # Now we get to the 2 folders
for folder in os.listdir(path):
files = os.listdir(os.path.join(path, folder))
print(
f"For data , found {len(files)} in folder {folder}"
) # to get the numbers of images in each folder
# # We must make our dictionary for labeling the data
code = {"normal": 0, "abnormal": 1} # Dictionary for labeling data
def getcode(n):
return code[n] # get the code of data from dictionary
# def getLabel(n):
# for x,c in new_labels.items():
# if n==c:
# return x
# # Now we read the images from 2 folders
size = []
images = [] # to store images
labels = [] # to store label for each image
for folder in os.listdir(path):
files = os.listdir(os.path.join(path, folder))
for file in tqdm(files):
image = plt.imread(os.path.join(path, folder + "/" + file))
images.append(image) # add image into list
labels.append(folder) # to add label into list
# size.append(image.shape)
len(images), len(labels), labels
# Now we plot the number of images in each folder to make a simple comparison
# show
sns.countplot(labels)
# # Now we show a sample of images.
# # Note 1 for abnormal and 0 for normal
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(images), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(images[i])
plt.axis("off")
plt.title(getcode(labels[i]))
# # Let's make the list of images into arrays
images = np.array(images)
labels = np.array(labels)
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(images), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(images[i])
plt.axis("off")
plt.title(getcode(labels[i]))
# # The images do not all have the same shape, so I resize them to a common size
# resize images into 224 ,224, 3
image_size = 224
new_images = []
new_labels = []
for i in range(len(images)):
    if (
        images[i].shape[-1] != image_size
    ):  # to exclude images that have only 1 channel (grayscale)
        if (
            images[i].shape[-1] == 3
        ):  # to select images that have 3 channels (some have 4)
new_images.append(cv2.resize(images[i], (image_size, image_size)))
new_labels.append(labels[i])
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(new_images), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(new_images[i])
plt.axis("off")
plt.title(getcode(new_labels[i]))
# # Encode the labels as integers
# map each label to its integer code using the dictionary defined above
new_labels = [code[item] for item in new_labels]
# # In this section I convert the resized images into arrays and rescale them
# convert data to array , and rescale images
new_labels = np.array(new_labels)
new_images = np.array(new_images) / 255.0
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(new_images), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(new_images[i])
plt.axis("off")
plt.title(new_labels[i])
# # Data preparation and scaling are now finished.
# # Let's split the data into train, validation and test sets
from sklearn.model_selection import train_test_split
# split entire data to x_train , x_test , y_train , y_test
x_train, x_test, y_train, y_test = train_test_split(
new_images, new_labels, test_size=0.2, shuffle=True, stratify=new_labels
)
# split entire train to x_train , x_val , y_train , y_val
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.05, shuffle=True, stratify=y_train
)
# #
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(y_train), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(x_train[i])
plt.axis("off")
plt.title(y_train[i])
# from tensorflow.keras.preprocessing.image import ImageDataGenerator
# generator=ImageDataGenerator(
# rotation_range=90,
# width_shift_range=0.1,
# height_shift_range=0.1,
# vertical_flip=True,
# zoom_range=0.2,
# shear_range=0.2,
# horizontal_flip=True,
# fill_mode='nearest'
# )
# batch_size=50
# train_generator=generator.flow(x_train,y_train ,batch_size=batch_size)
# val_generator=generator.flow(x_val,y_val)
# #get data for generator
# train_data=train_generator.__getitem__(0)[0]
# train_labels=train_generator.__getitem__(0)[1]
# plt.figure(figsize=(20,20))
# for n , i in enumerate(list(np.random.randint(0,32,36))) :
# plt.subplot(6,6,n+1)
# plt.imshow(train_data[i])
# plt.axis('off')
# plt.title(train_labels[i])
# # build models
# model = tf.keras.models.Sequential([
# # YOUR CODE HERE
# tf.keras.layers.Conv2D(1024,kernel_size=(3,3),activation='relu',input_shape=(image_size,image_size,3)),
# tf.keras.layers.Conv2D(512,kernel_size=(3,3),activation='relu'),
# tf.keras.layers.AvgPool2D(4,4),
# tf.keras.layers.Conv2D(128,kernel_size=(3,3),activation='relu'),
# tf.keras.layers.Conv2D(80,kernel_size=(3,3),activation='relu'),
# tf.keras.layers.Conv2D(64,kernel_size=(3,3),activation='relu'),
# tf.keras.layers.MaxPool2D(4,4),
# tf.keras.layers.Flatten(),
# #Full Connected Layers
# tf.keras.layers.Dense(512, activation='relu'),
# #Add dropout to avoid Overfit
# tf.keras.layers.Dropout(0.25),
# tf.keras.layers.Dense(256, activation='relu'),
# tf.keras.layers.Dense(128, activation='relu'),
# #Add dropout to avoid Overfit
# tf.keras.layers.Dropout(0.4),
# tf.keras.layers.Dense(64, activation='relu'),
# tf.keras.layers.Dense(1 , activation='sigmoid'),
# ])
# print(model.summary())
# model.compile(optimizer='adam', loss="binary_crossentropy",metrics=['accuracy'])
# epochs = 64
# history = model.fit(x_train , y_train ,batch_size=32,epochs=epochs , validation_data=(x_val , y_val))
# from keras.applications import VGG16
# pretrain_model = VGG16(input_shape = (image_size, image_size, 3), weights = 'imagenet', include_top = False)
# for layer in pretrain_model.layers:
# layer.trainable = False
# pretrain_model.summary()
# from keras.applications import ResNet50
# # resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
# pretrain_model=ResNet50(include_top=False,input_shape=(image_size,image_size,3),pooling='max',classes=2, weights = 'imagenet')
# for layer in pretrain_model.layers:
# layer.trainable = False
# pretrain_model.summary()
import efficientnet.keras as efn
pretrain_model = efn.EfficientNetB3(
weights="imagenet",
input_shape=(image_size, image_size, 3),
include_top=False,
pooling="avg",
)
pretrain_model.summary()
from tensorflow.keras import Model
x = tf.keras.layers.Flatten()(pretrain_model.output)
# Full Connected Layers
x = tf.keras.layers.Dense(512, activation="relu")(x)
# Add dropout to avoid Overfit
x = tf.keras.layers.Dropout(0.25)(x)
x = tf.keras.layers.Dense(256, activation="relu")(x)
x = tf.keras.layers.Dense(128, activation="relu")(x)
# Add dropout to avoid Overfit
x = tf.keras.layers.Dropout(0.4)(x)
x = tf.keras.layers.Dense(64, activation="relu")(x)
x = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = Model(pretrain_model.input, x)
print(model.summary())
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
epochs = 20
history = model.fit(
x_train, y_train, batch_size=20, epochs=epochs, validation_data=(x_val, y_val)
)
model.evaluate(x_test, y_test)
model.save("./saveModel.h5")
from tensorflow.keras import Model
from keras.models import load_model
loadedModel = load_model("./saveModel.h5")
loadedModel.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
loadedModel.evaluate(x_test, y_test)
pred = model.predict(x_test)
new_pred = []
for p in pred:
if p >= 0.5:
new_pred.append(1)
else:
new_pred.append(0)
print(new_pred)
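# As a quick cross-check, the same thresholding can be done in one vectorized step (a small sketch):
new_pred_vec = (pred >= 0.5).astype(int).ravel().tolist()
print(new_pred_vec == new_pred)  # should print True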
from sklearn.metrics import classification_report
target_names = ["normal", "abnormal"]
print(classification_report(y_test, new_pred, target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, new_pred)
print(cm)
import matplotlib.pyplot as plt
plt.plot(history.history["accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history.history["loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train"], loc="upper left")
plt.show()
import matplotlib.pyplot as plt
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["val"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["val"], loc="upper left")
plt.show()
def get_label(arr):
    new_arr = []
    for p in arr:
        if p == 0:
            new_arr.append("normal")
        else:
            new_arr.append("abnormal")
    return new_arr
new_y_test = get_label(y_test)
new_new_pred = get_label(new_pred)
plt.figure(figsize=(30, 30))
for n, i in enumerate(list(np.random.randint(0, len(x_test), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(x_test[i])
plt.axis("off")
    plt.title(f"Real: {new_y_test[i]} vs Predict: {new_new_pred[i]}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Python Booleans - Logical Values
# A boolean has two possible values: True and False. True: correct, False: incorrect.
# # Boolean Values
# In programming you often need to know whether an expression is True or False.
# In Python you can evaluate any expression and get one of two answers: True or False.
# When you compare two values, the expression is evaluated and Python returns the Boolean answer:
print(5000000 > 5426)
print(185555 == 779)
print(1044444 < 9333333333333)
# When you run a condition in an if statement, Python returns True or False:
# Let's print a message depending on whether the condition is True or False.
z = 8044
k = 7583
if k > z:
    print("k is greater than z")
else:
    print("k is not greater than z")
# # Evaluating Values and Variables
# The bool() function lets you evaluate any value and get True or False in return.
# For example: evaluate a string and a number:
print(bool("100 alırım inş"))
print(bool(100))
# evaluating two variables
x = "yüz"
y = 100
print(bool(x))
print(bool(y))
# # Most Values Are True
# Almost any value is evaluated as True if it has some kind of content.
# Any string is True, except empty strings.
# Any number is True, except 0.
# Any list, tuple, set, and dictionary is True, except empty ones.
# The following will evaluate to True
bool("sedasayan")
bool(["domates", "biber", "patlıcan"])
# # Some Values Are False
# In fact, there are not many values that evaluate to False, except empty values such as (), [], {}, "", the number 0, and the value None. And of course the value False evaluates to False.
# The following examples will evaluate to False
bool(False)
bool(False)
bool(0)
bool("")
bool(())
bool([])
bool({})
# # Functions Can Return a Boolean
# You can create functions that return a Boolean value:
def myFunction():
return True
print(myFunction())
# You can execute code based on the Boolean answer of a function:
# Example: print "YES!" if the function returns True, otherwise print "NO!":
def myFunction():
return True
if myFunction():
print("YES!")
else:
print("NO!")
# Python also has many built-in functions that return a boolean value, such as the isinstance() function, which can be used to determine whether an object is of a certain data type:
# Example: check whether an object is an integer
x = 8547.55
print(isinstance(x, int))
y = "8888888888888"
print(isinstance(y, str))
# What value does each boolean expression below print?
#
print(10725 > 98884)
print(43254 == 7)
print(8 < 8)
print(bool("sysyssy"))
print(bool(7772))
# # Python Operators
# Operators are used to perform operations on variables and values.
# In the example below, we use the + operator to add two values together:
print(456 + 5)
# Python divides operators into the following groups:
# Arithmetic operators, Assignment operators, Comparison operators, Logical operators, Identity operators, Membership operators, Bitwise operators (a short membership example appears at the end of this section)
# # Python Arithmetic Operators
# Arithmetic operators are used with numeric values to perform common mathematical operations:
# Operator  Name  Example
# + Addition x + y
# - Subtraction x - y
# * Multiplication x * y
# / Division x / y
# % Modulus x % y
# ** Exponentiation x ** y
# // Floor division x // y
# addition
x = 71
y = 377
print(x + y)
# Subtraction
x = 57444444445565
y = 752588444
print(x - y)
# Multiplication
x = 77774
y = 7569547
print(x * y)
# Division
x = 78
y = 5
print(x / y)
# modulus
x = 75444
y = 2
print(x % y)
# Exponentiation
x = 7
y = 9
print(x**y)  # 7 raised to the power of 9
# Floor division
x = 174
y = 8
print(x // y)
# floor division // rounds the result down to the nearest whole number
# # Python Assignment Operators
# Assignment operators are used to assign values to variables:
# = assignment
x = 8
x
# += add and assign
x = 85
x += 75
print(x)
# *= multiply and assign
x = 75
x *= 77
print(x)
# /=
x = 85
x /= 745
print(x)
# %= keeps the remainder of the division
x = 758
x %= 758
print(x)
# **= raises the number to the given power
x = 758
x **= 784
print(x)
# # Python Comparison Operators
# Comparison operators are used to compare two values:
# == Equal
x = 555
y = 85
print(x == y)
# returns False because 555 is not equal to 85
# > Greater than
x = 7
y = 5
print(x > y)
# returns True because 7 is greater than 5
# >= Greater than or equal to
x = 4
y = 2
print(x >= y)
# returns True because 4 is greater than or equal to 2
# <= Less than or equal to
x = 9
y = 2
print(x <= y)
# returns False because 9 is neither less than nor equal to 2
# # Python Logical Operators
# Logical operators are used to combine conditional statements: "and, or, not"
# and: returns True if both statements are true
x = 6
print(x > 4 and x < 9)
# returns True because 6 is greater than 4 AND 6 is less than 9
# or: returns True if one of the statements is true
x = 55
print(x > 44 or x < 4)
# returns True because one of the conditions is true (55 > 44)
# not: reverses the result, returns False if the result is true
x = 7
print(not (x > 4 and x < 100))
# not is used to reverse the result; normally this would have printed True
# # Python Identity Operators
# Identity operators are used to compare objects, not to check whether they are equal, but whether they are actually the same object, occupying the same memory location:
# is
# returns True if both variables are the same object
a = ["el", "ayak"]
b = ["el", "ayak"]
c = a
print(a is c)
# returns True because c is the same object as a
print(a is b)
# returns False because a is not the same object as b, even though they have the same content
print(a == b)
# to demonstrate the difference between "is" and "==": this comparison returns True because a is equal to b
# is not: returns True if both variables are not the same object
# is not
# returns True if both variables are not the same object
a = ["oje", "tırnak"]
s = ["oje", "tırnak"]
d = a
print(a is not d)
# returns False because d is the same object as a
print(a is not s)
# returns True because a is not the same object as s, even though they have the same content
print(a != s)
# to demonstrate the difference between "is not" and "!=": this comparison returns False because a is equal to s
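# # Python Membership Operators
# Membership operators were listed among the operator groups above but not shown; a brief sketch:
# "in" and "not in" test whether a value occurs in a sequence
fruits = ["apple", "banana"]
print("apple" in fruits)  # True
print("cherry" not in fruits)  # True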
|
# # 1. Introduction
# Name: Tomasz Abels and Jack Chen
# Username: JackChenXJ
# Score:
# Leaderbord rank:
# # 2. Data
# ### 2.1 Dataset
# In this section, we load and explore the dataset.
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
print(os.listdir("../input/LANL-Earthquake-Prediction"))
train = pd.read_csv(
"../input/LANL-Earthquake-Prediction/train.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
print(train.shape)
acoustic_data = train["acoustic_data"].values[:200000000:100]
time_data = train["time_to_failure"].values[:200000000:100]
print(acoustic_data.shape)
print(time_data.shape)
fig, ax1 = plt.subplots(figsize=(16, 8))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(acoustic_data, color="b")
ax1.set_ylabel("acoustic_data", color="b")
plt.legend(["acoustic_data"])
ax2 = ax1.twinx()
plt.plot(time_data, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
# ### 2.1.1 Train-test split
# In the code below, we split the train data into a train and a test set. Set a value for the `test_size` yourself. Argue why the test size cannot be too small or too large. You can also use k-fold cross validation (a small sketch follows at the end of this subsection).
# Secondly, we have set the `random_state` to 102. Can you think of a reason why we set a `random_state` at all?
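# A minimal sketch of the k-fold cross validation mentioned above, shown on a dummy array;
# the same splitter could be applied to the feature matrix built further below.
from sklearn.model_selection import KFold
demo = np.arange(10)
for fold, (tr_idx, va_idx) in enumerate(KFold(n_splits=5, shuffle=True, random_state=102).split(demo)):
    print(f"fold {fold}: train={tr_idx}, val={va_idx}")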
# ### 2.2 Data Exploration
# Explore the features and target variables of the dataset. Think about making some scatter plots, box plots, histograms or printing the data, but feel free to choose any method that suits you.
# What do you think is the right performance
# metric to use for this dataset? Clearly explain which performance metric you
# choose and why.
# Algorithmic bias can be a real problem in Machine Learning. So based on this,
# should we use the Race and the Sex features in our machine learning algorithm? Explain what you believe.
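# Regarding the metric question above: this competition is scored with mean absolute error,
# so MAE is a natural choice; a minimal sketch on toy values.
from sklearn.metrics import mean_absolute_error
print(mean_absolute_error([3.0, 1.5, 0.2], [2.5, 1.0, 0.4]))  # 0.4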
from tqdm import tqdm
rows = 1000000
segments = int(np.floor(train.shape[0] / rows))
X_train = pd.DataFrame(
index=range(segments), dtype=np.float64, columns=["ave", "std", "max", "min"]
)
y_train = pd.DataFrame(
index=range(segments), dtype=np.float64, columns=["time_to_failure"]
)
for segment in tqdm(range(segments)):
seg = train.iloc[segment * rows : segment * rows + rows]
x = seg["acoustic_data"].values
y = seg["time_to_failure"].values[-1]
y_train.loc[segment, "time_to_failure"] = y
X_train.loc[segment, "ave"] = x.mean()
X_train.loc[segment, "std"] = x.std()
X_train.loc[segment, "max"] = x.max()
X_train.loc[segment, "min"] = x.min()
print(X_train.shape)
print(X_train)
from sklearn.model_selection import train_test_split
# Randomize the set and split it into a training and test set
x_train, x_test, y_train, y_test = train_test_split(
X_train, y_train, test_size=0.3, shuffle=True, random_state=102
)
# ### 2.3 Data Preparation
# This dataset hasn’t been cleaned yet. Meaning that some attributes (features) are in numerical format and some are in categorial format. Moreover, there are missing values as well. However, all Scikit-learn’s implementations of these algorithms expect numerical features. Check for all features if they are in categorial and use a method to transform them to numerical values. For the numerical data, handle the missing data and normalize the data.
# Note that you are only allowed to use training data for preprocessing but you then need to perform similar changes on test data too.
# You can use [pipelining](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) to help with the preprocessing.
#
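# A minimal sketch of the pipelining idea linked above: chaining the scaler with an estimator
# so exactly the same preprocessing is applied to train and test data. The SVR here is only
# an illustration, not the model used below.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
pipe = Pipeline([("scale", StandardScaler()), ("svr", SVR())])
pipe.fit(x_train, np.ravel(y_train))
print(pipe.score(x_test, np.ravel(y_test)))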
# standard normalize the data
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(x_train)
x_train_norm = scaler.transform(x_train)
x_test_norm = scaler.transform(x_test)
# convert y values to categorical values
y_train = np.ravel(y_train)
y_test = np.ravel(y_test)
lab = preprocessing.LabelEncoder()
y_train_t = lab.fit_transform(y_train)
y_test_t = lab.fit_transform(y_test)
# ## 3. Training and Results
# Briefly introduce the classification algorithms you choose.
# Present your final confusion matrices (2 by 2) and balanced accuracies for both test and training data for all classifiers. Analyse the performance on test and training in terms of bias and variance. Give one advantage and one drawback of the method you use.
#
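# The balanced accuracies asked for above can be computed with scikit-learn; a toy sketch,
# which can then be applied to the real predictions produced below as
# balanced_accuracy_score(y_true, y_pred).
from sklearn.metrics import balanced_accuracy_score
print(balanced_accuracy_score([0, 0, 1, 1], [0, 1, 1, 1]))  # 0.75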
print(x_train_norm.shape)
print(x_test_norm.shape)
print(y_train_t.shape)
print(y_test_t.shape)
# from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn import datasets, svm
from sklearn.linear_model import LogisticRegression
from matplotlib import pyplot
# define the model
model = svm.SVC()
# fit/train the model on all features
model.fit(x_train_norm, y_train_t)
print(model.score(x_test_norm, y_test_t))
# get feature importance
# importance = model.feature_importances_
# plot feature importance
# pyplot.bar([x for x in range(len(importance))], importance)
# pyplot.show()
# Predict
y_test_pred = model.predict(x_test_norm)
y_train_pred = model.predict(x_train_norm)
# Accuracy for training and test set
score1 = model.score(x_train_norm, y_train_t)
score2 = model.score(x_test_norm, y_test_t)
print(score1)
print(score2)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
# creates a confusion matrix for the training set
cm = metrics.confusion_matrix(y_train_t, y_train_pred)
plt.figure(figsize=(4, 4))
sns.heatmap(cm, annot=True, fmt=".0f", linewidths=0.5, square=True, cmap="Blues_r")
plt.ylabel("Actual label")
plt.xlabel("Predicted label")
all_sample_title = "Accuracy Train Score: {0}".format(score1) # accuracy score
plt.title(all_sample_title, size=10)
# creates a confusion matrix for the test set
cm2 = metrics.confusion_matrix(y_test_t, y_test_pred)
plt.figure(figsize=(4, 4))
sns.heatmap(cm2, annot=True, fmt=".0f", linewidths=0.5, square=True, cmap="Blues_r")
plt.ylabel("Actual label")
plt.xlabel("Predicted label")
all_sample_title = "Accuracy Test Score: {0}".format(score2) # accuracy score
plt.title(all_sample_title, size=10)
# ## 4. Discussion and Conclusion
# Discuss all the choices you made during the process and your final conclusions. Highlight the strong points of your approach, discuss its shortcomings and suggest some future approaches that may improve it. Please be self-critical here. The assignment is not about achieving state-of-the-art performance, but about showing that you have learned the concepts covered in the course.
# my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predicted_data})
# you could use any filename. We choose submission here
# my_submission.to_csv('submission.csv', index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression
df = pd.read_csv("/kaggle/input/crop-recommendation/Crop_recommendation.csv")
df.head()
df.info()
df.isnull().sum()
df.columns.tolist()
df.drop(["Unnamed: 8", "Unnamed: 9"], axis=1, inplace=True)
df.head()
df.tail()
# let's focus a bit on the crop types
df.groupby(["label"]).head()
# checking each label's values
# first I checked the unique label values; lowercasing them fixes possible case-matching issues
df["label"] = df["label"].str.lower()
rice_df = df[df["label"] == "rice"]
print(rice_df)
rice_df.head()
rice_df.tail()
rice_df.describe()
# writing a list and a function for every label value.
crops = pd.DataFrame(df["label"].unique(), columns=["crop"])
crop_list = df["label"].unique().tolist()
def create_crop_df(df):
crop_list = df["label"].unique().tolist()
crop_df_dict = {}
for crop in crop_list:
crop_df_dict[crop] = df[df["label"] == crop]
return crop_df_dict
crop_df_dict = create_crop_df(df)
rice_df = crop_df_dict["rice"]
rice_df.head()
print(crop_list)
maize_df = crop_df_dict["maize"]
maize_df.describe()
apple_df = crop_df_dict["apple"]
apple_df.describe()
# to produce the same summary for every crop in one loop
for crop in crop_df_dict:
print(f"DataFrame for {crop}:")
print(crop_df_dict[crop].describe())
# checking some details by groupby function
df.groupby("label")["Nitrogen"].agg(["mean", "sum", "max"]).sort_values(
by="mean", ascending=True
)
# df.groupby('label')['phosphorus'].agg(['mean', 'sum', 'max']).sort_values(by='mean', ascending=True)
# df.groupby('label')['potassium'].agg(['mean', 'sum', 'max']).sort_values(by='mean', ascending=True)
# df.groupby('label')['temperature'].agg(['mean', 'sum', 'max']).sort_values(by='mean', ascending=True)
# df.groupby('label')['humidity'].agg(['mean', 'sum', 'max']).sort_values(by='mean', ascending=True)
# df.groupby('label')['ph'].agg(['mean', 'sum', 'max']).sort_values(by='mean', ascending=True)
# df.groupby('label')['rainfall'].agg(['mean', 'sum', 'max']).sort_values(by='mean', ascending=True)
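# A compact alternative to the commented-out lines above: the same summary for every numeric
# feature in one loop (column names as they appear in this file).
for col in df.select_dtypes(include="number").columns:
    print(f"--- {col} ---")
    print(df.groupby("label")[col].agg(["mean", "sum", "max"]).sort_values(by="mean", ascending=True).head())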
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Installation
# pip install -q tensorflow tensorflow-datasets
# ## Import Necessary Libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
tfds.disable_progress_bar()
from keras.preprocessing import image
# ## Dataset
train_labels = pd.read_csv("/kaggle/input/dog-breed-identification/labels.csv")
train_labels.columns
unique_breeds = train_labels["breed"].unique()
len(unique_breeds)
# from sklearn import preprocessing
# import glob
# def load_labels(data_path):
# files = glob.glob(data_path + '/*.jpg')
# labels = pd.read_csv('/kaggle/input/dog-breed-identification/labels.csv')
# y = []
# for file in files:
# file_name = file.split('/')[1].split('.')[0]
# for id, breed in labels.itertuples(index=False):
# if id == file_name:
# y.append(breed)
# break
# y = np.array(y)
# le = preprocessing.LabelEncoder()
# y = le.fit_transform(y)
# return y
# y = load_labels('train')
# img_dir = '../input/dog-breed-identification/train/'
# train_labels = train_labels.assign(img_path=lambda x: img_dir + x['id'] + '.jpg')
# train_labels.head()
import cv2
def get_images(imgid):
pic = plt.imread(f"../input/dog-breed-identification/train/{imgid}.jpg")
pic = cv2.resize(pic, (300, 300))
return pic
train_labels["img"] = train_labels["id"].apply(get_images)
train_labels.head()
# ## Examples
labrador_retriever_id = train_labels.loc[
train_labels.breed == "labrador_retriever"
].iloc[1, 0]
Image.open("../input/dog-breed-identification/train/" + labrador_retriever_id + ".jpg")
# ## Data Preparation
Y = pd.get_dummies(train_labels["breed"])
labels = Y.values
print(labels.shape)
X = train_labels["img"]
x = np.zeros((10222, 300, 300, 3), dtype=np.float32)  # float32 keeps the array at roughly half the default float64 size
for i in range(len(X)):
img = train_labels["img"][i]
x[i] = img
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    x, labels, test_size=0.2
)
# ### Looking at train and test data
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
print(type(X_train))
print(type(y_train))
# X_train = X_train.reshape(3912, 300, 300, 1)
# X_test = X_test.reshape(978, 300, 300, 1)
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Normalise
X_train /= 255
X_test /= 255
# ## Training the CNN
model = keras.Sequential(
[
# keras.layers.AveragePooling2D(6, 3, input_shape=(200, 200, 3)),
# keras.layers.Conv2D(64, 3, activation='relu'),
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
            input_shape=(300, 300, 3),
activation="relu",
padding="same",
),
keras.layers.MaxPool2D(2, 2),
# keras.layers.Dropout(0.5),
keras.layers.Flatten(),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dropout(0.5),
        keras.layers.Dense(len(unique_breeds), activation="softmax"),
]
)
model.compile(
optimizer="adam", loss=keras.losses.CategoricalCrossentropy(), metrics=["accuracy"]
)
model.summary()
# model.fit(X_train, y_train, epochs=2, batch_size=32)
model.fit(X_train, y_train, epochs=5, batch_size=32)
model.evaluate(X_test, y_test)
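# A minimal sketch of mapping a prediction back to a breed name, using the column order of
# the one-hot matrix Y created by pd.get_dummies above.
probs = model.predict(X_test[:1])
print("predicted breed:", Y.columns[np.argmax(probs[0])])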
# ## Hyperparameter Tuning
from kerastuner.tuners import RandomSearch
def build_model(hp):
model = keras.Sequential()
    model.add(keras.layers.AveragePooling2D(6, 3, input_shape=(300, 300, 3)))
    # Try out different numbers of Conv layers
for i in range(hp.Int("Conv Layers", min_value=0, max_value=3)):
model.add(
keras.layers.Conv2D(
hp.Choice(f"layer_{i}_filters", [16, 32, 64]), 3, activation="relu"
)
)
model.add(keras.layers.MaxPool2D(2, 2))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Flatten())
# hp.Choice allows the model to try out the different hyperparams to pick out the best performing one
model.add(
keras.layers.Dense(
hp.Choice("Dense layer", [64, 128, 256, 512, 1024]), activation="relu"
)
)
    model.add(keras.layers.Dense(len(unique_breeds), activation="softmax"))
model.compile(
optimizer="adam",
loss=keras.losses.CategoricalCrossentropy(),
metrics=["accuracy"],
)
return model
tuner = RandomSearch(
build_model, objective="val_accuracy", max_trials=32, directory="./multi_conv"
)
tuner.search(
X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32
)
best_model = tuner.get_best_models()[0]
best_model.evaluate(X_test, y_test)
best_model.summary()
tuner.results_summary()
# ## Save & Load Models
best_model.save("./best_model")
loaded_model = keras.models.load_model("./best_model")
loaded_model.evaluate(X_test, y_test)
# ## Plot Image from Numpy Array
# RGB image (first training example; values are floats in [0, 1] after the normalisation above)
rgb_image = X_train[0]
plt.imshow(rgb_image)
rgb_image.shape
# Greyscale image
# image = train_images[0].reshape(300, 300)
# plt.imshow(train_images[0], cmap='Greys_r')
# ## Convert PNG/JPG Images to Numpy Format
import imageio
im = imageio.imread("")
print(type(im))
im_np = np.asarray(im)
print(im_np.shape)
# import glob
# # First of all we will extract the detail of all the data and save all of them in terms of dataframe with foldername, imagename, objectname and labels
# detail = sorted(glob.glob("../input/dog-breed-identification/train/*"))
# Folder_Name = [str(i.split("in/")[0]) + "in" for i in detail]
# Image_Name = [str(i.split("/")[4]) for i in detail]
# Train_Labels = np.array((pd.read_csv('../input/dog-breed-identification/labels.csv'))["breed"])
# # Defining dataframe and saving all the extracted information in that dataframe
# train_detail = pd.DataFrame()
# train_detail["Folder Name"] = Folder_Name
# train_detail["Image Name"] = Image_Name
# train_detail["Train Labels"] = Train_Labels
# # Analying the train data detail
# print("\nNumber of images in training set = "+str(len(detail)))
# print(train_detail.columns)
# train_detail.head()
|
python_version = "3.9"
env_name = "name"
import os
old_path = os.environ["PATH"]
new_path = f"/opt/conda/envs/{env_name}/bin:{old_path}"
os.environ["PATH"] = new_path  # apply the updated PATH so the env's binaries are found first
import sys
if __name__ == "__main__":
print(sys.path)
|
from numpy.random import randint
from numpy.random import rand
def objective(x):
return x[0] ** 2.0 + x[1] ** 2.0
def decode(bounds, n_bits, bitstring):
decoded = list()
largest = 2**n_bits
for i in range(len(bounds)):
# extract the substring
start, end = i * n_bits, (i * n_bits) + n_bits
substring = bitstring[start:end]
# convert bitstring to a string of chars
chars = "".join([str(s) for s in substring])
# convert string to integer
integer = int(chars, 2)
# scale integer to desired range
value = bounds[i][0] + (integer / largest) * (bounds[i][1] - bounds[i][0])
# store
decoded.append(value)
return decoded
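# A small worked example of the scaling inside decode(): with bounds [0, 10] and 4 bits,
# the bitstring 1000 is the integer 8 out of 2**4 = 16, so it maps to 0 + (8 / 16) * (10 - 0) = 5.0.
print(decode([[0.0, 10.0]], 4, [1, 0, 0, 0]))  # [5.0]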
def selection(pop, scores, k=3):
# first random selection
selection_ix = randint(len(pop))
for ix in randint(0, len(pop), k - 1):
# check if better (e.g. perform a tournament)
if scores[ix] < scores[selection_ix]:
selection_ix = ix
return pop[selection_ix]
def crossover(p1, p2, r_cross):
# children are copies of parents by default
c1, c2 = p1.copy(), p2.copy()
# check for recombination
if rand() < r_cross:
# select crossover point that is not on the end of the string
pt = randint(1, len(p1) - 2)
# perform crossover
c1 = p1[:pt] + p2[pt:]
c2 = p2[:pt] + p1[pt:]
return [c1, c2]
def mutation(bitstring, r_mut):
for i in range(len(bitstring)):
# check for a mutation
if rand() < r_mut:
# flip the bit
bitstring[i] = 1 - bitstring[i]
def genetic_algorithm(objective, bounds, n_bits, n_iter, n_pop, r_cross, r_mut):
# initial population of random bitstring
pop = [randint(0, 2, n_bits * len(bounds)).tolist() for _ in range(n_pop)]
# keep track of best solution
best, best_eval = 0, objective(pop[0])
# enumerate generations
for gen in range(n_iter):
# decode population
decoded = [decode(bounds, n_bits, p) for p in pop]
# evaluate all candidates in the population
scores = [objective(d) for d in decoded]
# check for new best solution
for i in range(n_pop):
if scores[i] < best_eval:
best, best_eval = pop[i], scores[i]
print("%d, new best f(%s) = %f" % (gen, decoded[i], scores[i]))
# select parents
selected = [selection(pop, scores) for _ in range(n_pop)]
# create the next generation
children = list()
for i in range(0, n_pop, 2):
# get selected parents in pairs
p1, p2 = selected[i], selected[i + 1]
# crossover and mutation
for c in crossover(p1, p2, r_cross):
# mutation
mutation(c, r_mut)
# store for next generation
children.append(c)
# replace population
pop = children
return [best, best_eval]
# define range for input
bounds = [[-5.0, 5.0], [-5.0, 5.0]]
# define the total iterations
n_iter = 100
# bits per variable
n_bits = 16
# define the population size
n_pop = 100
# crossover rate
r_cross = 0.9
# mutation rate
r_mut = 1.0 / (float(n_bits) * len(bounds))
# perform the genetic algorithm search
best, score = genetic_algorithm(
objective, bounds, n_bits, n_iter, n_pop, r_cross, r_mut
)
print("Done!")
decoded = decode(bounds, n_bits, best)
print("f(%s) = %f" % (decoded, score))
|
# Python Libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Libraries for Visualization
import plotly.express as px
import matplotlib.pyplot as plt
# Library for splitting the data in Train and Test
from sklearn.model_selection import train_test_split
# Library required for the Linear Regression Algorithm
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
# Library for the metric required to evaluate the model
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
# allows plotting the charts inline
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
iris_d = pd.read_csv("/kaggle/input/iris-dataset/Iris.csv")
# ### Step 3: Understanding the Data
iris_d.head()
# Fetching 10 records at random from the dataset
iris_d.sample(10)
# Data type of columns
iris_d.info()
# Shape of Data
iris_d.shape
# Identifying Null values
iris_d.isnull().sum()
iris_d.describe()
iris_d["Species"].value_counts()
# ### Step 4: Exploring the data through Visualization
# Box plot of price
plt.figure(figsize=(10, 10))
# sns.boxplot(data= iris_d)
sns.boxplot(data=iris_d, width=0.8, fliersize=5)
plt.show()
# plt.figure(figsize=(10,10))
# sns.set(rc={'figure.figsize':(5,20)})
# To plot the species data using a box plot:
sns.boxplot(x="Species", y="SepalLengthCm", data=iris_d)
plt.show()
sns.boxplot(x="Species", y="SepalWidthCm", data=iris_d)
plt.show()
sns.boxplot(x="Species", y="PetalLengthCm", data=iris_d)
plt.show()
sns.boxplot(x="Species", y="PetalWidthCm", data=iris_d)
plt.show()
# Let see how the classes are separated
sns.FacetGrid(iris_d, hue="Species", height=5).map(
plt.scatter, "PetalLengthCm", "PetalWidthCm"
).add_legend()
plt.show()
# Let see how the classes are separated
sns.FacetGrid(iris_d, hue="Species", height=5).map(
plt.scatter, "SepalLengthCm", "SepalWidthCm"
).add_legend()
plt.show()
# Use pairplot to analyze the relationship between species for all characteristic combinations.
# An observable trend shows a close relationship between two of the species
sns.pairplot(iris_d.drop("Id", axis=1), hue="Species", height=4)
plt.show()
# Correlation
iris_d.corr()
# Vizually appealing corr matrix
cor_iris = iris_d.corr()
# cor1.style.background_gradient(cmap = 'coolwarm')
cor_iris.style.background_gradient(cmap="viridis")
# plot the correlation matrix to see which parameters correlate best with each other
# According to the correlation matrix, PetalLengthCm and
# PetalWidthCm have a positive correlation, which is confirmed by the plot above
sns.heatmap(
cor_iris, xticklabels=cor_iris.columns.values, yticklabels=cor_iris.columns.values
)
plt.show()
# In order to run the Naive Bayes classifier we have to replace the "Species" values with numbers
iris_d["Species"].replace("Iris-setosa", 1, inplace=True)
iris_d["Species"].replace("Iris-virginica", 2, inplace=True)
iris_d["Species"].replace("Iris-versicolor", 3, inplace=True)
# Now check if everything was changed properly
iris_d["Species"].unique()
# ### Step 5 Splitting the data into Train Data and Test Data
# #### For this step, we will first separate the 'Feature' and 'Target' from the data
feature = iris_d.iloc[:, 0:4]
target = iris_d["Species"]
# X = dataframe.iloc[:, 0:4]
# Y = dataframe['Species']
# Splitting the data into Train and Test data
X_train, X_test, y_train, y_test = train_test_split(
feature, target, test_size=0.4, random_state=40
)
print("X-Train :", X_train.shape)
print("X-Test :", X_test.shape)
print("Y-Train :", y_train.shape)
print("Y-Test :", y_test.shape)
# ## Training the model
# ### 1. K – Nearest Neighbour (KNN)
KN_model = KNeighborsClassifier()
KN_model.fit(X_train, y_train)
pred_vals = KN_model.predict(X_test)
print(accuracy_score(y_test, pred_vals))
pred_vals
# ### 2. Support Vector Machine (SVM)
Support_vm = SVC()
Support_vm.fit(X_train, y_train)
pred_vals = Support_vm.predict(X_test)
print(accuracy_score(y_test, pred_vals))
pred_vals
# ### 3. RandomForest
RF_model = RandomForestClassifier()
RF_model.fit(X_train, y_train)
pred_vals = RF_model.predict(X_test)
print(accuracy_score(y_test, pred_vals))
pred_vals
# ### 4. Logistic Regression
LR_Model = LogisticRegression()
LR_Model.fit(X_train, y_train)
pred_vals = LR_Model.predict(X_test)
print(accuracy_score(y_test, pred_vals))
pred_vals
# ### 5. Linear Regression
Linear_model = LinearRegression()
Linear_model.fit(X_train, y_train)
pred_vals = Linear_model.predict(X_test).round().astype(int)  # round the continuous predictions to the nearest class label
print(accuracy_score(y_test, pred_vals))
pred_vals
# ### 6. Gaussian Naive Bayes
# Train and test model
GNB_model = GaussianNB()
GNB_model.fit(X_train, y_train)
pred_vals = GNB_model.predict(X_test)
print(accuracy_score(y_test, pred_vals))
pred_vals
dfIris_d = pd.DataFrame(
pred_vals,
index=range(
60,
),
columns=["Predicted"],
)
dfIris_d["Actual"] = y_test.values  # use positional values so the indices line up
dfIris_d # This dataframe gives the Actual vs Predicted values
# ### Step 9: Evaluating the Performance of the Model
print("Mean Absolute Error =", mean_absolute_error(y_test, pred_vals))
print("Mean Squared Error= ", mean_squared_error(y_test, pred_vals))
print("Root Mean Squared Error= ", np.sqrt(mean_squared_error(y_test, pred_vals)))
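# Since this is a classification task, class-based metrics can be more informative than the
# regression errors above; a minimal sketch on the last model's predictions.
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, pred_vals))
print(classification_report(y_test, pred_vals))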
|
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
def normalize(x):
x_normed = x / x.max(0, keepdim=True)[0]
return x_normed
nRowsRead = 10  # specify 'None' if want to read whole file
# grade6.csv may have more rows in reality; here the whole file is read (nRowsRead is not applied below)
df1 = pd.read_csv("/kaggle/input/grade6.csv", delimiter=",")
df1.dataframeName = "grade6.csv"
nRow, nCol = df1.shape
print("shape:", df1.shape)
print("df1.head\n", df1.head())
print(f"There are {nRow} rows and {nCol} columns")
# print("df1:\n", df1)
print("df1.head(7)\n", df1.head(7))
print("df1.tail(3)\n", df1.tail(3))
print("value clounts of admission:\n", df1["admission"].value_counts())
# df1.plot(kind="scatter", x="toefl ", y="gpa")
cols = ["toefl", "gre", "gpa"]
# torch_tensor = torch.tensor(targets_df['targets'].values)
X = torch.tensor(df1[cols].values, dtype=torch.float32)
print("X", X)
print("X.size", X.size())
X = normalize(X)
print("normalized X", X)
print("X.size", X.size())
Y = torch.tensor(df1["admission"].values, dtype=torch.float32)
print("Y.size:", Y.size())
Y = torch.reshape(Y, (-1, 1))  # one target column, one row per record
print("Y", Y)
print(Y.size())
class myregressionmodel(nn.Module):
def __init__(self, n_input_features):
super(myregressionmodel, self).__init__()
self.layer1 = nn.Linear(n_input_features, 1)
def forward(self, XX):
Y_pred = torch.sigmoid(self.layer1(XX))
return Y_pred
model = myregressionmodel(3)
loss = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
for epoch in range(100000):
Y_hat = model(X)
l = loss(Y_hat, Y)
if epoch % 1000 == 0:
print("loss", l.item())
l.backward()
optimizer.step()
optimizer.zero_grad()
with torch.no_grad():
testX = torch.tensor([[95, 350, 3.5], [60, 230, 2.8]])
Y_hat = model(testX)
result = Y_hat.round()
print("Predicted result: ", result)
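# Note: the training features were scaled by their column maxima, so new inputs should be
# scaled the same way before prediction; a minimal sketch that recomputes those maxima.
raw = torch.tensor(df1[cols].values, dtype=torch.float32)
train_max = raw.max(0, keepdim=True)[0]
with torch.no_grad():
    print("Scaled prediction:", model(testX / train_max).round())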
# There is 1 csv file in the current version of the dataset:
#
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# The next code cells define functions for plotting data.
# Distribution graphs (histogram/bar graph) of column data
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[
[col for col in df if nunique[col] > 1 and nunique[col] < 50]
] # For displaying purposes, pick columns that have between 1 and 50 unique values
nRow, nCol = df.shape
columnNames = list(df)
    nGraphRow = (nCol + nGraphPerRow - 1) // nGraphPerRow  # integer row count for plt.subplot
plt.figure(
num=None,
figsize=(6 * nGraphPerRow, 8 * nGraphRow),
dpi=80,
facecolor="w",
edgecolor="k",
)
for i in range(min(nCol, nGraphShown)):
plt.subplot(nGraphRow, nGraphPerRow, i + 1)
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
valueCounts.plot.bar()
else:
columnDf.hist()
plt.ylabel("counts")
plt.xticks(rotation=90)
plt.title(f"{columnNames[i]} (column {i})")
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
plt.show()
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis="columns")  # drop columns with NaN
df = df[
[col for col in df if df[col].nunique() > 1]
] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(
f"No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2"
)
return
corr = df.corr()
plt.figure(
num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor="w", edgecolor="k"
)
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f"Correlation Matrix for {filename}", fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include=[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis="columns")
df = df[
[col for col in df if df[col].nunique() > 1]
] # keep columns where there are more than 1 unique values
columnNames = list(df)
if (
len(columnNames) > 10
): # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(
df, alpha=0.75, figsize=[plotSize, plotSize], diagonal="kde"
)
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k=1)):
ax[i, j].annotate(
"Corr. coef = %.3f" % corrs[i, j],
(0.8, 0.2),
xycoords="axes fraction",
ha="center",
va="center",
size=textSize,
)
plt.suptitle("Scatter and Density Plot")
plt.show()
# Now you're ready to read in the data and use the plotting functions to visualize the data.
# ### Let's check 1st file: /kaggle/input/grade6.csv
nRowsRead = 1000 # specify 'None' if want to read whole file
# grade6.csv may have more rows in reality, but we are only loading/previewing the first 1000 rows
df1 = pd.read_csv("/kaggle/input/grade6.csv", delimiter=",", nrows=nRowsRead)
df1.dataframeName = "grade6.csv"
nRow, nCol = df1.shape
print(f"There are {nRow} rows and {nCol} columns")
# Let's take a quick look at what the data looks like:
df1.head(5)
columnNames = list(df1)
print("columnNames: ", columnNames)
# Distribution graphs (histogram/bar graph) of sampled columns:
plotPerColumnDistribution(df1, 10, 5)
# Correlation matrix:
plotCorrelationMatrix(df1, 8)
# Scatter and density plots:
plotScatterMatrix(df1, 12, 10)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn")
import nltk
from nltk.corpus import stopwords
from collections import Counter, defaultdict
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
import string
nltk.download("stopwords")
stop = set(stopwords.words("english"))
SEED = 13
data = pd.read_csv(
"/kaggle/input/disneyland-reviews/DisneylandReviews.csv",
encoding="iso-8859-1",
na_values="missing",
)
data.shape
data.info()
# checking duplicated
data[
data["Review_ID"].isin(data["Review_ID"][data["Review_ID"].duplicated()])
].sort_values("Review_ID")
data.drop_duplicates("Review_ID", inplace=True, keep="first")
data.shape
data.Rating.value_counts()
def remap_rating(rating):
if rating < 3:
return "negative"
elif rating > 3:
return "positive"
else:
return "neutral"
data["Rating"] = data["Rating"].apply(lambda rating: remap_rating(rating))
data.Rating.value_counts()
data["Review_Text"] = data["Review_Text"].astype(str)
class_data = (
data.groupby("Rating")
.count()["Review_Text"]
.reset_index()
.sort_values("Review_Text", ascending=False)
)
percent_rating = class_data.Review_Text
labels = class_data.Rating
colors = ["#00ff00", "#0000ff", "#ff0000"]
chart, _, _ = plt.pie(
percent_rating, colors=colors, radius=1.0, labels=labels, autopct="%.1f%%"
)
plt.setp(chart, width=0.5)
plt.show()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 5))
review_len = data[data["Rating"] == "negative"]["Review_Text"].str.len()
ax1.hist(review_len, color="#ff0000")
review_len = data[data["Rating"] == "neutral"]["Review_Text"].str.len()
ax2.hist(review_len, color="#0000ff")
review_len = data[data["Rating"] == "positive"]["Review_Text"].str.len()
ax3.hist(review_len, color="#00ff00")
fig.suptitle("Number of characters in reviews")
plt.show()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 5))
review_len = (
data[data["Rating"] == "negative"]["Review_Text"]
.str.split()
.map(lambda review: len(review))
)
ax1.hist(review_len, color="#ff0000")
review_len = (
data[data["Rating"] == "neutral"]["Review_Text"]
.str.split()
.map(lambda review: len(review))
)
ax2.hist(review_len, color="#0000ff")
review_len = (
data[data["Rating"] == "positive"]["Review_Text"]
.str.split()
.map(lambda review: len(review))
)
ax3.hist(review_len, color="#00ff00")
fig.suptitle("Total words in a review")
plt.show()
def create_corpus(target):
corpus = []
for x in data[data["Rating"] == target]["Review_Text"].str.split():
for i in x:
corpus.append(i)
return corpus
stop = set(stopwords.words("english"))
pos_common_words = create_corpus("positive")
pos_counter = Counter(pos_common_words)
pos_most = pos_counter.most_common()
x = []
y = []
for word, count in pos_most[:40]:
if word not in stop:
x.append(word)
y.append(count)
sns.barplot(x=y, y=x)
plt.title("Most common words in positive reviews")
plt.show()
neu_common_words = create_corpus("neutral")
neu_counter = Counter(neu_common_words)
neu_most = neu_counter.most_common()
x = []
y = []
for word, count in neu_most[:40]:
if word not in stop:
x.append(word)
y.append(count)
sns.barplot(x=y, y=x)
plt.title("Most common words in neutral reviews")
plt.show()
neg_common_words = create_corpus("negative")
neg_counter = Counter(neg_common_words)
neg_most = neg_counter.most_common()
x = []
y = []
for word, count in neg_most[:40]:
if word not in stop:
x.append(word)
y.append(count)
sns.barplot(x=y, y=x)
plt.title("Most common words in negative reviews")
plt.show()
import re
import random
def remove_punctuations(review):
return re.sub(
r"(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|([^\x00-\x7F]+)|([0-9])|(\w+:\/\/\S+)|([^\w\s])|(\s+)",
" ",
review,
)
def rep(review):
return review.replace("_", " ")
def whitespace_LT(review):
return review.strip()
def multispace(review):
return re.sub(r"\s+", " ", review)
data["Review_Text"] = data["Review_Text"].str.lower()
data["Review_Text"] = data["Review_Text"].apply(
lambda review: remove_punctuations(review)
)
data["Review_Text"] = data["Review_Text"].apply(lambda review: rep(review))
data["Review_Text"] = data["Review_Text"].apply(lambda review: whitespace_LT(review))
data["Review_Text"] = data["Review_Text"].apply(lambda review: multispace(review))
data["Review_Text"].iloc[random.randint(0, len(data["Review_Text"]) - 1)]
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
def word_tokenize_wrapper(review):
return word_tokenize(review)
def freqDist_wrapper(review):
return FreqDist(review)
data["Review_Text_Token"] = data["Review_Text"].apply(
lambda review: word_tokenize_wrapper(review)
)
data["Review_Text_Token_FreqDist"] = data["Review_Text_Token"].apply(
lambda token: freqDist_wrapper(token)
)
from nltk.corpus import stopwords
list_stopwords = stopwords.words("english")
list_stopwords = set(list_stopwords)
def stopwords_removal(words):
return [word for word in words if word not in list_stopwords]
data["Review_Text_Token_WSW"] = data["Review_Text_Token"].apply(
lambda word: stopwords_removal(word)
)
data["Review_Text_Token_WSW"].head()
from nltk.stem import PorterStemmer
import swifter
stemmer = PorterStemmer()
def stemmer_wrapper(term):
return stemmer.stem(term)
term_dict = {}
for document in data["Review_Text_Token_WSW"]:
for term in document:
if term not in term_dict:
term_dict[term] = " "
print(len(term_dict))
for term in term_dict:
term_dict[term] = stemmer_wrapper(term)
print(term, ":", term_dict[term])
print(term_dict)
def get_stemmed_term(document):
return [term_dict[term] for term in document]
data["Review_Stemmed"] = data["Review_Text_Token_WSW"].swifter.apply(
lambda doc: get_stemmed_term(doc)
)
print(data["Review_Stemmed"])
data.head()
cols = ["Rating", "Review_Stemmed"]
data = data[cols]
data.columns = ["label", "review"]
import ast
def join(reviews):
return " ".join([review for review in reviews])
data["review"] = data["review"].apply(lambda review: join(review))
data.label.value_counts(normalize=True)
data.info()
def remap_label(label):
if label == "positive":
return 1
elif label == "negative":
return -1
else:
return 0
data["label"] = data["label"].apply(lambda label: remap_label(label))
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(analyzer="word")
features = tfidf.fit_transform(data["review"])
features_array = features.toarray()
features = features.astype(np.float32)
features_array = features_array.astype(np.float32)
labels = data["label"]
print("%d reviews, %d features" % features.shape)
X = features
y = labels
X = X.astype(np.float32)
y = y.astype(np.int8)
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.ensemble import RandomForestClassifier
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=SEED)
acc_score = []
f1s_score = []
for train_idx, val_idx in skf.split(X, y):
Xt, Xv = X[train_idx, :], X[val_idx, :]
yt, yv = y.iloc[train_idx], y.iloc[val_idx]
model = RandomForestClassifier(
criterion="entropy",
random_state=SEED,
n_estimators=500,
max_features="sqrt",
n_jobs=-1,
)
model.fit(Xt, yt)
y_pred = model.predict(Xv)
y_prob = model.predict_proba(Xv)[:, 1]
acc_score.append(accuracy_score(yv, y_pred))
f1s_score.append(f1_score(yv, y_pred, average="macro"))
acc_mean = np.mean(acc_score)
f1s_mean = np.mean(f1s_score)
df_result = pd.DataFrame(
{
"Accuracy": [acc_mean],
"F1 Score": [f1s_mean],
}
)
df_result
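# A minimal sketch of scoring a new (hypothetical) review with the fitted tfidf vectorizer and
# the model from the last cross-validation fold, skipping the stemming step for brevity.
sample_review = "the rides were great and the staff was friendly"
sample_features = tfidf.transform([sample_review]).astype(np.float32)
print(model.predict(sample_features))  # 1 = positive, 0 = neutral, -1 = negative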
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import wordcloud
import re
sns.set_style("whitegrid")
data = pd.read_csv(
"/kaggle/input/protocols-texts/data_with_price_text_preprocessed.csv"
)
data = data.dropna(subset=["text"])
data.head()
# # Data Prep
from sklearn.model_selection import train_test_split
def get_price_class(price):
if price < 250000:
return 0
elif price < 500000:
return 1
elif price < 1000000:
return 2
elif price < 3000000:
return 3
elif price < 10000000:
return 4
elif price < 50000000:
return 5
elif price < 200000000:
return 6
else:
return 7
data["price_class"] = data["price"].apply(get_price_class)
X, y = data["text"], data["price_class"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42, stratify=y
)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2, random_state=42, stratify=y_train
)
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
from sklearn.preprocessing import OneHotEncoder
def metrics_for_model(model, X_test, y_test):
y_pred = model.predict(X_test)
f1_macro = f1_score(y_test, y_pred, average="macro")
accuracy = accuracy_score(y_test, y_pred)
ohe = OneHotEncoder()
y_test_ohe = ohe.fit_transform(y_test.values.reshape(-1, 1)).toarray()
y_pred_ohe = ohe.transform(y_pred.reshape(-1, 1)).toarray()
auc_score = roc_auc_score(y_test_ohe, y_pred_ohe, multi_class="ovr")
print(f"F1-macro: {f1_macro}")
print(f"ROC AUC: {auc_score}")
print(f"Accuracy: {accuracy}")
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
from transformers import AutoTokenizer, AutoModel
from sklearn.model_selection import train_test_split
from tqdm import tqdm
class TextDataset(Dataset):
def __init__(self, texts, labels):
self.texts = texts
self.labels = labels
self.tokenizer = AutoTokenizer.from_pretrained("DeepPavlov/rubert-base-cased")
self.model = AutoModel.from_pretrained("DeepPavlov/rubert-base-cased")
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = self.texts[idx]
label = self.labels[idx]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=256,
return_tensors="pt",
)
input_ids = encoding["input_ids"]
attention_mask = encoding["attention_mask"]
with torch.no_grad():
embeddings = self.model(
input_ids=input_ids.to(self.model.device),
attention_mask=attention_mask.to(self.model.device),
)[0][:, 0, :]
return embeddings, label
def collate_fn(self, batch):
embeddings = torch.stack([item[0] for item in batch], dim=0)
labels = torch.tensor([item[1] for item in batch])
return embeddings, labels
class LSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(LSTM, self).__init__()
self.hidden_size = hidden_size
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=2,
batch_first=True,
)
self.fc1 = nn.Linear(hidden_size, 64)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(64, num_classes)
def forward(self, x):
h0 = torch.zeros(2, x.size(0), self.hidden_size).to(x.device)
c0 = torch.zeros(2, x.size(0), self.hidden_size).to(x.device)
out, _ = self.lstm(x, (h0, c0))
out = self.fc1(out[:, -1, :])
out = self.relu(out)
out = self.fc2(out)
return out
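# A quick sanity check of the forward pass on a dummy batch (the sizes here are arbitrary):
# the output should have shape (batch, num_classes).
check_model = LSTM(input_size=16, hidden_size=8, num_classes=8)
print(check_model(torch.randn(4, 1, 16)).shape)  # torch.Size([4, 8])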
def train(model, train_loader, criterion, optimizer):
model.train()
train_loss = 0.0
for batch_idx, (embeddings, labels) in enumerate(train_loader):
embeddings, labels = embeddings.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(embeddings)
loss = criterion(outputs, labels) # labels
loss.backward()
optimizer.step()
train_loss += loss.item()
train_loss /= len(train_loader)
return train_loss
def validate(model, val_loader, criterion):
model.eval()
val_loss = 0.0
with torch.no_grad():
for batch_idx, (embeddings, labels) in enumerate(val_loader):
embeddings, labels = embeddings.to(device), labels.to(device)
outputs = model(embeddings)
loss = criterion(outputs, labels)
val_loss += loss.item()
val_loss /= len(val_loader)
return val_loss
batch_size = 512
# train_dataset = TextDataset(X_train.values, y_train.values)
# val_dataset = TextDataset(X_val.values, y_val.values)
# train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_dataset.collate_fn)
# val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=val_dataset.collate_fn)
# embeddings_train = []
# for batch in tqdm(train_loader, desc='Getting Embeddings'):
# batch_embeddings = batch[0]
# embeddings_train.append(batch_embeddings)
# embeddings_train = torch.cat(embeddings_train, dim=0)
# embeddings_val = []
# for batch in tqdm(val_loader, desc='Getting Embeddings'):
# batch_embeddings = batch[0]
# embeddings_val.append(batch_embeddings)
# embeddings_val = torch.cat(embeddings_val, dim=0)
# embeddings_train = torch.load('/kaggle/input/tensor-rubert/embeddings_train.pt')
# embeddings_val = torch.load('/kaggle/input/tensor-rubert/embeddings_val.pt')
# train_dataset = TensorDataset(embeddings_train, torch.from_numpy(y_train_ohe))
# train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# val_dataset = TensorDataset(embeddings_val, torch.from_numpy(y_val_ohe))
# val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
# from sklearn.preprocessing import OneHotEncoder
# ohe = OneHotEncoder(sparse=False)
# y_train_ohe = ohe.fit_transform(y_train.values.reshape(-1, 1))
# y_val_ohe = ohe.transform(y_val.values.reshape(-1, 1))
# y_test_ohe = ohe.transform(y_test.values.reshape(-1, 1))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
cv.fit(X_train)
X_train_cv = cv.transform(X_train).toarray()
X_val_cv = cv.transform(X_val).toarray()
X_test_cv = cv.transform(X_test).toarray()
train_dataset = TensorDataset(
torch.from_numpy(X_train_cv.astype(np.float32))[:, None, :],
torch.from_numpy(y_train.values),
)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataset = TensorDataset(
torch.from_numpy(X_val_cv.astype(np.float32))[:, None, :],
torch.from_numpy(y_val.values),
)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
test_dataset = TensorDataset(
torch.from_numpy(X_test_cv.astype(np.float32))[:, None, :],
torch.from_numpy(y_test.values),
)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
learning_rate = 0.01
num_epochs = 739
early_stopping_patience = 5000
model = LSTM(
# input_size=768,
input_size=22973,
hidden_size=128,
num_classes=8,
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.1, patience=2
)
# Train loop
best_val_loss = float("inf")
early_stopping_counter = 0
train_losses, val_losses = [], []
for epoch in range(num_epochs):
# Train
train_loss = train(model, train_loader, criterion, optimizer)
train_losses.append(train_loss)
# Validation
val_loss = validate(model, val_loader, criterion)
val_losses.append(val_loss)
# Learning rate scheduler
# scheduler.step(val_loss)
# scheduler.step(train_loss)
if epoch % 30 == 0:
print(f"Epoch {epoch + 1} of {num_epochs}")
print(f"Train loss: {train_loss:.4f} Val loss: {val_loss:.4f}")
print("-----------------------------------")
# Early stopping
if val_loss < best_val_loss:
best_val_loss = val_loss
early_stopping_counter = 0
else:
early_stopping_counter += 1
if early_stopping_counter == early_stopping_patience:
print("Early stopping")
break
import matplotlib.pyplot as plt
# plot losses
plt.plot(train_losses, label="Train Loss")
plt.plot(val_losses, label="Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
import torch
from sklearn.metrics import roc_auc_score, f1_score, accuracy_score
def evaluate(model, dataloader):
    model.eval()
    preds, probs, targets = [], [], []
    with torch.no_grad():
        for embeddings, labels in dataloader:
            embeddings, labels = embeddings.to(device), labels.to(device)
            outputs = model(embeddings)
            probs.extend(torch.softmax(outputs, dim=1).cpu().numpy())
            preds.extend(torch.argmax(outputs, dim=1).cpu().numpy())
            targets.extend(labels.cpu().numpy())
    # ROC AUC is computed from class probabilities; using one-hot hard
    # predictions here would make the score degenerate towards accuracy
    roc_auc = roc_auc_score(np.eye(8)[targets], np.array(probs))
    f1_macro = f1_score(targets, preds, average="macro")
    accuracy = accuracy_score(targets, preds)
    return roc_auc, f1_macro, accuracy
roc_auc, f1_macro, accuracy = evaluate(model, train_loader)
print("ROC AUC score:", roc_auc)
print("F1 macro score:", f1_macro)
print("Accuracy:", accuracy)
roc_auc, f1_macro, accuracy = evaluate(model, val_loader)
print("ROC AUC score:", roc_auc)
print("F1 macro score:", f1_macro)
print("Accuracy:", accuracy)
roc_auc, f1_macro, accuracy = evaluate(model, test_loader)
print("ROC AUC score:", roc_auc)
print("F1 macro score:", f1_macro)
print("Accuracy:", accuracy)
# ----
# from transformers import AutoTokenizer, AutoModelForMaskedLM, AutoModel
# # tokenizer_roberta = AutoTokenizer.from_pretrained('xlm-roberta-base')
# # model_roberta = AutoModelForMaskedLM.from_pretrained("xlm-roberta-base")
# tokenizer_rubert = AutoTokenizer.from_pretrained("DeepPavlov/rubert-base-cased")
# model_rubert = AutoModel.from_pretrained("DeepPavlov/rubert-base-cased")
# import torch
# def get_embeddings(texts, model, tokenizer):
# """
# Get embeddings from text
# :param texts: list
# :param model: model
# :return: embeddings: list
# """
# embeddings = []
# for text in tqdm(texts):
# # if len(text) > 510:
# # text = text[len(text)-510:]
# tokens = tokenizer.tokenize(text)
# if len(tokens) > 510:
# tokens = tokens[:510]
# tokens = ['[CLS]'] + tokens + ['[SEP]']
# input_ids = tokenizer.convert_tokens_to_ids(tokens)
# input_ids = torch.tensor([input_ids])
# with torch.no_grad():
# last_hidden_states = model(input_ids)[0]
# embeddings.append(last_hidden_states[0][0].numpy())
# return embeddings
# from torch.utils.data import DataLoader
# import torch
# def get_embeddings_2_0(texts, model, tokenizer, batch_size=8):
# """
# Get embeddings from text
# :param texts: list
# :param model: model
# :return: embeddings: list
# """
# tokens = tokenizer(list(texts), padding=True, truncation=True, return_tensors='pt')
# dataloader = DataLoader(tokens['input_ids'], batch_size=batch_size)
# pbar = tqdm(total=len(dataloader), desc='Getting RoBERTa embeddings')
# embeddings_list = []
# with torch.no_grad():
# for batch_input_ids in dataloader:
# outputs = model(batch_input_ids)
# embeddings = outputs[0][:, 0, :]
# embeddings_list.append(embeddings)
# pbar.update(1)
# pbar.close()
# concatenated_embeddings = torch.cat(embeddings_list, dim=1)
# numpy_embeddings = concatenated_embeddings.numpy()
# return numpy_embeddings
# import torch
# from torch.utils.data import DataLoader
# from transformers import AutoTokenizer, AutoModel
# from tqdm import tqdm
# def get_rubert_embeddings(texts):
# tokenizer = AutoTokenizer.from_pretrained('DeepPavlov/rubert-base-cased')
# model = AutoModel.from_pretrained('DeepPavlov/rubert-base-cased')
# batch_size = 8
# max_length = 512
# tokenized_texts = []
# for text in texts:
# tokenized_text = tokenizer.encode(text, truncation=True, max_length=max_length)
# tokenized_texts.append(tokenized_text)
# padded_texts = torch.nn.utils.rnn.pad_sequence([torch.tensor(t) for t in tokenized_texts], batch_first=True)
# dataloader = DataLoader(padded_texts, batch_size=batch_size)
# pbar = tqdm(total=len(dataloader), desc='Getting RuBERT embeddings')
# embeddings_list = []
# with torch.no_grad():
# for i, batch_input_ids in enumerate(dataloader):
# outputs = model(batch_input_ids)
# embeddings = outputs[0][:, 0, :]
# embeddings_list.append(embeddings)
# pbar.update(1)
# if i % 10 == 0:
# pbar.clear()
# pbar.write(f'Processed {i} batches out of {len(dataloader)}')
# pbar.close()
# concatenated_embeddings = torch.cat(embeddings_list, dim=0)
# embeddings = concatenated_embeddings.numpy()
# return embeddings
# X_train_embeddings = get_rubert_embeddings(X_train)
# X_val_embeddings = get_rubert_embeddings(X_val)
# X_test_embeddings = get_rubert_embeddings(X_test)
# X_train_embeddings = get_embeddings(X_train, model_rubert, tokenizer_rubert)
# X_val_embeddings = get_embeddings(X_val, model_rubert, tokenizer_rubert)
# X_test_embeddings = get_embeddings(X_test, model_rubert, tokenizer_rubert)
# np.save('X_train', X_train_embeddings)
# np.save('X_val', X_val_embeddings)
# np.save('X_test', X_test_embeddings)
# train_pool_rubert = Pool(X_train_embeddings,
# label=y_train.astype(int))
# val_pool_rubert = Pool(X_val_embeddings,
# label=y_val.astype(int))
# test_pool_rubert = Pool(X_test_embeddings,
# label=y_test.astype(int))
# model = CatBoostClassifier(
# task_type='GPU',
# iterations=2000,
# eval_metric='TotalF1:average=Macro',
# early_stopping_rounds=500,
# bootstrap_type='Bernoulli',
# subsample=0.7
# )
# model.fit(
# train_pool_rubert,
# plot=True,
# verbose=100,
# eval_set=(val_pool_rubert)
# )
# metrics_for_model(model, test_pool_rubert, y_test)