# # **Data Processing**
# # **Importing libraries**
# - This code imports some commonly used libraries for data analysis and visualization in Python. After importing them, you can use their functions to perform a wide range of analysis and visualization tasks.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # **Reading the file**
# - This code uses the pandas library to read a CSV file named real estate.csv located at /kaggle/input/date-of-real-estate3/ and stores its contents in a pandas DataFrame called df.
# - The head() method is then called on df to display the first 5 rows of the DataFrame.
import pandas as pd
df = pd.read_csv("/kaggle/input/date-of-real-estate3/real estate.csv")
df.head()
# # **Data Cleansing and Improvement**
# # Find whether there is any duplicated data
# - This code is using the loc accessor of the pandas DataFrame df with the duplicated() method to locate and display the rows of the DataFrame that are duplicates.
df.loc[df.duplicated()]
# - This code uses the duplicated() method of the pandas DataFrame df to flag the duplicate rows, and then counts them with the sum() method.
df.duplicated().sum()
# # **Process missing data**
# - This code is using the isna() method of the pandas DataFrame df to identify the missing (NaN) values in the DataFrame, and then using the sum() method to count the number of missing values in each column.
df.isna().sum()
# # **Visualization**
# - head() displays the first five rows of the DataFrame by default, which is a convenient way to preview a large dataset; for example, df.head(10) displays the first 10 rows instead.
df.head()
# - The unique() method of a pandas Series (which is what a single column of a DataFrame is) returns an array of the unique values in the Series, in the order in which they appear.
# - This can be useful for exploring the distribution of values in a column, or for checking for any unexpected or invalid values.
df["Number of rooms"].unique()
# - This code is replacing certain string values in the "Number of rooms" column of the pandas DataFrame df with their corresponding numeric values.
df["Number of rooms"] = df["Number of rooms"].replace(["1 bedrooms"], 1)
df["Number of rooms"] = df["Number of rooms"].replace(["2 bedrooms"], 2)
df["Number of rooms"] = df["Number of rooms"].replace(["3 bedrooms"], 3)
df["Number of rooms"] = df["Number of rooms"].replace(["4 bedrooms"], 4)
df["Number of rooms"] = df["Number of rooms"].replace(["5 bedrooms"], 5)
df["Number of rooms"] = df["Number of rooms"].replace(["6+ bedrooms"], 6)
df
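# - An equivalent, more concise approach (a sketch, assuming the same string labels shown by unique() above) is to pass a single mapping dict to replace():
# room_map = {"1 bedrooms": 1, "2 bedrooms": 2, "3 bedrooms": 3,
#             "4 bedrooms": 4, "5 bedrooms": 5, "6+ bedrooms": 6}
# df["Number of rooms"] = df["Number of rooms"].replace(room_map)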
df["Number of bathrooms"].unique()
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["1 bathrooms"], 1)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["2 bathrooms"], 2)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["3 bathrooms"], 3)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["4 bathrooms"], 4)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["5 bathrooms"], 5)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["5+ bathrooms"], 6)
df
df["neighborhood"].unique()
df["neighborhood"] = df["neighborhood"].replace(["Suwaiq"], 0)
df
df["lister type"].unique()
df["lister type"] = df["lister type"].replace(["landlord"], 0)
df["lister type"] = df["lister type"].replace(["Agent"], 1)
df
# - In this code, the "price" column of df is rebuilt with the commas removed from each value, replacing the previous comma-formatted strings.
s = df["subcategory"].unique()
s
temp = list(df["price"])
L = []
for x in temp:
x = x.replace(",", "")
L.append(x)
df = df.drop(columns=["price"])
df["price"] = L
df.head()
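# - The same comma stripping can be done without an explicit loop (a sketch, assuming "price" holds comma-formatted strings), using pandas' vectorized string accessor:
# df["price"] = df["price"].astype(str).str.replace(",", "", regex=False)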
# - This code is calculating the median price for each unique value in the "subcategory" column of the pandas DataFrame df, and storing the results in a list called p.
s = df["subcategory"].unique()
p = []
for cat in s:
df1 = df[df["subcategory"] == cat]
av = df1["price"].median()
print(cat, av)
p.append(av)
# - This code is using the bar() function to create a bar chart of the median prices for each unique value in the "subcategory" column of the pandas DataFrame df.
plt.bar(s, p)
df["subcategory"] = df["subcategory"].replace(["Townhouses for Sale"], 1)
df["subcategory"] = df["subcategory"].replace(["villa-places for sale"], 2)
df["subcategory"] = df["subcategory"].replace(["Farm & Chaltes for sale"], 3)
df
df["number of floors "].unique()
df["number of floors "] = df["number of floors "].replace(["1 floor"], 0)
df["number of floors "] = df["number of floors "].replace(["2 floors"], 1)
df
df["city "].unique()
df["city "] = df["city "].replace(["Al Batinah"], 0)
df
df["category "].unique()
df["category "] = df["category "].replace(["Real Estate for Sale"], 0)
df
df["payment methods "].unique()
df["payment methods "] = df["payment methods "].replace(["cash or Installments"], 0)
df["payment methods "] = df["payment methods "].replace(["cash only"], 1)
df
df["building age "].unique()
df["building age "] = df["building age "].replace(["0"], 1)
df["building age "] = df["building age "].replace(["0-11 months"], 2)
df["building age "] = df["building age "].replace(["1-5 years"], 3)
df["building age "] = df["building age "].replace(["6-9 years "], 4)
df["building age "] = df["building age "].replace(["10-19 years"], 5)
df["building age "] = df["building age "].replace(["20+ years"], 6)
df
s = df["building age "].unique()
s
temp = list(df["price"])
L = []
for x in temp:
x = x.replace(",", "")
L.append(x)
df = df.drop(columns=["price"])
df["price"] = L
df.head()
# - This code is used for exploring the relationship between the "building age" and "price" columns of df, by calculating the median price for each unique building age value.
s = df["building age "].unique()
p = []
for cat in s:
df1 = df[df["building age "] == cat]
av = df1["price"].median()
print(cat, av)
p.append(av)
# - This code is using the bar() method to create a bar chart of the median prices (p) for each unique value in the "building age" column (s).
plt.bar(s, p)
# - plt.scatter is a function of the matplotlib library used to generate a scatter plot. It takes two arguments: s and p. s represents a list or array of data points for the x-axis of the scatter plot, and p is a list or array of data points for the y-axis of the scatter plot.
# - The plt.show() function call is used to display the scatter plot.
plt.scatter(s, p)
plt.show()
df["property status "].unique()
df["property status "] = df["property status "].replace(["0"], 0)
df["property status "] = df["property status "].replace(["complete"], 1)
df
# - s = df["Surface area m2"]. unique(): This line of code selects the "Surface area m2" column from the pandas DataFrame df and stores the unique values in the s variable.
# - p = [ ]: This line of code initializes an empty list called p, which will be used to store the median prices.
# - for cat in s:: This is a loop that iterates through the unique values in s.
# - df1 = df [df['Surface area m2']==cat]: This line of code creates a new DataFrame df1 by filtering df to only include rows where the "Surface area m2" column is equal to the current value of cat.
# - av = df1['price'].median(): This line of code calculates the median price of the real estate properties in df1.
# - print(cat, av): This line of code prints the current category and its corresponding median price.
# - p.append(av): This line of code appends the current median price to the p list.
# - After this code executes, the variable p contains the median prices for each unique value of "Surface area m2" in the original DataFrame df.
s = df["Surface area m2"].unique()
p = []
for cat in s:
df1 = df[df["Surface area m2"] == cat]
av = df1["price"].median()
print(cat, av)
p.append(av)
plt.scatter(s, p)
plt.show()
# - At this point all the columns in the table have been converted to numbers, so the data is fully numeric.
df.head()
# ## **Correlation**
# - df['price'] = df['price'].apply(pd.to_numeric, errors='coerce'): This line of code selects the "price" column from the pandas DataFrame df and applies the pd.to_numeric() function to convert the column to a numeric data type. The errors='coerce' parameter is used to set any non-numeric values in the column to NaN (Not a Number).
# - df.head(): This line of code displays the first few rows of the updated DataFrame df using the head() method.
# - After this code executes, the "price" column in the pandas DataFrame df will have been converted to a numeric data type, and any non-numeric values will have been replaced with NaN. The head() method is used to display the first few rows of the DataFrame to check that the conversion was successful.
import seaborn as sns
df["price"] = df["price"].apply(pd.to_numeric, errors="coerce")
df.head()
# - df.corr(): This line of code computes the correlation coefficients between all pairs of (numeric) columns in the DataFrame df. The result is a new DataFrame whose rows and columns are those column names.
# - ['price']: This is a DataFrame indexing operation that selects the column with the label "price" from the correlation matrix DataFrame.
# - df.corr()['price']: This code combines the previous two lines of code to compute the correlation coefficients between the "price" column and all other columns in the DataFrame df.
# - After this code executes, the output will be a pandas Series object containing the correlation coefficients between the "price" column and all other columns in the DataFrame df. The index of the Series will be the column names of df, and the values will be the correlation coefficients. This information can be used to explore the relationships between the "price" column and other columns in the DataFrame.
df.corr()["price"]
# - This code is using the Python programming language and the matplotlib and seaborn libraries to import their respective modules and use them for data visualization.
# - import matplotlib.pyplot as plt: This line of code imports the pyplot module from the matplotlib library and gives it an alias 'plt', which is a common convention in Python. pyplot is a collection of functions that make matplotlib work like MATLAB. It provides a convenient interface for creating various types of plots and charts.
# - import seaborn as sns: This line of code imports the seaborn library and gives it an alias 'sns'. seaborn is a data visualization library based on matplotlib that provides a high-level interface for creating informative and attractive statistical graphics.
# - After these lines of code execute, the plt and sns modules are available to be used for creating and customizing various types of visualizations, such as line charts, scatter plots, histograms, heatmaps, and more.
import matplotlib.pyplot as plt
import seaborn as sns
# - df.copy() is a method used to create a copy of a pandas DataFrame. This method returns a new DataFrame object with the same data and column names as the original DataFrame, but with its own memory allocation.
df.copy()
# - This code produces a heatmap of the correlation matrix of a pandas DataFrame using Seaborn, which is a powerful tool for visualizing correlations between variables.
#
_, ax = plt.subplots(figsize=(14, 12))
colormap = sns.diverging_palette(220, 10, as_cmap=True)
_ = sns.heatmap(
df.corr(),
cmap=colormap,
square=True,
cbar_kws={"shrink": 0.9},
ax=ax,
annot=True,
linewidths=0.1,
vmax=1.0,
linecolor="white",
annot_kws={"fontsize": 12},
)
plt.title("Correlation of Features", y=1.05, size=15)
# ## **Training and Testing Model**
# - This code imports the pandas library and the DecisionTreeClassifier class from scikit-learn, and then makes a copy of a pandas DataFrame df and assigns it to a new variable data.
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
data = df.copy()
data
x = data.drop(columns=["price"])
y = data["price"]
m = x.columns.values.tolist()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# - This code drops the "price" column from a copy of the DataFrame, uses the remaining columns as predictors and "price" as the target, and splits the data into training and testing subsets (done in the cell above).
# - It then builds a linear regression model using scikit-learn's LinearRegression class, fits it to the training data, makes predictions on the testing data, and prints the actual and predicted values of the target variable.
# - A quick evaluation with regression metrics such as the mean squared error is sketched right after the prediction below.
from sklearn.metrics import accuracy_score
from sklearn import linear_model
model = linear_model.LinearRegression()
model.fit(x_train, y_train)
p = model.predict(x_test)
print(y_test, p)
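# - As mentioned above, the fit can be summarised with regression metrics; a minimal sketch, assuming model, x_test, y_test and p from the cells above:
from sklearn.metrics import mean_squared_error, r2_score
print("Mean squared error:", mean_squared_error(y_test, p))
print("R^2 score:", r2_score(y_test, p))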
# - This code prints the shape of x_test, the pandas DataFrame containing the predictor values for the testing subset of the data.
# - The shape attribute returns a tuple with the dimensions of the object. In this case, x_test.shape returns a tuple with two elements: the number of samples in the testing subset (rows) and the number of predictor features (columns).
x_test.shape
# - This code prints the shape of x_train, the pandas DataFrame containing the predictor values for the training subset of the data.
x_train.shape
# - This code selects the columns of df that contain categorical variables (i.e., non-numeric variables) and assigns them to a new dataframe categ.
categ = df.select_dtypes(exclude="number")
categ.head()
# - This code prints the first 5 rows of the dataframe df.
df.head()
data
# - This code creates a list xt containing 12 integer values, and then creates a list of tuples zipped_listmyoptions by zipping together the column names m and the values in x.
# - xt= [0, 0, 4, 350, 714, 0, 4, 0, 1, 0, 1, 0]
# This line creates a new list xt containing 12 integer values. The values in the list correspond to the predictor variables in the same order as the columns in the original dataframe.
# - listmyoptions = zip(m,x)
# This line zips together the two lists m and x to create a list of tuples listmyoptions, where each tuple contains a column name from m and the corresponding values from x.
#
# - zipped_listmyoptions = list(listmyoptions)
# This line converts the listmyoptions object into a list of tuples and assigns the resulting list to zipped_listmyoptions. This list of tuples contains the same information as the x and m lists, but in a format that is easier to work with for some operations (such as predicting the value of a new instance using a trained model).
# ## **Training**
x_train
display(df.loc[[6]])
xi = x_train.loc[[6]]
yo = y_train.loc[[6]]
yo
# ## **Testing**
x_test
display(df.loc[[68]])
xi = x_test.loc[[68]]
yo = y_test.loc[[68]]
yo
# - This code uses the trained linear regression model to predict the price of the selected instance xi (a single row taken from x_test above).
# - plspredict = model.predict(xi)
# - The predict() method expects a 2D array-like of predictor variables, one row per instance and one column per feature. Here xi is a one-row DataFrame, so the output is an array containing a single predicted value, which is stored in plspredict.
# - print("the price of the real estate is:", plspredict)
# - This line prints the predicted price of the selected instance stored in plspredict.
plspredict = model.predict(xi)
print("the price of the real estate is:", plspredict)
# ---
# # Exploring Ideas for ML
# ## Playground Series - S3, E12
# Install pycaret
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import all the necessary libraries
from pycaret.classification import *
from imblearn.over_sampling import (
ADASYN,
BorderlineSMOTE,
KMeansSMOTE,
RandomOverSampler,
SMOTE,
SMOTENC,
SVMSMOTE,
)
from pathlib import Path # Import OS path libraries.
# I like to disable my Notebook Warnings.
import warnings
warnings.filterwarnings("ignore")
# Configure notebook display settings to only use 2 decimal places, tables look nicer.
pd.options.display.float_format = "{:,.2f}".format
pd.set_option("display.max_columns", 15)
pd.set_option("display.max_rows", 50)
# Define some of the notebook parameters for future experiment replication.
SEED = 42
def read_csv_to_dataframe(file_path, delimiter=",", encoding="utf-8", header="infer"):
"""
Read data from a CSV file and load it into a pandas DataFrame.
Parameters:
file_path (str): The file path to the CSV file.
delimiter (str): The delimiter used in the CSV file (default: ',').
encoding (str): The character encoding used in the CSV file (default: 'utf-8').
header (int or str): The row number to use as the header, or 'infer' to let pandas determine the header (default: 'infer').
Returns:
pandas.DataFrame: A DataFrame containing the data from the CSV file.
"""
return pd.read_csv(file_path, delimiter=delimiter, encoding=encoding, header=header)
# Example usage:
# Assuming 'file_path' is the path to your CSV file
# data = read_csv_to_dataframe(file_path)
TRN_PATH = "/kaggle/input/playground-series-s3e12/train.csv"
TST_PATH = "/kaggle/input/playground-series-s3e12/test.csv"
SUB_PATH = "/kaggle/input/playground-series-s3e12/sample_submission.csv"
ORG_PATH = "/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
trn_data = read_csv_to_dataframe(TRN_PATH)
tst_data = read_csv_to_dataframe(TST_PATH)
org_data = read_csv_to_dataframe(ORG_PATH)
org_data = org_data[org_data["target"] == 1]
sub_data = read_csv_to_dataframe(SUB_PATH)
import pandas as pd
def append_dataframes(df1, df2, reset_index=True):
"""
Append two pandas DataFrames along the rows.
Parameters:
df1 (pandas.DataFrame): The first DataFrame.
df2 (pandas.DataFrame): The second DataFrame.
reset_index (bool): Whether to reset the index of the resulting DataFrame (default: True).
Returns:
pandas.DataFrame: An appended DataFrame.
"""
appended_df = pd.concat([df1, df2], axis=0, ignore_index=reset_index)
return appended_df
trn_data = append_dataframes(trn_data, org_data)
def analyze_dataframe(df):
"""
Analyze a pandas DataFrame and provide a summary of its characteristics.
Parameters:
df (pandas.DataFrame): The input DataFrame to analyze.
Returns:
None
"""
print("DataFrame Information:")
print("----------------------")
display(df.info(verbose=True, show_counts=True))
print("\n")
print("DataFrame Description:")
print("----------------------")
display(df.describe(include="all"))
print("\n")
print("Number of Null Values:")
print("----------------------")
display(df.isnull().sum())
print("\n")
print("Number of Duplicated Rows:")
print("--------------------------")
display(df.duplicated().sum())
print("\n")
print("Number of Unique Values:")
print("------------------------")
display(df.nunique())
print("\n")
print("DataFrame Shape:")
print("----------------")
print(f"Rows: {df.shape[0]}, Columns: {df.shape[1]}")
# Example usage:
# Assuming 'data' is your DataFrame
# analyze_dataframe(data)
analyze_dataframe(trn_data)
TARGET = "target"
ignore = ["id", "target"]
numeric_feat = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
categ_feat = [
feat for feat in trn_data.columns if feat not in numeric_feat and feat not in ignore
]
# numeric_feat = ['cond', 'calc', 'gravity'] # Another options for experimentation...
features = categ_feat + numeric_feat
print("Features: ", features)
# Pycaret configuration.
clf = setup(
data=trn_data,
target=TARGET,
categorical_features=categ_feat,
numeric_features=numeric_feat,
normalize=True,
ignore_features=["id"],
normalize_method="zscore",
fix_imbalance=True,
fix_imbalance_method=SMOTE(),
remove_outliers=True,
outliers_method="iforest",
fold_strategy="stratifiedkfold",
fold=20,
use_gpu=True,
session_id=123,
)
# Selecting what model should be trained.
best_model = compare_models(sort="auc", fold=10)
# Define the base models
ext = create_model("et", fold=20)
tuned_ext = tune_model(ext, fold=20)
plot_model(tuned_ext, plot="feature")
unseen_predictions_ext = predict_model(tuned_ext, data=tst_data, raw_score=True)
unseen_predictions_ext
# ...
sub_data["target"] = unseen_predictions_ext["prediction_score_1"]
sub_data.to_csv("pycaret_ext_submission.csv", index=False)
sub_data.head()
# ---
# Define the base models
lda = create_model("lda", fold=20)
tuned_lda = tune_model(lda, fold=20)
plot_model(tuned_lda, plot="feature")
unseen_predictions_lda = predict_model(tuned_lda, data=tst_data, raw_score=True)
unseen_predictions_lda
# ...
sub_data["target"] = unseen_predictions_lda["prediction_score_1"]
sub_data.to_csv("pycaret_lda_submission.csv", index=False)
sub_data.head()
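# One possible follow-up (a sketch, not run here; assumes the tuned models created above and PyCaret's blend_models API):
# blended = blend_models(estimator_list=[tuned_ext, tuned_lda], fold=10)
# unseen_predictions_blend = predict_model(blended, data=tst_data, raw_score=True)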
# ---
# # Training a DNN Model Using Keras
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import (
ReduceLROnPlateau,
LearningRateScheduler,
EarlyStopping,
)
from tensorflow.keras.layers import (
Dense,
Input,
InputLayer,
Add,
BatchNormalization,
Dropout,
)
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
import random
import datetime
def nn_model():
""" """
L2 = 65e-6
activation_func = "swish"
inputs = Input(shape=(len(features),))
x = Dense(
512,
# use_bias = True,
kernel_regularizer=tf.keras.regularizers.l2(L2),
activation=activation_func,
)(inputs)
x = BatchNormalization()(x)
x = Dense(
256,
# use_bias = True,
kernel_regularizer=tf.keras.regularizers.l2(L2),
activation=activation_func,
)(x)
x = BatchNormalization()(x)
x = Dense(
64,
# use_bias = True,
kernel_regularizer=tf.keras.regularizers.l2(L2),
activation=activation_func,
)(x)
x = BatchNormalization()(x)
x = Dense(
16,
# use_bias = True,
kernel_regularizer=tf.keras.regularizers.l2(L2),
activation=activation_func,
)(x)
x = BatchNormalization()(x)
x = Dense(
1,
# use_bias = True,
# kernel_regularizer = tf.keras.regularizers.l2(L2),
activation="sigmoid",
)(x)
model = Model(inputs, x)
return model
architecture = nn_model()
architecture.summary()
# Defining model parameters...
BATCH_SIZE = 2048
EPOCHS = 512
EPOCHS_COSINEDECAY = 512
DIAGRAMS = True
USE_PLATEAU = False
INFERENCE = False
VERBOSE = 0
TARGET = "target"
# Defining model training function...
def fit_model(X_train, y_train, X_val, y_val, run=0):
""" """
lr_start = 0.01
start_time = datetime.datetime.now()
scaler = StandardScaler()
# scaler = RobustScaler()
# scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
epochs = EPOCHS
lr = ReduceLROnPlateau(monitor="val_loss", factor=0.7, patience=4, verbose=VERBOSE)
es = EarlyStopping(
monitor="val_loss",
patience=12,
verbose=1,
mode="min",
restore_best_weights=True,
)
tm = tf.keras.callbacks.TerminateOnNaN()
callbacks = [lr, es, tm]
# Cosine Learning Rate Decay
if USE_PLATEAU == False:
epochs = EPOCHS_COSINEDECAY
lr_end = 0.0002
def cosine_decay(epoch):
if epochs > 1:
w = (1 + math.cos(epoch / (epochs - 1) * math.pi)) / 2
else:
w = 1
return w * lr_start + (1 - w) * lr_end
lr = LearningRateScheduler(cosine_decay, verbose=0)
callbacks = [lr, tm]
model = nn_model()
optimizer_func = tf.keras.optimizers.Adam(learning_rate=lr_start)
loss_func = tf.keras.losses.BinaryCrossentropy()
model.compile(optimizer=optimizer_func, loss=loss_func)
X_val = scaler.transform(X_val)
validation_data = (X_val, y_val)
history = model.fit(
X_train,
y_train,
validation_data=validation_data,
epochs=epochs,
verbose=VERBOSE,
batch_size=BATCH_SIZE,
shuffle=True,
callbacks=callbacks,
)
history_list.append(history.history)
print(f'Training loss:{history_list[-1]["loss"][-1]:.5f}')
callbacks, es, lr, tm, history = None, None, None, None, None
y_val_pred = model.predict(X_val, batch_size=BATCH_SIZE, verbose=VERBOSE)
score = roc_auc_score(y_val, y_val_pred)
print(
f"Fold {run}.{fold} | {str(datetime.datetime.now() - start_time)[-12:-7]}"
f"| AUC: {score:.5f}"
)
score_list.append(score)
tst_data_scaled = scaler.transform(tst_data[features])
tst_pred = model.predict(tst_data_scaled)
predictions.append(tst_pred)
return model
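# A quick illustration of the cosine schedule defined inside fit_model (a sketch; assumes lr_start=0.01, lr_end=0.0002 and epochs=512 as set above):
# import math
# for e in [0, 128, 256, 384, 511]:
#     w = (1 + math.cos(e / 511 * math.pi)) / 2
#     print(f"epoch {e}: lr = {w * 0.01 + (1 - w) * 0.0002:.5f}")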
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import roc_auc_score, roc_curve
import math
# Create empty lists to store NN information...
history_list = []
score_list = []
predictions = []
# Define kfolds for training purposes...
kf = StratifiedKFold(n_splits=10)
for fold, (trn_idx, val_idx) in enumerate(
kf.split(trn_data[features], trn_data[TARGET])
):
X_train, X_val = trn_data.iloc[trn_idx][features], trn_data.iloc[val_idx][features]
y_train, y_val = trn_data.iloc[trn_idx][TARGET], trn_data.iloc[val_idx][TARGET]
fit_model(X_train, y_train, X_val, y_val)
print(f"OOF AUC: {np.mean(score_list):.5f}")
# Populate the predictions on the submission dataset and create an output file
sub_data["target"] = np.array(predictions).mean(axis=0)
sub_data.to_csv("keras_dnn_submission.csv", index=False)
sub_data
# ---
# ## Mandate 2
# InferSent is a sentence embeddings method that provides semantic representations for English sentences. It is trained on natural language inference data and generalizes well to many different tasks.
# In this notebook InferSent is pretrained on GloVe.
import warnings
warnings.filterwarnings("ignore")
import pickle
import numpy as np
import pandas as pd
import json
from textblob import TextBlob
import nltk
from scipy import spatial
import torch
import spacy
en_nlp = spacy.load("en_core_web_sm")
nltk.download("punkt")
t1 = pd.read_json(r"/kaggle/input/nlpproject/train-v1.1.json")
queans = pd.read_csv(r"/kaggle/input/answer/Book1.csv")
queans = queans.drop(
["Unnamed: 3", "Unnamed: 4", "Unnamed: 5", "Unnamed: 6", "Unnamed: 7"], axis=1
)
queans
verses = queans["Answer"].tolist()
verses
# **Converting the JSON into a DataFrame**
contexts = []
answersText = []
questions = []
answersStart = []
for i in range(t1.shape[0]):
topic = t1.iloc[i, 0]["paragraphs"]
# print(topic)
for subPara in topic:
for q in subPara["qas"]:
# print(q)
questions.append(q["question"])
answersStart.append(q["answers"][0]["answer_start"])
answersText.append(q["answers"][0]["text"])
contexts.append(subPara["context"])
df = pd.DataFrame(
{
"context": contexts,
"question": questions,
"answer_start": answersStart,
"text": answersText,
}
)
df.shape
df.dropna(inplace=True)
paras = list(df["context"].drop_duplicates().reset_index(drop=True))
blob = TextBlob(" ".join(paras))
sentences = [item.raw for item in blob.sentences]
senpdf = pd.DataFrame(sentences)
senpdf[0]
sen = senpdf[0].tolist()
import shutil
# here we need to restructure the working directory so that the script imports work properly
shutil.copytree("/kaggle/input/infersent/", "/kaggle/working/infersent")
# TODO: add encoder to dataset as well
# If this cell freezes, probably you haven't enabled Internet access for the notebook
# ## Load Model
model_version = 1
MODEL_PATH = "encoder/infersent%s.pkl" % model_version
W2V_PATH = "/kaggle/input/glove-840b-300d/glove.840B.300d.txt"
VOCAB_SIZE = 1e5 # Load embeddings of VOCAB_SIZE most frequent words
USE_CUDA = False # Keep it on CPU if False, otherwise will put it on GPU
from models import InferSent
params_model = {
"bsize": 64,
"word_emb_dim": 300,
"enc_lstm_dim": 2048,
"pool_type": "max",
"dpout_model": 0.0,
"version": model_version,
}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
# # **Building a vocabulary for the InferSent model**
model = model.cuda() if USE_CUDA else model
model.set_w2v_path(W2V_PATH)
# model.build_vocab_k_words(K=VOCAB_SIZE)
model.build_vocab(verses, tokenize=True)
dict_embeddings = {}
for i in range(len(verses)):
# print(i)
dict_embeddings[verses[i]] = model.encode([verses[i]], tokenize=True)
dict_embeddings[verses[0]]  # look up the embedding of the first verse
# > **Embeddings are stored in a pickle file**
question = queans["Question"].tolist()
model.build_vocab(question, tokenize=True)
for i in range(len(question)):
# print(i)
dict_embeddings[question[i]] = model.encode([question[i]], tokenize=True)
def process_data(df):
# print("step 1")
# df['sentences'] = df['Answer'].apply(lambda x: [item.raw for item in TextBlob(x).sentences])
print("step 1")
df["sent_emb"] = df["sentences"].apply(
lambda x: [
dict_embeddings[item][0] if item in dict_embeddings else np.zeros(4096)
for item in x
]
)
print("step 2")
df["quest_emb"] = df["Question"].apply(
lambda x: dict_embeddings[x] if x in dict_embeddings else np.zeros(4096)
)
return df
dict_embeddings.keys()
queans = queans.assign(sentences="NAN")
for i in range(65):
queans["sentences"][i] = verses
queans = process_data(queans)
queans.head()
queans["sentences"][0]
# Inspect a couple of the stored sentence-embedding lists.
queans["sent_emb"][5]
queans["sent_emb"][0]
def similarity_cosine(x):
cosine_distance = []
for embeding in x["sent_emb"]:
cosine_distance.append(spatial.distance.cosine(embeding, x["quest_emb"][0]))
return cosine_distance
def predict_id(distances):
return np.argmin(distances)
def predictions(train):
train["cosine_sim"] = train.apply(similarity_cosine, axis=1)
train["diff"] = (train["quest_emb"] - train["sent_emb"]) ** 2
train["euclidean_dis"] = train["diff"].apply(lambda x: list(np.sum(x, axis=1)))
del train["diff"]
train["pred_idx_euc"] = train["euclidean_dis"].apply(lambda y: predict_id(y))
train["pred_idx_cos"] = train["cosine_sim"].apply(lambda y: predict_id(y))
return train
predicted = predictions(queans)
predicted.head(50)
import numpy as np, pandas as pd
import ast
from sklearn import linear_model
from sklearn import metrics
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
import spacy
from nltk import Tree
en_nlp = spacy.load("en_core_web_sm")
from nltk.stem.lancaster import LancasterStemmer
st = LancasterStemmer()
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
def update_dataframe(data):
train = pd.DataFrame()
for k in range(len(data["euclidean_dis"])):
dis = ast.literal_eval(str(data["euclidean_dis"][k]))
for i in range(len(dis)):
train.loc[k, "column_euc_" + "%s" % i] = dis[i]
# for k, dis in enumerate(data["euclidean_dis"]):
# distances = ast.literal_eval(str(dis))
# column_names = [f"column_euc_{i}" for i in range(len(distances))]
# train.loc[k, column_names] = distances
# print("Finished")
for k in range(len(data["cosine_sim"])):
dis = ast.literal_eval(str(data["cosine_sim"][k]).replace("nan", "1"))
for i in range(len(dis)):
train.loc[k, "column_cos_" + "%s" % i] = dis[i]
# for k, sim in enumerate(data["cosine_sim"]):
# sim_list = [float(x) if x != "nan" else 1 for x in sim.split(",")]
# column_names = [f"column_cos_{i}" for i in range(len(sim_list))]
# train.loc[k, column_names] = sim_list
train["target"] = data["target"]
return train
train = update_dataframe(queans)
train
train.apply(max, axis=0)
subset1 = train.iloc[:, :10].fillna(60)
subset2 = train.iloc[:, 10:].fillna(1)
train2 = pd.concat([subset1, subset2], axis=1)
train2 = train2.reindex(subset1.index)
train2.apply(max, axis=0)
scaler = MinMaxScaler()
X = scaler.fit_transform(train2.iloc[:, :-1])
train_x, test_x, train_y, test_y = train_test_split(
X, train.iloc[:, -1], train_size=0.8, random_state=5
)
train_y
mul_lr = linear_model.LogisticRegression(multi_class="multinomial", solver="newton-cg")
mul_lr.fit(train_x, train_y)
print(
"Multinomial Logistic regression Train Accuracy : ",
metrics.accuracy_score(train_y, mul_lr.predict(train_x)),
)
print(
"Multinomial Logistic regression Test Accuracy : ",
metrics.accuracy_score(test_y, mul_lr.predict(test_x)),
)
rf = RandomForestClassifier(min_samples_leaf=8, n_estimators=60)
rf.fit(train_x, train_y)
print(
"Random Forest Train Accuracy : ",
metrics.accuracy_score(train_y, rf.predict(train_x)),
)
print(
"Random Forest Test Accuracy : ", metrics.accuracy_score(test_y, rf.predict(test_x))
)
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
estimator = []
estimator.append(
("LR", LogisticRegression(solver="lbfgs", multi_class="multinomial", max_iter=200))
)
estimator.append(("SVC", SVC(gamma="auto", probability=True)))
estimator.append(("DTC", DecisionTreeClassifier()))
# Voting Classifier with hard voting
vot_hard = VotingClassifier(estimators=estimator, voting="hard")
vot_hard.fit(train_x, train_y)
y_pred = vot_hard.predict(test_x)
# using the accuracy_score metric to evaluate accuracy
from sklearn.metrics import accuracy_score
score = accuracy_score(test_y, y_pred)
print("Hard Voting Score %f" % score)
with open("/kaggle/working/dict_embeddings1.pickle", "wb") as handle:
pickle.dump(dict_embeddings, handle)
with open("/kaggle/input/dictembeddings/dict_embeddings1.pickle", "rb") as f:
d1 = pickle.load(f)
dict_emb = dict(d1)
len(dict_emb)
def get_target(x):
idx = -1
for i in range(len(x["sentences"])):
if x["text"] in x["sentences"][i]:
idx = i
return idx
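# A tiny illustration of get_target on a hypothetical row (not part of the dataset):
# row = {"text": "Paris", "sentences": ["Berlin is in Germany.", "Paris is in France."]}
# get_target(row)  # -> 1, the index of the sentence that contains the answer text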
def process_data(train):
print("step 1")
train["sentences"] = train["context"].apply(
lambda x: [item.raw for item in TextBlob(x).sentences]
)
print("step 2")
train["target"] = train.apply(get_target, axis=1)
print("step 3")
train["sent_emb"] = train["sentences"].apply(
lambda x: [
dict_emb[item][0] if item in dict_emb else np.zeros(4096) for item in x
]
)
print("step 4")
train["quest_emb"] = train["question"].apply(
lambda x: dict_emb[x] if x in dict_emb else np.zeros(4096)
)
return train
train = process_data(df)
train.head(3)
def cosine_sim(x):
li = []
for item in x["sent_emb"]:
li.append(spatial.distance.cosine(item, x["quest_emb"][0]))
return li
def pred_idx(distances):
return np.argmin(distances)
def predictions(train):
train["cosine_sim"] = train.apply(cosine_sim, axis=1)
train["diff"] = (train["quest_emb"] - train["sent_emb"]) ** 2
train["euclidean_dis"] = train["diff"].apply(lambda x: list(np.sum(x, axis=1)))
del train["diff"]
print("cosine start")
train["pred_idx_cos"] = train["cosine_sim"].apply(lambda x: pred_idx(x))
train["pred_idx_euc"] = train["euclidean_dis"].apply(lambda x: pred_idx(x))
return train
predicted = predictions(train.iloc[0:80000])
predicted.head()
def accuracy(target, predicted):
acc = (target == predicted).sum() / len(target)
return acc
print(accuracy(predicted["target"], predicted["pred_idx_euc"]))
print(accuracy(predicted["target"], predicted["pred_idx_cos"]))
# ---
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import transformers
from transformers import DistilBertTokenizer
from transformers import TFDistilBertForSequenceClassification
from transformers import TextClassificationPipeline
import tensorflow as tf
import pandas as pd
import json
import gc
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
from plotly.offline import iplot
from tqdm import tqdm
# # Importing dataset
import requests
url = "https://storage.googleapis.com/dataset-uploader/bbc/bbc-text.csv"
r = requests.get(url, allow_redirects=True)
open("bbc-news.csv", "wb").write(r.content)
df = pd.read_csv("bbc-news.csv")
df
df.shape
# # Histogram of text frequency
df["count"] = df["text"].apply(lambda x: len(x.split()))
max(df["count"])
df.head()
plt.figure(figsize=(8, 8))
sns.displot(df["count"])
plt.xlim(0, 1000)
plt.xlabel("the no. of words ", fontsize=16)
plt.title("the no. of words in the distribution ", fontsize=18)
plt.show()
# # Bar plot for each news category
category_count = df["category"].value_counts()
categories = category_count.index
categories
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
sns.barplot(x=category_count.index, y=category_count)
for a, p in enumerate(ax.patches):
ax.annotate(
f"{categories[a]}\n" + format(p.get_height(), ".0f"),
xy=(p.get_x() + p.get_width() / 2.0, p.get_height()),
xytext=(0, -25),
size=13,
color="white",
ha="center",
va="center",
textcoords="offset points",
bbox=dict(boxstyle="round", facecolor="none", edgecolor="white", alpha=0.5),
)
plt.xlabel("Categories", size=15)
plt.ylabel("The Number of News Articles", size=15)
plt.title("The number of news articles by category", size=15)
plt.show()
df["category"].unique()
# Encoding category
df["encoded_text"] = df["category"].astype("category").cat.codes
df.head()
table_lookup = pd.DataFrame(df.encoded_text.unique(), df.category.unique())
table_lookup = table_lookup.sort_values(by=[0])
table_lookup
data_texts = df["text"].to_list()
data_labels = df["encoded_text"].to_list()
# Train Test split
train_texts, val_texts, train_labels, val_labels = train_test_split(
data_texts, data_labels, test_size=0.2, random_state=0
)
train_texts, test_texts, train_labels, test_labels = train_test_split(
train_texts, train_labels, test_size=0.1, random_state=0
)
len(train_texts)
len(val_texts)
len(test_texts)
# # Model Definition
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
test_encodings = tokenizer(test_texts, truncation=True, padding=True)
tokenizer.vocab_size, tokenizer.model_max_length, tokenizer.model_input_names
# 512 tokens is the maximum input length the transformer tokenizer can handle
# ### Taking an example to show preprocessing
example = "Sample Text"
example_encoded_text = tokenizer(example)
example_encoded_text
example_tokens = tokenizer.convert_ids_to_tokens(example_encoded_text.input_ids)
tokenizer.convert_tokens_to_string(example_tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
(dict(train_encodings), train_labels)
)
val_dataset = tf.data.Dataset.from_tensor_slices((dict(val_encodings), val_labels))
test_set = tf.data.Dataset.from_tensor_slices((dict(test_encodings), test_labels))
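# A quick sanity check of the tf.data pipeline (a sketch): grab one small batch and inspect its structure.
# batch_features, batch_labels = next(iter(train_dataset.batch(2)))
# print(batch_features["input_ids"].shape, batch_labels)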
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
WANDB_API_KEY = user_secrets.get_secret("WANDB_API_KEY")
os.environ["WANDB_MODE"] = "online"
os.environ["WANDB_API_KEY"] = WANDB_API_KEY
wandb.init(project="huggingface")
# # Fine Tuning Transformer
from transformers import (
TFDistilBertForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from sklearn.metrics import (
ConfusionMatrixDisplay,
confusion_matrix,
accuracy_score,
f1_score,
)
# define performance metrics
def metrics(pred):
label_prediction = pred.label_ids
preds = pred.predictions.argmax(-1)
f1 = f1_score(label_prediction, preds, average="weighted")
acc = accuracy_score(label_prediction, preds)
return {"accuracy": acc, "f1": f1}
training_args = TFTrainingArguments(
output_dir="./results",
num_train_epochs=7,
per_device_train_batch_size=16,
per_device_eval_batch_size=64,
warmup_steps=500,
weight_decay=1e-5,
logging_dir="./logs",
eval_steps=100,
disable_tqdm=False,
evaluation_strategy="epoch", # To calculate metrics per epoch
logging_strategy="epoch",
)
with training_args.strategy.scope():
trainer_model = TFDistilBertForSequenceClassification.from_pretrained(
"distilbert-base-uncased", num_labels=5
)
trainer = TFTrainer(
model=trainer_model,
args=training_args,
compute_metrics=metrics,
train_dataset=train_dataset,
eval_dataset=val_dataset,
)
trainer_model.summary()
trainer.train()
trainer.evaluate()
pred = trainer.predict(val_dataset)
pred.metrics
def plot_cm(y_preds, y_true, labels):
cm = confusion_matrix(y_true, y_preds, normalize="true")
fig, ax = plt.subplots(figsize=(6, 6))
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)
disp.plot(cmap="Blues", values_format=".2f", ax=ax, colorbar=False)
plt.title("Normalized confusion matrix")
plt.show()
pred = trainer.predict(val_dataset)
y_pred = np.argmax(pred.predictions, axis=1)
plot_cm(y_pred, val_labels, table_lookup.values)
# # Saving and Loading the model
save_dir = "./saved_models"
trainer_model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)
os.getcwd()
trainer_model.config.to_json_file(f"{save_dir}/config.json")
tokenizer_fine_tuned = DistilBertTokenizer.from_pretrained(save_dir)
model_fine_tuned = TFDistilBertForSequenceClassification.from_pretrained(save_dir)
# # Testing Process using a single sample
def predict_news(
test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup, library="tf"
):
predict_input = tokenizer_fine_tuned.encode(
test_text, truncation=True, padding=True, return_tensors=library
)
output = model_fine_tuned(predict_input)[0]
prediction_value = tf.argmax(output, axis=1).numpy()[0]
return table_lookup.iloc[prediction_value].name
test_text = test_texts[0]
test_text
predict_news(test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup)
pred = trainer.predict(test_set)
y_pred = np.argmax(pred.predictions, axis=1)
plot_cm(y_pred, test_labels, table_lookup.values)
# # Inferencing with Pytorch
import torch
from transformers import DistilBertForSequenceClassification
tokenizer = DistilBertTokenizer.from_pretrained(save_dir)
model = DistilBertForSequenceClassification.from_pretrained(save_dir, from_tf=True)
predict_input = tokenizer.encode(
test_text, truncation=True, padding=True, return_tensors="pt"
)
output = model(predict_input)
prediction_value = torch.argmax(output[0], dim=1).item()
prediction_value
table_lookup.iloc[prediction_value].name
predict_news(
test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup, library="pt"
)
# # Testing on custom inputs
# sports
test_text = "RCB vs MI Highlights: Virat Kohli, Faf du Plessis help Royal Challengers Bangalore brush aside Mumbai Indians for first win"
predict_news(test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup)
# entertainment
test_text = "Priyanka Chopra and Karan Johar hug each other at NMACC event after Priyanka whispers in husband Nick Jonas’ ear"
test_text = "A star is reborn: Zeenat Aman"
predict_news(test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup)
# tech
test_text = "An AI researcher who has been warning about the technology for over 20 years says we should 'shut it all down,' and issue an 'indefinite and worldwide' ban"
test_text = "Astronomers discover flattest explosion ever seen in space"
test_text = "Indian start-ups may take legal route against Google in-app billing"
predict_news(test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup)
# politics
test_text = "BJP to organise protests across Rajasthan to corner Congress govt"
predict_news(test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup)
# business
test_text = "MPC may raise rate by another 25 bps as inflation woes persist: BS Poll"
test_text = "RBI likely to hike benchmark interest rate by 25 bps on April 6"
predict_news(test_text, tokenizer_fine_tuned, model_fine_tuned, table_lookup)
# # Saving the model
path = "/kaggle/working/BERT"
model.config.to_json_file("config.json")
# ---
import numpy as np
import pandas as pd
TRAIN_PATH = "../input/titanic/train.csv"
TEST_PATH = "../input/titanic/test.csv"
SAMPLE_SUBMISSION_PATH = "../input/titanic/gender_submission.csv"
SUBMISSION_PATH = "submission.csv"
ID = "PassengerId"
TARGET = "Survived"
train = pd.read_csv(TRAIN_PATH)
train.head()
# # 1. Using map
train = pd.read_csv(TRAIN_PATH)
train["Sex"] = train["Sex"].map({"male": 0, "female": 1})
train.head()
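# Note: map() sends any value not present in the dict to NaN, so a quick sanity check after mapping can help (a sketch):
# assert train["Sex"].isna().sum() == 0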
# # 2. One-Hot Encoding
# ## 2-1. Using get_dummies
train = pd.read_csv(TRAIN_PATH)
train = pd.get_dummies(train, columns=["Sex"], prefix="OneHot")
train.head()
# ## 2-2. Using OneHotEncoder
from sklearn.preprocessing import OneHotEncoder
train = pd.read_csv(TRAIN_PATH)
oneHotEncoder = OneHotEncoder(sparse=False)
oneHotEncoder.fit(train[["Sex"]])
# OneHotEncoder orders categories alphabetically, so the first output column is "female" and the second is "male"
train_sex = pd.DataFrame(
oneHotEncoder.transform(train[["Sex"]]), columns=["OneHot_female", "OneHot_male"]
)
train = train.drop(columns="Sex")
train = pd.concat([train, train_sex], axis=1)
train.head()
# ---
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Use this for multiple df outputs from same cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# filter warnings
# not recommended for beginners
import warnings
warnings.filterwarnings("ignore")
df_train = pd.read_csv("../input/dapprojekt23/train.csv")
df_test = pd.read_csv("../input/dapprojekt23/test.csv")
# # Preparation of dataset
# Based on my first mandatory notebook, I will perform operations to obtain a meaningful dataset without missing observations and negative values.
mask = df_train.loc[:, "total"] < 0
df_train.loc[mask, "total"] = df_train.loc[mask, "total"] + 65536
mask = df_test.loc[:, "total"] < 0
df_test.loc[mask, "total"] = df_test.loc[mask, "total"] + 65536
# replacing zero values in feature total via interpolation - train
df_train.replace({"total": 0}, np.nan, inplace=True)
df_train["total"].interpolate(method="pchip", inplace=True)
df_train = df_train.astype({"total": int})
# replacing zero values in feature total via interpolation - test
df_test.replace({"total": 0}, np.nan, inplace=True)
df_test["total"].interpolate(method="pchip", inplace=True)
df_test = df_test.astype({"total": int})
# dropping monotonic feature id
df_train.drop(columns=["id"], inplace=True)
def process_missing(df, train=False):
grouped = df.groupby("machine_name")
missing_ones = pd.DataFrame()
for name, group in grouped:
group = group.sort_values(by=["day"])
group["day_diff"] = group["day"].diff()
diff_greater_than_one = group[group["day_diff"] > 1]
for index, row in diff_greater_than_one.iterrows():
for i in range(int(row["day"]) - int(row["day_diff"]) + 1, int(row["day"])):
if train:
new_row = {
"day": i,
"broken": np.nan,
"total": np.nan,
"label": row["label"],
"machine_name": row["machine_name"],
}
else:
new_row = {
"day": i,
"broken": np.nan,
"total": np.nan,
"machine_name": row["machine_name"],
}
missing_ones = missing_ones.append(new_row, ignore_index=True)
df = df.append(missing_ones, ignore_index=True)
df.sort_values(by=["machine_name", "day"], ignore_index=True, inplace=True)
df["total"].interpolate(method="pchip", inplace=True)
df["broken"].interpolate(method="pad", inplace=True)
# where interpolation produced broken > total, reset broken to zero
df.loc[df["total"] < df["broken"], "broken"] = 0
if train:
df = df.astype({"day": int, "broken": int, "total": int, "label": int})
else:
df = df.astype({"day": int, "broken": int, "total": int})
return df
df_train = process_missing(df_train, train=True)
# - Also, I will remove the records that occur after the last anomaly for each machine.
def dataset_after_removal(df):
# remove records after the last anomaly for each group
def remove_after_anomaly(group):
last_anomaly_index = group["label"][::-1].idxmax()
return group.loc[:last_anomaly_index]
grouped = df.groupby("machine_name")
df = grouped.apply(remove_after_anomaly)
df = df.reset_index(drop=True)
return df
df_train = dataset_after_removal(df_train)
# # 1. Cross-validation function 1
# > Write a function that receives data as input, does a 5-fold cross-validation split by sample, and tests the following models (with default hyper-parameters): GaussianNB, LogisticRegression, RIPPER, RandomForestClassifier, ExtraTreesClassifier, and XGBClassifier.
# Due to the long runtime, the RIPPER algorithm is omitted in the cross-validation functions. (Assistant Stančin approved)
import wittgenstein as lw
from xgboost import XGBClassifier
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import ExtraTreesClassifier
from tabulate import tabulate
from sklearn.model_selection import KFold
def construct_model(name):
switch = {
"GaussianNB": GaussianNB(),
"LogisticRegression": LogisticRegression(),
"RandomForestClassifier": RandomForestClassifier(),
"XGBClassifier": XGBClassifier(),
"ExtraTreesClassifier": ExtraTreesClassifier(),
"RIPPER": lw.RIPPER(),
}
return switch.get(name, "Invalid input")
def train_by_samples(X, Y, name):
folds_macro = []
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(X):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = Y.iloc[train_index], Y.iloc[test_index]
model = construct_model(name)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
score = f1_score(y_test, y_pred, average="macro")
folds_macro.append(score)
return [
name,
folds_macro[0],
folds_macro[1],
folds_macro[2],
folds_macro[3],
folds_macro[4],
sum(folds_macro) / 5,
]
import time
def cross_validation_function1(df):
df_copied = df.copy(deep=True)
df_copied = df_copied.loc[df_copied.loc[:, "day"] > 365, :]
Y = pd.DataFrame(df_copied["label"].tolist())
df_copied.drop(columns=["machine_name", "label"], inplace=True)
X = pd.DataFrame(df_copied)
table = []
table.append(train_by_samples(X, Y, "GaussianNB"))
table.append(train_by_samples(X, Y, "LogisticRegression"))
table.append(train_by_samples(X, Y, "RandomForestClassifier"))
table.append(train_by_samples(X, Y, "XGBClassifier"))
table.append(train_by_samples(X, Y, "ExtraTreesClassifier"))
# table.append(train_by_samples(X, Y, "RIPPER"))
print(
tabulate(
table,
headers=[
"Algorithm",
"Fold 1",
"Fold 2",
"Fold 3",
"Fold 4",
"Fold 5",
"Average",
],
)
)
# # 2. Run the cross-validation function 1
# > Use the entire dataset. Submit the best model to the Kaggle leaderboard. Write the score obtained on the Public Leaderboard.
# cross_validation_function1(df_train)
def train(model, df):
df_copied = df.copy(deep=True)
df_copied = df_copied.loc[df_copied.loc[:, "day"] > 365, :]
Y = pd.DataFrame(df_copied["label"].tolist())
df_copied.drop(columns=["machine_name", "label"], inplace=True)
X = pd.DataFrame(df_copied)
model.fit(X, Y)
print(f"\tTrain f1 macro: {f1_score(Y, model.predict(X), average='macro')}")
return model
def test(df, model):
df_copied = df.copy(deep=True)
df_copied.drop(columns=["machine_name", "id"], inplace=True)
X_test = pd.DataFrame(df_copied)
Y_pred = model.predict(X_test)
return Y_pred
# model = train(GaussianNB(), df_train)
# Y_pred = test(df_test, model)
# submission = df_test.loc[:,df_test.columns.isin(('id', ))]
# submission = submission.rename(columns={'id':'Id'})
# submission.loc[:,'Predicted'] = Y_pred
# submission.to_csv("submission.csv", index=None)
# submission.head()
# **Leaderboard score: 0.54203**
# # 3. Cross-validation function 2
# > Create a second cross-validation function that receives data as input, does 5-fold cross-validation split by machine (all samples from one machine are in the same fold), and tests the same models as before.
from sklearn.model_selection import GroupKFold
# During the split, GroupKFold ensures that samples from the same group are kept together in the same fold.
def train_by_machines(X, Y, name):
folds_macro = []
gkf = GroupKFold(n_splits=5)
groups = X["machine_name"]
folds_macro = []
for train_index, test_index in gkf.split(X, Y, groups):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = Y.iloc[train_index], Y.iloc[test_index]
model = construct_model(name)
model.fit(X_train.drop("machine_name", axis=1), y_train)
y_pred = model.predict(X_test.drop("machine_name", axis=1))
score = f1_score(y_test, y_pred, average="macro")
folds_macro.append(score)
return [
name,
folds_macro[0],
folds_macro[1],
folds_macro[2],
folds_macro[3],
folds_macro[4],
sum(folds_macro) / 5,
]
def cross_validation_function2(df):
df_copied = df.copy(deep=True)
df_copied = df_copied.loc[df_copied.loc[:, "day"] > 365, :]
Y = pd.DataFrame(df_copied["label"].tolist())
df_copied.drop(columns=["label"], inplace=True)
X = pd.DataFrame(df_copied)
table = []
table.append(train_by_machines(X, Y, "GaussianNB"))
table.append(train_by_machines(X, Y, "LogisticRegression"))
table.append(train_by_machines(X, Y, "RandomForestClassifier"))
table.append(train_by_machines(X, Y, "XGBClassifier"))
table.append(train_by_machines(X, Y, "ExtraTreesClassifier"))
# table.append(train_by_machines(X, Y, "RIPPER"))
print(
tabulate(
table,
headers=[
"Algorithm",
"Fold 1",
"Fold 2",
"Fold 3",
"Fold 4",
"Fold 5",
"Average",
],
)
)
# # 4. Run the cross-validation function 2
# > Use the entire dataset. Submit the best model to the Kaggle leaderboard. Write the score obtained on the Public Leaderboard.
# cross_validation_function2(df_train)
# model = train(GaussianNB(), df_train)
# Y_pred = test(df_test, model)
# submission = df_test.loc[:,df_test.columns.isin(('id', ))]
# submission = submission.rename(columns={'id':'Id'})
# submission.loc[:,'Predicted'] = Y_pred
# submission.to_csv("submission.csv", index=None)
# submission.head()
# **Leaderboard score: 0.54203**
# # 5. New features
# > Create 20 new features. You are free to create whatever features you believe will be good predictors.
# Hints: Simple math operations, probability and statistics, rolling and expanding windows, entropies, …
for df in [df_train, df_test]:
grouped = df.groupby("machine_name")
# Broken ratio
df["broken_ratio"] = grouped["broken"].transform(lambda x: x / x.sum())
# Rolling broken mean
df["rolling_broken_mean"] = grouped["broken"].transform(
lambda x: x.rolling(window=7).mean()
)
df["rolling_broken_mean"] = df["rolling_broken_mean"].fillna(method="bfill")
# Rolling total mean
df["rolling_total_mean"] = grouped["total"].transform(
lambda x: x.rolling(window=7).mean()
)
df["rolling_total_mean"] = df["rolling_total_mean"].fillna(method="bfill")
# Rolling broken sum
df["rolling_broken_sum"] = grouped["broken"].transform(
lambda x: x.rolling(window=7).sum()
)
df["rolling_broken_sum"] = df["rolling_broken_sum"].fillna(method="bfill")
# Rolling total sum
df["rolling_total_sum"] = grouped["total"].transform(
lambda x: x.rolling(window=7).sum()
)
df["rolling_total_sum"] = df["rolling_total_sum"].fillna(method="bfill")
# Rolling broken ratio mean
df["rolling_broken_ratio_mean"] = grouped["broken_ratio"].transform(
lambda x: x.rolling(window=7).mean()
)
df["rolling_broken_ratio_mean"] = df["rolling_broken_ratio_mean"].fillna(
method="bfill"
)
# Expanding broken mean
df["expanding_broken_mean"] = grouped["broken"].transform(
lambda x: x.expanding().mean()
)
# Expanding total mean
df["expanding_total_mean"] = grouped["total"].transform(
lambda x: x.expanding().mean()
)
# Expanding broken sum
df["expanding_broken_sum"] = grouped["broken"].transform(
lambda x: x.expanding().sum()
)
# Expanding total sum
df["expanding_total_sum"] = grouped["total"].transform(
lambda x: x.expanding().sum()
)
# Expanding broken ratio mean
df["expanding_broken_ratio_mean"] = grouped["broken_ratio"].transform(
lambda x: x.expanding().mean()
)
# Broken entropy
alpha = 1e-9
p_broken = grouped["broken"].transform(lambda x: (x + alpha) / (x.sum() + alpha))
df["broken_entropy"] = -p_broken * np.log2(p_broken)
# Total entropy
p_total = grouped["total"].transform(lambda x: (x + alpha) / (x.sum() + alpha))
df["total_entropy"] = -p_total * np.log2(p_total)
# Broken ratio entropy
p_broken_ratio = grouped["broken_ratio"].transform(
lambda x: (x + alpha) / (x.sum() + alpha)
)
df["broken_ratio_entropy"] = -p_broken_ratio * np.log2(p_broken_ratio)
# Broken z-score
df["broken_zscore"] = grouped["broken"].transform(
lambda x: (x - x.mean()) / x.std()
)
# Total z-score
df["total_zscore"] = grouped["total"].transform(lambda x: (x - x.mean()) / x.std())
# Broken ratio z-score
broken_ratio = df["broken"] / df["total"]
df["broken_ratio_zscore"] = grouped["broken_ratio"].transform(
lambda x: (x - x.mean()) / x.std()
)
# Broken min-max
df["broken_minmax"] = grouped["broken"].transform(
lambda x: (x - x.min()) / (x.max() - x.min())
)
# Total min-max
df["total_minmax"] = grouped["total"].transform(
lambda x: (x - x.min()) / (x.max() - x.min())
)
# Broken ratio min-max
broken_ratio = df["broken"] / df["total"]
df["broken_ratio_minmax"] = grouped["broken_ratio"].transform(
lambda x: (x - x.min()) / (x.max() - x.min())
)
# # 6. Run cross-validation functions
# > Run cross-validation function 1 and 2. Use features created in the previous step. Submit the best model from function 1 and the best model from function 2.
# cross_validation_function1(df_train)
# cross_validation_function2(df_train)
# model = train(XGBClassifier(), df_train)
# Y_pred = test(df_test, model)
# submission = df_test.loc[:,df_test.columns.isin(('id', ))]
# submission = submission.rename(columns={'id':'Id'})
# submission.loc[:,'Predicted'] = Y_pred
# submission.to_csv("submission.csv", index=None)
# submission.head()
# **Leaderboard score: 0.67785**
# # 7. Filter method
# > Determine which model has the best results based on the printout of the previous step and use only that model in this step. Use mutual_info_classif, f_classif, or chi2 (from the sklearn library) to rank the features. Take the first 2 best-ranked features and calculate the f1_score of the selected model using 5-fold cross-validation. Then add the next 2 ranked features and repeat the process. Keep adding the next 2 features until all features are included in the dataset. Draw a graph showing the f1_score on the y-axis and the number of features used in the algorithm on the x-axis. What is the optimal choice of the number of features?
from sklearn.feature_selection import mutual_info_classif, f_classif, chi2
df_copied = df_train.copy(deep=True)
df_copied = df_copied.loc[df_copied.loc[:, "day"] > 365, :]
y = pd.DataFrame(df_copied["label"].tolist())
df_copied.drop(columns=["machine_name", "label"], inplace=True)
X = pd.DataFrame(df_copied)
feature_scores = mutual_info_classif(X, y)
sorted_indices = feature_scores.argsort()[::-1]
ranked_features = X.columns[sorted_indices]
ranked_features = ranked_features.to_list()
print(f"Ranked features:\n{ranked_features}")
"""f1_measures = []
for num in range(2, len(ranked_features) + 2, 2):
features = ranked_features[:num]
print(f"Iteration {num // 2} with features: {features}\n")
features.append('machine_name')
df_copied = df_train.copy(deep=True)
df_copied = df_copied.loc[df_copied.loc[:,'day']>365, :]
X = pd.DataFrame(df_copied[features])
table = []
table.append(train_by_machines(X, y, "XGBClassifier"))
print(tabulate(table, headers=["Algorithm", "Fold 1", "Fold 2", "Fold 3", "Fold 4", "Fold 5", "Average"]))
print("\n\n")
f1_measures.append(table[0][-1])"""
"""import matplotlib.pyplot as plt
print(f1_measures)
plt.plot(range(1, len(f1_measures)+1), f1_measures)
plt.title("The dependency of f1 measures on the number of features")
plt.show()"""
# # 8. Run the cross-validation functions
# > Use features selected in the previous step. Comment on the changes in the evaluation metrics. Submit the best model to the Kaggle leaderboard. Write the score obtained on the Public Leaderboard. Based on the results determine which cross-validation function is better and why. Is the Public Leaderboard score improved? Does cross-validation on the training set linearly correlate with Public Leaderboard scores?
features = ranked_features[:22]
features.append("machine_name")
features.append("label")
# cross_validation_function1(df_train[features])
# cross_validation_function2(df_train[features])
# Based on the previous cross-validation functions, XGBClassifier is the best model in both of them.
# model = train(XGBClassifier(), df_train)
# Y_pred = test(df_test, model)
# submission = df_test.loc[:,df_test.columns.isin(('id', ))]
# submission = submission.rename(columns={'id':'Id'})
# submission.loc[:,'Predicted'] = Y_pred
# submission.to_csv("submission.csv", index=None)
# submission.head()
# **Leaderboard score: 0.64488**
# To test for linear correlation, I will also submit the second-best (RandomForestClassifier) and third-best (ExtraTreesClassifier) models and observe the results.
model = train(RandomForestClassifier(), df_train)
Y_pred = test(df_test, model)
submission = df_test.loc[:, df_test.columns.isin(("id",))]
submission = submission.rename(columns={"id": "Id"})
submission.loc[:, "Predicted"] = Y_pred
submission.to_csv("submission.csv", index=None)
submission.head()
|
import polars as pl
# Global fallback: the 12 most purchased articles across all transactions
most_popular_articles = (
pl.read_csv(
"/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv",
columns=["article_id"],
)
.get_column("article_id")
.value_counts()
.sort("counts", descending=True)
.head(12)
.get_column("article_id")
.to_list()
)
# Each customer's personal top-12 articles by purchase count
most_popular_articles_per_user = (
pl.read_csv(
"/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv",
columns=["customer_id", "article_id"],
)
.groupby(["customer_id", "article_id"])
.agg(pl.count())
.groupby("customer_id")
.agg(pl.col("article_id").sort_by("count", descending=True).head(12))
)
# Build the submission: personal top-12 first, padded with the global top-12, then joined as a string
improved_submission = (
pl.read_csv(
"/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv",
columns=["customer_id"],
)
.join(most_popular_articles_per_user, on="customer_id", how="left")
.with_columns(
[
pl.col("article_id").fill_null([]).alias("personal_top_<=12"),
pl.lit([most_popular_articles]).alias("global_top_12"),
]
)
.with_columns(
pl.col("personal_top_<=12")
.arr.concat(pl.col("global_top_12"))
.arr.head(12)
.alias("prediction")
)
.select(
[
pl.col("customer_id"),
pl.col("prediction")
.arr.eval("0" + pl.element().cast(pl.Utf8))
.arr.join(" "),
]
)
)
improved_submission.write_csv("submission.csv")
|
import numpy as np
import pandas as pd
import os
import time
import requests
import tarfile
# **load the netfilx data**
path = "../input/netflix-shows/netflix_titles.csv"
net_df = pd.read_csv(path)
net_df.head()
# ## The Netflix data is good on its own, but it would be more useful with IMDB ratings,
# ## so I downloaded the IMDB ratings as well
# ### Downloading the imdb data
urls = [
"https://datasets.imdbws.com/title.ratings.tsv.gz",
"https://datasets.imdbws.com/title.basics.tsv.gz",
]
for url in urls:
r = requests.get(url)
with open(url.split("/")[-1], "wb") as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
# ## It turns out Kaggle does not require unzipping downloaded files; pandas can read the .gz archives directly, so no unzipping is needed
basics_df = pd.read_csv("title.basics.tsv.gz", sep="\t")
ratings_df = pd.read_csv("title.ratings.tsv.gz", sep="\t")
basics_df.head()
ratings_df.head()
# ### Now that the data is loaded it needs to be checked and cleaned
# check for duplicates in the data
# the timing code only measures how long this print statement takes to run; it can be ignored
start_time = time.time()
print(ratings_df.duplicated().sum(), basics_df.duplicated().sum())
end_time = time.time()
print(f"it took :{end_time-start_time}")
# ### The data looks clean so It's time to merge the ratings with the titles data
# the basics file is used to link the ratings with the tv shows and movies
rated_titles = pd.merge(
basics_df.set_index("tconst"),
ratings_df.set_index("tconst"),
left_index=True,
right_index=True,
how="inner",
).drop_duplicates()
rated_titles.sample(5)
net_df.info()
net_df.sample(5)
rated_titles.info()
# ## Before merging this data with the Netflix data, it needs some cleaning and a few data-type changes
# * convert all titles to lower case
# * fix the year columns
rated_titles_clean = rated_titles.copy()
net_clean = net_df.copy()
# lower case titles
net_clean["title"] = net_clean["title"].str.lower()
rated_titles_clean["primaryTitle"] = rated_titles_clean["primaryTitle"].str.lower()
rated_titles_clean["originalTitle"] = rated_titles_clean["originalTitle"].str.lower()
# datetime dates
net_clean["release_year"] = pd.to_datetime(net_clean["release_year"])
rated_titles_clean["startYear"] = rated_titles_clean["startYear"].astype(int)
rated_titles_clean["endYear"] = rated_titles_clean["endYear"].astype(int)
# rated_titles_clean['startYear'] = pd.to_datetime(rated_titles_clean['startYear'])
# rated_titles_clean['endYear'] = pd.to_datetime(rated_titles_clean['endYear'])
|
from IPython.display import Image
# First notebook: https://www.kaggle.com/kasevgen/transfer-style
# All image triplets shown here were produced in that notebook.
# ## The loss function for neural style transfer is the sum of 3 terms
# Loss = w1 * distance(content(generate_image) - content(base_image)) +
#        w2 * distance(style(generate_image) - style(style_image)) +
#        w3 * total_variation_loss(generate_image),
# where distance() is the l2 norm,
# style(image) is the Gram matrix of the VGG19 feature map for the input image,
# content(image) is the VGG19 feature map obtained for the image.
# The coefficient w1 penalizes loss of content between the generated and source images,
# w2 penalizes loss of style between the generated and style images,
# w3 penalizes loss of variation (the mosaic effect).
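# Below is a minimal numpy sketch of these three terms. It is illustrative only: the actual notebook uses
# VGG19 feature maps, which are assumed here to be plain arrays F_gen, F_content and F_style.
import numpy as np


def gram_matrix(feature_map):
    # (height, width, channels) -> channel-by-channel correlation matrix
    f = feature_map.reshape(-1, feature_map.shape[-1])
    return f.T @ f


def style_transfer_loss(F_gen, F_content, F_style, generated_image, w1=1.0, w2=1.0, w3=1.0):
    content_loss = np.sum((F_gen - F_content) ** 2)  # l2 distance between content feature maps
    style_loss = np.sum((gram_matrix(F_gen) - gram_matrix(F_style)) ** 2)  # l2 distance between Gram matrices
    tv_loss = np.sum(np.abs(np.diff(generated_image, axis=0))) + np.sum(
        np.abs(np.diff(generated_image, axis=1))
    )  # total variation of the generated image (the blur/mosaic trade-off discussed below)
    return w1 * content_loss + w2 * style_loss + w3 * tv_loss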
# ## The original image triplet
Image(filename="../input/transfer-style-examples/1 (31).png", width=4000, height=3000)
# ## Varying the 3rd parameter
Image(filename="../input/transfer-style-examples/1 (41).png", width=4000, height=3000)
Image(filename="../input/transfer-style-examples/1 (42).png", width=4000, height=3000)
Image(filename="../input/transfer-style-examples/1 (34).png", width=4000, height=3000)
# Increasing the 3rd parameter, which controls the variation loss, blurs the generated image.
# Conversely, going from the last photo back to the first, decreasing this parameter makes the resulting image look more "wrinkled".
# ## Varying the 2nd parameter
Image(filename="../input/transfer-style-examples/1 (35).png", width=4000, height=3000)
Image(filename="../input/transfer-style-examples/1 (37).png", width=4000, height=3000)
# How strongly the style dominates the photo depends on the absolute value of the style weight.
# Looking at the colour palette, the second photo is in warmer tones than the first.
# This can be explained by the style image (the Van Gogh painting) being painted in cold tones, while the source photo is in warm tones.
# Consequently, when a "filter" with a large style weight is applied to the source photo, its warm tones shift towards colder ones.
# So the style component also decides the overall colour tone of the resulting photo.
# In the second image the style-loss penalty is minimal, so the result was expected to resemble the source image (as if no minimization were applied), since at the first iteration the generated image equals the source image.
# ## Varying the 1st parameter
Image(filename="../input/transfer-style-examples/1 (38).png", width=4000, height=3000)
Image(filename="../input/transfer-style-examples/1 (40).png", width=4000, height=3000)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
import librosa as lb
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Handling Class Imbalance
diagnosis = pd.read_csv(
"/kaggle/input/respiratory-sound-database/respiratory_sound_database/Respiratory_Sound_Database/patient_diagnosis.csv",
names=["pid", "disease"],
)
diagnosis.head()
# * The following plot shows that the classes are **imbalanced**, so we must split them into train and validation sets in a stratified way
sns.countplot(diagnosis.disease)
plt.xticks(rotation=90)
# > We will extract the **Id** of each processed audio file and merge it with its **class label**, so that we can split the files into train and validation folders in a **stratified manner**
import os
def extractId(filename):
return filename.split("_")[0]
path = "/kaggle/input/preprocessing-part-1/processed_audio_files/"
length = len(os.listdir(path))
index = range(length)
i = 0
files_df = pd.DataFrame(index=index, columns=["pid", "filename"])
for f in os.listdir(path):
files_df.iloc[i]["pid"] = extractId(f)
files_df.iloc[i]["filename"] = f
i += 1
files_df.head()
files_df.pid = files_df.pid.astype(
"int64"
) # both pid's must be of same dtype for them to merge
data = pd.merge(files_df, diagnosis, on="pid")
data.head()
sns.countplot(data.disease)
plt.xticks(rotation=90)
# > We can see that classes are very **skewed**
from sklearn.model_selection import train_test_split
Xtrain, Xval, ytrain, yval = train_test_split(
data, data.disease, stratify=data.disease, random_state=42, test_size=0.25
)
# * Above i used the **stratify** arg of **train_test_split** and set it to disease to stratify data based on **class labels**
Xtrain.disease.value_counts() / Xtrain.shape[0]
Xval.disease.value_counts() / Xval.shape[0]
# > The percentage of each class label is the same in both train and val, as we can see above
# * We did this so the model can **learn and validate on every class**; otherwise we might end up training only on COPD while having no COPD samples in the validation set
# # Visualizing MFCCS
# > I have used one file as an example here.
path = "../input/preprocessing-part-1/processed_audio_files/"
import librosa.display
file = path + Xtrain.iloc[193].filename
sound, sample_rate = lb.load(file)
mfccs = lb.feature.mfcc(y=sound, sr=sample_rate, n_mfcc=40)
fig, ax = plt.subplots()
img = librosa.display.specshow(mfccs, x_axis="time", ax=ax)
fig.colorbar(img, ax=ax)
ax.set(title="MFCC")
# * Here I export the **train & validation datasets** so that I can reuse the **filenames** in another notebook, where I will try other **feature extraction** methods (including MFCCs) and keep one or more of them based on **performance**.
Xtrain.to_csv("train.csv")
Xval.to_csv("val.csv")
import shutil
shutil.make_archive(
"processed_audio_files", "zip", "/kaggle/input/preprocessing-part-1"
)
from IPython.display import FileLink
FileLink(r"/kaggle/working/processed_audio_files.zip")
|
# **Handling null values**
import pandas as pd
d = {
"Students": ["Bala", "Raja", "Aalini", "Nantha", None],
"Marks": [45, 30, 25, 20, None],
"Age": [12, None, 10, 30, None],
}
d
a = pd.DataFrame(d)
a
a.dropna()  # drop every row that contains at least one null value
a.dropna(how="all")  # drop only rows where all values are null
a.fillna(20)  # return a copy with nulls replaced by 20
a.dropna(how="all", inplace=True)  # drop the all-null row in place
a
a.fillna(20)  # fill the remaining nulls with 20
|
# # Zomato dataset
# # Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# # Reading CSV
data = pd.read_csv("/kaggle/input/zomato-bangalore-restaurants/zomato.csv")
# ## 1.head()
# ### Return the first n rows
data.head()
# ## 2.shape
# ### Return a tuple representing the dimensionality of the DataFrame.
data.shape
# ## 3.columns
# ### The column labels of the DataFrame.
data.columns
|
# # Table of Contents
# 1. Introduction
# 2. Installing Libraries
# 3. Importing Data
# 4. Missing Value Analysis
# 5. Exploratory Data Analysis
# 6. Feature Engineering
# 7. Modeling
# 8. Hyperparameter Tuning
# 9. Prediction
# # Introduction
# Diabetes is a chronic medical condition that affects millions of people worldwide. It is a complex disease that is characterized by high levels of glucose in the blood due to the body's inability to produce or use insulin effectively. The management of diabetes is a major healthcare challenge, with patients requiring careful monitoring and treatment to prevent complications such as heart disease, nerve damage, and kidney failure. As data scientists, we can use the power of machine learning to analyze large amounts of data related to diabetes and develop predictive models to improve patient outcomes. Feature engineering plays a critical role in this process by selecting and transforming relevant variables in the dataset to enhance the performance of our models. In this project, we will explore different feature engineering techniques and evaluate their impact on the accuracy of our diabetes prediction model.
# # Features
# Pregnancies : Number of times pregnant
# Glucose : Plasma glucose concentration over 2 hours in an oral glucose tolerance test
# BloodPressure : Diastolic blood pressure (mmHg)
# SkinThickness : Triceps skin fold thickness (mm)
# Insulin : 2-Hour serum insulin (mcU/ml)
# BMI : Body Mass Index (weight in kg/(height in m)^2)
# DiabetesPedigreeFunction : a function which scores likelihood of diabetes based on family history
# Age : age in years
# Outcome : Class variable (0 = non-diabetic, 1 = diabetic)
# # Exploratory Data Analysis
# Firstly, import the packages:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
)
from sklearn.model_selection import train_test_split
# from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.preprocessing import LabelEncoder
# from sklearn.preprocessing import StandardScaler
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 170)
df = pd.read_csv("/kaggle/input/diabetes-dataset/diabetes.csv")
def check_df(dataframe, head=5):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(head))
print("##################### Tail #####################")
print(dataframe.tail(head))
print("##################### Describe #####################")
print(dataframe.describe())
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
check_df(df)
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("##########################################")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show()
for col in cat_cols:
cat_summary(df, col)
def num_summary(dataframe, numerical_col, plot=False):
quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
print(dataframe[numerical_col].describe(quantiles).T)
if plot:
dataframe[numerical_col].hist(bins=20)
plt.xlabel(numerical_col)
plt.title(numerical_col)
plt.show()
for col in num_cols:
num_summary(df, col, plot=True)
def target_summary_with_num(dataframe, target, numerical_col):
print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")
for col in num_cols:
target_summary_with_num(df, "Outcome", col)
def target_summary_with_cat(dataframe, target, categorical_col):
print(categorical_col)
print(
pd.DataFrame(
{
"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean(),
"Count": dataframe[categorical_col].value_counts(),
"Ratio": 100
* dataframe[categorical_col].value_counts()
/ len(dataframe),
}
),
end="\n\n\n",
)
for col in cat_cols:
target_summary_with_cat(df, "Outcome", col)
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
na_columns = missing_values_table(df, na_name=True)
f, ax = plt.subplots(figsize=[18, 13])
sns.heatmap(df[num_cols].corr(), annot=True, fmt=".2f", ax=ax, cmap="magma")
ax.set_title("Correlation Matrix", fontsize=20)
plt.show()
df.corrwith(df["Outcome"]).sort_values(ascending=False)
# # Base Model
y = df["Outcome"]
X = df.drop("Outcome", axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=17
)
rf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
print(f"Accuracy: {round(accuracy_score(y_pred, y_test), 2)}")
print(f"Recall: {round(recall_score(y_pred,y_test),3)}")
print(f"Precision: {round(precision_score(y_pred,y_test), 2)}")
print(f"F1: {round(f1_score(y_pred,y_test), 2)}")
print(f"Auc: {round(roc_auc_score(y_pred,y_test), 2)}")
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
plt.show()
if save:
plt.savefig("importances.png")
plot_importance(rf_model, X)
zero_columns = [
col
for col in df.columns
if (df[col].min() == 0 and col not in ["Pregnancies", "Outcome"])
]
zero_columns
# 0 values in 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin' and 'BMI' are not physiologically plausible, so we will replace them with NaN and then impute them sensibly. For 'Pregnancies' a 0 simply means no pregnancies, and for 'Outcome' a 0 means the person is not diabetic.
for col in zero_columns:
df[col] = np.where(df[col] == 0, np.nan, df[col])
df.isnull().sum()
na_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
na_columns
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
na_columns = missing_values_table(df, na_name=True)
def missing_vs_target(dataframe, target, na_columns):
temp_df = dataframe.copy()
for col in na_columns:
temp_df[col + "_NA_FLAG"] = np.where(temp_df[col].isnull(), 1, 0)
na_flags = temp_df.loc[:, temp_df.columns.str.contains("_NA_")].columns
for col in na_flags:
print(
pd.DataFrame(
{
"TARGET_MEAN": temp_df.groupby(col)[target].mean(),
"Count": temp_df.groupby(col)[target].count(),
}
),
end="\n\n\n",
)
missing_vs_target(df, "Outcome", na_columns)
for col in zero_columns:
df.loc[df[col].isnull(), col] = df[col].median()
df.isnull().sum()
def outlier_thresholds(dataframe, col_name, q1=0.05, q3=0.95):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
def replace_with_thresholds(dataframe, variable, q1=0.05, q3=0.95):
low_limit, up_limit = outlier_thresholds(dataframe, variable, q1=0.05, q3=0.95)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
for col in df.columns:
print(col, check_outlier(df, col))
for col in df.columns:
print(col, check_outlier(df, col))
if check_outlier(df, col):
replace_with_thresholds(df, col)
# # Feature Engineering
df.loc[(df["Age"] >= 21) & (df["Age"] < 50), "NEW_AGE_CAT"] = "mature"
df.loc[(df["Age"] >= 50), "NEW_AGE_CAT"] = "senior"
df["NEW_BMI"] = pd.cut(
x=df["BMI"],
bins=[0, 18.5, 24.9, 29.9, 100],
labels=["Underweight", "Healthy", "Overweight", "Obese"],
)
df["NEW_GLUCOSE"] = pd.cut(
x=df["Glucose"],
bins=[0, 140, 200, 300],
labels=["Normal", "Prediabetes", "Diabetes"],
)
df.loc[
(df["BMI"] < 18.5) & ((df["Age"] >= 21) & (df["Age"] < 50)), "NEW_AGE_BMI_NOM"
] = "underweightmature"
df.loc[(df["BMI"] < 18.5) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"] = "underweightsenior"
df.loc[
((df["BMI"] >= 18.5) & (df["BMI"] < 25)) & ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_BMI_NOM",
] = "healthymature"
df.loc[
((df["BMI"] >= 18.5) & (df["BMI"] < 25)) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"
] = "healthysenior"
df.loc[
((df["BMI"] >= 25) & (df["BMI"] < 30)) & ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_BMI_NOM",
] = "overweightmature"
df.loc[
((df["BMI"] >= 25) & (df["BMI"] < 30)) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"
] = "overweightsenior"
# obese: BMI >= 30, consistent with the NEW_BMI bins above
df.loc[
    (df["BMI"] >= 30) & ((df["Age"] >= 21) & (df["Age"] < 50)), "NEW_AGE_BMI_NOM"
] = "obesemature"
df.loc[(df["BMI"] >= 30) & (df["Age"] >= 50), "NEW_AGE_BMI_NOM"] = "obesesenior"
df.loc[
(df["Glucose"] < 70) & ((df["Age"] >= 21) & (df["Age"] < 50)), "NEW_AGE_GLUCOSE_NOM"
] = "lowmature"
df.loc[(df["Glucose"] < 70) & (df["Age"] >= 50), "NEW_AGE_GLUCOSE_NOM"] = "lowsenior"
df.loc[
((df["Glucose"] >= 70) & (df["Glucose"] < 100))
& ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_GLUCOSE_NOM",
] = "normalmature"
df.loc[
((df["Glucose"] >= 70) & (df["Glucose"] < 100)) & (df["Age"] >= 50),
"NEW_AGE_GLUCOSE_NOM",
] = "normalsenior"
df.loc[
((df["Glucose"] >= 100) & (df["Glucose"] <= 125))
& ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_GLUCOSE_NOM",
] = "hiddenmature"
df.loc[
((df["Glucose"] >= 100) & (df["Glucose"] <= 125)) & (df["Age"] >= 50),
"NEW_AGE_GLUCOSE_NOM",
] = "hiddensenior"
df.loc[
(df["Glucose"] > 125) & ((df["Age"] >= 21) & (df["Age"] < 50)),
"NEW_AGE_GLUCOSE_NOM",
] = "highmature"
df.loc[(df["Glucose"] > 125) & (df["Age"] >= 50), "NEW_AGE_GLUCOSE_NOM"] = "highsenior"
def set_insulin(row, col_name="Insulin"):
    # applied row-wise via df.apply(..., axis=1)
    if 16 <= row[col_name] <= 166:
        return "Normal"
    else:
        return "Abnormal"
df["NEW_INSULIN_SCORE"] = df.apply(set_insulin, axis=1)
df["NEW_GLUCOSE*INSULIN_405"] = df["Glucose"] * df["Insulin"] / 405
df["NEW_GLUCOSE*PREGNANCIES"] = df["Glucose"] * df["Pregnancies"]
df.columns = [col.upper() for col in df.columns]
# # Encoding
cat_cols, num_cols, cat_but_car = grab_col_names(df)
def label_encoder(dataframe, binary_col):
labelencoder = LabelEncoder()
dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col])
return dataframe
binary_cols = [
col for col in df.columns if df[col].dtypes == "O" and df[col].nunique() == 2
]
binary_cols
for col in binary_cols:
df = label_encoder(df, col)
df.head()
cat_cols = [
col for col in cat_cols if col not in binary_cols and col not in ["OUTCOME"]
]
cat_cols
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
df = one_hot_encoder(df, cat_cols, drop_first=True)
df.head()
# # Standardization
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
df.head()
# # Modelling
y = df["OUTCOME"]
X = df.drop("OUTCOME", axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=17
)
df.head()
rf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
print(f"Accuracy: {round(accuracy_score(y_pred, y_test), 2)}")
print(f"Recall: {round(recall_score(y_pred,y_test),3)}")
print(f"Precision: {round(precision_score(y_pred,y_test), 2)}")
print(f"F1: {round(f1_score(y_pred,y_test), 2)}")
print(f"Auc: {round(roc_auc_score(y_pred,y_test), 2)}")
# # Feature Importance
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
print(feature_imp.sort_values("Value", ascending=False))
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
plt.show()
if save:
plt.savefig("importances.png")
# Base Model
# Accuracy: 0.77
# Recall: 0.706
# Precision: 0.59
# F1: 0.64
# Auc: 0.75
plot_importance(rf_model, X)
|
# ## Detecting spam SMS with a Naive Bayes classifier ##
# Dataset: https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection
# Spam messages usually contain eye-catching words such as 'free', 'win', 'prize' and so on, which happen to be good features for training a machine learning model. This is a binary classification task: every message belongs to one of two classes, 'Spam' or 'Not Spam'.
# # Outline
# The project is split into the following steps:
# - 1.1: Explore the SMS data
# - 1.2: Data preprocessing
# - 2.1: The Bag of Words model
# - 2.2: Implementing Bag of Words from scratch
# - 2.3: Using scikit-learn's built-in Bag of Words
# - 3.1: Preparing the training and test sets
# - 3.2: Applying Bag of Words to the SMS dataset
# - 4.1: Understanding Bayes' theorem
# - 4.2: Implementing a Naive Bayes classifier from scratch
# - 5: Using scikit-learn's built-in Naive Bayes classifier
# - 6: Conclusion
# ### 1.1: Explore the SMS data ###
# The data comes from https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection and can be downloaded from (https://archive.ics.uci.edu/ml/machine-learning-databases/00228/).
# **A sample of the data:**
# The data has two columns: the first is the label of each message, 'ham' or 'spam'; the second is the message text.
#
import pandas as pd
df = pd.read_csv(
"/kaggle/input/sms-data-labelled-spam-and-non-spam/SMSSpamCollection",
sep="\t",
names=["label", "message"],
)
df.head()
# ### 1.2: Data preprocessing ###
# Convert the class labels from the strings 'ham' and 'spam' to the numbers 0 and 1.
#
df["label"] = df.label.map({"ham": 0, "spam": 1})
print(df.shape)
df.head()
# ### 2.1: The Bag of Words model ###
# Bag of Words: count how many times each word occurs in a piece of text, ignoring the order/position of the words.
# With Bag of Words we can turn a dataset containing several texts into a matrix, where each row corresponds to one text and each column holds the number of times a given word occurs in that text. For example, take the following four texts:
# `['Hello, how are you!',
# 'Win money, win from home.',
# 'Call me now',
# 'Hello, Call you tomorrow?']`
# Bag of Words converts them into a word-count matrix (built explicitly in section 2.3 below).
# We can do this with sklearn's
# [count vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) :
# * First it splits the text into individual words and assigns an integer id to each word.
# * Then it counts how many times each word occurs in the text.
# **Note:**
# * CountVectorizer automatically lowercases all words, so 'He' and 'he' are treated as the same word.
# * It removes all punctuation, so 'hello!' and 'hello' are treated as the same word.
# * It accepts a `stop_words` parameter for dropping very common words of the given language, e.g. English 'am', 'an', 'and', 'the' and so on. If `stop_words` is set to `english`, CountVectorizer removes these words using scikit-learn's built-in English stop-word list. Such stop words usually do not help us decide whether a message is spam.
# ### 2.2: Implementing Bag of Words from scratch ###
# Before using scikit-learn's built-in Bag of Words implementation, let's implement the model ourselves.
# **1: Convert all words to lower case.**
# Suppose our documents are:
# ```python
# documents = ['Hello, how are you!',
# 'Win money, win from home.',
# 'Call me now.',
# 'Hello, Call hello you tomorrow?']
# ```
documents = [
"Hello, how are you!",
"Win money, win from home.",
"Call me now.",
"Hello, Call hello you tomorrow?",
]
lower_case_documents = []
for i in documents:
lower_case_documents.append(i.lower())
print(lower_case_documents)
# **2: Remove punctuation**
sans_punctuation_documents = []
import string
for i in lower_case_documents:
sans_punctuation_documents.append(
i.translate(str.maketrans("", "", string.punctuation))
)
print(sans_punctuation_documents)
# **3: Split each sentence into individual words**
preprocessed_documents = []
for i in sans_punctuation_documents:
preprocessed_documents.append(i.split())
print(preprocessed_documents)
# **4: Count how many times each word occurs in each sentence**
frequency_list = []
import pprint
from collections import Counter
for i in preprocessed_documents:
frequency_list.append(Counter(i))
pprint.pprint(frequency_list)
# That is exactly what scikit-learn's `sklearn.feature_extraction.text.CountVectorizer` method does under the hood. Next we use scikit-learn to perform the same task.
# ### 2.3: Using scikit-learn's Bag of Words ###
documents = [
"Hello, how are you!",
"Win money, win from home.",
"Call me now.",
"Hello, Call hello you tomorrow?",
]
from sklearn.feature_extraction.text import CountVectorizer
count_vector = CountVectorizer()
# **Data preprocessing inside CountVectorizer()**
# Note that CountVectorizer() applies the following preprocessing by default:
# * `lowercase = True`
#
# Convert all words to lower case.
# * `token_pattern = (?u)\\b\\w\\w+\\b`
#
# This regular expression strips all punctuation, treats punctuation as word boundaries, and drops words shorter than 2 characters.
# * `stop_words`
# Defaults to `None`; if set to `english` it removes all common English words. Since our data are SMS messages where every word matters, we do not remove stop words here.
"""
察看 'CountVectorizer() 的默认参数'
"""
print(count_vector)
# Get all the words that appear in the dataset
count_vector.fit(documents)
count_vector.get_feature_names()
# Convert the texts into a matrix: each row corresponds to one message, each column to the count of a word in that message
doc_array = count_vector.transform(documents).toarray()
doc_array
# We can turn this matrix into a pandas DataFrame with the words as column labels, which is easier to inspect.
frequency_matrix = pd.DataFrame(doc_array, columns=count_vector.get_feature_names())
frequency_matrix
# Note that we did not remove common words such as 'are', 'is', 'the', 'an'; they may affect the predictions. There are two ways to address this (see the short sketch after this list):
# 1. Set `stop_words` to `english` to remove common words.
# 2. Use [tfidf](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer) instead of relying on raw word counts alone.
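# A brief sketch of both options above, applied to the small `documents` list (not the full SMS data):
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

cv_no_stop = CountVectorizer(stop_words="english")  # option 1: drop common English words
tfidf = TfidfVectorizer()  # option 2: weight words by tf-idf rather than raw counts
print(cv_no_stop.fit_transform(documents).toarray())
print(tfidf.fit_transform(documents).toarray().round(2))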
# ### 3.1: Preparing the training and test sets ###
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
df["message"], df["label"], random_state=1
)
print("总的短信数量: {}".format(df.shape[0]))
print("训练集的短信数量: {}".format(X_train.shape[0]))
print("测试集的短信数量: {}".format(X_test.shape[0]))
# ### 3.2: 使用词袋模型处理短信数据集 ###
# 注意: 代码分为两步, 第一步使用训练数据集获取所有的单词集合, 同时将训练数据集转为矩阵. 第二步直接使用第一步获取的单词集合, 将测试数据集转为矩阵.
count_vector = CountVectorizer()
training_data = count_vector.fit_transform(X_train)
testing_data = count_vector.transform(X_test)
print(testing_data[:2])
# ### 4.1: Understanding Bayes' theorem ###
# Let's use a simple example to understand Bayes' theorem.
# Assume:
# `P(D)` is the probability that any given person has diabetes; assume it is `0.01`.
# `P(Pos)` is the probability that any given person tests positive for diabetes.
# `P(Pos|D)` is the probability that a person who has diabetes tests positive; it is `0.9`.
# `P(Pos|~D)` is the probability that a person who does not have diabetes tests positive; it is `0.1`.
# Find: the probability that a person who tests positive actually has diabetes, P(D|Pos).
# Bayes' theorem: `P(A|B) = P(B|A) * P(A) / P(B)`
# Applying it here:
# `P(D|Pos) = P(Pos|D) * P(D) / P(Pos)`
# And by the law of total probability:
# `P(Pos) = [P(D) * P(Pos|D)] + [P(~D) * P(Pos|~D)]`
# P(D)
p_diabetes = 0.01
# P(~D)
p_no_diabetes = 0.99
# P(Pos|D)
p_pos_diabetes = 0.9
# P(Pos|~D)
p_pos_no_diabetes = 0.1
# P(Pos)
p_pos = (p_diabetes * p_pos_diabetes) + (p_no_diabetes * p_pos_no_diabetes)
print("任何一个人(无论是否罹患糖尿病)检测结果为阳性的概率 P(Pos): {}".format(p_pos))
# P(D|Pos)
p_diabetes_pos = (p_diabetes * p_pos_diabetes) / p_pos
print("一个人检测结果为阳性的情况下, 这个人真正罹患糖尿病的概率:", format(p_diabetes_pos))
# **What does 'Naive' mean in 'Naive Bayes'?**
# 'Naive' means the predictive features are assumed to be independent. In the diabetes example, suppose that besides the test result we also had the person's weight as a feature. Naive Bayes assumes that 'test result' and 'weight' are independent and do not influence each other.
# Assume:
# * the probability that a ham message contains the word 'good' is 0.7 -----> `P(g|h)`
# * the probability that a ham message contains the word 'free' is 0.1 -----> `P(f|h)`
# * the probability that a ham message contains the word 'win' is 0.2 -----> `P(w|h)`
# * the probability that a spam message contains the word 'good' is 0.3 -----> `P(g|s)`
# * the probability that a spam message contains the word 'free' is 0.9 -----> `P(f|s)`
# * the probability that a spam message contains the word 'win' is 0.8 -----> `P(w|s)`
# Assume the prior probabilities of ham and spam are `P(h) = 0.8` and `P(s) = 0.2`.
# The Naive Bayes formula is: $P\left(y \mid x_{1}, \ldots, x_{n}\right)=\frac{P(y) P\left(x_{1}, \ldots x_{n} \mid y\right)}{P\left(x_{1}, \ldots, x_{n}\right)}$
# Find:
# If a message contains both 'free' and 'win', what is the probability that it is spam?
# We need to compute:
# * `P(s|f,w)`: the probability that a message is spam given that it contains 'free' and 'win'.
# * `P(s|f,w)` = `(P(s) * P(f|s) * P(w|s)) / P(f,w)`, where `P(f,w)` is the probability that a message contains both 'free' and 'win'.
#
# * `P(f,w) = P(h) * P(f, w | h) + P(s) * P(f, w | s)`
# Using the 'naive' assumption that the features f and w are independent:
# * `P(f,w) = P(h) * P(f | h) * P(w | h) + P(s) * P(f | s) * P(w | s)`
# P(s)
p_s = 0.2
# P(f/s)
p_f_s = 0.9
# P(w/s)
p_w_s = 0.8
# P(h)
p_h = 0.8
# P(f/h)
p_f_h = 0.1
# P(w|h)
p_w_h = 0.2
# `P(f,w) = P(h) * P(f, w | h) + P(s) * P(f, w | s)`
p_fw = p_h * p_f_h * p_w_h + p_s * p_f_s * p_w_s
print("短信同时出现free和win的概率:{0}".format(p_fw))
# `P(s|f,w)` = `(P(s) * P(f|s) * P(w|s)) / P(f,w)`
p_s_fw = p_s * p_f_s * p_w_s / p_fw
print("短信同时出现free和win时为垃圾短信的概率:{0}".format(p_s_fw))
# ### 5: Using scikit-learn's built-in Naive Bayes classifier ###
# `sklearn.naive_bayes` provides ready-made Naive Bayes classifiers. There are two variants: MultinomialNB for discrete features (such as our word counts) and GaussianNB for continuous features (see the small illustration below).
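# A tiny illustration of the GaussianNB variant on toy continuous features (not our SMS count matrix):
import numpy as np
from sklearn.naive_bayes import GaussianNB

toy_X = np.array([[1.2, 0.7], [0.9, 1.1], [3.8, 4.2], [4.1, 3.9]])  # continuous features
toy_y = np.array([0, 0, 1, 1])
toy_gnb = GaussianNB().fit(toy_X, toy_y)
print(toy_gnb.predict(np.array([[4.0, 4.0]])))  # -> [1]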
# Train
from sklearn.naive_bayes import MultinomialNB
naive_bayes = MultinomialNB()
naive_bayes.fit(training_data, y_train)
# Predict
predictions = naive_bayes.predict(testing_data)
# #### Evaluating model performance ####
# **Accuracy**: the fraction of messages in the test set whose class is predicted correctly.
# **Precision**: among the messages predicted as spam, the fraction that really are spam.
# It is the number of true positives (actual spam predicted as spam) divided by the number of all messages predicted as spam (whether or not they really are spam):
# `[True Positives/(True Positives + False Positives)]`
# **Recall (sensitivity)**: the fraction of all actual spam messages in the test set that are predicted as spam.
# It is the number of true positives divided by the total number of actual spam messages:
# `[True Positives/(True Positives + False Negatives)]`
# **F1-Score**: combines Precision and Recall; it ranges from 0 to 1 and higher is better.
# `2 * Precision * Recall / (Precision + Recall) `
# With imbalanced classes, e.g. only 1 spam message out of 100, **Accuracy** is not a good metric: blindly predicting every message as ham already yields 99% accuracy. In that situation the F1-Score is the better choice.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print("Accuracy score: ", format(accuracy_score(y_test, predictions)))
print("Precision score: ", format(precision_score(y_test, predictions)))
print("Recall score: ", format(recall_score(y_test, predictions)))
print("F1 score: ", format(f1_score(y_test, predictions)))
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Import libraries
import tensorflow as tf
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
import sklearn.metrics as sk_metrics
import tempfile
import os
# Preset matplotlib figure sizes.
matplotlib.rcParams["figure.figsize"] = [9, 6]
print(tf.__version__)
# To make the results reproducible, set the random seed value.
tf.random.set_seed(22)
# Load the data
dataset = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
Test_dataset = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
# Overview of the dataset
dataset.info()
dataset.head()
# It is a binary classification problem with two possible outputs (0, 1): stones in the kidney or no stones. We will use logistic regression, which essentially gives us the probability of having kidney stones.
# Split the dataset into training and validation and randomize
train_dataset = dataset.sample(frac=0.75, random_state=1)
len(train_dataset) # Length of the training dataset
# We can drop the id column as each row is unique
vl_dataset = dataset.drop(train_dataset.index)
train_dataset = train_dataset.drop(columns="id")
len(vl_dataset) # length of validation dataset
vl_dataset = vl_dataset.drop(columns="id")
# Separate labels and features which will be fitted into the model
x_train, y_train = train_dataset.iloc[:, :-1], train_dataset["target"]
x_val, y_val = vl_dataset.iloc[:, :-1], vl_dataset["target"]
x_test = Test_dataset.drop(columns="id")
# **Data preprocessing**
# Convert the dataset into a tensor
x_train, y_train = tf.convert_to_tensor(
x_train, dtype=tf.float32
), tf.convert_to_tensor(y_train, dtype=tf.float32)
x_val, y_val = tf.convert_to_tensor(x_val, dtype=tf.float32), tf.convert_to_tensor(
y_val, dtype=tf.float32
)
# **Data visualization**
sns.pairplot(train_dataset, hue="target", diag_kind="kde")
# Skewness can be observed; removing outliers would be ideal
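# A minimal sketch (not applied to the pipeline below) of IQR-based clipping that could tame the skewed
# feature columns; the "target" column of train_dataset is excluded.
feature_cols = train_dataset.columns.drop("target")
q1 = train_dataset[feature_cols].quantile(0.25)
q3 = train_dataset[feature_cols].quantile(0.75)
iqr = q3 - q1
clipped_example = train_dataset[feature_cols].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr, axis=1)
# clipped_example now holds a winsorized copy of the features for comparison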
sns.heatmap(dataset.corr())
train_dataset.describe().transpose()
# Given the inconsistency of ranges it is a good idea to normalize the data to help predictions
class Normalize(tf.Module):
def __init__(self, x):
# Initialize the mean and standard deviation for normalization
self.mean = tf.Variable(tf.math.reduce_mean(x, axis=0))
self.std = tf.Variable(tf.math.reduce_std(x, axis=0))
def norm(self, x):
# Normalize the input
return (x - self.mean) / self.std
def unnorm(self, x):
# Unnormalize the input
return (x * self.std) + self.mean
norm_x = Normalize(x_train)
x_train_norm, x_val_norm = norm_x.norm(x_train), norm_x.norm(x_val)
# **Define the loss function. Log loss (binary cross-entropy) is the natural loss function for a binary classification problem with logistic regression: for each example it quantifies how close the predicted probability is to the true value, log_loss = -[y*log(p) + (1-y)*log(1-p)].**
def log_loss(y_pred, y):
# Compute the log loss function
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_pred)
return tf.reduce_mean(ce)
# Building the model
class LogisticRegression(tf.Module):
def __init__(self):
self.built = False
def __call__(self, x, train=True):
# Initialize the model parameters on the first call
if not self.built:
# Randomly generate the weights and the bias term
rand_w = tf.random.uniform(shape=[x.shape[-1], 1], seed=22)
rand_b = tf.random.uniform(shape=[], seed=22)
self.w = tf.Variable(rand_w)
self.b = tf.Variable(rand_b)
self.built = True
# Compute the model output
z = tf.add(tf.matmul(x, self.w), self.b)
z = tf.squeeze(z, axis=1)
if train:
return z
return tf.sigmoid(z)
log_reg = LogisticRegression()
y_pred = log_reg(x_train[:5], train=False)
y_pred.numpy()
# Accuracy function to calculate the proportions of correct classification during training
def predict_class(y_pred, thresh=0.4):
# Return a tensor with `1` if `y_pred` > `0.4`, and `0` otherwise
return tf.cast(y_pred > thresh, tf.float32)
def accuracy(y_pred, y):
# Return the proportion of matches between `y_pred` and `y`
y_pred = tf.math.sigmoid(y_pred)
y_pred_class = predict_class(y_pred)
check_equal = tf.cast(y_pred_class == y, tf.float32)
acc_val = tf.reduce_mean(check_equal)
return acc_val
# Training the model
batch_size = 528
train_dataset = tf.data.Dataset.from_tensor_slices((x_train_norm, y_train))
train_dataset = train_dataset.shuffle(buffer_size=x_train.shape[0]).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((x_val_norm, y_val))
test_dataset = test_dataset.shuffle(buffer_size=x_val.shape[0]).batch(batch_size)
# Set training parameters
epochs = 2000
learning_rate = 0.1
train_losses, test_losses = [], []
train_accs, test_accs = [], []
# Set up the training loop and begin training
for epoch in range(epochs):
batch_losses_train, batch_accs_train = [], []
batch_losses_test, batch_accs_test = [], []
# Iterate over the training data
for x_batch, y_batch in train_dataset:
with tf.GradientTape() as tape:
y_pred_batch = log_reg(x_batch)
batch_loss = log_loss(y_pred_batch, y_batch)
batch_acc = accuracy(y_pred_batch, y_batch)
# Update the parameters with respect to the gradient calculations
grads = tape.gradient(batch_loss, log_reg.variables)
for g, v in zip(grads, log_reg.variables):
v.assign_sub(learning_rate * g)
# Keep track of batch-level training performance
batch_losses_train.append(batch_loss)
batch_accs_train.append(batch_acc)
# Iterate over the testing data
for x_batch, y_batch in test_dataset:
y_pred_batch = log_reg(x_batch)
batch_loss = log_loss(y_pred_batch, y_batch)
batch_acc = accuracy(y_pred_batch, y_batch)
# Keep track of batch-level testing performance
batch_losses_test.append(batch_loss)
batch_accs_test.append(batch_acc)
# Keep track of epoch-level model performance
train_loss, train_acc = tf.reduce_mean(batch_losses_train), tf.reduce_mean(
batch_accs_train
)
test_loss, test_acc = tf.reduce_mean(batch_losses_test), tf.reduce_mean(
batch_accs_test
)
train_losses.append(train_loss)
train_accs.append(train_acc)
test_losses.append(test_loss)
test_accs.append(test_acc)
if epoch % 20 == 0:
print(f"Epoch: {epoch}, Training log loss: {train_loss:.3f}")
# **Model performance evaluation**
plt.plot(range(epochs), train_losses, label="Training loss")
plt.plot(range(epochs), test_losses, label="Testing loss")
plt.xlabel("Epoch")
plt.ylabel("Log loss")
plt.legend()
plt.title("Log loss vs training iterations")
# Accuracy
plt.plot(range(epochs), train_accs, label="Training accuracy")
plt.plot(range(epochs), test_accs, label="Testing accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.legend()
plt.title("Accuracy vs training iterations")
print(f"Final training log loss: {train_losses[-1]:.3f}")
print(f"Final validation log Loss: {test_losses[-1]:.3f}")
print(f"Final training accuracy: {train_accs[-1]:.3f}")
print(f"Final validation accuracy: {test_accs[-1]:.3f}")
# The model displays a relatively high accuracy and low loss when classifying patients which might have kidney stones
def show_confusion_matrix(y, y_classes, typ):
# Compute the confusion matrix and normalize it
plt.figure(figsize=(10, 10))
confusion = sk_metrics.confusion_matrix(y.numpy(), y_classes.numpy())
    confusion_normalized = confusion / confusion.sum(axis=1, keepdims=True)  # normalize each true-class row
axis_labels = range(2)
ax = sns.heatmap(
confusion_normalized,
xticklabels=axis_labels,
yticklabels=axis_labels,
cmap="Blues",
annot=True,
fmt=".4f",
square=True,
)
plt.title(f"Confusion matrix: {typ}")
plt.ylabel("True label")
plt.xlabel("Predicted label")
y_pred_train, y_pred_val = log_reg(x_train_norm, train=False), log_reg(
    x_val_norm, train=False
)
# test-set predictions are made after the model is exported and reloaded below
train_classes, val_classes = predict_class(y_pred_train), predict_class(y_pred_val)
show_confusion_matrix(y_train, train_classes, "Training")
show_confusion_matrix(y_val, val_classes, "Validation")
# 0 ("Absence of stones") is the negative label. 1, ("Presence of stones") is the positive label.
# My model correctly predicted that the patience had kidney stones 86% of the time, it also predicted correctly 70% of the times that it did not have kidney stones.
# However the model incorrectly diagnosed 36% of the healthy patients as having kidney stones, this model would probably bankrupt the hospital due to litigations.
# The good side is that in models predicting medical conditions it is usually worth it to sacrifice some "Recall" in oreder to more accurately identify the sick people.
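# A quick sanity check of that tradeoff on the validation split, using sklearn on the tensors computed above:
from sklearn.metrics import precision_score, recall_score

val_precision = precision_score(y_val.numpy().astype(int), val_classes.numpy().astype(int))
val_recall = recall_score(y_val.numpy().astype(int), val_classes.numpy().astype(int))
print(f"Validation precision: {val_precision:.3f}, validation recall: {val_recall:.3f}")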
# Save and export the model
class ExportModule(tf.Module):
def __init__(self, model, norm_x, class_pred):
# Initialize pre- and post-processing functions
self.model = model
self.norm_x = norm_x
self.class_pred = class_pred
@tf.function(input_signature=[tf.TensorSpec(shape=[None, None], dtype=tf.float32)])
def __call__(self, x):
# Run the `ExportModule` for new data points
x = self.norm_x.norm(x)
y = self.model(x, train=False)
y = self.class_pred(y)
return y
log_reg_export = ExportModule(model=log_reg, norm_x=norm_x, class_pred=predict_class)
models = tempfile.mkdtemp()
save_path = os.path.join(models, "log_reg_export")
tf.saved_model.save(log_reg_export, save_path)
# Make predictions on test set
log_reg_loaded = tf.saved_model.load(save_path)
test_preds = log_reg_loaded(x_test)
test_preds[:10].numpy()
# Assign them to the dataframe
Test_dataset = Test_dataset.assign(target=test_preds)
Test_dataset[["id", "target"]].to_csv("Logistic_regression_tf.csv", index=False)
|
# importing libraries
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import warnings
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
train_dir = r"/kaggle/input/leaf-disease-dataset/data_penyakit/dataset/train"
test_dir = r"/kaggle/input/leaf-disease-dataset/data_penyakit/dataset/test"
for d in [train_dir, test_dir]:
filepaths = []
labels = []
classlist = sorted(os.listdir(d))
for klass in classlist:
label = klass.split("__")[1]
classpath = os.path.join(d, klass)
flist = sorted(os.listdir(classpath))
for f in flist:
fpath = os.path.join(classpath, f)
filepaths.append(fpath)
labels.append(label)
Fseries = pd.Series(filepaths, name="filepaths")
Lseries = pd.Series(labels, name="labels")
if d == train_dir:
df = pd.concat([Fseries, Lseries], axis=1)
else:
test_df = pd.concat([Fseries, Lseries], axis=1)
train_df, test_df = train_test_split(
df, train_size=0.9, shuffle=True, random_state=123, stratify=df["labels"]
)
print(
    "train_df length: ",
    len(train_df),
    " test_df length: ",
    len(test_df),
)
# get the number of classes and the images count for each class in train_df
classes = sorted(list(train_df["labels"].unique()))
class_count = len(classes)
print("The number of classes in the dataset is: ", class_count)
groups = train_df.groupby("labels")
print("{0:^30s} {1:^13s}".format("CLASS", "IMAGE COUNT"))
countlist = []
classlist = []
for label in sorted(list(train_df["labels"].unique())):
group = groups.get_group(label)
countlist.append(len(group))
classlist.append(label)
print("{0:^30s} {1:^13s}".format(label, str(len(group))))
# get the classes with the minimum and maximum number of train images
max_value = np.max(countlist)
max_index = countlist.index(max_value)
max_class = classlist[max_index]
min_value = np.min(countlist)
min_index = countlist.index(min_value)
min_class = classlist[min_index]
print(
max_class,
" has the most images= ",
max_value,
" ",
min_class,
" has the least images= ",
min_value,
)
# lets get the average height and width of a sample of the train images
ht = 0
wt = 0
# select 100 random samples of train_df
train_df_sample = train_df.sample(n=100, random_state=123, axis=0)
for i in range(len(train_df_sample)):
fpath = train_df_sample["filepaths"].iloc[i]
img = plt.imread(fpath)
shape = img.shape
ht += shape[0]
wt += shape[1]
print(
"average height= ",
ht // 100,
" average width= ",
wt // 100,
"aspect ratio= ",
ht / wt,
)
train_gen = ImageDataGenerator(
rescale=None, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_gen = ImageDataGenerator(
rescale=None, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
training_set = train_gen.flow_from_directory(
train_dir, target_size=(128, 128), batch_size=32, class_mode="categorical"
)
test_set = test_gen.flow_from_directory(
test_dir, target_size=(128, 128), batch_size=32, class_mode="categorical"
)
# basic cnn layers
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=(128, 128, 3)))
model.add(
MaxPooling2D(
pool_size=(
2,
2,
)
)
)
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(
MaxPooling2D(
pool_size=(
2,
2,
)
)
)
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(
MaxPooling2D(
pool_size=(
2,
2,
)
)
)
model.add(BatchNormalization())
model.add(Conv2D(96, kernel_size=(3, 3), activation="relu"))
model.add(
MaxPooling2D(
pool_size=(
2,
2,
)
)
)
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu"))
model.add(
MaxPooling2D(
pool_size=(
2,
2,
)
)
)
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(33, activation="softmax"))
# compiling our model
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
import csv
# Get the configuration of the model
config_dict = {
"optimizer": model.optimizer.__class__.__name__,
"loss": model.loss,
"metrics": model.metrics_names,
}
# Write the configuration to a CSV file
with open("model_config.csv", "w", newline="") as csvfile:
csvwriter = csv.writer(csvfile)
for key, value in config_dict.items():
csvwriter.writerow([key, value])
# # e10
labels = training_set.class_indices
labels2 = test_set.class_indices
# fitting data into our model
fitted_model_10 = model.fit(
training_set,
steps_per_epoch=375,
epochs=10,
validation_data=test_set,
validation_steps=125,
)
# extract features from trained CNN model
cnn_features_train = model.predict(training_set)
cnn_features_test = model.predict(test_set)
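# Note: model.predict() above returns the 33-way softmax outputs, so the RF below is trained on class
# probabilities rather than raw CNN features; also, flow_from_directory shuffles by default, so
# training_set.classes may not line up with the prediction order (shuffle=False is usually used when
# extracting features). A hedged sketch of tapping the penultimate Dense(128) layer instead, assuming the
# layer stack defined above (model.layers[-3] is that Dense layer):
from keras.models import Model

feature_extractor = Model(inputs=model.input, outputs=model.layers[-3].output)
penultimate_features_train = feature_extractor.predict(training_set)  # shape: (n_samples, 128)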
from sklearn.ensemble import RandomForestClassifier
# build RF model
rf_model = RandomForestClassifier(n_estimators=250, max_depth=12, min_samples_leaf=16)
rf_model.fit(cnn_features_train, training_set.classes)
# make predictions on test set
rf_preds = rf_model.predict(cnn_features_test)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# evaluate model performance
accuracy = accuracy_score(test_set.classes, rf_preds)
# precision = precision_score(test_set.classes, rf_preds)
# recall = recall_score(test_set.classes, rf_preds)
# f1 = f1_score(test_set.classes, rf_preds)
print(f"Accuracy: {accuracy}")
# print(f"Precision: {precision}")
# print(f"Recall: {recall}")
# print(f"F1 score: {f1}")
# # e20
labels = training_set.class_indices
labels2 = test_set.class_indices
# fitting data into our model
fitted_model_10 = model.fit(
training_set,
steps_per_epoch=375,
epochs=20,
validation_data=test_set,
validation_steps=125,
)
# extract features from trained CNN model
cnn_features_train = model.predict(training_set)
cnn_features_test = model.predict(test_set)
# build RF model
rf_model = RandomForestClassifier(n_estimators=250, max_depth=12, min_samples_leaf=16)
rf_model.fit(cnn_features_train, training_set.classes)
# make predictions on test set
rf_preds = rf_model.predict(cnn_features_test)
# evaluate model performance
accuracy = accuracy_score(test_set.classes, rf_preds)
# precision = precision_score(test_set.classes, rf_preds)
# recall = recall_score(test_set.classes, rf_preds)
# f1 = f1_score(test_set.classes, rf_preds)
print(f"Accuracy: {accuracy}")
# print(f"Precision: {precision}")
# print(f"Recall: {recall}")
# print(f"F1 score: {f1}")
# # e30
labels = training_set.class_indices
labels2 = test_set.class_indices
# fitting data into our model
fitted_model_10 = model.fit(
training_set,
steps_per_epoch=375,
epochs=30,
validation_data=test_set,
validation_steps=125,
)
# extract features from trained CNN model
cnn_features_train = model.predict(training_set)
cnn_features_test = model.predict(test_set)
# build RF model
rf_model = RandomForestClassifier(n_estimators=250, max_depth=12, min_samples_leaf=16)
rf_model.fit(cnn_features_train, training_set.classes)
# make predictions on test set
rf_preds = rf_model.predict(cnn_features_test)
# evaluate model performance
accuracy = accuracy_score(test_set.classes, rf_preds)
# precision = precision_score(test_set.classes, rf_preds)
# recall = recall_score(test_set.classes, rf_preds)
# f1 = f1_score(test_set.classes, rf_preds)
print(f"Accuracy: {accuracy}")
# print(f"Precision: {precision}")
# print(f"Recall: {recall}")
# print(f"F1 score: {f1}")
# # e40
labels = training_set.class_indices
labels2 = test_set.class_indices
# fitting data into our model
fitted_model_10 = model.fit(
training_set,
steps_per_epoch=375,
    epochs=40,
validation_data=test_set,
validation_steps=125,
)
# extract features from trained CNN model
cnn_features_train = model.predict(training_set)
cnn_features_test = model.predict(test_set)
# build RF model
rf_model = RandomForestClassifier(n_estimators=250, max_depth=12, min_samples_leaf=16)
rf_model.fit(cnn_features_train, training_set.classes)
# make predictions on test set
rf_preds = rf_model.predict(cnn_features_test)
# evaluate model performance
accuracy = accuracy_score(test_set.classes, rf_preds)
# precision = precision_score(test_set.classes, rf_preds)
# recall = recall_score(test_set.classes, rf_preds)
# f1 = f1_score(test_set.classes, rf_preds)
print(f"Accuracy: {accuracy}")
# print(f"Precision: {precision}")
# print(f"Recall: {recall}")
# print(f"F1 score: {f1}")
|
# # Approaches
# 1. NLTK
# 2. Spacy
# # **NLTK**
# ## 1. Word Tokenization
# ## 2. POS Tagging
# ## 3. ne_chunk
import pandas as pd
import nltk
text = """ Apple is aiming to buy a India's startup Xolo INC for $6 million USD,
which increased the stock rate of AAPL by 15% in United States of America"""
# # Word Tokenization
words = nltk.word_tokenize(text)
words
# # POS Tagging
pos_tags = nltk.pos_tag(words)
pos_tags
# reference for the POS tag definitions
nltk.help.upenn_tagset("VBZ")
# # ne_chunk
chunks = nltk.ne_chunk(pos_tags)
for i in chunks:
print(i)
chunks_ne = nltk.ne_chunk(pos_tags, binary=True)
for i in chunks_ne:
print(i)
# # binary = False
entities = []
labels = []
for chunk in chunks:
if hasattr(chunk, "label"):
entities.append(" ".join(word[0] for word in chunk))
labels.append(chunk.label())
entities_labels = list(set(zip(entities, labels)))
df = pd.DataFrame(entities_labels)
df.columns = ["Entities", "Labels"]
df
# # binary = True
entities_ne = []
labels_ne = []
for chunk in chunks_ne:
if hasattr(chunk, "label"):
entities_ne.append(" ".join(word[0] for word in chunk))
labels_ne.append(chunk.label())
entities_labels_ne = list(set(zip(entities_ne, labels_ne)))
df_ne = pd.DataFrame(entities_labels_ne)
df_ne.columns = ["Entities", "Labels"]
df_ne
# # **Spacy**
import spacy, nltk
nlp = spacy.load("en_core_web_sm")
nlp.pipe_names
nlp.pipe_labels["ner"]
doc = nlp(text)
for ents in doc.ents:
print(ents, "-->", ents.label_, "-->", spacy.explain(ents.label_))
entities_labels_ne = list(set(zip(entities_ne, labels_ne)))
entities_doc = []
labels_doc = []
description = []
start_position = []
end_position = []
for ents in doc.ents:
entities_doc.append(ents)
labels_doc.append(ents.label_)
description.append(spacy.explain(ents.label_))
start_position.append(ents.start_char)
end_position.append(ents.end_char)
ner_spacy = list(
zip(entities_doc, labels_doc, description, start_position, end_position)
)
df_spacy = pd.DataFrame(ner_spacy)
df_spacy.columns = [
"Entities",
"Labels",
"Description",
"Starting Position",
"Ending Position",
]
df_spacy
from spacy import displacy
displacy.render(doc, style="ent")
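# displacy can also return the rendered markup as a string (a sketch; the output file
# name here is just an example):
html = displacy.render(doc, style="ent", jupyter=False)
with open("ner_entities.html", "w") as f:
    f.write(html)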
|
# ## Exploratory Data Analysis | Reinforcing the Skills
# Let's import the libraries we will need below.
import numpy as np
import seaborn as sns
import pandas as pd
from statistics import mode
# Let's load our data from the current directory, turn it into a data frame, and assign it to the variable df. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/iris1csv/iris.csv")
df.shape
# Let's display the first 5 observations of the data frame.
df.head(5)
# Let's display how many features and how many observations the data frame has.
print("NUMBER OF FEATURES : {}".format(df.shape[1]))
print("NUMBER OF OBSERVATIONS : {}".format(df.shape[0]))
# Let's display the data types of the variables in the data frame and the memory usage.
df.dtypes
# Let's display the basic statistics of the numeric variables in the data frame.
# Using the standard deviation and mean values, let's reason about how much variance each variable has.
def dfDonustur(oznitelik):
x = []
for i in df[oznitelik]:
x += [i]
x.sort()
y = len(x)
return x, y
def AritmetikOrt(oznitelik):
sayac = 0
toplam = 0
for i in df[oznitelik]:
toplam += i
sayac = sayac + 1
return toplam / sayac
def Medyan(oznitelik):
x, y = dfDonustur(oznitelik)
if y % 2 == 0:
medyan = (x[y // 2 - 1] + x[y // 2]) / 2
else:
medyan = x[((y + 1) // 2) - 1]
return medyan
def KartilHesapla(oznitelik):
    x, y = dfDonustur(oznitelik)
    q1 = (y + 2) // 4
    q3 = (3 * y + 2) // 4
    print(
        "For {} :\n\t1st QUARTILE = {} , index:{}\n\t3rd QUARTILE = {} , index:{}".format(
            oznitelik, x[q1 - 1], q1 - 1, x[q3 - 1], q3 - 1
        )
    )
def RanjHesapla(oznitelik):
    x, y = dfDonustur(oznitelik)
    ranj = x[y - 1] - x[0]
    print("RANGE of {} = {}".format(oznitelik, ranj))
def StandartSapma(oznitelik):
    aort = AritmetikOrt(oznitelik)
    x, y = dfDonustur(oznitelik)
    toplam = 0
    for i in x:
        toplam += (i - aort) ** 2
    return (toplam / (y - 1)) ** 0.5
def Varyans(oznitelik):
return StandartSapma(oznitelik) ** 2
def Carpiklik(oznitelik):
c = (AritmetikOrt(oznitelik) - Medyan(oznitelik)) / StandartSapma(oznitelik)
return c
def Basiklik(oznitelik):
x, y = dfDonustur(oznitelik)
aort = AritmetikOrt(oznitelik)
toplam = 0
for i in x:
toplam += (i - aort) ** 4
return toplam / ((StandartSapma(oznitelik) ** 4) * y)
for i in range(0, 4):
a = df.columns[i]
print("{} için A.ORT = {}".format(a, AritmetikOrt(a)))
print("{} için MEDYAN = {}".format(a, Medyan(a)))
print("{} için MOD = {}".format(a, mode(df[a])))
KartilHesapla(a)
RanjHesapla(a)
print("{} için STANDART SAPMA = {}".format(a, StandartSapma(a)))
print("{} için VARYANS = {}".format(a, Varyans(a)))
print("{} için ÇARPIKLIK = {}".format(a, Carpiklik(a)))
print("{} için BASIKLIK = {}".format(a, Basiklik(a)))
print(50 * "-")
# ## Correlation and Covariance Calculation
def DiziToplamKare(dizi, ort):
toplam = 0
for i in dizi:
toplam += (i - ort) ** 2
return toplam
def DizilerCarpimToplam(dizi1, dizi1ort, dizi2, dizi2ort):
    # Paired sum of products of deviations: sum((x_i - x_mean) * (y_i - y_mean))
    toplam = 0
    for i, j in zip(dizi1, dizi2):
        toplam += (i - dizi1ort) * (j - dizi2ort)
    return toplam
def Korelasyon(oznitelik1, oznitelik2):
    # Use the unsorted column values so the (x_i, y_i) pairs stay aligned
    x = list(df[oznitelik1])
    y = list(df[oznitelik2])
    xort = AritmetikOrt(oznitelik1)
    yort = AritmetikOrt(oznitelik2)
    carpimToplam = DizilerCarpimToplam(x, xort, y, yort)
    dizi1ToplamKare = DiziToplamKare(x, xort)
    dizi2ToplamKare = DiziToplamKare(y, yort)
    return carpimToplam / ((dizi1ToplamKare * dizi2ToplamKare) ** (1 / 2))
def Kovaryans(oznitelik1, oznitelik2):
    x = list(df[oznitelik1])
    y = list(df[oznitelik2])
    xort = AritmetikOrt(oznitelik1)
    yort = AritmetikOrt(oznitelik2)
    return DizilerCarpimToplam(x, xort, y, yort) / len(x)
print(
"{} ile {} arasındaki Korelasyon = {}".format(
df.columns[0], df.columns[1], Korelasyon(df.columns[0], df.columns[1])
)
)
print(
"{} ile {} arasındaki Kovaryans = {}".format(
df.columns[0], df.columns[1], Kovaryans(df.columns[0], df.columns[1])
)
)
print(
"{}-{} Korelasyon = {}".format(
df.columns[0], df.columns[1], df[df.columns[0]].corr(df[df.columns[1]])
)
)
print(
"{}-{} Kovaryans = {}".format(
df.columns[0], df.columns[1], df[df.columns[0]].cov(df[df.columns[1]])
)
)
# Let's draw a correlation matrix to show whether the numeric variables are correlated, and reason about the correlation coefficients.
# Which two variables have the strongest positive relationship?
korelasyon_matris = df.corr()
print(korelasyon_matris)
sns.heatmap(korelasyon_matris)
for i in range(0, 4):
for j in range(0, 4):
if i == j:
continue
print(
"{}-{} Korelasyon= {}".format(
df.columns[i], df.columns[j], df[df.columns[i]].corr(df[df.columns[j]])
)
)
print(50 * "-")
# Let's draw a heat map to read the correlation coefficients more easily.
sns.heatmap(korelasyon_matris, annot=True)
# AS A RESULT, THE STRONGEST POSITIVE RELATIONSHIP IS SEEN BETWEEN petal.width AND petal.length.
# Let's display the unique values of our data frame's target variable "variety".
df["variety"].unique()
# Let's display how many unique values the target variable "variety" contains.
print("COUNT : ", len(df["variety"].unique()))
# We see that the sepal.width and sepal.length variables in the data frame are continuous. Let's first use a scatterplot to visualize these two continuous variables.
sns.scatterplot(x="sepal.width", y="sepal.length", data=df)
# Let's visualize the same two variables from a different angle, together with their frequencies, using a jointplot.
sns.jointplot(x="sepal.width", y="sepal.length", data=df)
# Let's visualize the same two variables with a scatterplot again, but this time break them down by the target variable using the "variety" parameter.
# Can the sepal variables separate the three species into clusters? Let's think about how distinguishable they are.
sns.scatterplot(x="sepal.width", y="sepal.length", data=df, hue="variety")
# Setosa is highly distinguishable, but the other classes may be mixed up.
# Let's check how balanced our data frame is with the value_counts() function.
df["variety"].value_counts()
# Draw a violin plot to examine the distribution of the sepal.width variable.
# What does this distribution tell us; can we say it is a normal distribution?
sns.catplot(x="sepal.width", data=df, kind="violin")
# Answer: Yes, we can, because the values are denser around the middle and taper off towards the extremes.
# To understand it better, let's draw a distplot of sepal.width.
sns.distplot(df["sepal.width"], bins=5)
# Let's visualize three separate violin plots, one per flower species, for the distribution of the sepal.length variable, with a single line of code.
sns.catplot(x="variety", y="sepal.length", data=df, kind="violin")
# How many observations of each flower species does our data frame contain?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually let's pass the variety parameter to the sns.countplot() function.
sns.countplot(x="variety", data=df)
# ANSWER: THE DATAFRAME CONTAINS 50 OBSERVATIONS FOR EACH FLOWER SPECIES.
# Let's visualize the sepal.length and sepal.width variables with sns.jointplot and examine the distribution and the regions where its frequency is high.
sns.jointplot(x="sepal.length", y="sepal.width", data=df)
# Let's add the kind = "kde" parameter to the visualization from the previous cell. This turns the dotted representation of the distribution into a density-focused visualization.
sns.jointplot(x="sepal.length", y="sepal.width", data=df, kind="kde")
# Let's draw the distributions of the petal.length and petal.width variables with a scatterplot.
sns.scatterplot(x="petal.length", y="petal.width", data=df)
# Let's add a third dimension to the same visualization with the hue = "variety" parameter.
sns.scatterplot(x="petal.length", y="petal.width", data=df, hue="variety")
# Let's implement the sns.lmplot() visualization with the petal.length and petal.width variables. Let's answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(x="petal.length", y="petal.width", data=df)
# There is a strong positive relationship.
# To reinforce this answer, let's print the correlation coefficient between the two variables.
print(
"{}-{} Korelasyon = {}".format(
"petal.length", "petal.width", df["petal.length"].corr(df["petal.width"])
)
)
# Let's create a new total length feature by adding the Petal Length and Sepal Length values.
x = df["sepal.length"] + df["petal.length"]
print(type(x))
m = []
for i in x:
m += [i]
df2 = pd.DataFrame(m, columns=["total.length"])
df2.head(10)
# Let's print the mean of total.length.
df2["total.length"].mean()
# Let's print the standard deviation of total.length.
df2["total.length"].std()
# Let's print the maximum value of sepal.length.
df["sepal.length"].max()
# Let's print the observations whose sepal.length is greater than 5.5 and whose species is Setosa.
df[(df["sepal.length"] > 5.5) & (df["variety"] == "Setosa")]
# Let's print only the sepal.length and sepal.width variables and values of the observations whose petal.length is less than 5 and whose species is Virginica.
df.loc[
(df["petal.length"] < 5) & (df["variety"] == "Virginica"),
["sepal.length", "sepal.width"],
]
# Let's group by our target variable variety and display the mean of our variable values.
df[df["variety"] == "Setosa"].mean()
# Let's group by our target variable variety and print the standard deviation values of only the petal.length variable.
df.loc[(df["variety"] == "Setosa"), ["petal.length"]].std()
|
import os
import numpy as np
import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, Input, Add, Lambda
from tensorflow.keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D
from tensorflow.keras.models import Model
from tensorflow.nn import relu, depth_to_space
from keras import Input
import torch
import torch.nn as nn
import torch.optim as optim
# Load Function - to load images
def load_img(path):
l = os.listdir(path)
l.sort()
arr = []
c = 0
for i in range(0, 1000):
image = cv2.imread(os.path.join(path, l[i]))
im = np.asarray(image)
arr.append(im)
if c % 500 == 0:
print(c, " Images processed")
c += 1
arr = np.array(arr)
return arr
# Loading data
lowres = load_img(
"/kaggle/input/ai-superres/AnalyticsArena_DataSet/LowReolution_3x_Train"
)
highres = load_img(
"/kaggle/input/ai-superres/AnalyticsArena_DataSet/HighResolution_Train"
)
import numpy as np
from PIL import Image
def data_generator(images_path, batch_size, input_shape):
while True:
idx = np.random.randint(0, len(images_path), batch_size)
batch_input = []
for i in idx:
# Load input image
img = Image.open(images_path[i])
img = img.resize(input_shape) # Resize to input shape
batch_input.append(np.array(img))
yield np.array(batch_input)
# Models
# import tensorflow as tf
# from tensorflow.keras.layers import Input, Conv2D, Add, Lambda, UpSampling2D
# from tensorflow.keras.models import Model
# def residual_block(x, num_filters):
# """Defines a residual block with two 3x3 convolutions."""
# x_in = x
# x = Conv2D(num_filters, kernel_size=3, strides=1, padding='same', activation='relu')(x)
# x = Conv2D(num_filters, kernel_size=3, strides=1, padding='same')(x)
# x = Add()([x_in, x])
# return x
# def MDSR(num_res_blocks=4, num_filters=16, upscale_factor=3):
# """Defines the MDSR model with specified number of residual blocks, filters, and upscaling factor."""
# # Input layer
# input_layer = Input(shape=(None, None, 3))
# # First convolutional layer
# x = Conv2D(num_filters, kernel_size=3, strides=1, padding='same', activation='relu')(input_layer)
# # Residual blocks
# for i in range(num_res_blocks):
# x = residual_block(x, num_filters)
# # Second convolutional layer
# x = Conv2D(num_filters, kernel_size=3, strides=1, padding='same')(x)
# # Upsampling layers
# x = Conv2D(num_filters * upscale_factor**2, kernel_size=3, strides=1, padding='same')(x)
# x = Lambda(lambda x: tf.nn.depth_to_space(x, upscale_factor))(x)
# x = Conv2D(3, kernel_size=3, strides=1, padding='same')(x)
# # Model definition
# model = Model(inputs=input_layer, outputs=x)
# return model
# model = MDSR()
# model.compile(optimizer='adam', loss='mse', metrics=['mse'])
# # Train the model on your dataset
# model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, batch_size=16)
#
import tensorflow as tf
from tensorflow.image import ssim_multiscale
def ms_ssim(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32) / 255.0
y_pred = tf.cast(y_pred, tf.float32) / 255.0
ms_ssim = tf.reduce_mean(ssim_multiscale(y_true, y_pred, max_val=1.0))
loss = 1 - ms_ssim
return loss
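# PSNR is another common super-resolution metric; a minimal sketch of a PSNR metric on
# the same 0-255 inputs, which could be passed to model.compile alongside ms_ssim:
def psnr_metric(y_true, y_pred):
    y_true = tf.cast(y_true, tf.float32) / 255.0
    y_pred = tf.cast(y_pred, tf.float32) / 255.0
    return tf.reduce_mean(tf.image.psnr(y_true, y_pred, max_val=1.0))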
from tensorflow.keras.layers import Input, Conv2D, Lambda
from tensorflow.keras.models import Model
def ISR(input_shape):
input_tensor = Input(shape=input_shape)
x = Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")(
input_tensor
)
x = Conv2D(filters=128, kernel_size=3, padding="same", activation="relu")(x)
x = Conv2D(filters=270, kernel_size=3, padding="same", activation="relu")(x)
x = Conv2D(filters=540, kernel_size=3, padding="same", activation="relu")(x)
x = Lambda(lambda x: tf.nn.depth_to_space(x, block_size=3))(x)
x = Conv2D(filters=27, kernel_size=1)(x)
x = Conv2D(filters=9, kernel_size=1)(x)
x = Conv2D(filters=3, kernel_size=1)(x)
model = Model(inputs=input_tensor, outputs=x)
print(model.summary())
return model
model = ISR((170, 170, 3))
model.compile(optimizer="adam", loss="mse", metrics=[ms_ssim])
model.fit(lowres, highres, epochs=5, batch_size=1)
import matplotlib.pyplot as plt
x = model.predict(lowres[1:2])[0]
plt.imshow(x)
x
highres[1]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing the data
data = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/train.csv")
data
data = data.drop("Unnamed: 0", axis=1)
x_train = data.drop("Completion_rate", axis=1)
y_train = data["Completion_rate"]
print(x_train, y_train)
# ### Creating a simple regression model
from xgboost import XGBClassifier
import xgboost as xgb
dtrain_reg = xgb.DMatrix(x_train, y_train, enable_categorical=True)
params = {"objective": "reg:squarederror"}
n = 100
model = xgb.train(
params=params,
dtrain=dtrain_reg,
num_boost_round=n,
)
#
print(model)
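# Quick sanity check (a sketch): evaluate the boosted model on its own training data.
# This only measures fit, not generalization.
from sklearn.metrics import mean_squared_error
train_pred = model.predict(dtrain_reg)
print("Training RMSE:", mean_squared_error(y_train, train_pred, squared=False))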
test = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
test = test.drop("Unnamed: 0", axis=1)
x_test = test
dtest_reg = xgb.DMatrix(x_test, enable_categorical=True)
y_test = model.predict(dtest_reg)
print(y_test)
# ### Creating our submission
submission = pd.DataFrame.from_dict({"Completion_rate": y_test})
submission
submission.to_csv("submission.csv", index=True, index_label="id")
|
# # Retail Sales Forecast Project by Shizheng Hou, Chuke Xu and Lei
# # Preprocessing
# ## Import needed packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate
from sklearn import linear_model
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Read and Import data
df_sales = pd.read_csv("/kaggle/input/retaildataset/sales data-set.csv")
df_stores = pd.read_csv("/kaggle/input/retaildataset/stores data-set.csv")
df_features = pd.read_csv("/kaggle/input/retaildataset/Features data set.csv")
df_features.tail()
# ## Merge data
df = df_sales.merge(df_stores).merge(df_features)
df.tail()
df.describe()
df.drop(
df[(df["MarkDown2"] < 0) | (df["MarkDown3"] < 0) | (df["Weekly_Sales"] < 0)].index,
inplace=True,
)
df.describe()
# ## Data frame information
df.info()
# ## Number of Nulls for each Feature
df.isnull().sum()
# ## Replace all missing value with zero
df = df.fillna(0)
df.head()
# ## Convert Date to datetime (dates are in day/month/year format)
df["Date"] = pd.to_datetime(df["Date"], dayfirst=True)
df.head()
# ## Encode Type by obtaining a one-hot numeric representation
s = pd.get_dummies(df["Type"])
s = s.rename(columns={"A": "TypeA", "B": "TypeB", "C": "TypeC"})
s
df = pd.concat([df, s], axis=1)
df.drop("Type", axis=1, inplace=True)
df
# ## Encode IsHoliday
m = pd.get_dummies(df["IsHoliday"])
m = m.rename(columns={False: "Not Holiday", True: "Holiday"})
m
df = pd.concat([df, m], axis=1)
df.drop("IsHoliday", axis=1, inplace=True)
df_encode = df
# ## Set Date as Index
df_encode = df_encode.set_index("Date")
df_encode.head()
# ## Heat Map
sns.set(rc={"figure.figsize": (20, 18)})
sns.heatmap(df.corr(), center=0, annot=True)
# ## Drop Fuel Price and Temperature
df_encode.drop(["Fuel_Price", "Temperature"], inplace=True, axis=1)
# ## Normalization
df_weekly_sales = df_encode["Weekly_Sales"]
df_encode_norm = df_encode / df_encode.max()
df_encode_norm["Weekly_Sales"] = df_weekly_sales
df_encode_norm
# ## Bayes regression
X = df_encode_norm.drop("Weekly_Sales", axis=1)
Y = df_encode_norm["Weekly_Sales"]
reg = linear_model.BayesianRidge()
reg.fit(X, Y)
reg.coef_
reg.score(X, Y)
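# cross_validate was imported above but not used; as a sketch, cross-validating the
# Bayesian ridge model gives a less optimistic R^2 estimate than reg.score(X, Y):
cv_results = cross_validate(linear_model.BayesianRidge(), X, Y, cv=5, scoring="r2")
print(cv_results["test_score"].mean())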
# ## Linear Regression
import statsmodels.api as sm
import statsmodels.formula.api as smf
X = df_encode_norm.drop("Weekly_Sales", axis=1)
y = df_encode_norm["Weekly_Sales"]
est = sm.OLS(y, sm.add_constant(X)).fit()
est.summary()
# ## Polynomial regression
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
poly = PolynomialFeatures(degree=2)
X = df_encode_norm.drop("Weekly_Sales", axis=1)
y = df_encode_norm["Weekly_Sales"]
X = poly.fit_transform(X)
clf = linear_model.LinearRegression().fit(X, y)
clf.score(X, y)
df_encode_norm.drop("Weekly_Sales", axis=1).shape
# # Test data
# ## Same preprocessing as before
df_test = df_features.merge(df_stores)
df_test["Date"] = pd.to_datetime(df_test["Date"])
df_test = df_test[
(df_test["Date"].dt.year >= 2013)
| ((df_test["Date"].dt.year == 2012) & (df_test["Date"].dt.month >= 11))
]
df_test.sort_values(["Store", "Date"], inplace=True)
df_dept = df_sales[df_sales["Date"] == "26/02/2010"].loc[:, ("Store", "Dept")]
df_final = df_dept.merge(df_test)
df_final
df_final.fillna(0, inplace=True)
df_final
s_ = pd.get_dummies(df_final["Type"])
s_ = s_.rename(columns={"A": "TypeA", "B": "TypeB", "C": "TypeC"})
df_final = pd.concat([df_final, s_], axis=1)
df_final.drop("Type", axis=1, inplace=True)
df_final
m_ = pd.get_dummies(df_final["IsHoliday"])
m_ = m_.rename(columns={False: "Not Holiday", True: "Holiday"})
df_final = pd.concat([df_final, m_], axis=1)
df_final.drop("IsHoliday", axis=1, inplace=True)
df_final_encode = df_final
sns.set(rc={"figure.figsize": (20, 18)})
sns.heatmap(df_final_encode.corr(), center=0, annot=True)
df_final_encode.drop(["Fuel_Price", "Temperature"], axis=1, inplace=True)
df_final_encode.set_index("Date", inplace=True)
df_final_encode_norm = df_final_encode / df_final_encode.max()
df_final_encode_norm
# ## Predict
import statsmodels.api as sm
import statsmodels.formula.api as smf
y_pred1 = pd.DataFrame(
est.predict(sm.add_constant(df_final_encode_norm)), columns=["Weekly_Sales_predict"]
)
y_pred1.describe()
y_pred2 = pd.DataFrame(
clf.predict(poly.fit_transform(df_final_encode_norm)),
columns=["Weekly_Sales_predict"],
)
y_pred2.describe()
|
# Omelchenko 8.1212
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
print("Enter the size of square matrix and vector: ")
n = input()
n = int(n)
matrix = np.random.randint(10, size=(n, n))
print("Matrix:\n")
print(matrix)
print("\n")
vector = np.random.randint(10, size=n)
print("Vector:\n")
print(vector)
# ***The code solves the linear matrix equation using the `numpy.linalg.solve` function***
startTime = time.time()
res1 = np.linalg.solve(matrix, vector)
print(res1)
print("--- %s milliseconds ---" % ((time.time() - startTime) * 1000))
# ***For comparison, the following cell computes the matrix-vector product with `numpy.dot`; note that this is a product, not Cramer's rule, and it does not solve the system (see the Cramer's rule sketch below)***
startTime = time.time()
res2 = np.dot(matrix, vector)
print(res2)
print("--- %s milliseconds ---" % ((time.time() - startTime) * 1000))
|
# # Plot-a-Thon 2021!
# A tutorial notebook by Dr. Emily Fairfax
# March 5th, 2021
# ***
# ## Let's start by importing [the data!](https://raw.githubusercontent.com/emilyfairfax/PlotAThon/main/rotten_tomatoes_movies.csv)
# We can't make slick plots until we get some data in here to work with! And the data this year is all about movies - the **good** movies, the **bad** movies, the ***ugly*** movies...
# To import data, we use the command `read.csv()` and inside the parentheses put the URL to the dataset we are going to use in quotation marks.
# Let's try importing the data into the cell below. You will want to name your dataset something easy to remember and to type - we will be "calling" your data a lot! Your code should look something like:
# ```
# MyData = read.csv("URL.csv")
# ```
# We can check that it worked by telling R to generate the "head" of the data set (the first few rows), as well as a quick summary of the dataset. To do that, we will use the `head()` and `summary()` commands. To "run them on" your data, just put the name of your dataset (in my example that is `MyData`) inside of the parentheses.
# Import the data
MyData = read.csv(
"https://raw.githubusercontent.com/emilyfairfax/PlotAThon/main/rotten_tomatoes_movies.csv"
)
# Check that the import worked with head
head(MyData)
# Run a quick summary
summary(MyData)
# ## Woah, that's a lot of data.
# How are you possibly supposed to make sense of all of that?? There are 22 columns of data! What the heck!!
# A good first step is picking a variable to narrow things down based on. The variables are your column headers, and whenever you want to search within a column or call a column of data, you can do that by typing `YourDataName$ColumnYouWant.`
# Narrowing down your dataset based on values within one of your columns is called "subsetting" and you can make a subset using the `subset()` command if your criteria are simple. To use `subset()` you put first put the name of the original dataset (in this case `MyData`), then a comma, then tell it the criteria you want to make your subset based on, like ratings that are greater than 50. If you want to make a more complex subset where you are searching within a column for partial matches (e.g. the actors contains Nicholas Cage) then you might want to use `grep()`. To use `grep()`, first you call your dataset, then tell open square brackets `[]`. Inside those square brackets, put `grep()`, and then inside `grep` first list your search word in quotes, then a comma, then the column you want it to search in, then a comma, then a space, then close the square bracket. That is a lot! So here are some examples.
# Some example subsetting commands:
# - movies with Nicholas Cage:
# - `CageData = MyData[grep("Cage", MyData$actors), ]`
# - movies rated greater than 50:
# - `TopHalf = subset(MyData, tomatometer_rating >50)`
# - movies with tomatometer status of rotten:
# - `RottenData = subset(MyData, tomatometer_status == "Rotten")`
# - *note the double equals sign for setting a condition!*
# - movies with tomatometer rating >75 AND audience rating >75
# - `GreatMovies = subset(MyData, tomatometer_rating > 75 & audience_rating > 75)`
# - movies rated PG or G
# - `KidFriendly = subset(MyData, content_rating == "PG" | content_rating == "G")`
#
# For quick criteria forming reference, `&` is and, `|` is or, `==` is "is equivalent to" (for text based criteria), `>` is greater than, `>=` is greater than or equal to.
# I am going to go ahead and make a subset based on movies with Nicholas Cage in them below. You can make a subset based on anything you want!
# Make a subset looking for actors with Cage in their name
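# (following the grep() example given above)
CageData = MyData[grep("Cage", MyData$actors), ]
head(CageData)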
|
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:center;
# display:fill;
# border-radius:5px;
# background-color:#c2a5ca;
# overflow:hidden;
# font-weight:700">BP & CNN Example
# # Catalogue
# 1.Data Description
# 2.Data Procession
# 3.Training(Using DNN/BP)
# 4.Training(Using CNN)
# 5.Summary
# Change the code theme
#!wget https://raw.githubusercontent.com/VvanWindring/DataAnalysis/main/CSS.css?token=GHSAT0AAAAAAB7MBASUTL7DTBW6NBHXUPYQZAQI3RA -O CSS.css -q
from IPython.core.display import HTML
with open("/kaggle/input/my-theme/CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
# # Part 1
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:170%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#c2a5ca;
# overflow:hidden;
# font-weight:700">| 1.Data Description
#
import numpy as np
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import math
origin = pd.read_csv("/kaggle/input/cluster-nn-data/penguins.csv")
origin.head()
# Randomly selected 10 samples from origin data for display
features = origin[origin.columns[0:4]] # =features = origin.iloc[:,0:4]
target = origin["Species"]
features.sample(10)
# summary table function
def summary(df):
print(f"data shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["data type"])
summ["#missing"] = df.isnull().sum().values * 100
summ["%missing"] = df.isnull().sum().values / len(df)
summ["#unique"] = df.nunique().values
summ["skewness"] = df.skew() # 偏度系数,abs>1为严重左/右偏
desc = pd.DataFrame(df.describe(include="all").transpose())
summ["min"] = desc["min"].values
summ["max"] = desc["max"].values
summ["first value"] = df.loc[0].values
summ["second value"] = df.loc[1].values
summ["third value"] = df.loc[2].values
return summ
summary(origin)
fea = list(origin.columns.values)
n_bins = 100
histplot_hyperparams = {"kde": True, "alpha": 0.4, "stat": "percent", "bins": n_bins}
columns = fea
n_cols = 4
n_rows = math.ceil(len(columns) / n_cols)
fig, ax = plt.subplots(n_rows, n_cols, figsize=(20, n_rows * 4))
ax = ax.flatten()
for i, column in enumerate(columns):
plot_axes = [ax[i]]
sns.kdeplot(origin[column], label="origin", ax=ax[i], color="red")
# titles
ax[i].set_title(f"{column} Distribution")
ax[i].set_xlabel(None)
# remove axes to show only one at the end
plot_axes = [ax[i]]
handles = []
labels = []
for plot_ax in plot_axes:
handles += plot_ax.get_legend_handles_labels()[0]
labels += plot_ax.get_legend_handles_labels()[1]
plot_ax.legend().remove()
for i in range(i + 1, len(ax)):
ax[i].axis("off")
fig.suptitle(
f"Numerical Feature Distributions\n\n\n",
ha="center",
fontweight="bold",
fontsize=25,
)
fig.legend(
handles, labels, loc="upper center", bbox_to_anchor=(0.5, 0.96), fontsize=25, ncol=3
)
plt.tight_layout()
# describe numeric feature correlation
def df_corr(df):
plt.figure(figsize=(df.shape[1], df.shape[1]))
color = "RdYlGn"
mask = np.zeros_like(df.corr())
mask[np.triu_indices_from(mask)] = True
sns.heatmap(df.corr(), mask=mask, annot=True, linewidth=0.2, cmap=color)
df_corr(origin)
# # Part 2
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:170%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#c2a5ca;
# overflow:hidden;
# font-weight:700">| 2.Data Procession
#
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
class data_procession:
def __init__(self, X=None, Y=None):
self.X = X
self.Y = Y
print("Origin X Shape:", self.X.shape)
print("Origin Y Shape:", self.Y.shape)
def process(self):
def gaussian_normalize(data):
mean = np.mean(data)
std = np.std(data)
normalized_data = (data - mean) / std
return normalized_data
        # Fill missing values (forward fill)
        self.X = self.X.fillna(method="ffill")
        print("Step 1: missing values filled")
        # Gaussian (z-score) normalization
for col in self.X.columns:
self.X[col] = gaussian_normalize(self.X[col])
print("Step2:已将数据正态化")
# 样本数过少,采用超采样
sm = SMOTE(random_state=39)
self.X, self.Y = sm.fit_resample(self.X, self.Y)
for i in range(1, 3):
self.X, self.Y = self.X.append(self.X), self.Y.append(self.Y)
print("Step3:已完成数据超采样")
print("Processed X Shape:", self.X.shape)
print("Processed Y Shape:", self.Y.shape)
return self.X, self.Y
X, Y = data_procession(features, target).process()
# # Part 3
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:170%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#c2a5ca;
# overflow:hidden;
# font-weight:700">| 3.Training(Using DNN/BP)
#
from sklearn.model_selection import train_test_split
import tensorflow
from tensorflow import keras
from tensorflow.keras import models
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import utils
from tensorflow.keras import optimizers
tensorflow.random.set_seed(39)
print("Libraries imported.")
print("Keras version:", keras.__version__)
print("TensorFlow version:", tensorflow.__version__)
penguin_classes = ["Adelie", "Gentoo", "Chinstrap"]
len_fea = len(X.columns.values)
x_train, x_valid, y_train, y_valid = train_test_split(
X, Y, test_size=0.3, random_state=39
)
print("Training Set: %d, Test Set: %d \n" % (len(x_train), len(x_valid)))
# Set data type
x_train = x_train.astype("float32")
x_valid = x_valid.astype("float32")
y_train = utils.to_categorical(y_train)
y_valid = utils.to_categorical(y_valid)
print("Ready...")
# Define network
hl = 10 # Number of hidden layer nodes
model = Sequential()
model.add(Dense(hl, input_dim=len_fea, activation="relu"))
model.add(Dense(hl, input_dim=hl, activation="relu"))
model.add(Dense(len(penguin_classes), input_dim=hl, activation="softmax"))
print(model.summary())
# Define hyper-parameters
learning_rate = 0.001
opt = optimizers.Adam(learning_rate=learning_rate)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
num_epochs = 50
history = model.fit(
x_train,
y_train,
epochs=num_epochs,
batch_size=10,
validation_data=(x_valid, y_valid),
verbose=1,
)
# Evaluate our model training loss
epoch_nums = range(1, num_epochs + 1)
training_loss = history.history["loss"]
validation_loss = history.history["val_loss"]
plt.plot(epoch_nums, training_loss)
plt.plot(epoch_nums, validation_loss)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend(["training", "validation"], loc="upper right")
plt.show()
# Check weights and biases
for layer in model.layers:
weights = layer.get_weights()[0]
biases = layer.get_weights()[1]
print("------------\nWeights:\n", weights, "\nBiases:\n", biases)
# Use confusion matrix to evaluate model outputs
from sklearn.metrics import confusion_matrix
class_probabilities = model.predict(x_valid)
predictions = np.argmax(class_probabilities, axis=1)
true_labels = np.argmax(y_valid, axis=1)
# Create confusion matrix
cm = confusion_matrix(true_labels, predictions)
plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=85)
plt.yticks(tick_marks, penguin_classes)
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()
# Save our model (create the target directory first so model.save does not fail)
import os
os.makedirs("models", exist_ok=True)
modelFileName = "models/penguin-classifier.h5"
model.save(modelFileName)
del model
print("model saved as", modelFileName)
|
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
warnings.filterwarnings("ignore")
tdcsfog_path = "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/tdcsfog"
## https://www.kaggle.com/code/arjanso/reducing-dataframe-memory-size-by-65
def reduce_memory_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype.name
if (col_type != "datetime64[ns]") & (col_type != "category"):
if col_type != "object":
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min > np.iinfo(np.int16).min
and c_max < np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.int32).min
and c_max < np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.int64).min
and c_max < np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
pass
else:
df[col] = df[col].astype("category")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage became: ", mem_usg, " MB")
return df
tdcsfog_list = []
for file_name in os.listdir(tdcsfog_path):
if file_name.endswith(".csv"):
file_path = os.path.join(tdcsfog_path, file_name)
file = pd.read_csv(file_path)
tdcsfog_list.append(file)
tdcsfog = pd.concat(tdcsfog_list, axis=0)
tdcsfog = reduce_memory_usage(tdcsfog)
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("tdcsfog", tdcsfog)
g = sns.PairGrid(tdcsfog[["AccV", "AccML", "AccAP"]])
g.map_lower(plt.scatter, alpha=0.6)
g.map_diag(plt.hist, alpha=0.7)
X = tdcsfog.iloc[:, 1:4]
y1 = tdcsfog["StartHesitation"]
y2 = tdcsfog["Turn"]
y3 = tdcsfog["Walking"]
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, mean_squared_log_error
X_train, X_val, y1_train, y1_val = train_test_split(
X, y1, test_size=0.2, random_state=52
)
X_train, X_val, y2_train, y2_val = train_test_split(
X, y2, test_size=0.2, random_state=52
)
X_train, X_val, y3_train, y3_val = train_test_split(
X, y3, test_size=0.2, random_state=52
)
from sklearn.linear_model import LogisticRegression
# Create three separate logistic regression models.
model1 = LogisticRegression()
model2 = LogisticRegression()
model3 = LogisticRegression()
# Train the models on the training data.
model1.fit(X_train, y1_train)
model2.fit(X_train, y2_train)
model3.fit(X_train, y3_train)
# Evaluate the models on the test data.
print("Accuracy for StartHesitation:", model1.score(X_val, y1_val))
print("Accuracy for Turn:", model2.score(X_val, y2_val))
print("Accuracy for Walking:", model3.score(X_val, y3_val))
from sklearn.metrics import classification_report
y1_pred = model1.predict(X_val)
y2_pred = model2.predict(X_val)
y3_pred = model3.predict(X_val)
print(
f"Classification Report for StartHesitation:{classification_report(y1_val, y1_pred)}"
)
print(f"Classification Report for Turn:{classification_report(y2_val, y2_pred)}")
print(f"Classification Report for Walking: {classification_report(y3_val, y3_pred)}")
tdcsfog_test_path = (
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/test/tdcsfog"
)
tdcsfog_test_list = []
for file_name in os.listdir(tdcsfog_test_path):
if file_name.endswith(".csv"):
file_path = os.path.join(tdcsfog_test_path, file_name)
file = pd.read_csv(file_path)
file["Id"] = file_name[:-4] + "_" + file["Time"].apply(str)
tdcsfog_test_list.append(file)
tdcsfog_test = pd.concat(tdcsfog_test_list, axis=0)
tdcsfog_test
defog_test_path = "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/test/defog"
defog_test_list = []
for file_name in os.listdir(defog_test_path):
if file_name.endswith(".csv"):
file_path = os.path.join(defog_test_path, file_name)
file = pd.read_csv(file_path)
file["Id"] = file_name[:-4] + "_" + file["Time"].apply(str)
defog_test_list.append(file)
defog_test = pd.concat(defog_test_list, axis=0)
defog_test
tdcsfog_test = reduce_memory_usage(tdcsfog_test)
defog_test = reduce_memory_usage(defog_test)
test = pd.concat([tdcsfog_test, defog_test], axis=0).reset_index(drop=True)
test
# Separate the dataset for the independent variables.
test_X = test.iloc[:, 1:4]
# Get the predictions for the three models on the test data.
pred_y1 = model1.predict(test_X)
pred_y2 = model2.predict(test_X)
pred_y3 = model3.predict(test_X)
test["StartHesitation"] = pred_y1 # target variable for StartHesitation
test["Turn"] = pred_y2 # target variable for Turn
test["Walking"] = pred_y3 # target variable for Walking
test
submission = test.iloc[:, 4:].fillna(0.0)
submission.to_csv("submission.csv", index=False)
|
# ## Purpose
# The notebook explores keypoint detection techniques to identify the volume within
# the LV (left ventricle) as a set of grid lines. The lines are used to calculate the LV volume at the corresponding cardiac phase. We use the EchoNet-Dynamic data as input for this exploration. The data already has tracings for the ES and the ED frames. The LV volumes calculated at the ED and ES phases are used for the ejection fraction calculation, which is a crucial measure of heart function.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import SimpleITK as sitk
import PIL
import cv2
import os
import shutil
import tempfile
from pathlib import Path
## Code to clean kaggle output folders
def remove_folder_contents(folder):
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
# remove_folder_contents(file_path)
# os.rmdir(file_path)
continue
except Exception as e:
print(e)
# folder_path = '/kaggle/working'
# remove_folder_contents(folder_path)
# ### Set Data Directory to where you want all the data and processing artifacts to be
# If using Kaggle
# If using Google Drive
from google.colab import drive
drive.mount("/content/drive")
# If using a local directory
# ### Set the Root Directory
directory = os.environ.get("DATA_DIRECTORY")
ROOT_DIR = Path(tempfile.mkdtemp()) if directory is None else Path(directory)
print(ROOT_DIR)
def checkPathExists(path):
if not os.path.exists(path):
print(f"Cannot access path: {path}")
else:
print(f"Path {path} accessible")
# Get the data and setup the input directories
ECHONET_DATA_DIR = "heartdatabase/EchoNet-Dynamic"
import pprint
pp = pprint.PrettyPrinter()
DATA_DIR = ROOT_DIR.joinpath(ECHONET_DATA_DIR)
checkPathExists(DATA_DIR)
# from monai.utils import set_determinism
# set_determinism(seed=0)
# from monai.apps import download_and_extract, extractall
## Note that if downloaded, the data will be zipped. It should just
## unzip at the location. Not tested.
# compressed_file = ROOT_DIR.joinpath("data.zip")
# if not os.path.exists(DATA_DIR):
# extractall(compressed_file, ROOT_DIR)
# Load the Info files and the volume traced files
INFO_FILE = DATA_DIR.joinpath("FileList.csv")
VOL_TRACE_FILE = DATA_DIR.joinpath("VolumeTracings.csv")
checkPathExists(INFO_FILE)
checkPathExists(VOL_TRACE_FILE)
INFO_DF = pd.read_csv(INFO_FILE)
VOL_TRACE_DF = pd.read_csv(VOL_TRACE_FILE)
INFO_DF.head()
VOL_TRACE_DF.head()
INFO_DF.Split.value_counts()
## To extract the ED and ES frame from the video file
def extractEDandESframes(image_file, ED_frame_number, ES_frame_number):
video = cv2.VideoCapture(str(image_file))
frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
## Retrieve the ED frame
for i in range(ED_frame_number - 1):
ret, frame = video.read()
res, ED_frame = video.read()
## Retrieve the ES frame
diff = ES_frame_number - ED_frame_number
for i in range(diff):
ret, frame = video.read()
res1, ES_frame = video.read()
if not res:
print("issue ED")
if not res1:
print("issue ES")
if res & res1:
return ED_frame, ES_frame
else:
return None, None
## Save the ED and ES frame as a png which is prefixed with the original avi file name
## Save a csv file whose name is the same as the original avi file name, containing the volume
## tracings for both the ES and ED file
## the csv file also has a column with Image file name, and the Split value
## from the original file list.
def saveEDandESimages(data_dir, output_dir, info_df, trace_df):
patient_list = [x for x in data_dir.iterdir()]
for i, file in enumerate(patient_list):
patient_id = file.name.split(".")[0]
frame_df = trace_df.query(f"FileName == '{file.name}'")
try:
ed_number, es_number = frame_df.Frame.unique()
except:
print(f"This {file} generated an error")
continue
split_value = info_df.query(f"FileName == '{patient_id}'").Split
# print(ed_number, es_number)
ED_frame, ES_frame = extractEDandESframes(file, ed_number, es_number)
if ED_frame is not None or ES_frame is not None:
## Write the ED and ES frames as images
iED_path = output_dir.joinpath(f"{patient_id}_ED.png")
iES_path = output_dir.joinpath(f"{patient_id}_ES.png")
cv2.imwrite(str(iED_path), ED_frame)
cv2.imwrite(str(iES_path), ES_frame)
            ## Write the trace points into a csv file
ED_info = frame_df.query(
f'FileName =="{file.name}" and Frame == {ed_number}'
).reset_index(drop=True)
ES_info = frame_df.query(
f'FileName =="{file.name}" and Frame == {es_number}'
).reset_index(drop=True)
ED_stack = np.hstack(ED_info[["X1", "Y1", "X2", "Y2"]].values).tolist()
ES_stack = np.hstack(ES_info[["X1", "Y1", "X2", "Y2"]].values).tolist()
keypoint_df = pd.DataFrame([ED_stack, ES_stack])
keypoint_df["Image"] = [f"{patient_id}_ED.png", f"{patient_id}_ES.png"]
keypoint_df["Split"] = [split_value.iloc[0], split_value.iloc[0]]
keypoint_df.to_csv(output_dir.joinpath(f"{patient_id}.csv"), index=False)
else:
print(f"There was an issue with processing {file}")
# Extract the ED and ES frames and the corresponding Trace files into a separate file structure that will be used later for the modeling.
VIDEO_DIR = DATA_DIR.joinpath("Videos")
OUTPUT_DIR = Path("/kaggle/working/Output")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
checkPathExists(OUTPUT_DIR)
saveEDandESimages(VIDEO_DIR, OUTPUT_DIR, INFO_DF, VOL_TRACE_DF)
# Visualize a sample image and tracing from the newly built file structure
NUM_KEYPOINTS = 84
trace_df = pd.read_csv(OUTPUT_DIR.joinpath("0XB5CECBD29920B7B.csv"))
arr = PIL.Image.open(str(OUTPUT_DIR.joinpath("0XB5CECBD29920B7B_ED.png")))
plt.imshow(arr)
df = trace_df.query('Image == "0XB5CECBD29920B7B_ED.png"')
print(df.iloc[0][0])
for i in range(0, NUM_KEYPOINTS, 4):
x1, y1 = df.iloc[0][i], df.iloc[0][i + 1]
x2, y2 = df.iloc[0][i + 2], df.iloc[0][i + 3]
plt.plot([x1, x2], [y1, y2], color="red", linewidth=3)
plt.show()
trace_df
# ### Build a Key Point Detection Model
# Models
from keras.models import Model
from tensorflow.keras.applications import MobileNetV2, mobilenet_v2
from keras.layers import Input, Dropout, SeparableConv2D, Dense, Flatten
NUM_KEYPOINTS = 84
IMAGE_SIZE = 112
OUTPUT_DIR = Path("/kaggle/working/Output")
EPOCHS = 50
# The convenience function will load the training, validation, or test data as needed
## Load all the images and the keypoints
def LoadData(input_dir, type="TRAIN"):
all_images = []
all_points = []
all_ids = []
for j, p in enumerate(input_dir.glob(f"*.csv")):
df = pd.read_csv(p)
try:
df_type = df.Split.unique()[0]
except AttributeError:
print(df)
break
if df_type == type:
for i, x in enumerate(df.Image):
img = PIL.Image.open(input_dir.joinpath(x))
# plt.imshow(img)
# plt.show()
v = df.iloc[i][:NUM_KEYPOINTS]
if len(v) != 84:
continue
all_points.append(v)
img = cv2.resize(np.asarray(img), (IMAGE_SIZE, IMAGE_SIZE))
all_images.append(img)
all_ids.append(p.name.split(".")[0])
all_images = np.asarray(all_images)
all_points = np.asarray(all_points)
all_points = all_points.reshape(-1, 1, 1, NUM_KEYPOINTS) / IMAGE_SIZE
all_ids = np.asarray(all_ids)
return all_images, all_points, all_ids
# Load the training data into memory
# Convert the datatype for the keypoints to float32. By default it is float64, which MobileNetV2 does not like.
train_images, train_keypoints, train_ids = LoadData(OUTPUT_DIR)
pp.pprint(train_images.shape)
pp.pprint(train_keypoints.shape)
train_keypoints_conv = train_keypoints.astype("float32")
def VisualizeSampleImages(image, kps, col="red"):
plt.imshow(image)
for i in range(0, NUM_KEYPOINTS, 4):
x1, y1 = kps[0][i], kps[0][i + 1]
x2, y2 = kps[0][i + 2], kps[0][i + 3]
plt.plot([x1, x2], [y1, y2], color=col, linewidth=2)
# plt.show()
# Visualize sample training images
plt.subplots(4, 4, figsize=(10, 10))
num_total = train_images.shape[0]
for i, k in enumerate(np.random.randint(num_total, size=16)):
kps = train_keypoints_conv[k].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
image = train_images[k]
plt.subplot(4, 4, i + 1)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(image, kps)
plt.xlabel(train_ids[k])
def VisualizeInstanceData(data_images, data_keypoints, data_ids, ED_index, ES_index):
print(f"Data id {data_ids[ED_index]}")
plt.subplots(1, 2, figsize=(8, 8))
plt.subplot(1, 2, 1)
img = data_images[ED_index]
kps = data_keypoints[ED_index].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img, kps)
plt.xlabel("ED image")
plt.subplot(1, 2, 2)
img = data_images[ES_index]
kps = data_keypoints[ES_index].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img, kps)
plt.xlabel("ES image")
# The following ids have incorrect volume traces and will be removed\
# 0X354B37A25C64276F\
# 0X973E4A9DAADDF9F\
# 0X37F9E9981E207C04\
# 0X766B7B0ABDB07CD5\
# 0X5B6FCBB75BF8FCB7\
# 0X36C5A15AC7FC6AAA\
# 0X4BBA9C8FB485C9AB\
# 0X49EC1927F5747B19\
# 0X5D38D994C2490EAE\
# 0X53C185263415AA4F\
# 0X65E605F203321860\
# 0X753AA26EA352BBB
train_error_list = [
"0X354B37A25C64276F",
"0X973E4A9DAADDF9F",
"0X37F9E9981E207C04",
"0X766B7B0ABDB07CD5",
"0X5B6FCBB75BF8FCB7",
"0X36C5A15AC7FC6AAA",
"0X4BBA9C8FB485C9AB",
"0X49EC1927F5747B19",
"0X5D38D994C2490EAE",
"0X53C185263415AA4F",
"0X65E605F203321860",
"0X753AA26EA352BBB",
]
x = [np.where(train_ids == inst)[0].tolist() for inst in train_error_list]
# flat_list = list(np.concatenate(x).flat)
print(x)
for i, j in x:
VisualizeInstanceData(train_images, train_keypoints_conv, train_ids, i, j)
plt.show()
# Remove the incorrect volume traces from the training dataset
flat_list = list(np.concatenate(x).flat)
train_keypoints_conv = np.delete(train_keypoints_conv, flat_list, 0)
train_images = np.delete(train_images, flat_list, 0)
train_ids = np.delete(train_ids, flat_list, 0)
# #### Build the model for training
# I am using the MobileNetv2 from keras applications as the backbone.
# Using the imagenet weights, I do transfer learning with a new head that detects the keypoints for the Echo images. There are 42 points (84 coordinates) to detect.
IMAGE_SIZE = 112
backbone = MobileNetV2(
input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), weights="imagenet", include_top=False
)
backbone.trainable = False
MODEL_NAME = "LV_Cavity_Volume_Trace"
# InputLayer
inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name="InputLayer")
# Preprocess Input
x = mobilenet_v2.preprocess_input(inputs)
# MobileNetV2 Backbone
x = backbone(x)
# Regularization
x = Dropout(0.3, name="DropOut")(x)
# Separable Convolutional Operation
x = SeparableConv2D(
NUM_KEYPOINTS,
kernel_size=3,
activation="relu",
data_format="channels_last",
name="ConvPass",
)(x)
# Outputs
outputs = SeparableConv2D(
NUM_KEYPOINTS,
kernel_size=2,
activation="sigmoid",
data_format="channels_last",
name="OutputLayer",
)(x)
# Model
model_1 = Model(inputs, outputs, name=MODEL_NAME)
model_1.summary()
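# Sanity check (a sketch): with a 112x112x3 input, MobileNetV2 yields a 4x4 feature map
# and the two valid-padding separable convolutions reduce it to a single spatial
# position, so the output should match the (1, 1, NUM_KEYPOINTS) target shape.
dummy = np.zeros((1, IMAGE_SIZE, IMAGE_SIZE, 3), dtype="float32")
print(model_1(dummy).shape)  # expected: (1, 1, 1, 84)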
# Load the validation images
val_images, val_keypoints, val_ids = LoadData(OUTPUT_DIR, type="VAL")
pp.pprint(val_images.shape)
pp.pprint(val_keypoints.shape)
val_keypoints_conv = val_keypoints.astype("float32")
# Callbacks
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
# Optimizer
from tensorflow.keras.optimizers import Adam
class ShowProgress(Callback):
def on_epoch_end(self, epoch, logs=None):
if epoch % 20 == 0:
plt.subplots(1, 4, figsize=(10, 10))
for i, k in enumerate(np.random.randint(num_total, size=2)):
img = train_images[k]
img = img.reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 3)
pred_kps = self.model.predict(img)
pred_kps = pred_kps.reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
kps = train_keypoints_conv[k].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
plt.subplot(1, 4, 2 * i + 1)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img[0], pred_kps, col="#16a085")
plt.xlabel(f"Predicted")
plt.subplot(1, 4, 2 * i + 2)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img[0], kps)
plt.xlabel(f"GT:{train_ids[k]}")
plt.show()
# Train
WEIGHT_DIR = Path("/kaggle/working/Weights")
WEIGHT_DIR.mkdir(parents=True, exist_ok=True)
EPOCHS = 200
# Compile
model_1.compile(
loss="mae", optimizer=Adam(learning_rate=1e-4)
)  # The lower the learning rate, the better the results.
checkpoint_path = str(WEIGHT_DIR) + "/" + MODEL_NAME + "-{epoch:04d}.ckpt"
# Model Training
callbacks = [
# EarlyStopping(patience=7, restore_best_weights=True), # keep the patience low.
ModelCheckpoint(checkpoint_path, save_best_only=True, save_weights_only=True),
ShowProgress(),
]
history = model_1.fit(
train_images,
train_keypoints_conv,
validation_data=(val_images, val_keypoints_conv),
epochs=EPOCHS,
callbacks=callbacks,
)
# Visualize Loss Curve
lc = pd.DataFrame(history.history)
lc.plot(figsize=(10, 8))
plt.title("Learning Curve", fontsize=25)
plt.grid()
plt.legend(fontsize=12)
plt.show()
# Observing the model outputs, it looks like it captures the left ventricle area very well. Also the scaling of the volume tracing points is nicely captured. The lines are parallel as expected. However, the long axis line is not always perpendicular to the parallel lines, as seen in some of the cases.
# Also it does not capture the deformations in the left ventricle. The model needs a convolution layer that can capture deformations of the left ventricle across the images.
# ## Evaluation on Test Data
from tensorflow.train import latest_checkpoint
latest = latest_checkpoint(str(WEIGHT_DIR))
latest
test_images, test_keypoints, test_ids = LoadData(OUTPUT_DIR, type="TEST")
pp.pprint(test_images.shape)
pp.pprint(test_keypoints.shape)
test_keypoints_conv = test_keypoints.astype("float32")
def evaluate_model(model, data_images, data_keypoints):
loss = model.evaluate(data_images, data_keypoints, verbose=2)
return loss
model_2 = Model(inputs, outputs, name=MODEL_NAME)
model_2.compile(loss="mae", optimizer=Adam(learning_rate=1e-4))
model_2.load_weights(latest)
print(
f"Loss for training images : {evaluate_model(model_2, train_images, train_keypoints_conv)}"
)
print(
f"Loss for validation images : {evaluate_model(model_2, val_images, val_keypoints_conv)}"
)
print(
f"Loss for testing images : {evaluate_model(model_2, test_images, test_keypoints_conv)}"
)
test_total = test_images.shape[0]
plt.subplots(1, 4, figsize=(10, 10))
for i, k in enumerate(np.random.randint(test_total, size=2)):
img = test_images[k]
img = img.reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 3)
pred_kps = model_2.predict(img)
pred_kps = pred_kps.reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
kps = test_keypoints[k].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
plt.subplot(1, 4, 2 * i + 1)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img[0], pred_kps, col="#16a085")
plt.xlabel(f"Predicted")
plt.subplot(1, 4, 2 * i + 2)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img[0], kps)
plt.xlabel(f"GT:{train_ids[k]}")
# ## Calculate the Ejection Fraction using the Volume Tracings
# Add some utility functions for the calculation
import math
def calculate_disk_area(x1, y1, x2, y2):
dist = np.linalg.norm(np.array((x1, y1)) - np.array((x2, y2)))
r = dist / 2
area = np.pi * r * r
return area
def calculate_volume(keypoints):
"""
keypoints: shape is [1, NUM_KEYPOINTS]
"""
## first 4 is the long axis points
x1, y1, x2, y2 = keypoints[0][0], keypoints[0][1], keypoints[0][2], keypoints[0][3]
distance = np.linalg.norm(np.array((x1, y1)) - np.array((x2, y2)))
height_of_disk = distance / 20
accumalated_areas = []
for i in range(4, NUM_KEYPOINTS, 4):
accumalated_areas.append(
calculate_disk_area(
keypoints[0][i],
keypoints[0][i + 1],
keypoints[0][i + 2],
keypoints[0][i + 3],
)
)
xa, ya, xb, yb = keypoints[0][4], keypoints[0][5], keypoints[0][6], keypoints[0][7]
xc, yc, xd, yd = (
keypoints[0][8],
keypoints[0][9],
keypoints[0][10],
keypoints[0][11],
)
## Calculate the distance between the 2 adjacent parallel lines. This will be alternate height of
## the disk
m = (yb - ya) / (xb - xa)
c1 = yb - m * xb
c2 = yd - m * xd
alt_height_of_disk = abs(c1 - c2) / math.sqrt(1 + m * m)
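    # Note: alt_height_of_disk is computed above for reference but is not used; the
    # volume below uses height_of_disk (long axis / 20) as the disk thickness.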
volume = sum(accumalated_areas) * height_of_disk
return volume
def calculate_EF(ED_keypoints, ES_keypoints):
"""
ED_keypoints: shape [1, NUM_KEYPOINTS]
ES_keypoints: shape [1, NUM_KEYPOINTS]
"""
ED_volume = calculate_volume(ED_keypoints)
ES_volume = calculate_volume(ES_keypoints)
EF = ((ED_volume - ES_volume) / ED_volume) * 100
return EF
def calculate_EFs(data_keypoints):
"""
data_keypoints: shape [None, 1, 1, NUM_KEYPOINTS]
"""
total = data_keypoints.shape[0]
data_EFs = []
for i in range(0, total, 2):
ED_kps = data_keypoints[i].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
ES_kps = data_keypoints[i + 1].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
EF = calculate_EF(ED_kps, ES_kps)
data_EFs.append(EF)
return data_EFs
def build_dataframe_EFs(calculated_kps, predicted_kps):
"""
calculated_kps: shape [None, 1, 1, NUM_KEYPOINTS]
predicted_kps: shape [None, 1, 1, NUM_KEYPOINTS]
"""
cal_efs = calculate_EFs(calculated_kps)
pred_efs = calculate_EFs(predicted_kps)
d = {"Actual_EF": cal_efs, "Pred_EF": pred_efs}
df = pd.DataFrame(data=d)
act_lvef_class = []
for i in df.Actual_EF:
if i >= 50:
act_lvef_class.append("Normal")
elif i > 40:
act_lvef_class.append("Mild")
else:
act_lvef_class.append("Abnormal")
act_lvef_class = pd.Series(act_lvef_class, name="Actual_HFClass")
act_lvef_class = act_lvef_class.astype("category")
act_lvef_class = act_lvef_class.cat.set_categories(
["Normal", "Mild", "Abnormal"], ordered=True
)
df["Actual_HFClass"] = act_lvef_class
pred_lvef_class = []
for i in df.Pred_EF:
if i >= 50:
pred_lvef_class.append("Normal")
elif i > 40:
pred_lvef_class.append("Mild")
else:
pred_lvef_class.append("Abnormal")
    pred_lvef_class = pd.Series(pred_lvef_class, name="Pred_HFClass")
pred_lvef_class = pred_lvef_class.astype("category")
pred_lvef_class = pred_lvef_class.cat.set_categories(
["Normal", "Mild", "Abnormal"], ordered=True
)
df["Pred_HFClass"] = pred_lvef_class
df["Diff_EFs"] = np.abs(df.Actual_EF - df.Pred_EF)
return df
def get_predicted_points(data_images, model):
"""
data_images: shape [None, 112, 112, 3]
"""
data_kps = model.predict(data_images)
return data_kps
# Get the predicted keypoints for training, validation and test images
predicted_train_kps = get_predicted_points(train_images, model_2)
predicted_val_kps = get_predicted_points(val_images, model_2)
predicted_test_kps = get_predicted_points(test_images, model_2)
# Get the dataframe of the actual and predicted EFs. Note that I am not taking the EF values from the original data, but calculating them from the original volume tracings. These are then compared to the EF values computed from the predicted volume tracings.
training_output_df = build_dataframe_EFs(train_keypoints_conv, predicted_train_kps)
val_output_df = build_dataframe_EFs(val_keypoints_conv, predicted_val_kps)
test_output_df = build_dataframe_EFs(test_keypoints_conv, predicted_test_kps)
# I calculate the EF values for the training, validation, and test images and build a dataframe, which is visualized below.
training_output_df.head()
val_output_df.head()
test_output_df.head()
# I see some negative EF values. Looking at one case with a negative value, it looks like the long axis is not traced correctly.
def VisualizeSingleData(data_images, data_keypoints, pred_keypoints, data_ids, index):
print(f"Data id {data_ids[2*index]}")
plt.subplots(1, 4, figsize=(12, 12))
img = data_images[2 * index]
pred_kps = pred_keypoints[2 * index].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
kps = data_keypoints[2 * index].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
plt.subplot(1, 4, 1)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img, pred_kps, col="#16a085")
plt.xlabel(f"Predicted")
plt.subplot(1, 4, 2)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img, kps)
    plt.xlabel(f"GT:{data_ids[2*index]}")
    img = data_images[2 * index + 1]
    pred_kps = pred_keypoints[2 * index + 1].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
    kps = data_keypoints[2 * index + 1].reshape(-1, NUM_KEYPOINTS) * IMAGE_SIZE
plt.subplot(1, 4, 3)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img, pred_kps, col="#16a085")
plt.xlabel(f"Predicted")
plt.subplot(1, 4, 4)
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
VisualizeSampleImages(img, kps)
plt.xlabel(f"GT:{data_ids[2*index+1]}")
error_list = training_output_df.query("Actual_EF < 0").index.tolist()
## Errors in training data
for i, err in enumerate(error_list):
VisualizeSingleData(
train_images, train_keypoints_conv, predicted_train_kps, train_ids, err
)
plt.show()
error_list_val = val_output_df.query("Actual_EF < 0").index.tolist()
## Errors in validation data
for err in error_list_val:
VisualizeSingleData(val_images, val_keypoints_conv, predicted_val_kps, val_ids, err)
plt.show()
error_list_test = test_output_df.query("Actual_EF < 0").index.tolist()
## Errors in test data
for err in error_list_test:
VisualizeSingleData(
test_images, test_keypoints_conv, predicted_test_kps, test_ids, err
)
plt.show()
error_list = training_output_df.query("Pred_EF < 0").index.tolist()
print(len(error_list))
## Errors in Training data
for i, err in enumerate(error_list):
print(training_output_df.Pred_EF[err])
VisualizeSingleData(
train_images, train_keypoints_conv, predicted_train_kps, train_ids, err
)
plt.show()
if i == 6:
break
# ## Error Analysis
from sklearn.metrics import accuracy_score
def Accuracy_ConfusionMatrix(actual, predicted, categories):
print(f"Accuracy of model: {accuracy_score(actual, predicted)}")
confusion_matrix = pd.crosstab(
actual, predicted, rownames=["Actual"], colnames=["Predicted"]
)
print(confusion_matrix)
print("Sensitivity of model for individual classes")
class_sum = np.sum(confusion_matrix, axis=1)
for c, i in enumerate(categories):
print(f"Class {i} : {confusion_matrix.iloc[c][c]/class_sum[c]}")
# Confusion Matrix for Training Data
print("Confusion Matrix for Training Data")
Accuracy_ConfusionMatrix(
training_output_df.Actual_HFClass,
training_output_df.Pred_HFClass,
training_output_df.Actual_HFClass.cat.categories,
)
# Confusion Matrix for Validation Data
print("Confusion Matrix for Validation Data")
Accuracy_ConfusionMatrix(
val_output_df.Actual_HFClass,
val_output_df.Pred_HFClass,
val_output_df.Actual_HFClass.cat.categories,
)
# Confusion Matrix for Testing Data
print("Confusion Matrix for Testing Data")
Accuracy_ConfusionMatrix(
test_output_df.Actual_HFClass,
test_output_df.Pred_HFClass,
test_output_df.Actual_HFClass.cat.categories,
)
# From the above values, it looks like the model performs worse on the mild class, i.e., EFs between 40 and 50.
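# A hedged quick check (not in the original): one possible contributing factor is
# class support; value_counts() shows how many samples fall into each HF class.
print(training_output_df.Actual_HFClass.value_counts())
print(val_output_df.Actual_HFClass.value_counts())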
VisualizeSingleData(
train_images, train_keypoints_conv, predicted_train_kps, train_ids, 531
)
training_output_df.query("Diff_EFs > 100")
training_output_df.boxplot(column="Diff_EFs", by="Actual_HFClass", showfliers=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense, Flatten, Convolution2D, MaxPooling2D, Dropout
data = pd.read_csv("../input/tmnist-alphabet-94-characters/94_character_TMNIST.csv")
data.head()
print(data["labels"].unique())
print(len(data["labels"].unique()))
print(data["names"].unique())
print(len(data["names"].unique()))
y = data["labels"]
X = data.drop(["labels", "names"], axis=1)
print(plt.imshow(X.values[22].reshape(28, 28), cmap=plt.get_cmap("Spectral")))
print(y[22])
# plotting some of the values and corresponding labels as title
plt.figure(figsize=(20, 10))
for i in range(50):
plt.subplot(5, 10, i + 1)
plt.title(y.iloc[i])
plt.imshow(X.values[i].reshape(28, 28), cmap=plt.get_cmap("Spectral"))
plt.show()
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=42, stratify=y
)
X_train.head()
y_train.head()
enc = OneHotEncoder(sparse=False, handle_unknown="ignore")
y_train = enc.fit_transform(y_train.values.reshape(-1, 1))
y_test = enc.transform(y_test.values.reshape(-1, 1))  # transform only, so test labels share the training encoding
y_train, y_test
X_train = X_train.values.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.values.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype("float32") / 255.0
X_test = X_test.astype("float32") / 255.0
print("X_train shape:", X_train.shape)
print(X_train.shape[0], "train samples")
print(X_test.shape[0], "test samples")
# Convolutional model
model = Sequential()
# convolutional layer 1
model.add(
Convolution2D(
64, (5, 5), padding="same", activation="relu", input_shape=(28, 28, 1)
)
)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))
model.add(Dropout(0.25))
# convolutional layer 2
model.add(Convolution2D(32, (3, 3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))
model.add(Dropout(0.25))
# convolutional layer 3
model.add(Convolution2D(32, (3, 3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))
model.add(Dropout(0.25))
model.add(Flatten())
# fully connected layer 1
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.25))
# fully connected layer 2
no_of_classes = data["labels"].nunique()
model.add(Dense(no_of_classes, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
# Train
history = model.fit(
X_train,
y_train,
validation_data=(X_test, y_test),
epochs=20,
batch_size=128,
shuffle=True,
verbose=1,
)
# Evaluate
evaluation = model.evaluate(X_test, y_test, batch_size=128, verbose=1)
print(
"Summary: Loss over the test dataset: %.2f, Accuracy: %.4f"
% (evaluation[0], evaluation[1])
)
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
epochs = np.arange(20)
plt.title("Accuracy vs Epochs")
plt.plot(epochs, history.history["accuracy"], label="train", color="#851D2D")
plt.ylabel("Accuracy")
plt.plot(epochs, history.history["val_accuracy"], label="test", color="#306844")
plt.legend()
plt.subplot(1, 2, 2)
plt.title("Loss vs Epochs")
plt.plot(epochs, history.history["loss"], label="train", color="#851D2D")
plt.ylabel("Loss")
plt.plot(epochs, history.history["val_loss"], label="test", color="#306844")
plt.legend()
plt.show()
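# A hedged follow-up (not in the original notebook): recover readable character
# labels for a few test images by mapping the softmax outputs back through the
# fitted one-hot encoder's categories.
sample_probs = model.predict(X_test[:5])
sample_pred_idx = np.argmax(sample_probs, axis=1)
print(enc.categories_[0][sample_pred_idx])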
|
# ## Loading libraries
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# ## Loading and preparing the data
df = pd.read_csv("/kaggle/input/hospital-matlab/hospital.csv")
df = df[["Sex", "Age", "Weight", "Smoker", "BloodPressure_1"]]
df.head()
# Prepare the data for the Linear Regression model.
# Encode the categorical columns "Sex" and "Smoker" as integer dummy variables.
X = df[["Sex", "Age", "Weight", "Smoker"]]
X = pd.get_dummies(X, drop_first=True, columns=["Sex", "Smoker"])
y = df["BloodPressure_1"]
# ## Algorithm and results
# Fit a Linear Regression model with 5-fold cross-validation and report the results.
model = LinearRegression()
correlations = []
mean_squared_errors = []
print("Pasta | Correlação | MSE")
for i in range(5):
start = i * 20
end = (i + 1) * 20
X_test = X.iloc[start:end]
y_test = y.iloc[start:end]
X_train = pd.concat([X.iloc[:start], X.iloc[end:]])
y_train = pd.concat([y.iloc[:start], y.iloc[end:]])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
correlation = np.corrcoef(y_test, y_pred)[0, 1]
mse = mean_squared_error(y_test, y_pred)
print(f" {i + 1} | {correlation:.4f} | {mse:.4f}")
correlations.append(correlation)
mean_squared_errors.append(mse)
# Report the overall mean of the Pearson coefficient and of the mean squared error.
print("Overall mean")
print(f"Pearson coefficient: {np.mean(correlations):.4f}")
print(f"Mean Squared Error (MSE): {np.mean(mean_squared_errors):.4f}")
|
import torch
import torch.nn as nn
# # **Hyperparameters Initialization**
d_model = 512 # Dimension of the Word Vectors
num_heads = 8 # No of attention matrices that will be created
drop_prob = 0.1 # Dropout neurons of the layers
# For proper gradient descent: a very small batch size (stochastic gradient descent with batch_size = 1) makes convergence towards the minimum noisy and unstable
# batch_size = 30 (mini-batch gradient descent) means the model processes 30 input sentences per forward pass before performing back-propagation
batch_size = 30
max_sequence = 200  # Maximum sequence length (number of tokens per input)
ffn_hidden = 2048  # Number of hidden neurons the feed-forward stage expands to
num_layers = 5  # Number of encoder layers in the Transformer model
# # **Single Head Attention**
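# This heading has no accompanying code in this excerpt, so the function below is a
# minimal hedged sketch of single-head (scaled dot-product) attention: my assumption
# of the standard formulation, not the author's original cell.
import math
def scaled_dot_product_attention(q, k, v, mask=None):
    d_k = q.size(-1)
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)  # similarity scores
    if mask is not None:
        scores = scores + mask  # additive mask (e.g. large negative values for padding)
    attention = torch.softmax(scores, dim=-1)  # attention weights sum to 1 per query
    values = torch.matmul(attention, v)  # weighted sum of the value vectors
    return values, attention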
# # **Multihead Attention Mechanism**
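# The EncoderLayer further below references a MultiHeadAttention module whose
# definition is not included in this excerpt; the class below is a minimal hedged
# sketch of a standard implementation, not the author's original code.
import math
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads
        self.qkv_layer = nn.Linear(d_model, 3 * d_model)  # projects to Q, K and V at once
        self.linear_layer = nn.Linear(d_model, d_model)  # final output projection
    def forward(self, x, mask=None):
        batch_size, seq_len, _ = x.size()
        qkv = self.qkv_layer(x)
        qkv = qkv.reshape(batch_size, seq_len, self.num_heads, 3 * self.head_dim)
        qkv = qkv.permute(0, 2, 1, 3)  # [batch, heads, seq, 3 * head_dim]
        q, k, v = qkv.chunk(3, dim=-1)
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        if mask is not None:
            scores = scores + mask
        attention = torch.softmax(scores, dim=-1)
        values = torch.matmul(attention, v)  # [batch, heads, seq, head_dim]
        values = values.permute(0, 2, 1, 3).reshape(batch_size, seq_len, self.d_model)
        return self.linear_layer(values)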
# # **Transformer Model (Encoder + Decoder) along with the Attention Mechanism Architecture**
# # **Creating the Encoder Layer Architecture using Various Mechanisms Involved in the Above Diagram**
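# The EncoderLayer below also relies on LayerNormalization and PositionWiseFeedForward
# modules that are not defined in this excerpt; the two classes below are minimal
# hedged sketches under that assumption, not the author's original implementations.
class LayerNormalization(nn.Module):
    def __init__(self, parameters_shape, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(parameters_shape))  # trainable scale
        self.beta = nn.Parameter(torch.zeros(parameters_shape))  # trainable shift
    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)
        var = x.var(dim=-1, unbiased=False, keepdim=True)
        x_norm = (x - mean) / torch.sqrt(var + self.eps)
        return self.gamma * x_norm + self.beta
class PositionWiseFeedForward(nn.Module):
    def __init__(self, d_model, hidden, drop_prob=0.1):
        super().__init__()
        self.linear1 = nn.Linear(d_model, hidden)  # expand to the hidden dimension
        self.linear2 = nn.Linear(hidden, d_model)  # project back to d_model
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=drop_prob)
    def forward(self, x):
        return self.linear2(self.dropout(self.relu(self.linear1(x))))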
class EncoderLayer(nn.Module):
    def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
super(EncoderLayer, self).__init__()
self.attention = MultiHeadAttention(
d_model=d_model, num_heads=num_heads
) # Multiheaded Attention Layer --> Attention Matrices
self.norm1 = LayerNormalization(
parameters_shape=[d_model]
) # Layer Normalization which gives normalized values by considering trainable parameters
        self.dropout1 = nn.Dropout(
            p=drop_prob
        )  # Dropout neurons from the network (reduces overfitting to the input data)
        self.ffn = PositionWiseFeedForward(
            d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob
        )  # Simple feed forward neural network
self.norm2 = LayerNormalization(
parameters_shape=[d_model]
) # Layer Normalization which gives normalized values by considering trainable parameters
        self.dropout2 = nn.Dropout(
            p=drop_prob
        )  # Dropout neurons from the network (reduces overfitting to the input data)
def forward(self, x):
residual_x = x
x = self.attention(x, mask=None)
x = self.dropout1(x)
x = self.norm1(
x + residual_x
) # Passing the Original Encoded and Embedded data along with the output of the previous layer
residual_x = x
x = self.ffn(x)
x = self.dropout2(x)
x = self.norm2(
x + residual_x
) # Passing the Original Encoded and Embedded data along with the output of the previous layer
return x
# # **Base Encoder Class - Involves Sequential Execution of the Encoder Layers (Number of Encoder Layers are defined above)**
# Base Encoder Class
class Encoder(nn.Module):
def __init__(
self,
d_model,
num_heads,
drop_prob,
batch_size,
max_sequence,
ffn_hidden,
num_layers,
):
        super(Encoder, self).__init__()
self.layers = nn.Sequential(
*[
EncoderLayer(d_model, ffn_hidden, num_heads, drop_prob)
for _ in range(num_layers)
]
)
def forward(self, x):
x = self.layers(x)
return x
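# A hedged usage sketch (not in the original): instantiate the encoder with the
# hyperparameters defined above and push a dummy batch through it. This assumes the
# helper sketches above for the attention, normalization and feed-forward modules.
encoder = Encoder(
    d_model, num_heads, drop_prob, batch_size, max_sequence, ffn_hidden, num_layers
)
dummy_input = torch.randn(batch_size, max_sequence, d_model)
encoder_output = encoder(dummy_input)
print(encoder_output.shape)  # expected: torch.Size([30, 200, 512])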
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import torch
import torchvision as tv
import time
BATCH_SIZE = 256
train_dataset = tv.datasets.FashionMNIST(
".", train=True, transform=tv.transforms.ToTensor(), download=True
)
test_dataset = tv.datasets.FashionMNIST(
".", train=False, transform=tv.transforms.ToTensor(), download=True
)
train_iter = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE)
test_iter = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE)
plt.imshow(train_dataset[0][0].numpy().reshape(28, 28), cmap="gray")
model = torch.nn.Sequential(
torch.nn.Conv2d(1, 8, kernel_size=3, padding=2),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, stride=2),
torch.nn.Conv2d(8, 16, kernel_size=3, padding=2),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, stride=2),
torch.nn.Conv2d(16, 32, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, stride=2),
torch.nn.Flatten(),
torch.nn.Linear(288, 60),
torch.nn.ReLU(),
torch.nn.Linear(60, 10),
)
X = train_dataset[0][0]
X = X.reshape(1, 1, 28, 28)
print(X.shape)
for l in model:
X = l(X)
print("Layer {}. X shape: {}".format(l, X.shape))
X = train_dataset[0][0]
X = X.reshape(1, 1, 28, 28)
y = model(X)
print(y.shape)
loss = torch.nn.CrossEntropyLoss(reduction="sum")
trainer = torch.optim.Adam(model.parameters(), lr=0.005)
num_epochs = 15
for ep in range(num_epochs):
start, train_iters, train_passed = time.time(), 0, 0
train_loss, train_acc = 0.0, 0.0
for X, y in train_iter:
trainer.zero_grad()
y_pred = model(X)
l = loss(y_pred, y)
l.backward()
trainer.step()
train_loss += l.item()
train_acc += (y_pred.argmax(dim=1) == y).sum().item()
train_iters += 1
train_passed += y.shape[0]
test_iters, test_passed = 0, 0
test_loss, test_acc = 0.0, 0.0
for X, y in test_iter:
y_pred = model(X)
l = loss(y_pred, y)
test_loss += l.item()
test_acc += (y_pred.argmax(dim=1) == y).sum().item()
test_iters += 1
test_passed += y.shape[0]
print(
"""ep: {}, taked: {:.3f}
train_loss: {:.3f}, train_acc: {:.3f},
test_loss: {:.3f}, test_acc: {:.3f}
""".format(
ep,
time.time() - start,
train_loss / train_iters,
train_acc / train_passed,
test_loss / test_iters,
test_acc / test_passed,
)
)
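# A hedged aside (not in the original notebook): evaluation is usually wrapped in
# model.eval() and torch.no_grad() so layers such as dropout behave deterministically
# and no gradients are tracked during inference.
model.eval()
with torch.no_grad():
    correct = sum((model(X).argmax(dim=1) == y).sum().item() for X, y in test_iter)
print("Final test accuracy: {:.3f}".format(correct / len(test_dataset)))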
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
train = pd.read_csv("/kaggle/input/aqueous-solubility-predictioin/train.csv")
test = pd.read_csv("/kaggle/input/aqueous-solubility-predictioin/test.csv")
import pandas as pd
import ast
import re
def extract_sympol(string):
s = []
append = False
for i in string:
if i == "/" and append:
append = False
break
if i == "/" and (not append):
append = True
continue
if append:
s.append(i)
return "".join(s)
# Define a function to count the elements in a chemical formula string
def count_elements(formula):
elements = {}
for element in re.findall(r"[A-Z][a-z]*\d*", formula):
name = re.match(r"[A-Za-z]+", element).group()
count = (
int(re.search(r"\d+$", element).group())
if re.search(r"\d+$", element)
else 1
)
elements[name] = elements.get(name, 0) + count
return elements
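# A quick hedged usage example (not in the original): extract the molecular formula
# from a sample InChI string (ethanol) and count its elements. The regex splits the
# formula into element symbols followed by optional counts.
example_inchi = "InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3"
print(extract_sympol(example_inchi))  # expected: 'C2H6O'
print(count_elements(extract_sympol(example_inchi)))  # expected: {'C': 2, 'H': 6, 'O': 1}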
# Apply the functions to the InChI column to build per-row element-count dictionaries
d = (
train["InChI"]
.apply(extract_sympol)
.apply(count_elements)
.apply(str)
.apply(ast.literal_eval)
)
# Convert the string representations of dictionaries to dictionaries
# train['InChI Elements'].apply(ast.literal_eval)
t = (
test["InChI"]
.apply(extract_sympol)
.apply(count_elements)
.apply(str)
.apply(ast.literal_eval)
)
test = pd.concat([test, pd.DataFrame(t.to_list())], axis=1)
# def count_elements(string):
# if string == "O.Pd":
# return "A&A"
# dic = {}
# num = 1
# length = len(string)
# for i in range(length):
# if(string[i] == '.'):
# continue
# if(string[i].islower()):
# continue
# if i == length - 1:
# break
# if string[i].isalpha():
# if string[i + 1].isalpha():
# dic[string[i]] = 1
# continue
# if(i != length- 1):
# temp = []
# while ((i + num <= length -1) and (string[i + num].isnumeric())):
# temp.append(string[i + num])
# num += 1
# num = 1
# if(temp == []):
# temp = ["1"]
# dic[string[i]] = int ("".join(temp))
# if string[length - 1].isalpha():
# dic[string[length - 1]] = 1
# return dic
# import ast
# d = train["InChI"].apply(f).apply(count_elements).apply(ast.literal_eval)
train = pd.concat([train, pd.DataFrame(d.to_list())], axis=1)
train.info()
features_todrop = ["SD", "Ocurrences", "Group"]
label = train["Solubility"]
features = train.drop(columns=features_todrop)
features = features.drop(columns="Solubility")
num_features = features.select_dtypes(["float64", "int64"])
from sklearn.model_selection import train_test_split
features_train, features_valid, labels_train, labels_valid = train_test_split(
num_features, label, test_size=0.2, random_state=7
)
from xgboost import XGBRegressor
xgb_model = XGBRegressor(n_estimators=300)
xgb_model.fit(features_train, labels_train)
from sklearn.metrics import mean_squared_error
mean_squared_error(labels_valid, xgb_model.predict(features_valid))
test["Solubility"] = xgb_model.predict(test.select_dtypes(["float64", "int64"]))
test[["comp_id", "Solubility"]].to_csv("/kaggle/working/submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing libraries and reading CSV Files
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
sample = pd.read_csv("/kaggle/input/spaceship-titanic/sample_submission.csv")
# # Data Exploration and Visualization
# * Checking datatypes
# * Unique values
# * Null values
# * Plotting a correlation heatmap and
# * Checking the distribution of the target variable
train.head()
train.dtypes
train.nunique()
train.isna().sum()
# Visualizing the datatypes in the train dataset
train.dtypes.value_counts().plot(kind="pie")
# Correlation heatmap
corr = train.corr()
sns.heatmap(corr, cmap="YlOrBr", annot=True)
plt.show()
# Variable with highest correlation
corr["Transported"].sort_values(ascending=False)
# Distribution of target variable in training data
sns.countplot(x="Transported", hue="Transported", data=train)
# # Data cleaning
# * Splitting Cabin and PassengerId
# * Finding numerical and categorical columns
# * Imputing numerical columns using SimpleImputer
# * Dummy encoding nominal columns
# * Label encoding ordinal columns
# Splitting Cabin and PassengerId using str.split()
train[["Deck", "Num", "Side"]] = train["Cabin"].str.split("/", expand=True)
test[["Deck", "Num", "Side"]] = test["Cabin"].str.split("/", expand=True)
train["Num"] = train["Num"].astype(float)
test["Num"] = test["Num"].astype(float)
train[["PassengerGGG", "PassengerPP"]] = train["PassengerId"].str.split(
"_", expand=True
)
test[["PassengerGGG", "PassengerPP"]] = test["PassengerId"].str.split("_", expand=True)
train["PassengerGGG"] = train["PassengerGGG"].astype(float)
train["PassengerPP"] = train["PassengerPP"].astype(float)
test["PassengerGGG"] = test["PassengerGGG"].astype(float)
test["PassengerPP"] = test["PassengerPP"].astype(float)
train = train.drop(["Cabin", "PassengerId", "Name"], axis=1)
test = test.drop(["Cabin", "PassengerId", "Name"], axis=1)
# Find categorical and numerical columns
# ohe_cat_cols = columns that need to be OneHotEncoded as they are nominal (Dont have order)
# le_cat_cols = columns that need to LabelEncoded as they are ordinal
ohe_cat_cols = [
cname
for cname in train.columns
if (train[cname].nunique() < 5) and (train[cname].dtype == "object")
]
le_cat_cols = [
cname
for cname in train.columns
if (train[cname].nunique() > 5) and (train[cname].dtype == "object")
]
num_cols = [
cname for cname in train.columns if train[cname].dtype in ["float64", "int64"]
]
ohe_cat_cols
le_cat_cols
num_cols
# Using SimpleImputer() for numerical columns with strategy "mean"
from sklearn.impute import SimpleImputer
si = SimpleImputer(strategy="mean")
train[num_cols] = si.fit_transform(train[num_cols])
test[num_cols] = si.transform(test[num_cols])
# LabelEncoding Ordinal values
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for col in le_cat_cols:
train[col] = le.fit_transform(train[col])
test[col] = le.transform(test[col])
# Dummy encoding Nominal values
train = pd.get_dummies(train, columns=ohe_cat_cols)
test = pd.get_dummies(test, columns=ohe_cat_cols)
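# A hedged aside (not in the original notebook): pd.get_dummies can produce different
# columns for train and test if a category only appears in one of them; aligning the
# frames on the train columns guards against a feature mismatch at prediction time.
train, test = train.align(test, join="left", axis=1, fill_value=0)
test = test.drop(["Transported"], axis=1, errors="ignore")  # target exists only in train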
# # Building model and evaluation
# Model is XGBClassifier().
# Data split using StratifiedKFold().
# Model is scored using Roc_Auc_Score()
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
y = train["Transported"]
X = train.drop(["Transported"], axis=1)
kfold = 10
xgb_pred = []
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1441)
for i, (train_idx, test_idx) in enumerate(skf.split(X, y)):
print("Fold %d/%d" % (i + 1, kfold))
X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]
y_train, y_test = y.iloc[train_idx], y.iloc[test_idx]
watchlist = [(X_train, y_train), (X_test, y_test)]
model = XGBClassifier(n_estimators=1000, max_depth=4, n_jobs=2)
model.fit(X_train, y_train, eval_set=watchlist, verbose=False)
pred_probs = model.predict_proba(X_test)[:, 1]
test_score = roc_auc_score(y_test, pred_probs)
print("Roc auc score for fold %d = %f" % (i + 1, test_score))
preds = model.predict_proba(test)[:, 1]
xgb_pred.append(preds)
print("End of loop")
xgb_pred = np.mean(np.vstack(xgb_pred), axis=0)
sub_preds = np.vstack(xgb_pred)
# The sub_preds array is 2D
sub_preds.shape
# Assigning it to a new 1D array to convert it from 2D to 1D
sub_pred = sub_preds[:, 0]
# # Submission
# Submission
submission = pd.DataFrame(
{"PassengerId": sample["PassengerId"], "Transported": sub_pred}
)
submission.to_csv("Submission.csv", index=False)
|
# # K-Nearest Neighbor(KNN) Algorithm
# * K-Nearest Neighbour is one of the simplest Machine Learning algorithms based on Supervised Learning technique.
# * K-NN algorithm assumes the similarity between the new case/data and available cases and puts the new case into the category that is most similar to the available categories.
# * K-NN algorithm stores all the available data and classifies a new data point based on the similarity. This means that when new data appears, it can be easily classified into a well-suited category by using the K-NN algorithm.
# * K-NN algorithm can be used for Regression as well as for Classification but mostly it is used for the Classification problems.
# * K-NN is a non-parametric algorithm, which means it does not make any assumption on underlying data.
# * It is also called a lazy learner algorithm because it does not learn from the training set immediately; instead, it stores the dataset and, at the time of classification, performs an action on the dataset.
# * KNN algorithm at the training phase just stores the dataset and when it gets new data, then it classifies that data into a category that is much similar to the new data.
# 
# **Example:** Suppose, we have an image of a creature that looks similar to cat and dog, but we want to know either it is a cat or dog. So for this identification, we can use the KNN algorithm, as it works on a similarity measure. Our KNN model will find the similar features of the new data set to the cats and dogs images and based on the most similar features it will put it in either cat or dog category.
# # Why do we need a K-NN Algorithm?
# Suppose there are two categories, i.e., Category A and Category B, and we have a new data point x1. Which of these categories will this data point belong to? To solve this type of problem, we need a K-NN algorithm. With the help of K-NN, we can easily identify the category or class of a particular data point. Consider the below diagram:
# 
# # How does K-NN work?
# The K-NN working can be explained on the basis of the below algorithm:
# > Step-1: Select the number K of neighbors
# > Step-2: Calculate the Euclidean distance from the new data point to the existing data points
# > Step-3: Take the K nearest neighbors according to the calculated Euclidean distance.
# > Step-4: Among these K neighbors, count the number of data points in each category.
# > Step-5: Assign the new data point to the category with the highest neighbor count.
# > Step-6: Our model is ready (a minimal from-scratch sketch of these steps follows below).
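# A minimal hedged from-scratch sketch of the steps above (illustrative only, not
# part of the original notebook): classify one query point with a hand-rolled K-NN
# using Euclidean distance and a majority vote among the K nearest training points.
import numpy as np
from collections import Counter
def knn_predict(X_train, y_train, query, k=5):
    distances = np.linalg.norm(X_train - query, axis=1)  # Step 2: Euclidean distances
    nearest_idx = np.argsort(distances)[:k]  # Step 3: take the K nearest neighbors
    votes = Counter(y_train[nearest_idx])  # Step 4: count points per category
    return votes.most_common(1)[0][0]  # Step 5: assign the majority category
X_toy = np.array([[1, 1], [1, 2], [2, 1], [8, 8], [8, 9], [9, 8]])
y_toy = np.array([0, 0, 0, 1, 1, 1])
print(knn_predict(X_toy, y_toy, np.array([7.5, 8.5]), k=3))  # expected output: 1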
# # Diabetes Prediction
# **What is Diabetes?**
# Diabetes is a chronic disease that occurs when the pancreas is no longer able to make insulin, or when the body cannot make good use of the insulin it produces. Learning how to use Machine Learning can help us predict Diabetes. Let’s get started!
# **About this project**
# The dataset is part of a large dataset held at the National Institutes of Diabetes-Digestive-Kidney Diseases in the USA. The data were used for diabetes research on Pima Indian women aged 21 and over living in Phoenix, the 5th largest city of the State of Arizona in the USA. It consists of 768 observations and 8 numerical independent variables. The target variable is specified as "Outcome"; 1 indicates a positive diabetes test result, 0 a negative one.
# **About the Dataset**
# > **Pregnancies** :Number of times a woman has been pregnant
# >
# > **Glucose** :Plasma Glucose concentration of 2 hours in an oral glucose tolerance test
# >
# > **BloodPressure** :Diastolic Blood Pressure (mm Hg)
# >
# > **SkinThickness** :Triceps skin fold thickness(mm)
# >
# > **Insulin** :2 hour serum insulin(mu U/ml)
# >
# > **BMI** :Body Mass Index ((weight in kg/height in m)^2)
# >
# > **Age** :Age(years)
# >
# > **DiabetesPedigreeFunction** :Scores likelihood of diabetes based on family history
# >
# > **Outcome**:0(doesn't have diabetes) or 1 (has diabetes)
# **Table of Contents**
# 1. Exploratory Data Analysis
# 2. Data Preprocessing & Feature Engineering
# 3. Modeling & Prediction
# 4. Model Evaluation
# 5. Hyperparameter Optimization
# 6. Final Model
# > # **We used the Logistic Regression Model with the same data set. I have studied all the titles in detail there.**
# > # ❗️**Our main focus in this code block has been to examine the KNN model.**
# > # **👉** [**You can reach detailed code blocks from this link.**](https://www.kaggle.com/code/noktameva/what-is-logistic-regression-diabetes-prediction/edit)
# # 1. Exploratory Data Analysis
import pandas as pd
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 500)
df = pd.read_csv("/kaggle/input/diabetes/diabetes.csv")
df.head()
df.shape
df.describe().T
df["Outcome"].value_counts()
# # 2. Data Preprocessing & Feature Engineering
y = df["Outcome"]
X = df.drop(["Outcome"], axis=1)
# **♛Standardization Process**
X_scaled = StandardScaler().fit_transform(X)
X_scaled
# 🚫 The NumPy array we get back carries the values we want, but the column names are lost. We need to add these back.
X = pd.DataFrame(X_scaled, columns=X.columns)
X.head()
# # 3. Modeling & Prediction
knn_model = KNeighborsClassifier().fit(X, y)
# Let's pick any random user.
random_user = X.sample(1, random_state=45)
random_user
knn_model.predict(random_user)
# # 4. Model Evaluation
# 📌**y_pred for confusion matrix**
y_pred = knn_model.predict(X)
y_pred
# 📌**y_probe for AUC**
y_prob = knn_model.predict_proba(X)[:, 1]
y_prob
# The **[:, 1]** expression selects the second column of this matrix, which contains the probabilities of the instances being **labeled 1** and creates a NumPy array of these probabilities.
# **Therefore, the y_prob array contains the probabilities of each instance in the X dataset being labeled 1.**
print(classification_report(y, y_pred))
# Calculates according to **class 1** and **class 0**
# AUC
roc_auc_score(y, y_prob)
# **✨** **Cross Validation Method**
# Cross validation is performed through the following steps:
# > **✣**The dataset is randomly partitioned into training and test sets.
# > **✣**The model is trained on the training set and its performance is measured on the test set.
# > **✣**Steps 1 and 2 are repeated using different parts of the dataset. This allows for the model's performance to be measured using different training and test sets and the results are averaged for better prediction.
# > **✣**The best model is selected and trained on the entire dataset.*
# Cross validation enables the objective measurement of the performance of a machine learning model and helps prevent overfitting. It also helps to determine the optimal parameters for training the model.
cv_results = cross_validate(
knn_model, X, y, cv=5, scoring=["accuracy", "f1", "roc_auc"]
)
cv_results
print(cv_results["test_accuracy"].mean())
print(cv_results["test_f1"].mean())
print(cv_results["test_roc_auc"].mean())
# **How can success scores be increased ?**🤹♀️
# * 1. Sample size can be increased
# * 2. Data preprocessing can be detailed
# * 3. Feature Engineering(New variables can be derived)
# * 4. Optimizations can be made for the relevant algorithm
# # 5. Hyperparameter Optimization
knn_model = KNeighborsClassifier()
knn_model.get_params()
# **🏋️♀️My goal is to change the number of neighborhoods to find the optimum number that should be.**
knn_params = {"n_neighbors": range(2, 50)}
knn_gs_best = GridSearchCV(knn_model, knn_params, cv=5, n_jobs=-1, verbose=1).fit(X, y)
knn_gs_best.best_params_
# Its **default value is 5**. Now let's update the optimum **value to 17** and look at the success of the model.
# # 6. Final Model
knn_final = knn_model.set_params(**knn_gs_best.best_params_).fit(X, y)
cv_results = cross_validate(
knn_final, X, y, cv=5, scoring=["accuracy", "f1", "roc_auc"]
)
print(cv_results["test_accuracy"].mean())
print(cv_results["test_f1"].mean())
print(cv_results["test_roc_auc"].mean())
|
from keras.datasets import reuters
from keras import layers, models
import numpy as np
from keras.utils.np_utils import to_categorical
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(
num_words=10000
)
def vectorize_sequences(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.0
return results
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model = models.Sequential()
model.add(layers.Dense(64, activation="relu", input_shape=(10000,)))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(46, activation="softmax"))
model.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = train_labels[:1000]
partial_y_train = train_labels[1000:]
history = model.fit(
partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val),
)
import matplotlib.pyplot as plt
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
plt.clf()
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
model = models.Sequential()
model.add(layers.Dense(64, activation="relu", input_shape=(10000,)))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(46, activation="softmax"))
model.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(
partial_x_train,
partial_y_train,
epochs=9,
batch_size=512,
validation_data=(x_val, y_val),
)
results = model.evaluate(x_test, test_labels)
print("Aymoon accuracy results: ", results[1] * 100)
predictions = model.predict(x_test)
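# A hedged follow-up (not in the original): each row of `predictions` is a probability
# distribution over the 46 topics, so argmax gives the predicted topic index per newswire.
predicted_topics = np.argmax(predictions, axis=1)
print(predicted_topics[:10])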
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import r2_score
dataset3 = "/kaggle/input/co2-emissions/CO2 Emissions.csv"
ds3 = pd.read_csv(dataset3)
ds3.head()
ds3.info()
print("")
list(ds3.columns)
print("")
print(ds3.isnull().sum())
# analysing our dataset
X = ds3[
[
"Engine Size(L)",
"Cylinders",
"Fuel Consumption City (L/100 km)",
"Fuel Consumption Hwy (L/100 km)",
"Fuel Consumption Comb (L/100 km)",
"Fuel Consumption Comb (mpg)",
]
]
y = ds3[["CO2 Emissions(g/km)"]]
# select int only columns
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, shuffle=True)
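# The notebook stops after the split; below is a hedged sketch of the likely next
# steps using the already-imported StandardScaler, MLPRegressor and r2_score
# (my assumption of the intended pipeline, not the author's original code).
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
mlp = MLPRegressor(hidden_layer_sizes=(64, 32), max_iter=500, random_state=0)
mlp.fit(X_train_scaled, y_train.values.ravel())
print("R2 on the held-out split:", r2_score(y_test, mlp.predict(X_test_scaled)))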
|
import pandas as pd
d = {"Delhi": 235469, "Pune": 20132, "Mumbai": 45123602, "Banglore": 54236124}
s = pd.Series(d, name="population")
s
# Attributes: values, index, dtype, is_unique, ndim, shape, size, name, etc.
s.values
s.index
s.dtype
s.is_unique
s.ndim
s.shape
s.name
# **Methods in Series**
data = pd.read_csv("/kaggle/input/email-spam-classification-dataset-csv/emails.csv")
data
s = pd.read_csv(
"/kaggle/input/email-spam-classification-dataset-csv/emails.csv",
usecols=["the"],
squeeze=True,
)
s
s.drop_duplicates()
s.count()
s.head() # returns top 5 results as default
s.head(7)
s.tail() # returns last 5 values as default
s.tail(1)
sorted(s, reverse=True)
x = pd.Series([True, False, True, True, False, False])
x.all() # it will return a true if all data points are true
y = x.replace(to_replace=False, value=True)
y
y.all()  # after the replacement every value is True, so all() returns True
# sort_values with inplace parameter
import numpy as np
n = np.random.randint(10, 50, 4)
l = list(n)
l
s = pd.Series(l)
s
s.sort_values()  # passing inplace=True would make the change permanent
# **drop_duplicates() method**
a = pd.Series([23, 32, 32, 5, 32], index=["Anuj", "Joe", "Joe", "Ashima", "Joe"])
a
a.drop_duplicates()
# **handling null values in series**
b = pd.Series(
[23, 32, 34, None, 45], index=["Delhi", "Mumbai", "Pune", "Banglore", "Chennai"]
)
b
# find out null record
mask = b.isna()
mask
b[mask]
b.dropna() # to drop null value
b.fillna(20) # to replace null value
data = pd.read_csv("/kaggle/input/email-spam-classification-dataset-csv/emails.csv")
data
data.describe()
f = pd.read_csv(
"/kaggle/input/email-spam-classification-dataset-csv/emails.csv",
usecols=["jay"],
squeeze=True,
)
f.count()
mask = f.isna()
mask
f[mask]
# **in operator** in Python checks whether a specified value is an element of a sequence such as a string, array, list, tuple, etc.
"a" in "abc"
|
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
import matplotlib
from matplotlib import pyplot as plt
from scipy.stats import skew
import statsmodels.api as sm
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
import statsmodels.stats.multitest as multi
pd.set_option("display.max_rows", 50)
pd.set_option("display.max_columns", 50)
warnings.filterwarnings("ignore")
# # Business Understanding
# Airbnb is a platform allowing house and apartment owners to rent their properties to guests for a short-term stay. Due to its convenience and flexibility, it has become one of the top choices for travelers. In this post, we will dive into the Airbnb Seattle dataset and answer the 3 business questions mentioned below.
# The dataset is a collection of property listings and their key attributes, such as the size of the property, availability of amenities, a description of the neighborhood, customers' reviews and much more.
# Q1. As a traveler, should you choose an Airbnb unit hosted by a superhost?
# Q2. As an investor who wants to purchase a property and run an Airbnb business in Seattle, what kind of properties should you look at?
# Q3. How has Airbnb developed in Seattle? Do you observe any pattern or trend?
# # Data Understanding
df_calendar = pd.read_csv("/kaggle/input/seattle/calendar.csv")
df_listings = pd.read_csv("/kaggle/input/seattle/listings.csv")
df_reviews = pd.read_csv("/kaggle/input/seattle/reviews.csv")
# df_calendar = pd.read_csv("/content/calendar.csv")
# df_listings = pd.read_csv("/content/listings.csv")
# df_reviews = pd.read_csv("/content/reviews.csv")
for data in [df_calendar, df_listings, df_reviews]:
display(data.head())
print(data.shape)
# After checking the [official data assumption document](http://insideairbnb.com/data-assumptions/), I got a better understanding of the dataset and a sense of which features I should use to answer the questions.
# The **calendar** data records the price, availability and other details from the listing's calendar for each day of the next 365 days. Since it describes each host's housing plan in the future, I won't use this data in the analysis.
# In the **listings** data, I found that the feature "_host_is_superhost_" is an indicator of a superhost. I can use this column to explore answers for Q1.
# For Q2, I tried to locate features related to popularity or occupancy rate. The [official data assumption document](http://insideairbnb.com/data-assumptions/) mentions that "**A Review Rate of 50% is used to convert reviews to estimated bookings**." Thus, I will convert _reviews_per_month_ to the number of bookings, as a proxy for popularity.
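# A small hedged illustration (not in the original) of the 50% review-rate assumption
# mentioned above: estimated bookings = reviews / 0.5. The analysis below keeps using
# reviews_per_month directly, since the conversion is just a constant scaling.
estimated_bookings_per_month = df_listings["reviews_per_month"] / 0.5
print(estimated_bookings_per_month.describe())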
# # Data Preparation
# ## Data Wrangling
# Retrieve the creation date of the dataframe for feature engineering
df_listing_last_scraped = pd.Timestamp(df_listings.last_scraped[0])
# Drop duplicate columns
df_listings = df_listings.T.drop_duplicates().T
# Drop columns with full NA
df_listings.dropna(axis=1, how="all", inplace=True)
# Drop columns with only one unique value
df_listings.drop(
[c for c in df_listings.columns if df_listings[c].nunique() == 1],
axis=1,
inplace=True,
)
# Drop columns representing url
df_listings.drop(
df_listings.columns[df_listings.columns.str.contains("url")], axis=1, inplace=True
)
# Reformat the money related columns: strip "$" and "," then cast to float
money_columns = [
    "price",
    "weekly_price",
    "monthly_price",
    "security_deposit",
    "cleaning_fee",
    "extra_people",
]
for money_col in money_columns:
    df_listings[money_col] = (
        df_listings[money_col]
        .str.replace("$", "", regex=False)
        .str.replace(",", "", regex=False)
        .astype("float32")
    )
# Transform the date related columns
df_listings["host_since_deltaDays"] = (
pd.to_datetime(df_listing_last_scraped) - pd.to_datetime(df_listings.host_since)
).dt.days
df_listings["last_review_deltaDays"] = (
pd.to_datetime(df_listing_last_scraped) - pd.to_datetime(df_listings.last_review)
).dt.days
df_listings["first_review_deltaDays"] = (
pd.to_datetime(df_listing_last_scraped) - pd.to_datetime(df_listings.first_review)
).dt.days
# Reformat other features
df_listings["host_response_time"] = df_listings.host_response_time.map(
{
"within an hour": 1,
"within a few hours": 12,
"within a day": 24,
"a few days or more": 48,
}
)
df_listings.host_response_rate = df_listings.host_response_rate.str.replace(
"%", ""
).astype("float32")
df_listings["cancellation_policy"] = df_listings["cancellation_policy"].map(
{"strict": 0, "moderate": 1, "flexible": 2}
)
# Create a feature count the number of host verification methods
host_verifications = np.unique(
np.concatenate(
df_listings.host_verifications.map(
lambda x: x[1:-1].replace("'", "").split(", ")
)
)
)[1:]
matrix_verifications = [
[
veri in row
for row in df_listings.host_verifications.map(
lambda x: x[1:-1].replace("'", "").split(", ")
)
]
for veri in host_verifications
]
df_listings["host_verificationCount"] = pd.DataFrame(
matrix_verifications, index=host_verifications
).T.sum(1)
# I check the features one by one and finally select 3 types of features relevant to our questions: features related to the Airbnb host, the property and the traveler.
features_host = [
"host_is_superhost",
"host_about",
"host_response_time",
"host_response_rate",
"host_listings_count",
"host_verificationCount",
"host_has_profile_pic",
"host_identity_verified",
"host_since_deltaDays",
"calculated_host_listings_count",
]
features_property = [
"summary",
"space",
"description",
"neighborhood_overview",
"notes",
"transit",
"street",
"neighbourhood",
"zipcode",
"latitude",
"longitude",
"is_location_exact",
"property_type",
"room_type",
"accommodates",
"bathrooms",
"bedrooms",
"beds",
"bed_type",
"amenities",
"square_feet",
"price",
"weekly_price",
"security_deposit",
"cleaning_fee",
"guests_included",
"extra_people",
"minimum_nights",
"maximum_nights",
]
features_traveler = [
"number_of_reviews",
"last_review_deltaDays",
"first_review_deltaDays",
"review_scores_rating",
"review_scores_accuracy",
"review_scores_cleanliness",
"review_scores_checkin",
"review_scores_communication",
"review_scores_location",
"review_scores_value",
"instant_bookable",
"cancellation_policy",
"require_guest_profile_picture",
"require_guest_phone_verification",
"reviews_per_month",
]
features = features_host + features_property + features_traveler
df_listings_filtered = df_listings[features]
numeric_feature = [
"host_listings_count",
"calculated_host_listings_count",
"latitude",
"longitude",
"accommodates",
"bathrooms",
"bedrooms",
"beds",
"guests_included",
"minimum_nights",
"maximum_nights",
"number_of_reviews",
"review_scores_rating",
"review_scores_accuracy",
"review_scores_cleanliness",
"review_scores_checkin",
"review_scores_communication",
"review_scores_location",
"review_scores_value",
"review_scores_rating",
"review_scores_accuracy",
"review_scores_cleanliness",
"review_scores_checkin",
"review_scores_communication",
"review_scores_location",
"review_scores_value",
"reviews_per_month",
]
bool_feature = [
"host_is_superhost",
"host_has_profile_pic",
"host_identity_verified",
"is_location_exact",
"instant_bookable",
"require_guest_phone_verification",
"require_guest_profile_picture",
]
# Transform the bool features
for bool_f in bool_feature:
df_listings_filtered[bool_f] = (
df_listings_filtered[bool_f].map({"t": 1, "f": 0}).astype("bool")
)
# Transform the numerical features
for num_f in numeric_feature:
df_listings_filtered[num_f] = df_listings_filtered[num_f].astype("float32")
# Fix the weird zipcode value
df_listings_filtered.loc[df_listings_filtered.zipcode == "99\n98122", "zipcode"] = 98122
# Drop column square_feet for containing too many NA values (97.5%)
print(df_listings_filtered.square_feet.isna().mean())
df_listings_filtered.drop("square_feet", axis=1, inplace=True)
# Transform the amenities feature into a one-hot encoding matrix
unqiue_amenities = np.unique(
np.concatenate(
df_listings_filtered.amenities.str[1:-1].str.replace('"', "").str.split(",")
)
)[1:]
matrix_amenities = [
[
amen in row
for row in df_listings_filtered.amenities.str[1:-1]
.str.replace('"', "")
.str.split(",")
]
for amen in unqiue_amenities
]
df_amenities = pd.DataFrame(matrix_amenities, index=unqiue_amenities).T
# Drop amenity features appearing in less than 5% of samples to avoid overfitting
df_amenities.drop(
df_amenities.columns.values[np.where(df_amenities.mean() < 0.05)],
axis=1,
inplace=True,
)
df_listings_filtered_amenities = pd.concat([df_listings_filtered, df_amenities], axis=1)
# ## Deal with missing value
df_listings_filtered_amenities.isna().mean().sort_values()
# It seems that the review-related columns share a similar missing-value ratio of around 17%. It is reasonable to infer that these missing values largely come from properties with zero reviews. That raises a question: do we want to include these properties in the following analysis? Zero reviews could have many causes. Firstly, it is possible that these properties are unattractive and got no customers. Secondly, the owner may have listed the property for another purpose and chosen not to make it open to the public. Finally, the hosts of these properties may be newcomers whose properties have been on the market for a relatively short time.
# The histogram below shows the distribution of the number of years that properties with zero reviews have been listed on Airbnb. 42% of these properties were listed less than 1 year ago. Since including properties with zero reviews would bring more uncertainty to the model, I decide to remove these "outliers" from further analysis.
matplotlib.rcParams["figure.figsize"] = (5, 5)
(
df_listings_filtered_amenities.query("number_of_reviews==0").host_since_deltaDays
/ 365
).plot(kind="hist")
ax = plt.gca()
ax.set_title(
"The distribution of years of the properties with zero reviews have been listed on Airbnb "
)
ax.set_xlabel("year")
ax.set_ylabel("count")
# Drop samples with zero reviews
# We see that the majority of missing values gone with these properties
df_listings_filtered_amenities = df_listings_filtered_amenities.query(
"number_of_reviews>0"
)
df_listings_filtered_amenities.isna().mean().sort_values()
df_listings_filtered_amenities_cleaned = df_listings_filtered_amenities.copy()
# Fill NA for numeric features
df_listings_filtered_amenities_cleaned.zipcode = (
df_listings_filtered_amenities_cleaned.zipcode.fillna(
df_listings_filtered_amenities_cleaned.zipcode.mode()[0]
)
)
feature_fillna_median = [
"host_response_time",
"host_response_rate",
"security_deposit",
"cleaning_fee",
"weekly_price",
"bedrooms",
"bathrooms",
"review_scores_rating",
"review_scores_communication",
"review_scores_cleanliness",
"review_scores_location",
"review_scores_value",
"review_scores_accuracy",
"review_scores_checkin",
]
df_listings_filtered_amenities_cleaned[
feature_fillna_median
] = df_listings_filtered_amenities_cleaned[feature_fillna_median].fillna(
df_listings_filtered_amenities_cleaned[feature_fillna_median].median()
)
# Fill NA for object features
feature_fillna_empty = [
"summary",
"neighbourhood",
"space",
"host_about",
"transit",
"neighborhood_overview",
"notes",
]
df_listings_filtered_amenities_cleaned[
feature_fillna_empty
] = df_listings_filtered_amenities_cleaned[feature_fillna_empty].fillna("")
# Numerical features
df_num = df_listings_filtered_amenities_cleaned.select_dtypes(exclude="object")
# One hot encoding categorical features
df_cat = pd.get_dummies(
df_listings_filtered_amenities_cleaned.select_dtypes(include="object")[
["property_type", "room_type", "bed_type"]
]
)
# Drop one hot categorical feature columns appearing less then 5% of samples
catFeatureToDrop = df_cat.columns.where(df_cat.mean() < 0.05).dropna()
df_cat.drop(catFeatureToDrop, axis=1, inplace=True)
df_cat = df_cat.astype("bool")
df_total = pd.concat([df_num, df_cat], axis=1)
# ### Q1: Should you choose an Airbnb unit hosted by a superhost?
# In this section, I will analyze the difference between superhosts and other hosts. Mainly, I will use two-independent-sample t-tests to find features whose mean value differs significantly between properties hosted by superhosts and properties hosted by others.
# Helper function for two independent sample t-test
def ttest(df, group_feature, test_feature):
flag0 = df[group_feature] == False
flag1 = ~flag0
vector_0 = df.loc[flag0, test_feature]
vector_1 = df.loc[flag1, test_feature]
statistic, pvalue = stats.ttest_ind(vector_1, vector_0)
return [statistic, pvalue, test_feature]
# T-test
ttest_result = []
for col in df_total.columns:
if col == "host_is_superhost":
continue
else:
ttest_result.append(ttest(df_total, "host_is_superhost", col))
# Display the t-test result
ttest_result = pd.DataFrame(ttest_result, columns=["statistics", "pvalue", "feature"])
# P-value adjustment
multitest_result = multi.multipletests(ttest_result.pvalue, method="bonferroni")
ttest_result["significant"], ttest_result["adjust_pvalue"] = (
multitest_result[0],
multitest_result[1],
)
ttest_result.sort_values(
["significant", "adjust_pvalue"], ascending=[False, True]
).style.bar(subset=["statistics"], align="zero", color=["#d65f5f", "#5fba7d"])
# # Result Evaluation
# About 43% of the significant features are amenities.
ttest_result.feature[ttest_result.significant == True].isin(
unqiue_amenities
).sum() / np.sum(ttest_result.significant == True)
matplotlib.rcParams["figure.figsize"] = (6, 6)
plt.subplot(221)
sns.barplot(data=df_total, y="Shampoo", x="host_is_superhost")
plt.subplot(222)
sns.barplot(data=df_total, y="number_of_reviews", x="host_is_superhost")
plt.subplot(223)
sns.barplot(data=df_total, y="calculated_host_listings_count", x="host_is_superhost")
plt.subplot(224)
sns.barplot(data=df_total, y="price", x="host_is_superhost")
plt.tight_layout()
# It is not surprising to see that superhosts get many more reviews and higher ratings. Superhosts respond to travelers quickly and pay more attention to safety: they ask travelers for verification and also tend to get themselves verified on multiple platforms.
# Among the features that significantly distinguish superhosts from other hosts, about half are amenities. Interestingly, _Shampoo_ is the top impacting factor while _kitchen_ merely scrapes through the significance threshold. This is understandable, as travelers usually have no time for cooking; taking a good shower and getting a good night's sleep matter more for a short-term stay.
#
# Given all the superiority of superhosts' properties, you might wonder whether superhosts charge a premium price. Statistically, this is not the case. One interesting feature is _calculated_host_listings_count_, the number of properties owned by the host. It seems that companies and owners with many properties cannot pay enough attention to each of them, and superhosts are usually owners with only 1 or 2 properties.
# So for Q1, the answer is... yes! As a traveler you should prioritize Airbnb units hosted by superhosts over other units. You can enjoy better living conditions without worrying about being overcharged. However, superhosts' properties are very popular, so you should definitely consider booking ahead.
# ## Q2: What kind of properties should I look at to ensure a successful Airbnb unit?
# In order to determine the success of an Airbnb unit we need to find a metric. The first thing that comes to mind is _reviews_per_month_. However, it is possible that a property gets many reviews that are simply piles of complaints from travelers. Thus, I decide to use the product of _review_scores_rating_ and _reviews_per_month_ as the metric.
# Also, we need to get rid of all the review- and rating-related features in the data since they are heavily correlated with the response variable. Moreover, I will also drop features related to the host, because the question is about the attributes of a property an investor should pay attention to.
# # Data Modeling
# Create the metric and named it "performance"
df_total["performance"] = df_total.reviews_per_month * df_num.review_scores_rating
# Draw host and review related features
featureToDrop = [f for f in df_total.columns.values if "review" in f or "host" in f]
featureToDrop
# Our first Ridge Regression model gets an R2 score around 0.25. Let's take a look at our data and see if we can improve it.
X = df_total.drop(featureToDrop + ["performance"], axis=1)
y = df_total.performance
clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 10, 100], scoring="r2").fit(X, y)
print("r2 score:{:.3f}".format(clf.score(X, y)))
# Let's look at the distribution of the response variable. It looks like a positively skewed distribution. I will log-transform the heavily skewed numeric features in the data set and see if the result improves.
matplotlib.rcParams["figure.figsize"] = (6, 6)
fig = plt.figure()
performance = df_total["performance"]
log_performance = np.log1p(performance)
ax = fig.add_subplot(2, 2, 1)
performance.hist(ax=ax)
plt.title("performance")
ax = fig.add_subplot(2, 2, 2)
sm.qqplot((performance - performance.mean()) / performance.std(), line="45", ax=ax)
plt.title("performance")
ax = fig.add_subplot(2, 2, 3)
log_performance.hist(ax=ax)
plt.title("log(1+performance)")
ax = fig.add_subplot(2, 2, 4)
sm.qqplot(
(log_performance - log_performance.mean()) / log_performance.std(), line="45", ax=ax
)
plt.title("log(1+performance)")
plt.tight_layout()
# Choose numeric features
num_feature = df_total.select_dtypes(include="number").dtypes.index.values
# Compute the skewness; log transform features with abs(skewness) > 0.75
skewed_feats = df_total[num_feature].apply(lambda x: x.skew())
skewed_feats = skewed_feats[abs(skewed_feats) > 0.75]
skewed_feats = skewed_feats.index
# Helper function transforming features containing negative values
# to features only containing nonnegative values
def moveToNonNegative(series):
if series.min() < 0:
series = series - series.min()
return series
df_total[skewed_feats] = df_total[skewed_feats].apply(moveToNonNegative)
df_total[skewed_feats] = np.log1p(df_total[skewed_feats])
X = df_total.drop(featureToDrop + ["performance"], axis=1)
y = df_total.performance
clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 10, 100], scoring="r2").fit(X, y)
print("r2 score:{:.3f}".format(clf.score(X, y)))
# Our r2 score just jumped from 0.253 to 0.317. I will stop here and check what the coefficients suggest.
# # Result Evaluation
coef = pd.Series(clf.coef_, index=X.columns)
imp_coef = pd.concat([coef.sort_values().head(10), coef.sort_values().tail(10)])
matplotlib.rcParams["figure.figsize"] = (8.0, 5.0)
imp_coef.plot(kind="barh")
plt.title("Coefficients in the Lasso Model")
plt.show()
# By looking at the features with the largest absolute coefficients, we can get some insights. While amenities like _Shampoo_ and _Essentials_ contribute positively to the good performance of an Airbnb unit, properties with a _Hot Tub_ could make things worse. It is rare to find a property in downtown with a pool; it is possible that these properties are located in rural areas far away from Seattle's attractions.
# I have never been to Seattle, so it is curious to observe that longitude and latitude correspond to the smallest negative coefficients at the bottom. It suggests that people should avoid the northeastern area of Seattle. Out of curiosity, I googled the crime rate distribution in Seattle and the result confirms the finding.
# So for Q2, the answer is.. you should avoid searching for properties in the northeastern area. Apartments in the downtown area should be at the top of your priority list. Usually, these luxury properties come with _Heating_ and _Wireless Internet_. Moreover, you should consider renting out the asset as an entire apartment or private rooms instead of a shared room.
# 
# # Q3. How's the development of Airbnb in Seattle? Do you observe any pattern of trend?
# In this section, I will dive into the reviews data, which stores the dates and reviews from 2010 to 2015. I will use the number of reviews as a metric to reflect the growth of Airbnb in Seattle. I can also use this metric to see whether any seasonal trend exists in Airbnb property renting.
# Extract year and month from date feature
df_reviews.date = pd.to_datetime(df_reviews.date)
df_reviews["year"] = df_reviews.date.dt.year
df_reviews["month"] = df_reviews.date.dt.month
df_reviews.head()
review_count = df_reviews.groupby(["year", "month"])["comments"].size().reset_index()
# We drop 2009 and 2016 here as they don't contain data for a full 12 months
review_count = review_count.loc[
(review_count.year < 2016) & (review_count.year > 2009), :
]
# I use a log transform here to better observe the seasonal trend across different years
review_count_log = review_count.copy()
review_count_log.comments = np.log(review_count_log.comments)
sns.relplot(
data=review_count_log,
x="month",
hue="year",
y="comments",
marker="o",
kind="line",
palette="RdPu_r",
aspect=10 / 8,
)
ax = plt.gca()
ax.set_ylabel("log(review_count)")
ax.set_title("log(#review) for each month from 2010 to 2015")
plt.show()
# From the graph above, I can observe that there is a seasonal trend. It seems like August and September are travelers' favourite months to visit Seattle. The demand for Airbnb usually goes down with the freezing weather in January and February.
# Moreover, the number of Airbnb reviews grows each year. The next question is how the year-over-year growth rate changes through all these years.
review_count_pivot = review_count.pivot(
index="month", columns="year", values="comments"
).T
# YoY growth rate per month
review_YoY_perMonth = review_count_pivot.diff() / review_count_pivot.shift()
review_YoY_perMonth = review_YoY_perMonth.dropna()
review_YoY_perMonth = (
review_YoY_perMonth.stack().reset_index().rename(columns={0: "comments"})
)
# YoY growth rate per year
review_YoY_perYear = (
review_count_pivot.sum(axis=1).diff() / review_count_pivot.sum(axis=1).shift()
)
review_YoY_perYear = review_YoY_perYear.dropna()
review_YoY_perYear = review_YoY_perYear.reset_index().rename(columns={0: "comments"})
f, axs = plt.subplots(1, 2, figsize=(15, 6))
sns.lineplot(
data=review_YoY_perMonth,
x="year",
hue="month",
y="comments",
marker="o",
palette="RdPu_r",
ax=axs[0],
)
ax = axs[0]
ax.axhline(1, ls="--")
ax.set_xticks([2011, 2012, 2013, 2014, 2015])
ax.set_yticks([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
ax.set_ylabel("Growth Rate")
ax.set_title(
"Year-Over-Year growth rate of review number of each month from 2010 to 2015"
)
sns.lineplot(
data=review_YoY_perYear,
x="year",
y="comments",
marker="o",
palette="RdPu_r",
ax=axs[1],
)
ax = axs[1]
ax.axhline(1, ls="--")
ax.set_xticks([2011, 2012, 2013, 2014, 2015])
ax.set_ylabel("Growth Rate")
ax.set_title("Year-Over-Year growth rate of review number each year from 2010 to 2015")
f.tight_layout()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
xls = pd.ExcelFile(
"../input/highway-accidents-in-nigeria-q22020/Road transport Q2 2020.xlsx"
)
df1 = pd.read_excel(xls, sheet_name="Causative factors of RTC")
df1.tail()
key = pd.DataFrame()
key = df1.iloc[41:61, :2]
legends = pd.DataFrame()
header = key.iloc[1]
legends = key.iloc[2:, :]
legends.columns = ["causative_factors", "code"]
legends.reset_index(inplace=True, drop=True)
# legends.drop(['42'], axis=1)
legends["causative_factors"] = legends.causative_factors.map(lambda x: x.split("(")[0])
legends
# print(legends.columns)
causes_by_state = df1.loc[1:38, :]
header = df1.loc[0]
causes_by_state.columns = header
causes_by_state.reset_index(inplace=True, drop=True)
causes_by_state
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(16, 8))
g = sns.barplot(x=causes_by_state["STATE"][:-1], y=causes_by_state["TOTAL"][:-1])
g.set_xticklabels(g.get_xticklabels(), rotation=90)
import geopandas as gpd
nigeria_roads = gpd.read_file("../input/nigeria-roads/Nigeria_Roads.shp")
nigeria_roads.head()
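# A small sketch (my addition): plot the road network GeoDataFrame directly to
# sanity-check the loaded shapefile, assuming it holds road line geometries.
ax = nigeria_roads.plot(figsize=(10, 10), linewidth=0.5, color="grey")
ax.set_title("Nigeria road network")
plt.show()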
|
import os
import time
from time import sleep
import gc
import numpy as np
import pandas as pd
import psutil
import humanize
from tqdm.auto import tqdm
import sys
from PIL import Image
class Config:
"""
Here we save the configuration for the experiments
"""
# dirs
base_dir = os.path.abspath("../")
data_dir = os.path.join(base_dir, "input/bengaliai-cv19")
working_dir = os.path.join(base_dir, "working")
submissions_dir = os.path.join(base_dir, "submissions")
models_dir = os.path.join(base_dir, "models")
logs_dir = os.path.join(base_dir, "logs")
# Hparams
train_batch_size = 32
test_batch_size = 32
base_model = "resnet18"
learning_rate = 1e-3
device = "cuda" # cuda -> GPU, "cpu"->CPU, "tpu"->TPU
def get_parquet_lists():
"""
Load all .parquet files and get train and test splits
"""
parquet_files = [f for f in os.listdir(Config.data_dir) if f.endswith(".parquet")]
train_files = [f for f in parquet_files if "train" in f]
test_files = [f for f in parquet_files if "test" in f]
return train_files, test_files
def read_parquet_via_pandas(filenames: list = None, files=4, cast="uint8", resize=1):
gc.collect()
sleep(5)
# wait for gc to complete
memory_before = psutil.virtual_memory()[3]
# NOTE: loading all the files into a list variable, then applying pd.concat() into a second variable, uses double the memory
# loading training files
if filenames is None:
print("[INFO] Getting file lists")
filenames, _ = get_parquet_lists()
print(filenames)
# concat dataFrames
try:
df = pd.concat(
[
pd.read_parquet(os.path.join(Config.data_dir, filename))
.set_index("image_id", drop=True)
.astype("uint8")
for filename in tqdm(filenames[:files])
]
)
memory_end = psutil.virtual_memory()[3]
print(" sys.getsizeof():", humanize.naturalsize(sys.getsizeof(df)))
print(
" memory total: ",
humanize.naturalsize(memory_end - memory_before),
"+system",
humanize.naturalsize(memory_before),
)
return df
except Exception as ex:
print(f"[ERROR] {ex}")
filenames, _ = get_parquet_lists()
filenames
df = pd.read_parquet(os.path.join(Config.data_dir, filenames[2]), engine="pyarrow")
df.head()
idx = np.random.randint(low=0, high=len(df))
im_array = df.iloc[idx][1:].values
im_array = im_array.reshape(137, 236)
Image.fromarray(im_array.astype("uint8"))
images = df.drop("image_id", axis=1)
images_ids = df.image_id.values
images_ids
images.iloc[0].values.reshape(137, 236)
# Make sure the output directory exists before saving (np.save does not create it)
os.makedirs(os.path.join(Config.working_dir, "images"), exist_ok=True)
for index, img_id in enumerate(tqdm(images_ids)):
path = os.path.join(Config.working_dir, "images", f"{img_id}.npy")
# print(path)
# break
try:
np.save(path, images.iloc[index].values.reshape(137, 236))
except Exception as ex:
print(ex)
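# Quick sanity check (added sketch): load one of the saved arrays back and
# confirm it has the expected image shape of (137, 236).
sample = np.load(os.path.join(Config.working_dir, "images", f"{images_ids[0]}.npy"))
print(sample.shape)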
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
train.head()
train.describe()
train.isnull().sum()
train.duplicated().sum()
train.info()
train.dtypes
train["target"].value_counts()
train.drop("id", axis=1, inplace=True)
train.head()
# data_cleaning_suggestions(train)  # commented out: this helper is not defined or imported in this notebook
train.corr()
import seaborn as sns
sns.heatmap(train.corr(), annot=True)
X = train.drop("target", axis=1)
Y = train["target"]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
import sklearn as sk
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=73)
model.fit(X_train, Y_train)
model.score(X_train, Y_train)
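# Also score the held-out split (added sketch); training accuracy alone is an
# optimistic estimate of performance.
print("Test accuracy:", model.score(X_test, Y_test))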
df_sub = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
test.drop(["id"], axis=1, inplace=True)
test_pred = model.predict(test)
df_sub["target"] = test_pred
df_sub
df_sub.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv(
"/kaggle/input/predict-student-performance-from-game-play/train.csv", nrows=10000
)
train.head()
train.shape
train.columns
# **What each column represents**
# *session_id* : Session ID
# *elapsed_time* : Time elapsed between the start of the session and when it was recorded
# *event_name* : Event type name
# *name* : Event name
# *level* : Game level for the event
# *page* : Page number (for notebook related events)
# *room_coor_x, room_coor_y* : Coordinates of the click wrt the in-game room
# *screen_coor_x, screen_coor_y* : Coordinates of the click wrt the player's screen
# *hover_duration* : Duration for which the hover exists
# *text* : Text seen by the player during the event
# *fqid* : Fully qualified event ID
# *room_fqid* : fqid for the room
# *text_fqid* : fquid for the text
# *fullscreen* : Label to denote if the player uses full screen mode
# *hq*: Label to denote if the game is in HQ
# *music* : Label to denote if the music is on or off while playing
# *level_group* : Which group of levels (0-4, 5-12 or 13-22) the current row belongs to
plt.plot(train["level"], train["hover_duration"])
sns.catplot(
data=train,
x="hover_duration",
y="level_group",
hue="fullscreen",
kind="violin",
)
sns.catplot(data=train, x="elapsed_time", y="level_group", hue="music", kind="violin")
|
# importing the Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Read the DataFrame
df = pd.read_csv("/kaggle/input/temperatures-of-india/temperatures.csv")
df.head()
# #### Data Processing
df.shape
df.isnull().sum()
df.info()
df.head()
df.tail()
df.describe()
# #### Plotting the graph YEAR VS AVERAGE TEMPERATURE
plt.scatter(df["YEAR"], df["ANNUAL"], color="olive")
plt.title("Average temperature during (1901 - 2017)")
plt.xlabel("YEAR")
plt.ylabel("Temperature (Celsius)")
plt.show()
# #### Predicting the best model for this dataset
# #### Split the dataset into training and test datasets
X = df.drop("ANNUAL", axis="columns")
y = df.ANNUAL
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=10
)
# #### Linear Regression Model
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
y_pred
lr.score(X_test, y_test)
from sklearn.linear_model import Lasso
log = Lasso()
log.fit(X_train, y_train)
log.score(X_test, y_test)
from sklearn.linear_model import Ridge
rig = Ridge()
rig.fit(X_train, y_train)
rig.score(X_test, y_test)
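# Side-by-side comparison of the three models (added sketch).
scores = pd.DataFrame(
    {
        "Model": ["LinearRegression", "Lasso", "Ridge"],
        "R2 (test)": [
            lr.score(X_test, y_test),
            log.score(X_test, y_test),
            rig.score(X_test, y_test),
        ],
    }
)
print(scores)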
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
AdaBoostClassifier,
VotingClassifier,
)
from sklearn.model_selection import (
GridSearchCV,
cross_val_score,
StratifiedKFold,
RepeatedStratifiedKFold,
)
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
ConfusionMatrixDisplay,
precision_recall_fscore_support,
roc_auc_score,
classification_report,
)
# # Fetal Health Classification
# ## 1. Data loading and EDA
# Load our dataset and view the first 5 rows
# Our dataset lives here: https://www.kaggle.com/datasets/andrewmvd/fetal-health-classification
# It contains 2126 cardiotocogram measurements, classified as described in
# Ayres de Campos et al. (2000) SisPorto 2.0 A Program for Automated Analysis of Cardiotocograms. J Matern Fetal Med 5:311-318
data = pd.read_csv("../input/fetal-health-classification/fetal_health.csv")
data.head()
# Information contained in our dataframe
data.info()
data.shape
data.isnull().sum()
# Select the fetal_health variable (dependent variable) and count the values it can take.
# How many samples are 1 (Normal), 2 (Suspect) and 3 (Pathological)?
# This may help: https://pandas.pydata.org/docs/getting_started/intro_tutorials/03_subset_data.html
# This may help: https://pandas.pydata.org/docs/reference/api/pandas.Series.value_counts.html
# Your code here
# Count the number of instances in each class
class_counts = data["fetal_health"].value_counts()
# Print the class distribution
print("Class distribution:")
print(class_counts)
# We detect fetal health: 1 - Normal (1,655 cases), 2 - Suspect (295 cases), 3 - Pathological (176 cases)
#
# Get a better idea of the values the variables can take by extracting statistical indicators
# This may help: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.describe.html
# Your code here
data.describe()
# Plot the distribution of some variables such as fetal_movement and fetal_health
# This may help: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.plot.html
# Your code here
# First, let's evaluate the target and find out whether our data is imbalanced or not
colours = ["#f7b2b0", "#8f7198", "#003f5c"]
sns.countplot(data=data, x="fetal_health", palette=colours)
plt.title("fetal health rating count")
plt.xticks(rotation=90)
plt.ylabel("Number of cases per Rating")
plt.show()
# The normal class contains around 1,600 patients, the suspect class around 300 patients and the pathological class around 200 patients. This represents common behaviour in real-life problems in this application field, since patients belonging to the problematic classes are usually fewer than the rest. Clearly, this variable is completely imbalanced.
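# Quantify the imbalance (added sketch): share of each class as a percentage.
print((data["fetal_health"].value_counts(normalize=True) * 100).round(1))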
data["fetal_health"].value_counts().plot(kind="pie")
plt.title("fetal health rating count")
plt.xticks(rotation=90)
plt.ylabel("Number of cases per Rating")
plt.show()
# Your code here
# Plot a histogram for each feature
# Iterate through each column in the DataFrame
for col in data.columns:
data[col].plot.hist()
plt.title(f"Histogram of {col}")
plt.show()
# Boxplots are very useful to see graphically which values our variables take. Generate one.
# This may help: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.boxplot.html
data.boxplot(figsize=(6, 10))
plt.xticks(rotation=90, ha="right")
plt.show()
# The upper and lower lines represent the maximum and minimum value of each feature; points above the maximum line and below the minimum line are considered outliers. The height of the box represents the interquartile range, i.e. the difference between the 75th and 25th percentiles of the observations. The line inside the box represents the median value of each feature. From the boxplot it is possible to detect some outliers distributed across almost all the features; however, assuming those outliers are not a consequence of human or measurement errors but represent some specific behaviour of the dataset, I decided to leave them untouched, so that the algorithms can learn from that specific behaviour.
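# A small sketch of the 1.5*IQR rule described above: count how many values of
# each feature fall outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
q1 = data.quantile(0.25)
q3 = data.quantile(0.75)
iqr = q3 - q1
outlier_counts = ((data < (q1 - 1.5 * iqr)) | (data > (q3 + 1.5 * iqr))).sum()
print(outlier_counts.sort_values(ascending=False).head(10))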
# Some variables have much smaller variation than others. Select only the columns
# with low variation to see their boxplots in detail
# Compute the variance of each column and select those with a variance below 0.01
low_var_cols = data.columns[data.var() < 0.01]
# Show the boxplots of the selected columns
for col in low_var_cols:
plt.figure()
data.boxplot(column=[col])
plt.title(col)
# Let's now study the probability distributions of the variables. This will help us find skewness
# You can iterate over some columns and plot their histogram and density function (KDE)
# Your code here
# Select some columns to analyse their distribution
cols = ["baseline value", "accelerations", "fetal_movement", "uterine_contractions"]
# Iterate over each column and generate a histogram and a density function (KDE) for each one
for col in cols:
sns.histplot(data[col], kde=True)
plt.title(col)
plt.show()
# Let's draw a correlation map
# Which independent variables are highly correlated with each other?
# Which independent variables are highly correlated with the dependent one?
# This may help: https://seaborn.pydata.org/generated/seaborn.heatmap.html
# This may help: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.corr.html
# Your code here
plt.figure(figsize=(18, 10))
sns.heatmap(
data.corr(), annot=True, linewidths=0.1, annot_kws={"fontsize": 12}, fmt=".2f"
)
# From the correlation matrix shown above, it is possible to detect the pairwise correlation between the predictors and the response. In addition, some variables related to the histogram values appear to be highly correlated with each other (multicollinearity). Multicollinearity is a condition that can occur in a regression model when there is a high correlation between two or more predictor variables (independent variables). This correlation can have a negative impact on the accuracy and stability of the regression model, since it makes it harder to identify the true relationship of each independent variable with the dependent variable (target variable).
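# A sketch of what "highly correlated" means in practice: list the feature pairs
# with an absolute pairwise correlation above 0.8 (the threshold is my choice).
corr_abs = data.corr().abs()
upper_triangle = corr_abs.where(np.triu(np.ones(corr_abs.shape, dtype=bool), k=1))
high_pairs = upper_triangle.stack().sort_values(ascending=False)
print(high_pairs[high_pairs > 0.8])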
# # 2. Preparing the data
# Let's generate a variable (X) with all the columns that represent independent variables
# (all except the last one)
# and a series (y) for the dependent variable
# Your code here
# Create a variable X with all the columns except the last one
X = data.iloc[:, :-1]
# Create a variable y for the last column (target variable)
y = data.iloc[:, -1]
# How does the line above work? https://stackoverflow.com/a/37512144
# Let's split our dataset. We want 70% of the samples for training and 30% for validation
# Since the classes are not balanced we will use the "stratify" option.
# Generate the variables X_train, X_test, y_train, y_test
# This may help: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)
# We will also work with scaled data. Scale X using StandardScaler
# Generate the variables X_train_ss, X_test_ss
# Your code here
# Scale the training and validation data
scalar = StandardScaler()
X_train_ss = scalar.fit_transform(X_train)
X_test_ss = scalar.transform(X_test)
# ## 3 Modelling and evaluation
# ## 3.1 Logistic Regression
# Create an instance of LogisticRegression and train it with X_train, y_train
# Get the predictions for the X_test set and call them y_pred
# This may help: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
# Your code here
# Create an instance of LogisticRegression
lr = LogisticRegression(solver="lbfgs", max_iter=400)
# Train the model with the training data
lr.fit(X_train, y_train)
# Get the predictions for the X_test set
y_pred = lr.predict(X_test)
# Compute the accuracy of the model
accuracy = accuracy_score(y_test, y_pred)
print(f"The accuracy of the model is {accuracy:.2f}")
# Compute the accuracy metric and store it in the variable acc_lr
# This may help: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html#sklearn.metrics.accuracy_score
# Compute the accuracy of the model
acc_lr = accuracy_score(y_test, y_pred)
print(f"My model has an accuracy of {acc_lr:.2f}")
# Show the confusion matrix
# This may help: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html#sklearn.metrics.confusion_matrix
# Compute the confusion matrix of the model
cm_lr = confusion_matrix(y_test, y_pred)
print("The confusion matrix of the model is:")
print(cm_lr)
# Repeat the logistic regression using the normalized data generated in the previous section
# Compare the accuracy rates
# Your code here
# Separate the predictor variables (X) and the target variable (y)
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Scale the data
scalar = StandardScaler()
X_norm = scalar.fit_transform(X)
# Split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(
    X_norm, y, test_size=0.3, random_state=42
)
# Create an instance of LogisticRegression
lr = LogisticRegression()
# Train the model with the normalized training data
lr.fit(X_train, y_train)
# Get the predictions for the normalized X_test set
y_pred_norm = lr.predict(X_test)
# Compute the accuracy of the model using normalized data
acc_lr_norm = accuracy_score(y_test, y_pred_norm)
print(
    f"My model's accuracy with normalized data is {acc_lr_norm:.2f}"
)
# ## 3.2 KNN
# Repeat the regression exercise using KNN
# This may help: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error
# Separate the predictor variables (X) and the target variable (y)
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Normalize the data
scalar = StandardScaler()
X_norm = scalar.fit_transform(X)
# Split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(
    X_norm, y, test_size=0.3, random_state=42
)
# Create an instance of KNeighborsRegressor
knn = KNeighborsRegressor(n_neighbors=5)
# Train the model with the normalized training data
knn.fit(X_train, y_train)
# Get the predictions for the normalized X_test set
y_pred_norm = knn.predict(X_test)
# Compute the mean squared error of the model using normalized data
mse_knn_norm = mean_squared_error(y_test, y_pred_norm)
print(
    f"The mean squared error of the model with KNN and normalized data is {mse_knn_norm:.2f}"
)
# ## 3.3 Random forest
# Repeat the regression exercise using Random Forest
# This may help: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# Your code here
from sklearn.ensemble import RandomForestRegressor
# Separate the predictor variables (X) and the target variable (y)
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Normalize the data
scalar = StandardScaler()
X_norm = scalar.fit_transform(X)
# Split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(
    X_norm, y, test_size=0.3, random_state=42
)
# Create an instance of RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100, random_state=42)
# Train the model with the normalized training data
rf.fit(X_train, y_train)
# Get the predictions for the normalized X_test set
y_pred_norm = rf.predict(X_test)
# Compute the mean squared error of the model using normalized data
mse_rf_norm = mean_squared_error(y_test, y_pred_norm)
print(
    f"The mean squared error of the model with Random Forest and normalized data is {mse_rf_norm:.2f}"
)
# This means that the regression model using Random Forest and normalized data fit the test data very well, which is reflected in the low mean squared error (MSE) of 0.06. This indicates that the model can accurately predict the target values for unseen data. A low MSE is a good sign that the model is suitable for this specific prediction problem.
# (Bonus) Random Forest lets you compute the "importance" of the variables in its computation
# This may help: https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html
# Separate the predictor variables (X) and the target variable (y)
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Normalize the data
scalar = StandardScaler()
X_norm = scalar.fit_transform(X)
# Split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(
    X_norm, y, test_size=0.3, random_state=42
)
# Create an instance of RandomForestRegressor and train the model
rf = RandomForestRegressor(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
# Get the feature importances
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
# Create a bar chart
plt.figure(figsize=(10, 6))
plt.title("Random Forest Feature Importances")
plt.bar(
range(X.shape[1]),
importances[indices],
color="r",
yerr=std[indices],
align="center",
)
plt.xticks(range(X.shape[1]), X.columns[indices], rotation="vertical")
plt.xlim([-1, X.shape[1]])
plt.tight_layout()
plt.show()
|
# for linear algebra
import numpy as np
# for data processing
import pandas as pd
# for Box-Cox Transformation
from scipy import stats
# for min_max scaling
from mlxtend.preprocessing import minmax_scaling
# plotting modules
import seaborn as sns
import matplotlib.pyplot as plt
# set seed for reproducibility
np.random.seed(0)
# #Import the dataset from here: Vehicle Dataset from CarDekho.
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# #Reading first data csv file and showing the output.
data = pd.read_csv("../input/vehicle-dataset-from-cardekho/car data.csv")
data.head()
# #Reading Second data csv file and showing the output.
data = pd.read_csv(
"../input/vehicle-dataset-from-cardekho/CAR DETAILS FROM CAR DEKHO.csv"
)
data.head()
# #Reading Third data csv file and showing the output.
data = pd.read_csv("../input/vehicle-dataset-from-cardekho/Car details v3.csv")
data.head()
# #Check for NULL/Missing Values/Duplicate Values (drop if any)
#
data.isnull()
data.isnull().sum()
# #When we run the above code, we find that there are no missing/null values in our dataset.
data.describe()
data.info()
# **Checking for unique values in different columns**
print("Fuel type:-", data["Fuel_Type"].unique())
print("Seller:-", data["Seller_Type"].unique())
print("Transmission:-", data["Transmission"].unique())
print("Owner:-", data["Owner"].unique())
print("Year:-", data["Year"].unique())
# **Importing libraries for visualization**
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=[12, 8])
sns.countplot(x="Year", data=data)
# ##### **The above Countplot shows the Number of cars for each year**
# **Adding a new column to our dataset called Vehicle_age**
data["Vehicle_age"] = 2021 - data["Year"]
# **Dropping the Year column from our dataset**
data.drop(["Year"], axis=1, inplace=True)
data.head()
# ### **Exploratory Data Analysis**
sns.pairplot(data)
# **Plotting countplots for various features**
plt.figure(figsize=[12, 7])
plt.subplot(2, 2, 1)
sns.countplot(x="Fuel_Type", data=data)
plt.subplot(2, 2, 2)
sns.countplot(x="Owner", data=data)
plt.subplot(2, 2, 3)
sns.countplot(x="Transmission", data=data)
plt.subplot(2, 2, 4)
sns.countplot(x="Seller_Type", data=data)
plt.show()
# From the above count plots we can learn various things, such as:-
# i) How many petrol, diesel and CNG cars there are.
# ii) The number of cars for each owner count.
# iii) How many manual and automatic cars there are.
# iv) The count of each seller type.
# **Plotting boxplots for different features**
plt.figure(figsize=[12, 7])
plt.subplot(2, 2, 1)
sns.boxplot(x="Selling_Price", data=data)
plt.subplot(2, 2, 2)
sns.boxplot(x="Present_Price", data=data)
plt.subplot(2, 2, 3)
sns.boxplot(x="Kms_Driven", data=data)
plt.subplot(2, 2, 4)
sns.boxplot(x="Vehicle_age", data=data)
plt.show()
# The above graphs provide us with information such as the Q1, median, Q3, minimum and maximum values and the outliers of the different features
# **Having a look at extreme data points**
# Cars with selling price more than 20 lakhs
data[data["Selling_Price"] > 20]
# Cars that are driven for more than 1 Lakh Kms.
data[data["Kms_Driven"] > 100000]
# Cars older than 15 years of age.
data[data["Vehicle_age"] > 15]
# **Checking relationship between features**
plt.figure(figsize=[12, 8])
sns.heatmap(data.corr(), annot=True, cmap="PuBuGn")
# **Visualizing selling price relationship with other features**
plt.figure(figsize=[9, 6])
plt.subplot(1, 2, 1)
sns.barplot(x="Fuel_Type", y="Selling_Price", data=data)
plt.subplot(1, 2, 2)
sns.stripplot(x="Fuel_Type", y="Selling_Price", data=data)
# **We can say that Diesel cars have a higher selling price than Petrol and CNG cars.**
plt.figure(figsize=[9, 6])
plt.subplot(1, 2, 1)
sns.barplot(x="Seller_Type", y="Selling_Price", data=data)
plt.subplot(1, 2, 2)
sns.stripplot(x="Seller_Type", y="Selling_Price", data=data)
# **From the graphs we can infer that dealers are earning more money than individual sellers, as dealers are getting a higher selling price.**
plt.figure(figsize=[9, 6])
plt.subplot(1, 2, 1)
sns.barplot(x="Transmission", y="Selling_Price", data=data)
plt.subplot(1, 2, 2)
sns.stripplot(x="Transmission", y="Selling_Price", data=data)
# **From the above graphs, we can conclude that people prefer Automatic cars over Manual cars hence they have a high selling price.**
plt.figure(figsize=[9, 6])
plt.subplot(1, 2, 1)
sns.barplot(x="Owner", y="Selling_Price", data=data)
plt.subplot(1, 2, 2)
sns.stripplot(x="Owner", y="Selling_Price", data=data)
plt.figure(figsize=[12, 6])
sns.scatterplot(x="Vehicle_age", y="Selling_Price", data=data)
plt.figure(figsize=[12, 6])
sns.scatterplot(x="Kms_Driven", y="Selling_Price", data=data)
# ### **Model Building**
# **Creating dummy variables for categorical features**
data.drop(["Car_Name"], inplace=True, axis=1)
data = pd.get_dummies(data, drop_first=True)
data.head()
x = data.iloc[:, 1:]
y = data.iloc[:, 0].values
x.head()
print(y)
# Feature Importance
from sklearn.ensemble import ExtraTreesRegressor
etr = ExtraTreesRegressor()
etr.fit(x, y)
print(etr.feature_importances_)
# plotting important feature
imp = pd.Series(etr.feature_importances_, index=x.columns)
imp.nlargest(5).plot(kind="barh")
plt.show()
# **Train Test Split**
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
# ### Linear regression
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
# predicting value using linear regression
y_pred = lr.predict(x_test)
print(
np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1)
)
plt.figure(figsize=[10, 6])
plt.plot(y_pred, label="Predicted")
plt.plot(y_test, label="Actual_test")
plt.legend()
plt.title("Linear Regression Model")
from sklearn.metrics import r2_score
lr_r2 = r2_score(y_test, y_pred)
print(lr_r2)
# ### support vector regressor
#
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
sc_y = StandardScaler()
x_train_scaled = sc_x.fit_transform(x_train)
y_train = y_train.reshape(len(y_train), 1)
y_train_scaled = sc_y.fit_transform(y_train)
print(x_train_scaled)
print(y_train_scaled)
from sklearn.svm import SVR
svr = SVR(kernel="rbf")
svr.fit(x_train_scaled, y_train_scaled.ravel())
# predicting values and inverse-transforming back to the original scale
y_pred = sc_y.inverse_transform(
    svr.predict(sc_x.transform(x_test)).reshape(-1, 1)
).ravel()
print(
np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1)
)
plt.figure(figsize=[10, 6])
plt.plot(y_pred, label="Predicted")
plt.plot(y_test, label="Actual_test")
plt.legend()
plt.title("Support Vector Regressor Model")
svr_r2 = r2_score(y_test, y_pred)
print(svr_r2)
# ### Random Forest
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor()
from sklearn.model_selection import RandomizedSearchCV
n_estimators = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200]
max_features = ["auto", "sqrt"]
max_depth = [5, 10, 15, 20, 25, 30]
min_samples_split = [2, 5, 10, 15, 100]
min_samples_leaf = [1, 2, 5, 10, 12]
# creating random grid
random_grid = {
"n_estimators": n_estimators,
"max_features": max_features,
"max_depth": max_depth,
"min_samples_split": min_samples_split,
"min_samples_leaf": min_samples_leaf,
}
print(random_grid)
rfr_random = RandomizedSearchCV(
estimator=rfr,
param_distributions=random_grid,
n_iter=10,
cv=6,
verbose=2,
random_state=14,
n_jobs=1,
)
rfr_random.fit(x_train, y_train)
rfr_random.best_params_
rfr_random.best_score_
y_pred = rfr_random.predict(x_test)
print(
np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1)
)
plt.figure(figsize=[10, 6])
plt.plot(y_pred, label="Predicted")
plt.plot(y_test, label="Actual_test")
plt.legend()
plt.title("Random Forest Regressor Model")
rfr_r2 = r2_score(y_test, y_pred)
print(rfr_r2)
# comparing models R^2
model = ["Linear Regression", "SupportVectorRegressor", "RandomForestRegressor"]
values = [lr_r2, svr_r2, rfr_r2]
table = pd.DataFrame({"Models": model, "R squared": values})
display(table)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
sub = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
train.head()
train.info()
train.describe().T
train.isnull().sum()
for col in train.columns:
print(col, train[col].nunique(), "\n")
sns.countplot(x="target", data=train)
train = train.drop(["id"], axis=1)
test = test.drop(["id"], axis=1)
plt.figure(figsize=(10, 8))
corr = train.corr().round(2)
sns.heatmap(corr, linewidth=0.7, annot=True, cmap="coolwarm")
sns.pairplot(train, hue="target")
# train['bin1'] = pd.qcut(train['calc'], 4, duplicates='drop', labels=['Low', 'Medium', 'High','Veryhigh'])
# test['bin1'] = pd.qcut(test['calc'], 4, duplicates='drop', labels=['Low', 'Medium', 'High','Veryhigh'])
# train['bin2'] = pd.qcut(train['urea'], 4, duplicates='drop', labels=['Low', 'Medium', 'High','Veryhigh'])
# test['bin2'] = pd.qcut(test['urea'], 4, duplicates='drop', labels=['Low', 'Medium', 'High','Veryhigh'])
# train['ph_bins'] = pd.cut(x=train['ph'], bins=[0, 2, 6, 8, 11, 14],
# labels=['hi_acidic', 'acidic', 'neutral',
# 'alklin', 'hi_alk'])
# test['ph_bins'] = pd.cut(x=test['ph'], bins=[0, 2, 6, 8, 11, 14],
# labels=['hi_acidic', 'acidic', 'neutral',
# 'alklin', 'hi_alk'])
# train['osm_urea_ratio'] = train['osmo']/train['urea']
# test['osm_urea_ratio'] = test['osmo']/test['urea']
# train['cond_ph_ratio']=train['cond']/train['ph']
# test['cond_ph_ratio']=test['cond']/train['ph']
train["ion_product"] = train["calc"] * train["urea"]
train["calcium_to_urea_ratio"] = train["calc"] / train["urea"]
train["osmolality_to_sg_ratio"] = train["osmo"] / train["gravity"]
train["osmo_density"] = train["osmo"] * train["gravity"]
test["ion_product"] = test["calc"] * test["urea"]
test["calcium_to_urea_ratio"] = test["calc"] / test["urea"]
test["osmolality_to_sg_ratio"] = test["osmo"] / test["gravity"]
test["osmo_density"] = test["osmo"] * test["gravity"]
train.head()
# col = ['ph_bins', 'bin1', 'bin2']
# train = pd.get_dummies(train, columns = col)
# test = pd.get_dummies(test, columns = col)
train.head()
from pycaret.classification import *
exp1 = setup(train, target="target", session_id=1234)
top3 = compare_models(sort="AUC")
print(top3)
nb = create_model("lr")
tuned_nb = tune_model(nb, optimize="AUC", n_iter=100, fold=10)
# interpret_model(tuned_gbc)
plot_model(tuned_nb, plot="auc")
evaluate_model(tuned_nb)
predictions = predict_model(tuned_nb, data=test)
pred1 = pd.DataFrame(predictions)
predictions["target"] = pred1["prediction_label"]
ID = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
test_id = ID["id"]
ID = pd.DataFrame(test_id, columns=["id"])
result = pd.concat([ID, predictions["target"]], axis=1)
result
result.to_csv("submission_nb.csv", index=False)
|
# Import required Libraries
import pandas as pd
import spacy
import numpy as np
fake_df = pd.read_csv("/kaggle/input/fake-and-real-news-dataset/Fake.csv")
true_df = pd.read_csv("/kaggle/input/fake-and-real-news-dataset/True.csv")
# We need the word Fake or True as the output, so we will add the string "Fake" to fake_df and the string "True" to true_df.
fake_df["Label"] = "Fake"
true_df["Label"] = "True"
fake_df.head()
true_df.head()
# Now we will concat both of above dataframe
df = pd.concat([fake_df, true_df], axis=0)
df.head()
print("Shape of fake_df: ", fake_df.shape)
print("Shape of true_df: ", true_df.shape)
print("Shape of df: ", df.shape)
# We need only 2 columns: the text column as the input and the Label column as the output
# So we will drop all the other columns
df = df.drop(["title", "subject", "date"], axis=1)
df.head()
df["Label"].value_counts()
# Here in our dataset there are 23481 rows of fake news and 21417 rows of true news
# Now we will convert our fake and true data into numerical format
df["label_encode"] = df["Label"].map({"Fake": 0, "True": 1})
df.head()
# Let's convert the text column into word vectors using spaCy
# en_core_web_lg is the trained pipeline for the English language
nlp = spacy.load("en_core_web_lg")
df["text_vector"] = df["text"].apply(lambda x: nlp(x).vector)
df.head()
# Split the data for training and testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
df["text_vector"], df["label_encode"], test_size=0.3, random_state=10
)
print("Shape of X_train: ", X_train.shape)
print("Shape of X_test: ", X_test.shape)
print("Shape of y_train: ", y_train.shape)
print("Shape of y_test: ", y_test.shape)
# stack() is used for joining multiple NumPy arrays.
X_train_convert = np.stack(X_train)
X_test_convert = np.stack(X_test)
# MultinomialNB will not accept negative feature values for training, so we will rescale them into the 0 to 1 range using MinMaxScaler
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_convert_scaled = scaler.fit_transform(X_train_convert)
X_test_convert_scaled = scaler.transform(X_test_convert)
clf = MultinomialNB()
clf.fit(X_train_convert_scaled, y_train)
from sklearn.metrics import classification_report
y_prediction = clf.predict(X_test_convert_scaled)
print(classification_report(y_test, y_prediction))
from sklearn.tree import DecisionTreeClassifier
clf_tree = DecisionTreeClassifier(random_state=0)
clf_tree.fit(X_train_convert_scaled, y_train)
y_prediction_tree = clf_tree.predict(X_test_convert_scaled)
print(classification_report(y_test, y_prediction_tree))
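# End-to-end inference sketch (my addition): vectorize a new, made-up headline with
# the same spaCy pipeline and scaler, then predict with the Naive Bayes model.
sample_text = "Scientists discover new species of deep-sea fish"
sample_vector = scaler.transform(np.stack([nlp(sample_text).vector]))
print("Prediction (0 = Fake, 1 = True):", clf.predict(sample_vector))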
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.layers import (
Conv2D,
Dense,
BatchNormalization,
Activation,
Dropout,
MaxPooling2D,
Flatten,
AveragePooling2D,
)
from keras.optimizers import Adam, SGD
from keras import Sequential
import matplotlib.pyplot as plt
from keras.utils import plot_model
import cv2
import os
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import warnings
warnings.filterwarnings("ignore")
train_dir = "../input/multiclassimagedatasetairplanecar/Dataset/train/"
test_dir = "../input/multiclassimagedatasetairplanecar/Dataset/test/"
# # Data
def count_exp(path, set_):
dict_ = {}
for expression in os.listdir(path):
dir_ = path + expression
dict_[expression] = len(os.listdir(dir_))
df = pd.DataFrame(dict_, index=[set_])
return df
train_count = count_exp(train_dir, "train")
test_count = count_exp(test_dir, "test")
print(train_count)
print(test_count)
print("training pictures\n")
plt.figure(figsize=(14, 22))
i = 1
for expression in os.listdir(train_dir):
img = load_img(
(train_dir + expression + "/" + os.listdir(train_dir + expression)[5])
)
plt.subplot(1, 7, i)
plt.imshow(img)
plt.title(expression)
plt.axis("off")
i += 1
plt.show()
print("testing pictures\n")
plt.figure(figsize=(14, 22))
i = 1
for expression in os.listdir(test_dir):
img = load_img((test_dir + expression + "/" + os.listdir(test_dir + expression)[5]))
plt.subplot(1, 7, i)
plt.imshow(img)
plt.title(expression)
plt.axis("off")
i += 1
plt.show()
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
zoom_range=0.3,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
brightness_range=[0.4, 1.5],
horizontal_flip=True,
)
training_set = train_datagen.flow_from_directory(
train_dir,
batch_size=32,
target_size=(224, 224),
shuffle=True,
color_mode="rgb",
class_mode="categorical",
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(
test_dir,
batch_size=32,
target_size=(224, 224),
shuffle=True,
color_mode="rgb",
class_mode="categorical",
)
# # Model
vgg = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
for layer in vgg.layers:
layer.trainable = False
vgg16Model = Sequential()
vgg16Model.add(vgg)
vgg16Model.add(Flatten())
vgg16Model.add(Dense(3, activation="softmax"))
vgg16Model.summary()
steps_per_epoch = training_set.n // training_set.batch_size
validation_steps = test_set.n // test_set.batch_size
checkpoint = ModelCheckpoint(
"vgg16.h5", monitor="val_accuracy", save_best_only=True, verbose=1
)
earlystop = EarlyStopping(monitor="val_accuracy", patience=8, verbose=1)
vgg16Model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
hist = vgg16Model.fit(
x=training_set,
validation_data=test_set,
epochs=25,
callbacks=[checkpoint, earlystop],
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
)
# # Results
def plot_results(history):
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 2)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(["train", "test"], loc="upper left")
plt.subplot(1, 2, 1)
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.title("model Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["train", "test"], loc="upper left")
plt.show()
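# Visualize the training curves by calling the helper defined above on the fitted history.
plot_results(hist)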
|
import warnings
warnings.filterwarnings("ignore")
import os
from pathlib import Path
import random
from datasets import load_dataset, Dataset
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
classification_report,
confusion_matrix,
ConfusionMatrixDisplay,
)
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
import torch
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
from transformers import DataCollatorWithPadding
from transformers import (
BertConfig,
BertForSequenceClassification,
PreTrainedTokenizerFast,
)
from transformers import Trainer, TrainingArguments, EarlyStoppingCallback
from tokenizers import normalizers
from tokenizers.decoders import ByteLevel as ByteLevelDecoder
from tokenizers.normalizers import NFD, Lowercase, NFKC
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Whitespace, ByteLevel
from tokenizers import Tokenizer, models, trainers
from tqdm.auto import tqdm
os.environ["WANDB_DISABLED"] = str("true")
os.environ["TOKENIZERS_PARALLELISM"] = str("true")
# There is no description of the dataset. But I like it, since it consists of 3 datasets (human, dog, chimpanzee). Each dataset has a dna sequence and 7 possible classes. Each dna sequence is described by the nucleotides A (adenine), C (cytosine), G (guanine) and T (thymine). N also occurs in the dataset and denotes non-determined nucleotides. Please have a look at the dataset introduction [here](https://www.kaggle.com/code/tarunsolanki/classifying-dna-sequence-using-ml).
# This notebook is part of a small work project. Please have a look at [part 1](https://www.kaggle.com/steffenhaeussler/dna-sequence-classification-part-1/) and [part 2](https://www.kaggle.com/steffenhaeussler/dna-sequence-classification-part-2). In this part, I will implement a TransformerEncoder network. This doesn't make much sense here, since the sequence length is too big for a transformer. But I picked this dataset as an example and want to compare it with other methods. Additionally, I used sklearn and tensorflow before; now it's pytorch's turn. And here I failed and switched to the transformers framework. I will add the pytorch code at the bottom of this notebook and explain my issues; it's just not feasible with my current timeframe to track the problem down. The transformers implementation is mostly copy and paste, and it took me one hour to get a working model.
# ## Data preparation
def clean_text(examples):
examples = examples["text"].split("\t")
return {"text": examples[0], "label": examples[1]}
# We need to create the dataset from the text file and also remove the first line (the header).
dataset = load_dataset(
"text",
data_files="/kaggle/input/dna-sequence-dataset/human.txt",
split="train[1:]",
streaming=False,
)
dataset = dataset.map(clean_text)
n_classes = len(set(dataset["label"]))
# I'm not sure if the datasets library allows stratified splits. Here I create a train - val - test split.
seed = 83110
split = [0.8, 0.1, 0.1]
data = pd.DataFrame(dataset)
X_train, X_test, y_train, y_test = train_test_split(
data["text"],
data["label"],
test_size=split[1],
train_size=1 - split[1],
random_state=seed,
shuffle=True,
stratify=data["label"],
)
n_ratio = X_test.shape[0] / X_train.shape[0]
X_train, X_val, y_train, y_val = train_test_split(
X_train,
y_train,
test_size=n_ratio,
train_size=1 - n_ratio,
random_state=seed,
shuffle=True,
stratify=y_train,
)
print(X_train.shape[0], X_val.shape[0], X_test.shape[0])
# Out of convenience, we store the splits as csv files. I also ran into some numpy conversion errors with the datasets library, and this approach just works.
pd.DataFrame({"text": X_train, "label": y_train}).to_csv(
"/kaggle/working/train.csv", index=False
)
pd.DataFrame({"text": X_val, "label": y_val}).to_csv(
"/kaggle/working/val.csv", index=False
)
pd.DataFrame({"text": X_test, "label": y_test}).to_csv(
"/kaggle/working/test.csv", index=False
)
dataset = load_dataset(
"csv",
data_files={
"train": ["/kaggle/working/train.csv"],
"val": ["/kaggle/working/val.csv"],
"test": ["/kaggle/working/test.csv"],
},
)
# ## Tokenizer
# The transformers library is quite handy. Setting up a custom tokenizer is not really needed, but since I already have the code and the overhead is not that big, I just go for it.
tokenizer = Tokenizer(models.BPE())
tokenizer.normalizer = normalizers.Sequence(
[
NFD(),
]
)
# Our tokenizer also needs a pre-tokenizer responsible for converting the input to a ByteLevel representation.
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[ByteLevel(add_prefix_space=False), Whitespace()]
)
tokenizer.decoder = ByteLevelDecoder()
# We have to deal only with 5 characters.
base_vocab = ["A", "C", "G", "T", "N"]
print(f"Size of our base vocabulary: {len(base_vocab)}")
# And with a vocab_size of 10, I make sure that we only use the base vocabulary. The other 5 items in the vocab are the special tokens. A small experiment for the future would be a bigger vocab size and how it affects the model performance. My assumption is that the performance should improve a lot, since we would be able to use more information in our training.
vocab_size = 10
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
show_progress=True,
initial_alphabet=base_vocab,
special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
iter_dataset = iter(dataset["train"])
def batch_iterator(batch_size=10):
for _ in tqdm(range(0, round(len(dataset["train"]), -1), batch_size)):
yield [next(iter_dataset)["text"] for _ in range(batch_size)]
tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
# A small test, and the encoder does what it should do: it encodes every character. Again, this could be solved with a one-liner.
output = tokenizer.encode("ATGCCCCAACTAAATACTACCGTATGGCCCACCATAATTACCCCCA")
print(output.tokens)
print(output.ids)
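# As noted above, the same character-level encoding could be done with a plain
# dict comprehension (illustrative sketch only, not used further below):
char_to_id = {c: i for i, c in enumerate("ACGTN")}
print([char_to_id[c] for c in "ATGCN"])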
tokenizer.save(f"/kaggle/working/tokenizer.json")
tk_tokenizer = Tokenizer.from_file(f"/kaggle/working/tokenizer.json")
tokenizer = PreTrainedTokenizerFast(tokenizer_object=tk_tokenizer)
tokenizer.add_special_tokens(
{
"pad_token": "[PAD]",
"unk_token": "[UNK]",
"sep_token": "[SEP]",
"cls_token": "[CLS]",
"bos_token": "[CLS]",
"eos_token": "[SEP]",
"mask_token": "[MASK]",
}
)
# ## Data
max_length = 512
# The dataset is small, so we can tokenize the dna sequences beforehand and keep them in memory. We also need to truncate and pad the dna sequences, since they are much longer than what can be processed with a transformer model.
def preprocess_function(examples):
return tokenizer(examples["text"], truncation=True, max_length=max_length)
tokenized_ds = dataset.map(preprocess_function, batched=True)
tokenized_ds = tokenized_ds.with_format("torch")
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=max_length)
tokenized_ds
# ## Model
# We create a small text classification model for fast iteration. This is just playing around, to see how well we can perform on a subset of the dna sequence.
max_position_embeddings = 512
hidden_size = 768
num_hidden_layers = 2 # 12
num_attention_heads = 4 # 12
intermediate_size = 3072
drop_out = 0.1
model_path = "/kaggle/working/model"
config = BertConfig(
num_labels=n_classes,
# mask_token_id = 4,
bos_token_id=1,
sep_token_id=2,
# pad_token_id = 3,
eos_token_id=2,
max_position_embeddings=max_position_embeddings,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act="gelu",
hidden_dropout_prob=drop_out,
attention_probs_dropout_prob=drop_out,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
vocab_size=vocab_size,
use_cache=True,
classifier_dropout=None,
onnx_export=False,
)
# Setting up the classification model: it only has 15 million parameters. This is the effect of the small vocab_size and the reduced number of hidden layers and attention heads.
model = BertForSequenceClassification(config=config)
print(f"n of parameters: {model.num_parameters():_}")
# ## Model training
# Basic BERT training parameters. We don't have much data, so I set the number of epochs to 50. Also, increasing the learning_rate didn't help.
learning_rate = 1e-4 # bert
weight_decay = 1e-2 # bert
lr_scheduler_type = "linear"
num_train_epochs = 50 # 5 but training set is small
train_batch_size = 32
eval_batch_size = 32
gradient_accumulation_steps = 2
eval_accumulation_steps = 2
warmup_steps = 0
adam_beta1 = 0.9 # bert
adam_beta2 = 0.999 # bert
adam_epsilon = 1e-8 # bert
max_grad_norm = 1.0 # bert
training_args = TrainingArguments(
output_dir=model_path,
overwrite_output_dir=True,
learning_rate=learning_rate,
weight_decay=weight_decay,
lr_scheduler_type=lr_scheduler_type,
num_train_epochs=num_train_epochs,
adam_beta1=adam_beta1,
adam_beta2=adam_beta2,
adam_epsilon=adam_epsilon,
max_grad_norm=max_grad_norm,
evaluation_strategy="epoch",
per_device_train_batch_size=train_batch_size, # depends on memory
per_device_eval_batch_size=eval_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
# eval_accumulation_steps=eval_accumulation_steps,
save_strategy="epoch",
save_total_limit=3,
prediction_loss_only=False,
report_to="tensorboard",
log_level="warning",
logging_strategy="epoch",
# fp16 = True,
# fp16_full_eval=True,
load_best_model_at_end=True,
metric_for_best_model="loss",
greater_is_better=False,
push_to_hub=False,
dataloader_pin_memory=True,
)
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
accuracy = accuracy_score(y_true=labels, y_pred=predictions)
recall = recall_score(y_true=labels, y_pred=predictions, average="weighted")
precision = precision_score(y_true=labels, y_pred=predictions, average="weighted")
f1 = f1_score(y_true=labels, y_pred=predictions, average="weighted")
return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
early_stopping = EarlyStoppingCallback(
early_stopping_patience=3, early_stopping_threshold=0.02
)
callbacks = [early_stopping]
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=tokenized_ds["train"],
eval_dataset=tokenized_ds["val"],
compute_metrics=compute_metrics,
tokenizer=tokenizer,
callbacks=callbacks,
)
trainer.train()
trainer.evaluate()
# ## Test
# Let's have a look at the test data:
y_hat = trainer.predict(tokenized_ds["test"])
# I definitely expected worse. The weighted f1-score is not too bad, considering that we only use a short subsequence of the DNA. However, we cannot predict class 5 correctly. Probably we don't have enough training data, or the relevant signal is not contained in the short subsequences we use.
for weight in ["micro", "macro", "weighted"]:
f1 = f1_score(
np.argmax(y_hat[0], axis=1), tokenized_ds["test"]["label"], average=weight
)
print(f"{weight}: {f1}")
print(classification_report(tokenized_ds["test"]["label"], np.argmax(y_hat[0], axis=1)))
cm = confusion_matrix(tokenized_ds["test"]["label"], np.argmax(y_hat[0], axis=1))
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=list(range(n_classes))
)
disp.plot(xticks_rotation="vertical")
# to be continued - my failed transformer implementation
# ## Pytorch
# I followed the following examples when implementing it:
# - [official tutorial](https://pytorch.org/tutorials/beginner/transformer_tutorial.html)
# - [Harvard's amazing guide](https://nlp.seas.harvard.edu/2018/04/03/attention.html)
# - [my coursera implementation](https://www.coursera.org/learn/attention-models-in-nlp?specialization=natural-language-processing)
# It is running and something is happening, but the model doesn't improve. I assume this is some kind of obvious bug, the kind that takes you two days to find while everyone else spots it immediately. :)
import math
import os
from tempfile import TemporaryDirectory
import time
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.nn.utils.rnn import pad_sequence
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
human_data = pd.read_table("/kaggle/input/dna-sequence-dataset/human.txt")
ctoi = {"A": 1, "C": 2, "G": 3, "N": 5, "T": 4}
vocab_size = len(ctoi)
n_classes = human_data["class"].nunique()
def encode(s):
return [ctoi[i] for i in s]
train_val_split = 0.8
X_train_torch, X_val_torch, y_train_torch, y_val_torch = train_test_split(
human_data["sequence"],
human_data["class"],
test_size=1 - train_val_split,
train_size=train_val_split,
random_state=seed,
shuffle=True,
stratify=human_data["class"],
)
class DNADataset(Dataset):
def __init__(self, data, classes, num_classes):
self.data = data
self.classes = classes
self.num_classes = num_classes
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x = self.data[idx]
y = self.classes[idx]
# Convert the label to a one-hot encoded tensor
label = torch.zeros(self.num_classes)
label[y] = 1
return x, label
def collate_fn(batch):
(
label_list,
text_list,
) = (
[],
[],
)
for text, label in batch:
label_list.append(label)
encoded_text = torch.tensor(encode(text[:max_length]), dtype=torch.int64)
text_list.append(encoded_text)
label_list = torch.stack(label_list)
text_list = pad_sequence(text_list, batch_first=True, padding_value=0)
return text_list.to(device), label_list.long().to(device)
train_dataset = DNADataset(X_train_torch.tolist(), y_train_torch.tolist(), n_classes)
val_dataset = DNADataset(X_val_torch.tolist(), y_val_torch.tolist(), n_classes)
train_loader = DataLoader(
train_dataset, batch_size=train_batch_size, collate_fn=collate_fn
)
val_loader = DataLoader(val_dataset, batch_size=eval_batch_size, collate_fn=collate_fn)
class TransformerModel(nn.Module):
def __init__(
self,
ntoken: int,
d_model: int,
nhead: int,
d_hid: int,
nlayers: int,
dropout: float = 0.5,
):
super().__init__()
self.model_type = "Transformer"
self.pos_encoder = PositionalEncoding(d_model, dropout)
encoder_layers = TransformerEncoderLayer(d_model, nhead, d_hid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, d_model)
self.d_model = d_model
self.classifier = nn.Linear(d_model, ntoken)
self.init_weights()
def init_weights(self) -> None:
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.classifier.bias.data.zero_()
self.classifier.weight.data.uniform_(-initrange, initrange)
def forward(self, src: Tensor, src_mask: Tensor = None) -> Tensor:
"""
Arguments:
src: Tensor, shape ``[seq_len, batch_size]``
src_mask: Tensor, shape ``[seq_len, seq_len]``
Returns:
output Tensor of shape ``[seq_len, batch_size, ntoken]``
"""
src = self.encoder(src) * math.sqrt(self.d_model)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, src_mask)
output = self.classifier(output)
return output
class PositionalEncoding(nn.Module):
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)
)
pe = torch.zeros(max_len, 1, d_model)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
def forward(self, x: Tensor) -> Tensor:
"""
Arguments:
x: Tensor, shape ``[seq_len, batch_size, embedding_dim]``
"""
x = x + self.pe[: x.size(0)]
return self.dropout(x)
model = TransformerModel(
n_classes, max_length, num_attention_heads, hidden_size, num_hidden_layers, drop_out
).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
def train(model: nn.Module) -> None:
model.train() # turn on train mode
epoch_loss = 0
epoch_acc = 0
epoch_count = 0
log_interval = 20
start_time = time.time()
num_batches = len(train_dataset) // train_batch_size
for n_batch, (inputs, targets) in enumerate(train_loader, 1):
optimizer.zero_grad()
output = model(inputs)
loss = criterion(output, targets)
correct = output.argmax(axis=1) == targets
epoch_acc += (output.argmax(1) == targets).sum().item()
epoch_count += correct.size(0)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
epoch_loss += loss.item()
if n_batch % log_interval == 0 and n_batch > 0:
lr = scheduler.get_last_lr()[0]
ms_per_batch = (time.time() - start_time) * 1000 / log_interval
cur_loss = epoch_loss / (log_interval * n_batch)
print(
f"| epoch {epoch:3d} | {n_batch:5d}/{num_batches:5d} batches | "
f"loss {cur_loss:5.2f} | current accuracy: {epoch_acc/epoch_count:.2f}"
)
total_loss = 0
start_time = time.time()
def evaluate(model: nn.Module) -> float:
model.eval() # turn on evaluation mode
epoch_loss = 0
epoch_acc = 0
epoch_count = 0
with torch.no_grad():
for n_batch, (data, targets) in enumerate(val_loader):
output = model(data)
loss = criterion(output, targets)
epoch_loss += loss.item()
correct = output.argmax(axis=1) == targets
epoch_acc += (output.argmax(1) == targets).sum().item()
epoch_count += correct.size(0)
return epoch_loss / (len(val_dataset) - 1), epoch_acc / epoch_count
best_val_loss = float("inf")
epochs = 20
with TemporaryDirectory() as tempdir:
best_model_params_path = os.path.join(tempdir, "best_model_params.pt")
for epoch in range(1, epochs + 1):
epoch_start_time = time.time()
train(model)
val_loss, val_acc = evaluate(model)
elapsed = time.time() - epoch_start_time
print("-" * 89)
print(
f"| end of epoch {epoch:3d} | time: {elapsed:5.2f}s | "
f"valid loss {val_loss:5.2f} | val_acc {val_acc:.2f}"
)
print("-" * 89)
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(), best_model_params_path)
scheduler.step()
model.load_state_dict(torch.load(best_model_params_path)) # load best model states
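# As suspected above, the most likely culprit is the classification setup: the encoder output is never
# pooled over the sequence, the final linear layer projects to `ntoken` instead of `n_classes`, and
# CrossEntropyLoss receives one-hot targets. Below is a minimal, hedged sketch of one possible fix
# (assuming a recent PyTorch with `batch_first=True` support); it is not a verified solution and it
# reuses the PositionalEncoding class defined above.
class TransformerClassifier(nn.Module):
    def __init__(
        self,
        vocab_size: int,
        n_classes: int,
        d_model: int = 128,
        nhead: int = 4,
        d_hid: int = 256,
        nlayers: int = 2,
        dropout: float = 0.1,
    ):
        super().__init__()
        # +1 because the collate function pads with token id 0, which is not in ctoi
        self.embedding = nn.Embedding(vocab_size + 1, d_model, padding_idx=0)
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        encoder_layer = TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first=True)
        self.transformer_encoder = TransformerEncoder(encoder_layer, nlayers)
        self.classifier = nn.Linear(d_model, n_classes)

    def forward(self, src: Tensor) -> Tensor:
        x = self.embedding(src) * math.sqrt(self.embedding.embedding_dim)  # [batch, seq, d_model]
        # PositionalEncoding above expects sequence-first input, so transpose around the call
        x = self.pos_encoder(x.transpose(0, 1)).transpose(0, 1)
        x = self.transformer_encoder(x)  # [batch, seq, d_model]
        x = x.mean(dim=1)  # mean-pool over the sequence
        return self.classifier(x)  # [batch, n_classes]


# Hypothetical usage: with this head, CrossEntropyLoss expects integer class indices, e.g.
# loss = criterion(model_fix(inputs), targets.argmax(dim=1)) if the one-hot collate function is kept.
# model_fix = TransformerClassifier(vocab_size, n_classes).to(device)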
|
# # What is Bag of words in Natural Language Processing
# In Natural Language Processing (NLP), the bag of words model is a simplified representation of text that disregards grammar and word order and focuses solely on the frequency of words in a document or corpus.
# The model represents a text document as a bag (multiset) of its words, keeping track of how often each word occurs. For example, consider the following two sentences:
# Sentence 1: "I love pizza"
# Sentence 2: "Pizza is my favorite food"
# To apply the bag of words model, we first create a vocabulary of all the unique words in the corpus. For this example, the vocabulary would be:
# ["I", "love", "pizza", "is", "my", "favorite", "food"]
# Then, we represent each sentence as a vector, where each element of the vector corresponds to the frequency of a word in the vocabulary. The vectors for the two sentences would be:
# Sentence 1: [1, 1, 1, 0, 0, 0, 0]
# Sentence 2: [0, 0, 1, 1, 1, 1, 1]
# Note that the order of the words does not matter, and each word is treated as an independent feature. The resulting vectors can be used as input to machine learning algorithms for various NLP tasks such as sentiment analysis, text classification, and topic modeling.
# # Bag of words python code
from sklearn.feature_extraction.text import CountVectorizer
# Define the corpus
corpus = [
"I love pizza",
"Pizza is my favorite food",
"I also love burgers",
"Burgers are great too",
]
# Create an instance of CountVectorizer
vectorizer = CountVectorizer()
# Fit the vectorizer to the corpus and transform the corpus
bag_of_words = vectorizer.fit_transform(corpus)
bag_of_words.toarray()
# Print the vocabulary
print("Vocabulary: ", vectorizer.get_feature_names())
# Print the bag of words representation for each sentence
for i, sentence in enumerate(corpus):
print("Sentence", i + 1, ":", bag_of_words[i].toarray())
# # How is the Bag of Words model used in text classification? An explanation with Python code
from sklearn.datasets import fetch_20newsgroups # dataset
from sklearn.feature_extraction.text import CountVectorizer # convert text->vectors
from sklearn.naive_bayes import MultinomialNB # Ml Algo
from sklearn.metrics import accuracy_score, classification_report # check accuracy
categories = ["alt.atheism", "comp.graphics", "sci.med", "soc.religion.christian"]
train_data = fetch_20newsgroups(
subset="train", categories=categories, shuffle=True, random_state=42
)
test_data = fetch_20newsgroups(
subset="test", categories=categories, shuffle=True, random_state=42
)
train_data["data"][0]
len(train_data["data"][0])
# Create an instance of CountVectorizer
vectorizer = CountVectorizer()
# Fit the vectorizer to the training data and transform the data
train_features = vectorizer.fit_transform(train_data.data)
train_features.toarray()
# Print the vocabulary
print("Vocabulary: ", vectorizer.get_feature_names())
len(vectorizer.get_feature_names())
len(train_features.toarray())
train_data.target
# Train a Naive Bayes classifier on the vectorized training data
clf = MultinomialNB()
clf.fit(train_features, train_data.target)
# Transform the test data using the same vectorizer
test_features = vectorizer.transform(test_data.data)
test_features.toarray()
len(test_features.toarray())
# Predict the labels of the test data
predicted_labels = clf.predict(test_features)
predicted_labels
# Evaluate the performance of the classifier
print("Accuracy:", accuracy_score(test_data.target, predicted_labels))
print(classification_report(test_data.target, predicted_labels))
|
# #### EEMT 5400 IT for E-Commerce Applications
# ##### HW4 Max score: (1+1+1)+(1+1+2+2)+(1+2)+2
# You will use two different datasets in this homework and you can find their csv files in the below hyperlinks.
# 1. Car Seat:
# https://raw.githubusercontent.com/selva86/datasets/master/Carseats.csv
# 2. Bank Personal Loan:
# https://raw.githubusercontent.com/ChaithrikaRao/DataChime/master/Bank_Personal_Loan_Modelling.csv
# #### Q1.
# a) Perform PCA for both datasets. Create the scree plots (eigenvalues).
# b) Suggest the optimum number of components for each dataset with explanation.
# c) Save the PCAs as carseat_pca and ploan_pca respectively.
# a)
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
# a) Load the datasets
carseats_df = pd.read_csv(
"https://raw.githubusercontent.com/selva86/datasets/master/Carseats.csv"
)
ploan_df = pd.read_csv(
"https://raw.githubusercontent.com/ChaithrikaRao/DataChime/master/Bank_Personal_Loan_Modelling.csv"
)
carseats_df
ploan_df
# One-hot encode the 'Urban', 'US' and 'ShelveLoc' columns with pd.get_dummies
encoded_cols = pd.get_dummies(carseats_df[["Urban", "US", "ShelveLoc"]])
carseats_df_new = pd.concat([encoded_cols, carseats_df], axis=1)
carseats_features = carseats_df_new.drop(["Sales", "Urban", "ShelveLoc", "US"], axis=1)
carseats_features
# a)prepare the datasets for PCA
carseats_scaled = StandardScaler().fit_transform(carseats_features)
ploan_features = ploan_df.drop(["ID", "ZIP Code", "Personal Loan"], axis=1)
ploan_scaled = StandardScaler().fit_transform(ploan_features)
# perform PCA on the standardized data
carseats_pca = PCA().fit(carseats_scaled)
ploan_pca = PCA().fit(ploan_scaled)
# Create scree plots
plt.plot(
range(1, len(carseats_pca.explained_variance_ratio_) + 1),
carseats_pca.explained_variance_ratio_,
"bo-",
)
plt.title("Carseats Scree Plot")
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance Ratio")
plt.show()
plt.plot(
range(1, len(ploan_pca.explained_variance_ratio_) + 1),
ploan_pca.explained_variance_ratio_,
"bo-",
)
plt.title("Bank Personal Loan Scree Plot")
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance Ratio")
plt.show()
# **b)**
# For the car seat dataset, the explained variance drops off quickly over the first few components and levels off from about the third component onwards, so we suggest 3 components for this dataset.
# For the bank personal loan dataset, the scree plot is more gradual, but there is still a noticeable elbow around the fourth component, so we suggest 4 components for this dataset. (A quick cumulative explained-variance check follows below.)
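# A quick, hedged check of the suggestions above (not part of the original answer): printing the
# cumulative explained variance ratio makes the cut-off choice easier to justify.
import numpy as np

for name, pca in [("Carseats", carseats_pca), ("Bank Personal Loan", ploan_pca)]:
    cumulative = np.cumsum(pca.explained_variance_ratio_)
    print(name, "cumulative explained variance:", np.round(cumulative, 3))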
#
# c) Transform the original data into principal component space
carseat_pca = carseats_pca.transform(carseats_scaled)
ploan_pca = ploan_pca.transform(ploan_scaled)
carseat_pca
ploan_pca
# #### Q2. (Car Seat Dataset)
# a) Convert the non-numeric variables to numeric by using get_dummies() method in pandas. Use it in this question.
# b) Use the scikit learn variance filter to reduce the dimension of the dataset. Try different threshold and suggest the best one.
# c) Some columns may have high correlation. For each set of highly correlated variables, keep one variable only and remove the rest of highly correlated columns. (Tips: You can find the correlations among columns by using .corr() method of pandas dataframe. Reference: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.corr.html)
# d) Perform linear regression to predict the Sales with datasets from part b and part c respectively and compare the result
#
# a)
carseats_df = pd.get_dummies(
carseats_df, columns=["ShelveLoc", "Urban", "US"], drop_first=True
)
carseats_df
# b)
from sklearn.feature_selection import VarianceThreshold
# b)
# Set different threshold values
thresholds = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
# b)
for thresh in thresholds:
# Create variance threshold object
vt = VarianceThreshold(threshold=thresh)
# Fit and transform data
carseats_df_var = vt.fit_transform(carseats_df)
# Print results
print(
"Threshold = {}: Number of Features = {}".format(
thresh, carseats_df_var.shape[1]
)
)
# **b)**
# Based on the results of this code, the best threshold value appears to be 0.3, which reduces the number of features to 8.
# c)
import numpy as np
# create correlation matrix
corr_matrix = carseats_df.corr().abs()
# create upper triangle mask
mask = np.triu(np.ones_like(corr_matrix, dtype=bool), k=1)  # k=1 excludes the diagonal (self-correlations)
# find index of feature columns with high correlation
high_corr = np.where(corr_matrix > 0.8, True, False)
high_corr = high_corr & mask
high_corr_idx = np.unique(np.where(high_corr)[1])
# remove highly correlated columns
high_corr_cols = carseats_df.columns[high_corr_idx]
if "Sales" in high_corr_cols:
high_corr_cols = high_corr_cols.drop("Sales")
carseats_df_corr = carseats_df.drop(high_corr_cols, axis=1)
carseats_df_corr
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets for variance threshold dataset
X_train_var, X_test_var, y_train_var, y_test_var = train_test_split(
carseats_df_var, carseats_df["Sales"], test_size=0.2, random_state=42
)
# Train a linear regression model on the variance threshold feature selection dataset
reg_var = LinearRegression().fit(X_train_var, y_train_var)
# Evaluate the model on the test set
score_var = reg_var.score(X_test_var, y_test_var)
print(
"R-squared value for variance threshold feature selection: {:.2f}".format(score_var)
)
# Split the data into training and testing sets for correlation analysis dataset
X_train_corr, X_test_corr, y_train_corr, y_test_corr = train_test_split(
carseats_df_corr, carseats_df["Sales"], test_size=0.2, random_state=42
)
# Train a linear regression model on the dataset with highly correlated features removed
reg_corr = LinearRegression().fit(X_train_corr, y_train_corr)
# Evaluate the model on the test set
score_corr = reg_corr.score(X_test_corr, y_test_corr)
print(
"R-squared value for correlation analysis feature selection: {:.2f}".format(
score_corr
)
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data1 = pd.read_csv("/kaggle/input/email-spam-dataset/lingSpam.csv")
data1.info()
data1.head()
data2 = pd.read_csv("/kaggle/input/email-spam-dataset/enronSpamSubset.csv")
data2.info()
data2.head()
data3 = pd.read_csv("/kaggle/input/email-spam-dataset/completeSpamAssassin.csv")
data3.info()
data3.head()
# delete unneeded columns
data1.drop("Unnamed: 0", inplace=True, axis=1)
data2.drop(["Unnamed: 0", "Unnamed: 0.1"], inplace=True, axis=1)
data3.drop("Unnamed: 0", inplace=True, axis=1)
# concatenate data
data = pd.concat([data1, data2, data3], axis=0)
# remove missing values (NaN)
data.dropna(inplace=True)
data.info()
data.head()
# # Text preprocessing
emails = data["Body"]
# lowering case
emails = [text.lower() for text in emails]
emails[0]
# removal of special characters and numbers
import re
emails = [re.sub("[^a-zA-Z]", " ", text) for text in emails]
emails[0]
# removal of extra spaces
emails = [re.sub(" +", " ", text) for text in emails]
emails[0]
# removal of hyperlinks
emails = [re.sub(r"http\S+", "", text) for text in emails]
# removal of HTML tags
emails = [re.sub(r"'<.*?>'", "", text) for text in emails]
emails[0]
# tokenization
import nltk
emails = [nltk.word_tokenize(text) for text in emails]
emails[0]
# removal of stopwords
stopwords = nltk.corpus.stopwords.words("english")
emails = [[word for word in text if word not in stopwords] for text in emails]
emails[0]
# Stemming or lemmatization - lemmatizers are slower, but they properly normalize tenses and noun forms. Use the WordNet lemmatizer, ideally with POS tags (see the sketch further below).
# lemmatization
nltk.data.path.append("/kaggle/input/corpora/")
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
emails = [[lemmatizer.lemmatize(word) for word in text] for text in emails]
emails[0]
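# Hedged sketch of the POS-aware lemmatization mentioned above (not used in the rest of this
# notebook). It assumes the NLTK 'averaged_perceptron_tagger' data is available in addition to WordNet.
from nltk import pos_tag
from nltk.corpus import wordnet


def to_wordnet_pos(treebank_tag):
    # Map Penn Treebank tags to WordNet POS constants; default to noun.
    if treebank_tag.startswith("J"):
        return wordnet.ADJ
    if treebank_tag.startswith("V"):
        return wordnet.VERB
    if treebank_tag.startswith("R"):
        return wordnet.ADV
    return wordnet.NOUN


tagged_example = pos_tag(emails[0])
[lemmatizer.lemmatize(word, to_wordnet_pos(tag)) for word, tag in tagged_example][:10]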
# Ideas to try later:
# - a word cloud of the most frequent words
# - TF-IDF, TF-IDF-weighted W2V, and average W2V
# - a text-length feature and an average-word-length feature, to see whether they are useful
# - LDA topic modeling as a feature
# - bag of words: almost all of the preprocessing steps above could also be done inside CountVectorizer
# https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(max_features=21020)
x = vectorizer.fit_transform([" ".join(text) for text in emails]).toarray()
print(x.shape)
vectorizer.get_feature_names_out()[
:10
] # first 10 in alphabetical order from 21020 most used
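# Hedged sketch of the TF-IDF idea noted above - it could be a drop-in replacement for the raw
# counts, but it is not used in the rest of this notebook.
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf_vectorizer = TfidfVectorizer(max_features=21020)
x_tfidf = tfidf_vectorizer.fit_transform(" ".join(text) for text in emails)
print(x_tfidf.shape)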
# split to train and test data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, np.asarray(data["Label"]), random_state=42, test_size=0.2
)
x_train.shape
# # Classification algorithms
# ( https://towardsdatascience.com/top-10-binary-classification-algorithms-a-beginners-guide-feeacbd7a3e2 )
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
def print_stats(algorithm):
# actually perform classification
y_pred = algorithm.predict(x_test)
# Thus in binary classification, the count of
# true negatives is 0,0
# false negatives is 1,0
# true positives is 1,1
# false positives is 0,1
conf = confusion_matrix(y_pred=y_pred, y_true=y_test)
tn, fp, fn, tp = conf.ravel()
print(
"Accuracy on training data: {:.2f}%".format(
100 * algorithm.score(x_train, y_train)
)
)
print(
"Accuracy on testing data: {:.2f}%".format(
100 * algorithm.score(x_test, y_test)
)
)
print("Precision: {:.2f}%".format(100 * precision_score(y_pred, y_test)))
print("Recall: {:.2f}%".format(100 * recall_score(y_pred, y_test)))
print("F1 Score: {:.2f}%".format(100 * f1_score(y_pred, y_test)))
import seaborn
import matplotlib.pyplot as plt
ax = plt.subplot()
seaborn.heatmap(conf, annot=True, fmt="", linewidths=2, cmap="Greens")
ax.set_xlabel("Predicted")
ax.set_ylabel("Real")
ax.xaxis.set_ticklabels(["Ham", "Spam"])
ax.yaxis.set_ticklabels(["Ham", "Spam"])
plt.show()
# Naïve Bayes
from sklearn.naive_bayes import GaussianNB
NB = GaussianNB()
NB.fit(x_train, y_train)
print_stats(NB)
from sklearn.naive_bayes import MultinomialNB
MNB = MultinomialNB()
MNB.fit(x_train, y_train)
print_stats(MNB)
from sklearn.linear_model import LogisticRegression
LR = LogisticRegression(max_iter=1000)
LR.fit(x_train, y_train)
print_stats(LR)
# very long and not very accurate, 12 minutes
# from sklearn.neighbors import KNeighborsClassifier
# KNN = KNeighborsClassifier(algorithm = 'brute', n_jobs=-1)
# KNN.fit(x_train, y_train)
# print_stats(KNN)
from sklearn.svm import LinearSVC
SVM = LinearSVC(C=0.0001)
SVM.fit(x_train, y_train)
print_stats(SVM)
# 4 minutes
# from sklearn.tree import DecisionTreeClassifier
# CLF = DecisionTreeClassifier()
# CLF.fit(x_train, y_train)
# print_stats(CLF)
from sklearn.ensemble import RandomForestClassifier
# n_estimators = number of decision trees
RF = RandomForestClassifier(n_estimators=30, max_depth=9)
RF.fit(x_train, y_train)
print_stats(RF)
# Voting Classifier
from sklearn.ensemble import VotingClassifier
EVC = VotingClassifier(
estimators=[("MNB", MNB), ("LR", LR), ("RF", RF), ("SVM", SVM)], voting="hard"
)
EVC.fit(x_train, y_train)
print_stats(EVC)
|
import socket
hostname = socket.gethostname()
if hostname.startswith("ug"):
hostnum = hostname.split("ug")[-1]
import sys
sys.stdout = open(f"{hostname}.txt", "w")
from random import randint
import random
import matplotlib.pyplot as plt
import numpy as np
example = "/kaggle/input/tsp-toronto/20230407_03-33_adjMatrix.txt"
fpath = example.split("adjMatrix")[0]
adjMatrix = np.loadtxt(fpath + "adjMatrix" + ".txt")
alls = np.loadtxt(fpath + "all" + ".txt").astype(int)
dropoffs = np.loadtxt(fpath + "dropoffs" + ".txt").astype(int)
pickups = np.loadtxt(fpath + "pickups" + ".txt").astype(int)
def printtsp(lst):
for num in lst:
num = alls[num]
if len(str(num)) == 3:
print("\033[32m" + str(num) + "\033[0m", end=" ") # print in green
else:
print(num, end=" ")
print() # print a new line at the end
def isPickup(x):
return x % 2 == 0
def toPickup(x):
return x - 1 # x & 0xFFFFFFFE
def toDropoff(x):
return x + 1 # x | 1
def cost(route):
cost = 0
for i in range(len(route) - 1):
cost += adjMatrix[route[i]][route[i + 1]]
return cost
import time
def timer(func):
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print(f"Elapsed_time={end_time - start_time:.2f} seconds")
return result
return wrapper
import statistics
def testDistribution(sample_count, numOfIteration, T):
def decorator(func):
def wrapper(*args, **kwargs):
trials = []
for i in range(sample_count):
trials.append(func(*args, **kwargs))
std = statistics.stdev(trials)
avg = statistics.mean(trials)
plt.clf()
n, bins, patches = plt.hist(trials, color="orange")
print(std, avg)
plt.axvline(avg, color="red", linestyle="dashed", linewidth=2)
t = plt.text(avg + 0.1 * std, np.max(n) * 0.15, f"{avg:.1f}")
t.set_bbox(dict(facecolor="white", alpha=0.7, edgecolor="black"))
plt.axvline(avg + std, color="g", linestyle="dashed", linewidth=1)
plt.text(
avg + std * 1.1, np.max(n) * 0.1, "{:.1f}".format(avg + std), color="g"
)
plt.axvline(avg - std, color="g", linestyle="dashed", linewidth=1)
plt.text(
avg - std * 0.9, np.max(n) * 0.1, "{:.1f}".format(avg - std), color="g"
)
plt.axvline(avg + 2 * std, color="b", linestyle="dashed", linewidth=1)
plt.text(
avg + 2.1 * std,
np.max(n) * 0.05,
"{:.1f}".format(avg + 2 * std),
color="b",
)
plt.axvline(avg - 2 * std, color="b", linestyle="dashed", linewidth=1)
plt.text(
avg - 1.9 * std,
np.max(n) * 0.05,
"{:.1f}".format(avg - 2 * std),
color="b",
)
plt.title(
f"{numOfIteration} iterations, T={T}, std={std:.1f}, avg={avg:.1f}"
)
plt.xlabel("score (kilosec)")
plt.ylabel("freq")
plt.savefig(
f"r_{numOfIteration}iteration_{T}T_{sample_count}samples_{hostname}.png"
)
plt.show()
print(
f"iteration={numOfIteration:<8} T={T:<6} std={std}\tavg={avg}", end="\t"
)
return trials
return wrapper
return decorator
import math
def minCostItem(possible_nexts, current):
minCost = math.inf
minIndex = None
for i in range(len(possible_nexts)):
item = possible_nexts[i]
if item == current:
continue
if minCost > adjMatrix[current][item]:
minCost = adjMatrix[current][item]
minIndex = i
return minIndex
def greedy_route(num_of_points, firstitem):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
item = possible_nexts.pop(minCostItem(possible_nexts, route[-1]))
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
def random_route(num_of_points):
route = []
possible_nexts = list(range(0, num_of_points, 2))
while len(route) < num_of_points:
item = possible_nexts.pop(randint(0, len(possible_nexts) - 1))
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
def canMove(item, newLocation, etuor):
if isPickup(item):
if newLocation < etuor[toDropoff(item)]:
return True
else:
if newLocation > etuor[toPickup(item)]:
return True
return False
import math
def FEELING_GOOD(delta, temperature):
if temperature == 0:
return False
return random.random() < math.exp(-delta / temperature)
def twoOpt(route, temperature):
etuor = {}
for i, item in enumerate(route):
etuor[item] = i
cur_cost = cost(route)
# printtsp(route)
item = randint(0, len(route) - 1)
loc1 = etuor[item]
# print("item",alls[item], "located at",etuor[item])
if isPickup(item):
d = toDropoff(item)
# print("drop off located at", etuor[d])
possibleSwaps = range(0, etuor[d])
else: # is drop off
p = toPickup(item)
possibleSwaps = range(etuor[p] + 1, len(route))
# print("pickup located at", etuor[p])
for loc2 in possibleSwaps:
# print(loc1, loc2)
if canMove(route[loc2], loc1, etuor):
if loc1 != loc2:
route[loc1], route[loc2] = route[loc2], route[loc1]
delta = cost(route) - cur_cost
if delta > 0 and not FEELING_GOOD(delta, temperature): # bad
route[loc1], route[loc2] = route[loc2], route[loc1]
break
def somewhat_minCostItem(possible_nexts, current, degree_of_randomness):
row = adjMatrix_indexed[current]
for distance, tspIdx in row:
if tspIdx in possible_nexts and random.random() < degree_of_randomness:
return tspIdx
return possible_nexts[0]
def random_greedy_route(num_of_points, firstitem, degree_of_randomness):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
item = somewhat_minCostItem(possible_nexts, route[-1], degree_of_randomness)
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
def nth_minCostItem(possible_nexts, current, nth_best):
row = adjMatrix_indexed[current]
for distance, tspIdx in row:
if tspIdx in possible_nexts: # ??
if nth_best == 0:
return tspIdx
nth_best -= 1
return possible_nexts[0]
import random
def random_nth_greedy_route(num_of_points, firstitem, rand_list):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
if len(route) in rand_list:
item = nth_minCostItem(possible_nexts, route[-1], 1)
else:
item = nth_minCostItem(possible_nexts, route[-1], 0)
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
def findMin_random_route(
random_func, degree_of_randomness, numTryIteration, plot=False
):
baseline = 125000
min_cost = float("inf")
min_cost_list = []
cost_list = []
debug_counter = 0
for start_point in range(0, len(alls), 2):
counter = numTryIteration
while counter > 0:
# how_many_seconds = random.randint(1,degree_of_randomness)
# rand_list = random.sample( range(0, len(alls)), how_many_seconds) # generate n unique random numbers from 0 to 10
# route = random_func(len(alls), start_point ,rand_list)#degree_of_randomness
route = random_func(
len(alls), start_point, degree_of_randomness
) # degree_of_randomness
c = cost(route)
cost_list.append(c)
counter -= 1
debug_counter += 1
if c < min_cost:
if c < 115329:
update_costs(diff_cheat)
min_cost = c
min_route = route
counter = numTryIteration - 1
try:
print(min_cost, debug_counter, start_point, how_many_seconds)
except:
print(
min_cost,
"iteration:",
debug_counter,
"startingPoint:",
start_point,
)
print(f"\t {len(diff_cheat)} {diff_cheat}")
min_cost_list.append(min_cost)
if plot:
plt.plot(cost_list)
plt.plot(min_cost_list)
print(min_cost, counter, start_point, degree_of_randomness, numTryIteration)
return min_cost
diff_cheat = []
def smart_somewhat_minCostItem(possible_nexts, current, degree_of_randomness):
global diff_cheat
row = adjMatrix_indexed[current]
this_is_nth_best = 0
for distance, tspIdx in row:
if tspIdx in possible_nexts:
if random.random() < degree_of_randomness:
if this_is_nth_best > 0:
diff_cheat.append([current, tspIdx])
return tspIdx
this_is_nth_best += 1
return possible_nexts[0]
def smart_random_greedy_route(num_of_points, firstitem, degree_of_randomness):
global diff_cheat
diff_cheat.clear()
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
item = smart_somewhat_minCostItem(
possible_nexts, route[-1], degree_of_randomness
)
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
adjMatrix_indexed = [[0 for j in adjMatrix] for i in adjMatrix]
for r, row in enumerate(adjMatrix):
for c, item in enumerate(row):
adjMatrix_indexed[r][c] = (item, c)
adjMatrix_indexed[r].sort()
# adjMatrix_indexed[317].insert(0, (0.01, 57))
# findMin_random_route(random_greedy_route, 0.99, 100, True)  # 118781
def update_costs(change_list):
global adjMatrix_indexed
cost = None
for i, j in change_list:
adjMatrix_indexed[i].insert(0, (cost, j))
cheat = [(42, 266), (332, 168), (312, 276), (146, 197), (281, 88)] # 118152
# cheat = [(42, 266), (332, 168), (224, 277), (80, 215), (63, 131), (160, 88)]#[115329
update_costs(cheat)
# findMin_random_route(smart_random_greedy_route,0.99,20, True)#108
for T in [0, 10, 50, 100, 500, 750, 1000, 1500, 2000]:
for num in [10000, 30000, 50000, 100000, 500000, 1000000]:
pass
for T in [0, 10, 50, 100, 500, 750, 1000, 1500, 2000]:
for num in [1, 50, 100, 1000, 2000, 5000]:
pass
def experiment(_sample_count=100, _numOfIteration=10, _T=0.99):
@timer
@testDistribution(sample_count=_sample_count, numOfIteration=_numOfIteration, T=_T)
def inside():
# simulated_annealing(10,0)
return findMin_random_route(random_greedy_route, _T, _numOfIteration)
inside()
test_randomness = [
0.9,
0.91,
0.92,
0.93,
0.94,
0.95,
0.96,
0.97,
0.98,
0.985,
0.99,
0.995,
0.999,
0.9995,
]
print(len(test_randomness))
try:
ind = int(hostnum) % len(test_randomness)
experiment(3, 1, test_randomness[0])
except:
pass
@timer
def individual(T, numOfIteration, route):
costs = []
mincost = 99999999
for x in range(numOfIteration):
c = cost(route)
costs.append(c)
if c < mincost:
mincost = c
print(c)
twoOpt(route, T)
if x % 1000 == 0:
plt.plot(costs)
plt.pause(0.001)
print(T, numOfIteration, min(costs))
plt.plot(costs)
plt.title(f"base temperature={T}")
plt.xlabel("Iteration")
plt.ylabel("Cost")
plt.show()
plt.savefig(f"{numOfIteration}iteration_{T}T.png")
# individual(30,1000,random_route(len(alls)))#136191.12329999983
# individual(1,10000,greedy_route(len(alls), 4))#136191.12329999983
# individual(100, 100000, smart_random_greedy_route(len(alls),0,0.97))
def random_choice(pdf):
items, relative_probabilities = zip(*pdf)
total = np.sum(relative_probabilities)
r = random.uniform(0, 1) * total
s = 0
for item, prob in pdf:
s += prob
if s >= r:
break
return item
# pdf = [(0.1, 1), (0.05, 2), (0.05, 3)]
# dist_pdf = sorted(random_choice2(pdf, [2,3,4]) for _ in range(10000))
# plt.hist(dist_pdf)
[
(5.589278146337194e-07, 2),
(2.862325239123211e-07, 80),
(1.450768213806211e-07, 112),
(1.3032665367672836e-07, 50),
(9.49935635479437e-08, 194),
(7.432662102510769e-08, 38),
(7.350898670068442e-08, 216),
(6.048444020480834e-08, 70),
(2.42864354766737e-08, 36),
(1.2372930662760792e-08, 192),
(1.2065013876209576e-08, 4),
(9.659850683759049e-09, 42),
(8.9686022870579e-09, 260),
(4.456077950439913e-09, 202),
(3.2600227167409963e-09, 34),
(2.655070869106369e-09, 268),
(1.6649717244735812e-09, 280),
(1.002497688442608e-09, 110),
(3.9689889303377316e-10, 28),
(3.9346517440894083e-10, 84),
]
def random_choice2(pdf, possible_selections):
# pdf = [(0.1, 1), (0.05, 2), (0.05, 3)]
pdf = [pair for pair in pdf if (pair[1] in possible_selections)]
relative_probabilities, items = zip(*pdf)
total = np.sum(relative_probabilities)
r = random.uniform(0, 1) * total
s = 0
for prob, item in pdf:
s += prob
if s >= r:
break
return item
adjMatrix_distribution = [[0 for j in adjMatrix] for i in adjMatrix]
for r, row in enumerate(adjMatrix):
for c, item in enumerate(row):
adjMatrix_distribution[r][c] = (item, c)
del adjMatrix_distribution[r][r]
def convert_to_probability(example):
costs, tspidxs = zip(*example)
avg_1 = np.mean(costs)
def cost_to_probability_weight(cost, first, average):
# return 1/((cost-first+average/100))
steepness = 10
return np.exp(-(cost / first) * steepness) # 1/(2**cost)
first = np.min(costs)
averagecost = np.mean(costs)
probabilities = [cost_to_probability_weight(c, first, averagecost) for c in costs]
total_probability = np.sum(probabilities)
probabilities = [p / total_probability for p in probabilities]
return {idx: prob for prob, idx in zip(probabilities, tspidxs)}
probability_distribution = [convert_to_probability(i) for i in adjMatrix_distribution]
def probabilistic_somewhat_minCostItem(possible_nexts, current):
pdf = probability_distribution[current]
return random_choice2(list(zip(pdf.values(), pdf.keys())), possible_nexts)
def probabilistic_random_greedy_route(num_of_points, firstitem):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
choices = [] # from, to
while len(route) < num_of_points:
item = probabilistic_somewhat_minCostItem(possible_nexts, route[-1])
choices.append([route[-1], item])
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route, choices
current_best_cost = cost(greedy_route(len(alls), 0))
print("current_best_cost", current_best_cost)
cost_list = []
bestline = 125000  # reinforcement threshold; this value is an assumption - 'bestline' was undefined in the original code
for i in range(10000):
testroute, choices = probabilistic_random_greedy_route(len(alls), 0)
new_cost = cost(testroute)
cost_list.append(new_cost)
if new_cost < bestline:
ratio_delta_cost = 140000 / new_cost - 1
k = 3
print("\t updating", new_cost, "\t", ratio_delta_cost * 3)
for fromIdx, toIdx in choices:
probability_distribution[fromIdx][toIdx] += (
(1 - probability_distribution[fromIdx][toIdx]) * ratio_delta_cost * 3
)
if new_cost < current_best_cost:
current_best_cost = new_cost
print("newbest", new_cost)
for fromIdx, toIdx in choices:
probability_distribution[fromIdx][toIdx] += (
1 - probability_distribution[fromIdx][toIdx]
) * 0.5
plt.plot(cost_list)
# dist_pdf = [random_choice2(probability_distribution[0], _a) for _ in range(1000)]
# _dict = {}
# import pandas
# from collections import Counter
# letter_counts = Counter(dist_pdf)
# total_count = sum(letter_counts.values())
# df = pandas.DataFrame.from_dict(letter_counts, orient='index').sort_values(by=0, ascending=False)
# df = df / total_count * 100 # convert counts to percentage frequency
# df.plot(kind='bar')
# _a, _b = zip(*probability_distribution[2])
# plt.plot(_a)
# print(_a[:20])
# print(_b)
# print("time:",[ _ for _ in adjMatrix_distribution[2] if _[1] in _b][:20])
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler, QuantileTransformer
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer
from sklearn.ensemble import (
IsolationForest,
ExtraTreesRegressor,
AdaBoostRegressor,
StackingRegressor,
)
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
from scipy.stats import randint, uniform
import warnings
warnings.filterwarnings("ignore")
# Load data and drop columns that are redundant (Id is just a serial number) or have too many missing values
train_data = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
test_data = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
train_data = train_data.drop(
["Id", "Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"], axis=1
)
test_data = test_data.drop(
["Id", "Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"], axis=1
)
# Initialize preprocessors
x_train = train_data.iloc[:, :-1]
y_train = train_data.iloc[:, -1]
x_test = test_data.iloc[:, :]
outlier = IsolationForest(random_state=42)
selector = VarianceThreshold(threshold=0.3)
encoder = make_column_transformer(
(
OneHotEncoder(drop="first", sparse=False, handle_unknown="ignore"),
x_train.select_dtypes(include="object").columns,
),
remainder="passthrough",
verbose_feature_names_out=False,
)
pipe = make_pipeline(
encoder,
IterativeImputer(random_state=42),
StandardScaler(),
QuantileTransformer(output_distribution="normal", random_state=42),
)
# Encoding, Imputation, Scaling, Transformation
x_train = pd.DataFrame(
pipe.fit_transform(x_train), columns=encoder.get_feature_names_out()
)
x_test = pd.DataFrame(pipe.transform(x_test), columns=encoder.get_feature_names_out())
# Outlier Detection & Removal
data = pd.concat([x_train, y_train], axis=1)
clf = outlier.fit_predict(data)
score = pd.DataFrame(clf, columns=["Score"])
data = pd.concat([data, score], axis=1)
data = data[data["Score"] == 1]
data = data.drop(["Score"], axis=1)
x_train = data.iloc[:, :-1]
y_train = data.iloc[:, -1]
# Feature Selection
x_train = pd.DataFrame(
selector.fit_transform(x_train), columns=selector.get_feature_names_out()
)
x_test = pd.DataFrame(
selector.transform(x_test), columns=selector.get_feature_names_out()
)
"""xgb = XGBRegressor(random_state=0)
search = GridSearchCV(xgb, {'n_estimators': [500, 750, 1000], 'max_depth': [3, 4, 5], 'learning_rate': [0.01, 0.05, 0.1], 'colsample_bytree': [0.7, 0.85, 1]})
search.fit(x_train, y_train)
search.best_params_"""
# n_estimators = 1200, max_depth = 3, learning_rate = 0.05, colsample_bytree=0.7
"""mlp = MLPRegressor(hidden_layer_sizes=(10,10,10,10,10,), solver='lbfgs', random_state=0)
search = GridSearchCV(mlp, {'max_iter': [50, 100, 150], 'alpha': [0.1, 0.2, 0.5], 'max_fun': [2500, 5000, 10000]})
search.fit(x_train, y_train)
search.best_params_"""
# max_iter=150, alpha=0.1, max_fun=500
"""mlp = MLPRegressor(hidden_layer_sizes=(10,10,10,10,10,), max_iter=150, alpha=0.1, max_fun=500, solver='lbfgs', random_state=0)
mlp.fit(x_train, y_train)
y_pred = mlp.predict(x_test)"""
"""xgb = XGBRegressor(n_estimators=1200, max_depth=3, learning_rate=0.05, colsample_bytree=0.7, random_state=0)
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)"""
# Estimation
xgb = XGBRegressor(
n_estimators=1200,
max_depth=3,
learning_rate=0.05,
colsample_bytree=0.7,
random_state=42,
)
ada = AdaBoostRegressor(random_state=0)
mlp = MLPRegressor(
hidden_layer_sizes=(
10,
10,
10,
10,
10,
),
max_iter=150,
alpha=0.1,
max_fun=500,
solver="lbfgs",
random_state=0,
)
et = ExtraTreesRegressor(n_estimators=1000, random_state=0)
estimators = [("xgb", xgb), ("ada", ada), ("mlp", mlp)]
regressor = StackingRegressor(
estimators=estimators, final_estimator=et, passthrough=True
)
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
# Submission
df = pd.DataFrame()
df["Id"] = range(1461, 2920)
df["SalePrice"] = y_pred
df.to_csv("submission.csv", index=False)
|
# # Insurance Data Analysis and Prediction Practice
# This project uses a public insurance dataset obtained from kaggle.com. I use it to practice EDA and linear regression, following the Machine Learning with Python course on Udemy.
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
kaggle = True
if kaggle:
insurance_path = "/kaggle/input/insurance/insurance.csv"
else:
insurance_path = "./data/insurance.csv"
# # Import Data
df_insurance = pd.read_csv(insurance_path, sep=",")
df_insurance.head()
# ## null values
df_insurance.isna().sum()
# ## Categorical data
"""
we can identify that the sex, smoker and region columns are categorical, so they must be converted to numeric columns
"""
le_sex = LabelEncoder()
le_smk = LabelEncoder()
le_sex.fit(df_insurance.sex.drop_duplicates())
le_smk.fit(df_insurance.smoker.drop_duplicates())
df_insurance["sex_e"] = le_sex.transform(df_insurance.sex)
df_insurance["smoker_e"] = le_smk.transform(df_insurance.smoker)
# apply OneHotEncoder to region using column transformer class
ct = ColumnTransformer([("ohe", OneHotEncoder(), ["region"])], remainder="passthrough")
transformed = ct.fit_transform(df_insurance)
df_insurance = pd.DataFrame(transformed, columns=ct.get_feature_names_out())
# set the new names columns
df_insurance.columns = [
"region_northeast",
"region_northwest",
"region_southeast",
"region_southwest",
"age",
"sex",
"bmi",
"children",
"smoker",
"charges",
"sex_e",
"smoker_e",
]
df_insurance = df_insurance[
[
"age",
"sex",
"sex_e",
"bmi",
"children",
"smoker",
"smoker_e",
"region_northeast",
"region_northwest",
"region_southeast",
"region_southwest",
"charges",
]
]
# ## New Data Frame with categorical data encoded
df_insurance_t = df_insurance[
[
"age",
"sex_e",
"bmi",
"children",
"smoker_e",
"region_northeast",
"region_northwest",
"region_southeast",
"region_southwest",
"charges",
]
]
df_insurance_t = df_insurance_t.apply(pd.to_numeric)
df_insurance_t.head()
# # Descriptive analysis of data
# show the total rows and columns of dataset
df_insurance_t.shape
# show the estatistics describe analysis
df_insurance_t.describe()
# ## Distribution of Age
fig = px.histogram(df_insurance_t, nbins=25, x="age", color="sex_e", barmode="group")
fig.update_layout(title="Distribution of Age", height=700)
fig.show()
# ## Distribution of BMI
# BMI is a person's weight in kilograms divided by the square of their height in meters. A high BMI can indicate high body fat, and a low BMI can indicate body fat that is too low. To calculate your BMI you can use a BMI calculator, or look up your height and weight in a BMI index table.
# - If your BMI is less than 18.5, you are in the underweight range.
# - If your BMI is between 18.5 and 24.9, you are in the normal or healthy weight range.
# - If your BMI is between 25.0 and 29.9, you are in the overweight range.
# - If your BMI is 30.0 or higher, you are in the obese range.
# (A small illustrative categorization of the bmi column follows below.)
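# A small illustrative sketch (not part of the original analysis): label each record with the
# standard BMI category described above and count how many fall in each range.
pd.cut(
    df_insurance_t["bmi"],
    bins=[0, 18.5, 25.0, 30.0, float("inf")],
    labels=["underweight", "healthy", "overweight", "obese"],
    right=False,
).value_counts()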
fig = px.histogram(df_insurance_t, nbins=20, x="bmi", barmode="stack")
fig.update_layout(title="Distribution of BMI", height=700)
fig.show()
# ## Distribution of Childrens
fig = px.histogram(df_insurance_t, nbins=20, x="children")
fig.update_layout(title="Distribution of Children", height=700)
fig.show()
# ## Distribution of smoker
fig = px.histogram(df_insurance_t, nbins=3, x="smoker_e")
fig.update_layout(title="Distribution of BMI", height=700)
fig.show()
# ## Correlation analysis
df_insurance_t.dtypes
# create a df with correlation between variables
df_corr = df_insurance_t[
["age", "sex_e", "bmi", "children", "smoker_e", "charges"]
].corr(method="pearson")
df_corr.head()
fig = go.Figure(
go.Heatmap(
x=df_corr.columns, y=df_corr.columns, z=df_corr.values.tolist(), zmin=-1, zmax=1
)
)
fig.update_layout(width=800, height=700)
fig.show()
# ## Distribution of Charges for smokers and no smokers
# smokers -> 1
# no smoker -> 0
fig = px.box(df_insurance_t, x="smoker_e", y="charges", color="smoker_e")
fig.update_layout({"title": "Distribution of charges by smokers", "height": 700})
fig.show()
# ## Histogram of charges by Smokers
fig = px.histogram(df_insurance_t, x="charges", color="smoker_e")
fig.update_layout(height=700)
fig.show()
# ## Relation between Age and Charges by Smokers
fig = px.scatter(
df_insurance_t,
x="age",
y="charges",
color="smoker_e",
color_continuous_scale="Portland",
)
fig.update_layout({"title": "Total Charges by Age and Smokers", "height": 700})
fig.show()
# # Linear Regression Model
# ## Select the feature columns used to predict the insurance charges
# The region dummies show almost no correlation with the charges, so they are not included as features (a quick check follows below)
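# A quick, hedged check of the claim above: correlation of each region dummy with the charges.
df_insurance_t[
    ["region_northeast", "region_northwest", "region_southeast", "region_southwest", "charges"]
].corr(method="pearson")["charges"]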
df_features = df_insurance_t[["age", "bmi", "children", "smoker_e", "charges"]]
df_features.head()
# ## create a features and predict groups
x = df_features.iloc[:, 0:-1]
y = df_features.iloc[:, -1]
# ## Divide in train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.3)
# ## Create a Linear Regression model object
lr_model = LinearRegression(fit_intercept=True)
lr_model.fit(x_train, y_train)
# predictions with train data
pred_train = lr_model.predict(x_train)
# predictions with test data
pred_test = lr_model.predict(x_test)
# ## Metrics
mse_train = mean_squared_error(pred_train, y_train)
sqrt_mse_train = np.sqrt(mse_train)
mae_train = mean_absolute_error(pred_train, y_train)
mse_test = mean_squared_error(pred_test, y_test)
sqrt_mse_test = np.sqrt(mse_test)
mae_test = mean_absolute_error(pred_test, y_test)
print("MSE train -> ", mse_train)
print("SQRT MSE train -> ", sqrt_mse_train)
print("MAE train ->", mae_train)
print("MSE test -> ", mse_test)
print("SQRT MSE test -> ", sqrt_mse_test)
print("MAE test ->", mae_test)
df_coef = pd.DataFrame(lr_model.coef_, x_train.columns, columns=["Coef"])
df_coef
val_fred = lr_model.predict([[33, 24.5, 1, 1]])
print("value charge to frederick insurance = ", val_fred)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data2 = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
data2.head(10)
data_f = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
data_f.head()
df = data_f.drop("Id", axis=1)
df.head()
df3 = data2[["MSSubClass", "MSZoning", "LotFrontage", "LotArea"]]
df3.head()
import matplotlib.pyplot as plt
df3.plot(kind="bar")
plt.figure(figsize=(20, 8))
plt.xlabel
plt.ylabel
plt.title
plt.show()
df.boxplot(column="LotArea", by="MSZoning")
plt.ylabel("Lot Area")
plt.show()
data = data2.drop("Id", axis=1)
data.columns
df1 = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/train.csv")
df1.head()
df = df1.drop("Id", axis=1)
df.columns
df.isnull().sum()
import matplotlib.pyplot as plt
# Create a scatter plot for 'LotArea' and 'SalePrice'
plt.scatter(df["MSSubClass"], df["SalePrice"])
plt.title("MSSubClass vs SalePrice")
plt.xlabel("LotArea")
plt.ylabel("SalePrice")
plt.show()
# Create a scatter plot for 'LotArea' and 'SalePrice'
plt.scatter(df["LotArea"], df["SalePrice"])
plt.title("LotArea vs SalePrice")
plt.xlabel("LotArea")
plt.ylabel("SalePrice")
plt.show()
# Create a box plot for 'SalePrice'
plt.boxplot(df["SalePrice"])
plt.title("Distribution of SalePrice")
plt.ylabel("SalePrice")
plt.show()
import seaborn as sns
# Create a correlation matrix
corr_matrix = df.corr()
# Create a heatmap for the correlation matrix
sns.heatmap(corr_matrix, annot=True)
plt.title("Correlation Heatmap")
plt.show()
y = df.SalePrice
# Create X (After completing the exercise, you can return to modify this line!)
features = [
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
X = df[features]
X.head()
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Define a random forest model
model = RandomForestRegressor(random_state=1)
model.fit(train_X, train_y)
val_predictions = model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print(val_mae)
# print(val_predictions)
model = RandomForestRegressor(random_state=1)
model.fit(X, y)
pred = model.predict(X)
pred
# **Model Validation**
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor()
model.fit(train_X, train_y)
predict = model.predict(val_X)
mae = mean_absolute_error(val_y, predict)
mae
model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
model.fit(train_X, train_y)
mae = mean_absolute_error(val_y, model.predict(val_X))
print("Validated MAE: {:,.0f}".format(mae))
X = pd.read_csv("/kaggle/input/house-price-dataset/train(7).csv", index_col="Id")
X_test_full = pd.read_csv(
"/kaggle/input/house-price-dataset/test(3).csv", index_col="Id"
)
# Remove rows with missing target, separate target from predictors
X.dropna(axis=0, subset=["SalePrice"], inplace=True)
y = X.SalePrice
X.drop(["SalePrice"], axis=1, inplace=True)
# Break off validation set from training data
X_train_full, X_valid_full, y_train, y_valid = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=0
)
# Select categorical columns with relatively low cardinality
low_cardinality_cols = [
cname
for cname in X_train_full.columns
if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"
]
# Select numeric columns
numeric_cols = [
cname
for cname in X_train_full.columns
if X_train_full[cname].dtype in ["int64", "float64"]
]
my_cols = low_cardinality_cols + numeric_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
# One-hot encode the data
X_train = pd.get_dummies(X_train)
X_valid = pd.get_dummies(X_valid)
X_test = pd.get_dummies(X_test)
X_train, X_valid = X_train.align(X_valid, join="left", axis=1)
X_train, X_test = X_train.align(X_test, join="left", axis=1)
# **Build the Model**
from xgboost import XGBRegressor
model_1 = XGBRegressor(random_state=0)
model_1.fit(X_train, y_train)
predictions = model_1.predict(X_valid)
predictions
# **Model 1**
mae = mean_absolute_error(predictions, y_valid)
print("Mean Absolute Error:", mae)
# **Model 2**
model_2 = XGBRegressor(n_estimators=500, learning_rate=0.05)
model_2.fit(X_train, y_train)
predictions2 = model_2.predict(X_valid)
# predictions2
mae = mean_absolute_error(predictions2, y_valid)
print("Mean Absolute Error:", mae)
# **Model 3**
model_3 = XGBRegressor(n_estimators=500, learning_rate=0.005)
model_3.fit(X_train, y_train)
predictions3 = model_3.predict(X_valid)
mae = mean_absolute_error(predictions3, y_valid)
print("Mean Absolute Error:", mae)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
ff = pd.read_csv("/kaggle/input/fastfood-nutrition/fastfood.csv")
ff.head(5)
sns.barplot(x="restaurant", y="calories", data=ff)
# Add labels and title
plt.xlabel("restaurant")
plt.ylabel("calories")
plt.title("Restaurants by Calories")
plt.figure(figsize=(5, 20))
# Show the plot
plt.show()
# From the dataset you can see that Mcdonalds has the highest average calories per meal among the fast-food restaurants
sns.scatterplot(x="restaurant", y="trans_fat", data=ff)
# Add labels and title
plt.xlabel("Restaurants")
plt.ylabel("Trans_fat")
plt.title("Trans Fat per Restaurant")
# Show the plot
plt.show()
|
# ## 1. restaurant-business-rankings-2020
# #### - This notebook is an EDA of restaurant-business-rankings-2020
# #### - I hope it gives you some ideas.
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
F50 = pd.read_csv("../input/restaurant-business-rankings-2020/Future50.csv")
I100 = pd.read_csv("../input/restaurant-business-rankings-2020/Independence100.csv")
T250 = pd.read_csv("../input/restaurant-business-rankings-2020/Top250.csv")
# ## 2. NaN value check function
# ### Top250.csv contains NaN values, so you need to preprocess it before data mining.
def NanValueCount(data):
nancount = data.isnull().sum(axis=0)
nancount = pd.DataFrame(nancount, columns=["count"])
if nancount["count"].sum() == 0:
print("This dataframe don't have Nan values.")
else:
print("This dataframe have Nan values. Check dataframe")
NanValueCount(F50)
NanValueCount(I100)
NanValueCount(T250)
print(F50.columns)
print(I100.columns)
print(T250.columns)
fig = make_subplots(rows=1, cols=3)
fig.add_trace(go.Histogram(x=F50.Sales, name="Future50"), row=1, col=1)
fig.add_trace(go.Histogram(x=I100.Sales, name="Independence100"), row=1, col=2)
fig.add_trace(go.Histogram(x=T250.Sales, name="Top250"), row=1, col=3)
fig.show()
fig = make_subplots(rows=1, cols=2)
fig.add_trace(go.Histogram(x=F50.Units, name="Future50"), row=1, col=1)
fig.add_trace(go.Histogram(x=T250.Units, name="Top250"), row=1, col=2)
fig.show()
T250.Segment_Category = T250.Segment_Category.astype(str)
px.bar(T250.Segment_Category.value_counts())
# ### This code shows which category is popular.
# ### You can see that 'Quick service' and 'Varied Menu' are important for the restaurant rankings.
category_df = pd.DataFrame()
for i in range(T250.shape[0]):
if "&" in T250.loc[i, "Segment_Category"]:
temp = T250.loc[i, "Segment_Category"].split("&")
temp = pd.DataFrame(temp).T
category_df = pd.concat([category_df, temp], axis=0)
else:
temp = T250.loc[i, "Segment_Category"]
temp = [0, temp]
temp = pd.DataFrame(temp).T
category_df = pd.concat([category_df, temp], axis=0)
category_df.columns = ["Service", "Menu"]
category_df.index = range(category_df.shape[0])
px.histogram(category_df[category_df["Service"] != 0], x="Service").update_xaxes(
categoryorder="total descending"
)
px.bar(category_df["Menu"]).update_xaxes(categoryorder="total descending")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In this assignment I need to predict whether or not a company will go bankrupt when given some data from the company.
# Before anything can be done, I first need to import our data!
train_data = pd.read_csv("/kaggle/input/cap-4611-spring-21-assignment-1/train.csv")
test_data = pd.read_csv("/kaggle/input/cap-4611-spring-21-assignment-1/test.csv")
# Now that it's imported, I should take a look at the data and make sure everything looks good.
train_data.describe()
train_data
# Better check for missing values while I'm going through this.
print(
train_data.loc[:, "Bankrupt"].value_counts().sort_values(ascending=False)
/ train_data.loc[:, "Bankrupt"].count()
)
print("\n")
train_data.info()
# Ew, lots of finance words that I don't understand. Maybe pretty graphs are better for my peanut brain.
# Also, it may be important to note that we have a fairly skewed data set, in that most of the companies did NOT go bankrupt.
# We also DO NOT have any null entries, all 3410 are full! This means there are **NO** missing entries :)
# Let's take a quick gander at the graphs of each column. I'll make a simple loop that goes through and prints a graph for each.
# This hangs forever :(
# graph = sns.PairGrid(train_data, hue = 'Bankrupt')
# graph.map_diag(sns.histplot)
# graph.map_offdiag(sns.scatterplot)
# graph.add_legend()
# THIS IS UGLY I WANT IT MORE COMPRESSED BUT I DON'T KNOW HOW (THERE ARE SO MANY PICTURES)
for j in train_data.columns:
plt.hist(train_data[j].values)
plt.ylabel("Count")
plt.xlabel(j)
plt.show()
# Well, that was a bit of a mess, but at least it gives us a good visual of our data!
# It doesn't really look like there are outliers, but this isn't a great indicator. But, you know what is?
# A box plot! So, let's make one of those. The whiskers will make it much easier to tell whether or not we have outliers!
#
for j in train_data.columns:
sns.catplot(kind="box", data=train_data, y=j)
# That gave us a much better indicator of outliers, now I know that I definitely need to clamp the values! (also now I know that first graph was an absolute waste of time!)
# I will use the whiskers of the box plot to determine where our clamps lie. I guess this would be called the interquartile range (IQR) method.
# What I'll do is make a function that obtains the upper and lower quartiles for each column, and then checks each value within the column for outliers. If one exists, it will simply set its value to our upper or lower clamp.
def removeOutliers(data):
# I will ignore these, and perhaps I must do this for all booleans as well
# Edit: we DID have to do this for all booleans because it just overwrote all of the lesser of the two values
ignore = [
"id",
"Bankrupt",
"one if net income was negative for the last two year zero otherwise",
"one if total liabilities exceeds total assets zero otherwise",
]
# Go through each column (except for those we ignore)...
for column in data.columns:
k = 0
if column in ignore:
continue
# ...then find the quartiles...
firstQuartile = np.percentile(data.loc[:, column], 25)
thirdQuartile = np.percentile(data.loc[:, column], 75)
iqr = thirdQuartile - firstQuartile
lowWhisker = firstQuartile - (1.5 * iqr)
highWhisker = thirdQuartile + (1.5 * iqr)
# ...and cap the columns there
for i in data[column]:
if i < lowWhisker:
data.loc[k, column] = lowWhisker
if i > highWhisker:
data.loc[k, column] = highWhisker
k = k + 1
# There is definitely a prettier way to do this, but it's my birthday so cut me some slack.
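# For reference, here is a "prettier" vectorized sketch of the same IQR clamping idea using
# pandas clip(). It is only an illustration (it is not run on the data here); the loop-based
# removeOutliers above is what this notebook actually uses, and the same ignore list of
# boolean columns would need to be passed in.
def removeOutliersVectorized(data, ignore=("id", "Bankrupt")):
    for column in data.columns:
        if column in ignore:
            continue
        q1 = data[column].quantile(0.25)
        q3 = data[column].quantile(0.75)
        iqr = q3 - q1
        # clip() caps every value to the whisker range in one vectorized call
        data[column] = data[column].clip(lower=q1 - 1.5 * iqr, upper=q3 + 1.5 * iqr)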
# Now I can just run the function twice, passing in the data that I would like to modify.
removeOutliers(train_data)
removeOutliers(test_data)
for j in train_data.columns:
sns.catplot(kind="box", data=train_data, y=j)
# Beautiful BEAUTIFUL graphs with NO DOTS. This means we successfully got rid of all of the outliers!
# I was GOING to come back and perhaps normalize the data, but creating my models and testing them, I managed to beat the benchmark.
# I suppose this means that I didn't need to normalize or standardize the data at all!
# Now its on to decision tree classifier time.
# We are searching FOR y and using X to find it.
# I removed the ID and the Bankrupt columns from the features list, because ID will obviously have no correlation, and if I already know the company is bankrupt, testing for it would be pointless.
#
y = train_data.Bankrupt
feats = [
" ROA(C) before interest and depreciation before interest",
" ROA(A) before interest and % after tax",
" ROA(B) before interest and depreciation after tax",
" operating gross margin",
" realized sales gross margin",
" operating profit rate",
" tax Pre-net interest rate",
" after-tax net interest rate",
" non-industry income and expenditure/revenue",
" continuous interest rate (after tax)",
" operating expense rate",
" research and development expense rate",
" cash flow rate",
" interest-bearing debt interest rate",
" tax rate (A)",
" per Net Share Value (B)",
" Net Value Per Share (A)",
" Net Value Per Share (C)",
" Persistent EPS in the Last Four Seasons",
" Cash Flow Per Share",
" Revenue Per Share (Yuan)",
" Operating Profit Per Share (Yuan)",
" Per Share Net profit before tax (yuan)",
" realized sales gross profit growth rate",
" operating profit growth rate",
" after-tax net profit growth rate",
" regular net profit growth rate",
" continuous net profit growth rate",
" total asset growth rate",
" net value growth rate",
" total asset return growth rate Ratio",
" cash reinvestment %",
" current ratio",
" quick ratio",
" interest expense ratio",
" total debt/total net worth",
" debt ratio %",
" net worth/assets",
" long-term fund suitability ratio (A)",
" borrowing dependency",
" contingent liabilities/net worth",
" Operating profit/paid-in capital",
" net profit before tax/paid-in capital",
" inventory and accounts receivable/net value",
" total asset turnover",
" accounts receivable turnover",
" average collection days",
" inventory turnover rate (times)",
" fixed assets Turnover frequency",
" net worth turnover rate (times)",
" revenue per person",
" operating profit per person",
" allocation rate per person",
" working capital to total assets",
"Quick asset/Total asset",
"current assets/total assets",
"cash / total assets",
"Quick asset /current liabilities",
"cash / current liability",
"current liability to assets",
"operating funds to liability",
"Inventory/working capital",
"Inventory/current liability",
"current liability / liability",
"working capital/equity",
"current liability/equity",
"long-term liability to current assets",
"Retained Earnings/Total assets",
"total income / total expense",
"total expense /assets",
" current asset turnover rate",
" quick asset turnover rate",
" working capitcal turnover rate",
" cash turnover rate",
" Cash flow to Sales",
" fix assets to assets",
" current liability to liability",
"current liability to equity",
"equity to long-term liability",
"Cash flow to total assets",
"cash flow to liability",
"CFO to ASSETS",
"cash flow to equity",
"current liabilities to current assets",
"one if total liabilities exceeds total assets zero otherwise",
"net income to total assets",
"total assets to GNP price",
"No-credit interval",
"Gross profit to Sales",
"Net income to stockholder's Equity",
"liability to equity",
"Degree of financial leverage (DFL)",
"Interest coverage ratio( Interest expense to EBIT )",
"one if net income was negative for the last two year zero otherwise",
"equity to liability",
]
X = train_data[feats]
# It's time to make like a banana!
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, test_size=0.1, random_state=1
)
from sklearn.tree import DecisionTreeClassifier
# gini by default, entropy seemed to do little (made it worse apparently)
treeModel = DecisionTreeClassifier(max_features=6, max_depth=7, random_state=1)
treeModel.fit(X_train, y_train)
treePredict = treeModel.predict(X_test)
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
print(sklearn.metrics.classification_report(y_test, treePredict))
print("ROC AUC:", sklearn.metrics.roc_auc_score(y_test, treePredict))
# This little table is very nice and technically shows us everything. The only thing that is kinda obfuscated is the ROC score, so I made a separate line to print that out!
# This was very medium at predicting properly, and I messed with basically all of the parameters that were available.
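# A hedged side note (not part of the original workflow): roc_auc_score is usually more
# informative when given predicted probabilities rather than hard class labels, e.g. for the
# tree model above:
treeProba = treeModel.predict_proba(X_test)[:, 1]
print("ROC AUC (probabilities):", sklearn.metrics.roc_auc_score(y_test, treeProba))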
# Let's see how the fun forest does things!
# Reuse the same target y and feature list feats defined above for the tree model; X is unchanged.
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, test_size=0.1, random_state=1
)
from sklearn.ensemble import RandomForestClassifier
forestModel = RandomForestClassifier(
criterion="entropy", max_features=6, max_depth=7, random_state=1
)
forestModel.fit(X_train, y_train)
forestPredict = forestModel.predict(X_test)
print(sklearn.metrics.classification_report(y_test, forestPredict))
print("ROC AUC:", sklearn.metrics.roc_auc_score(y_test, forestPredict))
# After adjusting the parameters over and over and over again, I was unable to improve my accuracy. Just when I was about to go back and adjust my data, I had the 'brilliant' idea that I should just train with MORE data. So I dropped the test size in my splitter down to 10%, meaning we were training with 90% of the data, and suddenly my scores skyrocketed. This feels like it was not a good idea, but it worked I guess.
# The ROC AUC score does seem kind of (very) low, so I'm confused as to why it apparently improved so much vs the test data. Maybe this is just a consequence of only testing against 10% of the training data.
# Anyways, now just pull data from the test document and make predictions! We'll output this one because it seems to do better.
X_test = pd.get_dummies(test_data[feats])
preds = forestModel.predict_proba(X_test)
output = pd.DataFrame({"id": test_data.id, "Bankrupt": preds[:, 1]})
output.to_csv("ForestSub.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# I want to make **file names unique**, but here on the Kaggle platform the input data is **read only**, so the files can't be renamed in place.
# **To fix this issue**, we have to copy the data from the input directory to the output (working) directory and then rename the files.
#
import shutil
# set the path to input and output directories
input_path = "/kaggle/input/over-20-car-brands-dataset"
output_path = "/kaggle/working/over-20-car-brands-dataset"
# copy the entire input directory to the output directory
shutil.copytree(input_path, output_path)
# ### First renaming images to unique names
import os
import uuid
dataset_path = "/kaggle/working/over-20-car-brands-dataset"
for sub_dir in os.listdir(dataset_path):
sub_dir_path = os.path.join(dataset_path, sub_dir)
if os.path.isdir(sub_dir_path):
# get the list of image files in the subdirectory
image_files = [
f
for f in os.listdir(sub_dir_path)
if f.endswith(".jpg")
or f.endswith(".png")
or f.endswith(".jpeg")
or f.endswith(".svg")
]
# rename each image file with a unique name
for i, file_name in enumerate(image_files):
src_path = os.path.join(sub_dir_path, file_name)
file_ext = os.path.splitext(file_name)[1]
dst_name = str(uuid.uuid4()) + file_ext
dst_path = os.path.join(sub_dir_path, dst_name)
os.rename(src_path, dst_path)
# Well, now our files are renamed and ready for the next step
# ### Splitting files into training and testing folders automatically
# I do this because the amount of data is large, and I want to split **80% of the data into training** and **20% into testing** folders inside each class folder.
import os
import random
import shutil
# set the path to data set directory
dataset_path = "/kaggle/working/over-20-car-brands-dataset"
# set path for training directory
train_path = "/kaggle/working/train"
# set path for testing directory
test_path = "/kaggle/working/test"
# set the split ratio
split_ratio = 0.8
# iterating over subdirectories in the dataset directory
for sub_dir in os.listdir(dataset_path):
sub_dir_path = os.path.join(dataset_path, sub_dir)
if os.path.isdir(sub_dir_path):
        # create the training and testing subdirectories
train_sub_dir_path = os.path.join(train_path, sub_dir)
test_sub_dir_path = os.path.join(test_path, sub_dir)
os.makedirs(train_sub_dir_path, exist_ok=True)
os.makedirs(test_sub_dir_path, exist_ok=True)
        # get the list of image files in the subdirectory
image_files = [
f
for f in os.listdir(sub_dir_path)
if f.endswith(".jpg")
or f.endswith(".png")
or f.endswith(".jpeg")
or f.endswith(".svg")
]
        # shuffle the image files randomly
random.shuffle(image_files)
        # split the images
split_index = int(len(image_files) * split_ratio)
train_files = image_files[:split_index]
test_files = image_files[split_index:]
        # copy training files to the training subdirectory
for file_name in train_files:
src_path = os.path.join(sub_dir_path, file_name)
dst_path = os.path.join(train_sub_dir_path, file_name)
shutil.copy(src_path, dst_path)
# copy testing files to testing subdirectory
for file_name in test_files:
src_path = os.path.join(sub_dir_path, file_name)
dst_path = os.path.join(test_sub_dir_path, file_name)
shutil.copy(src_path, dst_path)
print("Dataset Split into training and testing sets.")
|
# Data Preprocessing
#
import cv2
import numpy as np
import pandas as pd
from keras.utils import np_utils
from keras.datasets import mnist
from sklearn.utils import shuffle
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
data_root = (
"/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv"
)
dataset = pd.read_csv(data_root).astype("float32")
dataset.rename(columns={"0": "label"}, inplace=True)
letter_x = dataset.drop("label", axis=1)
letter_y = dataset["label"]
(digit_train_x, digit_train_y), (digit_test_x, digit_test_y) = mnist.load_data()
letter_x = letter_x.values
print(letter_x.shape, letter_y.shape)
print(digit_train_x.shape, digit_train_y.shape)
print(digit_test_x.shape, digit_test_y.shape)
digit_data = np.concatenate((digit_train_x, digit_test_x))
digit_target = np.concatenate((digit_train_y, digit_test_y))
print(digit_data.shape, digit_target.shape)
digit_target += 26
data = []
for flatten in letter_x:
image = np.reshape(flatten, (28, 28, 1))
data.append(image)
letter_data = np.array(data, dtype=np.float32)
letter_target = letter_y
digit_data = np.reshape(
digit_data, (digit_data.shape[0], digit_data.shape[1], digit_data.shape[2], 1)
)
print(letter_data.shape, letter_target.shape)
print(digit_data.shape, digit_target.shape)
shuffled_data = shuffle(letter_data)
rows, cols = 10, 10
plt.figure(figsize=(20, 20))
for i in range(rows * cols):
plt.subplot(cols, rows, i + 1)
plt.imshow(shuffled_data[i].reshape(28, 28), interpolation="nearest", cmap="gray")
plt.show()
shuffled_data = shuffle(digit_data)
rows, cols = 10, 10
plt.figure(figsize=(20, 20))
for i in range(rows * cols):
plt.subplot(cols, rows, i + 1)
plt.imshow(shuffled_data[i].reshape(28, 28), interpolation="nearest", cmap="gray")
plt.show()
data = np.concatenate((digit_data, letter_data))
target = np.concatenate((digit_target, letter_target))
print(data.shape, target.shape)
shuffled_data = shuffle(data)
rows, cols = 10, 10
plt.figure(figsize=(20, 20))
for i in range(rows * cols):
plt.subplot(cols, rows, i + 1)
plt.imshow(shuffled_data[i].reshape(28, 28), interpolation="nearest", cmap="gray")
plt.show()
train_data, test_data, train_labels, test_labels = train_test_split(
data, target, test_size=0.2
)
print(train_data.shape, train_labels.shape)
print(test_data.shape, test_labels.shape)
train_data = train_data / 255.0
test_data = test_data / 255.0
train_labels = np_utils.to_categorical(train_labels)
test_labels = np_utils.to_categorical(test_labels)
train_data = np.reshape(
train_data, (train_data.shape[0], train_data.shape[1], train_data.shape[2], 1)
)
test_data = np.reshape(
test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2], 1)
)
print(train_data.shape, test_data.shape)
print(train_labels.shape, test_labels.shape)
train_label_counts = [0 for i in range(36)]
test_label_counts = [0 for i in range(36)]
for i in range(train_data.shape[0]):
train_label_counts[np.argmax(train_labels[i])] += 1
for i in range(test_data.shape[0]):
test_label_counts[np.argmax(test_labels[i])] += 1
frequency = [train_label_counts, test_label_counts]
fig = plt.figure(figsize=(8, 6))
ax = fig.add_axes([0, 0, 1, 1])
x = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
plt.xticks(range(len(frequency[0])), x)
plt.title("train vs. test data distribution")
plt.xlabel("character")
plt.ylabel("frequency")
ax.bar(np.arange(len(frequency[0])), frequency[0], color="b", width=0.35)
ax.bar(np.arange(len(frequency[1])) + 0.35, frequency[1], color="r", width=0.35)
ax.legend(labels=["train", "test"])
np.save("train_data.npy", train_data)
np.save("train_labels.npy", train_labels)
np.save("test_data.npy", test_data)
np.save("test_labels.npy", test_labels)
# CNN Architecture
#
import numpy as np
import visualkeras as vk # pip install visualkeras
import pandas as pd
import seaborn as sn
from keras.models import Sequential
from matplotlib import pyplot as plt
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import (
Conv2D,
Activation,
MaxPooling2D,
Flatten,
Dense,
Dropout,
BatchNormalization,
)
train_data = np.load("train_data.npy")
test_data = np.load("test_data.npy")
train_labels = np.load("train_labels.npy")
test_labels = np.load("test_labels.npy")
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(32, (5, 5), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(2, 2))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dense(36, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
vk.layered_view(model)
best_loss_checkpoint = ModelCheckpoint(
filepath="../models/best_loss_model.h5",
monitor="loss",
save_best_only=True,
save_weights_only=True,
mode="min",
)
best_val_loss_checkpoint = ModelCheckpoint(
filepath="../models/best_val_loss_model.h5",
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
mode="min",
)
print(train_data.shape, test_data.shape)
print(train_labels.shape, test_labels.shape)
history = model.fit(
train_data,
train_labels,
validation_data=(test_data, test_labels),
epochs=10,
batch_size=200,
callbacks=[best_loss_checkpoint, best_val_loss_checkpoint],
)
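# A minimal sketch (added for illustration, not part of the original notebook) for inspecting
# the history object returned by fit() above; it assumes pyplot is available as plt, which was
# imported earlier in this script.
plt.figure(figsize=(8, 4))
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.xlabel("epoch")
plt.ylabel("categorical cross-entropy")
plt.legend()
plt.show()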
|
# # **Import libraries**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # 1. Load the data
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
display(train_df.head(10).style.set_caption("Train data"))
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
display(test_df.head().style.set_caption("Test data"))
# # 2. Exploratory data analysis (EDA)
train_df.shape
test_df.shape
train_df.describe()
# Concatenate train and test data
all_df = pd.concat([train_df, test_df], axis=0)
# Split back up
# train_df = all_df.iloc[:len(train_df)]
# test_df = all_df.iloc[len(train_df):]
# Distribution of passengers per class
all_df.groupby("Pclass").size().plot(
kind="pie",
legend=True,
autopct="%1.1f%%",
)
plt.title("Class distribution")
all_df.groupby("Pclass")["Survived"].sum().plot(
kind="bar",
legend=True,
)
# plt.title("Class distribution")
all_df.groupby("Pclass").size().plot(kind="bar")
all_df.groupby(["Pclass", "Survived"]).sum().unstack().plot(kind="bar", stacked=True)
all_df.groupby(["Pclass", "Survived"]).sum().unstack().plot(kind="bar", stacked=True)
# How Pclass & Cabin are related to survival
cl_sur = all_df.groupby("Pclass")["Survived"].sum()
cl_dis = all_df.groupby("Pclass").sum()
ax = cl_sur.plot.bar(rot=0)
# ax = df.plot.bar(stacked=True)
# Plot the first x and y axes:
# all_df.plot(x = 'Survived', y = 'Pclass', ax = ax)
# Plot the second x and y axes. By secondary_y = True a second y-axis is requested:
# (see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html for details)
# all_df.plot(x = 'Survived', y = 'Cabin', ax = ax, secondary_y = True)
# **Correlation of variables**
# calculate the correlation matrix
plt.figure(figsize=(10, 6))
sns.heatmap(all_df.corr(), annot=True, square=True, cmap="RdBu", vmax=1, vmin=-1)
plt.title("Correlations Between Variables", size=14)
plt.xticks(size=13)
plt.yticks(size=13)
plt.show()
cols_with_missing = list(
all_df.isnull().sum(axis=0)[all_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
train_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
cols_with_missing = list(
train_df.isnull().sum(axis=0)[train_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
train_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
# Things to address:
# * Prediction column = "Survived"
# * Hotencoding columns = "Sex", "Embarked" (see the sketch after this list)
# * Round column = "Age"
# * Nan's in columns
# * Ranges = "Fare"
# * Define = "SibSp", "Parch"
# * Compare = "PassengerId" if they are unique or are they in both datasets.
# * Drop = "Name", "Ticket"
# Distribution of 'Sex' in the dataset
all_df.groupby("Sex").size().plot(
kind="pie",
legend=True,
autopct="%1.1f%%",
)
plt.title("Class distribution")
# # **Preprocessing**
# Preprocessing the column 'Age'
# * Option 1) Fill the nan's with the mean of the Age of each Sex
# * Option 2) Fill the nan's with the most frequent value of the Age of each Sex
# * Option 3) Use NLP with the titles and the most frequent value of each person's title.
# This is how the first two options can be visualized:
all_df["Age"].plot(
kind="hist",
legend=True,
)
x = all_df["Age"]
y = all_df["Age"].mode()
plt.axvline(x.mean(), color="r", label="Mean")
plt.axvline(y.item(), color="k", label="Mode")
# place legend outside
plt.legend(bbox_to_anchor=(1.0, 1), loc="upper left")
# rendering plot
plt.title("Age histogram including mean and mode")
plt.show()
all_df.groupby("Sex")["Age"].mean().round()
all_df.groupby("Sex")["Age"].agg(pd.Series.mode)
# **NLP**
# The third option uses natural language processing techniques, slicing out the different titles in front of the passengers' names and obtaining the most frequent age for each title. We've been inspired by the previous work of ALLOHVK (https://www.kaggle.com/code/allohvk/titanic-missing-age-imputation-tutorial-advanced)
display(all_df[(all_df.Age.isnull()) & (all_df.Name.str.contains("Master"))])
# Conclusion: All NaN's with the title 'Master' belong to class 3.
all_df[all_df.Name.str.contains("Master")]["Age"].mean()
all_df[all_df.Name.str.contains("Master")]["Age"].value_counts().idxmax()
# **Natural language processing (NLP)**
train_df["Title"], test_df["Title"] = [
df.Name.str.extract(" ([A-Za-z]+)\.", expand=False) for df in [train_df, test_df]
]
train_df.head(), test_df.head()
train_df.groupby(["Title", "Pclass"])["Age"].agg(pd.Series.mode)
train_df.groupby(["Title", "Pclass"])["Age"].agg(
[pd.Series.mode, "mean", "median", "count"]
)
df = pd.concat([train_df, test_df], axis=0)
traindex = train_df.index
testdex = test_df.index
df.groupby(["Title", "Pclass", "Sex"])["Age"].agg(
[pd.Series.mode, "mean", "median", "count"]
)
df.groupby(["Title"])["Age"].agg([pd.Series.isnull, "count"])
[
df["Age"].fillna(
df.groupby(["Pclass", "Sex", "Title"])["Age"].transform("median"), inplace=True
)
for df in [train_df, test_df]
]
cols_with_missing = list(
train_df.isnull().sum(axis=0)[train_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
train_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
test_df.iloc(test_df["Age"].isnull())
cols_with_missing = list(
test_df.isnull().sum(axis=0)[test_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
test_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
columns_nans = test_df.columns[test_df.isna().any()].tolist()
columns_nans
test_df["Age"].isna().any()
test_df.loc[test_df["Age"].isnull()]
[
df["Age"].fillna(
df.groupby(["Pclass", "Sex", "Title"])["Age"].transform("median"), inplace=True
)
for df in [train_df, test_df]
]
# We need to find a more elegant way to do this
test_df["Age"] = test_df["Age"].fillna(28)
test_df.loc[test_df["Age"].isnull()]
cols_with_missing = list(
test_df.isnull().sum(axis=0)[test_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
test_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
# Next Column we could use is fare
df.groupby(["Pclass", "Sex"])["Fare"].agg(["mean", "median", "count"])
df.groupby(["Pclass", "Sex"])["Fare"].plot(kind="box")
plt.title("Fare")
[
df["Fare"].fillna(
df.groupby(["Pclass", "Sex"])["Fare"].transform("median"), inplace=True
)
for df in [train_df, test_df]
]
cols_with_missing = list(
test_df.isnull().sum(axis=0)[test_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
test_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
# We still need to handle 'Embarked' & 'Cabin' (classify them or something)
df["Embarked"].isnull().sum()
df.loc[df["Embarked"].isnull()]
df["Embarked"].unique().sum().plot(kind="bar")
df.loc[df["Ticket"] == "113572"] # & (df.loc(df['Cabin'] == 'B28'))
##Let us turn our attention to the missing fare
display(df[df.Fare.isnull()])
##Let us get fare per person
for df in [train_df, test_df, df]:
df["PeopleInTicket"] = df["Ticket"].map(df["Ticket"].value_counts())
df["FarePerPerson"] = df["Fare"] / df["PeopleInTicket"]
##Valuecounts is the swissknife of Pandas and is deeply explained in my earlier notebook
##Just take the mean fare for the PORT S and the Pclass & fill it. Remember to consider FarePerPerson and not Fare
print(
"Mean fare for this category: ",
train_df[(train_df.Embarked == "S") & (train_df.Pclass == 3)][
"FarePerPerson"
].mean(),
)
df.loc[df.Fare.isnull(), ["Fare", "FarePerPerson"]] = round(
df[(df.Embarked == "S") & (df.Pclass == 3) & (df.PeopleInTicket == 1)][
"Fare"
].mean(),
1,
)
display(df[df.Embarked.isnull()])
##Fare is 40 per person (80 for 2 people) for Pclass 1 for 2 adults. Where could they have Embarked from?
##Let us groupby Embarked and check some statistics
train_df[(train_df.Pclass == 1)].groupby("Embarked").agg(
{"FarePerPerson": "mean", "Fare": "mean", "PassengerId": "count"}
)
##Only 1 family got on at Q. Also fare is 30 per person and this is definitely not the case
##From the data below, it seems fairly obvious that the fareperperson of 40 for the 2 missing cases maps to Port C
##Let us check same data for groups of 2 adults
train_df[
(train_df.Pclass == 1) & (train_df.PeopleInTicket == 2) & (train_df.Age > 18)
].groupby("Embarked").agg(
{"FarePerPerson": "mean", "Fare": "mean", "PassengerId": "count"}
)
# Check the values in the last 2 exercises against https://www.kaggle.com/code/allohvk/titanic-missing-age-imputation-tutorial-advanced
print(
train_df[
(~train_df.Cabin.isnull())
& (train_df.Pclass == 1)
& (train_df.PeopleInTicket == 2)
& (train_df.Sex == "female")
& (train_df.Age > 18)
]
.groupby("Embarked")
.agg({"FarePerPerson": "mean", "Fare": "mean", "PassengerId": "count"})
)
##Still port C comes out as a winner in all cases. We will go ahead with this
train_df.Embarked.fillna("C", inplace=True)
cols_with_missing = list(
train_df.isnull().sum(axis=0)[train_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
train_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
cols_with_missing = list(
    test_df.isnull().sum(axis=0)[test_df.isnull().sum(axis=0) > 0].index
)
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
sns.heatmap(
test_df.set_index(["PassengerId"])[cols_with_missing].isnull(), cmap="binary"
)
ax.set_title("Missing values")
plt.show()
df.Cabin.value_counts().plot(kind="bar", stacked=False)
df.groupby(["Pclass", "Cabin"])["FarePerPerson"].agg(["mean", "median", "count"])
df.groupby(["Cabin", "Ticket"])["FarePerPerson"].mean()
train_df[train_df.Pclass == 1].groupby(["Pclass", "Cabin"])["FarePerPerson"].agg(
["mean", "median", "count"]
)
# We'll explore the 'Cabin' column to see whether it may have some impact on survival. We based our work on: https://www.kaggle.com/code/ccastleberry/titanic-cabin-features/notebook
cabin_only = df[["Cabin", "Pclass"]].copy()
cabin_only["Cabin_Data"] = cabin_only["Cabin"].isnull().apply(lambda x: not x)
cabin_only["Deck"] = cabin_only["Cabin"].str.slice(0, 1)
cabin_only["Room"] = (
cabin_only["Cabin"]
.str.slice(1, 5)
.str.extract("([0-9]+)", expand=False)
.astype("float")
)
cabin_only[cabin_only["Cabin_Data"]]
cabin_only[cabin_only["Deck"] == "F"]
cabin_only.drop(
["Cabin", "Cabin_Data", "Pclass"], axis=1, inplace=True, errors="ignore"
)
cabin_only["Deck"] = cabin_only["Deck"].fillna("N")
cabin_only["Room"] = cabin_only["Room"].fillna(cabin_only["Room"].mean())
cabin_only.info()
# Hot encoding the 'Deck' column
def one_hot_column(df, label, drop_col=False):
"""
This function will one hot encode the chosen column.
Args:
df: Pandas dataframe
label: Label of the column to encode
drop_col: boolean to decide if the chosen column should be dropped
Returns:
pandas dataframe with the given encoding
"""
one_hot = pd.get_dummies(df[label], prefix=label)
if drop_col:
df = df.drop(label, axis=1)
df = df.join(one_hot)
return df
def one_hot(df, labels, drop_col=False):
"""
This function will one hot encode a list of columns.
Args:
df: Pandas dataframe
labels: list of the columns to encode
drop_col: boolean to decide if the chosen column should be dropped
Returns:
pandas dataframe with the given encoding
"""
for label in labels:
df = one_hot_column(df, label, drop_col)
return df
cabin_only = one_hot(cabin_only, ["Deck"], drop_col=True)
cabin_only.head()
# Exploring Relationships between Cabin Data and Survivorship
for column in cabin_only.columns.values[1:]:
df[column] = cabin_only[column]
df.drop(["Ticket", "Cabin"], axis=1, inplace=True)
corr = df.corr()
corr["Pclass"].sort_values(ascending=False)
# Ok so this is quite interesting. It does seem that a lack of cabin data is highly correlated with lower class passengers and that decks B and C are fairly correlated with higher class passengers.
corr["Fare"].sort_values(ascending=False)
# Here again it appears as if no cabin data is correlated with lower Fare value and decks B and C are correlated with higher Fare values.
# Now let's split our sets back apart.
# Train
train_df = cabin_only.loc[traindex, :]
# Test
test_df = cabin_only.loc[testdex, :]
|
# # Introduction
# This project aims to utilize the power of machine learning to identify Eastern African bird species by their vocalizations. Birds are important indicators of biodiversity, and their presence or absence can indicate the success or failure of restoration projects. Traditional methods of observing and monitoring bird populations are logistically challenging and expensive. Passive acoustic monitoring combined with machine learning tools offers a promising solution to sample larger areas with higher temporal resolution. In this competition, participants are challenged to develop reliable classifiers with limited training data to help protect avian biodiversity in Africa.
# # Load the required libraries
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_io as tfio
import pandas as pd
import numpy as np
import librosa
import librosa.display
from librosa.filters import mel
import glob
import csv
import io
from IPython.display import Audio
import matplotlib.pyplot as plt
import IPython as ipd
# # Load the Data
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Exploratory data analysis
df = pd.read_csv("/kaggle/input/birdclef-2023/eBird_Taxonomy_v2021.csv")
df.head()
# From the output, we can see that our dataset contains information on different bird species such as their order, family, and species group, among others. We can also see that some of the columns have missing values.
# Next, let's check the data types of each column using the dtypes attribute.
df.dtypes
# We can see that all columns are currently stored as objects. We need to convert the TAXON_ORDER column to a numeric data type for easier analysis.
df["TAXON_ORDER"] = pd.to_numeric(df["TAXON_ORDER"], errors="coerce")
# Now that we have cleaned up the data, we can start with our analysis. Let's create a bar chart using seaborn to show the number of species in each order.
import seaborn as sns
sns.countplot(x="ORDER1", data=df)
# This should output a bar chart showing the number of species in each order.
# From the chart, we can see that the order with the most species is the Tinamiformes, followed by the Struthioniformes and Rheiformes. We can also see that there are some orders with only one species in the dataset.
# Next, let's create a scatter plot using seaborn to show the relationship between the order and family.
# Create a scatter plot
sns.scatterplot(
data=df, x="CATEGORY", y="ORDER1", hue="FAMILY", style="REPORT_AS", alpha=0.8
)
# Set the title and axes labels
plt.title("Bird Species by Category and Order")
plt.xlabel("Category")
plt.ylabel("Order")
# Remove the legend
plt.legend([], [], frameon=False)
# Show the plot
plt.show()
# # Explore the training data
# Load a sample audio files from two different species
audio_abe, sr_abe = librosa.load(
"/kaggle/input/birdclef-2023/train_audio/bawman1/XC115075.ogg"
)
audio_abh, sr_abh = librosa.load(
"/kaggle/input/birdclef-2023/train_audio/bkfruw1/XC113283.ogg"
)
# Play the audio
Audio(data=audio_abe, rate=sr_abe)
# Play the audio
Audio(data=audio_abh, rate=sr_abh)
metadata = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
metadata.head(5)
metadata = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
metadata.head()
competition_classes = sorted(metadata.primary_label.unique())
forced_defaults = 0
competition_class_map = []
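# NOTE (added comment): `classes` below is assumed to be the label list of the pretrained
# model (e.g. loaded from its class-map file earlier); it is not defined in this excerpt.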
for c in competition_classes:
try:
i = classes.index(c)
competition_class_map.append(i)
except:
competition_class_map.append(0)
forced_defaults += 1
## this is the count of classes not supported by our pretrained model
## you could choose to simply not predict these, set a default as above,
## or create your own model using the pretrained model as a base.
forced_defaults
# # Data Preprocessing:
def frame_audio(
audio_array: np.ndarray,
window_size_s: float = 5.0,
hop_size_s: float = 5.0,
sample_rate=32000,
) -> np.ndarray:
"""Helper function for framing audio for inference."""
""" using tf.signal """
if window_size_s is None or window_size_s < 0:
return audio_array[np.newaxis, :]
frame_length = int(window_size_s * sample_rate)
hop_length = int(hop_size_s * sample_rate)
framed_audio = tf.signal.frame(audio_array, frame_length, hop_length, pad_end=True)
return framed_audio
def ensure_sample_rate(waveform, original_sample_rate, desired_sample_rate=32000):
"""Resample waveform if required."""
if original_sample_rate != desired_sample_rate:
waveform = tfio.audio.resample(
waveform, original_sample_rate, desired_sample_rate
)
return desired_sample_rate, waveform
audio, sample_rate = librosa.load(
"/kaggle/input/birdclef-2023/train_audio/bawman1/XC115075.ogg"
)
sample_rate, wav_data = ensure_sample_rate(audio, sample_rate)
Audio(wav_data, rate=sample_rate)
sample_rate
# check whether the dataset is imbalanced
metadata["primary_label"].value_counts()
# let's read a sample audio using librosa
audio_file_path = "/kaggle/input/birdclef-2023/train_audio/barswa/XC113914.ogg"
librosa_audio_data, librosa_sample = librosa.load(audio_file_path)
print(librosa_audio_data)
# Lets plot the librosa audio data
plt.figure(figsize=(12, 4))
plt.plot(librosa_audio_data)
from pydub import AudioSegment
audio_file = AudioSegment.from_file(audio_file_path, format="ogg")
samples = audio_file.get_array_of_samples()
import soundfile as sf
wave_audio, wave_sample_rate = sf.read(audio_file_path)
wave_audio, wave_sample_rate
plt.figure(figsize=(12, 4))
time = np.arange(0, len(wave_audio)) / wave_sample_rate
plt.plot(time, wave_audio)
plt.xlabel("Time (seconds)")
plt.ylabel("Amplitude")
plt.show()
# Convert the audio files into Mel spectrograms using librosa.feature.melspectrogram():
# Load the audio file
file_path = "/kaggle/input/birdclef-2023/train_audio/edcsun3/XC479065.ogg"
audio, sample_rate = librosa.load(file_path)
# Compute the mel spectrogram
mel_spec = librosa.feature.melspectrogram(y=audio, sr=sample_rate)
# Convert the Mel spectrograms into decibel (dB) units using librosa.power_to_db():
mel_spec_db = librosa.power_to_db(mel_spec, ref=np.max)
# Normalize the Mel spectrograms using sklearn.preprocessing.minmax_scale():
import sklearn
from sklearn.preprocessing import MinMaxScaler
mel_spec_norm = sklearn.preprocessing.minmax_scale(
mel_spec_db, feature_range=(0, 1), axis=1
)
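# A quick sketch (added for illustration) to visualize the dB mel spectrogram computed above
# with librosa.display, which was imported at the top of this section:
plt.figure(figsize=(12, 4))
librosa.display.specshow(mel_spec_db, sr=sample_rate, x_axis="time", y_axis="mel")
plt.colorbar(format="%+2.0f dB")
plt.title("Mel spectrogram (dB)")
plt.show()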
# # Model
# Split the training data into training and validation sets using sklearn.model_selection.train_test_split():
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = sklearn.model_selection.train_test_split(
metadata["filename"], metadata["primary_label"], test_size=0.2, random_state=42
)
# Create a Keras Sequential model (here a simple dense stack as a placeholder; a pre-trained model could be used as the base with additional layers added for fine-tuning):
import tensorflow as tf
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(64, activation="relu", input_shape=(784,)),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model
# Compile the model with an appropriate optimizer, loss function, and evaluation metrics:
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# Prepare the data for training with a custom generator (rather than tf.keras.preprocessing.image.ImageDataGenerator):
import os
# Define the directory containing the audio files
train_audio_dir = "/kaggle/input/birdclef-2023/train_audio"
# Get a list of file paths for the audio files
file_paths = []
for subdir, _, files in os.walk(train_audio_dir):
for file in files:
if file.endswith(".ogg"):
file_path = os.path.join(subdir, file)
file_paths.append(file_path)
def train_datagen(train_audio_dir, train_metadata, label_encoder, batch_size):
while True:
batch_audio = []
batch_labels = []
# randomly sample batch_size number of unique audio files from the train_metadata dataframe
audio_files = (
train_metadata["filename"].sample(batch_size, replace=False).values
)
for audio_file in audio_files:
# load the audio file and extract the Mel spectrogram
audio_path = os.path.join(train_audio_dir, audio_file)
audio, sample_rate = librosa.load(audio_path, sr=SAMPLE_RATE)
mel_spectrogram = librosa.feature.melspectrogram(
y=audio, sr=sample_rate, n_mels=128, fmin=20, fmax=16000
)
mel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)
# randomly crop a segment of the Mel spectrogram
mel_spectrogram = random_crop(mel_spectrogram)
# convert the label to one-hot encoding
label = train_metadata.loc[
train_metadata["filename"] == audio_file, "primary_label"
].values[0]
label = label_encoder.transform([label])[0]
batch_audio.append(mel_spectrogram)
batch_labels.append(label)
# convert the batch of Mel spectrograms and labels to numpy arrays
batch_audio = np.array(batch_audio)
batch_labels = np.array(batch_labels)
yield batch_audio, batch_labels
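# NOTE: train_datagen above calls random_crop() and uses SAMPLE_RATE, neither of which is
# defined in this notebook. The following is a minimal, hypothetical sketch of what such a
# helper and constant might look like (an assumption, not the author's actual implementation):
SAMPLE_RATE = 32000  # assumed target sample rate, matching the framing helper above
def random_crop(mel_spectrogram, crop_width=256):
    """Randomly crop (or right-pad) a mel spectrogram along the time axis to a fixed width."""
    n_mels, n_frames = mel_spectrogram.shape
    if n_frames <= crop_width:
        # pad short clips on the right with the minimum dB value
        pad = np.full((n_mels, crop_width - n_frames), mel_spectrogram.min())
        return np.concatenate([mel_spectrogram, pad], axis=1)
    start = np.random.randint(0, n_frames - crop_width)
    return mel_spectrogram[:, start : start + crop_width]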
# # Model Evaluation:
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
# define the target labels
y = metadata["primary_label"].values
X = np.array([librosa.load(file_path, sr=None)[0] for file_path in file_paths])
# create a label encoder
label_encoder = LabelEncoder()
# fit the label encoder to the target labels
label_encoder.fit(y)
batch_size = 32
num_epochs = 5
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Train the model
# (train_metadata is not defined in this notebook, so the full metadata dataframe is passed)
model.fit(
    train_datagen(train_audio_dir, metadata, label_encoder, batch_size),
    steps_per_epoch=metadata.shape[0] // batch_size,
    epochs=num_epochs,
    validation_data=(X_test, y_test),
)
# Evaluate model
y_true = np.argmax(y_test, axis=1)
y_pred = np.argmax(model.predict(X_test), axis=1)
from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred))
# Model improvement
# Add more layers to the model, change activation functions, or adjust hyperparameters
# Test model on test data
test_samples_melspecs = []
for sample_file in test_samples:
audio, sr = librosa.load(sample_file, sr=None, mono=True, res_type="kaiser_fast")
    melspec = librosa.feature.melspectrogram(
        y=audio, sr=sr, n_fft=2048, hop_length=512, n_mels=128
    )
test_samples_melspecs.append(melspec)
test_samples_melspecs = np.array(test_samples_melspecs)
test_samples_melspecs = np.expand_dims(test_samples_melspecs, -1)
predictions = model.predict(test_samples_melspecs)
# Make submission
for i, row in sample_sub.iterrows():
site = row["site"]
row_id = row["row_id"]
melspec = extract_melspectrogram(site, row_id)
melspec = np.expand_dims(melspec, axis=-1)
pred = model.predict(melspec)
sample_sub.iloc[i, 1:] = pred[0]
sample_sub.to_csv("submission.csv", index=False)
|
import json
import gc
import os
import re
import pandas as pd
import numpy as np
import random
import MeCab
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
import tqdm
from sklearn.metrics.pairwise import cosine_similarity
from transformers import *
import neologdn
m = MeCab.Tagger("-Ochasen")
# # AI王 (AI King) final submission
# ## Dataset preparation
# ### Data download
# ### Data loading
import gc
import tqdm
import json
import pandas as pd
import numpy as np
json_lst = []
with open("all_entities.json", "r") as f:
for line in tqdm.notebook.tqdm(f, total=920172):
d = json.loads(line)
json_lst.append(d)
df_all_entities = pd.DataFrame(json_lst)
# ### Data saving
df_all_entities.to_csv("all_entities.csv", index=None)
|
# # Figurative thinking
# In our material and pragmatic world, over the ages we have become highly skilled at logical thinking. To this date, this knowledge is expressed in structured descriptive language models like GPT. Although they can demonstrate reasoning, they are not capable of discovering new concepts or going beyond logical interpretation.
# As humans, one of our main functions is to receive, transform and transmit information. Language, first of all, is a way of transmitting information. We want language to transmit as much information per symbol/word/sentence as possible.
# Language is a more complex concept than we tend to think. To understand it completely we have to re-discover our Figurative thinking. Think about it this way: logic is our left brain's job, images (figures) are our right brain's job. These halves of the brain work synchronously and complement each other in processing and transmitting information. Remember the Yin-Yang concept and symbol? It is a good example of a single symbol holding a whole concept.
# https://www.healthline.com/health/left-brain-vs-right-brain
# Today we don't fully understand our language, and we don't understand the symbols. We are trying to find the paths a word came from: Latin, Sanskrit or Sumerian. We fight and divide our world by languages, religions, academic approaches, etc. To my mind, the problem with modern languages is that the core meaning of the words is lost because the collective consciousness is heavily skewed toward left-brain thinking.
# The key to reading symbolic texts is understanding the meaning of the symbol-character (image, symbol, syllable, sound) and its interaction with the object this symbol describes. These characters are universal, have the same meaning across the globe and sound accordingly.
# I invite you on this journey and you will discover a forgotten knowledge. In order to dive into the Structural linguistics concept, try to forget everything you have been taught in life and start to perceive words and symbols like a 3-5 year old child. You don't know written English, French or Russian. You know letters and numbers and can read the symbols.
# How do we code in symbols two very basic concepts for our child: laughter and wonder, for example? These concepts are universal and don't depend on language. The language depends on them. When we laugh we pronounce "HA". When we wonder we pronounce "AH". Now try to draw a simple symbol to hold each concept. Reversing the symbol should flip the concept. Did you get the idea? This is a very basic example of how early Egyptians coded universal knowledge in symbols using Structural linguistics.
# In Structural linguistics (Structuralism) we don't look for the causes or origins of language (or of any other phenomenon). Structuralism looks for the rules that underlie language and govern how it functions: it looks for the structure.
# The components of a word structure are not merely a collection of independent items: they form a working unit because they exist in relation to one another. They interact, and their sequence matters. This is the science where Computer Vision and NLP work together for one purpose.
# Words do not simply refer to objects in the world for which they stand. Instead, a word is a linguistic sign consisting, like the two sides of a coin, of two inseparable parts: signifier + signified. A signifier is a “sound-image” (a mental imprint of a linguistic sound or hieroglyph); the signified is the concept to which the signifier refers. Thus, a word is not merely a sound-image (signifier), nor is it merely a concept (signified). A sound image becomes a word only when it is linked with a concept.
# The idea that signifiers, or linguistic sound-images, do not refer to things in the world but to concepts in our mind is crucial for structuralism.
# https://literariness.org/2018/12/22/structural-linguistics/
# https://en.wikipedia.org/wiki/Structural_linguistics
# I started this project inspired by the work of a Russian-speaking Egyptologist, symbols expert and language researcher - https://www.youtube.com/@Ur_Al
# He brings very interesting ideas for interpreting Egyptian texts using old Russian, Sanskrit and Latin language structures, based on the concept of Structural linguistics.
# The main problem is that we do not understand the complex multi-level meaning of Egyptian texts. They contain much more information than what we see translated today. The Rosetta stone's origin raises many questions and does not reveal the secrets of hieroglyph decomposition.
# Ideograms (symbols) and phonograms (pronunciation) should match and reflect the same character.
# For example:
# * ANT (very old) - antique, gigant, atlantic, anthem, antropology, infant
# * AR (stopping and holding) - dark, arret, array, arrest, arrow, arc
# * AV (gather goods by travelling) - travel, aviation, navy, slavery, have, save, avoir, caravan
# * BIR, BER, BOR (take, grab, get toghether) - sober, borrowed, berry, cyber, neighbor, berth, graber
# * BRI, BRA (give smth to establish connection) - bridge, brick, bring, bribe, bride, grab, gravel
# * CAR, CYR (current state, curly borders) - circle, circus, circumstance, curly, church, cyrillic, current, curve, cursor, curb, car, cart
# * CRE, CRA, CRI, KRA, KRE, KRI (mounted on top, superior, added) - credit, increase, crawl, crest, chist, creme, crete, concrete, crab, crater, create, cristal
# * CUT, CAT, KAT (categorise, separate, specialise, cut) - cut, category, catastrophe, catalogue, education, cation, cathode, cathedra
# etc...
# I will go beyond Saussure's concepts and try to create a Universal Language model in order to decode ancient languages and help us remember the root meaning of the words in modern languages.
# # Slovo-grams or Slo-grams
# I am sharing with you the Slovo-grams dictionary (proto-syllables), English and Russian words containing them, and the core meaning of each syllable. These are the basic building blocks of most of the Indo-European group of languages. Some languages preserved more of this structure; other languages lost the meaning of the words through the ages. Using this approach I will try to discover the hidden connection between the characters and their meanings no matter the language we use.
# The data for the Slovo-gram dictionary was collected by me throughout the years from:
# 1. Other researcher's work (list will be provided)
# 2. Old words, tales, songs
# 3. Cross-translation techniques
# 4. Common sense
# The Work is still in progress.
# importing slo-grams into the df
import pandas as pd
from IPython.display import display
df = pd.read_excel("/kaggle/input/slovo-grams/Slovo_Grams.xlsx")
df.head(50)
df.shape
# # Root Meaning Tokenization
# search for syllables in the slogram column and return their meaning values
import pandas as pd
def meaning(word):
df["slogram"] = df["slogram"].astype(str)
df["meaning"] = df["meaning"].astype(str)
slo_grams = list(df["slogram"].str.lower())
meanings = list(df["meaning"])
tokens = []
i = 0
while i < len(word):
for j, s in enumerate(slo_grams):
if word[i:].startswith(s):
tokens.append(meanings[j])
i += len(s)
break
else:
tokens.append(word[i])
i += 1
return tokens
# testing our meaning function
meaning("carburator")
# The root_meaning function also takes values from the "related" columns when there is no direct match.
# I also modified the function so it looks for a 3-symbol syllable first, then for a 2-symbol match.
def root_meaning(word):
df["slogram"] = df["slogram"].astype(str)
df["rel_1"] = df["rel_1"].astype(str)
df["rel_2"] = df["rel_2"].astype(str)
df["meaning"] = df["meaning"].astype(str)
slo_grams = []
meanings = []
for slogram, rel_1, rel_2, meaning in zip(
df["slogram"], df["rel_1"], df["rel_2"], df["meaning"]
):
if slogram != "nan":
slo_grams.append(slogram.lower())
meanings.append(meaning)
if rel_1 != "nan":
slo_grams.append(rel_1.lower())
meanings.append(meaning)
if rel_2 != "nan":
slo_grams.append(rel_2.lower())
meanings.append(meaning)
tokens = []
i = 0
while i < len(word):
found_match = False
for j, s in enumerate(slo_grams):
if len(s) == 3 and word[i:].startswith(s):
tokens.append(meanings[j])
i += len(s)
found_match = True
break
if not found_match:
for j, s in enumerate(slo_grams):
if len(s) == 2 and word[i:].startswith(s):
tokens.append(meanings[j])
i += len(s)
found_match = True
break
if not found_match:
tokens.append(word[i])
i += 1
return tokens
# testing our root_meaning function
root_meaning("carburator")
# testing our root_meaning function
root_meaning("cosmos")
# testing our root_meaning function
root_meaning("creator")
# testing our root_meaning function
root_meaning("germes")
# testing our root_meaning function
root_meaning("architecture")
# testing our root_meaning function
root_meaning("manager")
# testing our root_meaning function
root_meaning("scanner")
# testing our root_meaning function
root_meaning("pira-mida")
# testing our root_meaning function
root_meaning("intuition")
# Now that we know how to deconstruct words into their root syllables (slograms), we can start working with hieroglyphs to unpack the GERMETIC knowledge!
# # Exploring Egyptian hieroglyphs dataset
# This dataset is built from the hieroglyphs found in 10 different pictures from the book "The Pyramid of Unas" (Alexandre Piankoff, 1955).
# Each hieroglyph is manually annotated and labelled according to the Gardiner Sign List. The images are stored with their label and number in their names.
# https://archive.org/details/pyramidofunas0005unse
#!pip install datasets
#!pip install transformers
#!git clone https://huggingface.co/datasets/HamdiJr/Egyptian_hieroglyphs
# importing the Egyptian hieroglyphs dataset from Hugging Face
from datasets import load_dataset
# Load the dataset using the Hugging Face Datasets library
dataset = load_dataset("HamdiJr/Egyptian_hieroglyphs")
# Print information about the available splits for the dataset
print("Available splits:", list(dataset.keys()))
# displaying 20 random hieroglyphs
import random
import matplotlib.pyplot as plt
# Choose 20 random examples from the train split of the dataset
train_examples = random.sample(list(dataset["train"]), 20)
# Create a 5x4 grid of subplots
fig, axes = plt.subplots(nrows=4, ncols=5, figsize=(16, 16))
# Loop through the examples and display each one in a subplot
for i, example in enumerate(train_examples):
row = i // 5
col = i % 5
ax = axes[row][col]
ax.imshow(example["image"])
ax.set_title(example["label"])
ax.axis("off")
plt.show()
# Print the dataset's features
print(dataset["train"].features)
import matplotlib.pyplot as plt
subset = dataset["train"].filter(lambda example: example["label"] == 11)
# Display all the images from subclass
fig, axes = plt.subplots(nrows=4, ncols=5, figsize=(16, 16))
for idx, example in enumerate(subset):
row = idx // 5
col = idx % 5
axes[row, col].imshow(example["image"])
axes[row, col].axis("off")
# If there are fewer than 20 images, hide the remaining subplots
for idx in range(len(subset), 20):
row = idx // 5
col = idx % 5
axes[row, col].axis("off")
plt.show()
# This symbol sounds like "CHE" and represents change, either upward or downward.
#
# let's check which classes have the most images
from collections import Counter
# Count the number of images per class
class_counts = Counter(example["label"] for example in dataset["train"])
# Get the top 10 classes with the most images
top_10_classes = class_counts.most_common(10)
# Print the top 10 classes and their counts
print("Top 10 classes with the most images:")
for class_id, count in top_10_classes:
print(f"Class {class_id}: {count} images")
import matplotlib.pyplot as plt
# Get the first image from each of the first 20 classes
first_images = []
for class_id in range(20):
first_image = dataset["train"].filter(lambda example: example["label"] == class_id)[
0
]["image"]
first_images.append(first_image)
# Display the first images from the first 20 classes in a 5x4 grid
fig, axes = plt.subplots(nrows=4, ncols=5, figsize=(16, 16))
for idx, image in enumerate(first_images):
row = idx // 5
col = idx % 5
axes[row, col].imshow(image)
axes[row, col].axis("off")
plt.show()
# let's export the first image from the first 20 categories to an Excel spreadsheet for manual annotation
import numpy as np
import os
from io import BytesIO
from PIL import Image
from openpyxl import Workbook
from openpyxl.drawing.image import Image as XLImage
# Define the number of classes you want to take into account
num_classes = 20
# Find the first image from each class
image_examples = []
for class_id in range(num_classes):
example = next(
iter(dataset["train"].filter(lambda example: example["label"] == class_id))
)
image_examples.append(example)
# Create a new workbook and add a worksheet
wb = Workbook()
ws = wb.active
ws.title = "Image Examples"
# Resize images, save them temporarily and insert them into the Excel sheet
thumbnail_size = (50, 50)
for idx, example in enumerate(image_examples):
img = example["image"]
img.thumbnail(thumbnail_size)
# Convert the image to a NumPy array
img_array = np.array(img)
# Convert the NumPy array back to an image
img = Image.fromarray(img_array, mode="L")
# Save the image to a buffer
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
# Insert the image into the worksheet
ws.column_dimensions["A"].width = img.width // 6
ws.row_dimensions[idx + 1].height = img.height
ws.add_image(XLImage(buffer), f"A{idx + 1}")
# Save the workbook
wb.save("image_examples.xlsx")
# now, let's take 3 random images from each category along with their class label
import os
from openpyxl import Workbook
from openpyxl.drawing.image import Image as XLImage
import random
# Define the number of classes you want to take into account
num_classes = dataset["train"].features["label"].num_classes
num_random = 3
# Create a new workbook and add a worksheet
wb = Workbook()
ws = wb.active
ws.title = "Image Examples"
# Write the header row
header = ["Class"] + ["Random Image " + str(i + 1) for i in range(num_random)]
ws.append(header)
# Resize images, save them temporarily and insert them into the Excel sheet
thumbnail_size = (100, 100)
for class_id in range(num_classes):
# Find 3 random images from the class
examples = dataset["train"].filter(lambda example: example["label"] == class_id)
examples = list(examples)
random_examples = random.sample(examples, min(num_random, len(examples)))
# Convert images to thumbnails and save them temporarily
image_files = []
for i, example in enumerate(random_examples):
img = example["image"]
img.thumbnail(thumbnail_size)
# Convert the image to a NumPy array
img_array = np.array(img)
# Convert the NumPy array back to an image
img = Image.fromarray(img_array, mode="L")
# Save the image to a buffer
buffer = BytesIO()
img.save(buffer, format="PNG")
buffer.seek(0)
# Insert the image into the worksheet
xl_img = XLImage(buffer)
ws.column_dimensions[chr(66 + i)].width = img.width // 6
ws.row_dimensions[class_id + 2].height = img.height
ws.add_image(xl_img, f"{chr(66+i)}{class_id+2}")
# Add the image to the list of image files
image_files.append(buffer)
# Add the class label and image file names to the worksheet
row = [dataset["train"].features["label"].int2str(class_id)] + [None] * num_random
for i, example in enumerate(random_examples):
row[i + 1] = dataset["train"].features["label"].int2str(example["label"])
ws.append(row)
# Save the workbook
wb.save("image_examples.xlsx")
# # Decoding the main badge of "Pyramid of Unas"
# Ok, good job. Now we have the Excel spreadsheet with symbol samples from this dataset and their corresponding class (Gardiner's Sign List). Now I can start manually labeling this dataset with my Slovo-gram dictionary.
# This dataset contains only 171 symbols. Some experts estimate that Ancient Egyptian texts contain up to 700 symbols. My Slovo-gram vocabulary contains around 500 symbols by now. The JSesh software that we will also use contains 1500 symbols.
# The annotation (labeling) in the initial dataset is useless for my purposes. We cannot treat Egyptian hieroglyphs as letters or words similar to our alphabet, but rather as a Symbol-Sound-Action structure: a symbol is a character and a simplest sound (syllable) at the same time.
# The whole point of this project is to create proper annotations for the symbols and develop a model on top of that.
# The proof that I am going in the right direction will be the "Pyramid of Unas" main badge decoded with this method.
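# A minimal sketch of the Symbol-Sound-Action structure described above; the Glyph type is my own illustration, the "CHE" entry reuses the sound and meaning noted for that symbol earlier, and the other values are placeholders, not verified assignments.
from collections import namedtuple

# treat each hieroglyph as a Symbol-Sound-Action triple rather than a letter
Glyph = namedtuple("Glyph", ["symbol", "sound", "action"])

example_glyphs = [
    Glyph(symbol="CHE-glyph", sound="CHE", action="change"),  # sound/meaning taken from the class shown above
    Glyph(symbol="placeholder-glyph", sound="RO", action="unknown"),  # placeholder entry
]

def read_glyphs(glyphs):
    # join the sounds into a transliteration and the actions into a rough meaning
    return "-".join(g.sound for g in glyphs), " ".join(g.action for g in glyphs)

print(read_glyphs(example_glyphs))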
# Here are some images:
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Pyramid_of_Unas2.png")
# Ok, so first of all, Pyramid of Unas = Pyramid of One.
# In this pyramid, the concept of wholeness and the rules of being Whole (Universal rules) are encoded.
# The main badge contains 5 hieroglyphs:
# 1. Rabbit - SCO - scope
# 2. Fence - CO - collective
# 3. Feather-R - CR - highest
# 4. Arc-L - DU - thoughts
# 5. O-shaped stand, containing 4 symbols above - NEI - in it
# SCO-CO CR(a)DU (v)NEI - Scope of collective highest thoughts here.
# Pretty meaningful, isn't it? You can verify it yourself.
# Isn't that the Easter hare?
#
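# A minimal code sketch of the badge decoding above; the glyph names and slogram values are copied from the list above, and the simple joining of sounds and meanings is just my illustration, not a formal transliteration rule.
# the five glyphs of the main badge and the slograms assigned to them above
badge = [
    ("rabbit", "SCO", "scope"),
    ("fence", "CO", "collective"),
    ("feather-R", "CR", "highest"),
    ("arc-L", "DU", "thoughts"),
    ("o-shaped stand", "NEI", "in it"),
]
slogram_reading = "-".join(sound for _, sound, _ in badge)
meaning_reading = " ".join(action for _, _, action in badge)
print(slogram_reading)  # SCO-CO-CR-DU-NEI
print(meaning_reading)  # scope collective highest thoughts in it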
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Pyramid_of_Unas1.png")
# This is an example of the translation from the author, first page. Try to find some meaning there.
# You will not find specific coherent, much less technical, information in this translation. There is some poetic meaning. The author of the book "The Pyramid of Unas", Alexandre Piankoff, did not translate the main badge, but just wrote "Unas".
# I simply cannot believe that the Egyptians, or whoever built that, did such an enormous amount of work in stone just to praise the gods in poems.
# I am pretty sure those texts contain almost technical and exact information and instructions.
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Pyramid_of_Unas3.png")
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Pyramid_of_Unas4.png")
# # Labeling of Egyptian Hieroglyphs
# To properly label the Egyptian Hieroglyphs dataset with the corresponding syllables (sounds) I will use the work of Andrei Bannikov and go further using his approach. https://www.youtube.com/@Ur_Al
# This method of assigning a sound (syllable, slogram) to a symbol is very organic: it makes you understand the meaning of the symbol (hieroglyph) by looking at it and hearing the corresponding sound.
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Proper_Hieroglyphs_Sounds1.jpg")
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Proper_Hieroglyphs_Sounds2.jpg")
from IPython.display import Image
Image(filename="/kaggle/input/slovo-grams/Proper_Hieroglyphs_Sounds3.jpg")
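# As a closing sketch, this is one way the symbol-to-slogram annotations could be attached to the Hugging Face dataset; the gardiner_to_slogram entries are placeholders for the manual labeling still to be done, and Dataset.map is the standard Hugging Face Datasets API.
# placeholder mapping from class names to slograms -- to be filled in manually
gardiner_to_slogram = {
    "A1": "CHE",  # placeholder value
    "D21": "RO",  # placeholder value
}

def annotate(example):
    # attach a slogram annotation to a dataset example, if we have one
    label_name = dataset["train"].features["label"].int2str(example["label"])
    example["slogram"] = gardiner_to_slogram.get(label_name, "unknown")
    return example

# example usage (commented out until the mapping is filled in):
# annotated_train = dataset["train"].map(annotate)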
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import re # regular expression ==> used to remove special characters
a = pd.read_csv(
"/kaggle/input/amazon-review-dataset/amazon_reviews.csv", encoding="ISO-8859-1"
)
a
a.head()
a.tail()
a.shape
a.dtypes
a.isna().sum()
a["overall"].value_counts().plot(kind="bar")
a["overall"].value_counts()
# dropping rows labelled 'Irrelevant' in the 'overall' column
a.drop(a.index[(a["overall"] == "Irrelevant")], axis=0, inplace=True)
a
# dropping columns that are not needed for the sentiment model
a = a.drop(
[
"Unnamed: 0",
"reviewerName",
"reviewTime",
"day_diff",
"helpful_yes",
"helpful_no",
"total_vote",
"score_pos_neg_diff",
"score_average_rating",
"wilson_lower_bound",
],
axis=1,
)
a
a.isna().sum()
# preprocessing 'text'
nltk.download("wordnet")
nltk.download("stopwords")
nltk.download("punkt")
nltk.download("omw-1.4")
# There are missing values in 'reviewText'; fill them with a placeholder
a["reviewText"] = a["reviewText"].fillna("Missing")
review = a.reviewText
review
# Preprocessing
# tokenisation
from nltk.tokenize import TweetTokenizer
tokens = TweetTokenizer()
review = review.apply(lambda x: tokens.tokenize(x)).apply(
lambda x: " ".join(x)
) # lambda==> anonymous function
review
# remove special characters
# we use re ==> regular expression
review = review.str.replace(
    "[^a-zA-Z-0-9]+", " ", regex=True
)  # '+' is added because there could be a combination of characters
review
# Stemming
from nltk.stem import SnowballStemmer
stemmer = SnowballStemmer("english")
review = review.apply(
lambda x: [stemmer.stem(i.lower()) for i in tokens.tokenize(x)]
).apply(lambda x: " ".join(x))
review
# remove stop words
from nltk.corpus import stopwords
sw = stopwords.words("english")
review = review.apply(lambda x: [i for i in tokens.tokenize(x) if i not in sw]).apply(
lambda x: " ".join(x)
)
review
# vectorization
from sklearn.feature_extraction.text import TfidfVectorizer
vector = TfidfVectorizer()
x = vector.fit_transform(review)
print(x)
x.shape # 4915 sentences 6084 words
y = a["overall"].values
y
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.30, random_state=42
)
x_train
y_train
# plot a pie Chart for 'overall'
a["overall"].value_counts().plot(kind="pie")
# Here we implement 5 classification models
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
model1 = KNeighborsClassifier(n_neighbors=9, weights="uniform")
model2 = MultinomialNB()
model3 = SVC()
model4 = DecisionTreeClassifier(criterion="entropy")
model5 = RandomForestClassifier(n_estimators=100)
modellist = [model1, model2, model3, model4, model5]
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
for i in modellist:
i.fit(x_train, y_train)
y_pred = i.predict(x_test)
print("the classification details of model", i, "is below")
print("the confusion matrix of ", i, "is")
print(confusion_matrix(y_test, y_pred))
print("accuracy score of", i, "is")
print(accuracy_score(y_test, y_pred))
print("the classification report of", i, "is")
print(classification_report(y_test, y_pred))
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
import os
import cv2
from keras.applications import VGG19, Xception
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras import Sequential
from tensorflow.keras.losses import (
MeanSquaredError,
BinaryCrossentropy,
SparseCategoricalCrossentropy,
)
from tensorflow.keras.activations import sigmoid
from tensorflow.keras.losses import MeanSquaredLogarithmicError
from tensorflow import keras as k
import numpy as np
# for building linear regression models and preparing data
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import tensorflow as tf
from xgboost import *
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import random
from tensorflow.keras.applications import EfficientNetV2M
DIRECTORY = "/kaggle/input/fer2013/train"
CATAGORY = ["disgust", "happy", "fear", "neutral", "sad", "surprise", "angry"]
training_data = []
class_list = []
# there are some broken (corrupted) imgs
# we will skip them as we encounter them through the try/except block
def create_training_data():
    for category in CATAGORY:
        path = os.path.join(DIRECTORY, category)
        class_num = CATAGORY.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                img_array = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB)
                img_array = cv2.resize(img_array, (253, 253))
                training_data.append([img_array, class_num])
                class_list.append(class_num)
            except Exception:
                # skip images that cannot be read or converted
                pass
create_training_data()
random.shuffle(training_data)
X = []
y = []
for features, labels in training_data:
X.append(features)
y.append(labels)
# (number of images, height, width, num of channels)
X = np.array(X).reshape(-1, 253, 253, 3)
y = np.array(y)
X = X / 255.0
DIRECTORY = "/kaggle/input/fer2013/test"
CATAGORY = ["disgust", "happy", "fear", "neutral", "sad", "surprise", "angry"]
training_data = []
class_list = []
# there are some broken (corrupted) imgs
# we will skip them as we encounter them through the try/except block
def create_training_data():
    for category in CATAGORY:
        path = os.path.join(DIRECTORY, category)
        class_num = CATAGORY.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                img_array = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB)
                img_array = cv2.resize(img_array, (253, 253))
                training_data.append([img_array, class_num])
                class_list.append(class_num)
            except Exception:
                # skip images that cannot be read or converted
                pass
create_training_data()
random.shuffle(training_data)
X_test = []
y_test = []
for features, labels in training_data:
X_test.append(features)
y_test.append(labels)
# (number of images, height, width, num of channels)
X_test = np.array(X_test).reshape(-1, 253, 253, 3)
y_test = np.array(y_test)
X_test = X_test / 255.0
fig = plt.figure(figsize=(10, 10), facecolor="black")
# Define row and cols in the figure
rows, cols = 4, 4
nm = list(plt.cm.datad.keys())
arr = np.random.randint(0, 10000, (rows * cols,))
c = np.random.randint(0, X.shape[0], (rows * cols,))
# Display a 4x4 grid of sample images
i = 0
for j in range(0, cols * rows):
fig.add_subplot(rows, cols, j + 1)
plt.imshow(X[j, :])
i += 1
plt.axis("off")
plt.show()
top = EfficientNetV2M(include_top=False, input_shape=(253, 253, 3))
for i in top.layers:
i.trainable = False
model = Sequential(
[
top,
k.layers.Flatten(),
k.layers.Dense(256, activation="relu"),
k.layers.Dropout(0.5),
k.layers.Dense(7, "softmax"),
]
)
model.compile(
    # the final Dense layer already applies softmax, so the loss must not expect logits
    loss=k.losses.SparseCategoricalCrossentropy(from_logits=False),
    optimizer="nadam",
    metrics=["accuracy"],
)
hist = model.fit(
X,
y,
epochs=5,
batch_size=32,
validation_split=0.2,
callbacks=k.callbacks.EarlyStopping(patience=10, restore_best_weights=True),
)
for i in top.layers:
i.trainable = True
hist = model.fit(
X,
y,
epochs=5,
batch_size=32,
validation_split=0.2,
callbacks=k.callbacks.EarlyStopping(patience=10, restore_best_weights=True),
)
|
# # Config
import os
data_dir = "/kaggle/input/cbc-data"
# Hyper-parameter
VERSION = 1
DEBUG = False
USE_ECB_SPEECHES = True
USE_FIN_SPEECHES = True
FROM_DATE = "2002-04-02"
TO_DATE = "2023-04-01"
TRAIN_TO_DATE = "2014-01-01"
TARGETS = "rate_change_90D_class" # change in policy rate over the next 90 days
# Model parameter
PRETRAINED_MODEL = "microsoft/deberta-v3-base"
MAX_LEN = 1024
MODEL_NAME = f'{PRETRAINED_MODEL.split("/",1)[1]}_v{VERSION}'
LOAD_MODEL_FROM = os.path.join(data_dir, MODEL_NAME)
print(MODEL_NAME)
print(LOAD_MODEL_FROM)
# Import useful libraries
import pandas as pd
import numpy as np
import re
import random
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns
sns.set_style("darkgrid")
sns.set(rc={"figure.figsize": (10, 4)})
# Set up device: TPU or GPU
from transformers import *
import tensorflow as tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("TPU strategy")
except:
if len(tf.config.list_physical_devices("GPU")) >= 2:
strategy = tf.distribute.MirroredStrategy()
print("DEVICES AVAILABLE: {}".format(strategy.num_replicas_in_sync))
else:
strategy = tf.distribute.get_strategy()
print("single strategy")
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
print("Mixed precision enabled")
# # Get data
# ## Speeches
# ### Riksbank, Finlands Bank
# Helper function to get corpus dataset
def get_corpus(data_dir, from_date=FROM_DATE, sep=","):
data = pd.read_csv(data_dir, sep=sep)
data["date"] = pd.to_datetime(data["date"])
return data[data["date"] >= from_date]
corpus_df = get_corpus(os.path.join(data_dir, "all_corpus.csv"))
corpus_df["article_len"] = corpus_df["article"].str.len()
corpus_df = corpus_df[corpus_df["article_len"] > 100]
print("df shape: ", corpus_df.shape)
print("Number of unique ids: ", len(corpus_df["id"].unique()))
corpus_df.head()
# ### ECB
# Get speeches from file downloaded from ECB website
ecb_corpus_df = get_corpus(os.path.join(data_dir, "all_ECB_speeches.csv"), sep="|")
ecb_corpus_df["article_len"] = ecb_corpus_df["contents"].str.len()
ecb_corpus_df = ecb_corpus_df[ecb_corpus_df["article_len"] > 100]
ecb_corpus_df["speaker_title"] = ecb_corpus_df["subtitle"].str.split(",").str[1]
ecb_corpus_df["id"] = (
"ecb"
+ ecb_corpus_df["date"].dt.strftime("%m%d%Y")
+ ecb_corpus_df["speakers"].str[:2].str.lower()
+ ecb_corpus_df["article_len"].astype(str)
)
ecb_corpus_df["bank"] = "ECB"
ecb_corpus_df["policy_rate_control"] = "ECB"
ecb_corpus_df = ecb_corpus_df[
[
"bank",
"policy_rate_control",
"id",
"date",
"speakers",
"speaker_title",
"contents",
"article_len",
]
]
ecb_corpus_df.columns = [
"bank",
"policy_rate_control",
"id",
"date",
"speaker",
"speaker_title",
"article",
"article_len",
]
print("df shape: ", ecb_corpus_df.shape)
print("Number of unique ids: ", len(ecb_corpus_df["id"].unique()))
ecb_corpus_df.head()
# ### All speeches
# Join speech tables
full_corpus_df = pd.concat([corpus_df, ecb_corpus_df])
full_corpus_df.shape
# Article length distribution
sns.displot(
data=full_corpus_df, x="article_len", hue="bank", kind="kde", height=4, aspect=7 / 4
)
plt.show()
# Number of articles per month
data = (
full_corpus_df.groupby(
[full_corpus_df["bank"], full_corpus_df["date"].dt.to_period("M")]
)
.agg({"id": "count"})
.reset_index()
)
data = data[data["date"] >= "2020-01-01"]
sns.barplot(data, x="date", y="id", hue="bank")
plt.xticks(rotation=90)
plt.show()
# ## Policy rate
# ### Riksbank
# Helper function to get and process policy rate csv files
def get_policy_rate(
data_dir,
skiprows=None,
policy_rate_control="Riksbank",
date_col="Period",
format_date="%d/%m/%Y",
value_col="Value",
from_date=FROM_DATE,
to_date=TO_DATE,
sep=",",
):
data = pd.read_csv(data_dir, sep=sep, skiprows=skiprows)
data["policy_rate_control"] = policy_rate_control
data[date_col] = pd.to_datetime(data[date_col], format=format_date)
data = data[["policy_rate_control", date_col, value_col]]
data.columns = ["policy_rate_control", "date", "policy_rate"]
data = data[data["date"] >= from_date]
date_df = pd.DataFrame({"date": pd.date_range(start=from_date, end=to_date)})
data = date_df.merge(data, on="date", how="left")
data = data.fillna(method="ffill")
return data
riks_rate_df = get_policy_rate(
os.path.join(data_dir, "riksbank_policy_rate.csv"), sep=";"
)
riks_rate_df["policy_rate"] = (
riks_rate_df["policy_rate"].str.replace(",", ".").astype(float)
)
riks_rate_df["policy_rate_lead60"] = riks_rate_df["policy_rate"].shift(periods=-60)
riks_rate_df["policy_rate_lead90"] = riks_rate_df["policy_rate"].shift(periods=-90)
print(riks_rate_df.shape)
riks_rate_df.head()
# ### ECB official interest rate
# Get data for ECB
ecb_rate_df = get_policy_rate(
data_dir=os.path.join(data_dir, "ecb_interest_rate.csv"),
skiprows=[0, 1, 3, 4],
policy_rate_control="ECB",
date_col="Unnamed: 0",
format_date="%Y-%m-%d",
value_col="Daily, ECB Deposit facility - date of changes (raw data), Level",
)
ecb_rate_df["policy_rate_lead60"] = ecb_rate_df["policy_rate"].shift(periods=-60)
ecb_rate_df["policy_rate_lead90"] = ecb_rate_df["policy_rate"].shift(periods=-90)
print(ecb_rate_df.shape)
ecb_rate_df.head()
# ### All policy rate
# Join all policy rate tables
full_rate_df = pd.concat([riks_rate_df, ecb_rate_df])
print("df shape: ", full_rate_df.shape)
full_rate_df.head()
# Compare policy rate
sns.lineplot(data=full_rate_df, x="date", y="policy_rate", hue="policy_rate_control")
plt.xticks(rotation=90)
plt.show()
# # Prepare dataset
# Combine speeches and policy rate tables
full_df = full_corpus_df.merge(
full_rate_df, on=["date", "policy_rate_control"], how="left"
)
full_df = full_df.dropna()
# Calculate policy rate change for 60 days, 90 days
full_df["rate_change_60D"] = full_df["policy_rate_lead60"] - full_df["policy_rate"]
full_df["rate_change_90D"] = full_df["policy_rate_lead90"] - full_df["policy_rate"]
# Classify rate change as Increase, Same, Decrease
def rate_change_classifior(col):
conditions = [full_df[col] > 0, full_df[col] == 0, full_df[col] < 0]
choices = ["Increase", "Same", "Decrease"]
return np.select(conditions, choices, default=np.nan)
full_df["rate_change_60D_class"] = rate_change_classifior("rate_change_60D")
full_df["rate_change_90D_class"] = rate_change_classifior("rate_change_90D")
# Select only useful columns for training
full_df = full_df[
[
"id",
"date",
"speaker",
"speaker_title",
"article",
"rate_change_60D",
"rate_change_90D",
"rate_change_60D_class",
"rate_change_90D_class",
]
]
# Print data shape and data samples
print("df shape: ", full_df.shape)
full_df.head()
# ## Train test split
train_df = full_df[full_df["date"] <= TRAIN_TO_DATE]
test_df = full_df[full_df["date"] > TRAIN_TO_DATE]
# ## Token, attention, output
# Check if we already have trained parameters
if os.path.exists(LOAD_MODEL_FROM):
tokenizer = AutoTokenizer.from_pretrained(f"{LOAD_MODEL_FROM}")
else:
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
# Define function to get token from pretrained tokenizer
def get_token(df):
ids = df["id"].unique()
input_ids = np.zeros((len(ids), MAX_LEN), dtype="int32")
attention_mask = np.zeros((len(ids), MAX_LEN), dtype="int32")
for i in range(len(ids)):
id = ids[i]
txt = df.loc[df["id"] == id]["article"].values[0]
tokens = tokenizer.encode_plus(
txt, max_length=MAX_LEN, padding="max_length", truncation=True
)
input_ids[i,] = tokens["input_ids"]
attention_mask[i,] = tokens["attention_mask"]
return input_ids, attention_mask
# Get tokens
train_input_ids, train_attention_mask = get_token(train_df)
test_input_ids, test_attention_mask = get_token(test_df)
print("Train set:")
print("Input_ids shape: ", train_input_ids.shape)
print("Attention mask shape: ", train_attention_mask.shape)
print("\n")
print("Test set:")
print("Input_ids shape: ", test_input_ids.shape)
print("Attention mask shape: ", test_attention_mask.shape)
# Encode target variables so we can use to feed to our model
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
enc = LabelEncoder()
enc.fit(train_df[TARGETS])
print("Classes: ", enc.classes_)
# Encode targets
train_target = enc.transform(train_df[TARGETS])
test_target = enc.transform(test_df[TARGETS])
print("Train targets shape: ", train_target.shape)
print("Test targets shape: ", test_target.shape)
# # Build model
def build_model():
# Corpus feature
## Load pre-trained model or fine-tuned model
if os.path.exists(LOAD_MODEL_FROM):
config = AutoConfig.from_pretrained(f"{LOAD_MODEL_FROM}/config.json")
backbone = TFAutoModel.from_pretrained(f"{LOAD_MODEL_FROM}/tf_model.h5")
else:
config = AutoConfig.from_pretrained(PRETRAINED_MODEL, output_hidden_states=True)
backbone = TFAutoModel.from_pretrained(PRETRAINED_MODEL, config=config)
## Get embeddings from pretrained model
tokens = tf.keras.layers.Input(shape=(MAX_LEN,), name="tokens", dtype=tf.int32)
attention = tf.keras.layers.Input(
shape=(MAX_LEN,), name="attention", dtype=tf.int32
)
## Get the last hidden state from pretrained model
outputs = backbone(tokens, attention_mask=attention)
hidden_state = outputs[1][-1]
## Calculate mean embeddings from the last hidden state
input_mask_expanded = tf.broadcast_to(
tf.expand_dims(attention, -1), tf.shape(hidden_state)
)
input_mask_expanded = tf.cast(input_mask_expanded, tf.float32)
sum_embeddings = tf.reduce_sum(hidden_state * input_mask_expanded, axis=1)
sum_mask = tf.reduce_sum(input_mask_expanded, axis=1)
sum_mask = tf.clip_by_value(
sum_mask, clip_value_min=1e-9, clip_value_max=tf.float32.max
)
mean_embeddings = sum_embeddings / sum_mask
# Final layer with softmax activation
logits = tf.keras.layers.Dense(
len(enc.classes_),
activation="softmax",
dtype="float32",
kernel_initializer=tf.keras.initializers.Orthogonal(seed=42),
)(mean_embeddings)
# Compile model
model = tf.keras.Model(inputs=[tokens, attention], outputs=logits)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
return model
with strategy.scope():
model = build_model()
if DEBUG:
model.summary()
# This chunk is for debug only
if DEBUG:
config = AutoConfig.from_pretrained(PRETRAINED_MODEL, output_hidden_states=True)
backbone = TFAutoModel.from_pretrained(PRETRAINED_MODEL, config=config)
outputs = backbone(
train_input_ids[0:2,],
attention_mask=train_attention_mask[0:2,],
output_hidden_states=True,
)
hidden_state = outputs[0]
print(tf.shape(hidden_state))
print(tf.shape(train_attention_mask[0:2,]))
# Fit model
EPOCHS = 3
BATCH_SIZE = 4
if os.path.exists(LOAD_MODEL_FROM):
model.load_weights(f"{LOAD_MODEL_FROM}/{MODEL_NAME}")
else:
model.fit(
x=[train_input_ids, train_attention_mask],
y=train_target,
validation_data=([test_input_ids, test_attention_mask], test_target),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
verbose=1,
)
# # Analyze results
# What the predictions look like (on the held-out test set)
preds = model.predict(
    [test_input_ids, test_attention_mask], batch_size=BATCH_SIZE, verbose=1
)
preds = np.argmax(preds, axis=1)
preds
# What the targets look like
test_target
# Confusion matrix
cf_matrix = confusion_matrix(
    enc.inverse_transform(test_target),
    enc.inverse_transform(preds),
    labels=enc.classes_.tolist(),
)
sns.heatmap(cf_matrix, cmap="Blues")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_train = pd.read_csv("/kaggle/input/titanic/train.csv")
df_train.head()
# The Survived column is our dependent feature and the others are independent features
df_test = pd.read_csv("/kaggle/input/titanic/test.csv")
df_test.head()
# # Feature Engineering
df_train.info()
df_train.isnull().sum()
# The Cabin column has 687 null entries out of 891 total, so it is better to remove it. For Age we can impute the missing values, because only 177 are null.
df_train.drop("Cabin", axis=1, inplace=True)
df_train.info()
# We removed the Cabin column; now we take care of the Age column. The important thing is how Age depends on other features like passenger class, because most deaths were from Pclass 3, not Pclass 1. So we need to do EDA on the relationship between them.
import seaborn as sns
sns.set_style("darkgrid")
sns.countplot(x="Survived", data=df_train, hue="Sex")
sns.set_style("darkgrid")
sns.countplot(x="Survived", data=df_train, hue="Pclass")
# we need to see how our age is distributed
sns.distplot(df_train["Age"].dropna(), kde=True, bins=30)
# Relationshiip of pclass and age
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 7))
sns.boxplot(x="Pclass", y="Age", data=df_train)
# For Pclass 1 the median age is around 37, for Pclass 2 around 29, and for Pclass 3 around 24, so we replace the null values with these values, taking the Pclass dependency of Age into account
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
# now we have to apply this function using .apply(function_name)
df_train["Age"] = df_train[["Age", "Pclass"]].apply(impute_age, axis=1)
df_train.info()
# We can see that all missing values in the Age column are taken care of. Now we will convert the categorical features into numerical data to get better results.
df_train.head()
# We can change the Sex and Embarked columns into numerical data using pandas' get_dummies method. Columns like Ticket and Name are not needed as they don't give any useful info.
embark = pd.get_dummies(df_train["Embarked"], drop_first=True)
sex = pd.get_dummies(df_train["Sex"], drop_first=True)
print(embark)
print(sex)
df_train.drop(["Sex", "Embarked", "Name", "Ticket"], axis=1, inplace=True)
df_train.info()
df_train = pd.concat([df_train, sex, embark], axis=1)
df_train.head()
# There is no point keeping PassengerId as it is just a running index of passengers
df_train.drop(["PassengerId"], axis=1, inplace=True)
df_train.head()
from sklearn.ensemble import RandomForestClassifier

# apply the same preprocessing to the test set before predicting
df_test["Age"] = df_test[["Age", "Pclass"]].apply(impute_age, axis=1)
df_test["Fare"] = df_test["Fare"].fillna(df_test["Fare"].median())  # one Fare value is missing in the test set
test_sex = pd.get_dummies(df_test["Sex"], drop_first=True)
test_embark = pd.get_dummies(df_test["Embarked"], drop_first=True)
df_test = pd.concat([df_test, test_sex, test_embark], axis=1)
features = ["Pclass", "Age", "SibSp", "Parch", "Fare", "male", "Q", "S"]
X = df_train[features]
y = df_train["Survived"]
X_test = df_test[features]
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": df_test.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
# In machine learning, Classification is used to split data into categories. But after cleaning and preprocessing the data and training our model, how do we know if our classification model performs well? That is where a confusion matrix comes into the picture.
# # Confusion Matrix
# A confusion matrix is a matrix that summarizes the performance of a machine learning model on a set of test data. It is often used to measure the performance of classification models, which aim to predict a categorical label for each input instance. The matrix displays the number of true positives (TP), true negatives (TN), false positives (FP), and false negatives (FN) produced by the model on the test data.
# # How to Calculate a Confusion Matrix
# Here is the step-by-step process for calculating a confusion matrix in data mining:
# Step 1) First, you need a test dataset with its expected outcome values.
# Step 2) Predict all the rows in the test dataset.
# Step 3) From the predictions and expected outcomes, calculate:
# 1. The total number of correct predictions for each class.
# 2. The total number of incorrect predictions for each class.
# After that, these numbers are organized as follows:
# * Every row of the matrix corresponds to an actual class.
# * Every column of the matrix corresponds to a predicted class.
# * The total counts of correct and incorrect classifications are entered into the table.
# * The correct predictions for a class go into the cell where that class's actual row meets its own predicted column (the diagonal).
# * The incorrect predictions for a class go into that class's actual row, under the predicted column of the class the model chose instead.
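# The counting described above can be sketched directly in a few lines of Python; the two label lists here are made-up toy values just to show the tallying.
# toy example: tally a confusion matrix by hand
expected = ["yes", "no", "yes", "yes", "no", "yes", "no", "no"]
predicted = ["yes", "no", "no", "yes", "yes", "yes", "no", "no"]

counts = {}  # (actual, predicted) -> count
for a, p in zip(expected, predicted):
    counts[(a, p)] = counts.get((a, p), 0) + 1

for (a, p), n in sorted(counts.items()):
    print(f"actual={a}, predicted={p}: {n}")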
# # Some features of Confusion matrix are given below:
# * For a classifier with 2 prediction classes the matrix is a 2x2 table, for 3 classes it is a 3x3 table, and for n classes it is an nxn table.
# * The matrix has two dimensions, predicted values and actual values, along with the total number of predictions.
# * Predicted values are those values, which are predicted by the model, and actual values are the true values for the given observations.
# * It looks like the below table:
# 
# **From the above example, we can conclude that:**
# * There are two possible predicted classes: "yes" and "no". If we were predicting the presence of a disease, for example, "yes" would mean they have the disease, and "no" would mean they don't have the disease.
# * The classifier made a total of 165 predictions (e.g., 165 patients were being tested for the presence of that disease).
# * Out of those 165 cases, the classifier predicted "yes" 110 times, and "no" 55 times.
# * In reality, 105 patients in the sample have the disease, and 60 patients do not.
# # From the confusion matrix, we can find the following metrics
# **Classification Accuracy**:
# It is one of the important parameters for evaluating classification problems. It defines how often the model predicts the correct output, and it is calculated as the ratio of the number of correct predictions made by the classifier to the total number of predictions. The formula is given below:
# 
# **Misclassification rate:**
# It is also termed the Error rate, and it defines how often the model gives wrong predictions. It is calculated as the ratio of the number of incorrect predictions to the total number of predictions made by the classifier. The formula is given below:
# 
# **Precision:**
# Out of all the instances the model predicted as positive, precision measures how many were actually positive. It can be calculated using the below formula:
# 
# **Recall:**
# Out of all the actually positive instances, recall measures how many the model predicted correctly. The recall should be as high as possible.
# 
# **F-measure:**
# If two models have low precision and high recall or vice versa, it is difficult to compare these models. So, for this purpose, we can use F-score. This score helps us to evaluate the recall and precision at the same time. The F-score is maximum if the recall is equal to the precision. It can be calculated using the below formula:
# 
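# The formulas above can be computed directly from the four cell counts; the TP/FP/FN/TN split below assumes that 100 of the 110 "yes" predictions in the earlier 165-prediction example are correct, which is one consistent possibility and not a figure stated in the text.
# assumed cell counts (TP, FP, FN, TN) for illustration
TP, FP, FN, TN = 100, 10, 5, 50

accuracy = (TP + TN) / (TP + TN + FP + FN)
error_rate = (FP + FN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f_measure = 2 * precision * recall / (precision + recall)

print(accuracy, error_rate, precision, recall, f_measure)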
# **Other important terms used in Confusion Matrix:**
# **Null Error rate:**
# It defines how often our model would be incorrect if it always predicted the majority class. As per the accuracy paradox, "the best classifier for a particular application will sometimes have a higher error rate than the null error rate."
# **Cohen's Kappa:**
# This is essentially a measure of how well the classifier performed as compared to how well it would have performed simply by chance. In other words, a model will have a high Kappa score if there is a big difference between the accuracy and the null error rate.
# Cohen's kappa coefficient (κ, lowercase Greek kappa) is a statistic that is used to measure inter-rater reliability (and also intra-rater reliability) for qualitative (categorical) items.It is generally thought to be a more robust measure than simple percent agreement calculation, as κ takes into account the possibility of the agreement occurring by chance. There is controversy surrounding Cohen's kappa due to the difficulty in interpreting indices of agreement. Some researchers have suggested that it is conceptually simpler to evaluate disagreement between items.
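# A short sketch of both quantities; the label arrays are made-up examples, and cohen_kappa_score is scikit-learn's implementation of Cohen's kappa.
import numpy as np
from sklearn.metrics import cohen_kappa_score

# made-up binary labels for illustration
y_true = np.array([1, 0, 1, 1, 0, 1, 0, 0, 1, 1])
y_pred = np.array([1, 0, 1, 0, 0, 1, 1, 0, 1, 1])

# null error rate: the error of always predicting the majority class
majority_class = np.bincount(y_true).argmax()
null_error_rate = np.mean(y_true != majority_class)

print("Null error rate:", null_error_rate)
print("Cohen's kappa  :", cohen_kappa_score(y_true, y_pred))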
# **ROC Curve:**
# The ROC is a graph displaying a classifier's performance for all possible thresholds. The graph is plotted between the true positive rate (on the Y-axis) and the false Positive rate (on the x-axis).
# The Receiver Operator Characteristic (ROC) curve is an evaluation metric for binary classification problems. It is a probability curve that plots the TPR against FPR at various threshold values and essentially separates the ‘signal’ from the ‘noise.’ In other words, it shows the performance of a classification model at all classification thresholds. The Area Under the Curve (AUC) is the measure of the ability of a binary classifier to distinguish between classes and is used as a summary of the ROC curve.
# The higher the AUC(Area Under the Curve), the better the model’s performance at distinguishing between the positive and negative classes.
# When AUC = 1, the classifier can correctly distinguish between all the Positive and the Negative class points. If, however, the AUC had been 0, then the classifier would predict all Negatives as Positives and all Positives as Negatives.
# 
# When 0.5<AUC<1, there is a high chance that the classifier will be able to distinguish the positive class values from the negative ones. This is so because the classifier is able to detect more numbers of True positives and True negatives than False negatives and False positives.
# 
# When AUC=0.5, then the classifier is not able to distinguish between Positive and Negative class points. Meaning that the classifier either predicts a random class or a constant class for all the data points.
# So, the higher the AUC value for a classifier, the better its ability to distinguish between positive and negative classes.
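# A minimal ROC/AUC sketch using scikit-learn's roc_curve and roc_auc_score; the labels and scores below are made-up predicted probabilities for illustration.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score

# made-up true labels and predicted probabilities
y_true = np.array([0, 0, 1, 1, 0, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7, 0.9, 0.5, 0.65, 0.3])

fpr, tpr, thresholds = roc_curve(y_true, y_score)
auc = roc_auc_score(y_true, y_score)

plt.plot(fpr, tpr, label=f"AUC = {auc:.2f}")
plt.plot([0, 1], [0, 1], linestyle="--", label="chance level")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend()
plt.show()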
# **Why do you need a confusion matrix?**
# Here are the pros/benefits of using a confusion matrix.
# * It shows how any classification model is confused when it makes predictions.
# * Confusion matrix not only gives you insight into the errors being made by your classifier but also types of errors that are being made.
# * This breakdown helps you to overcome the limitation of using classification accuracy alone.
# * Every column of the confusion matrix represents the instances of that predicted class.
# * Each row of the confusion matrix represents the instances of the actual class.
# * It provides insight not only into the errors made by a classifier but also into the types of errors that are being made.
# # Implementations of Confusion Matrix in Python:
# * Import the necessary libraries like Numpy, confusion_matrix from sklearn.metrics, seaborn, and matplotlib.
# * Create the NumPy array for actual and predicted labels.
# * compute the confusion matrix.
# * Plot the confusion matrix with the help of the seaborn heatmap.
# Import the necessary libraries
import numpy as np
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
# Create the NumPy array for actual and predicted labels.
actual = np.array(
[
"Dog",
"Dog",
"Dog",
"Not Dog",
"Dog",
"Not Dog",
"Dog",
"Dog",
"Not Dog",
"Not Dog",
]
)
predicted = np.array(
[
"Dog",
"Not Dog",
"Dog",
"Not Dog",
"Dog",
"Dog",
"Dog",
"Dog",
"Not Dog",
"Not Dog",
]
)
# compute the confusion matrix.
cm = confusion_matrix(actual, predicted)
# Plot the confusion matrix.
sns.heatmap(
cm,
annot=True,
fmt="g",
xticklabels=["Dog", "Not Dog"],
yticklabels=["Dog", "Not Dog"],
)
plt.ylabel("Prediction", fontsize=13)
plt.xlabel("Actual", fontsize=13)
plt.title("Confusion Matrix", fontsize=17)
plt.show()
# **From the confusion matrix, we can find the following metrics**
# **Accuracy:** Accuracy is used to measure the performance of the model. It is the ratio of Total correct instances to the total instances.
# 
# For the above case:
# Accuracy = (5+3)/(5+3+1+1) = 8/10 = 0.8
# **Precision:** Precision is a measure of how accurate a model’s positive predictions are. It is defined as the ratio of true positive predictions to the total number of positive predictions made by the model
# 
# For the above case:
# Precision = 5/(5+1) =5/6 = 0.8333
# **Recall:** Recall measures the effectiveness of a classification model in identifying all relevant instances from a dataset. It is the ratio of the number of true positive (TP) instances to the sum of true positive and false negative (FN) instances.
# 
# For the above case:
# Recall = 5/(5+1) =5/6 = 0.8333
# **F1-Score:** F1-score is used to evaluate the overall performance of a classification model. It is the harmonic mean of precision and recall,
# 
# For the above case:
# F1-Score = (2 * 0.8333 * 0.8333) / (0.8333 + 0.8333) = 0.8333
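# The hand calculations above can be cross-checked with scikit-learn's metric helpers on the same actual/predicted arrays defined earlier, treating "Dog" as the positive class.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

print("Accuracy :", accuracy_score(actual, predicted))
print("Precision:", precision_score(actual, predicted, pos_label="Dog"))
print("Recall   :", recall_score(actual, predicted, pos_label="Dog"))
print("F1-score :", f1_score(actual, predicted, pos_label="Dog"))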
# **Example 2: Binary Classification for Breast Cancer**
# Import the necessary libraries
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Load the breast cancer dataset
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# Train the model
tree = DecisionTreeClassifier(random_state=23)
tree.fit(X_train, y_train)
# prediction
y_pred = tree.predict(X_test)
# compute the confusion matrix
cm = confusion_matrix(y_test, y_pred)
# Plot the confusion matrix.
sns.heatmap(
cm,
annot=True,
fmt="g",
xticklabels=["malignant", "benign"],
yticklabels=["malignant", "benign"],
)
plt.ylabel("Prediction", fontsize=13)
plt.xlabel("Actual", fontsize=13)
plt.title("Confusion Matrix", fontsize=17)
plt.show()
# Finding precision and recall
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy :", accuracy)
precision = precision_score(y_test, y_pred)
print("Precision :", precision)
recall = recall_score(y_test, y_pred)
print("Recall :", recall)
F1_score = f1_score(y_test, y_pred)
print("F1-score :", F1_score)
# **Accuracy : 0.9230769230769231
# Precision : 1.0
# Recall : 0.8842105263157894
# F1-score : 0.9385474860335195**
# **Example 3: Multi-Class Classification for the Handwritten Digits dataset**
# Import the necessary libraries
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Load the handwritten digits dataset
X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# Train the model
clf = RandomForestClassifier(random_state=23)
clf.fit(X_train, y_train)
# prediction
y_pred = clf.predict(X_test)
# compute the confusion matrix
cm = confusion_matrix(y_test, y_pred)
# Plot the confusion matrix.
sns.heatmap(cm, annot=True, fmt="g")
plt.ylabel("Prediction", fontsize=13)
plt.xlabel("Actual", fontsize=13)
plt.title("Confusion Matrix", fontsize=17)
plt.show()
# Finding precision and recall
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy :", accuracy)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import required packages
import os  # use os functionality
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Check the data files
os.listdir("../input/2021-immba-coding-workshop")
# Load the data
train_raw = pd.read_csv("../input/2021-immba-coding-workshop/train.csv")
test_raw = pd.read_csv("../input/2021-immba-coding-workshop/test.csv")
sample_submission = pd.read_csv(
"../input/2021-immba-coding-workshop/sample_submission.csv"
)
train_raw.info()
train_raw.describe()
train_raw.head()
test_raw.info()
test_raw.head()
df_train = train_raw.drop(["id", "date"], axis=1)
df_test = test_raw.drop(["id", "date"], axis=1)
plt.figure(figsize=(14, 8))
sns.heatmap(df_train.corr(), annot=True, cmap="summer")
plt.show()
df_train["grade"].describe()
plt.figure(figsize=(14, 6))
sns.regplot(x="grade", y="price", data=df_train)
plt.show()
# Boxplot of price by grade
plt.figure(figsize=(14, 6))
sns.boxplot(x="grade", y="price", data=df_train)
plt.show()
sns.catplot(data=df_train, x="grade", y="price", col="bedrooms", col_wrap=4, kind="box")
plt.figure(figsize=(14, 6))
sns.regplot(x="bedrooms", y="price", data=df_train)
plt.show()
# Create an ID column by resetting the index
df_train.reset_index(inplace=True)
df_test.reset_index(inplace=True)
# Work out the outlier cutoff (IQR rule) for each grade
# grade = 3
x = df_train[df_train["grade"] == 3]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_3 = x[x["price"] > out].index
# grade = 4
x = df_train[df_train["grade"] == 4]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_4 = x[x["price"] > out].index
# grade = 5
x = df_train[df_train["grade"] == 5]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_5 = x[x["price"] > out].index
# grade = 6
x = df_train[df_train["grade"] == 6]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_6 = x[x["price"] > out].index
# grade = 7
x = df_train[df_train["grade"] == 7]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_7 = x[x["price"] > out].index
# grade = 8
x = df_train[df_train["grade"] == 8]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_8 = x[x["price"] > out].index
# grade = 9
x = df_train[df_train["grade"] == 9]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_9 = x[x["price"] > out].index
# grade = 10
x = df_train[df_train["grade"] == 10]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_10 = x[x["price"] > out].index
# grade = 11
x = df_train[df_train["grade"] == 11]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_11 = x[x["price"] > out].index
# grade = 12
x = df_train[df_train["grade"] == 12]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_12 = x[x["price"] > out].index
# grade = 13
x = df_train[df_train["grade"] == 13]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_13 = x[x["price"] > out].index
print(out_3)
print(out_4)
print(out_5)
print(out_6)
print(out_7)
print(out_8)
print(out_9)
print(out_10)
print(out_11)
print(out_12)
print(out_13)
# All index keys corresponding to outliers
outlier_key = (
list(out_3)
+ list(out_4)
+ list(out_5)
+ list(out_6)
+ list(out_7)
+ list(out_8)
+ list(out_9)
+ list(out_10)
+ list(out_11)
+ list(out_12)
+ list(out_13)
)
len(outlier_key)
# Create a list of value '1', one for each outlier key
outlier_value = []
for i in range(len(outlier_key)):
outlier_value.append("1")
print(outlier_value)
len(outlier_value)
## Label the outliers
outlier = dict(zip(outlier_key, outlier_value))
df_train["Outlier"] = df_train["index"].map(outlier)
df_train[df_train["Outlier"] == "1"]
df_train.shape
# Delete rows where the Outlier value is '1'
out_result = df_train[df_train["Outlier"] == "1"].index
df_train_f = df_train.drop(out_result)
# Delete the Outlier column
del df_train_f["Outlier"]
df_train_f.shape
# Boxplot of price by grade (after the first round of outlier removal)
plt.figure(figsize=(14, 6))
sns.boxplot(x="grade", y="price", data=df_train_f)
plt.show()
## We removed outliers by grade, but grades 12 and 13 still show large outliers -> remove additional outliers for those grades
# grade = 12
x = df_train_f[df_train_f["grade"] == 12]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_12_2 = x[x["price"] > out].index
# grade = 13
x = df_train_f[df_train_f["grade"] == 13]
q1 = x["price"].quantile(0.25)
q3 = x["price"].quantile(0.75)
out = q3 + 1.5 * (q3 - q1)
out_13_2 = x[x["price"] > out].index
# All index keys corresponding to outliers
outlier_key2 = list(out_12_2) + list(out_13_2)
outlier_value2 = []
# Create a list of value '1', one for each outlier key
for i in range(len(outlier_key2)):
outlier_value2.append("1")
print(outlier_value2)
print(outlier_key2)
# Label the outliers (cumulative)
outlier2 = dict(zip(outlier_key2, outlier_value2))
df_train_f["Outlier2"] = df_train_f["index"].map(outlier2)
df_train_f[df_train_f["Outlier2"] == "1"]
df_train_f.shape
# Delete rows where the Outlier2 value is '1'
out_result2 = df_train_f[df_train_f["Outlier2"] == "1"].index
df_train_f2 = df_train_f.drop(out_result2)
# Delete the Outlier2 column
del df_train_f2["Outlier2"]
df_train_f2.shape
# Boxplot of price by grade
plt.figure(figsize=(14, 6))
sns.boxplot(x="grade", y="price", data=df_train_f2)
plt.show()
plt.figure(figsize=(14, 6))
sns.regplot(x="grade", y="price", data=df_train_f2)
plt.show()
## Build a prediction model
# Import required packages
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Separate the training features from the target
X = df_train_f2.drop("price", axis=1)
y = df_train_f2["price"]
# Split the data into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y)
# Train the model
lr = LinearRegression()
lr.fit(X_train, y_train)
# Check the R^2 score
lr.score(X_train, y_train)
# Check the coefficient of each feature
df_coeff = pd.DataFrame(X_train.columns)
df_coeff.columns = ["features"]
df_coeff["coefficients"] = pd.Series(
lr.coef_
)  # the textbook accesses coef_ via [0], but with Kaggle's sklearn version it can be used directly
df_coeff
# Predict the validation data with the trained model
predict = lr.predict(X_val)
# Evaluate the model on the validation data (RMSE, the competition metric)
mse = mean_squared_error(y_val, predict)
rmse = np.sqrt(mse)
print("rmse : {}".format(rmse))
# Predict the test data with the model built above
pred_submission = lr.predict(df_test)
# Fill in the submission template
sample_submission["price"] = pred_submission
sample_submission
sample_submission.to_csv("trial_submit.csv", index=False)
# ## Try building this as a loop (later); a working sketch follows below
# n = 12
# outlier_key2 = []
# while n < 14:
# x = df_train[df_train['grade'] == n]
# q1 = x['price'].quantile(0.25)
# q3 = x['price'].quantile(0.75)
# out = q3 + 1.5*(q3-q1)
# out_result =x[x['price'] > out]['index']
# outlier_key2.append(out_result)
# n += 1
# out_9 = train_9[train_9['price'] > out].index
# len(out_9)
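# A working sketch of the loop idea above: my own refactor of the per-grade cells using the same IQR rule, shown with a commented usage line so it does not interfere with the pipeline already built.
def grade_outlier_index(data, grade_col="grade", price_col="price"):
    # collect the indices of upper IQR outliers within each grade
    outlier_idx = []
    for g in sorted(data[grade_col].unique()):
        x = data[data[grade_col] == g]
        q1 = x[price_col].quantile(0.25)
        q3 = x[price_col].quantile(0.75)
        upper = q3 + 1.5 * (q3 - q1)
        outlier_idx.extend(x[x[price_col] > upper].index)
    return outlier_idx

# example usage, equivalent to the per-grade cells above in a single pass:
# df_train_loop = df_train.drop(grade_outlier_index(df_train))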
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
from matplotlib import pyplot as plt
from pyclustering.cluster.kmedians import kmedians
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
# from sklearn.preprocessing import minmax_scale
from pandas import crosstab
# # **Dimensionality reduction using PCA, followed by K-medians clustering**
iris = load_iris()
np.random.seed(911)
X = iris["data"]
y = iris["target"]
pca = PCA(n_components=3)
# projects data into PCA-subspace
X_pca = pca.fit_transform(X)
# min-max scaling
# X_scaled = minmax_scale(X_pca)
X_scaled = X_pca
# random initial center taken from dataset
initial_centers = X_scaled[np.random.permutation(X.shape[0])[0:3]]
# K-medians clustering
kmedians_ = kmedians(X_scaled, initial_centers)
kmedians_.process()
C = np.array(kmedians_.get_medians())  # cluster centers (medians)
cluster = kmedians_.get_clusters()  # clustering output
ax = plt.axes()
cor = ["red", "blue", "green"]
for i in range(3):
ax.scatter(
X_scaled[cluster[i], 0],
X_scaled[cluster[i], 1],
color=cor[i],
alpha=0.5,
zorder=2,
)
ax.scatter(C[:, 0], C[:, 1], c="k", marker="*", zorder=3)
ax.set_aspect("equal")
ax.grid(visible=True, zorder=1)
# # **Compares clustering results with targets (flower species)**
y_kmedians = np.zeros(150)
for i in range(3):
y_kmedians[cluster[i]] = i
crosstab(y, y_kmedians)
|
# # Table of Contents
# 1. [Library](#1)
# 1. [Load Data](#2)
# 1. [Data Summary](#3)
# * [Check Missing Values](#4)
# * [Remove Missing Values](#5)
# * [Check Duplicates](#6)
# * [Remove Duplicates](#7)
# * [Create Dataset](#8)
# 1. [Preprocessing Functions](#9)
# 1. [Data Preprocessing](#10)
# 1. [Popularity-Based Recommender](#11)
# 1. [Clustering and Collaborative Recommender](#12)
# 1. [Clustering and Content-Based Recommender](#13)
# 1. [SVD Recommender](#14)
# 1. [Hybrid (K-Means + SVD) Recommender](#15)
# ## Library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from surprise import SVD, Dataset, Reader
from surprise.model_selection import train_test_split
from surprise import accuracy
import warnings
warnings.filterwarnings("ignore")
import nltk
import re
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import time
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
#
# ## Load Data
rating = pd.read_csv("/kaggle/input/anime-recommendations-database/rating.csv")
anime = pd.read_csv("/kaggle/input/anime-recommendations-database/anime.csv")
#
# ## Data Summary
rating.head()
anime.head()
anime[anime.name == "pokemon"]
anime.info()
print(f"anime shape: {anime.shape}\nrating shape: {rating.shape}")
#
# #### Check Missing Values
rating.isna().sum()
anime.isna().sum()
#
# #### Remove Missing Rows
anime.dropna(axis=0, inplace=True)
anime.isna().sum()
anime.describe()
anime.episodes.value_counts()
#
# #### Check Duplicates
duplicated_anime = anime[anime.duplicated()].shape[0]
print(f"count of duplicated anime: {duplicated_anime}")
duplicated_rating = rating[rating.duplicated()].shape[0]
print(f"count of duplicated anime: {duplicated_rating}")
#
# #### Remove Duplicates
rating.drop_duplicates(keep="first", inplace=True)
duplicated_rating = rating[rating.duplicated()].shape[0]
print(f"count of duplicated anime after removing: {duplicated_rating}")
#
# ## Create Dataset
df = pd.merge(anime, rating, on="anime_id")
df.tail()
df = df.rename(columns={"rating_x": "user_rating"})
df = df.drop("rating_y", axis=1)
df.tail()
df.describe()
# df = df.head(500000).copy()
#
# ## Preprocessing Function
df = df.copy()
df["user_rating"].replace(to_replace=-1, value=np.nan, inplace=True)
df = df.dropna(axis=0)
print("Null values after final pre-processing :")
df.isna().sum()
def lower_text(text):
"""
to lowercase
"""
text = text.lower()
return text
def clean_text(text):
"""
data preprocessing
"""
# to lowercase
text = text.lower()
    # remove HTML tags, URLs and other artifacts
    text = re.sub(r"<[^>]*>", "", text)
    text = re.sub(r"http\S+", "", text)
    text = re.sub(r'"', "", text)
    text = re.sub(r"\.hack//", "", text)
    text = re.sub(r"'", "", text)
    text = re.sub(r"A's", "", text)
    text = re.sub(r"&", "and", text)
# remove punctuation
text = text.translate(str.maketrans("", "", string.punctuation))
# remove number
# text = re.sub(r'\d+', '', text)
# tokenization
# words = word_tokenize(text)
# remove stopwords
# stop_words = set(stopwords.words('english'))
# words = [word for word in words if word not in stop_words]
# stemming
# stemmer = PorterStemmer()
# words = [stemmer.stem(word) for word in words]
# join words
# text = ' '.join(words)
return text
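# A quick sanity check of clean_text on a made-up title (the string below is only an illustration):
print(clean_text("Cowboy Bebop & <b>The Movie</b>"))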
#
# ## Data Preprocessing
start_time = time.time()
df["name"] = df["name"].apply(clean_text)
anime["name"] = anime["name"].apply(clean_text)
end_time = time.time()
elapsed_time = end_time - start_time
print("process time: ", elapsed_time, " sec.")
#
# ## Popularity-Based Recommender
def popularity_recommender(df, selected_features):
"""
recommender system with popularity-based
"""
# grouping & calculating mean value
grouped_df = (
df.groupby(selected_features).agg({"user_rating": "mean"}).reset_index()
)
# sorting to rating
sorted_df = grouped_df.sort_values("user_rating", ascending=False)
    # give the recommendations
recommendations = sorted_df.head(10)
return recommendations
df.columns
# according to anime names
selected_features = ["name"]
popularity_recommender(df, selected_features)
# according to members
selected_features = ["members"]
popularity_recommender(df, selected_features)
# create first genre
df["first_genre"] = df["genre"].apply(
lambda x: x.split(",")[0].strip() if "," in x else x
)
# according to genre
selected_features = ["first_genre"]
popularity_recommender(df, selected_features)
# according to type
selected_features = ["type"]
popularity_recommender(df, selected_features)
#
# ## Clustering and Collaborative Recommender
# encoding
le = LabelEncoder()
df["t_genre"] = le.fit_transform(df["genre"])
df["t_type"] = le.fit_transform(df["type"])
selected_features = ["anime_id", "t_genre", "t_type", "user_rating"]
# k-means model
n_clusters = 6
kmeans = KMeans(n_clusters=n_clusters, random_state=42)
df["cluster"] = kmeans.fit_predict(df[selected_features])
from collections import Counter
labels = kmeans.labels_
# count of cluster items
cluster_counts = Counter(labels)
for cluster_id, count in cluster_counts.items():
print(f" {cluster_id}. küme: {count} eleman")
len(df)
import random
# create random user id
const_member_index = random.randint(1, len(df))
const_cluster_no = df.cluster[const_member_index]
const_cluster_no
user_no = df.user_id[const_member_index]
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from math import sqrt
start_time = time.time()
# get clusters and set pivot table
df_pivot = (
df[df.cluster == const_cluster_no]
.pivot_table(index="name", columns="user_id", values="user_rating")
.fillna(0)
)
# collaborative filtering method (KNN)
df_matrix = csr_matrix(df_pivot.values)
model_knn = NearestNeighbors(metric="cosine", algorithm="brute")
model_knn.fit(df_matrix)
# random anime title and finding recommendation
query_no = np.random.choice(df_pivot.shape[0])
print(
f"We will find recommendation for {query_no} no anime which is {df_pivot.index[query_no]}."
)
anime_const = df_pivot.index[query_no]
distances, indices = model_knn.kneighbors(
df_pivot.iloc[query_no, :].values.reshape(1, -1), n_neighbors=10
)
no = []
name = []
distance = []
rating = []
genre = []
# create recommendations
for i in range(0, len(distances.flatten())):
if i == 0:
print(f"Recommendations for '{df_pivot.index[query_no]}' viewers :\n")
else:
no.append(i)
name.append(df_pivot.index[indices.flatten()[i]])
distance.append(distances.flatten()[i])
rating.append(
*anime[anime["name"] == df_pivot.index[indices.flatten()[i]]][
"rating"
].values
)
genre.append(
*anime[anime["name"] == df_pivot.index[indices.flatten()[i]]][
"genre"
].values
)
# print(f'distance: {distance}')
dic = {
"No": no,
"Anime Name": name,
"Rating": rating,
"Genre": genre,
"Similarity": distance[::-1],
}
recommendation = pd.DataFrame(data=dic)
recommendation.set_index("No", inplace=True)
end_time = time.time()
elapsed_time = end_time - start_time
print("process time: ", elapsed_time, " sec.")
recommendation.head(10)
#
# ## Clustering and Content-Based Recommender
# create vectorizer
tfv = TfidfVectorizer(analyzer="word")
# get clusters
rec_data = df[df.cluster == const_cluster_no].copy()
rec_data.drop_duplicates(subset="name", keep="first", inplace=True)
rec_data.reset_index(drop=True, inplace=True)
# build the tf-idf input from the genre field
genres = rec_data["genre"].str.split(", | , | ,").astype(str)
# create tf-idf matrix
tfv_matrix = tfv.fit_transform(genres)
# calculate similarity matrix
cos_sim = cosine_similarity(tfv_matrix, tfv_matrix)
# drop duplicates
rec_indices = pd.Series(rec_data.index, index=rec_data["name"]).drop_duplicates()
# recommendation function
def give_recommendation(title, cos_sim=cos_sim):
idx = rec_indices[title]
cos_scores = list(enumerate(cos_sim[idx]))
cos_scores = sorted(cos_scores, key=lambda x: x[1], reverse=True)
cos_scores = cos_scores[1:11]
anime_indices = [i[0] for i in cos_scores]
# visualization
sim_scores = [i[1] for i in cos_scores]
    rec_dic = {
        "No": range(1, 11),
        # anime_indices index into rec_data, so the details are looked up there
        "Anime Name": rec_data["name"].iloc[anime_indices].values,
        "Rating": rec_data["user_rating"].iloc[anime_indices].values,
        "Genre": rec_data["genre"].iloc[anime_indices].values,
        "Similarity Score": sim_scores,
    }
dataframe = pd.DataFrame(data=rec_dic)
dataframe.set_index("No", inplace=True)
print(f"Recommendations for '{title}' viewers :\n")
return dataframe
start_time = time.time()
clustering_and_content = give_recommendation(anime_const)
end_time = time.time()
elapsed_time = end_time - start_time
print("process time: ", elapsed_time, " sec.")
clustering_and_content
#
# ## SVD Recommender
user_no = df.user_id[const_member_index]
user_no
df.columns
from surprise import SVD
from surprise import Dataset, Reader
from surprise.model_selection import train_test_split
start_time = time.time()
# create a reader
reader = Reader(rating_scale=(1, 10))
# get clusters
df_svd = df.copy()
# create data
data = Dataset.load_from_df(df_svd[["user_id", "name", "user_rating"]], reader)
# split data
train_set, test_set = train_test_split(data, test_size=0.25)
# train SVD model
model = SVD()
model.fit(train_set)
# making predictions on the test set
predictions = model.test(test_set)
end_time = time.time()
elapsed_time = end_time - start_time
print("process time: ", elapsed_time, " sec.")
# performance metrics
accuracy.rmse(predictions)
# give the recommendations
def get_top_n(user_id, n=10):
user_animes = df[df["user_id"] == user_id]["name"]
user_unrated_animes = df[~df["name"].isin(user_animes)]["name"]
user_unrated_animes = list(set(user_unrated_animes))
predictions = []
for anime_id in user_unrated_animes:
predictions.append((anime_id, model.predict(user_id, anime_id).est))
predictions.sort(key=lambda x: x[1], reverse=True)
top_n = predictions[:n]
top_n = [i[0] for i in top_n]
return top_n
# create recommendations
recommended_animes = get_top_n(user_no)
recommended_animes
# from list to dataframe
genre_lists, rating_lists = [], []
seen_names = set()
for name in recommended_animes:
if name not in seen_names:
matched_rows = df[df["name"] == name]
genres = list(matched_rows["genre"])
ratings = list(matched_rows["user_rating"])
genre_lists.append(genres)
rating_lists.append(ratings)
seen_names.add(name)
type_list = []
# printing the results
for i, genres in enumerate(genre_lists):
type_list.append(genres[0])
rating_list = []
for i, ratings in enumerate(rating_lists):
rating_list.append(ratings[0])
recom_data = {
"Anime Name": recommended_animes,
"Rating": rating_list,
"Genre": type_list,
}
df_rec = pd.DataFrame(recom_data)
df_rec.head(10)
#
# ## Hybrid (K-Means + SVD) Recommender
from surprise import SVD
from surprise import Dataset, Reader
from surprise.model_selection import train_test_split
start_time = time.time()
# create a reader
reader = Reader(rating_scale=(1, 10))
# get clusters
df_hybrid = df[df.cluster == const_cluster_no].copy()
# create data
data = Dataset.load_from_df(df_hybrid[["user_id", "name", "user_rating"]], reader)
# split data
trainset, testset = train_test_split(
data,
test_size=0.25,
)
# train SVD model
model = SVD(n_factors=100, n_epochs=20, lr_all=0.005, reg_all=0.02)
model.fit(trainset)
# create predictions
predictions = model.test(testset)
end_time = time.time()
elapsed_time = end_time - start_time
print("process time: ", elapsed_time, " sec.")
# performance metrics
accuracy.rmse(predictions)
# create recommendations
recommended_animes = get_top_n(user_no)
recommended_animes
# from list to dataframe
genre_lists, rating_lists = [], []
seen_names = set()
for name in recommended_animes:
if name not in seen_names:
matched_rows = df[df["name"] == name]
genres = list(matched_rows["genre"])
ratings = list(matched_rows["user_rating"])
genre_lists.append(genres)
rating_lists.append(ratings)
seen_names.add(name)
type_list = []
# printing the results
for i, genres in enumerate(genre_lists):
type_list.append(genres[0])
rating_list = []
for i, ratings in enumerate(rating_lists):
rating_list.append(ratings[0])
recom_data = {
"Anime Name": recommended_animes,
"Rating": rating_list,
"Genre": type_list,
}
df_rec = pd.DataFrame(recom_data)
df_rec.head(10)
|
# PROBLEM:
# The task at hand is to build a Hindi to English machine translation system from scratch using only the given data. We are allowed to use non-contextualised word embeddings.
# I have chosen FastText embeddings since Hindi is a morphologically rich language. However, the default FastText vectors appeared deficient on inspection so I downloaded FastText embeddings trained by independent researchers on a much larger dataset.
# Unlike
# ARCHITECTURE
# 1. Hindi embedding layer: Converts a given text string into a tensor of stacked embedding vectors. I plan for this to be non-trainable to enable faster training (a small sketch of such a frozen layer follows these notes).
# 2.
# The English embeddings were initialised using their GloVe word vectors.
# Approach 1:
# Build Hindi vocabulary and initialise Hindi embedding layer with those words, train it further
# Approach 2:
# Use Fasttext directly
# REFERENCES:
# 1. https://www.youtube.com/watch?v=wzfWHP6SXxY
# 2.
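# A minimal sketch, not the original code, of the non-trainable Hindi embedding layer described in the
# architecture notes above; `pretrained_matrix` is a random placeholder standing in for the stacked FastText vectors.
import torch
import torch.nn as nn
pretrained_matrix = torch.randn(30000, 50)  # hypothetical (vocab_size, embedding_dim) matrix
frozen_hindi_embedding = nn.Embedding.from_pretrained(pretrained_matrix, freeze=True)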
from google.colab import drive
drive.mount("/content/gdrive")
import gensim
import os
import spacy
import re
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm.auto import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence
from torch.nn import Transformer
from torch import Tensor
import math
import random
from torch import optim
import torchtext.vocab as vocab
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
df = pd.read_csv(
"/content/gdrive/My Drive/input_data/eng_Hindi_data_train.csv", header=None
)
# Exploratory analysis of train data
df.head()
from collections import Counter
def process_chunk(chunk):
all_text = " ".join(chunk.iloc[:, 1])
words = all_text.split()
word_counts = Counter(words)
return word_counts
file_path = "/content/gdrive/My Drive/input_data/eng_Hindi_data_train.csv"
chunk_size = 10000
all_word_counts = Counter()
for chunk in pd.read_csv(file_path, chunksize=chunk_size):
chunk_word_counts = process_chunk(chunk)
all_word_counts.update(chunk_word_counts)
most_common_words = all_word_counts.most_common(10)
print(most_common_words)
len(all_word_counts)
def process_chunk(chunk):
all_text = " ".join(chunk.iloc[:, 0])
words = all_text.split()
word_counts = Counter(words)
return word_counts
file_path = "/content/gdrive/My Drive/input_data/eng_Hindi_data_train.csv"
chunk_size = 10000
all_word_counts = Counter()
for chunk in pd.read_csv(file_path, chunksize=chunk_size):
chunk_word_counts = process_chunk(chunk)
all_word_counts.update(chunk_word_counts)
most_common_words = all_word_counts.most_common(10)
print(most_common_words)
lengths = [len(sentence.split()) for sentence in df.iloc[:, 0]]
plt.hist(lengths, bins=20, range=(0, 140))
plt.grid()
# LOADING HINDI EMBEDDINGS
# Fasttext
# Non contextual embeddings from:
# https://www.cse.iitb.ac.in/~pb/papers/sltu-ccurl20-il-we.pdf
# TO RUN FOR INFERENCE
from gensim.models import FastText
model_path = "/kaggle/input/d/arqamp/fasttext/fasttext/hi-d50-m2-fasttext.model"
embed = FastText.load(model_path)
torch.tensor(embed.wv.get_vector("कहाँ")).to(device)
# PREPROCESSING FOR ENGLISH:
# 1. Creating vocabulary
# 2. Function for converting sentences to lists of vocabulary indices
# 3. Creating embedding matrix corresponding to vocabulary
max_len = 64
from spacy.lang.en import English
nlp = English()
tokenizer = nlp.tokenizer
en_list = df.iloc[:, 0].tolist()
len(en_list)
# TO RUN FOR INFERENCE AND TRAINING
class Vocabulary:
def __init__(self, sentences):
self.no_tokens = 3
# Dictionaries
self.index_to_token = {
0: "<s>",
1: "</s>",
2: "<pad>",
} # Key: index, Value: token
self.token_to_index = {} # Key: token, Value: index
self.frequency = {} # Key: index, Value: frequency
for sentence in sentences:
self.process_sentence(sentence)
# Method to add a single token to vocabulary
def add_token(self, token):
if token not in self.token_to_index:
self.token_to_index[token] = self.no_tokens
self.frequency[token] = 1
self.index_to_token[self.no_tokens] = token
self.no_tokens += 1
else:
self.frequency[token] += 1
# Method for processing sentences
def process_sentence(self, sentence):
for token in tokenizer(sentence.lower()):
self.add_token(token.text)
sentences = ["This is a sentence.", "This is another sentence."]
test_vocab = Vocabulary(sentences)
# Print vocabulary stats
print("Vocabulary:", test_vocab.index_to_token)
print("Vocabulary size:", test_vocab.no_tokens)
print("Token frequencies:", test_vocab.frequency)
en_vocab = Vocabulary(en_list)
print("Vocabulary size:", en_vocab.no_tokens)
torch.save(en_vocab, "/content/gdrive/My Drive/vocab.pth")
# TO RUN FOR INFERENCE & TRAINING
en_vocab = torch.load("/kaggle/input/en-vocab/vocab.pth")
# Creating embedding matrix corresponding to vocabulary
# Define the vocabulary size and the embedding size
vocab_size = en_vocab.no_tokens
embedding_size = 50
# Load GloVe embeddings
glove = vocab.GloVe(name="6B", dim=embedding_size)
# Create an embedding matrix with the pre-trained embeddings
embedding_matrix = torch.zeros((vocab_size, embedding_size))
for i in range(vocab_size):
if glove.stoi.get(en_vocab.index_to_token[i]) is not None:
embedding_matrix[i] = glove.vectors[glove.stoi[en_vocab.index_to_token[i]]]
## URGENT: Maybe randomly initialise start and stop token representations (learn it?)
embedding_matrix.shape
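# Acting on the URGENT note above (an assumption, not the author's final choice): give the <s> and </s>
# rows small random vectors instead of all-zero rows before the matrix is saved.
for special_idx in (0, 1):  # 0 = <s>, 1 = </s> in the Vocabulary class
    embedding_matrix[special_idx] = torch.randn(embedding_size) * 0.1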
torch.save(embedding_matrix, "/content/gdrive/My Drive/emb_mtx.pth")
# TO RUN FOR TRAINING & INFERENCE
embedding_matrix = torch.load("/kaggle/input/emb-mtx/emb_mtx.pth")
embedding_matrix.to(device)
# check whether the embedding matrix is in the correct order
glove.vectors[glove.stoi["how"]]
embedding_matrix[en_vocab.token_to_index["how"]]
# Sentence to indexlists conversion for dataloader
def sentence_to_indices(sentence):
tok = tokenizer(sentence.lower())
if len(tok) >= max_len - 2:
tok = tok[1 : max_len - 1]
output = [0]
output.extend([en_vocab.token_to_index[i.text] for i in tok])
output.append(1)
return output
sentence_to_indices("how are you")
indices_tensors_list = [torch.tensor(sentence_to_indices(s)) for s in en_list]
padded_tensors = pad_sequence(indices_tensors_list, batch_first=True, padding_value=2)
padded_tensors.shape
# TO RUN FOR INFERENCE
def indices_to_sentence(indices):
output = " ".join([en_vocab.index_to_token[i] for i in indices])
return output
#
# PREPROCESS HINDI DATA
# TO RUN FOR INFERENCE
# Simple rule based tokeniser for Hindi
def hindi_tokenize(text):
patterns = [r",|\-", r"\s+", r"[^\w\s]", r"\d+", r"[\u0900-\u097F]+"]
token_regex = "|".join(patterns)
return [token for token in re.findall(token_regex, text) if not token.isspace()]
# Example usage
text = "चलो देखें इसमें कितना डैम है, है की नहीं - अब तो पता चल ही जाएगा "
hindi_tokenize(text)
# TO RUN FOR INFERENCE
# positional embedding
def positional_embedding(pos, d_model=50):
# pos: the position of the word in the sentence
# d_model: the dimension of the FastText embeddings
pe = torch.zeros(d_model)
for i in range(0, d_model, 2):
pe[i] = np.sin(pos / (10000 ** ((2 * i) / d_model)))
pe[i + 1] = np.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
return pe
# TO RUN FOR INFERENCE
# no padding, for normal input
def embed_hindi_ft(sentence):
tok = hindi_tokenize(sentence) # tokenization
tok1 = ["<s>"] # SOS
tok1.extend(tok)
tok1.append("</s>") # EOS
output = []
for i in range(len(tok1)):
pos_word_embedding = torch.tensor(
embed.wv.get_vector(tok1[i])
) + positional_embedding(i)
output.append(pos_word_embedding)
return torch.stack(output).to(device)
# with padding for training data
def embed_hindi_ft_pad(sentence):
tok = hindi_tokenize(sentence) # tokenization
if len(tok) >= max_len - 2:
tok = tok[1 : max_len - 1]
tok1 = ["<s>"] # SOS
tok1.extend(tok)
tok1.append("</s>") # EOS
for i in range(max_len - len(tok1)):
tok1.append("<pad>")
output = []
for i in range(len(tok1)):
pos_word_embedding = torch.tensor(
embed.wv.get_vector(tok1[i])
) + positional_embedding(i)
output.append(pos_word_embedding)
return torch.stack(output)
embed_hindi_ft_pad("देखते हैं कितना सही है,").shape
hi_list = df.iloc[:, 1].tolist()
embed_list = []
for i in tqdm(hi_list):
embed_list.append(embed_hindi_ft_pad(i))
len(embed_list)
embed_list[0].shape
# torch.save(embed_list, "/content/gdrive/My Drive/embed_list")
embed_tensor = torch.stack(embed_list)
embed_tensor.shape
dataset = TensorDataset(embed_tensor, padded_tensors)
# create DataLoader from dataset
batch_size = 32 # set batch size
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# torch.save(dataloader, "/content/gdrive/My Drive/NMT_train_loader")
# TO RUN FOR TRAINING
dataloader = torch.load("/kaggle/input/nmt-train-loader/NMT_train_loader")
for i, batch in enumerate(dataloader):
# Move tensors to device
batch[0].to(device)
batch[1].to(device)
# MODEL
class Encoder(nn.Module):
def __init__(self, hidden_size=512, num_layers=4, embedding_dim=50):
super().__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.rnn = nn.LSTM(embedding_dim, hidden_size, num_layers, batch_first=True)
def forward(self, input, batched=True):
if not batched:
embedded = embed_hindi_ft(input).unsqueeze(0).to(device)
else:
embedded = input.to(device)
        if batched:
            # infer the batch size from the input so the final, smaller batch also works
            batch_size = embedded.shape[0]
            h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device)
            c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device)
else:
# h0 = torch.zeros(self.num_layers, self.hidden_size).unsqueeze(1).to(device)
h0 = torch.zeros(self.num_layers, 1, self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, 1, self.hidden_size).to(device)
output, (hn, cn) = self.rnn(embedded.to(device), (h0.to(device), c0.to(device)))
return hn, cn
# if not batched:
# hn = hn.squeeze(1)
# cn = cn.squeeze(1)
# output, hidden = self.rnn(embedded, hidden)
# output.shape = (1, batch_size, hidden_size) , (seq_len, batch_size, hidden_size)
# hidden.shape = (num_layers, batch_size, hidden_size)
# final_hidden = hn[-1, :]
# def init_hidden(self, batch_size =32):
# hidden = torch.zeros(self.num_layers, batch_size, self.hidden_size) # (num_layers, batch_size, hidden_dim)
# return hidden.to(device)
enc = Encoder().to(device)
enc("क्या हुआ जो लाड़ी छूटी", batched=False)[0].shape
class Decoder(nn.Module):
def __init__(
self,
hidden_size=512,
num_layers=4,
output_size=en_vocab.no_tokens,
tgt_embedding_matrix=embedding_matrix,
):
super().__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = num_layers
self.embedding = nn.Embedding.from_pretrained(tgt_embedding_matrix)
self.rnn = nn.LSTM(50, hidden_size, num_layers)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, x, hn, cn, batched=False):
if batched:
x = x.unsqueeze(0)
output = self.embedding(x).to(device) # (1, batch_size, hidden_size)
output, (hn, cn) = self.rnn(
output.to(device), (hn.to(device), cn.to(device))
)
predictions = self.out(output).to(device)
predictions.squeeze(0)
# output : (1, batch_size, hidden_size)
# output = self.out(output[0]) # (batch_size, output_size)
return predictions, hn, cn
else:
x = x.unsqueeze(0)
output = self.embedding(x).unsqueeze(1).to(device)
output, (hn, cn) = self.rnn(output, (hn.to(device), cn.to(device)))
predictions = self.out(output)
return predictions, hn, cn
# def init_hidden(self):
# h0 = torch.zeros(self.num_layers, 32, self.hidden_size)
# c0 = torch.zeros(self.num_layers, 32, self.hidden_size)
# return (h0, c0)
class NMTModel(nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, input, tgt_vocab=en_vocab, tf_ratio=0.5, batched=True):
        tgt_vocab_size = tgt_vocab.no_tokens
        if batched:
            tgt_seq = input[1].to(device)
            batch_size = tgt_seq.shape[0]
            # collect the decoder logits for every timestep: (seq_len, batch, vocab)
            outputs = torch.zeros(64, batch_size, tgt_vocab_size).to(device)
            hn, cn = self.encoder(input[0], batched=batched)
            x = tgt_seq[:, 0]  # the <s> token of every sequence in the batch
            for t in range(1, 64):
                output, hn, cn = self.decoder(x, hn, cn, batched=batched)
                logits = output.squeeze(0)  # (batch, vocab)
                outputs[t] = logits
                guess = logits.argmax(1)
                # teacher forcing: feed the gold token with probability tf_ratio
                x = tgt_seq[:, t] if random.random() < tf_ratio else guess
            return outputs
else:
outputs = [0]
x = torch.tensor(0).to(device)
hn, cn = self.encoder(input, batched=batched)
# hn = hn.squeeze(1)
# cn = cn.squeeze(1)
for t in range(1, 64):
output, hn, cn = self.decoder(x, hn, cn, batched=batched)
# hn = hn.squeeze(1)
# cn = cn.squeeze(1)
guess = torch.argmax(output)
x = guess.to(device)
outputs.append(x.item())
if guess.item() == 1:
break
return indices_to_sentence(outputs)
num_epochs = 20
lr = 0.001
enc = Encoder().to(device)
dec = Decoder().to(device)
model = NMTModel(enc, dec).to(device)
model("क्या हुआ जहां लादी छुट्टी", batched=False)
def train(model, dataloader, optimizer, criterion, epochs):
for epoch in range(epochs):
epoch_loss = 0.0
model.train()
for i, batch in enumerate(tqdm(dataloader)):
            src = batch
            # transpose the targets to (seq_len, batch) so they line up with the decoder outputs
            tgt = batch[1].t().to(device)
            optimizer.zero_grad()
            try:
                output = model(src)
            except RuntimeError:
                continue
            output = output[1:].reshape(-1, output.shape[2])
            tgt = tgt[1:].reshape(-1)
loss = criterion(output, tgt)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
if (i + 1) % 100 == 0:
print(
"Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}".format(
epoch + 1, epochs, i + 1, len(dataloader), loss.item()
)
)
print(
"Epoch [{}/{}], Loss: {:.4f}".format(
epoch + 1, epochs, epoch_loss / len(dataloader)
)
)
epochs = 10
criterion = nn.CrossEntropyLoss(ignore_index=2)  # index 2 is the <pad> token
optimizer = optim.Adam(model.parameters(), lr=0.001)
train(model, dataloader, optimizer, criterion, epochs)
j = 0
for batch in dataloader:
print(model(batch))
j = j + 1
# print(batch[1])
if j == 1:
break
|
import os
import cv2
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.optimizers import Adam
from keras.models import Sequential
from sklearn.metrics import classification_report, confusion_matrix
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
categories = os.listdir("../input/food20dataset/food20dataset/test_set")
img_size = 256
def get_data(data_dir):
data = []
for category in categories:
path = os.path.join(data_dir, category)
class_num = categories.index(category)
for img in os.listdir(path):
try:
img_arr = cv2.imread(os.path.join(path, img))
resized_arr = cv2.resize(img_arr, (img_size, img_size))
data.append([resized_arr, class_num])
except Exception as e:
print(e)
return np.array(data)
train = get_data("../input/food20dataset/food20dataset/train_set")
val = get_data("../input/food20dataset/food20dataset/test_set")
x_train = []
y_train = []
x_val = []
y_val = []
for feature, label in train:
x_train.append(feature)
y_train.append(label)
for feature, label in val:
x_val.append(feature)
y_val.append(label)
x_train = np.array(x_train) / 255
x_val = np.array(x_val) / 255
x_train.reshape(-1, img_size, img_size, 1)
y_train = np.array(y_train)
x_val.reshape(-1, img_size, img_size, 1)
y_val = np.array(y_val)
model = Sequential()
model.add(Conv2D(128, 3, padding="same", activation="relu", input_shape=(256, 256, 3)))
model.add(MaxPool2D())
model.add(Conv2D(32, 3, padding="same", activation="relu"))
model.add(MaxPool2D())
model.add(Conv2D(64, 3, padding="same", activation="relu"))
model.add(MaxPool2D())
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dense(20, activation="softmax"))
model.summary()
opt = Adam(learning_rate=0.0001)
model.compile(
    optimizer=opt,
    # the Dense(20, activation="softmax") head outputs probabilities, not logits
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=["accuracy"],
)
history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val))
epochs_range = range(10)
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
plt.rc("xtick", labelsize=10)
plt.rc("ytick", labelsize=10)
ax1.plot(epochs_range, acc, label="Training Accuracy", c="blue", linewidth=4)
ax1.plot(epochs_range, val_acc, label="Validation Accuracy", c="red", linewidth=4)
ax1.legend()
ax1.set_title("Training and Validation Accuracy")
ax1.set_xlabel("Epochs", fontsize=10)
ax1.set_ylabel("Accuracy", fontsize=10)
ax2.plot(epochs_range, loss, label="Training Loss", c="green", linewidth=4)
ax2.plot(epochs_range, val_loss, label="Validation Loss", c="orange", linewidth=4)
ax2.legend()
ax2.set_title("Training and Validation Loss")
ax2.set_xlabel("Epochs", fontsize=10)
ax2.set_ylabel("Loss", fontsize=10)
fig.tight_layout(pad=3.0)
plt.show()
# predict_classes is not available in newer Keras versions, so take the argmax of the probabilities
predictions = np.argmax(model.predict(x_val), axis=1)
print(classification_report(y_val, predictions, target_names=categories))
cm1 = confusion_matrix(y_val, predictions)
df_cm = pd.DataFrame(
cm1, index=[i for i in categories], columns=[i for i in categories]
)
plt.figure(figsize=(12, 4))
import seaborn as ns
ns.heatmap(df_cm, annot=True, cmap="RdPu")
# # ***Using Transfer Learning for improvement***
base_model = tf.keras.applications.MobileNetV2(
input_shape=(256, 256, 3), include_top=False, weights="imagenet"
)
base_model.trainable = False
model = tf.keras.Sequential(
[
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(20, activation="softmax"),
]
)
model.summary()
base_learning_rate = 0.1
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
    # the softmax head outputs probabilities, not logits
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=["accuracy"],
)
history1 = model.fit(x_train, y_train, epochs=4, validation_data=(x_val, y_val))
epochs_range = range(4)
acc = history1.history["accuracy"]
val_acc = history1.history["val_accuracy"]
loss = history1.history["loss"]
val_loss = history1.history["val_loss"]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
plt.rc("xtick", labelsize=10)
plt.rc("ytick", labelsize=10)
ax1.plot(epochs_range, acc, label="Training Accuracy", c="blue", linewidth=4)
ax1.plot(epochs_range, val_acc, label="Validation Accuracy", c="red", linewidth=4)
ax1.legend()
ax1.set_title("Training and Validation Accuracy")
ax1.set_xlabel("Epochs", fontsize=10)
ax1.set_ylabel("Accuracy", fontsize=10)
ax2.plot(epochs_range, loss, label="Training Loss", c="green", linewidth=4)
ax2.plot(epochs_range, val_loss, label="Validation Loss", c="orange", linewidth=4)
ax2.legend()
ax2.set_title("Training and Validation Loss")
ax2.set_xlabel("Epochs", fontsize=10)
ax2.set_ylabel("Loss", fontsize=10)
fig.tight_layout(pad=3.0)
plt.show()
prediction = np.argmax(model.predict(x_val), axis=1)
print(classification_report(y_val, prediction, target_names=categories))
cm1 = confusion_matrix(y_val, prediction)
df_cm = pd.DataFrame(
cm1, index=[i for i in categories], columns=[i for i in categories]
)
plt.figure(figsize=(12, 4))
import seaborn as ns
ns.heatmap(df_cm, annot=True, cmap="RdPu")
|
# ## Imports
import torch
from torch import nn
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
torch.__version__
# ## Uploading .CSV File and Defining and Analysing Data
# upload csv on to sample_data or to the folder using the below commands:
# `from google.colab import files`->
# `uploaded = files.upload()`
device = "cuda" if torch.cuda.is_available() else "cpu"
df = pd.read_csv("/kaggle/input/weatherhistory/weatherHistory.csv")
df.head(5)
# describe the data
df.describe()
df.info()
# ## Define the Dependent and Independent Variables for Linear Regression
# The dependent variable relies on the independent variable: we expect the independent variable to have an impact on the dependent one.
# The model takes the form y = aX + b.
# Here y is the dependent variable because it relies on X, the independent variable. You can read more about regression analysis for background.
#
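# A quick illustration of y = aX + b in PyTorch terms: an nn.Linear(1, 1) layer stores a as its weight
# and b as its bias. The layer below is untrained and randomly initialised, shown only for the mapping.
example_layer = nn.Linear(in_features=1, out_features=1)
a, b = example_layer.weight.item(), example_layer.bias.item()
print(f"y = {a:.3f} * X + {b:.3f}")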
X = df["Temperature (C)"].values # Independent
y = df["Apparent Temperature (C)"].values # dependent
X = torch.from_numpy(X).unsqueeze(1)
y = torch.from_numpy(y).unsqueeze(1)
# the model weights are float32 while the CSV values load as float64, so cast explicitly
X, y = X.to(torch.float32), y.to(torch.float32)
X, y
# ## Create A Training and Test Split
train_split = int(0.8 * len(X)) # 80%-20% split
X_train = X[:train_split]
y_train = y[:train_split]
X_test = X[train_split:]
y_test = y[train_split:]
len(X_train), len(y_train), len(X_test), len(y_test)
# ## Plotting the Training and Test Splits
def plot_predictions(
train_data=X_train,
train_label=y_train,
test_data=X_test,
test_label=y_test,
predictions=None,
):
plt.figure(figsize=(10, 10))
plt.scatter(train_data, train_label, c="b", s=3, label="Training Data")
plt.scatter(test_data, test_label, c="r", s=3, label="Testing Data")
if predictions is not None:
plt.scatter(test_data, predictions, c="g", s=3, label="Predictions")
plt.legend(prop={"size": 14})
plt.xlabel("Temperature (C)")
plt.ylabel("Apparent Temperature (C)")
plot_predictions()
# ## Linear Regression Model:
# we can use SGD or Adam as optimizer
from os import device_encoding
class LinearRegression(nn.Module):
def __init__(self):
super().__init__()
self.linear_layer = nn.Linear(in_features=1, out_features=1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.linear_layer(x)
torch.manual_seed(42)
model = LinearRegression()
model.state_dict()
loss_fn = nn.L1Loss()
optimizer = torch.optim.Adam(params=model.parameters(), lr=0.01)
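# The model section above notes that either SGD or Adam can be used; the SGD alternative would look
# like this (kept commented so the Adam choice above stays in effect):
# optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01)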
# ## Training Loop
epochs = 300
epoch_count = []
loss_values = []
test_loss_values = []
for epoch in range(epochs):
model.train()
y_pred = model(X_train)
loss = loss_fn(y_pred, y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Testing
model.eval()
with torch.inference_mode():
test_pred = model(X_test)
test_loss = loss_fn(test_pred, y_test)
    if epoch % 10 == 0:
        epoch_count.append(epoch)
        loss_values.append(loss.item())
        test_loss_values.append(test_loss.item())
        print(f"Epoch: {epoch} | Loss: {loss.item():.4f} | Test Loss: {test_loss.item():.4f}")
# As we can see above, the loss and test loss are both decreasing towards zero, so the model is learning. Now let's visualize it.
with torch.inference_mode():
y_pred_new = model(X_test)
plot_predictions(predictions=y_pred_new)
plt.plot(epoch_count, np.array(torch.tensor(loss_values).numpy()), label="Train Loss")
plt.plot(
epoch_count, np.array(torch.tensor(test_loss_values).numpy()), label="Test Loss"
)
plt.title("Training and Test loss Curves")
plt.ylabel("Loss")
plt.xlabel("Epochs")
plt.legend(prop={"size": 14})
# ## Saving and Loading
# Saving:
from pathlib import Path
# 1. Create models directory:
Model_Path = Path("models") # directory name
Model_Path.mkdir(parents=True, exist_ok=True)
# 2. Create model save path
Model_Name = "LR_on_Weather_data_0.pth"
Model_Save_Path = Model_Path / Model_Name
# 3. Save model state dict
print(f"saving....to: {Model_Save_Path} ")
torch.save(obj=model.state_dict(), f=Model_Save_Path)
# To load the saved state_dict, we instantiate a new instance of our model class
loaded_model_0 = LinearRegression()
loaded_model_0.load_state_dict(torch.load(Model_Save_Path))
loaded_model_0.eval()
with torch.inference_mode():
loaded_model_0_preds = loaded_model_0(X_test)
loaded_model_0_preds
model.eval()
with torch.inference_mode():
y_preds = model(X_test)
y_preds == loaded_model_0_preds
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
BASE_PATH = "/kaggle/input/house-prices-advanced-regression-techniques/"
TRAIN_PATH = os.path.join(BASE_PATH, "train.csv")
TEST_PATH = os.path.join(BASE_PATH, "test.csv")
train_df = pd.read_csv(TRAIN_PATH)
train_df = train_df.set_index("Id")
category_cols = [
"MSSubClass",
"MSZoning",
"Street",
"Alley",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"BsmtFullBath",
"BsmtHalfBath",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"KitchenAbvGr",
"KitchenQual",
"TotRmsAbvGrd",
"Functional",
"Fireplaces",
"FireplaceQu",
"GarageType",
"GarageYrBlt",
"GarageFinish",
"GarageCars",
"GarageQual",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
]
numeric_cols = [
"LotFrontage",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
]
target_col = "SalePrice"
train_df.info()
import math
def transform_categorical(df):
for c in category_cols:
df[c] = pd.Categorical(df[c])
return df
def transform_date(df):
df["DateSold"] = df["YrSold"].astype(str) + "-" + df["MoSold"].astype(str)
df["DateSold"] = pd.to_datetime(df["DateSold"])
remove_cols = ["YrSold", "MoSold"]
df = df.drop(remove_cols, axis=1)
return df
def manage_nan(df):
nan_cols = [
"LotFrontage",
"MasVnrArea",
"Alley",
"MasVnrType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Electrical",
"FireplaceQu",
"GarageType",
"GarageYrBlt",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]
for n in nan_cols:
if n in category_cols:
df.loc[df[n].isna(), n] = "N/A"
else:
df.loc[df[n].isna(), n] = 0
return df
def group_years(df):
cols = ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]
for c in cols:
start = int(min(df[c]) / 10) * 10
end = math.ceil(max(df[c]) / 10) * 10
bins = [start + 10 * i for i in range((end - start) // 10 + 1)]
df[c] = pd.cut(df[c], bins).values.add_categories("N/A")
return df
def data_cleaning(df):
df = group_years(df)
df = manage_nan(df)
df = transform_categorical(df)
df = transform_date(df)
df = hot_encoding(df)
return df
def hot_encoding(df):
from sklearn.preprocessing import OneHotEncoder
group_cols = ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]
for c in group_cols:
df[c] = df[c].astype(str)
ohe = OneHotEncoder(drop="first")
data = ohe.fit_transform(df[category_cols])
final_df = pd.DataFrame(
data.toarray(), index=df.index, columns=ohe.get_feature_names_out()
)
merge_df = pd.merge(df, final_df, left_index=True, right_index=True, how="inner")
merge_df = merge_df.drop(category_cols, axis=1)
return merge_df
df = train_df.copy()
df = data_cleaning(df)
df.info()
import matplotlib.pyplot as plt
from pandas.api.types import is_numeric_dtype, is_categorical_dtype
def plot_column(series):
print("count", len(series), "nan", series.isna().sum())
fig = plt.figure(figsize=(17, 6))
if is_numeric_dtype(series):
hist = fig.add_subplot(131)
hist = series.plot(kind="hist", grid=False)
d = series.describe()
hist.axvline(x=d["mean"], color="r", linestyle="--", lw=2)
hist.axvline(x=d["50%"], color="g", linestyle="--", lw=2)
hist.axvline(x=d["mean"] - d["std"], color="b", linestyle="--", lw=1)
hist.axvline(x=d["mean"] + d["std"], color="b", linestyle="--", lw=1)
hist.axvline(x=d["mean"] - 2 * d["std"], color="b", linestyle="--", lw=2)
hist.axvline(x=d["mean"] + 2 * d["std"], color="b", linestyle="--", lw=2)
box = fig.add_subplot(132)
box = series.plot(kind="box", grid=False)
scatter = fig.add_subplot(133)
def get_correlation(series1, series2, ax):
from sklearn.linear_model import LinearRegression
from matplotlib.offsetbox import AnchoredText
corr = series1.corr(series2)
linear = LinearRegression()
linear.fit(series1.values.reshape(-1, 1), series2.values.reshape(-1, 1))
ax.scatter(series1, series2)
ax.plot(series1, linear.predict(series1.values.reshape(-1, 1)), color="red")
ax.axvline(x=0, ymin=-1, ymax=1, linestyle="dashed", color="gray")
ax.axhline(y=0, xmin=-1, xmax=1, linestyle="dashed", color="gray")
at = AnchoredText(
f"{corr:.0%}",
prop=dict(size="large"),
frameon=True,
loc="lower right",
)
at.patch.set_boxstyle("square, pad=0.0")
ax.add_artist(at)
get_correlation(series, train_df[target_col], scatter)
scatter = plt.scatter(series, train_df[target_col], alpha=0.2)
print("skew", round(series.skew(), 2))
elif is_categorical_dtype(series):
ax = series.value_counts().plot(kind="bar", grid=False)
for container in ax.containers:
ax.bar_label(container)
corr_df = df.corr()
# print(corr_df.head())
# import seaborn as sns
# sns.heatmap(corr_df, vmax=.8, square=True)
for idx1, row in corr_df.iterrows():
    correlated = row[(row > 0.8) & (row < 1)]
    if len(correlated) > 0:
        for idx2, cor_row in correlated.items():
            fig = plt.figure(figsize=(17, 6))
            # merge_df is local to hot_encoding, so plot from the cleaned df instead
            plt.scatter(df[idx1], df[idx2])
            plt.xlabel(idx1)
            plt.ylabel(idx2)
            break
        break
corr_df = df.corr()
target_corr = corr_df[target_col].sort_values()
print(target_corr)
numeric_cols
log_cols = ["LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtFinSF2"]
log = 0
idx = 0
for c in numeric_cols:
idx += 1
if idx < 6:
continue
print(c)
if log:
plot_column(np.log1p(df[c]))
else:
plot_column(df[c])
break
a = df.sort_values(by="DateSold")
a = a.groupby("DateSold").mean().reset_index()
plt.plot(a["DateSold"], a[target_col])
from sklearn.feature_selection import (
SelectKBest,
f_regression,
r_regression,
mutual_info_regression,
RFE,
)
def univariate_selection():
score_functions = {
"f_regr": f_regression,
"r_regr": r_regression,
"m_regr": mutual_info_regression,
}
X = df.drop([target_col, "DateSold"], axis=1)
y = df[[target_col]]
score_df = pd.DataFrame([[x] for x in X.columns], columns=["feature"])
for score_f in score_functions:
kbest_model = SelectKBest(score_func=score_functions[score_f], k="all")
fit = kbest_model.fit(X, y)
score_df[score_f] = fit.scores_
score_df = score_df.set_index("feature")
for col in score_df.columns:
if "r_regr" in col:
score_df[col] = abs(score_df[col])
score_df[col] = score_df[col].rank()
describe_df = score_df.apply(pd.DataFrame.describe, axis=1)
describe_df = describe_df[describe_df["25%"] <= len(describe_df) // 2]
return list(describe_df.index)
from sklearn.linear_model import LinearRegression, SGDRegressor, BayesianRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.feature_selection import RFE
def rfe_selection():
models = {
"lin_regr": LinearRegression,
"tree_regr": DecisionTreeRegressor,
"sgd_regr": SGDRegressor,
"ridge_regr": BayesianRidge,
}
X = df.drop([target_col, "DateSold"], axis=1)
y = df[[target_col]]
score_df = pd.DataFrame([[x] for x in X.columns], columns=["feature"])
for m in models:
model = models[m]()
rfe = RFE(model, n_features_to_select=1)
fit = rfe.fit(X, y)
score_df[m] = fit.ranking_
score_df = score_df.set_index("feature")
describe_df = score_df.apply(pd.DataFrame.describe, axis=1)
describe_df = describe_df[describe_df["25%"] <= len(describe_df) // 2]
return list(describe_df.index)
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet, LassoCV
from sklearn.svm import SVR
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.neural_network import MLPRegressor
import warnings
warnings.filterwarnings("ignore")
def rmse_cv(model, X, y, cv):
rmse = np.sqrt(
-cross_val_score(model, X, np.log(y), scoring="neg_mean_squared_error", cv=cv)
)
return rmse
def log_rmse(pred, actual, log):
if log:
return np.mean((pred - actual) ** 2) ** 0.5
else:
return np.mean((np.log(pred) - np.log(actual)) ** 2) ** 0.5
X = df.drop([target_col, "DateSold"], axis=1)
y = df[[target_col]]
feature_selection = False
log = True
scaler = True
pca = False
if feature_selection:
univariate_list = univariate_selection()
rfe_list = rfe_selection()
X = X[list(set(univariate_list + rfe_list))]
if log:
# LOG Features
for c in numeric_cols:
if c in X.columns:
X[c] = np.log1p(X[c])
# LOG Target
y = np.log1p(y)
if scaler:
# Scale Features
scaled_features = RobustScaler().fit_transform(X.values)
X = pd.DataFrame(scaled_features, index=X.index, columns=X.columns)
if pca:
comp = len(X.columns) // 2
p_ = PCA(n_components=comp)
idx = X.index
cols = [f"featurePCA{i + 1}" for i in range(comp)]
X = p_.fit_transform(X)
X = pd.DataFrame(X, columns=cols)
X.index = idx
model = Ridge()
cv = KFold(n_splits=5, shuffle=True)
train_scores = []
test_scores = []
for train_index, test_index in cv.split(X):
X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
y_train, y_test = y.iloc[train_index, :], y.iloc[test_index, :]
model.fit(X_train, y_train)
y_fit = model.predict(X_train)
y_hat = model.predict(X_test)
# print(y_hat)
# plt.scatter(y_hat, y_test.values)
if len(y_hat) > 1:
train_scores.append(log_rmse(y_fit, y_train.values, log))
test_scores.append(log_rmse(y_hat, y_test.values, log))
else:
train_scores.append(log_rmse(y_fit, y_train, log)[0])
test_scores.append(log_rmse(y_hat, y_test, log)[0])
break
print(test_scores)
print(
"TRAIN:", round(np.mean(train_scores), 4), "TEST:", round(np.mean(test_scores), 4)
)
# Hyper-parameter tuning
# Tutorials to get new ideas
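# A minimal sketch of the hyper-parameter tuning step mentioned above, run on the X and y prepared
# in the previous cell; the alpha grid is an assumption, not a value from the original notebook.
from sklearn.model_selection import GridSearchCV
param_grid = {"alpha": [0.1, 1.0, 10.0, 30.0, 100.0]}
grid = GridSearchCV(Ridge(), param_grid, scoring="neg_mean_squared_error", cv=5)
grid.fit(X, y)
print(grid.best_params_, "rmse:", np.sqrt(-grid.best_score_))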
## ONLY COMPUTES TEST SCORE ##
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
def rmse_cv(model, X, y, cv):
rmse = np.sqrt(
-cross_val_score(model, X, np.log(y), scoring="neg_mean_squared_error", cv=cv)
)
return rmse
def log_rmse(pred, actual):
return np.mean((np.log(pred) - np.log(actual)) ** 2) ** 0.5
X = df.drop([target_col, "DateSold"], axis=1)
# scaled_features = StandardScaler().fit_transform(df.values)
# scaled_features_df = pd.DataFrame(scaled_features, index=df.index, columns=df.columns)
y = df[[target_col]]
model = Ridge()
cv = KFold(n_splits=5, shuffle=True)
cv_score = rmse_cv(model, X, y, cv)
np.mean(cv_score)
|
import pandas as pd
import numpy as np
Train_data = pd.read_csv(
"/kaggle/input/grad-bunker-learning-hub-data-science-competition/train.csv"
)
# ## Data Understanding
# This section uses basic text-based EDA methods to gain a general understanding of the data before further visualization or processing. The methods used in this section are listed below; items 1-5 are run in the following cells and item 6 is run right after this list.
# 1. data.head
# 2. data.describe
# 3. data.dtypes
# 4. data.isnull().sum()
# 5. data.value_counts
# 6. data.info
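# Running data.info (item 6 above) here, since it is not called elsewhere in this notebook:
Train_data.info()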
Train_data.head()
Train_data.shape
Train_data.describe()
# The describe output reveals that the std is greater than the mean for review_requested, which indicates a non-standard (skewed) distribution.
Train_data.dtypes
# **Missing Values**
# - Columns with missing values: rating, canceled_by and discount have the highest share of missing data (more than 50%), so they can be considered non-significant attributes.
# - However, the developer will only fill in the 'drivers_tried' attribute, using the appropriate steps further below
# missing values
Train_data.isnull().sum()
# missing values in percentage
percent_missing = Train_data.isnull().sum() * 100 / len(Train_data)
percent_missing
# Value counts give a good idea of the skewness of the data. Below, we can see that the following columns have a perceivable skew based on this textual analysis:
# 1. status
# 2. review_requested
# 3. payment_type
# Further skew in other columns is possible, and these suspicions will be investigated in the visualization section
Train_data["status"].value_counts()
Train_data["review_requested"].value_counts()
Train_data["payment_type"].value_counts()
# Drop rating,canceled_by and discount
# 1. More than 50% of data of these columns are missing
Train_data = Train_data.drop("rating", axis=1)
Train_data = Train_data.drop("canceled_by", axis=1)
Train_data = Train_data.drop("discount", axis=1)
# dropping hashed_id since its unique for each case
Train_data = Train_data.drop("hashed_id", axis=1)
Train_data.isnull().sum()
# ### Categorical Missing Value will be impute by Mode
Train_data["drivers_tried"] = Train_data["drivers_tried"].fillna(
Train_data["drivers_tried"].mode()[0]
)
Train_data.isnull().sum()
Train_data.dtypes
# ## OLS Regression
# **Findings from OLS Regression:**
# - Target variable: estimated_fare
# - Adjusted R-squared = 0.129, which is not that high
# - Prob (F-statistic) = 0.00 < 0.05, so the model is significant
# - Coefficient of the constant: the intercept of the model is 0.1491
# - review_requested has a t-test p-value (P>|t|) above 0.05, so it is not a significant variable and can be dropped
# **Note: The developer will initially drop some attributes such as the IDs and encode a few necessary columns to perform the OLS regression; these deleted attributes will be added back later**
# Assigning new dataframe
data_ols = Train_data
# Encoding
data_ols["status"] = pd.factorize(data_ols["status"])[0]
data_ols["drivers_tried"] = pd.factorize(data_ols["drivers_tried"])[0]
# droping columns
data_ols = data_ols.drop("Unnamed: 0", axis=1)
data_ols = data_ols.drop("rider_id", axis=1)
data_ols = data_ols.drop("driver_id", axis=1)
data_ols.head(2)
# ### Assign Variable
# **To get inform about necessary attributes**
#
x = data_ols.drop("estimated_fare", axis=1)
y = data_ols["estimated_fare"]
import statsmodels.api as sm
X = sm.add_constant(x)
est = sm.OLS(y, X).fit()
print(est.summary())
# Target variable - estimated_fare
# Adj R^2 = 0.129, not that high
# Prob F-test = 0.00 < 0.05 - model is significant
# coef const - intercept of the model is 0.1491
# review_requested P>|t| more than 0.05 - not a significant variable, drop it
data_test = data_ols.drop("review_requested", axis=1)
data_test.head(2)
# ## Feature Selection with Correlation Heat map
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 5))
sns.heatmap(data_test.corr(), annot=True, linewidths=0.5)
# ### So the data_test attributes, together with Unnamed: 0, hashed_id, rider_id and driver_id, form the final cleaned dataset
# **Before combining anything, the developer will first check the model's accuracy using the train data.**
df_accuarcy = Train_data
df_accuarcy.head()
# Encoding
df_accuarcy["status"] = pd.factorize(df_accuarcy["status"])[0]
df_accuarcy["drivers_tried"] = pd.factorize(df_accuarcy["drivers_tried"])[0]
# droping column
df_accuarcy = df_accuarcy.drop("review_requested", axis=1)
df_accuarcy.head()
x = df_accuarcy.drop("estimated_fare", axis=1)
y = df_accuarcy["estimated_fare"]
x.shape
# ## Pre-processing
from sklearn import preprocessing
x = preprocessing.StandardScaler().fit(x).transform(x)
x.shape
x[0:5]
# Data Split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
# Linear Regression
from sklearn.linear_model import LinearRegression
Linear_Regression = LinearRegression()
# training a linear regression model on train
Linear_Regression.fit(x_train, y_train)
# Accuracy of the model
# Train Accuracy
print(
"Train Accuracy = ", (Linear_Regression.score(x_train, y_train).round(2)) * 100, "%"
)
# Test Accuracy....This is the one we consider when evaluating the model
print("Test Accuracy = ", (Linear_Regression.score(x_test, y_test).round(2)) * 100, "%")
# The train and test accuracies are quite satisfactory; both are above 80%
# predicting on X_test
y_pred = Linear_Regression.predict(x_test)
pd.DataFrame({"Actual": y_test.round(2), "Predicted": y_pred.round(2)})
# evaluation using r-square
Linear_Regression.score(x_train, y_train).round(2)
# calculating MSE, RMSE, MAE
from sklearn.metrics import mean_squared_error, mean_absolute_error
print("Mean Squared Error: ", mean_squared_error(y_test, y_pred).round(2))
print("Mean Absolute Error: ", mean_absolute_error(y_test, y_pred).round(2))
print("Root Mean Squared Error: ", np.sqrt(mean_squared_error(y_test, y_pred)).round(2))
# ## Now let’s see the R2 score
# Calculation of R2 Score
linear_regression = LinearRegression()
from sklearn.model_selection import cross_val_score
print(cross_val_score(linear_regression, x, y, cv=10, scoring="r2").mean())
# **Residuals Plot**
# Residuals, in the context of regression models, are the difference between the observed value of the target variable (y_test) and the predicted value (y_pred), i.e. the error of the prediction.
# **Residual = Observed – Predicted**
# residual plot
x_plot = plt.scatter(y_test, (y_test - y_pred), c="b")
plt.hlines(y=0, xmin=y_test.min(), xmax=y_test.max())
plt.title("Residual plot")
plt.xlabel("y_test")
plt.ylabel("Residuals")
# **Using Yellow Brick library**
# pip install yellowbrick
from yellowbrick.regressor import ResidualsPlot
visualizer = ResidualsPlot(Linear_Regression)
visualizer.fit(x_train, y_train) # Fit the training data to the visualizer
visualizer.score(x_test, y_test) # Evaluate the model on the test data
visualizer.show()
# **Before performing anything else, such as encoding, the developer will follow all of the above steps for the test data.**
Test_data = pd.read_csv(
"/kaggle/input/grad-bunker-learning-hub-data-science-competition/test.csv"
)
Test_data.shape
Test_data.head()
# missing values
Test_data.isnull().sum()
# **Here the developer follows the exact steps that were performed on the train data**
# Drop rating,canceled_by and discount
Test_data = Test_data.drop("rating", axis=1)
Test_data = Test_data.drop("canceled_by", axis=1)
Test_data = Test_data.drop("discount", axis=1)
Test_data = Test_data.drop("hashed_id", axis=1)
# Test_data = Test_data.drop('review_requested',axis = 1)
Test_data["drivers_tried"] = Test_data["drivers_tried"].fillna(
Test_data["drivers_tried"].mode()[0]
)
Test_data.isnull().sum()
Test_data.dtypes
# **Now it is time to combine both datasets and then follow the encoding and column-dropping steps shown for the data_test dataframe**
# ### Note: Main_data is the combination of both the test and train files
Main_data = pd.concat([Train_data, Test_data], axis=0)
Main_data.shape
# **First encode the status and drivers_tried attributes, then drop the non-significant column 'review_requested' found in the OLS test**
# Encoding
Main_data["status"] = pd.factorize(Main_data["status"])[0]
Main_data["drivers_tried"] = pd.factorize(Main_data["drivers_tried"])[0]
# droping column
Main_data = Main_data.drop("review_requested", axis=1)
Main_data.isnull().sum()
# Linear Regression_MLR
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
# spliting Main data into train and test set
split = round(len(Train_data))
train_set, test_set = Main_data[:split], Main_data[split:]
print("train on %d instances, test on %d instances" % (len(train_set), len(test_set)))
# **Now put the train-set data into X_train and y_train, and similarly the test-set data into x_test, for further prediction**
# Train set
X_train = train_set.drop("estimated_fare", axis=1)
X_train.shape
y_train = train_set["estimated_fare"]
y_train = y_train.astype("int")
# Test set
x_test = test_set.drop("estimated_fare", axis=1)
# **After preparing X_train, y_train and x_test, the developer standardizes both train and test features; scaling puts the features on a comparable range, which helps reduce issues such as outliers dominating the fit**
from sklearn import preprocessing
X_train = preprocessing.StandardScaler().fit(X_train).transform(X_train)
X_train[0:5]
x_test = preprocessing.StandardScaler().fit(x_test).transform(x_test)
x_test[0:5]
# Linear Regression
from sklearn.linear_model import LinearRegression
Linear_Regression = LinearRegression()
# training a linear regression model on train
Linear_Regression.fit(X_train, y_train)
# Accuracy of the model
# Train Accuracy
print(
"Train Accuracy = ", (Linear_Regression.score(X_train, y_train).round(2)) * 100, "%"
)
# ## Final prediction
# predicting on X_test
y_pred = Linear_Regression.predict(x_test)
y_pred.round(2)
pred = pd.DataFrame(y_pred)
pred
# ## Saving the predicted file
pred.to_csv(r"submission.csv", index=False)
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv("/kaggle/input/housedata/data.csv")
data.head()
data.shape
data.dtypes.value_counts()
data.isnull().sum()
data.duplicated().sum()
# # Linear Regression assumptions
data["price"].min(), data["price"].max()
data = data.query("price > 0")
import seaborn as sns
import matplotlib.pyplot as plt
sns.histplot(data=data, x="price")
plt.show()
sns.boxplot(data=data, y="price")
plt.show()
sns.histplot(data["price"])
plt.show()
sns.boxplot(data=data, y="price")
plt.show()
plt.figure(figsize=(12, 5))
correlation = data.corr()
sns.heatmap(correlation, annot=True)
plt.show()
# sns.pairplot(data)
# plt.show()
data.head(3)
sns.scatterplot(x=data.price, y=(data.sqft_living) ** 3)
plt.show()
data = data.drop(["date", "statezip", "street", "country"], axis=1)
features = data.drop("price", axis=1)
target = data.price.values
target_logged = np.log(target)
sns.histplot(target_logged)
plt.show()
features.head()
[data["city"].unique(), len(data["city"].unique())]
nums = features.select_dtypes("number")
text = features.select_dtypes("object")
nums.head()
binary_columns = nums.iloc[:, [0, 1, 4, 5, 6, 7, -2, -1]].reset_index(drop=True)
binary_columns
np.unique(data.bedrooms)
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.tree import DecisionTreeRegressor
cols = data.columns.to_list()
c_wanted = []
for c in cols:
if c.startswith("s"):
c_wanted.append(c)
poly_features = data[c_wanted]
scores = []
for i in range(2, 4):
    poly = PolynomialFeatures(degree=i)
    # fit and transform the raw columns on each pass so the two degrees are compared fairly
    transformed = poly.fit_transform(poly_features)
    polyies = pd.DataFrame(transformed, columns=poly.get_feature_names_out())
polyies = pd.concat([polyies, pd.Series(target_logged)], axis=1)
scaler = MinMaxScaler()
scaler.fit(polyies)
nums = scaler.transform(polyies)
nums_scaled = pd.DataFrame(nums, columns=scaler.get_feature_names_out())
ohe_city = pd.get_dummies(text).reset_index(drop=True)
full_data = pd.concat([nums_scaled, ohe_city, binary_columns], axis=1)
X_train, X_test, y_train, y_test = train_test_split(
full_data, target_logged, test_size=0.2, random_state=42
)
model = LinearRegression()
model.fit(X_train, y_train)
scores.append((i, round(model.score(X_test, y_test), 2)))
scores
scaler = MinMaxScaler()
scaler.fit(polyies)
nums = scaler.transform(polyies)
nums_scaled = pd.DataFrame(nums, columns=scaler.get_feature_names_out())
ohe_city = pd.get_dummies(text).reset_index(drop=True)
ohe_city
full_data = pd.concat([nums_scaled, ohe_city, binary_columns], axis=1)
full_data.head()
full_data.isnull().sum()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
full_data, target_logged, test_size=0.2, random_state=42
)
X_train.shape
# from sklearn.decomposition import PCA
# pca = PCA(n_components=30)
# pca.fit(X_train)
# X_train_pca = pca.transform(X_train)
# X_test_pca = pca.transform(X_test)
# pca.explained_variance_ratio_.sum()
# # Model Building
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.tree import DecisionTreeRegressor
model = LinearRegression()
model.fit(X_train, y_train)
round(model.score(X_test, y_test), 2)
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_test, model.predict(X_test))
print("MAE on the log-price scale:", mae)  # effectively zero here (~6.4e-13)
preds = model.predict(X_test)
df = pd.DataFrame({"actual": np.exp(y_test), "predicted": np.exp(preds)})
df
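# Added sketch (not in the original notebook): report the error back in the original
# price scale, since the model was trained on log-prices.
from sklearn.metrics import mean_squared_error
rmse_price = np.sqrt(mean_squared_error(np.exp(y_test), np.exp(preds)))
print("RMSE in price units:", round(float(rmse_price), 2))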
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
print(90 > 89)
print(90 == 89)
print(90 < 89)
a = 77
b = 88
if b > a:
    print("b is greater than a")
else:
    print("b is not greater than a")
print(bool("hey"))
print(bool(5))
x = "hey"
y = 5
print(bool(x))
print(bool(y))
bool("geceler affetmez")
bool(456)
bool(["gözyaşım", "affetmez", "geceler"])
bool(False)
bool(None)
bool(0)
bool("heyo")
bool(())
bool([9])
bool({})
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Extract store data**
store_detail = pd.read_csv(
"/kaggle/input/store-sales-time-series-forecasting/stores.csv"
)
store_detail.head(n=10)
# **Create a lookup map from the Store Details**
# {store_id -> {store_detail}}
store_detail_lookup_map = store_detail.set_index(store_detail.store_nbr).T.to_dict()
list(store_detail_lookup_map.items())[0:3]
# take(5, store_detail_lookup_map.items())
# **Load Train Data**
train_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv")
train_data.head(n=3)
train_data.tail()
train_data.shape
# **Enrich training data with Store Details**
train_data["store_city"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["city"], axis=1
)
train_data["store_state"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["state"], axis=1
)
train_data["store_type"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["type"], axis=1
)
train_data["store_cluster"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["cluster"], axis=1
)
train_data.head()
# **Load Test Data**
test_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv")
test_data.head()
test_data.shape
# **Enrich Test Data**
test_data["store_city"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["city"], axis=1
)
test_data["store_state"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["state"], axis=1
)
test_data["store_type"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["type"], axis=1
)
test_data["store_cluster"] = train_data.apply(
lambda row: store_detail_lookup_map[row.store_nbr]["cluster"], axis=1
)
test_data.head()
# **ML model using RF**
from sklearn.ensemble import RandomForestRegressor
y = train_data["sales"]
features = ["store_city", "store_state", "store_cluster", "family", "onpromotion"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
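# Added sketch (not in the original notebook): get_dummies on train and test separately
# can produce different dummy columns; aligning the two frames keeps them consistent.
X, X_test = X.align(X_test, join="left", axis=1, fill_value=0)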
# sales is a continuous target, so a regressor (rather than a classifier) is fitted here
model = RandomForestRegressor(n_estimators=70, max_depth=3, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/hungary-chicken/hungary_chickenpox.csv")
df.head()
df_bp = df[["Date", "BUDAPEST"]].copy()
df_bp["Date"] = pd.to_datetime(df_bp["Date"], format="%d/%m/%Y")
df_bp = df_bp.set_index("Date")
bp_series = df_bp["BUDAPEST"].squeeze()
bp_series.head()
bp_series.describe()
plt.rcParams["figure.figsize"] = (20, 10)
bp_series.plot.line()
from statsmodels.tsa.seasonal import seasonal_decompose
components = seasonal_decompose(bp_series, model="additive")
components.plot()
plt.show()
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
plot_acf(x=bp_series, lags=50, use_vlines=True)
plt.show()
# question for Zikica - method? https://www.statsmodels.org/dev/generated/statsmodels.graphics.tsaplots.plot_pacf.html
plot_pacf(x=bp_series, lags=50, method="ywm")
plt.show()
from statsmodels.tsa.stattools import acf
acf_values, ljung_box_qstat, ljung_box_pvalues = acf(x=bp_series, nlags=50, qstat=True)
print(acf_values)
# # Stationarity
# Dickey-Fuller test
# p-value > 0.05: Fail to reject the null hypothesis (H0), the data has a unit root and is non-stationary.
# p-value <= 0.05: Reject the null hypothesis (H0), the data does not have a unit root and is stationary.
from statsmodels.tsa.stattools import adfuller
adf_result = adfuller(bp_series)
print("ADF Statistic: %f" % adf_result[0])
print("p-value: %f" % adf_result[1])
print("Critical Values:")
for key, value in adf_result[4].items():
print("\t%s: %.3f" % (key, value))
# # Single HWES
# holt winters
# single exponential smoothing
from statsmodels.tsa.holtwinters import SimpleExpSmoothing
# double and triple exponential smoothing
from statsmodels.tsa.holtwinters import ExponentialSmoothing
df_single_hw = pd.DataFrame({"chickenpox": bp_series})
df_single_hw["HWES1"] = (
SimpleExpSmoothing(df_single_hw["chickenpox"])
.fit(smoothing_level=0.25, optimized=False, use_brute=True)
.fittedvalues
)
df_single_hw[["chickenpox", "HWES1"]].plot(
title="Holt Winters Single Exponential Smoothing, Budapest Chickenpox, smoothing level = 0.25"
)
df_single_hw = pd.DataFrame({"chickenpox": bp_series})
df_single_hw["HWES1"] = (
SimpleExpSmoothing(df_single_hw["chickenpox"])
.fit(smoothing_level=0.75, optimized=False, use_brute=True)
.fittedvalues
)
df_single_hw[["chickenpox", "HWES1"]].plot(
title="Holt Winters Single Exponential Smoothing, Budapest Chickenpox, smoothing level = 0.75"
)
# Forecasting
train_series = df_single_hw["chickenpox"][:450]
test_series = df_single_hw["chickenpox"][450:]
fitted_hwes = SimpleExpSmoothing(train_series).fit(
smoothing_level=0.25, optimized=True, use_brute=True
)
test_predictions = fitted_hwes.forecast(72)
train_series.plot(legend=True, label="TRAIN")
test_series.plot(legend=True, label="TEST")
test_predictions.plot(legend=True, label="PREDICTION")
plt.title("Train, Test and Predicted Test using Holt Winters")
from sklearn.metrics import mean_absolute_error, mean_squared_error
print(f"Mean Absolute Error = {mean_absolute_error(test_series,test_predictions)}")
print(f"Mean Squared Error = {mean_squared_error(test_series,test_predictions)}")
print(f"SSE = {fitted_hwes.sse}")
print(f"AIC = {fitted_hwes.aic}")
fitted_hwes.summary()
# residuals
plot_acf(x=fitted_hwes.resid[:], lags=50, use_vlines=True)
plt.show()
from statsmodels.api import qqplot
qqplot(fitted_hwes.resid)
plt.show()
# # ARMA model
from statsmodels.tsa.arima.model import ARIMA
arima_model = ARIMA(bp_series, order=(3, 0, 8))
arima_results = arima_model.fit()
plt.plot(bp_series)
plt.plot(arima_results.fittedvalues, color="red")
# summary of fit model
print(arima_results.summary())
# line plot of residuals
arma_residuals = pd.DataFrame(arima_results.resid)
arma_residuals.plot()
plt.show()
# summary stats of residuals
print(arma_residuals.describe())
plot_acf(x=arma_residuals, lags=50, use_vlines=True)
plt.show()
qqplot(arma_residuals)
plt.show()
from statsmodels.graphics.tsaplots import plot_predict
arima_model = ARIMA(train_series, order=(3, 0, 8))
arima_results = arima_model.fit()
fig, ax = plt.subplots()
ax = train_series.plot(ax=ax)
ax = test_series.plot(ax=ax)
plot_predict(arima_results, 450, 521, ax=ax)
plt.show()
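# Added sketch (not in the original notebook): score the ARIMA forecast on the same
# hold-out set used for Holt-Winters so the two models can be compared directly.
arima_forecast = arima_results.forecast(steps=len(test_series))
print(f"ARIMA Mean Absolute Error = {mean_absolute_error(test_series, arima_forecast)}")
print(f"ARIMA Mean Squared Error = {mean_squared_error(test_series, arima_forecast)}")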
|
# ## Introduction
# Topic modeling is a type of statistical modeling for discovering the abstract “topics” that occur in a collection of documents. Latent Dirichlet Allocation (LDA) is an example of a topic model and is used to assign the text in a document to a particular topic. It builds a topic-per-document model and a words-per-topic model, both modeled as Dirichlet distributions.
# Content:
# 1. [Libraries](#1)
# 1. [Load and Check Data](#2)
# 1. [Time Series](#3)
# * 3.1 [Tweet Activity Over Hours](#4)
# * 3.2 [Tweet Activity Over Years](#5)
# 1. [Text Preparation](#6)
# 1. [LDA Topic Modelling](#7)
# * 5.1. [Topic Modelling Correlation Matrix](#8)
# * 5.2. [LDA Visualization (pyLDAvis)](#9)
# 1. [References](#10)
# ## 1. Libraries
import os
import pandas as pd
import numpy as np
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly as py
import plotly.graph_objs as go
import gensim
from gensim import corpora, models, similarities
import logging
import tempfile
from nltk.corpus import stopwords
from string import punctuation
from collections import OrderedDict
import seaborn as sns
import pyLDAvis.gensim
import matplotlib.pyplot as plt
init_notebook_mode(connected=True) # do not miss this line
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", None)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
pd.set_option("display.width", 500)
#
# ## 2. Load and Check Data
data_path = "../input/data-bg-tweets/data_bg.csv"
tweets = pd.read_csv(data_path, encoding="latin1")
tweets = tweets.assign(Time=pd.to_datetime(tweets.time)).drop("id", axis="columns")
print("Number of tweets: ", len(tweets["tweet"]))
tweets.head(5)
#
# ## 3. Time Series
# ### 3.1. Tweet Activity Over Hours
#
tweets["Time"] = pd.to_datetime(tweets["time"], format="%H:%M:%S")
tweetsT = tweets["Time"]
trace = go.Histogram(x=tweetsT, marker=dict(color="blue"), opacity=0.75)
layout = go.Layout(
title="Tweet Activity Over Hours",
height=450,
width=1200,
xaxis=dict(title="Hours"),
yaxis=dict(title="Tweet Quantity"),
bargap=0.2,
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig)
#
# ### 3.2. Tweet Activity Over Years
tweets["Time"] = pd.to_datetime(tweets["date"], format="%Y-%m-%d")
tweetsT = tweets["Time"]
trace = go.Histogram(x=tweetsT, marker=dict(color="blue"), opacity=0.75)
layout = go.Layout(
title="Tweet Activity Over Years",
height=450,
width=1200,
xaxis=dict(title="Month and year"),
yaxis=dict(title="Tweet Quantity"),
bargap=0.2,
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig)
#
# ## 4. Text Preparation
# Preparing a corpus for analysis and checking first 10 entries
corpus = []
a = []
for i in range(len(tweets["tweet"])):
a = tweets["tweet"][i]
corpus.append(a)
corpus[0:10]
TEMP_FOLDER = tempfile.gettempdir()
print(
'Folder "{}" will be used to save temporary dictionary and corpus.'.format(
TEMP_FOLDER
)
)
logging.basicConfig(
format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
)
# removing common words and tokenizing
list1 = ["RT", "rt"]
stoplist = stopwords.words("english") + list(punctuation) + list1
texts = [
[word for word in str(document).lower().split() if word not in stoplist]
for document in corpus
]
dictionary = corpora.Dictionary(texts)
dictionary.save(
os.path.join(TEMP_FOLDER, "bill.dict")
) # store the dictionary, for future reference
# print(dictionary)
# print(dictionary.token2id)
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(
os.path.join(TEMP_FOLDER, "bill.mm"), corpus
) # store to disk, for later use
#
# ## 5. LDA Topic Modelling
tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
corpus_tfidf = tfidf[corpus] # step 2 -- use the model to transform vectors
total_topics = 5
lda = models.LdaModel(corpus, id2word=dictionary, num_topics=total_topics)
corpus_lda = lda[corpus_tfidf]  # wrap the original corpus: bow -> tfidf -> LDA topic space
# Show first n important word in the topics:
lda.show_topics(total_topics, 5)
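# Added sketch (assumption, not in the original notebook): topic coherence (c_v) gives a
# rough quality measure for the fitted LDA model.
from gensim.models import CoherenceModel
coherence_model = CoherenceModel(model=lda, texts=texts, dictionary=dictionary, coherence="c_v")
print("Topic coherence (c_v):", coherence_model.get_coherence())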
data_lda = {i: OrderedDict(lda.show_topic(i, 25)) for i in range(total_topics)}
# data_lda
df_lda = pd.DataFrame(data_lda)
df_lda = df_lda.fillna(0).T
print(df_lda.shape)
df_lda
#
# ### 5.1. Topic Modelling Correlation Matrix
g = sns.clustermap(
df_lda.corr(),
center=0,
standard_scale=1,
cmap="RdBu",
metric="cosine",
linewidths=0.75,
figsize=(15, 15),
)
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
# plt.setp(ax_heatmap.get_yticklabels(), rotation=0) # For y axis
#
# ### 5.2. LDA Visualization (pyLDAvis)
pyLDAvis.enable_notebook()
panel = pyLDAvis.gensim.prepare(lda, corpus_lda, dictionary, mds="tsne")
panel
|
# # 1. Setting
# Content feature layers and their loss weights
CONTENT_LAYERS = {"block4_conv3": 0.5, "block5_conv2": 0.5}
# Style feature layers and their loss weights
STYLE_LAYERS = {
    "block1_conv1": 0.2,
    "block2_conv2": 0.2,
    "block3_conv1": 0.2,
    "block4_conv3": 0.2,
    "block5_conv3": 0.2,
}
# Path to the content image
CONTENT_IMAGE_PATH = "/kaggle/input/vgg19-style-transfer/images/content.jpg"
# Path to the style image
STYLE_IMAGE_PATH = "/kaggle/input/vgg19-style-transfer/images/style.jpg"
# Directory where generated images are saved
OUTPUT_DIR = "./output"
# Overall weight of the content loss
CONTENT_LOSS_FACTOR = 1
# Overall weight of the style loss
STYLE_LOSS_FACTOR = 100
# Image width
WIDTH = 450
# Image height
HEIGHT = 300
# Number of training epochs
EPOCHS = 20
# Training steps per epoch
STEPS_PER_EPOCH = 100
# Learning rate
LEARNING_RATE = 0.03
# # 2. Utils
import tensorflow as tf
# We use the weights pre-trained on ImageNet for the classic backbone network,
# so normalization also uses the ImageNet mean and standard deviation
image_mean = tf.constant([0.485, 0.456, 0.406])
image_std = tf.constant([0.229, 0.224, 0.225])  # 0.299 in the original looks like a typo for the ImageNet std 0.229
def normalization(x):
"""
对输入图片x进行归一化,返回归一化的值
"""
return (x - image_mean) / image_std
def load_images(image_path, width=WIDTH, height=HEIGHT):
"""
加载并处理图片
:param image_path: 图片路径
:param width: 图片宽度
:param height: 图片长度
:return: 一个张量
"""
# 加载文件
x = tf.io.read_file(image_path)
# 解码图片
x = tf.image.decode_jpeg(x, channels=3)
# 修改图片大小
x = tf.image.resize(x, [height, width])
x = x / 255.0
# 归一化
x = normalization(x)
x = tf.reshape(x, [1, height, width, 3])
# 返回结果
return x
def save_image(image, filename):
x = tf.reshape(image, image.shape[1:])
x = x * image_std + image_mean
x = x * 255.0
x = tf.cast(x, tf.int32)
x = tf.clip_by_value(x, 0, 255)
x = tf.cast(x, tf.uint8)
x = tf.image.encode_jpeg(x)
tf.io.write_file(filename, x)
# # 3.Model
def get_vgg19_model(layers):
"""
创建并初始化vgg19模型
:return:
"""
# 加载imagenet上预训练的vgg19
vgg = tf.keras.applications.VGG19(include_top=False, weights="imagenet")
# 提取需要被用到的vgg的层的output
outputs = [vgg.get_layer(layer).output for layer in layers]
# 使用outputs创建新的模型
model = tf.keras.Model(
[
vgg.input,
],
outputs,
)
# 锁死参数,不进行训练
model.trainable = False
return model
import typing
class NeuralStyleTransferModel(tf.keras.Model):
def __init__(
self,
content_layers: typing.Dict[str, float] = CONTENT_LAYERS,
style_layers: typing.Dict[str, float] = STYLE_LAYERS,
):
super(NeuralStyleTransferModel, self).__init__()
        # Content feature layers: Dict[layer name, weight]
        self.content_layers = content_layers
        # Style feature layers: Dict[layer name, weight]
        self.style_layers = style_layers
        # All VGG layers we need to extract
        layers = list(self.content_layers.keys()) + list(self.style_layers.keys())
        # Map layer_name -> index into the list of VGG outputs
        self.outputs_index_map = dict(zip(layers, range(len(layers))))
        # Create and initialize the VGG network
        self.vgg = get_vgg19_model(layers)
def call(self, inputs, training=None, mask=None):
"""
前向传播
:return
typing.Dict[str,typing.List[outputs,加权系数]]
"""
outputs = self.vgg(inputs)
# 分离内容特征层和风格特征层的输出,方便后续计算 typing.List[outputs,加权系数]
content_outputs = []
for layer, factor in self.content_layers.items():
content_outputs.append((outputs[self.outputs_index_map[layer]][0], factor))
style_outputs = []
for layer, factor in self.style_layers.items():
style_outputs.append((outputs[self.outputs_index_map[layer]][0], factor))
        # Return the outputs as a dictionary
return {"content": content_outputs, "style": style_outputs}
# # 4. Train
import os
import numpy as np
from tqdm import tqdm
import tensorflow as tf
# Build the model
model = NeuralStyleTransferModel()
# Load the content image
content_image = load_images(CONTENT_IMAGE_PATH)
# Load the style image
style_image = load_images(STYLE_IMAGE_PATH)
# Precompute the content features of the target content image
target_content_features = model(
[
content_image,
]
)["content"]
# Compute the style features of the target style image
target_style_features = model(
[
style_image,
]
)["style"]
M = WIDTH * HEIGHT
N = 3
def _compute_content_loss(noise_features, target_features):
"""
计算指定层上两个特征之间的内容loss
:param noise_features: 噪声图片在指定层的特征
:param target_features: 内容图片在指定层的特征
"""
content_loss = tf.reduce_sum(tf.square(noise_features - target_features))
    # Normalization coefficient
x = 2.0 * M * N
content_loss = content_loss / x
return content_loss
def compute_content_loss(noise_content_features):
"""
计算并当前图片的内容loss
:param noise_content_features: 噪声图片的内容特征
"""
    # Collect the per-layer content losses
    content_losses = []
    # Weighted sum of the per-layer content losses
for (noise_feature, factor), (target_feature, _) in zip(
noise_content_features, target_content_features
):
layer_content_loss = _compute_content_loss(noise_feature, target_feature)
content_losses.append(layer_content_loss * factor)
return tf.reduce_sum(content_losses)
def gram_matrix(feature):
"""
计算给定特征的格拉姆矩阵
"""
    # Move the channel dimension to the front
    x = tf.transpose(feature, perm=[2, 0, 1])
    # Flatten into a 2D matrix
    x = tf.reshape(x, (x.shape[0], -1))
    # Multiply x by its transpose
return x @ tf.transpose(x)
def _compute_style_loss(noise_feature, target_feature):
"""
计算指定层上两个特征之间的风格loss
:param noise_feature: 噪声图片在指定层的特征
:param target_feature: 风格图片在指定层的特征
"""
noise_gram_matrix = gram_matrix(noise_feature)
style_gram_matrix = gram_matrix(target_feature)
style_loss = tf.reduce_sum(tf.square(noise_gram_matrix - style_gram_matrix))
    # Normalization coefficient
x = 4.0 * (M**2) * (N**2)
return style_loss / x
def compute_style_loss(noise_style_features):
"""
计算并返回图片的风格loss
:param noise_style_features: 噪声图片的风格特征
"""
style_losses = []
for (noise_feature, factor), (target_feature, _) in zip(
noise_style_features, target_style_features
):
layer_style_loss = _compute_style_loss(noise_feature, target_feature)
style_losses.append(layer_style_loss * factor)
return tf.reduce_sum(style_losses)
def total_loss(noise_features):
"""
计算总损失
:param noise_features: 噪声图片特征数据
"""
content_loss = compute_content_loss(noise_features["content"])
style_loss = compute_style_loss(noise_features["style"])
return content_loss * CONTENT_LOSS_FACTOR + style_loss * STYLE_LOSS_FACTOR
# Use the Adam optimizer
optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
# Generate a noise image by randomly perturbing the content image
noise_image = tf.Variable(
    (content_image + np.random.uniform(-0.2, 0.2, (1, HEIGHT, WIDTH, 3))) / 2
)
# Use tf.function to speed up training
@tf.function
def train_one_step():
"""
一次迭代过程
"""
# 求loss
with tf.GradientTape() as tape:
noise_outputs = model(noise_image)
loss = total_loss(noise_outputs)
    # Compute the gradient with respect to the noise image
    grad = tape.gradient(loss, noise_image)
    # Gradient descent step: update the noise image
optimizer.apply_gradients([(grad, noise_image)])
return loss
# Create the output directory for the generated images
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
loss_list = []
# Train for EPOCHS epochs in total
for epoch in range(EPOCHS):
    # Use tqdm to show training progress
    with tqdm(
        total=STEPS_PER_EPOCH, desc="Epoch {}/{}".format(epoch + 1, EPOCHS)
    ) as pbar:
        # Run STEPS_PER_EPOCH training steps in each epoch
for step in range(STEPS_PER_EPOCH):
_loss = train_one_step()
pbar.set_postfix({"loss": "%.4f" % float(_loss)})
pbar.update(1)
        # Save the generated image once per epoch
save_image(noise_image, "{}/{}.jpg".format(OUTPUT_DIR, epoch + 1))
loss_list.append(float(_loss))
# Display the image from the final training epoch
img_path = "/kaggle/working/output/" + str(EPOCHS) + ".jpg"
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open(img_path)
plt.imshow(img)
plt.plot(loss_list)
plt.title("loss")
plt.show()
for i in loss_list:
print(i)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
c = pd.read_csv("/kaggle/input/usarrests/USArrests.csv")
c.head()
crime = c.rename({"Unnamed: 0": "States"}, axis=1)
crime.tail()
# crime.States.value_counts()
crime.info()
crime.isna().sum()
# ### Hierarchical Clustering
n = MinMaxScaler()
data = n.fit_transform(crime.iloc[:, 1:].to_numpy())
crimes = pd.DataFrame(data, columns=crime.columns[1:])
crimes.head(4)
# ### Clustering
# create dendrogram
fig = plt.figure(figsize=(15, 8))
dendrogram = sch.dendrogram(sch.linkage(crimes, method="average"))
# create dendrogram
fig = plt.figure(figsize=(15, 8))
dendrogram = sch.dendrogram(sch.linkage(crimes, method="ward"))
# create clusters
hc1 = AgglomerativeClustering(n_clusters=4, affinity="euclidean", linkage="average")
# save clusters for chart
y2 = hc1.fit_predict(crimes)
cc = pd.DataFrame(y2, columns=["Clusters"])
cc.head(6)
crimes2 = pd.concat([crime, cc], axis=1)
crimes2.head(7)
crimes2.sort_values("Clusters").reset_index()
crimes2["Clusters"].value_counts()
# ### K-Means
kmeans = KMeans(n_clusters=4, random_state=0)
kmeans.fit(crimes)
kmeans.inertia_
wcss = []
for i in range(1, 8):
kmeans = KMeans(n_clusters=i, random_state=0)
kmeans.fit(crimes)
wcss.append(kmeans.inertia_)
wcss
plt.plot(range(1, 8), wcss)
plt.title("Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
# Build Cluster algorithm
cc2 = KMeans(4, random_state=8)
cc2.fit(crimes)
cc2.labels_
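# Added sketch (not in the original notebook): the silhouette score complements the elbow
# plot; values closer to 1 indicate better-separated clusters.
from sklearn.metrics import silhouette_score
print("Silhouette score (k=4):", silhouette_score(crimes, cc2.labels_))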
# Converting array to dataframe
df2 = pd.DataFrame(cc2.labels_, columns=["clusters"])
crimes3 = pd.concat([crime, df2], axis=1)
crimes3.head(4)
crimes3["clusters"].value_counts()
crimes3.groupby("clusters").agg(["mean"])
# ### DBSCAN
array = crimes.values
# array
stscaler = StandardScaler().fit(array)
X2 = stscaler.transform(array)
X2
dbscan2 = DBSCAN(eps=0.98, min_samples=3)
dbscan2.fit(X2)
# Noisy samples are given the label -1.
dbscan2.labels_
c2 = pd.DataFrame(dbscan2.labels_, columns=["Cluster ID"])
c2.value_counts()
crimes4 = pd.concat([crime, c2], axis=1)
crimes4
|
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Load datasets
train_df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
# Explore the variables and their types
print(train_df.info())
# Check for missing data and handle it accordingly
print(train_df.isnull().sum())
# Check for outliers and skewness
print(train_df.describe())
# Visualize the data using plots and charts
sns.countplot(x="Transported", data=train_df)
plt.show()
sns.histplot(x="Age", data=train_df, kde=True)
plt.show()
sns.boxplot(x="VIP", y="Age", data=train_df)
plt.show()
# Classification Model
# Import libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
confusion_matrix,
)
# Load dataset
train_df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
# Preprocess data
X = train_df.drop(["PassengerId", "Name", "Transported"], axis=1)
y = train_df["Transported"]
X_test = test_df.drop(["PassengerId", "Name"], axis=1)
# Handle missing values: fill numeric columns with their mean (numeric_only avoids errors
# on the categorical columns; their NaNs are left for the OneHotEncoder below)
X.fillna(X.mean(numeric_only=True), inplace=True)
X_test.fillna(X_test.mean(numeric_only=True), inplace=True)
ohe = OneHotEncoder(handle_unknown="ignore")
X_ohe = ohe.fit_transform(X[["HomePlanet", "Cabin", "Destination"]]).toarray()
X_test_ohe = ohe.transform(X_test[["HomePlanet", "Cabin", "Destination"]]).toarray()
scaler = StandardScaler()
X_num = scaler.fit_transform(
X[["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]]
)
X_test_num = scaler.transform(
X_test[["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]]
)
X_prep = np.concatenate([X_ohe, X_num], axis=1)
X_test_prep = np.concatenate([X_test_ohe, X_test_num], axis=1)
X_train, X_val, y_train, y_val = train_test_split(
X_prep, y, test_size=0.2, random_state=42
)
# Train model
clf = LogisticRegression(random_state=42)
clf.fit(X_train, y_train)
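# Added sketch (not in the original notebook): 5-fold cross-validation gives a more
# stable accuracy estimate than the single train/validation split above.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(LogisticRegression(random_state=42), X_prep, y, cv=5)
print("CV accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))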
# Evaluate model
y_pred = clf.predict(X_val)
accuracy = accuracy_score(y_val, y_pred)
precision = precision_score(y_val, y_pred)
recall = recall_score(y_val, y_pred)
f1 = f1_score(y_val, y_pred)
cm = confusion_matrix(y_val, y_pred)
print("Accuracy:", accuracy)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1)
print("Confusion matrix:", cm)
# Make predictions on test set
y_test_pred = clf.predict(X_test_prep)
# Save predictions to file
test_df["Transported"] = y_test_pred
test_df[["PassengerId", "Transported"]].to_csv(
"test_predictions_classification.csv", index=False
)
import seaborn as sns
# Plot confusion matrix
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.show()
import matplotlib.pyplot as plt
# Plot evaluation metrics
metrics = ["Accuracy", "Precision", "Recall", "F1 Score"]
values = [accuracy, precision, recall, f1]
plt.bar(metrics, values, color="b")
plt.xlabel("Metrics")
plt.ylabel("Values")
plt.title("Evaluation Metrics")
plt.show()
# Regression Model
# Import libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
# Load dataset
train_df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
# Preprocess data
X = train_df.drop(["PassengerId", "Name", "Transported"], axis=1)
y = train_df["Transported"]
X_test = test_df.drop(["PassengerId", "Name"], axis=1)
# Handle missing values: fill numeric columns with their mean (numeric_only avoids errors
# on the categorical columns; their NaNs are left for the OneHotEncoder below)
X.fillna(X.mean(numeric_only=True), inplace=True)
X_test.fillna(X_test.mean(numeric_only=True), inplace=True)
ohe = OneHotEncoder(handle_unknown="ignore")
X_ohe = ohe.fit_transform(X[["HomePlanet", "Cabin", "Destination"]]).toarray()
X_test_ohe = ohe.transform(X_test[["HomePlanet", "Cabin", "Destination"]]).toarray()
scaler = StandardScaler()
X_num = scaler.fit_transform(
X[["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]]
)
X_test_num = scaler.transform(
X_test[["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]]
)
X_prep = np.concatenate([X_ohe, X_num], axis=1)
X_test_prep = np.concatenate([X_test_ohe, X_test_num], axis=1)
X_train, X_val, y_train, y_val = train_test_split(
X_prep, y, test_size=0.2, random_state=42
)
# Train model
reg = RandomForestRegressor(n_estimators=100, random_state=42)
reg.fit(X_train, y_train)
# Evaluate model
y_pred = reg.predict(X_val)
mae = mean_absolute_error(y_val, y_pred)
mse = mean_squared_error(y_val, y_pred)
r2 = r2_score(y_val, y_pred)
reg_score = reg.score(X_val, y_val)  # RandomForestRegressor.score returns R^2; stored separately so the classification accuracy above is not overwritten
print("Mean Absolute Error:", mae)
print("Mean Squared Error:", mse)
print("R-squared:", r2)
print("Accuracy:", accuracy) # Print accuracy
# Make predictions on test set
y_test_pred = reg.predict(X_test_prep)
# Save predictions to file
test_df["Transported"] = y_test_pred
test_df[["PassengerId", "Transported"]].to_csv(
"test_predictions_regression.csv", index=False
)
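# Added note (assumption): a submission typically expects boolean Transported values, so
# the regressor's continuous predictions would usually be thresholded first, e.g.:
# test_df["Transported"] = y_test_pred >= 0.5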
import seaborn as sns
import matplotlib.pyplot as plt
# Plot scatter plot of actual versus predicted values
plt.scatter(y_val, y_pred)
plt.xlabel("Actual")
plt.ylabel("Predicted")
plt.title("Actual vs Predicted Values")
plt.show()
# Plot distribution plot of residuals
residuals = y_val - y_pred
sns.histplot(residuals, kde=True)
plt.xlabel("Residuals")
plt.ylabel("Count")
plt.title("Distribution of Residuals")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
# Set up data for bar plot
metrics = ["Accuracy", "Precision", "Recall", "F1 Score", "MAE", "MSE", "R2 Score"]
classification_values = [accuracy, precision, recall, f1, 0, 0, 0]
regression_values = [reg_score, 0, 0, 0, mae, mse, r2]
# Plot bar plot
bar_width = 0.35
fig, ax = plt.subplots(figsize=(16, 6)) # Set figure size
x_pos = np.arange(len(metrics))  # numeric positions so the two bar sets sit side by side
classification_bars = ax.bar(
    x_pos - bar_width / 2, classification_values, bar_width, label="Classification"
)
regression_bars = ax.bar(
    x_pos + bar_width / 2, regression_values, bar_width, label="Regression"
)
ax.set_xticks(x_pos)
ax.set_xticklabels(metrics)
# Add labels and title to plot
ax.set_xlabel("Metrics")
ax.set_ylabel("Values")
ax.set_title("Comparison of Model Performance")
ax.legend()
# Show plot
plt.show()
|
# First we call the libraries that we will work on
#
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Read data files
df = pd.read_csv("../input/big-data-derby-2022/nyra_race_table.csv")
df1 = pd.read_csv("../input/big-data-derby-2022/nyra_start_table.csv")
df2 = pd.read_csv("../input/big-data-derby-2022/nyra_tracking_table.csv")
# We read the first rows
#
df.head()
df1.head()
df2.head()
# Review the number of columns and rows
df.shape
df1.shape
df2.shape
df.dtypes
df1.dtypes
df2.dtypes
# We review data quality
# Is there missing data?
df.isnull().sum()
df1.isnull().sum()
df2.isnull().sum()
# data information
df.info()
df1.info()
df2.info()
# data description
#
df.describe()
df1.describe()
df2.describe()
# Are there strong correlations between the numeric variables?
c = df.corr()
print(c)
sns.heatmap(c)
sns.heatmap(c, annot=True)
sns.pairplot(df)
c = df1.corr()
print(c)
sns.heatmap(c)
sns.heatmap(c, annot=True)
sns.pairplot(df1)
c = df2.corr()
print(c)
sns.heatmap(c)
sns.heatmap(c, annot=True)
sns.pairplot(df2)
df.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Importing Necessary Libraries
import scipy.io
import math
import sys
import timeit
import copy
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
from skimage.transform import rotate
import scipy.fftpack as fft
from skimage.transform import radon, rescale, iradon
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Loading CT Scans
data = scipy.io.loadmat(
"/kaggle/input/ctscan-segmentation-reconstruction-dataset/ctscan_hw1.mat"
)
temp_ct_scans = data["ctscan"]
ct_scans = []
for i in range(temp_ct_scans.shape[2]):
ct_scans.append(temp_ct_scans[:, :, i])
ct_scans = np.array(ct_scans)
print(ct_scans.shape)
# Loading Infection Masks
data = scipy.io.loadmat(
"/kaggle/input/ctscan-segmentation-reconstruction-dataset/infmsk_hw1.mat"
)
infmask = data["infmsk"]
infection_masks = []
for i in range(infmask.shape[2]):
infection_masks.append(infmask[:, :, i])
infection_masks = np.array(infection_masks)
print(infection_masks.shape)
N = ct_scans.shape[0]
# # Part B, Functions
# Functions to relabel the predicted mask so the cluster IDs follow the expected convention (0 = background, 1 = infection, 2 = healthy), based on the relative region sizes
def find_counts(mask):
count_background = np.count_nonzero(mask == 0)
count_infection = np.count_nonzero(mask == 1)
count_healthy = np.count_nonzero(mask == 2)
return np.array([count_background, count_infection, count_healthy])
def check_pred_mask(pred_mask):
pred_count_list = find_counts(pred_mask)
original_count_list = np.array([100, 10, 50])
mapping = dict({})
for i in range(3):
pred_max_idx = np.argmax(pred_count_list)
original_max_idx = np.argmax(original_count_list)
mapping[pred_max_idx] = original_max_idx
pred_count_list[pred_max_idx] = -1
original_count_list[original_max_idx] = -1
corrected_mask = np.empty(shape=(512, 512), dtype=int)
for i in range(512):
for j in range(512):
corrected_mask[i][j] = mapping[pred_mask[i][j]]
return corrected_mask
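# Added note (not in the original notebook): the relabelling loop above can also be done
# without the explicit 512x512 Python loop, e.g. via np.vectorize(mapping.get)(pred_mask),
# which is noticeably faster when processing many slices.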
# Function for printing two sample slices
def print_two_sample_slices(pred_masks, ct_scans):
plt.rcParams["figure.figsize"] = (12, 12)
i = 99
f, axarr = plt.subplots(1, 3)
axarr[0].set_title("Expert Annotations")
axarr[0].imshow(infection_masks[i], cmap="gray")
axarr[1].set_title("Predicted Masks")
axarr[1].imshow(pred_masks[i], cmap="gray")
axarr[2].set_title("Reconstructed CT Scans")
axarr[2].imshow(ct_scans[i], cmap="gray")
f.tight_layout()
plt.show()
i = 69
f, axarr = plt.subplots(1, 3)
axarr[0].set_title("Expert Annotations")
axarr[0].imshow(infection_masks[i], cmap="gray")
axarr[1].set_title("Predicted Masks")
axarr[1].imshow(pred_masks[i], cmap="gray")
axarr[2].set_title("CT Scans")
axarr[2].imshow(ct_scans[i], cmap="gray")
f.tight_layout()
plt.show()
# Using k-means for Image Segmentation
def get_predicted_mask(ct_scans):
start = timeit.default_timer()
pred_masks = []
N = len(ct_scans)
for i in range(N):
sys.stdout.write("\r" + "Processing Image " + str(i))
sample = ct_scans[i]
kmeans_obj = KMeans(n_clusters=3, random_state=0)
ct_scan_flattened = sample.flatten().reshape((512 * 512, 1))
clusters = kmeans_obj.fit_predict(ct_scan_flattened)
curr_pred_mask = clusters.reshape((512, 512))
curr_pred_mask = check_pred_mask(curr_pred_mask)
pred_masks.append(curr_pred_mask)
pred_masks = np.array(pred_masks)
stop = timeit.default_timer()
print("Time Taken = ", stop - start)
print_two_sample_slices(pred_masks, ct_scans)
return pred_masks
# Evaluating the model performance using several evaluation metrics
def get_confusion_metric(true_y, pred_y):
true_y = true_y.flatten()
pred_y = pred_y.flatten()
return confusion_matrix(true_y, pred_y, labels=[0, 1, 2])
def get_req_avg_eval_metrics(infection_masks, pred_masks):
avg_infection_sensitivity = 0
avg_infection_specificity = 0
avg_infection_accuracy = 0
avg_infection_dice_score = 0
avg_healthy_sensitivity = 0
avg_healthy_specificity = 0
avg_healthy_accuracy = 0
avg_healthy_dice_score = 0
count_infection_sensitivity = 0 # nan error
count_healthy_sensitivity = 0 # nan error
N = len(pred_masks)
for i in range(N):
curr_confusion_metric = (
get_confusion_metric(infection_masks[i], pred_masks[i])
).T
infection_TP = curr_confusion_metric[1][1]
infection_TN = (
curr_confusion_metric[0][0]
+ curr_confusion_metric[2][0]
+ curr_confusion_metric[0][2]
+ curr_confusion_metric[2][2]
)
infection_FP = curr_confusion_metric[1][0] + curr_confusion_metric[1][2]
infection_FN = curr_confusion_metric[0][1] + curr_confusion_metric[2][1]
healthy_TP = curr_confusion_metric[2][2]
healthy_TN = (
curr_confusion_metric[0][0]
+ curr_confusion_metric[0][1]
+ curr_confusion_metric[1][0]
+ curr_confusion_metric[1][1]
)
healthy_FP = curr_confusion_metric[2][0] + curr_confusion_metric[2][1]
healthy_FN = curr_confusion_metric[0][2] + curr_confusion_metric[1][2]
# Sensitivity = Recall = TP/(TP+FN)
        # Precision = TP/(TP+FP)
# Specificity = TN/(TN+FP)
# Dice Score = 2.TP / (2.TP + FP + FN)
infection_sensitivity = 0
if (infection_TP + infection_FN) != 0:
count_infection_sensitivity += 1
infection_sensitivity = (infection_TP) / (infection_TP + infection_FN)
infection_specificity = (infection_TN) / (infection_TN + infection_FP)
infection_accuracy = (infection_TP + infection_TN) / (
infection_TP + infection_TN + infection_FP + infection_FN
)
infection_dice_score = (2 * infection_TP) / (
2 * infection_TP + infection_FP + infection_FN
)
healthy_sensitivity = 0
if (healthy_TP + healthy_FN) != 0:
count_healthy_sensitivity += 1
healthy_sensitivity = (healthy_TP) / (healthy_TP + healthy_FN)
healthy_specificity = (healthy_TN) / (healthy_TN + healthy_FP)
healthy_accuracy = (healthy_TP + healthy_TN) / (
healthy_TP + healthy_TN + healthy_FP + healthy_FN
)
healthy_dice_score = (2 * healthy_TP) / (
2 * healthy_TP + healthy_FP + healthy_FN
)
avg_infection_sensitivity += infection_sensitivity
avg_infection_specificity += infection_specificity
avg_infection_accuracy += infection_accuracy
avg_infection_dice_score += infection_dice_score
avg_healthy_sensitivity += healthy_sensitivity
avg_healthy_specificity += healthy_specificity
avg_healthy_accuracy += healthy_accuracy
avg_healthy_dice_score += healthy_dice_score
avg_infection_sensitivity = avg_infection_sensitivity / count_infection_sensitivity
avg_infection_specificity = avg_infection_specificity / N
avg_infection_accuracy = avg_infection_accuracy / N
avg_infection_dice_score = avg_infection_dice_score / N
avg_healthy_sensitivity = 0
if count_healthy_sensitivity != 0:
avg_healthy_sensitivity = avg_healthy_sensitivity / count_healthy_sensitivity
avg_healthy_specificity = avg_healthy_specificity / N
avg_healthy_accuracy = avg_healthy_accuracy / N
avg_healthy_dice_score = avg_healthy_dice_score / N
return (
avg_infection_dice_score,
avg_infection_sensitivity,
avg_infection_specificity,
avg_infection_accuracy,
avg_healthy_dice_score,
avg_healthy_sensitivity,
avg_healthy_specificity,
avg_healthy_accuracy,
)
def find_eval_metrics(infection_masks, pred_masks):
(
inf_ds,
inf_sen,
inf_spec,
inf_acc,
hea_ds,
hea_sen,
hea_spec,
hea_acc,
) = get_req_avg_eval_metrics(infection_masks, pred_masks)
print("Average Dice Score for Infection: ", inf_ds)
print("Average Sensitivity for Infection: ", inf_sen)
print("Average Specificity for Infection: ", inf_spec)
print("Average Accuracy for Infection: ", inf_acc)
print()
print("Average Dice Score for Healthy: ", hea_ds)
print("Average Sensitivity for Healthy: ", hea_sen)
print("Average Specificity for Healthy: ", hea_spec)
print("Average Accuracy for Healthy: ", hea_acc)
# # Part C, Reconstruction
class reconstruction_sinogram:
def __init__(self, ct_scans):
self.ct_scans = ct_scans
self.sinograms = []
self.reconstructed_ct_scans = []
def get_sinogram(self, ct_scan):
return radon(ct_scan, circle=False, preserve_range=True)
def ct_scans_to_sinograms(self):
N = len(self.ct_scans)
print("CT Scans -> Sinogram")
for i in range(N):
sys.stdout.write("\r" + "Image No. " + str(i))
self.sinograms.append(self.get_sinogram(self.ct_scans[i]))
print()
def get_reconstructed_ct_scan(self, sinogram, angle):
sinogram = np.array([sinogram[:, i] for i in range(0, 180, angle)])
return iradon(sinogram.T, circle=False, preserve_range=True)
def sinogram_to_ct_scans(self, angle):
N = len(self.ct_scans)
print("Sinogram -> CT Scans")
for i in range(N):
sys.stdout.write("\r" + "Image No. " + str(i))
self.reconstructed_ct_scans.append(
self.get_reconstructed_ct_scan(self.sinograms[i], angle)
)
print()
def correct_reconstruction(self, temp_infection_masks):
N = len(self.reconstructed_ct_scans)
for i in range(N):
curr_inf_mask = copy.copy(temp_infection_masks[i])
curr_inf_mask[curr_inf_mask == 2] = 1
self.reconstructed_ct_scans[i] = np.multiply(
self.reconstructed_ct_scans[i], curr_inf_mask
)
# # 4x Limited Angle Sinogram
reconstruct_4x = reconstruction_sinogram(ct_scans)
reconstruct_4x.ct_scans_to_sinograms()
reconstruct_4x.sinogram_to_ct_scans(angle=4)
reconstruct_4x.correct_reconstruction(infection_masks)
# Finding PSNR and SSIM
N = len(ct_scans)
avg_psnr_4x = 0
avg_ssim_4x = 0
for i in range(N):
sys.stdout.write("\r" + "Image No. " + str(i))
avg_psnr_4x += psnr(ct_scans[i], reconstruct_4x.reconstructed_ct_scans[i])
avg_ssim_4x += ssim(ct_scans[i], reconstruct_4x.reconstructed_ct_scans[i])
avg_psnr_4x = avg_psnr_4x / N
avg_ssim_4x = avg_ssim_4x / N
print()
print("Average Peak Signal to Noise Ratio for 4x Reconstruction: ", avg_psnr_4x)
print("Average Structute Similarity Index Measure for 4x Reconstruction: ", avg_ssim_4x)
# 8x Limited Angle Sinogram
reconstruct_8x = reconstruction_sinogram(ct_scans)
reconstruct_8x.ct_scans_to_sinograms()
reconstruct_8x.sinogram_to_ct_scans(angle=8)
reconstruct_8x.correct_reconstruction(infection_masks)
# Finding PSNR and SSIM
N = len(ct_scans)
avg_psnr_8x = 0
avg_ssim_8x = 0
for i in range(N):
sys.stdout.write("\r" + "Image No. " + str(i))
avg_psnr_8x += psnr(ct_scans[i], reconstruct_8x.reconstructed_ct_scans[i])
avg_ssim_8x += ssim(ct_scans[i], reconstruct_8x.reconstructed_ct_scans[i])
avg_psnr_8x = avg_psnr_8x / N
avg_ssim_8x = avg_ssim_8x / N
print("Average Peak Signal to Noise Ratio for 8x Reconstruction: ", avg_psnr_8x)
print("Average Structute Similarity Index Measure for 8x Reconstruction: ", avg_ssim_8x)
# Two samples of reconstructed CT Scan Images, one for 4x and one for 8x
plt.rcParams["figure.figsize"] = (12, 12)
i = 19
f, axarr = plt.subplots(1, 3)
axarr[0].set_title("4x Reconstruction")
axarr[0].imshow(reconstruct_4x.reconstructed_ct_scans[i], cmap="gray")
axarr[1].set_title("8x Reconstruction")
axarr[1].imshow(reconstruct_8x.reconstructed_ct_scans[i], cmap="gray")
axarr[2].set_title("CT Scans")
axarr[2].imshow(ct_scans[i], cmap="gray")
f.tight_layout()
plt.show()
# Evaluating Segmentation on 4x and 8x Reconstruction
pred_masks_4x = get_predicted_mask(reconstruct_4x.reconstructed_ct_scans)
print("Evaluation Metrics for 4x Reconstruction")
find_eval_metrics(infection_masks, pred_masks_4x)
print()
print()
print()
pred_masks_8x = get_predicted_mask(reconstruct_8x.reconstructed_ct_scans)
print("Evaluation Metrics for 8x Reconstruction")
find_eval_metrics(infection_masks, pred_masks_8x)
|
# **Kernel description:**
# This kernel demonstrates the application of an [autoregressive model][1] to the problem of predicting avocado prices for various cities, states, and regions of the USA. This kernel was written as part of my university project on time series forecasting.
# The dataset used is the [updated version][upd_dataset] of the [avocado dataset][original_dataset]. Please note that due to having lots of fluctuations in the data and the need to take the lag time span of at least 1 year for an AR model, almost all time series (except for the `Total U.S.` data) are quite tough ones to make reasonably accurate predictions for. For this reason, the `Total U.S.` data was used for the demonstration purposes.
# There is not much information in this kernel but, still, please consider upvoting it if you liked it and/or got some insights from it!
# PS. The Table of Contents was generated using ToC2 extension for Jupyter Notebook.
# TODO:
# * add stationarity tests
#
# [1]: https://machinelearningmastery.com/autoregression-models-time-series-forecasting-python
# [upd_dataset]: https://www.kaggle.com/timmate/avocado-prices-2020
# [original_dataset]: https://www.kaggle.com/neuromusic/avocado-prices
# **Links**
# Interesting and insightful kernels featuring other ML and DL methods:
# * https://www.kaggle.com/shahules/avocado-apocalypse
# * https://www.kaggle.com/ladylittlebee/linreg-knn-svr-decisiontreerandomforest-timeseries
# * https://www.kaggle.com/biphili/butter-fruit-avocado-price-forecast
# * https://www.kaggle.com/dimitreoliveira/deep-learning-for-time-series-forecasting
# * https://www.kaggle.com/dimitreoliveira/time-series-forecasting-with-lstm-autoencoders/input
# Articles on autoregressive and ARIMA models:
# * https://towardsdatascience.com/machine-learning-part-19-time-series-and-autoregressive-integrated-moving-average-model-arima-c1005347b0d7
# * https://towardsdatascience.com/millennials-favorite-fruit-forecasting-avocado-prices-with-arima-models-5b46e4e0e914
# Table of Contents
# 1. Read the dataset
# 2. Preprocess the data
# 3. Get a subset of the data which will be used for model training and making predictions
# 4. Stationarize the subset
# 5. Prepare the data from the subset for the model training
# 6. Train and evaluate the AR model
# 7. Plot the predictions and ground-truth data
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
# Set global parameters for plotting.
plt.rc("figure", figsize=(12, 6))
sns.set(font_scale=1.2)
import warnings
warnings.filterwarnings("ignore")
# ## Read the dataset
# Read the data from the dataset updated up to June 2020.
DATASET_PATH = "/kaggle/input/avocado-prices-2020/avocado-updated-2020.csv"
avocado_df = pd.read_csv(DATASET_PATH, parse_dates=["date"], index_col=["date"])
avocado_df
# ## Preprocess the data
# Select only the columns that we need to perform TSA and average price forecasting using an AR model.
columns_considered = ["average_price", "type", "geography"]
avocado_df = avocado_df[columns_considered]
avocado_df.head()
# Uncomment the lines below to print the number of entries for various cities, states, and regions.
# print('Number of entries for various cities and regions:')
# print()
# for geographical_name in avocado_df.geography.unique():
# num_entries = sum(avocado_df.geography == geographical_name)
# print(f'{geographical_name:25} {num_entries}')
# Plot the average price of conventional avocados in all regions over time (for each date, prices in all regions are plotted).
sub_df = avocado_df.query("type == 'conventional'")
plt.scatter(sub_df.index, sub_df.average_price, cmap="plasma")
plt.title(
"Average price of conventional avocados in all regions and " "cities over time"
)
plt.xlabel("Date")
plt.ylabel("Average price")
plt.show()
# ## Get a subset of the data which will be used for model training and making predictions
def plot_rolling_stats(time_series, window, avocado_type, geography):
"""
A helper function for plotting the given time series, its rolling
mean and standard deviation.
"""
rolling_mean = time_series.rolling(window=window).mean()
rolling_std = time_series.rolling(window=window).std()
index = time_series.index
sns.lineplot(
x=index, y=time_series.average_price, label="data", color="cornflowerblue"
)
sns.lineplot(
x=index, y=rolling_mean.average_price, label="rolling mean", color="orange"
)
sns.lineplot(
x=index, y=rolling_std.average_price, label="rolling std", color="seagreen"
)
plt.title(f"Average price of {avocado_type} avocados in {geography}")
plt.xlabel("Date")
plt.ylabel("Average price")
# Choose a geography (i.e., a certain region, state, city, or the `Total U.S.` aggregated data) and an avocado type here. NB: `Total U.S.` contains the data which seems the most predictable in comparison to other geographical names of the U.S.
# NB: these two variables affect all the following calculations in that kernel.
AVOCADO_TYPE = "conventional"
GEOGRAPHY = "Total U.S."
sub_df = avocado_df.query(
f"type == '{AVOCADO_TYPE}' and " f"geography == '{GEOGRAPHY}'"
)
sub_df.drop(["type", "geography"], axis=1, inplace=True)
sub_df
# Resample the subset if needed (not really needed for the `Total U.S.` data). This shrinks the data, but it might help smooth the series a bit and make it slightly easier to predict.
# sub_df = sub_df.resample('2W').mean().bfill()
# sub_df.dropna(axis=0, inplace=True)
# sub_df
# Plot the chosen subset (time series), its rolling mean and standard deviation.
plot_rolling_stats(sub_df, window=4, avocado_type=AVOCADO_TYPE, geography=GEOGRAPHY)
# ## Stationarize the subset
# Apply differencing of a given order (if needed).
# sub_df = sub_df.diff(periods=1)
# sub_df
# Differencing always results in at least one NaN value, so drop all NaNs appeared after the differencing.
# sub_df.dropna(axis=0, inplace=True)
# sub_df
# plot_rolling_stats(sub_df, window=4, avocado_type=AVOCADO_TYPE, region=REGION)
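# Added sketch (the TODO at the top of this kernel mentions stationarity tests): an
# Augmented Dickey-Fuller test on the chosen subset; p-value <= 0.05 suggests stationarity.
from statsmodels.tsa.stattools import adfuller
adf_stat, adf_pvalue = adfuller(sub_df.average_price.dropna())[:2]
print(f"ADF statistic: {adf_stat:.3f}, p-value: {adf_pvalue:.3f}")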
# ## Prepare the data from the subset for the model training
# Split the data into the training and test sets.
TEST_SET_SIZE = 45 # number of weeks left for the test set
data = sub_df.values
train_set, test_set = data[:-TEST_SET_SIZE], data[-TEST_SET_SIZE:]
print("shapes:", data.shape, train_set.shape, test_set.shape)
# Plot the training and test data.
train_set_size = len(data) - TEST_SET_SIZE
train_set_dates = sub_df.head(train_set_size).index # for plotting
test_set_dates = sub_df.tail(TEST_SET_SIZE).index
plt.plot(train_set_dates, train_set, color="cornflowerblue", label="train data")
plt.plot(test_set_dates, test_set, color="orange", label="test data")
plt.legend(loc="best")
plt.title(f"Average price of {AVOCADO_TYPE} avocados in {GEOGRAPHY}")
plt.xlabel("Date")
plt.ylabel("Average price")
plt.show()
# ## Train and evaluate the AR model
from statsmodels.tsa.ar_model import AutoReg
model = AutoReg(train_set, lags=52) # use time span of 1 year for lagging
trained_model = model.fit()
# print('Coefficients: %s' % trained_model.params)
# Get predictions and calculate an MSE and RMSE.
from sklearn.metrics import mean_squared_error as mse
predictions = trained_model.predict(
start=train_set_size, end=train_set_size + TEST_SET_SIZE - 1
)
error = mse(test_set, predictions)
print(f"test MSE: {error:.3}")
print(f"test RMSE: {error ** 0.5:.3}")
# ## Plot the predictions and ground-truth data
plt.plot(test_set_dates, predictions, color="orange", label="predicted")
plt.plot(
sub_df.index, sub_df.average_price, color="cornflowerblue", label="ground truth"
)
plt.legend(loc="best")
plt.title(f"Average price of {AVOCADO_TYPE} avocados in {GEOGRAPHY}")
plt.xlabel("Date")
plt.ylabel("Average price")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Library imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
classification_report,
confusion_matrix,
f1_score,
accuracy_score,
)
import warnings
warnings.simplefilter("ignore")
# ## Load the dataset and verify the dataload
strokedf = pd.read_csv(
"../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv"
)
# Check the initial rows
strokedf.head()
# Let's check the data structure
strokedf.info()
# Check for missing values
strokedf.isna().sum()
# ### Missing value treatment for 'bmi' attribute
# The approach is to substitute the missing bmi values with the mean bmi within each target (stroke) group
strokedf["bmi"] = strokedf["bmi"].fillna(
strokedf.groupby("stroke")["bmi"].transform("mean")
)
# Check whether imputations are done
strokedf.isna().sum()
# Another alternate approach
# strokedf["bmi"] = strokedf.groupby("stroke").transform(lambda x: x.fillna(x.mean()))
# ## Exploratory Data Analysis
# Explore the target variable
sns.countplot(strokedf["stroke"])
strokedf["stroke"].value_counts()
# #### exploration - gender, hypertension and heart_disease attributes
print("Gender by the target variable")
print(strokedf.groupby("stroke")["gender"].value_counts())
print("\n")
print("hypertension by the target variable")
print(strokedf.groupby("stroke")["hypertension"].value_counts())
print("\n")
print("heart_disease by the target variable")
print(strokedf.groupby("stroke")["heart_disease"].value_counts())
# Doing the visualizations
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 6))
sns.countplot(x="gender", hue="stroke", data=strokedf, ax=ax1)
sns.countplot(x="hypertension", hue="stroke", data=strokedf, ax=ax2)
ax2.set_ylabel("")
sns.countplot(x="heart_disease", hue="stroke", data=strokedf, ax=ax3)
ax3.set_ylabel("")
plt.show()
# #### exploration Age attribute
# Explore Age attribute
print(strokedf.groupby("stroke")["age"].mean())
# Explore Age variable with respect to the stroke attribute
sns.catplot(x="stroke", y="age", kind="box", data=strokedf)
g = sns.FacetGrid(data=strokedf, col="stroke", height=5)
g.map(sns.distplot, "age")
plt.show()
# Explore gender and age with respect to stroke and establish any conclusion
sns.catplot(x="gender", y="age", hue="stroke", kind="box", data=strokedf)
# #### exploration - ever_married, work_type, Residence_type, smoking_status attributes
print("ever_married by the target variable")
print(strokedf.groupby("stroke")["ever_married"].value_counts())
print("\n")
print("work_type by the target variable")
print(strokedf.groupby("stroke")["work_type"].value_counts())
print("\n")
print("Residence_type by the target variable")
print(strokedf.groupby("stroke")["Residence_type"].value_counts())
print("\n")
print("smoking_status by the target variable")
print(strokedf.groupby("stroke")["smoking_status"].value_counts())
# Doing the visualizations
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20, 6))
sns.countplot(x="ever_married", hue="stroke", data=strokedf, ax=ax1)
sns.countplot(x="work_type", hue="stroke", data=strokedf, ax=ax2)
ax2.set_ylabel("")
sns.countplot(x="Residence_type", hue="stroke", data=strokedf, ax=ax3)
ax3.set_ylabel("")
sns.countplot(x="smoking_status", hue="stroke", data=strokedf, ax=ax4)
ax4.set_ylabel("")
plt.show()
# #### exploration avg_glucose level
# Explore avg_glucose_level variable with respect to the stroke attribute
print(strokedf.groupby("stroke")["avg_glucose_level"].mean())
sns.catplot(x="stroke", y="avg_glucose_level", kind="box", data=strokedf)
g = sns.FacetGrid(data=strokedf, col="stroke", height=5)
g.map(sns.distplot, "avg_glucose_level")
plt.show()
# #### exploration bmi attribute
# Explore bmi variable with respect to the stroke attribute
print(strokedf.groupby("stroke")["bmi"].mean())
sns.catplot(x="stroke", y="bmi", kind="box", data=strokedf)
g = sns.FacetGrid(data=strokedf, col="stroke", height=5)
g.map(sns.distplot, "bmi")
plt.show()
# ## Modelling - Random Forest
# ### Perform Label Encoder Transformations
# Initialize the label encoder
label_encoder = LabelEncoder()
# Encode labels
strokedf["gender"] = label_encoder.fit_transform(strokedf["gender"])
strokedf["ever_married"] = label_encoder.fit_transform(strokedf["ever_married"])
strokedf["work_type"] = label_encoder.fit_transform(strokedf["work_type"])
strokedf["Residence_type"] = label_encoder.fit_transform(strokedf["Residence_type"])
strokedf["smoking_status"] = label_encoder.fit_transform(strokedf["smoking_status"])
# ### Perform Train , Test Split of the data
# Since the proportion of stroke cases is small, we perform a stratified split
features = strokedf.drop("stroke", axis=1)
target = strokedf["stroke"]
features_train, features_test, target_train, target_test = train_test_split(
features, target, test_size=0.3, random_state=101, stratify=target
)
# ### Building the Model
# Training the Random Forest model
from sklearn.ensemble import RandomForestClassifier
# we build the random forest classifier using the entropy criterion
rfc = RandomForestClassifier(n_estimators=100, criterion="entropy")
rfc.fit(features_train, target_train)
# ### Predictions and Evaluations
predictions = rfc.predict(features_test)
print("Confusion Matrix - Random Forest Using Gini Index\n")
print(confusion_matrix(target_test, predictions))
print("\n")
print("Classification Report \n")
print(classification_report(target_test, predictions))
print("\n")
print("Accuracy Score \n")
print(accuracy_score(target_test, predictions))
print("\n")
print("F1 Score \n")
print(f1_score(target_test, predictions))
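# Added sketch (not in the original notebook): inspect which attributes drive the random
# forest's predictions.
importances = pd.Series(rfc.feature_importances_, index=features.columns)
print(importances.sort_values(ascending=False))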
|
# #**SAMPLE CODE**
# ---
# # **HAND GESTURE RECOGNITION FOR DYNAMIC GESTURES**
# ---
#
# ## Defining Hyper-Parameters
# ---
#
import keras
from tensorflow import keras
from keras.datasets import fashion_mnist
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
# from tensorflow_docs.vis import embed
from tensorflow import keras
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # for 3d plotting
import h5py
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
IMG_SIZE = 224
BATCH_SIZE = 30
EPOCHS = 10
MAX_SEQ_LENGTH = 30
test_df = pd.read_csv("../input/gesture-recognition/val.csv")
test_df.head()
train_df = pd.read_csv("../input/csvfiles/train.csv")
test_df = pd.read_csv("../input/csvfiles/val.csv", sep="\t")
print(f"Total videos for training: {len(train_df)}")
print(f"Total videos for testing: {len(test_df)}")
from sklearn.utils import shuffle
train_df = shuffle(train_df)
test_df = shuffle(test_df)
training_input_data = train_df
testing_input_data = test_df
# train_df.sample(10)
test_df.sample(10)
#
# ---
# One of the many challenges of training video classifiers is figuring out a way to feed the videos to a network. This blog post discusses five such methods. Since a video is an ordered sequence of frames, we could just extract the frames and put them in a 3D tensor. But the number of frames may differ from video to video which would prevent us from stacking them into batches (unless we use padding). As an alternative, we can save video frames at a fixed interval until a maximum frame count is reached. In this example we will do the following:
# 1. Capture the frames of a video.
# 2. Extract frames from the videos until a maximum frame count is reached.
# 3. In the case where a video's frame count is less than the maximum frame count, we will pad the video with zeros.
# Note that this workflow is identical to problems involving text sequences. Videos of the UCF101 dataset are known not to contain extreme variations in objects and actions across frames. Because of this, it may be okay to only consider a few frames for the learning task. But this approach may not generalize well to other video classification problems. We will be using OpenCV's VideoCapture() method to read frames from videos.
# ---
#
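# The workflow described above (read frames, stop at a maximum count, zero-pad shorter videos) is not implemented literally in this notebook, which works on pre-extracted PNG frames instead. A hedged sketch of that approach using OpenCV's VideoCapture; the function name load_video_frames and the padding strategy are illustrative assumptions:
import cv2
import numpy as np


def load_video_frames(path, max_frames=MAX_SEQ_LENGTH, size=(IMG_SIZE, IMG_SIZE)):
    # Read up to max_frames frames from the video and resize each one
    cap = cv2.VideoCapture(path)
    frames = []
    try:
        while len(frames) < max_frames:
            ok, frame = cap.read()
            if not ok:
                break  # reached the end of the video
            frames.append(cv2.resize(frame, size))
    finally:
        cap.release()
    # Zero-pad videos shorter than max_frames so every sample has the same shape
    while len(frames) < max_frames:
        frames.append(np.zeros((size[0], size[1], 3), dtype="uint8"))
    return np.array(frames, dtype="float32")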
# import the modules
import os
from os import listdir
import cv2
label_processor = keras.layers.StringLookup(
num_oov_indices=0, vocabulary=np.unique(train_df["Label"])
)
print(label_processor.get_vocabulary())
# take all class labels from the train_df column named 'Label' and store them in labels
labels = train_df["Label"].values
# convert classlabels to label encoding
labels = label_processor(labels[..., None]).numpy()
# The following two methods are taken from this tutorial:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def load_image(df, path, max_frames=0):
num_samples = len(df)
labels = df["Label"].values.tolist()
    # convert_tensor = transforms.ToTensor()  # requires torchvision.transforms (not imported here) and is unused, so it is commented out
img = cv2.imread(path)
print(img.shape)
fig = plt.figure(figsize=(10, 7))
rows = 2
columns = 2
fig.add_subplot(rows, columns, 1)
plt.imshow(img)
plt.title("image1")
if img.shape[0] == 360:
cropped_image = img[0:350, 0:220]
fig.add_subplot(rows, columns, 2)
plt.imshow(cropped_image)
plt.title("image2")
else:
cropped_image = img[0:350:, 0:120]
fig.add_subplot(rows, columns, 2)
plt.imshow(cropped_image)
plt.title("image2")
frames = crop_center_square(img)
frames = cv2.resize(frames, (224, 224))
# img = Image.open(path)
# frames = convert_tensor(img)
return frames, labels
def load_gestures(df, path):
num_samples = len(df)
gesture_number = to_categorical(df["ImgNo"])
video_paths = df["Image"].values.tolist()
# print(gesture_number)
# print(num_samples)
return gesture_number, video_paths
"""
labels = df["Image"].values
labels = label_processor(labels[..., None]).numpy()
img = load_img(path)
frames = tf.keras.preprocessing.image.img_to_array(img)
return np.array([frames]) # Convert single image to a batch.
"""
train_gestures, train_videos = load_gestures(
train_df, "../input/gesture-recognition/train"
)
val_gestures, val_videos = load_gestures(test_df, "../input/gesture-recognition/val")
label_processor = keras.layers.StringLookup(
num_oov_indices=0, vocabulary=np.unique(train_df["Label"])
)
print(label_processor.get_vocabulary())
"""
import torch
from torchvision import transforms
from PIL import Image
t = torch.Tensor(train_data[0])
def tensor_to_image(tensor):
tensor = tensor*255
tensor = np.array(tensor, dtype=np.uint8)
for i in range(30):
if np.ndim(tensor[i])>3:
assert tensor[i].shape[0] == 1
tensor = tensor[i][0]
return PIL.Image.fromarray(tensor)
"""
# get the path/directory
folder_dir = "../input/gesture-recognition/train"
video_data_train = []
for vid in train_videos[:2]:
dir = folder_dir + f"/{vid}"
# print(dir)
frame = []
for images in os.listdir(dir):
# check if the image ends with png
if images.endswith(".png"):
frames, labels = load_image(train_df, dir + f"/{images}")
frame.append(frames)
break
video_data_train.append(frame)
train_data, train_labels = video_data_train, labels
"""
# get the path/directory
folder_dir = "../input/gesture-recognition/val"
video_data_test = []
for vid in val_videos[:1]:
dir = folder_dir + f'/{vid}'
frame = []
for images in os.listdir(folder_dir):
# check if the image ends with png
if (images.endswith(".png")):
frames = load_image(test_df, folder_dir + f'/{images}')
frame.append(frames)
video_data_test.append(frame)
"""
test_data, test_labels = video_data_test, labels
# print(val_videos[
import csv
train_data = np.array(train_data)
train_labels = np.array(train_labels)
test_data = np.array(test_data)
test_labels = np.array(test_labels)
# train_data each index: (20, 224, 224, 3)
train_data.shape
# train_data each index: (20, 224, 224, 3)
train_data.shape
train_labels.shape
test_data = np.array(test_data)
train_gestures[9]
# # **Extracting Hands from the Image**
type(train_data[0])
train_data[0].shape
from PIL import Image as im
tdi = np.reshape(train_data[0][0], (224, 224, 3))
print(tdi.shape)
# print(tdi)
train_data_image = im.fromarray(tdi)
print(type(train_data_image))
train_data_image.save("image1.png")
img = im.open("/kaggle/working/image1.png")
plt.figure()
img.show()
plt.show()
# Importing Libraries
import cv2
import mediapipe as mp
import time
# Used to convert protobuf message
# to a dictionary.
from google.protobuf.json_format import MessageToDict
from google.colab.patches import cv2_imshow
class handTracker:
def __init__(
self, mode=False, maxHands=2, detectionCon=0.5, modelComplexity=1, trackCon=0.5
):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.modelComplex = modelComplexity
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(
self.mode,
self.maxHands,
self.modelComplex,
self.detectionCon,
self.trackCon,
)
self.mpDraw = mp.solutions.drawing_utils
def handsFinder(self, image, draw=True):
imageRGB = image
# imageRGB = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imageRGB)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(
image, handLms, self.mpHands.HAND_CONNECTIONS
)
return image
def positionFinder(self, image, handNo=0, draw=True):
lmlist = []
if self.results.multi_hand_landmarks:
Hand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(Hand.landmark):
h, w, c = image.shape
cx, cy = int(lm.x * w), int(lm.y * h)
lmlist.append([id, cx, cy])
if draw:
cv2.circle(image, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
return lmlist
def main():
# cap = cv2.VideoCapture(0)
tracker = handTracker()
print("True")
while True:
# success,image = cap.read()
image = tracker.handsFinder(tdi)
lmList = tracker.positionFinder(image)
if len(lmList) != 0:
print(lmList[4])
cv2_imshow(image)
cv2.waitKey(1)
tracker = handTracker()
# print("True")
while True:
# success,image = cap.read()
image = tracker.handsFinder(tdi)
lmList = tracker.positionFinder(image)
if len(lmList) != 0:
print(lmList[4])
cv2_imshow(image)
cv2.waitKey(1)
# ## **Sequence model**
# ---
#
import numpy
with open("/kaggle/input/hand-gestures-npy/train/train.npy", "rb") as f1:
train_data = numpy.load(f1)
with open("/kaggle/input/hand-gestures-npy/test/test.npy", "rb") as f2:
test_data = numpy.load(f2)
with open("/kaggle/input/hand-gestures-npy/train_gestures.npy", "rb") as g1:
train_gestures = numpy.load(g1)
with open("/kaggle/input/hand-gestures-npy/test_gestures.npy", "rb") as g2:
test_gestures = numpy.load(g2)
train_data.shape
test_data.shape
train_gestures.shape
test_gestures.shape
import keras
from keras.models import Sequential
from tensorflow.keras import layers
from keras.layers import Dense, MaxPooling3D, Conv3D, Flatten, Dropout, LSTM
from tensorflow.keras.optimizers import Adadelta
train_data.shape
import tensorflow as tf
train_data = tf.stack(train_data)
from keras import callbacks
def build_model():
# 3D CNN
model = Sequential()
# Input Layer
# filters = Similar to number of Neurons, kernel_size = (3,3, 3), strides = (1,2,2), padding = 'same'(zero padding), activation = 'relu'
model.add(
Conv3D(
filters=16,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding="same",
activation="relu",
input_shape=(30, 224, 224, 3),
)
)
# MaxPooling
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
# Conv3D - II
model.add(
Conv3D(
filters=32,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding="same",
activation="relu",
)
)
# MaxPooling
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))
model.add(layers.BatchNormalization())
# Conv3D - III
model.add(
Conv3D(
filters=32,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding="same",
activation="relu",
)
)
# MaxPooling
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))
# model.add(layers.BatchNormalization())
# Conv3D - IV
model.add(
Conv3D(
filters=64,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding="same",
activation="relu",
)
)
# MaxPooling
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1)))
# Conv3D - V
# model.add(Conv3D(filters = 128, kernel_size = (3,3,3), strides = (1,2,2), padding = 'same', activation = 'relu'))
# Conv3D - VI
# model.add(Conv3D(filters = 128, kernel_size = (3,3,3), strides = (1,2,2), padding = 'same', activation = 'relu'))
# MaxPooling
# model.add(MaxPooling3D(pool_size=(2, 2, 2), padding="same"))
# print('after last max pooling: ', model.shape)
model.add(tf.keras.layers.Reshape((32, 12), input_shape=(None, 6, 1, 1, 64)))
# model.add(layers.BatchNormalization())
model.add(LSTM(1, input_shape=(4, 16, 16), return_sequences=True))
model.add(Dropout(0.25))
# Flatten Layer
model.add(Flatten())
# Fully Connected Layer
model.add(Dense(units=256, activation="relu"))
model.add(Dense(5, activation="softmax"))
# Optimizers
# optimizers = keras.optimizers.Adadelta(learning_rate = 0.01)
# from keras.optimizers import SGD
# opt = SGD(lr=0.01)
# Model Compiler
earlystopping = callbacks.EarlyStopping(
monitor="val_loss", mode="min", patience=4, restore_best_weights=True
)
# model.build(train_data)
model.compile(
loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"],
)
history = model.fit(
train_data,
train_gestures[:],
batch_size=64,
epochs=15,
verbose=1,
validation_data=(test_data, test_gestures),
callbacks=[earlystopping],
)
score = model.evaluate(test_data, test_gestures[:], verbose=0)
print(f"Test loss: {score[0]} / Test accuracy: {score[1]}")
return model, history, score
model, history, score = build_model()
import matplotlib.pyplot as plt
accuracy_train = history.history["accuracy"]
accuracy_val = history.history["val_accuracy"]
# print(history.history.keys())
epochs = range(1, 16)
plt.plot(epochs, accuracy_train, "g", label="Training accuracy")
plt.plot(epochs, accuracy_val, "b", label="validation accuracy")
plt.title("Training and Validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("accuracy")
plt.legend()
plt.show()
train_loss = history.history["loss"]
val_loss = history.history["val_loss"]
# print(history.history.keys())
epochs = range(1, 16)
plt.plot(epochs, train_loss, "g", label="Training loss")
plt.plot(epochs, val_loss, "b", label="validation loss")
plt.title("Training and Validation loss")
plt.xlabel("Epochs")
plt.ylabel("loss")
plt.legend()
plt.show()
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import vlc
import time
flag = 0
class Capturing:
@staticmethod
def capture_images():
cam_port = 0
cam = cv2.VideoCapture(cam_port)
for i in range(30):
result, image = cam.read()
if result:
cv2.imshow("Taking image", image)
cv2.imwrite(str(i) + ".png", image)
else:
print("No image detected. Please! try again")
class ExecutingVlcCommands:
@staticmethod
def increase_volume():
value = int(interface.audio_get_volume() / 10) * 10
interface.audio_set_volume(value + 10)
@staticmethod
def decrease_volume():
value = int(interface.audio_get_volume() / 10) * 10
interface.audio_set_volume(value - 10)
@staticmethod
def increase_play_rate():
value = interface.get_rate()
interface.set_rate(value * 1.2)
@staticmethod
def decrease_play_rate():
value = interface.get_rate()
interface.set_rate(value / 1.2)
@staticmethod
def play_or_pause(flag):
if flag:
interface.set_pause(1)
flag = 1
time.sleep(0.5)
else:
interface.play()
interface.set_rate(1)
flag = 0
time.sleep(0.5)
def Execute(flag):
Capturing.capture_images() # Capturing images
# result = from the model
# if result == 1:
# ExecutingVlcCommands.increase_volume()
# elif result == 2:
# ExecutingVlcCommands.decrease_volume()
# elif result == 3:
# ExecutingVlcCommands.increase_play_rate()
# elif result == 4:
# ExecutingVlcCommands.decrease_play_rate()
# elif result == 5:
# ExecutingVlcCommands.play_or_pause(flag)
# else:
# pass
print(flag)
ExecutingVlcCommands.play_or_pause(flag)
Execute(flag)
if __name__ == "__main__":
file_name = input(
"the absolute path of the video"
+ "\n"
+ "For example: C:\\\\Users\\\\Sampath\\\\Desktop\\\\temp\\\\project\\\\movie.mkv:"
)
# #print(file_name)
# file_name=b"C:\\Users\\Sampath\\Desktop\\temp\\project\\movie.mkv"
# file_name=file_name.encode('utf-8')
interface = vlc.MediaPlayer()
playing = vlc.Media(file_name)
interface.set_media(playing)
interface.play()
interface.audio_set_volume(50)
# flag=0
Execute(flag)
# class Preprocessing():
# @staticmethod
# def load_image(df, path, max_frames=0):
# num_samples = len(df)
# labels = df["Label"].values.tolist()
# img = cv2.imread(path)
# improved_img = Preprocessing.improve_quality(img)
# frames = Preprocessing.crop_center_square(improved_img)
# #224,224 changed to 120,120
# frames = cv2.resize(frames, (120, 120))
# return frames, labels
# @staticmethod
# def improve_quality(img):
# brightness = 7
# contrast = 1
# contrasted_image = cv2.addWeighted(img, contrast, np.zeros(img.shape, img.dtype), 0, brightness)
# return contrasted_image
# @staticmethod
# def crop_center_square(frame):
# y, x = frame.shape[0:2]
# min_dim = min(y, x)
# start_x = (x // 2) - (min_dim // 2)
# start_y = (y // 2) - (min_dim // 2)
# return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
# if __name__=="__main__":
# folder_dir = "/kaggle/input/corrected-augmented-hg-data/val"
# test_data = []
# for vid in val_videos[:]:
# dir = folder_dir + f'/{vid}'
# frame = []
# list_dir = os.listdir(dir)
# list_dir.sort()
# for images in list_dir:
# if (images.endswith(".png")):
# frames, frame_labels = load_image(test_df, dir + f'/{images}')
# frame.append(frames)
# test_data.append(frame)
# test_data = np.array(test_data)
# new_model = load_model("C:/Users/Sampath/Desktop/temp/project/trail.hd5")
# new_model.summary()
# new_model.predict(test_data)
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
)
train_data.shape
model, history, score = build_model()
history = model.fit(
train_data,
train_gestures[:100],
validation_split=0.3,
epochs=7,
)
# My model
class Conv3DModel(tf.keras.Model):
def __init__(self):
super(Conv3DModel, self).__init__()
# Convolutions
self.conv1 = tf.compat.v2.keras.layers.Conv3D(
32, (3, 3, 3), activation="relu", name="conv1", data_format="channels_last"
)
self.pool1 = tf.keras.layers.MaxPool3D(
pool_size=(2, 2, 2), data_format="channels_last"
)
        self.conv2 = tf.compat.v2.keras.layers.Conv3D(
            64, (3, 3, 3), activation="relu", name="conv2", data_format="channels_last"
        )
self.pool2 = tf.keras.layers.MaxPool3D(
pool_size=(2, 2, 2), data_format="channels_last"
)
# LSTM & Flatten
self.convLSTM = tf.keras.layers.ConvLSTM2D(40, (3, 3))
self.flatten = tf.keras.layers.Flatten(name="flatten")
# Dense layers
self.d1 = tf.keras.layers.Dense(128, activation="relu", name="d1")
self.out = tf.keras.layers.Dense(4, activation="softmax", name="output")
def call(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.convLSTM(x)
# x = self.pool2(x)
# x = self.conv3(x)
# x = self.pool3(x)
x = self.flatten(x)
x = self.d1(x)
return self.out(x)
model = Conv3DModel()
# choose the loss and optimizer methods
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"],
)
history = model.fit(
train_data,
train_gestures,
validation_split=0.3,
epochs=7,
)
model.summary()
img_tensor = (30, 224, 224, 3)
def defineModel(img_tensor):
inputShape = (img_tensor[0], img_tensor[1], img_tensor[2], img_tensor[3])
model = Sequential(
[
Conv3D(16, tuple([5] * 3), activation="relu", input_shape=inputShape),
MaxPooling3D(tuple([2] * 3), padding="same"),
Conv3D(32, tuple([3] * 3), activation="relu"),
MaxPooling3D(pool_size=(1, 2, 2), padding="same"),
Conv3D(64, tuple([3] * 3), activation="relu"),
MaxPooling3D(pool_size=(1, 2, 2), padding="same"),
Flatten(),
Dense(128, activation="relu"),
Dropout(0.25),
Dense(64, activation="relu"),
Dropout(0.25),
Dense(5, activation="softmax"),
]
)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
        loss="categorical_crossentropy",
        metrics=["categorical_accuracy"],
    )
return model
model = defineModel(img_tensor)
model.summary()
history = model.fit(
train_data,
train_gestures[:50],
validation_split=0.3,
epochs=1,
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
labeled = pd.read_csv(
"/kaggle/input/uta-datathon-2023-text-classification/checkworthy_labeled.csv"
)
eval = pd.read_csv(
"/kaggle/input/uta-datathon-2023-text-classification/checkworthy_eval.csv"
)
eval.head()
from sklearn.feature_extraction.text import (
CountVectorizer,
TfidfVectorizer,
HashingVectorizer,
TfidfTransformer,
)
from sklearn.linear_model import LogisticRegressionCV, SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
import random
import warnings
warnings.filterwarnings("ignore")
train_df, test_df = train_test_split(labeled, test_size=0.1, shuffle=True)
train_df, val_df = train_test_split(train_df, test_size=0.2, shuffle=True)
vectorizer = CountVectorizer(stop_words="english", lowercase=True, analyzer="word")
X_train = vectorizer.fit_transform(train_df["Text"])
y_train = train_df["Category"]
X_val = vectorizer.transform(val_df["Text"])
y_val, y_test = val_df["Category"], test_df["Category"]
X_test = vectorizer.transform(test_df["Text"])
train = vectorizer.transform(labeled["Text"])
output = labeled["Category"]
Log_reg = LogisticRegressionCV(max_iter=100)
Log_reg.fit(X_train, y_train)
y_pred = Log_reg.predict(X_val)
print(accuracy_score(y_pred, y_val))
y_pred = Log_reg.predict(X_test)
print(accuracy_score(y_pred, y_test))
def factcheck_worthy(sentence):
X = vectorizer.transform([sentence])
category = Log_reg.predict_proba(X)[:, 1][0]
return category
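# For illustration, the helper above returns the model's estimated probability that a sentence is check-worthy; the example sentence below is made up:
print(factcheck_worthy("The unemployment rate fell by two percent last year."))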
arr = []
best_count = 0
for j in range(370, 400, 1):
j = j / 1000
print(j)
count = 0
for i in range(len(labeled)):
predict = "No" if factcheck_worthy(labeled["Text"][i]) < j else "Yes"
if predict == labeled["Category"][i]:
count += 1
arr.append(count)
print(count)
best_count = max(count, best_count)
plt.plot(arr)
plt.show()
print(best_count)
# print(random.getstate())
state = random.getstate()
eval["Category"] = None
for i in range(len(eval)):
eval["Category"][i] = 0 if factcheck_worthy(eval["Text"][i]) < 0.4 else 1
display(eval["Category"].value_counts())
eval["Category"] = eval["Category"].map({0: "No", 1: "Yes"})
eval[["Id", "Category"]].to_csv("1.csv", index=False)
eval["Category"] = None
for i in range(len(eval)):
eval["Category"][i] = 0 if factcheck_worthy(eval["Text"][i]) < 0.4 else 1
display(eval["Category"].value_counts())
eval["Category"] = eval["Category"].map({0: "No", 1: "Yes"})
eval[["Id", "Category"]].to_csv("1.csv", index=False)
from sklearn.feature_extraction.text import (
CountVectorizer,
TfidfVectorizer,
HashingVectorizer,
TfidfTransformer,
)
from sklearn.linear_model import LogisticRegressionCV, SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
import random
import warnings
warnings.filterwarnings("ignore")
val_score, test_score = 0, 0
def run():
train_df, test_df = train_test_split(labeled, test_size=0.2, shuffle=True)
train_df, val_df = train_test_split(train_df, test_size=0.2, shuffle=True)
vectorizer = CountVectorizer(stop_words="english", lowercase=True, analyzer="word")
X_train = vectorizer.fit_transform(train_df["Text"])
y_train = train_df["Category"]
X_val = vectorizer.transform(val_df["Text"])
y_val, y_test = val_df["Category"], test_df["Category"]
X_test = vectorizer.transform(test_df["Text"])
train = vectorizer.transform(labeled["Text"])
output = labeled["Category"]
Log_reg = LogisticRegressionCV(max_iter=10)
Log_reg.fit(X_train, y_train)
y_pred = Log_reg.predict(X_val)
print(accuracy_score(y_pred, y_val))
val_score = accuracy_score(y_pred, y_val)
y_pred = Log_reg.predict(X_test)
print(accuracy_score(y_pred, y_test))
test_score = accuracy_score(y_pred, y_test)
return val_score, test_score
def factcheck_worthy(sentence):
X = vectorizer.transform([sentence])
category = Log_reg.predict_proba(X)[:, 1][0]
return category
"""
arr = []
best_count = 0
for j in range(370, 400, 1):
j = j/1000
print(j)
count = 0
for i in range(len(labeled)):
predict = 'No' if factcheck_worthy(labeled['Text'][i])<j else 'Yes'
if predict==labeled['Category'][i]:
count+=1
arr.append(count)
print(count)
best_count = max(count, best_count)
plt.plot(arr)
plt.show()
print(best_count)
#print(random.getstate())
state = random.getstate()
"""
while test_score < 0.84 and val_score < 0.84:
    val_score, test_score = run()
print(test_score, val_score, "wekfjbwf")
|
# Import the "pandas" module with the alias "pd"
import pandas as pd
import pandas as pd
data = {"Kandidat 1": 17, "Kandidat 2": 19, "Kandidat 3": 12}
punkte = pd.Series(data)
print(punkte)
# ### TASK:
# Create a Series that looks like this (the left column is the index):
# Punkte Kandidat 1    17
# Punkte Kandidat 2    19
# Punkte Kandidat 3    12
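# A possible solution sketch for the task above (one of several ways to build the Series; the variable name is arbitrary):
punkte_mit_index = pd.Series(
    [17, 19, 12],
    index=["Punkte Kandidat 1", "Punkte Kandidat 2", "Punkte Kandidat 3"],
)
print(punkte_mit_index)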
# Creating a DataFrame with integer entries
pd.DataFrame({"Yes": [50, 21], "No": [131, 2]})
# Creating a DataFrame with string entries
import pandas as pd
pd.DataFrame(
{"Bob": ["I liked it.", "It was awful."], "Sue": ["Pretty good.", "Bland."]}
)
# Creating a DataFrame with a label index for the rows
pd.DataFrame(
{"Bob": ["I liked it.", "It was awful."], "Sue": ["Pretty good.", "Bland."]},
index=["Product A", "Product B"],
)
# ### TASK:
# Create a DataFrame that looks like this:
# The left column and the top row are each a label index.
# The values of the middle column are strings.
# The values of the right column are integers.
#
#              KanzlerIn          Dauer in Jahren
# 1949–1963    Konrad Adenauer    14
# 1963–1966    Ludwig Erhard      3
# 1969–1974    Willy Brandt       5
# 2005–2021    Angela Merkel      16
#
import pandas as pd
data = {
"KanzlerIn": ["1949–1963", "1963–1966", "1969–1974", "2005–2021"],
"Name": ["Konrad Adenauer", "Ludwig Erhard", "Willy Brandt", "Angela Merkel"],
"Dauer in Jahren": [14, 3, 5, 16],
}
df = pd.DataFrame(data=data, columns=["KanzlerIn", "Name", "Dauer in Jahren"])
df.set_index("KanzlerIn", inplace=True)
df.index.name = None
df.columns.name = None
display(df)
# Reading a CSV file from Kaggle
df = pd.read_csv("/kaggle/input/customers-dataset/Customers.csv", index_col=0)
# Reading a CSV file from another source: download the file to your hard drive. Then click "File" at the top left and then "Upload Data".
# Determine the size of the dataset (number of observations, number of variables)
df.shape
# Show the first 5 rows of the dataset
df.head(5)
# ### TASK:
# Create a Kaggle account and send me your nickname so that I can share my code with you.
# Find a CSV dataset on a topic you find interesting. The dataset should contain a possible metric dependent variable and a possible independent variable.
# Load the dataset into this Python notebook.
# Carry out a first, very short analysis of the relationship using the df.groupby command or the df.corr command. Write your interpretation into the code (as a #comment or as a markdown cell).
# Share this Python notebook with me. To do so, click "File" at the top right and then "Share".
# A possible metric variable to be explained is the amount of money a football player has received. A possible explanatory variable is the name of the football player.
# Glassdoor - Analyze Gender Pay Gap | Kaggle
# Variable to be explained: salary
# Explanatory variable: gender
# The metric variable is: daily screen time
# The explanatory variable is: daily yoga
# https://www.kaggle.com/datasets/thedevastator/how-does-daily-yoga-impact-screen-time-habits?resource=download
df = pd.read_csv("/kaggle/input/customers-dataset/Customers.csv", index_col=0)
df = pd.read_csv(
"/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv", index_col=0
)
df.shape
df.head(5)
import pandas as pd
category_sales = df.groupby("Product line")["Quantity"].mean()
print(category_sales)
# Interpretation:
# I grouped the supermarket sales dataset by product category and then calculated the average quantity sold for each category. The category "Health and Beauty" has the highest average quantity sold, while the category "Home and Lifestyle" has the lowest. From this I would interpret that customers in the supermarket tend to buy products related to health and beauty, while products related to home and lifestyle are less popular.
# A possible metric dependent variable in this dataset could be the total revenue (Total).
# A possible independent variable could be the number of products sold (Quantity).
# Here is the link to the dataset: https://www.kaggle.com/datasets/aungpyaeap/supermarket-sales
#
corr = df["Total"].corr(df["Quantity"])
print("Correlation coefficient: {:.2f}".format(corr))
|
# # Introduction
# A dataset is a collection of data that has been collected and organized in a specific way to be used for analysis, research, or training purposes. In the context of machine learning, a dataset is a collection of data that is used to train an algorithm or model. A dataset can be composed of various types of data, such as images, audio, text, or numerical data.
# In this particular case, the TESS dataset is a collection of audio recordings of two female actresses speaking 200 target words in a carrier phrase, portraying seven different emotions. The dataset is organized into folders containing the audio files for each actress and emotion, and each audio file is in WAV format. The TESS recordings are high-quality audio, and the dataset is unique because it is female-only, providing a balanced representation for training an emotion classifier.
# # Importing libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import IPython as ipd
import librosa
import librosa.display
from IPython.display import Audio
import os
plt.rcParams["figure.figsize"] = (12, 6)
plt.style.use("fivethirtyeight")
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import (
Dense,
Dropout,
Flatten,
Conv2D,
MaxPooling2D,
LSTM,
Bidirectional,
Input,
concatenate,
)
from keras.optimizers import Adam
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.layers import Conv1D, MaxPooling1D, Dense, Dropout, LSTM
# # Load the dataset
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
paths = []
labels = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
label = filename.split("_")[-1]
label = label.split(".")[0]
labels.append(label.lower())
if len(paths) == 2800:
break
print("Dataset is Loaded")
## Create a dataframe
df = pd.DataFrame()
df["speech"] = paths
df["label"] = labels
df.head().style.set_properties(
**{"background-color": "yellow", "color": "black", "border-color": "#8b8c8c"}
)
df["label"].value_counts()
def waveshow(data, sr, emotion):
plt.figure(figsize=(10, 4))
plt.title(emotion, size=20)
librosa.display.waveshow(data, sr=sr)
plt.show()
def spectogram(data, sr, emotion):
x = librosa.stft(data)
xdb = librosa.amplitude_to_db(abs(x))
plt.figure(figsize=(11, 4))
plt.title(emotion, size=20)
librosa.display.specshow(xdb, sr=sr, x_axis="time", y_axis="hz")
plt.colorbar()
# Select an emotion
emotion = "happy"
# Load the audio file
path = np.array(df["speech"][df["label"] == emotion])[0]
data, sampling_rate = librosa.load(path)
# Display the waveform and spectrogram
waveshow(data, sampling_rate, emotion)
spectogram(data, sampling_rate, emotion)
# Play the audio file
Audio(path)
# Select an emotion
emotion = "angry"
# Load the audio file
path = np.array(df["speech"][df["label"] == emotion])[0]
data, sampling_rate = librosa.load(path)
# Display the waveform and spectrogram
waveshow(data, sampling_rate, emotion)
spectogram(data, sampling_rate, emotion)
# Play the audio file
Audio(path)
# Select an emotion
emotion = "fear"
# Load the audio file
path = np.array(df["speech"][df["label"] == emotion])[0]
data, sampling_rate = librosa.load(path)
# Display the waveform and spectrogram
waveshow(data, sampling_rate, emotion)
spectogram(data, sampling_rate, emotion)
# Play the audio file
Audio(path)
# Select an emotion
emotion = "sad"
# Load the audio file
path = np.array(df["speech"][df["label"] == emotion])[0]
data, sampling_rate = librosa.load(path)
# Display the waveform and spectrogram
waveshow(data, sampling_rate, emotion)
spectogram(data, sampling_rate, emotion)
# Play the audio file
Audio(path)
# # Feature Extraction
# One of the most popular feature extraction techniques used in speech emotion recognition is Mel Frequency Cepstral Coefficients (MFCCs). Here's an explanation of MFCCs and an implementation using the TESS dataset:
# Mel Frequency Cepstral Coefficients (MFCCs)
# MFCCs are commonly used for speech and audio signal processing. They represent a compressed representation of the spectral envelope of a signal, which is useful for speech emotion recognition. Here's a brief overview of the steps involved in computing MFCCs:
# 1. Pre-emphasis: amplifies higher frequencies relative to lower ones to compensate for the natural drop-off of high-frequency energy in speech
# 2. Framing: the signal is divided into short, overlapping frames
# 3. Windowing: each frame is multiplied by a window function to reduce spectral leakage
# 4. Fourier Transform: a Fast Fourier Transform (FFT) is applied to each frame to obtain its frequency spectrum
# 5. Mel Filterbank: a set of overlapping triangular filters is applied to the frequency spectrum to transform it onto the mel scale
# 6. Logarithm: the logarithm of the filterbank energies is taken
# 7. Discrete Cosine Transform (DCT): the DCT is applied to the log filterbank energies to obtain the MFCCs
# Here's an implementation of MFCCs using the TESS dataset:
# compute MFCCs for each audio file
def extract_mfcc(filename):
y, sr = librosa.load(filename, duration=3, offset=0.5)
mfcc = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T, axis=0)
return mfcc
extract_mfcc(df["speech"][0])
X_mfcc = df["speech"].apply(lambda X: extract_mfcc(X))
X_mfcc
X = [x for x in X_mfcc]
X = np.array(X)
X.shape
X = np.expand_dims(X, -1)
X.shape
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import (
Dense,
Dropout,
Flatten,
Conv2D,
MaxPooling2D,
LSTM,
Bidirectional,
Input,
concatenate,
)
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.layers import Conv1D, MaxPooling1D, Dense, Dropout, LSTM
enc = OneHotEncoder()
y = enc.fit_transform(df[["label"]])
y.toarray()
y.shape
# Define CNN model
model = Sequential(
[
Conv1D(filters=32, kernel_size=3, activation="relu", input_shape=(40, 1)),
MaxPooling1D(pool_size=2),
Conv1D(filters=64, kernel_size=3, activation="relu"),
MaxPooling1D(pool_size=2),
Flatten(),
Dense(128, activation="relu"),
Dropout(0.2),
Dense(64, activation="relu"),
Dropout(0.2),
Dense(7, activation="softmax"),
]
)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
# Define LSTM model
model = Sequential(
[
LSTM(256, return_sequences=False, input_shape=(40, 1)),
Dropout(0.2),
Dense(128, activation="relu"),
Dropout(0.2),
Dense(64, activation="relu"),
Dropout(0.2),
Dense(7, activation="softmax"),
]
)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
from keras.layers import SimpleRNN
# Define RNN model
model = Sequential(
[
SimpleRNN(256, return_sequences=False, input_shape=(40, 1)),
Dropout(0.2),
Dense(128, activation="relu"),
Dropout(0.2),
Dense(64, activation="relu"),
Dropout(0.2),
Dense(7, activation="softmax"),
]
)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
# Define hybrid model
model = Sequential()
# Add a 1D convolutional layer
model.add(Conv1D(filters=64, kernel_size=3, activation="relu", input_shape=(40, 1)))
# Add a max pooling layer
model.add(MaxPooling1D(pool_size=2))
# Add a dropout layer
model.add(Dropout(0.2))
# Add an LSTM layer
model.add(LSTM(128, return_sequences=False))
# Add a dense output layer with softmax activation
model.add(Dense(7, activation="softmax"))
# Compile the model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Print the model summary
model.summary()
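# None of the models above is actually trained in this snippet. A minimal training sketch for the last compiled (hybrid) model, assuming the X and y arrays built earlier; the split ratio, epoch count and batch size below are illustrative choices:
X_train, X_test, y_train, y_test = train_test_split(
    X, y.toarray(), test_size=0.2, random_state=42
)
history = model.fit(
    X_train, y_train, validation_data=(X_test, y_test), epochs=30, batch_size=64
)
print(model.evaluate(X_test, y_test, verbose=0))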
|
# ## Import the required libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats.mstats import normaltest
from scipy.stats import chi2_contingency
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz, plot_tree
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import make_scorer, recall_score
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.inspection import permutation_importance
import time
from sklearn import tree
from imblearn.over_sampling import RandomOverSampler, SMOTE
import lime.lime_tabular
from sklearn.ensemble import AdaBoostClassifier
from xgboost import XGBClassifier
from sklearn.metrics import (
classification_report,
accuracy_score,
precision_recall_fscore_support,
confusion_matrix,
precision_score,
recall_score,
roc_auc_score,
)
import warnings
warnings.filterwarnings("ignore")
rs = 12
# ## Importing the Dataset
# reading the data
df = pd.read_csv("dataset.csv")
# Visualizing the table
df.head()
# # 1. Description of the Data
# The data is taken from Kaggle.com. Prediction of cerebral stroke is a very important study because it can save the lives of people who are at risk of cerebral stroke by looking into different attributes like smoking_status and hypertension.
# Below the name of attributes along with the datatype is shown.
# Information of the dataset
df.info()
# id is a unique number given to each case.
# Let's look into the classes and distribution of all the categorical variables.
# distribution of categorical variables
mask_categorical = df.dtypes != float
category_cols = df.columns[mask_categorical]
category_cols = category_cols.tolist()
category_cols.remove("id")
for col in category_cols:
fig, ax = plt.subplots(figsize=(10, 8))
plt1 = sns.countplot(data=df, x=col, order=df[col].value_counts().index)
plt1.set(xlabel=col, ylabel="Count")
plt.show()
plt.tight_layout()
# From the above plots we can see all the categorical variables along with the classes and distribution of each categorical variable.
# gender has 3 classes: Male, Female and Other.
# hypertension has 2 classes, 0 and 1, where 0 means no and 1 means yes.
# heart_disease has 2 classes, 0 and 1, where 0 means no and 1 means yes.
# ever_married has 2 classes: Yes and No.
# work_type has 5 classes: children, Private, Never_worked, Self-employed and Govt_job.
# Residence_type has 2 classes: Rural and Urban.
# smoking_status has 3 classes: never smoked, formerly smoked and smokes.
# stroke has 2 classes, 0 and 1, where 0 means not having a stroke and 1 means having a stroke. It is our target variable as well, and we can see that this is an imbalanced dataset, so we need to take the necessary steps to model the problem accurately.
# Let's look into the distribution of the continuous variables.
# distribution of continuous variables
mask_numeric = df.dtypes == float
numeric_cols = df.columns[mask_numeric]
numeric_cols = numeric_cols.tolist()
for col in numeric_cols:
sns.distplot(df[col], color="green", hist=True)
plt.show()
# From the above plots we can see that 'avg_glucose_level' and 'bmi' have right-skewed distributions. We might want to fix this at a later stage.
# # 2. Objectives
# Our goal will be to accurately predict the likelihood of a patient experiencing a cerebral stroke. Additionally, we will aim to identify the key contributing factors that can initiate a cerebral stroke. This information will help doctors evaluate the risk of cerebral stroke in their patients and take necessary preventive measures.
# 1. First, we will do data cleaning in the form of finding and handling null values in rows.
# 2. Looking into the distribution of the continuous attributes and dealing with them if they are not normally distributed.
# 3. Doing some statistical analysis on the data to get some inference.
# 4. Converting categorical variables of object data type to int type.
# 5. Scaling the continuous variables using StandardScaler.
# 6. Fitting simple classification models to the data using class reweighting.
# 7. Fitting progressively more complex models to get good accuracy and obtaining model explanations for the complex models.
# # 3. Data Cleaning and Feature Engineering
# The first step of this process is to find duplicates and remove them from the dataset. The id attribute is used to find the duplicates.
# Check for duplicate data
duplicate = df[df.duplicated(["id"])]
duplicate
# As we can see, there are no duplicate rows in the dataset.
# Now, let's check whether there are any columns with null values.
df.isnull().sum()
# From the above table it can be seen that there are null values in 'bmi' and 'smoking_status'.
# We will remove the rows where 'smoking_status' is not known, as this variable has a lot of null values, so replacing the nulls with some class could change the distribution drastically. From the plot below we can see that most of the rows where 'smoking_status' is not known belong to class 0 of the 'stroke' attribute, so dropping these rows will not have much impact on our goal, which is to correctly predict class 1 of 'stroke'.
smoke_null = df[df["smoking_status"].isnull()]
fig, ax = plt.subplots(figsize=(10, 8))
plt1 = sns.countplot(
data=smoke_null, x="stroke", order=smoke_null["stroke"].value_counts().index
)
plt1.set(xlabel="stroke", ylabel="Count")
plt.show()
plt.tight_layout()
# Removing rows where 'smoking_status' is null
df = df[df["smoking_status"].notnull()]
# We need to deal with the null value of the 'bmi'. But before that let's make a copy of the current dataframe.
# Let's make a copy of the dataframe.
X = df.copy()
X.drop("id", axis=1, inplace=True)
# We have already noticed the skewness of the variables 'bmi' and 'avg_glucose_level'. We need to fix this or it can cause problems for some of our ML models.
# Let's do normaltest on 'bmi'.
normaltest(X.bmi.dropna().values)
# The above result confirms our assumption that the distribution of 'bmi' is not normal.
# We will try to fix the skew of the 'bmi' variable in the next step by transforming the distribution of the variable.
from sklearn.preprocessing import QuantileTransformer
qt_bmi = QuantileTransformer(
n_quantiles=500, output_distribution="normal", random_state=0
)
X["bmi"] = qt_bmi.fit_transform(np.array(X["bmi"]).reshape(-1, 1))
sns.kdeplot(X["bmi"], shade=True)
plt.title("bmi")
plt.show()
# Let's do normaltest on the transformed 'bmi'.
normaltest(X.bmi.dropna().values)
# From the statistic above and also from the plot we can say that we have successfully fixed the skew of 'bmi'.
# Let's do normal test on 'avg_glucose_level'.
normaltest(X.avg_glucose_level.dropna().values)
# The statistic above suggests that 'avg_glucose_level' is not normally distributed, which can also be seen from the previously plotted distribution of 'avg_glucose_level'.
# In the next step we will try to fix the skew of 'avg_glucose_level' by transforming the distribution of the variable.
qt_avg_glucose_level = QuantileTransformer(
n_quantiles=500, output_distribution="normal", random_state=0
)
X["avg_glucose_level"] = qt_avg_glucose_level.fit_transform(
    np.array(X["avg_glucose_level"]).reshape(-1, 1)
)
sns.kdeplot(X["avg_glucose_level"], shade=True)
plt.title("avg_glucose_level")
plt.show()
# Let's do normal test on the transformed 'avg_glucose_level'.
normaltest(X.avg_glucose_level.dropna().values)
# From the statistics above we can say that now the 'avg_glucose_level' is normally distributed.
# Let's do normaltest on the variable 'age'.
normaltest(X.age.values)
# We can see that the 'age' variable is not normal. But in this case we are not going to apply transformation to 'age'. Rather we will look into the skew of the data and if the skew is low then we will leave the distribution as it is.
X.age.skew()
# We can see that the skew is low. So, we will leave the distribution as it is.
# Now we will try to find out the relation of the different variables with stroke.
# We will start with the continuous variables.
# In the next step we will plot the distribution of the continuous variables for each class of 'stroke', which is 0 and 1.
for col in numeric_cols:
sns.boxplot(x="stroke", y=col, data=X)
plt.title(col)
plt.show()
# From the above distributions we can say for sure that 'age' and 'avg_glucose_level' have a strong relation with 'stroke'.
# Let's do a statistical test to support one of our claims.
# Let's propose a hypothesis.
# Null hypothesis: Whether a person has a cerebral stroke does not depend on the person's avg_glucose_level.
# Alternate hypothesis: Whether a person has a cerebral stroke does depend on the person's avg_glucose_level.
# We set the significance level for p-value to be 0.05.
# Let's do another visual inspection first.
Y_Stroke = X.loc[X.stroke == 1]
N_Stroke = X.loc[X.stroke == 0]
Y_Stroke_avg_glucose_level = Y_Stroke.avg_glucose_level
N_Stroke_avg_glucose_level = N_Stroke.avg_glucose_level
sns.distplot(Y_Stroke_avg_glucose_level, color="green", hist=False)
sns.distplot(N_Stroke_avg_glucose_level, color="red", hist=False)
plt.legend(["Y_Stroke", "N_Stroke"])
# We can clearly see that the distribution of 'avg_glucose_level' for the different classes of stroke [0: N_Stroke, 1: Y_Stroke] is very different. Let's do a statistical test (F-test) to quantitatively test our hypothesis.
f, p = stats.f_oneway(Y_Stroke_avg_glucose_level, N_Stroke_avg_glucose_level)
print("F-statistic:", f)
print("p-value:", p)
# We can see that the p-value is much lower than 0.05, so we reject the null hypothesis and can say that 'avg_glucose_level' does have an effect on 'stroke'.
# Now we will look into the distribution of the categorical variables for stroke class 1. From this we will try to get an idea of the effect of the different classes of each categorical variable on stroke.
for col in category_cols:
if col != "stroke":
# calculate the percentage of occurrences
percentage = (
X.groupby(["stroke", col]).size().div(X.groupby([col]).size(), level=col)
* 100
)
percentage = percentage.reset_index(name="percentage")
# Create a stacked bar chart
# plt.figure(figsize=(10, 8))
Y_Stroke = percentage.loc[percentage.stroke == 1]
sns.barplot(x=col, y="percentage", data=Y_Stroke, dodge=True)
plt.title(col)
plt.show()
# The plots above show what percentage of each class has a stroke. For example, if we consider all the heart_disease class 1 people as 100%, more than 8% of those people have 'stroke' class 1.
# From the above plots we can say that the 'hypertension' and 'heart_disease' categorical variables do have a role to play in 'stroke'. Some variables like 'ever_married', 'work_type' and 'smoking_status' also appear to have an effect on 'stroke'.
# Let's do a statistical test to check whether 'smoking_status' has a relation with 'stroke'.
# Let's propose a hypothesis.
# Null hypothesis: Whether a person has a cerebral stroke does not depend on the person's smoking_status.
# Alternate hypothesis: Whether a person has a cerebral stroke does depend on the person's smoking_status.
# We set the significance level for p-value to be 0.05.
# We need to perform a chi-square test between the 'stroke' and 'smoking_status' columns.
ct = pd.crosstab(X["smoking_status"], X["stroke"])
chi2, p, dof, expected = chi2_contingency(ct)
print(f"chi2: {chi2}, p-value: {p}")
# As the p-value is much less than 0.05, we can reject the null hypothesis and say that 'smoking_status' does have a relation with 'stroke'.
# Let's split our dataset into a train set and a test set. We need a stratified split so that the ratio of the two classes of 'stroke' remains the same in the train set and the test set.
new_column_names = {old_name: old_name.replace("_", "-") for old_name in X.columns}
X = X.rename(columns=new_column_names)
y = X.pop("stroke")
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, stratify=y, random_state=12
)
# Let's check whether, after the split, the ratio of the classes in 'stroke' is the same or not.
train_set_distribution = dict(zip(*np.unique(y_train, return_counts=True)))
test_set_distribution = dict(zip(*np.unique(y_test, return_counts=True)))
print(
"Training set distribution: ",
{k: v / len(y_train) for k, v in train_set_distribution.items()},
)
print(
"Test set distribution: ",
{k: v / len(y_test) for k, v in test_set_distribution.items()},
)
# We can see that the ratio of the classes in the 'stroke' column is the same in the train set and the test set.
# Let's fill the null values of 'bmi' in X_train and X_test using the median of the variable.
from sklearn.impute import SimpleImputer
imputer_bmi = SimpleImputer(strategy="median")
imputer_bmi.fit(X_train[["bmi"]])
X_train[["bmi"]] = imputer_bmi.transform(X_train[["bmi"]])
X_test[["bmi"]] = imputer_bmi.transform(X_test[["bmi"]])
# Let's check the number of null values in train set and test set now.
X_train.isnull().sum()
X_test.isnull().sum()
# Now there are no null values in any column of the test set or the train set.
# We need to scale our continuous variables so that no single variable gets more weight during ML model fitting.
ss = StandardScaler()
mask_numeric = X_train.dtypes == float
numeric_cols = X_train.columns[mask_numeric]
numeric_cols = numeric_cols.tolist()
X_train[numeric_cols] = ss.fit_transform(X_train[numeric_cols])
X_test[numeric_cols] = ss.transform(X_test[numeric_cols])
X_train.head(10)
X_test.head(10)
# We need to deal with the categorical columns of object datatype. We will use OneHotEncoder to convert these categorical variables of object datatype to integer datatype so we can fit ML models.
mask_categorical_object = X_train.dtypes == object
category_cols_object_dtype = X_train.columns[mask_categorical_object]
category_cols_object_dtype = category_cols_object_dtype.tolist()
one_hot = ColumnTransformer(
transformers=[("one_hot", OneHotEncoder(), category_cols_object_dtype)],
remainder="passthrough",
)
X_train = one_hot.fit_transform(X_train)
X_test = one_hot.transform(X_test)
names = one_hot.get_feature_names_out()
# colunm_names=[name[name.find("_")+1:] for name in [name[name.find("__")+2:] for name in names]]
colunm_names = [name[name.find("__") + 2 :] for name in names]
X_train = pd.DataFrame(data=X_train, columns=colunm_names)
X_train.head(10)
X_test = pd.DataFrame(data=X_test, columns=colunm_names)
X_test.head(10)
for col in X_train.columns:
if set(X_train[col].unique()) == {0, 1}:
X_train[col] = X_train[col].astype(int)
for col in X_test.columns:
if set(X_test[col].unique()) == {0, 1}:
X_test[col] = X_test[col].astype(int)
# # 4. Classification Models
# Our main objective will be to get an explanation of 'stroke', which is why we can't use a dimensionality reduction algorithm on our dataset. After getting some explanation for 'stroke', we might focus on more accurate prediction.
# The classes in our target variable are imbalanced; in other words, our target variable is skewed. To deal with this, we can use SMOTE, class reweighting or undersampling. Of these, undersampling will not be a good choice because of the high skewness of the target variable 'stroke' shown earlier. We will go with class reweighting first (a SMOTE sketch is shown below for comparison).
# For this dataset, since predicting 'stroke' correctly has much more impact than incorrectly predicting 'stroke', we will focus on the 'Recall' and 'ROC-AUC' values of the model on the test set.
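# SMOTE is mentioned above as an alternative to class reweighting. A hedged sketch of how it could be applied (on the training split only, so the test set stays untouched); the resampled arrays are for comparison and are not used further in this notebook:
sm = SMOTE(random_state=rs)
X_train_sm, y_train_sm = sm.fit_resample(X_train, y_train)
print("Class counts after SMOTE:", dict(zip(*np.unique(y_train_sm, return_counts=True))))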
# ### a) Logistic Regression with Class Reweighting
# Let's first build simple logistic regression model without class reweighting and predict with it.
lr_model = LogisticRegression(random_state=rs, max_iter=1000)
lr_model.fit(X_train, y_train)
y_preds_lr = lr_model.predict(X_test)
# The function in the cell below gives us Accuracy, Precision, Recall, Fscore and ROC-AUC given the predicted and actual data.
def evaluate_metrics(yt, yp):
accuracy = accuracy_score(yt, yp)
precision, recall, fbeta, support = precision_recall_fscore_support(
yt, yp, beta=2, pos_label=1, average="binary"
)
auc = roc_auc_score(yt, yp)
print(f"Accuracy is: {accuracy:.2f}")
print(f"Precision is: {precision:.2f}")
print(f"Recall is: {recall:.2f}")
print(f"Fscore is: {fbeta:.2f}")
print(f"AUC is: {auc:.2f}")
# As we can see below this 'lr_model' didn't perform well in terms of AUC and Recall.
evaluate_metrics(y_test, y_preds_lr)
# Let's build a logistic regression with class_weight parameter which will do the class reweighting for us on the 'target variable'.
class_weight = {0: 0.05, 1: 0.95}
lr_model_class_weight = LogisticRegression(
random_state=rs, max_iter=1000, class_weight=class_weight
)
lr_model_class_weight.fit(X_train, y_train)
y_preds_lr_class_weight = lr_model_class_weight.predict(X_test)
# From the result below we can see that our logistic regression model improved with class reweighting.
evaluate_metrics(y_test, y_preds_lr_class_weight)
# Let's try to tune the hyper-parameters of Logistic Regression. (The cells below can be uncommented to reproduce the grid search that found the tuned hyper-parameters.)
# param_grid_lr = {
# 'penalty': ['l1', 'l2'],
# 'C': [0.001, 0.01, 0.1, 1,10],
# 'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
# 'class_weight': [{0:0.05, 1:0.95}, {0:0.1, 1:0.9},'balanced'],
# }
# cv = StratifiedKFold(n_splits=5)
# lr_grid_search = GridSearchCV(estimator=lr_model,param_grid=param_grid_lr,
# scoring='roc_auc',cv=cv,n_jobs=8)
# lr_grid_search.fit(X_train, y_train)
# lr_grid_search_best=lr_grid_search.best_estimator_
# print(lr_grid_search_best.get_params())
# y_preds_lr_grid_search_best=lr_grid_search_best.predict(X_test)
# evaluate_metrics(y_test, y_preds_lr_grid_search_best)
# Below is the Logistic Regression model with tuned hyper-parameter.
lr_class_weight_hp_tuned = LogisticRegression(
C=10,
class_weight={0: 0.05, 1: 0.95},
dual=False,
fit_intercept=True,
intercept_scaling=1,
l1_ratio=None,
max_iter=1000,
multi_class="auto",
n_jobs=8,
penalty="l2",
random_state=rs,
solver="lbfgs",
tol=0.0001,
verbose=0,
warm_start=False,
)
lr_class_weight_hp_tuned.fit(X_train, y_train)
y_preds_lr_class_weight_hp_tuned = lr_class_weight_hp_tuned.predict(X_test)
# From the results below we can see that the scores didn't improve much.
evaluate_metrics(y_test, y_preds_lr_class_weight_hp_tuned)
# Let's look into the coefficients of the tuned Logistic Regression model to get an idea of which attributes have a major relation with 'stroke'.
plt.figure(figsize=(10, 8))
colors = ["red" if c < 0 else "blue" for c in lr_class_weight_hp_tuned.coef_[0]]
plt.barh(
range(lr_class_weight_hp_tuned.coef_.shape[1]),
lr_class_weight_hp_tuned.coef_[0],
color=colors,
)
plt.yticks(range(lr_class_weight_hp_tuned.coef_.shape[1]), X_train.columns)
plt.ylabel("Feature")
plt.xlabel("Coefficient")
plt.title("Logistic Regression Coefficients")
plt.show()
# From the above plot we can say that 'stroke' has its strongest positive dependency on 'age', followed by 'heart-disease' and 'hypertension', and a negative dependency on the person belonging to the 'children' work type.
# ### b) Decision Tree Classifier with Class Reweighting
# Let's build our second model using a Decision Tree. First we will build a Decision Tree with default hyper-parameter values and without class reweighting.
dr_model = DecisionTreeClassifier(random_state=rs)
dr_model.fit(X_train, y_train.values.ravel())
y_preds_dr = dr_model.predict(X_test)
# From the below values we can say that 'dr_model' didn't perform well.
evaluate_metrics(y_test, y_preds_dr)
# Our next step is to build a Decision Tree with class_weight.
dt_model_class_weight = DecisionTreeClassifier(
class_weight=class_weight, random_state=rs
)
dt_model_class_weight.fit(X_train, y_train)
y_preds_dt_class_weight = dt_model_class_weight.predict(X_test)
# We can see that class reweighting didn't help with the AUC and Recall of the model. The most likely reason is that the model is overfitting; we can check this by comparing the metrics on the training split with those on the test split (see the sketch below).
evaluate_metrics(y_test, y_preds_dt_class_weight)
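# A quick way to check the overfitting suspicion (reusing the evaluate_metrics helper defined above) is to compute the same metrics on the training split; a large gap between the training and test scores would support the claim.
print("Metrics on the training set:")
evaluate_metrics(y_train, dt_model_class_weight.predict(X_train))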
# Let's tune the hyper-parameters of the decision tree with class weights in the hope of getting a better result. (The cells below can be uncommented to look into the results of the grid search.)
# param_grid_dt = {
# 'criterion': ['gini', 'entropy'],
# 'max_depth': [2, 5, 8],
# 'class_weight': [{0:0.05, 1:0.95},{0:0.01, 1:0.99}],
# 'min_samples_split': [2, 4, 6],
# 'max_features' : ["sqrt", "log2"]
# }
# cv = StratifiedKFold(n_splits=5)
# dt_grid_search = GridSearchCV(estimator=dr_model,param_grid=param_grid_dt,
# scoring='roc_auc',cv=cv,n_jobs=8)
# dt_grid_search.fit(X_train, y_train.values.ravel())
# dt_grid_search_best=dt_grid_search.best_estimator_
# print(dt_grid_search_best.get_params())
# y_preds_dt_grid_search_best=dt_grid_search_best.predict(X_test)
# evaluate_metrics(y_test, y_preds_dt_grid_search_best)
# A Decision Tree model with class_weight is built using the tuned hyper-parameters and trained in the below cells.
dt_class_weight_hp_tuned = DecisionTreeClassifier(
ccp_alpha=0.0,
class_weight={0: 0.05, 1: 0.95},
criterion="gini",
max_depth=5,
max_features="sqrt",
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
random_state=rs,
splitter="best",
)
dt_class_weight_hp_tuned.fit(X_train, y_train)
y_preds_dt_class_weight_hp_tuned = dt_class_weight_hp_tuned.predict(X_test)
# From the below data we can see that the performance of the Decision Tree has improved.
evaluate_metrics(y_test, y_preds_dt_class_weight_hp_tuned)
# Let's look into the tree built by the latest Decision Tree model.
tree_exp = export_text(dt_class_weight_hp_tuned, feature_names=list(X_train.columns))
# From the tree below we can see that high 'age' together with 'hypertension' and 'heart_disease' has a dominating effect on 'stroke'.
print(tree_exp)
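# As an optional alternative to the text export above, scikit-learn's plot_tree can draw the same
# fitted tree graphically (the below cell can be uncommented; the class names are illustrative labels):
# from sklearn.tree import plot_tree
# plt.figure(figsize=(20, 10))
# plot_tree(dt_class_weight_hp_tuned, feature_names=list(X_train.columns),
#           class_names=["no_stroke", "stroke"], filled=True)
# plt.show()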
# ### c) Bagging
# In this section we will build a Bagging Classifier. Let's first build a bagging classifier using the hyper-parameter-tuned Decision Tree from the last section.
bag_model = BaggingClassifier(
estimator=dt_class_weight_hp_tuned, n_estimators=20, random_state=rs, bootstrap=True
)
bag_model.fit(X_train, y_train)
y_preds_bag = bag_model.predict(X_test)
# From the below scores, we can say that the Bagging classifier is overfitting to the data.
evaluate_metrics(y_test, y_preds_bag)
# Let's tune the hyper-parameters of the Bagging Classifier. (The below cells can be uncommented to look into the grid-search results.)
# param_grid_bag = {'n_estimators': [5,10,20,30,50],
# 'estimator__criterion': ['gini', 'entropy'],
# 'estimator__max_depth': [2, 5, 8],
# 'estimator__class_weight': [{0:0.05, 1:0.95},{0:0.01, 1:0.99}],
# 'estimator__min_samples_split': [2, 4, 6],
# 'estimator__max_features' : ["sqrt", "log2",None],
# }
# cv = StratifiedKFold(n_splits=5)
# bag_grid_search = GridSearchCV(estimator=bag_model, param_grid=param_grid_bag,
# scoring='roc_auc', cv=cv,n_jobs=8)
# bag_grid_search.fit(X_train, y_train)
# bag_grid_search_best=bag_grid_search.best_estimator_
# print(bag_grid_search_best.get_params())
# y_preds_bag_grid_search_best=bag_grid_search_best.predict(X_test)
# evaluate_metrics(y_test, y_preds_bag_grid_search_best)
bag_class_weight_hp_tuned = BaggingClassifier(
DecisionTreeClassifier(
ccp_alpha=0.0,
class_weight={0: 0.05, 1: 0.95},
criterion="entropy",
max_depth=5,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
random_state=rs,
splitter="best",
),
n_estimators=30,
oob_score=False,
random_state=12,
warm_start=False,
max_features=1.0,
max_samples=1.0,
n_jobs=8,
bootstrap=True,
bootstrap_features=False,
)
bag_class_weight_hp_tuned.fit(X_train, y_train)
y_preds_bag_class_weight_hp_tuned = bag_class_weight_hp_tuned.predict(X_test)
# We can see that the BaggingClassifier performed poorly compared to the Decision Tree. The reason is the highly correlated trees in the ensemble.
evaluate_metrics(y_test, y_preds_bag_class_weight_hp_tuned)
# Let's visualize the correlation between the individual estimators.
def get_correlation(X_test, y_test, models):
    # This function calculates the average pairwise correlation between the predictions
    # of the individual estimators inside a fitted ensemble (e.g. a BaggingClassifier).
    n_estimators = len(models.estimators_)
    predictions = pd.DataFrame(
        {"estimator " + str(n + 1): [] for n in range(n_estimators)}
    )
    # Collect each estimator's predictions on the test set as one column per estimator.
    for key, model in zip(predictions.keys(), models.estimators_):
        predictions[key] = model.predict(X_test.to_numpy())
    corr = predictions.corr()
    print(
        "Average correlation between predictors: ",
        corr.mean().mean() - 1 / n_estimators,
    )
    return corr
# As we can see, the correlation between the trees is very high, which can cause overfitting to the data.
get_correlation(X_test, y_test, bag_class_weight_hp_tuned).style.background_gradient(
cmap="coolwarm"
)
# ### d) Random Forest Classifier with Class Reweighting
# Let's build our next ensemble model, the Random Forest Classifier.
# First we will build a model with largely default RandomForestClassifier settings.
rf_model = RandomForestClassifier(max_features="sqrt", n_estimators=10, random_state=rs)
rf_model.fit(X_train, y_train)
y_preds_rf = rf_model.predict(X_test)
# This Random Forest model didn't perform well, as we can see from the different scores below.
evaluate_metrics(y_test, y_preds_rf)
# Now we will build a model with the class_weight parameter and tune the hyper-parameters.
# params_grid_rf = {
# 'criterion': ['gini', 'entropy'],
# 'max_depth': [3, 5, 8],
# 'n_estimators': [5,10,20],
# 'class_weight': [{0:1, 1:50},{0:1,1:25},"balanced_subsample"],
# 'max_features': ["log2", "sqrt",None],
# }
# cv = StratifiedKFold(n_splits=5)
# rf_grid_search = GridSearchCV(estimator = rf_model,
# param_grid = params_grid_rf,
# scoring='roc_auc',
# cv = cv,n_jobs=8)
# rf_grid_search.fit(X_train,y_train)
# rf_grid_search_best=rf_grid_search.best_estimator_
# print(rf_grid_search_best.get_params())
# y_preds_rf_grid_search_best=rf_grid_search_best.predict(X_test)
# evaluate_metrics(y_test, y_preds_rf_grid_search_best)
# Below, a RandomForestClassifier model is built with the tuned hyper-parameters.
rf_class_weight_hp_tuned = RandomForestClassifier(
bootstrap=True,
class_weight={0: 1, 1: 50},
criterion="gini",
max_depth=3,
max_features=None,
n_estimators=10,
n_jobs=8,
random_state=rs,
)
rf_class_weight_hp_tuned.fit(X_train, y_train)
y_preds_rf_class_weight_hp_tuned = rf_class_weight_hp_tuned.predict(X_test)
# After training, we can see the different scores of the model; it outperformed the other models in terms of AUC and Recall.
evaluate_metrics(y_test, y_preds_rf_class_weight_hp_tuned)
# #### Random Forest model explanation with Permutation Feature Importance
def visualize_feature_importance(importance_array):
# Sort the array based on mean value
sorted_idx = importance_array.importances_mean.argsort()
# Visualize the feature importances using boxplot
fig, ax = plt.subplots()
fig.set_figwidth(16)
fig.set_figheight(10)
fig.tight_layout()
ax.boxplot(
importance_array.importances[sorted_idx].T,
vert=False,
labels=X_train.columns[sorted_idx],
)
ax.set_title("Permutation Importances (train set)")
plt.show()
feature_importances_rf = permutation_importance(
estimator=rf_class_weight_hp_tuned,
X=X_train,
y=y_train,
n_repeats=5,
random_state=rs,
n_jobs=8,
)
# The below chart gives us the importance of the features for 'stroke', according to the current model.
visualize_feature_importance(feature_importances_rf)
# #### Random Forest model explanation with LIME
# In this part we will try to understand why our Random Forest model thinks that a person will or will not have a stroke.
explainer = lime.lime_tabular.LimeTabularExplainer(
training_data=X_test.values,
mode="classification",
class_names=["no_stroke", "stroke"],
feature_names=list(X_train.columns),
random_state=rs,
verbose=True,
)
# Let's take an instance of the data.
instance_index = 55
selected_instance_55 = X_test.iloc[[instance_index]]
lime_test_instance_55 = selected_instance_55.values.reshape(-1)
selected_instance_55
# Let's see which class is assigned to this instance by the Random Forest.
rf_class_weight_hp_tuned.predict(selected_instance_55)
# Now we will try to understand why our model thinks that this instance belongs to the 'stroke' class (1).
exp_rf_55 = explainer.explain_instance(
lime_test_instance_55, rf_class_weight_hp_tuned.predict_proba, num_features=10
)
exp_rf_55.as_pyplot_figure()
# The reason is clearly the high 'age', the presence of 'heart_disease' and the high 'avg_glucose_level'.
# ### e) Boosting Classifiers with Class Reweighting
# #### i) AdaBoostClassifier
# We will first try AdaBoost (a popular boosting algorithm) with mostly default hyper-parameter values.
ab_model = AdaBoostClassifier(n_estimators=30, random_state=rs)
ab_model.fit(X_train, y_train)
y_preds_ab = ab_model.predict(X_test)
# Default Adaboost didn't perform well on the data.
evaluate_metrics(y_test, y_preds_ab)
# In AdaBoost there is no class_weight-like parameter to give a certain class more weight. So, we will pass a sample_weight array (computed with compute_sample_weight) to fit() to give more weight to the positive class. In the below cell the weights are computed.
sample_weights_ab = compute_sample_weight("balanced", y_train)
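# For reference, the "balanced" option weights each sample of class c by n_samples / (n_classes * n_c),
# where n_c is the number of samples of class c. The small sketch below reproduces that formula manually
# (assuming y_train is the binary 'stroke' target used above):
classes_ab, counts_ab = np.unique(np.ravel(y_train), return_counts=True)
manual_class_weights = {
    c: len(np.ravel(y_train)) / (len(classes_ab) * n) for c, n in zip(classes_ab, counts_ab)
}
print("Manually computed balanced class weights:", manual_class_weights)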
# Let's tune the hyper-parameters of the AdaBoostClassifier. (The below cells can be uncommented to do the hyper-parameter tuning.)
# param_grid_ab = {
# 'learning_rate': [0.001,0.01, 0.1, 1],
# 'n_estimators': [30,50, 100,200,300],
# 'algorithm': ['SAMME.R','SAMME']
# }
# cv = StratifiedKFold(n_splits=5)
# ab_grid_search = GridSearchCV(estimator = ab_model,
# param_grid = param_grid_ab,
# scoring='roc_auc',
# cv = cv,n_jobs=8)
# ab_grid_search.fit(X_train,y_train,sample_weight=sample_weights_ab)
# ab_grid_search_best=ab_grid_search.best_estimator_
# print(ab_grid_search_best.get_params())
# y_preds_ab_grid_search_best=ab_grid_search_best.predict(X_test)
# evaluate_metrics(y_test, y_preds_ab_grid_search_best)
# Now we will build a model with tuned hyper-parameters.
ab_class_weight_hp_tuned = AdaBoostClassifier(
algorithm="SAMME.R",
estimator=None,
learning_rate=0.1,
n_estimators=200,
random_state=rs,
)
ab_class_weight_hp_tuned.fit(X_train, y_train, sample_weight=sample_weights_ab)
y_preds_ab_class_weight_hp_tuned = ab_class_weight_hp_tuned.predict(X_test)
# These scores are a slight improvement over the Random Forest model.
evaluate_metrics(y_test, y_preds_ab_class_weight_hp_tuned)
# #### AdaBoostClassifier explanation with Permutation Feature Importance
feature_importances_ab = permutation_importance(
estimator=ab_class_weight_hp_tuned,
X=X_train,
y=y_train,
n_repeats=5,
random_state=rs,
n_jobs=8,
)
# The below chart gives us the importance of the features for 'stroke', according to the current model.
visualize_feature_importance(feature_importances_ab)
# Let's take the same instance of data with index 55 in the test set.
# Let's see which class is assigned to this instance by the AdaBoostClassifier.
ab_class_weight_hp_tuned.predict(selected_instance_55)
# Now we will try to understand why our model thinks that this instance belongs to the 'stroke' class (1).
exp_rf_55 = explainer.explain_instance(
lime_test_instance_55, ab_class_weight_hp_tuned.predict_proba, num_features=10
)
exp_rf_55.as_pyplot_figure()
# The reason is again clearly the high 'age', the presence of 'heart_disease' and the high 'avg_glucose_level', as was also the case for the Random Forest.
# #### ii) XGBClassifier
# In this section we will use another boosting algorithm called XGBoost. We will dive directly into the hyper-parameter tuning of the XGBClassifier with Class Reweighting; here the scale_pos_weight hyper-parameter handles the Class Reweighting. (The commented cells further below can be uncommented to re-run the grid search for the tuned hyper-parameters.)
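# Note: a common heuristic (an assumption here, not an output of the grid search) is to start
# scale_pos_weight near the ratio of negative to positive samples in the training set:
neg_count, pos_count = np.bincount(np.ravel(y_train).astype(int))
print("negative/positive ratio (a starting point for scale_pos_weight):", round(neg_count / pos_count, 1))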
# param_grid_xgb = {
# 'learning_rate': [0.1, 0.2, 0.5],
# 'max_depth': [1,3, 6],
# 'n_estimators': [50,100,200],
# 'scale_pos_weight': [50,75,90],
# 'reg_alpha':[0, 1, 10]
# }
# cv = StratifiedKFold(n_splits=5)
# xgb_grid_search = GridSearchCV(estimator = xgb_model,
# param_grid = param_grid_xgb,
# scoring='roc_auc',
# cv = cv,n_jobs=8)
# xgb_grid_search.fit(X_train,y_train)
# xgb_grid_search_best=xgb_grid_search.best_estimator_
# print(xgb_grid_search_best.get_params())
# y_preds_xgb_grid_search_best=xgb_grid_search_best.predict(X_test)
# evaluate_metrics(y_test, y_preds_xgb_grid_search_best)
# Let's build a XGBClassifier Model with tuned hyper-parameters.
xgb_class_weight_hp_tuned = XGBClassifier(
objective="binary:logistic",
learning_rate=0.1,
max_depth=1,
n_estimators=200,
n_jobs=8,
random_state=rs,
reg_alpha=10,
scale_pos_weight=50,
)
xgb_class_weight_hp_tuned.fit(X_train, y_train, sample_weight=sample_weights_ab)
y_preds_xgb_class_weight_hp_tuned = xgb_class_weight_hp_tuned.predict(X_test)
# The scores are almost equal to those of the AdaBoostClassifier.
evaluate_metrics(y_test, y_preds_xgb_class_weight_hp_tuned)
# # 5. Choice of Final Model
# For our task the AdaBoostClassifier (ab_class_weight_hp_tuned) is the best model, as its Recall and AUC are the highest. Higher Recall ensures that the 'stroke' class (1) is predicted correctly most of the time, and a higher AUC ensures that precision is also maintained. For interpretability, Logistic Regression is the best choice, as it is simple and easy to understand and its Recall and AUC values are also good.
# # 6. Insights and key findings
# i) Our target variable 'stroke' has a class imbalance, so we decided to go with the Class Reweighting method to fit our models to the data.
# ii) The attributes 'bmi' and 'avg_glucose_level' had skewed distributions, so we transformed them to get distributions closer to normal.
# iii) Using box plots we found that 'stroke' might depend on 'age' and 'avg_glucose_level'. Statistically we also found that 'stroke' does depend on 'avg_glucose_level'.
# iv) Using bar plots we found that 'heart_disease' and 'hypertension' have an effect on 'stroke'. We also found statistically that 'stroke' depends on 'smoking_status'.
# v) We fit simple models like Logistic Regression and Decision Tree to the data with class_weight; by plotting the coefficients and the tree we obtained an explanation of these models and found that 'age', 'hypertension' and 'avg_glucose_level' have the highest effect on 'stroke'.
# vi) We fitted more complex models like Random Forest, AdaBoost and XGBoost to the data and obtained better Recall and AUC values.
# vii) The model explanations of the complex models gave us the expected results.
# # 7. Next Steps
# As a next step we can reduce the dimensionality and collinearity of the dataset using Principal Component Analysis.
from sklearn.decomposition import PCA
pca = PCA()
pca.fit(X_train)
X_train_hat = pca.transform(X_train)
print(X_train_hat.shape)
# From the below graph we can see that around 12 PCA components can explain almost all the variance of the dataset.
plt.plot(pca.explained_variance_ratio_)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.title("Component-wise variance and cumulative explained variance")
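# The choice of 12 components further below can also be read off programmatically from the cumulative
# explained variance; 0.99 is just an illustrative threshold for this sketch.
n_components_99 = int(np.argmax(np.cumsum(pca.explained_variance_ratio_) >= 0.99)) + 1
print("Components needed to explain 99% of the variance:", n_components_99)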
X_train_hat_PCA = pd.DataFrame(
columns=[f"Projection on Component {i+1}" for i in range(len(X_train.columns))],
data=X_train_hat,
)
X_train_hat_PCA.head()
X_test_hat = pca.transform(X_test)
print(X_test_hat.shape)
X_test_hat_PCA = pd.DataFrame(
columns=[f"Projection on Component {i+1}" for i in range(len(X_train.columns))],
data=X_test_hat,
)
X_test_hat_PCA.head()
# We will select the first 12 PCA components as the attributes of our train and test sets.
N = 12
X_train_hat_PCA = X_train_hat_PCA.iloc[:, :N]
X_test_hat_PCA = X_test_hat_PCA.iloc[:, :N]
# #### RandomForest using PCA transformed data with class_weight
# We will create a RandomForestClassifier on the PCA-transformed data using class_weight.
# In the below (commented) cell the RandomForestClassifier is tuned with a grid search.
# params_grid_rf_class_weight_pca = {
# 'criterion': ['gini', 'entropy'],
# 'max_depth': [1, 3, 5, 8],
# 'n_estimators': [5,10,20,40],
# 'class_weight': [{0:1, 1:50},{0:1,1:25},"balanced_subsample"],
# 'max_features': ["log2", "sqrt",None],
# }
# cv = StratifiedKFold(n_splits=5)
# rf_pca_class_weight_grid_search = GridSearchCV(estimator = rf_model,
# param_grid = params_grid_rf_class_weight_pca,
# scoring='roc_auc',
# cv = cv,n_jobs=8)
# rf_pca_class_weight_grid_search.fit(X_train_hat_PCA,y_train)
# rf_pca_class_weight_grid_search_best=rf_pca_class_weight_grid_search.best_estimator_
# print(rf_pca_class_weight_grid_search_best.get_params())
# y_preds_rf_pca_class_weight_grid_search_best=rf_pca_class_weight_grid_search_best.predict(X_test_hat_PCA)
# evaluate_metrics(y_test, y_preds_rf_pca_class_weight_grid_search_best)
# We will build a RandomForestClassifier with the hyper-parameters tuned on the PCA-transformed train set.
rf_pca_class_weight_hp_tuned = RandomForestClassifier(
bootstrap=True,
class_weight={0: 1, 1: 25},
criterion="entropy",
max_depth=5,
max_features=None,
n_estimators=40,
n_jobs=8,
random_state=rs,
)
rf_pca_class_weight_hp_tuned.fit(X_train_hat_PCA, y_train)
y_preds_rf_pca_class_weight_hp_tuned = rf_pca_class_weight_hp_tuned.predict(
X_test_hat_PCA
)
# The scores for the model are shown here.
evaluate_metrics(y_test, y_preds_rf_pca_class_weight_hp_tuned)
# #### RandomForest using PCA transformed data with SMOTE
# We will use SMOTE to oversample our data so that the classes get balanced in the train set.
smote_sampler = SMOTE(random_state=rs)
X_smo, y_smo = smote_sampler.fit_resample(X_train_hat_PCA, y_train)
y_smo.value_counts().plot.bar(color=["green", "red"])
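# A quick sanity check (sketch): compare the class counts before and after resampling.
print("Class counts before SMOTE:", np.bincount(np.ravel(y_train).astype(int)))
print("Class counts after SMOTE :", np.bincount(np.ravel(y_smo).astype(int)))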
# params_grid_rf_pca_smote = {
# 'criterion': ['gini', 'entropy'],
# 'max_depth': [1, 3, 5, 8],
# 'n_estimators': [5,10,20,40],
# 'max_features': ["log2", "sqrt",None],
# }
# cv = StratifiedKFold(n_splits=5)
# rf_pca_smote_grid_search = GridSearchCV(estimator = rf_model,
# param_grid = params_grid_rf_pca_smote,
# scoring='roc_auc',
# cv = cv,n_jobs=8)
# rf_pca_smote_grid_search.fit(X_smo,y_smo)
# rf_pca_smote_grid_search_best=rf_pca_smote_grid_search.best_estimator_
# print(rf_pca_smote_grid_search_best.get_params())
# y_preds_rf_pca_smote_grid_search_best=rf_pca_smote_grid_search_best.predict(X_test_hat_PCA)
# evaluate_metrics(y_test, y_preds_rf_pca_smote_grid_search_best)
# The hyper-parameter-tuned model is shown here along with all the scores.
rf_pca_smote_hp_tuned = RandomForestClassifier(
bootstrap=True,
criterion="gini",
max_depth=8,
max_features=None,
n_estimators=40,
n_jobs=8,
random_state=rs,
)
rf_pca_smote_hp_tuned.fit(X_smo, y_smo)
y_preds_rf_pca_smote_hp_tuned = rf_pca_smote_hp_tuned.predict(X_test_hat_PCA)
evaluate_metrics(y_test, y_preds_rf_pca_smote_hp_tuned)
|
# ### Case Study: Finding the guest stars who appeared in the most-watched Office episode through data visualisation.
# In this notebook, we will initiate this process of data visualisation by creating an informative plot of the episode data. In doing so, we're going to work on several different variables, including the episode number, the viewership, the fan rating, and guest appearances.
# ### Ask:
# Problem to solve: The Office! What started as a British mockumentary series about office culture in 2001 has since spawned ten other variants across the world, including an Israeli version (2010-13), a Hindi version (2019-), and even a French Canadian variant (2006-2007). Of all these iterations (including the original), the American series has been the longest-running, spanning 201 episodes over nine seasons.
# In this notebook, we will take a look at a dataset of The Office episodes, try to understand how the popularity and quality of the series varied over time, and answer, through data visualisation, the question of which guest stars appeared in the most-watched Office episode.
# #### Project requirements:
# To answer it, we will build a single informative plot of the episode data that meets the requirements below.
# 1. Create a matplotlib scatter plot of the data that contains the following attributes:
# 1. Each episode's episode number plotted along the x-axis
# 2. Each episode's viewership (in millions) plotted along the y-axis
# 3. A color scheme reflecting the scaled ratings (not the regular ratings) of each episode, such that:
# 1. Ratings < 0.25 are colored "red"
# 2. Ratings >= 0.25 and < 0.50 are colored "orange"
# 3. Ratings >= 0.50 and < 0.75 are colored "lightgreen"
# 4. Ratings >= 0.75 are colored "darkgreen"
# 4. A sizing system, such that episodes with guest appearances have a marker size of 250 and episodes without are sized 25
# 5. A title, reading "Popularity, Quality, and Guest Appearances on the Office"
# 6. An x-axis label reading "Episode Number"
# 7. A y-axis label reading "Viewership (Millions)"
# ### Prepare
# 1. Download data and store it appropriately.
# Data is downloaded from /kaggle/input/the-office-dataset/the_office_series.csv and stored securely here at kaggle.
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# set the figure size parameters
plt.rcParams["figure.figsize"] = [11, 7]
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import data
office_df = pd.read_csv("/kaggle/input/the-office-dataset/the_office_series.csv")
# Let's get a feel of data and look at every column in a data frame.
print(office_df.head())
print(office_df.info())
print(office_df.columns)
# 2. Identify how it’s organized
# The data is in the form of one CSV file that contains 12 columns: 'Unnamed: 0', 'Season', 'EpisodeTitle', 'About', 'Ratings', 'Votes',
# 'Viewership', 'Duration', 'Date', 'GuestStars', 'Director', 'Writers'.
# 3. Sort and filter the data:
# The first column needs to be renamed to EpisodeNumber and incremented by one (+1) so that episode numbering starts at 1 instead of 0.
office_df.rename(columns={"Unnamed: 0": "EpisodeNumber"}, inplace=True)
office_df["EpisodeNumber"] = office_df["EpisodeNumber"] + 1
print(office_df.columns)
# NA (Not Available) data was detected on the column GuestStars.
office_df.isna().sum()
# GuestStars column has 159 Null values which indicates that some episodes had no guests.
# ### Process
# ##### Compare Ratings between the different seasons to find the best season(s)
season_avg_ranking = office_df.groupby("Season")["Ratings"].mean()
print(season_avg_ranking.sort_values(ascending=False))
season_avg_ranking.plot(kind="bar", x="Season", y="Ratings")
plt.title("Season Rating Ranking")
plt.xlabel("Seasons")
plt.ylabel("Average Ratings")
plt.show()
# The show started okay in the first season, had better ratings from season 2 to season 7, and then dropped in ratings over the final two seasons.
# #### Now let's calculate the ratings scale for each data point
# Calculate the min value of the Ratings column
min_value = office_df["Ratings"].min()
# Calculate the max value of the Ratings column
max_value = office_df["Ratings"].max()
# Calculate the range of data
data_range = max_value - min_value
# Calculate the Ratings scale
office_df["RatingScale"] = (office_df["Ratings"] - min_value) / data_range
print(office_df.head())
# Let's add a column HasGuests to hold True if the episode had guests and False if not.
office_df["HasGuests"] = ~office_df["GuestStars"].isna()
# Let's add a column 'colors' to represent the color scheme reflecting the scaled ratings (not the regular ratings) of each episode, such that:
# Ratings < 0.25 are colored "red"
# Ratings >= 0.25 and < 0.50 are colored "orange"
# Ratings >= 0.50 and < 0.75 are colored "lightgreen"
# Ratings >= 0.75 are colored "darkgreen"
cols = []
for index, row in office_df.iterrows():
if row["RatingScale"] < 0.25:
cols.append("red")
elif row["RatingScale"] < 0.50:
cols.append("orange")
elif row["RatingScale"] < 0.75:
cols.append("lightgreen")
else:
cols.append("darkgreen")
office_df["colors"] = cols
# Now we need to add another column to represent a sizing system, such that episodes with guest appearances have a marker size of 250 and episodes without are sized 25.
sizes = []
for index, row in office_df.iterrows():
if row["HasGuests"] == True:
sizes.append(250)
else:
sizes.append(25)
office_df["sizes"] = sizes
# Let's split our dataset into two dataframes: one with episodes that have guest stars and another without guest stars.
#
with_guests = office_df[office_df["HasGuests"] == True]
without_guests = office_df[office_df["HasGuests"] == False]
fig = plt.figure()
plt.style.use("fivethirtyeight")
plt.scatter(
x=without_guests["EpisodeNumber"],
y=without_guests["Viewership"],
c=without_guests["colors"],
s=without_guests["sizes"],
)
plt.scatter(
x=with_guests["EpisodeNumber"],
y=with_guests["Viewership"],
c=with_guests["colors"],
s=with_guests["sizes"],
marker="*",
)
plt.title("Popularity, Quality, and Guest Appearances on the Office")
plt.xlabel("Episode Number")
plt.ylabel("Viewership (Millions)")
plt.show()
# Now it's time to answer our question: who are the guest stars of the most-watched episode?
top_star = office_df[office_df["Viewership"] == office_df["Viewership"].max()][
"GuestStars"
]
print(top_star)
|
# # Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob
import os
# From tensorflow
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Import Sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# # Loading data
files = glob("/kaggle/input/bd-fish-data/fish data/*/*/*")
len(files)
files[:3]
File_Path = pd.Series(files).astype(str)
Labels = list(map(lambda x: os.path.split(os.path.split(x)[0])[1], files))
Labels = pd.Series(Labels)
Labels[:2]
df = pd.concat([File_Path, Labels], axis=1)
df.columns = ["image", "label"]
df
# Removing GT images
df = df[df["label"].apply(lambda x: x[-2:] != "GT")].reset_index(drop=True)
df
df.label.value_counts()
fig = plt.figure(figsize=(15, 8))
sns.countplot(df["label"])
# Train test split
x_train, x_test = train_test_split(df, test_size=0.2, random_state=30)
x_train, x_val = train_test_split(x_train, test_size=0.2, random_state=30)
print(x_test.shape)
print(x_train.shape)
print(x_val.shape)
# # Generating data
img_gen = ImageDataGenerator(rescale=1.0 / 255)
train = img_gen.flow_from_dataframe(
dataframe=x_train,
x_col="image",
y_col="label",
target_size=(200, 200),
color_mode="rgb",
class_mode="categorical",
shuffle=False,
)
test = img_gen.flow_from_dataframe(
dataframe=x_test,
x_col="image",
y_col="label",
target_size=(200, 200),
color_mode="rgb",
class_mode="categorical",
shuffle=False,
)
val = img_gen.flow_from_dataframe(
dataframe=x_val,
x_col="image",
y_col="label",
target_size=(200, 200),
color_mode="rgb",
class_mode="categorical",
shuffle=False,
)
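# Note: shuffle=False keeps each generator's order aligned with its dataframe rows, which is what lets
# us compare test.labels with the predictions later. For the training generator, shuffling every epoch
# is the more common choice; a possible variant (a sketch, not used above) would be:
# train_shuffled = img_gen.flow_from_dataframe(
#     dataframe=x_train, x_col="image", y_col="label", target_size=(200, 200),
#     color_mode="rgb", class_mode="categorical", shuffle=True,
# )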
# # Model creation
input_shape = (200, 200, 3)
model = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(16, (3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(16, (3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(16, (3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation="relu"),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(9, activation="softmax"),
]
)
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# # Training model
history = model.fit(train, validation_data=val, epochs=15)
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
plt.plot(accuracy, label="Training accuracy")
plt.plot(val_accuracy, label="Validation accuracy")
plt.legend()
plt.title("Training vs validation accuracy")
plt.subplot(2, 2, 2)
plt.plot(loss, label="Training loss")
plt.plot(val_loss, label="Validation loss")
plt.legend()
plt.title("Training vs validation loss")
plt.show()
# # Prediction
pred = model.predict(test)
pred = np.argmax(pred, axis=1)
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score
accuracy_score(test.labels, pred)
confusion_mtx = confusion_matrix(test.labels, pred)
plt.figure(figsize=(10, 8))
sns.heatmap(confusion_mtx, annot=True, annot_kws={"size": 10}, cmap="YlGnBu")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.plot()
print(classification_report(test.labels, pred))
# ## Test with dataset images
from tensorflow.keras.preprocessing import image
def predict_it(image_location):
test_image = image.load_img(image_location, target_size=(200, 200))
plt.imshow(test_image)
test_image = image.img_to_array(test_image)
test_image = test_image / 255
test_image = np.expand_dims(test_image, axis=0)
preds = np.argmax(model.predict(test_image))
print(f"The fish is {list(test.class_indices.keys())[preds]}")
predict_it("/kaggle/input/bd-fish-data/fish data/val/Chingri/download (1) - Copy.jpg")
predict_it("/kaggle/input/bd-fish-data/fish data/val/Rupchada/22.jpg")
# ## Test with online images
from urllib.request import urlopen
from PIL import Image
def predict_url(url):
img = Image.open(urlopen(url))
plt.imshow(img)
test_image = image.img_to_array(img)
test_image = tf.image.resize(test_image, (200, 200))
test_image = test_image / 255
test_image = np.expand_dims(test_image, axis=0)
preds = np.argmax(model.predict(test_image))
print(f"The fish is {list(test.class_indices.keys())[preds]}")
predict_url("https://bazarmama.xyz/wp-content/uploads/2022/07/089A8004.jpeg")
predict_url(
"https://img3.exportersindia.com/product_images/bc-full/dir_14/401539/trout-fish-1538328.jpg"
)
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
import random
import pandas as pd
from tensorflow.keras.utils import img_to_array, load_img
from keras.utils import np_utils
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
DATADIR = "/kaggle/input/fruit-dataset/fruits-360/Training"
DATADIR_test = "/kaggle/input/fruit-dataset/fruits-360/Test"
TYPES = ["Apple", "Banana", "Plum", "Cherry", "Grape", "Pear", "Peach"]
fruits = {}
def load_dataset(dire):
fruits = {}
images_as_array = []
labels = []
for category in tqdm(os.listdir(dire)):
for typ in TYPES:
if category.split()[0] == typ:
fruits[category] = typ
path = os.path.join(dire, category)
class_num = TYPES.index(fruits[category])
class_name = fruits[category]
for img in tqdm(os.listdir(path)):
file = os.path.join(path, img)
images_as_array.append(
img_to_array(load_img(file, target_size=(32, 32)))
)
labels.append(class_num)
images_as_array = np.array(images_as_array)
labels = np.array(labels)
return images_as_array, labels
train = load_dataset(DATADIR)
test = load_dataset(DATADIR_test)
x_train, y_train = train
x_test, y_test = test
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
no_of_classes = len(np.unique(y_train))
y_train = np_utils.to_categorical(y_train, no_of_classes)
y_test = np_utils.to_categorical(y_test, no_of_classes)
x_train, y_train = shuffle(x_train, y_train)
x_test, y_test = shuffle(x_test, y_test)
plt.figure(figsize=(3, 3))
plt.imshow(np.squeeze(x_train[1]))
plt.title("{}".format(TYPES[np.argmax(y_train[1])]))
split = len(x_test) * 80 // 100
x_test, x_valid = x_test[split:], x_test[:split]
y_test, y_valid = y_test[split:], y_test[:split]
print("Train set: ", x_train.shape[0])
print("Validation set: ", x_valid.shape[0])
print("Test set: ", x_test.shape[0])
input_shape = (32, 32, 3)
num_classes = no_of_classes
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(
6,
kernel_size=(5, 5),
strides=(1, 1),
activation="tanh",
input_shape=input_shape,
padding="same",
),
tf.keras.layers.AveragePooling2D(
pool_size=(2, 2), strides=(2, 2), padding="valid"
),
tf.keras.layers.Conv2D(
16, kernel_size=(5, 5), strides=(1, 1), activation="tanh", padding="valid"
),
tf.keras.layers.AveragePooling2D(
pool_size=(2, 2), strides=(2, 2), padding="valid"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(120, activation="tanh"),
tf.keras.layers.Dense(84, activation="tanh"),
tf.keras.layers.Dense(num_classes, activation="softmax"),
]
)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
history = model.fit(
x_train,
y_train,
batch_size=32,
epochs=10,
validation_data=(x_valid, y_valid),
verbose=2,
)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
y_pred = model.predict(x_test)
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=3, replace=False)):
plt.figure(figsize=(3, 3))
plt.imshow(np.squeeze(x_test[idx]))
pred_idx = np.argmax(y_pred[idx])
true_idx = np.argmax(y_test[idx])
plt.title("{} ({})".format(TYPES[pred_idx], TYPES[true_idx]))
plt.show()
|
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from matplotlib import pyplot as plt
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
sns.barplot(x="Sex", y="Survived", data=train_data)
test_data.info()
train_data.info()
test_data.describe()
train_data.describe()
data = [train_data, test_data]
data
train_data.dtypes
test_data.dtypes
train_data.isnull().sum()
test_data.isnull().sum()
sns.barplot(x="Pclass", y="Survived", data=train_data)
sns.barplot(x="Sex", y="Survived", data=train_data)
sns.barplot(x="Age", y="Survived", data=train_data)
train_data["Sex"] = train_data.Sex.map(dict(male=1, female=0))
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
train_data["Sex"] = imp.fit_transform(train_data[["Sex"]]).astype(int)
train_data.dtypes
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
train_data["Age"] = imp.fit_transform(train_data[["Age"]])
train_data["Age"] = round(train_data["Age"], 2).astype(int)
train_data
train_data.dtypes
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
train_data["Fare"] = imp.fit_transform(train_data[["Fare"]]).astype(int)
train_data
train_data.dtypes
train_data["LastName"] = train_data["Name"].str.split(",").str.get(1)
train_data["Title"] = train_data["LastName"].str.split(".").str.get(0)
train_data["Title"]
test_data["LastName"] = test_data["Name"].str.split(",").str.get(1)
test_data["Title"] = test_data["LastName"].str.split(".").str.get(0)
test_data["Title"]
imp = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
train_data["Title"] = imp.fit_transform(train_data[["Title"]])
train_data["Title"]
imp = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
test_data["Title"] = imp.fit_transform(test_data[["Title"]])
test_data["Title"]
for dataset in data:
dataset["Title"] = dataset.Name.str.extract(" ([A-Za-z]+)\.", expand=False)
pd.crosstab(train_data["Title"], train_data["Sex"])
for dataset in data:
dataset["Title"] = dataset["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
],
"Rare",
)
dataset["Title"] = dataset["Title"].replace("Mlle", "Miss")
dataset["Title"] = dataset["Title"].replace("Ms", "Miss")
dataset["Title"] = dataset["Title"].replace("Mme", "Mrs")
train_data[["Title", "Survived"]].groupby(["Title"], as_index=False).mean()
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in data:
dataset["Title"] = dataset["Title"].map(title_mapping)
dataset["Title"] = dataset["Title"].fillna(0)
train_data.head()
train_data["Title"].astype(int)
train_data.dtypes
train_data = train_data.drop(["Name", "LastName"], axis=1)
train_data.dtypes
test_data.dtypes
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
test_data["Age"] = imp.fit_transform(test_data[["Age"]])
test_data["Age"] = round(test_data["Age"], 2).astype(int)
test_data
test_data["Sex"] = test_data.Sex.map(dict(male=1, female=0))
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
test_data["Sex"] = imp.fit_transform(test_data[["Sex"]]).astype(int)
test_data.dtypes
test_data = test_data.drop(["Name", "LastName"], axis=1)
test_data.dtypes
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
test_data["Fare"] = imp.fit_transform(test_data[["Fare"]]).astype(int)
test_data
test_data.dtypes
test_data["Embarked"] = test_data.Embarked.map(dict(S=1, C=2, Q=0))
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
test_data["Embarked"] = imp.fit_transform(test_data[["Embarked"]]).astype(int)
test_data
train_data["Embarked"] = train_data.Embarked.map(dict(S=1, C=2, Q=0))
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
train_data["Embarked"] = imp.fit_transform(train_data[["Embarked"]]).astype(int)
train_data.head(5)
train_data.dtypes
test_data.dtypes
test_data.info()
train_data.Cabin = train_data.Cabin.fillna("U")
test_data.Cabin = test_data.Cabin.fillna("U")
train_data
train_data["FamilySize"] = train_data.Parch + train_data.SibSp + 1
test_data["FamilySize"] = test_data.Parch + test_data.SibSp + 1
train_data.Cabin = train_data.Cabin.map(lambda x: x[0])
test_data.Cabin = test_data.Cabin.map(lambda x: x[0])
train_data.tail(5)
train_data["Cabin"].unique()
test_data["Cabin"].unique()
train_data["Cabin"] = train_data["Cabin"].replace("T", "U")
train_data["Cabin"].unique()
test_data["Cabin"] = test_data.Cabin.map(dict(A=1, B=2, C=3, D=4, E=5, F=6, G=7, U=0))
train_data["Cabin"] = train_data.Cabin.map(dict(A=1, B=2, C=3, D=4, E=5, F=6, G=7, U=0))
train_data
test_data[22:30]
train_data = train_data.drop(["PassengerId"], axis=1)
train_data.head(5)
test_data.head(5)
train_data = train_data.drop(["Parch", "SibSp"], axis=1)
test_data = test_data.drop(["Parch", "SibSp"], axis=1)
data = [train_data, test_data]
train_data.head(5)
train_data["FareRange"] = pd.qcut(train_data["Fare"], 4)
train_data[["FareRange", "Survived"]].groupby(["FareRange"], as_index=False).mean()
for dataset in data:
dataset.loc[dataset["Fare"] <= 7.91, "Fare"] = 0
dataset.loc[(dataset["Fare"] > 7.91) & (dataset["Fare"] <= 14.454), "Fare"] = 1
dataset.loc[(dataset["Fare"] > 14.454) & (dataset["Fare"] <= 31), "Fare"] = 2
dataset.loc[dataset["Fare"] > 31, "Fare"] = 3
dataset["Fare"] = dataset["Fare"].astype(int)
train_data = train_data.drop(["FareRange"], axis=1)
data = [train_data, test_data]
train_data.head(10)
train_data["AgeRange"] = pd.qcut(train_data["Age"], 4)
train_data[["AgeRange", "Survived"]].groupby(["AgeRange"], as_index=False).mean()
for dataset in data:
dataset.loc[dataset["Age"] <= 22, "Age"] = 0
dataset.loc[(dataset["Age"] > 22) & (dataset["Age"] <= 29), "Age"] = 1
dataset.loc[(dataset["Age"] > 29) & (dataset["Age"] <= 35), "Age"] = 2
dataset.loc[dataset["Age"] > 35, "Age"] = 3
dataset["Age"] = dataset["Age"].astype(int)
train_data = train_data.drop(["AgeRange"], axis=1)
data = [train_data, test_data]
train_data.head(10)
sns.barplot(x="Age", y="Survived", data=train_data)
train_data = train_data.drop(["Ticket"], axis=1)
test_data = test_data.drop(["Ticket"], axis=1)
data = [train_data, test_data]
train_data.head(5)
train_data.dtypes
test_data.head(5)
X_train = train_data.drop("Survived", axis=1)
Y_train = train_data["Survived"]
X_test = test_data.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# **Logistic Regression**
log_regression = LogisticRegression()
log_regression.fit(X_train, Y_train)
Y_pred = log_regression.predict(X_test)
accuracy = round(log_regression.score(X_train, Y_train) * 100, 2)
accuracy
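# Note: the accuracy above is measured on the training data, so it is an optimistic estimate.
# A cross-validated estimate (a sketch; 5 folds chosen purely for illustration) could be obtained with:
# from sklearn.model_selection import cross_val_score
# cv_scores = cross_val_score(LogisticRegression(max_iter=1000), X_train, Y_train, cv=5)
# print(round(cv_scores.mean() * 100, 2))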
# **Linear Regression**
model = LinearRegression()
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
accuracy = round(model.score(X_train, Y_train) * 100, 2)  # note: LinearRegression.score returns R^2 on the training data, not classification accuracy
accuracy
# **Gaussian Naive Bayes Classifier**
gnb = GaussianNB()
gnb.fit(X_train, Y_train)
Y_pred = gnb.predict(X_test)
accuracy = round(gnb.score(X_train, Y_train) * 100, 2)
accuracy
# **K-means Clustering**
kmeans = KMeans()
kmeans.fit(X_train, Y_train)
Y_pred = kmeans.predict(X_test)
accuracy = round(kmeans.score(X_train, Y_train) * 100, 2)  # note: KMeans.score returns the negative inertia, not classification accuracy
accuracy
# **Random Forest Classifier**
clf = RandomForestClassifier()
clf.fit(X_train, Y_train)
prediction = clf.predict(X_test)
accuracy = round(clf.score(X_train, Y_train) * 100, 2)
accuracy
# **Decision Tree**
dt = DecisionTreeClassifier()
dt.fit(X_train, Y_train)
Y_pred = dt.predict(X_test)
accuracy = round(dt.score(X_train, Y_train) * 100, 2)
accuracy
# **KNN**
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
accuracy = round(knn.score(X_train, Y_train) * 100, 2)
accuracy
Sowjfile = pd.DataFrame(
{"PassengerId": test_data["PassengerId"], "Survived": prediction}
)
Sowjfile.to_csv("Sowjfile.csv", index=False)
|
# # Adding features to data to improve model performance
# ## Importing modules and data
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
df = pd.read_csv("../input/fe-course-data/concrete.csv")
df.head()
# ## Creating a baseline model
# - Essential at the start of feature engineering, as it helps decide whether your new features are worth keeping or not.
X = df.copy()
y = X.pop("CompressiveStrength")
X, y
# Train and score baseline model
baseline = RandomForestRegressor(criterion="mae", random_state=0)
baseline_score = cross_val_score(
baseline, X, y, cv=5, scoring="neg_mean_absolute_error"
)
baseline_score = -1 * baseline_score.mean()
print(f"MAE Baseline Score: {baseline_score:.4}")
# ## Adding new features to data
# Create synthetic features
X["FCRatio"] = X["FineAggregate"] / X["CoarseAggregate"]
X["AggCmtRatio"] = (X["CoarseAggregate"] + X["FineAggregate"]) / X["Cement"]
X["WtrCmtRatio"] = X["Water"] / X["Cement"]
X
# Train and score model on dataset with additional ratio features
model = RandomForestRegressor(criterion="mae", random_state=0)
score = cross_val_score(model, X, y, cv=5, scoring="neg_mean_absolute_error")
score = -1 * score.mean()
print(f"MAE Score with Ratio Features: {score:.4}")
|
import pandas as pd
df = pd.read_csv("/kaggle/input/credit-risk-customers/credit_customers.csv")
df.head()
df.shape
df.dtypes
n = 1
for columnName, columnData in df.items():  # .iteritems() was removed in pandas 2.0; .items() is the replacement
if columnData.dtype == "O":
print("S.no : ", n)
print("Name : ", columnName)
print("Unique : ", columnData.unique())
print("No : ", len(columnData.unique()))
print()
n += 1
else:
pass
df[["sex", "marriage"]] = df.personal_status.str.split(" ", expand=True)
df.drop(["personal_status"], axis=1, inplace=True)
df.head(5)
df["checking_status"].replace(
["no checking", "<0", "0<=X<200", ">=200"], [0, 1, 2, 3], inplace=True
)
df["credit_history"].replace(
[
"critical/other existing credit",
"delayed previously",
"existing paid",
"no credits/all paid",
"all paid",
],
[0, 1, 2, 2, 2],
inplace=True,
)
df["purpose"].replace(
[
"business",
"new car",
"used car",
"education",
"retraining",
"other",
"domestic appliance",
"radio/tv",
"furniture/equipment",
"repairs",
],
[5, 5, 4, 4, 3, 3, 3, 2, 2, 1],
inplace=True,
)
df["savings_status"].replace(
["no known savings", "<100", "100<=X<500", "500<=X<1000", ">=1000"],
[0, 1, 2, 3, 4],
inplace=True,
)
df["employment"].replace(
["unemployed", "<1", "1<=X<4", "4<=X<7", ">=7"], [0, 1, 2, 3, 4], inplace=True
)
df["other_parties"].replace(
["none", "co applicant", "guarantor"], [0, 1, 2], inplace=True
)
df["property_magnitude"].replace(
["no known property", "life insurance", "car", "real estate"],
[0, 1, 2, 3],
inplace=True,
)
df["other_payment_plans"].replace(["none", "stores", "bank"], [0, 1, 1], inplace=True)
df["housing"].replace(["for free", "rent", "own"], [0, 1, 2], inplace=True)
df["job"].replace(
[
"unemp/unskilled non res",
"unskilled resident",
"skilled",
"high qualif/self emp/mgmt",
],
[0, 1, 2, 3],
inplace=True,
)
df["own_telephone"].replace(["yes", "none"], [1, 0], inplace=True)
df["foreign_worker"].replace(["yes", "no"], [1, 0], inplace=True)
df["class"].replace(["good", "bad"], [1, 0], inplace=True)
df["sex"].replace(["male", "female"], [1, 0], inplace=True)
df["marriage"].replace(
["single", "div/sep", "div/dep/mar", "mar/wid"], [0, 0, 1, 1], inplace=True
)
df.head()
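# The manual .replace() chains above give full control over each ordering. An alternative sketch
# (not used in this notebook, shown for one column only) would be scikit-learn's OrdinalEncoder
# with an explicit category order; it would have to be applied before the replacements above:
# from sklearn.preprocessing import OrdinalEncoder
# savings_order = [["no known savings", "<100", "100<=X<500", "500<=X<1000", ">=1000"]]
# enc = OrdinalEncoder(categories=savings_order)
# df["savings_status"] = enc.fit_transform(df[["savings_status"]]).astype(int)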
df.dtypes
from sklearn.preprocessing import StandardScaler
X = df.drop(["class"], axis=1)
y = df["class"]
std_scaler = StandardScaler()
Xa = std_scaler.fit_transform(X)
X = pd.DataFrame(Xa, columns=X.columns)
df.columns
from sklearn.model_selection import train_test_split
a, d, s, f = train_test_split(X, y)  # a = X_train, d = X_test, s = y_train, f = y_test
y
X.shape
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
for i in [a, s, d, f]:
print(i.shape)
model.fit(a, s)
g = model.predict(d)
from sklearn.metrics import classification_report
classification_report(f, g)
def model_eval(m):
model = m
model.fit(a, s)
g = model.predict(d)
print(model)
print(classification_report(f, g))
model_eval(LogisticRegression())
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, max_features=20)
model_eval(rf)
|